Merge "spi: spi-geni-qcom: Add support for DFS clk select"
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index d534246..01e865d 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -182,6 +182,11 @@
 
 When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
 attribute can be used to force a buffer to be mapped as IO coherent.
+
+When the DMA_ATTR_FORCE_COHERENT attribute is set during a map call,
+ensure that it is also set for the matching unmap call so that the
+correct cache maintenance is carried out.
+
 This DMA attribute is only currently supported for arm64 stage 1 IOMMU
 mappings.
 
@@ -193,5 +198,10 @@
 coherent.
 The DMA_ATTR_FORCE_NON_COHERENT DMA attribute overrides the buffer IO
 coherency configuration set by making the device IO coherent.
+
+When the DMA_ATTR_FORCE_NON_COHERENT attribute is set during a map call,
+ensure that it is also set for the matching unmap call so that the
+correct cache maintenance is carried out.
+
 This DMA attribute is only currently supported for arm64 stage 1 IOMMU
 mappings.
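
As a rough illustration of the map/unmap pairing described above, here is a
minimal sketch. It assumes a 4.9-based arm64 kernel that provides the
downstream DMA_ATTR_FORCE_COHERENT attribute; the function, device and buffer
names are placeholders, not part of this patch.

	#include <linux/dma-mapping.h>

	static int force_coherent_map_example(struct device *dev, void *buf,
					      size_t len)
	{
		unsigned long attrs = DMA_ATTR_FORCE_COHERENT;
		dma_addr_t iova;

		/* Attribute set at map time... */
		iova = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, attrs);
		if (dma_mapping_error(dev, iova))
			return -ENOMEM;

		/*
		 * ...and the same attrs passed to the matching unmap so that
		 * the correct cache maintenance is carried out.
		 */
		dma_unmap_single_attrs(dev, iova, len, DMA_TO_DEVICE, attrs);
		return 0;
	}
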
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
index 50488b4..628b2aa 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -81,6 +81,10 @@
 -qcom,fg-cnt : The value of fine grained counter of activity monitor
 	        block.
 
+compatible devices:
+		qcom,sdm845-llcc,
+		qcom,sdm670-llcc
+
 Example:
 
 	qcom,llcc@01300000 {
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index c467327..0c75cf6 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -32,6 +32,8 @@
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
 		      order as the clocks property.
+- qcom,keep-radio-on-during-sleep: Boolean flag to indicate whether to suspend to
+				   D3hot instead of turning off the device
 
 Example:
 	wil6210: qcom,wil6210 {
@@ -56,5 +58,6 @@
 		clocks = <&clock_gcc clk_rf_clk3>,
 			 <&clock_gcc clk_rf_clk3_pin>;
 		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+		qcom,keep-radio-on-during-sleep;
 	};
 
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
index dc93b35..313e50f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,camcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
@@ -2,7 +2,7 @@
 ----------------------------------------------------
 
 Required properties :
-- compatible : shall contain "qcom,cam_cc-sdm845"
+- compatible : shall contain "qcom,cam_cc-sdm845" or "qcom,cam_cc-sdm845-v2"
 - reg : shall contain base register location and length
 - reg-names: names of registers listed in the same order as in
 	     the reg property.
diff --git a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
index 92828e0..87af0f6 100644
--- a/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,dispcc.txt
@@ -2,7 +2,7 @@
 ----------------------------------------------------
 
 Required properties :
-- compatible : shall contain "qcom,dispcc-sdm845".
+- compatible : shall contain "qcom,dispcc-sdm845" or "qcom,dispcc-sdm845-v2".
 - reg : shall contain base register location and length.
 - reg-names: names of registers listed in the same order as in
 	     the reg property.
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index d95aa59..c280b92 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -17,6 +17,7 @@
 			"qcom,gcc-msm8996"
 			"qcom,gcc-mdm9615"
 			"qcom,gcc-sdm845"
+			"qcom,gcc-sdm845-v2"
 			"qcom,debugcc-sdm845"
 
 - reg : shall contain base register location and length
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
index 5dc109d..6bd0f0b 100644
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
@@ -2,12 +2,13 @@
 ----------------------------------------------------
 
 Required properties :
-- compatible : shall contain "qcom,video_cc-sdm845"
-- reg : shall contain base register location and length
+- compatible : shall contain "qcom,video_cc-sdm845" or
+	       "qcom,video_cc-sdm845-v2".
+- reg : shall contain base register location and length.
 - reg-names: names of registers listed in the same order as in
 	     the reg property.
-- #clock-cells : shall contain 1
-- #reset-cells : shall contain 1
+- #clock-cells : shall contain 1.
+- #reset-cells : shall contain 1.
 
 Optional properties :
 - vdd_<rail>-supply: The logic rail supply.
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index aefd42c..47fc465 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -132,8 +132,6 @@
 				power collapse feature available or not.
 - qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
 				feature available or not.
-- qcom,sde-has-cdp:		Boolean property to indicate if cdp feature is
-				available or not.
 - qcom,sde-sspp-clk-ctrl:	Array of offsets describing clk control
 				offsets for dynamic clock gating. 1st value
 				in the array represents offset of the control
@@ -236,6 +234,8 @@
 				of (pps, OT limit), where pps is pixel per second and
 				OT limit is the write limit to apply if the given
 				pps is not exceeded.
+- qcom,sde-vbif-memtype-0:	Array of u32 vbif memory type settings, group 0
+- qcom,sde-vbif-memtype-1:	Array of u32 vbif memory type settings, group 1
 - qcom,sde-wb-id:		Array of writeback ids corresponding to the
 				offsets defined in property: qcom,sde-wb-off.
 - qcom,sde-wb-clk-ctrl:		Array of 2 cell property describing clk control
@@ -319,6 +319,22 @@
 				<fill level, lut hi, lut lo> in ascending fill level
 				indicating the qos luts for cwb on sspp.
 				Zero fill level on the last entry identifies the default lut.
+- qcom,sde-cdp-setting:		Array of 2 cell property, with a format of
+				<read enable, write enable> for cdp use cases in
+				order of <real_time> and <non_real_time>.
+- qcom,sde-inline-rot-xin:	An integer array of xin-ids related to inline
+				rotation.
+- qcom,sde-inline-rot-xin-type:	A string array indicating the type of xin,
+				namely sspp or wb. Number of entries should match
+				the number of xin-ids defined in
+				property: qcom,sde-inline-rot-xin
+- qcom,sde-inline-rot-clk-ctrl:	Array of offsets describing clk control
+				offsets for dynamic clock gating. 1st value
+				in the array represents offset of the control
+				register. 2nd value represents bit offset within
+				control register. Number of offsets defined should
+				match the number of xin-ids defined in
+				property: qcom,sde-inline-rot-xin
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -447,7 +463,6 @@
     qcom,sde-ubwc-static = <0x100>;
     qcom,sde-ubwc-swizzle = <0>;
     qcom,sde-panic-per-pipe;
-    qcom,sde-has-cdp;
     qcom,sde-has-src-split;
     qcom,sde-has-dim-layer;
     qcom,sde-sspp-src-size = <0x100>;
@@ -517,6 +532,8 @@
     qcom,sde-qos-lut-cwb =
             <0 0x75300000 0x00000000>;
 
+    qcom,sde-cdp-setting = <1 1>, <1 0>;
+
     qcom,sde-vbif-off = <0 0>;
     qcom,sde-vbif-id = <0 1>;
     qcom,sde-vbif-default-ot-rd-limit = <32>;
@@ -525,6 +542,8 @@
         <124416000 4>, <248832000 16>;
     qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
         <124416000 4>, <248832000 16>;
+    qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+    qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
 
     qcom,sde-dram-channels = <2>;
     qcom,sde-num-nrt-paths = <1>;
@@ -587,6 +606,9 @@
     };
 
     qcom,sde-inline-rotator = <&mdss_rotator 0>;
+    qcom,sde-inline-rot-xin = <10 11>;
+    qcom,sde-inline-rot-xin-type = "sspp", "wb";
+    qcom,sde-inline-rot-clk-ctrl = <0x2bc 0x8>, <0x2bc 0xc>;
 
     qcom,platform-supply-entries {
        #address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index d0d7fff..59fa6a0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -16,7 +16,7 @@
                         "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
                         "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
                         "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
-                        "qcom,mdss_hdmi_pll_8998"
+                        "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
new file mode 100644
index 0000000..b7ce662
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-haptics.txt
@@ -0,0 +1,257 @@
+Qualcomm Technologies, Inc. Haptics driver
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) Haptics is a peripheral on some
+QTI PMICs. It can be interfaced with the host processor via SPMI or I2C bus.
+
+Haptics peripheral can support different actuators or vibrators,
+1. Eccentric Rotation Mass (ERM)
+2. Linear Resonant Actuator (LRA)
+
+Also, it can support multiple modes of operation: Direct, Buffer, PWM or Audio.
+
+Haptics device is described under a single level of node.
+
+Properties:
+
+- compatible
+  Usage:      required
+  Value type: <string>
+  Definition: "qcom,qpnp-haptics".
+
+- reg
+  Usage:      required
+  Value type: <u32>
+  Definition: Base address of haptics peripheral.
+
+- interrupts
+  Usage:      required
+  Value type: <prop-encoded-array>
+  Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+  Usage:      required
+  Value type: <stringlist>
+  Definition: Interrupt names.  This list must match up 1-to-1 with the
+		interrupts specified in the 'interrupts' property. Currently
+		supported interrupts are short-circuit and play.
+
+- qcom,pmic-revid
+  Usage:      required
+  Value type: <phandle>
+  Definition: Should specify the phandle of PMIC's revid module. This is used to
+		identify the PMIC subtype.
+
+- qcom,pmic-misc
+  Usage:      optional
+  Value type: <phandle>
+  Definition: Should specify the phandle of PMIC's misc module. This is used to
+		read the clock trim error register under MISC peripheral.
+
+- qcom,misc-clk-trim-error-reg
+  Usage:      optional
+  Value type: <u32>
+  Definition: Register offset in MISC peripheral to read the clock trim error.
+		If this is specified, then qcom,pmic-misc should be specified.
+
+- qcom,actuator-type
+  Usage:      optional
+  Value type: <u32>
+  Definition: Allowed values are 0 for LRA and 1 for ERM. If this is not
+		specified, then LRA type will be used by default.
+
+- qcom,play-mode
+  Usage:      optional
+  Value type: <string>
+  Definition: Allowed values are: "direct", "buffer", "pwm", "auto". If not
+		specified for LRA actuator, auto mode will be selected by
+		default.
+
+- qcom,wave-shape
+  Usage:      optional
+  Value type: <string>
+  Definition: Wave shape to be played. Allowed values: "sine" or "square".
+		Default value is "square".
+
+- qcom,wave-play-rate-us
+  Usage:      optional
+  Value type: <u32>
+  Definition: Wave sample duration in microseconds. This should match
+		the frequency the vibrator supports.
+		Allowed values are: 0 to 20475. Default value is 5715.
+
+- qcom,max-play-time-us
+  Usage:      optional
+  Value type: <u32>
+  Definition: Maximum play time supported in microseconds. Default value is
+		15000.
+
+- qcom,vmax-mv
+  Usage:      optional
+  Value type: <u32>
+  Definition: Maximum output voltage in millivolts. Value specified here will
+		be rounded off to the closest multiple of 116 mV.
+		Allowed values: 0 to 3596. Default value is 3596.
+
+- qcom,ilim-ma
+  Usage:      optional
+  Value type: <u32>
+  Definition: Output current limit in mA. Allowed values: 400 or 800. Default
+		value is 400.
+
+- qcom,en-brake
+  Usage:      optional
+  Value type: <empty>
+  Definition: Enables internal reverse braking.
+
+- qcom,brake-pattern
+  Usage:      optional
+  Value type: <prop-encoded-array>
+  Definition: Brake pattern to be applied. If specified, it should have
+		4 elements. Allowed values for each element are:
+		0, 1: Vmax/4, 2: Vmax/2, 3: Vmax.
+
+- qcom,sc-dbc-cycles
+  Usage:      optional
+  Value type: <u32>
+  Definition: Short circuit debounce cycles for internal PWM.
+		Allowed values: 0, 8, 16 or 32.
+
+Following properties are specific only to LRA vibrators.
+
+- qcom,lra-auto-res-mode
+  Usage:      optional
+  Value type: <string>
+  Definition: Auto resonance method. Allowed values are:
+		For pmi8998 and earlier chips,
+		"none" : No auto resonance
+		"zxd" : Zero crossing detection method
+		"qwd" : Quarter wave drive method
+		"max-qwd" : Maximum QWD
+		"zxd-eop" : ZXD + End of Pattern
+		For pm660,
+		"zxd" : Zero crossing detection method
+		"qwd" : Quarter wave drive method
+
+- qcom,lra-high-z
+  Usage:      optional
+  Value type: <string>
+  Definition: High Z configuration for auto resonance. Allowed values are:
+		"none", "opt1", "opt2" and "opt3".
+		For pm660, "opt0" is a valid value for 1 LRA period.
+
+- qcom,lra-res-cal-period
+  Usage:      optional
+  Value type: <u32>
+  Definition: Auto resonance calibration period. Allowed values are:
+		For pmi8998 and earlier chips: 4, 8, 16, and 32.
+		For pm660: 4, 8, 16, 32, 64, 128 and 256.
+
+- qcom,lra-qwd-drive-duration
+  Usage:      optional
+  Value type: <u32>
+  Definition: LRA drive duration in QWD mode. Applies only for pm660 currently.
+		Allowed values are: 0 and 1, for 1/4 and 3/8 LRA period,
+		respectively.
+
+- qcom,lra-calibrate-at-eop
+  Usage:      optional
+  Value type: <u32>
+  Definition: Enables calibration at end of pattern. Applies only for pm660
+		currently. Allowed values are: 0 and 1.
+
+- qcom,auto-res-err-recovery-hw
+  Usage:      optional
+  Value type: <empty>
+  Definition: Enables Hardware auto resonance error recovery. Applies only for
+		pm660 currently.
+
+- qcom,drive-period-code-max-variation-pct
+  Usage:      optional
+  Value type: <u32>
+  Definition: Maximum allowed variation of LRA drive period code in percentage
+		above which RATE_CFG registers will not be updated by SW when
+		auto resonance is enabled and auto resonance error correction
+		algorithm is running. If not specified, default value is 25%.
+
+- qcom,drive-period-code-min-variation-pct
+  Usage:      optional
+  Value type: <u32>
+  Definition: Minimum allowed variation of LRA drive period code in percentage
+		below which RATE_CFG registers will not be updated by SW when
+		auto resonance is enabled and auto resonance error correction
+		algorithm is running. If not specified, default value is 25%.
+
+Following properties are applicable only when "qcom,play-mode" is set to
+"buffer".
+
+- qcom,wave-rep-cnt
+  Usage:      optional
+  Value type: <u32>
+  Definition: Repetition count for wave form.
+		Allowed values are: 1, 2, 4, 8, 16, 32, 64 and 128. Default
+		value is 1.
+
+- qcom,wave-samp-rep-cnt
+  Usage:      optional
+  Value type: <u32>
+  Definition: Repetition count for each sample of wave form. Allowed values
+		are: 1, 2, 4 and 8. Default value is 1.
+
+- qcom,wave-samples
+  Usage:      optional
+  Value type: <prop-encoded-array>
+  Definition: Wave samples in an array of 8 elements. Each element takes the
+		following representation, bit 0: unused, bits[5:1] : amplitude,
+		bit 6: overdrive, bit 7: sign. Default sample value is 0x3E.
+
+Following properties are applicable only when "qcom,play-mode" is set to
+"pwm".
+
+- pwms
+  Usage:      required, if "qcom,play-mode" is set to "pwm".
+  Value type: <phandle>
+  Definition: PWM device that is feeding its output to Haptics.
+
+- qcom,period-us
+  Usage:      required, if "qcom,play-mode" is set to "pwm".
+  Value type: <u32>
+  Definition: PWM period in us.
+
+- qcom,duty-us
+  Usage:      required, if "qcom,play-mode" is set to "pwm".
+  Value type: <u32>
+  Definition: PWM duty cycle in us.
+
+- qcom,ext-pwm-freq-khz
+  Usage:      optional
+  Value type: <u32>
+  Definition: Frequency for external PWM in KHz.
+		Allowed values are: 25, 50, 75 and 100.
+
+- qcom,ext-pwm-dtest-line
+  Usage:      optional
+  Value type: <u32>
+  Definition: DTEST line which is used for external PWM.
+
+Example:
+	qcom,haptics@c000 {
+		compatible = "qcom,qpnp-haptics";
+		reg = <0xc000 0x100>;
+		interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_RISING>,
+			     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+		interrupt-names = "hap-sc-irq", "hap-play-irq";
+		qcom,pmic-revid = <&pmi8998_revid>;
+		qcom,pmic-misc = <&pmi8998_misc>;
+		qcom,misc-clk-trim-error-reg = <0xf3>;
+		qcom,actuator-type = <0>;
+		qcom,play-mode = "direct";
+		qcom,vmax-mv = <3200>;
+		qcom,ilim-ma = <800>;
+		qcom,sc-dbc-cycles = <8>;
+		qcom,wave-play-rate-us = <6667>;
+		qcom,en-brake;
+		qcom,brake-pattern = <0x3 0x0 0x0 0x0>;
+		qcom,lra-high-z = "opt1";
+		qcom,lra-auto-res-mode = "qwd";
+		qcom,lra-res-cal-period = <4>;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index a61bab3..62a51cf 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -104,6 +104,17 @@
   Please refer Documentation/devicetree/bindings/arm/msm/msm_bus.txt
   for the properties above.
 
+- vdd-corners
+  Usage: required
+  Value type: <u32>
+  Definition: List of vdd corners to map to AHB levels.
+
+- vdd-corner-ahb-mapping
+  Usage: required
+  Value type: <string>
+  Definition: List of AHB level strings corresponding to vdd-corners.
+  Supported strings: suspend, svs, nominal, turbo
+
 - client-id-based
   Usage: required
   Value type: <empty>
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index e1f194f3..441d771 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -35,11 +35,6 @@
 		addition battery properties will be faked such that the device
 		assumes normal operation.
 
-- qcom,external-vconn
-  Usage:      optional
-  Value type: <empty>
-  Definition: Boolean flag which indicates VCONN is sourced externally.
-
 - qcom,fcc-max-ua
   Usage:      optional
   Value type: <u32>
@@ -181,6 +176,12 @@
   Definition: Specifies the maximum charger buck/boost switching frequency in
 		 KHz. It overrides the max frequency defined for the charger.
 
+- qcom,otg-deglitch-time-ms
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the deglitch interval for OTG detection.
+		If the value is not present, 50 msec is used as default.
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/scheduler/energy.txt b/Documentation/devicetree/bindings/scheduler/energy.txt
new file mode 100644
index 0000000..3c7121c
--- /dev/null
+++ b/Documentation/devicetree/bindings/scheduler/energy.txt
@@ -0,0 +1,13 @@
+* Scheduler Energy Driver
+
+Scheduler Energy Driver updates capacities in the scheduler group energy array.
+The array contains the power cost at each CPU operating point so the energy
+aware scheduler (EAS) can utilize it for task placement.
+
+Required properties:
+- compatible:		Must be "sched-energy"
+
+Example:
+	energy-costs {
+		compatible = "sched-energy";
+	};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 3a09b28..c116e42 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -186,6 +186,12 @@
  - qcom,msm-pcm-loopback-low-latency : Flag indicating whether
    the device node is of type low latency.
 
+* msm-transcode-loopback
+
+Required properties:
+
+ - compatible : "qcom,msm-transcode-loopback"
+
 * msm-dai-q6
 
 [First Level Nodes]
@@ -347,6 +353,8 @@
  - clocks:                                  phandle reference to the parent
                                             clock.
 
+ - qcom,mclk-clk-reg:                       Indicates the register address for mclk.
+
 * audio_slimslave
 
 Required properties:
@@ -1149,12 +1157,9 @@
 				When clock rate is set to zero,
 				then external clock is assumed.
 
- [Second Level Nodes]
-
-Required properties:
-
- - compatible : "qcom,msm-dai-q6-tdm"
- - qcom,msm-dai-q6-mi2s-dev-id: TDM port ID.
+ - qcom,msm-cpudai-tdm-clk-internal: Clock Source.
+				0 - EBIT clock from clk tree
+				1 - IBIT clock from clk tree
 
  - qcom,msm-cpudai-tdm-sync-mode: Synchronization setting.
 				0 - Short sync bit mode
@@ -1179,6 +1184,13 @@
 				1 - 1 bit clock cycle
 				2 - 2 bit clock cycle
 
+ [Second Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-q6-tdm"
+ - qcom,msm-dai-q6-mi2s-dev-id: TDM port ID.
+
  - qcom,msm-cpudai-tdm-data-align: Indicate how data is packed
 				within the slot. For example, 32 slot width in case of
 				sample bit width is 24.
@@ -1213,17 +1225,18 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36912>;
 		qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <0>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <0>;
+		qcom,msm-cpudai-tdm-data-delay = <0>;
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&quat_tdm_active &quat_tdm_dout_active>;
 		pinctrl-1 = <&quat_tdm_sleep &quat_tdm_dout_sleep>;
 		dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36912>;
-			qcom,msm-cpudai-tdm-sync-mode = <0>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <0>;
-			qcom,msm-cpudai-tdm-data-delay = <0>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 			qcom,msm-cpudai-tdm-header-start-offset = <0>;
 			qcom,msm-cpudai-tdm-header-width = <2>;
@@ -2298,14 +2311,15 @@
 		qcom,tasha-mclk-clk-freq = <9600000>;
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
-				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>;
+				<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
+				<&trans_loopback>;
 		asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
 				"msm-pcm-dsp.2", "msm-voip-dsp",
 				"msm-pcm-voice", "msm-pcm-loopback",
 				"msm-compress-dsp", "msm-pcm-hostless",
 				"msm-pcm-afe", "msm-lsm-client",
 				"msm-pcm-routing", "msm-cpe-lsm",
-				"msm-compr-dsp";
+				"msm-compr-dsp","msm-transcode-loopback";
 		asoc-cpu = <&dai_hdmi>,
 				<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
 				<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 958194b..4901fa0 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -70,6 +70,7 @@
 			  2: 38.4 MHz
 			  3: 52 MHz
 			  Defaults to 26 MHz if not specified.
+- extcon:       phandle to external connector (Refer to Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index bc66690..4bb75aa 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -7,7 +7,6 @@
 	"core_base" : usb controller register set
 - interrupts: IRQ lines used by this controller
 - interrupt-names : Interrupt resource entries are :
-	"hs_phy_irq" : Interrupt from HS PHY for asynchronous events in LPM.
 	"pwr_event_irq" : Interrupt to controller for asynchronous events in LPM.
 	Used for SS-USB power events.
  - clocks: a list of phandles to the controller clocks. Use as per
@@ -36,6 +35,12 @@
 - interrupt-names : Optional interrupt resource entries are:
     "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
     "ss_phy_irq"  : Interrupt from super speed phy for wake up notification.
+    "hs_phy_irq" : Interrupt from HS PHY for asynchronous events in LPM.
+    "dp_hs_phy_irq" : Interrupt from HS PHY for asynchronous events in LPM
+    going through PDC. (use qcom,use-pdc-interrupts property)
+    "dm_hs_phy_irq" : Interrupt from HS PHY for asynchronous events in LPM
+    going through PDC. (use qcom,use-pdc-interrupts property)
+
  - clocks: a list of phandles to the controller clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
@@ -57,6 +62,8 @@
 - qcom,core-clk-rate: If present, indicates clock frequency to be set for USB master clock.
 - qcom,core-clk-rate-hs: If present, indicates min core clock frequency required to support
   hs speed.
+- qcom,use-pdc-interrupts: If present, it configures the provided PDC IRQ with the
+  required configuration for wakeup functionality.
 - extcon: phandles to external connector devices. First phandle should point to
 	  external connector, which provide "USB" cable events, the second
 	  should point to external connector device, which provide "USB-HOST"
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 0f0fc7d..9d2908d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -858,6 +858,11 @@
 
 	Default: Empty
 
+reserved_port_bind - BOOLEAN
+	If set, allows explicit bind requests from applications to any
+	port within the range of ip_local_reserved_ports.
+	Default: 1
+
 ip_nonlocal_bind - BOOLEAN
 	If set, allows processes to bind() to non-local IP addresses,
 	which can be quite useful - but may break some applications.
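
Illustrating the reserved_port_bind behaviour documented above, a minimal
user-space sketch. It assumes the administrator has already added port 32100
to ip_local_reserved_ports; the port number is purely illustrative.

	#include <arpa/inet.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		struct sockaddr_in addr;

		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_addr.s_addr = htonl(INADDR_ANY);
		/* Assumed to be listed in ip_local_reserved_ports. */
		addr.sin_port = htons(32100);

		/*
		 * With reserved_port_bind = 1 (the default) this explicit
		 * bind request is allowed; with 0 it is refused.
		 */
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			perror("bind");

		close(fd);
		return 0;
	}
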
diff --git a/Makefile b/Makefile
index df4d437..9e428c5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 30
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 9e46d6e..fa47df6a 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index fc0d3b0..40289a8 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -270,6 +270,8 @@
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_PWM=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 9d12771..d91f5f6 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -263,6 +263,8 @@
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_PWM=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index d060641..9edea10 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -24,6 +24,7 @@
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
 
 #ifdef CONFIG_CPU_FREQ
 #define arch_scale_freq_capacity cpufreq_scale_freq_capacity
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index fab57d3..877f461 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,7 +81,7 @@
 extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *);
-extern void sanity_check_meminfo(void);
+extern void adjust_lowmem_bounds(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
 
@@ -1104,8 +1104,14 @@
 	setup_dma_zone(mdesc);
 	xen_early_init();
 	efi_init();
-	sanity_check_meminfo();
+	/*
+	 * Make sure the calculation for lowmem/highmem is set appropriately
+	 * before reserving/allocating any memory
+	 */
+	adjust_lowmem_bounds();
 	arm_memblock_init(mdesc);
+	/* Memory may have been removed so recalculate the bounds. */
+	adjust_lowmem_bounds();
 
 	early_ioremap_reset();
 
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index bd884da..2b6c530 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -196,6 +196,14 @@
 	return 0;
 }
 
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+	return per_cpu(cpu_efficiency, cpu);
+}
+EXPORT_SYMBOL(arch_get_cpu_efficiency);
+
 #ifdef CONFIG_OF
 struct cpu_efficiency {
 	const char *compatible;
@@ -272,6 +280,7 @@
 	for_each_possible_cpu(cpu) {
 		const u32 *rate;
 		int len;
+		u32 efficiency;
 
 		/* too early to use cpu->of_node */
 		cn = of_get_cpu_node(cpu, NULL);
@@ -280,12 +289,26 @@
 			continue;
 		}
 
-		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
-			if (of_device_is_compatible(cn, cpu_eff->compatible))
-				break;
+		/*
+		 * The CPU efficiency value passed from the device tree
+		 * overrides the value defined in the table_efficiency[]
+		 */
+		if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
 
-		if (cpu_eff->compatible == NULL)
-			continue;
+			for (cpu_eff = table_efficiency;
+					cpu_eff->compatible; cpu_eff++)
+
+				if (of_device_is_compatible(cn,
+						cpu_eff->compatible))
+					break;
+
+			if (cpu_eff->compatible == NULL)
+				continue;
+
+			efficiency = cpu_eff->efficiency;
+		}
+
+		per_cpu(cpu_efficiency, cpu) = efficiency;
 
 		rate = of_get_property(cn, "clock-frequency", &len);
 		if (!rate || len != 4) {
@@ -294,7 +317,7 @@
 			continue;
 		}
 
-		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+		capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
 
 		/* Save min capacity of the system */
 		if (capacity < min_capacity)
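
A hypothetical consumer of the newly exported helper, only to illustrate its
semantics: arch_get_cpu_efficiency() returns SCHED_CAPACITY_SCALE for a CPU
unless an "efficiency" property was read from that CPU's device-tree node.
The function below and its scaling policy are illustrative, not part of this
patch.

	#include <linux/sched.h>
	#include <asm/topology.h>

	static unsigned long scale_by_efficiency(int cpu, unsigned long raw)
	{
		/* Per-CPU efficiency from DT, or the 1024 default. */
		return raw * arch_get_cpu_efficiency(cpu) / SCHED_CAPACITY_SCALE;
	}
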
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index bf89c91..bd0ee7f 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -95,7 +95,6 @@
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -103,8 +102,8 @@
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 2fd5c13..332ce3b 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -872,6 +872,9 @@
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4001dd1..5cbfd9f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1152,13 +1152,12 @@
 
 phys_addr_t arm_lowmem_limit __initdata = 0;
 
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 	phys_addr_t memblock_limit = 0;
-	int highmem = 0;
 	u64 vmalloc_limit;
 	struct memblock_region *reg;
-	bool should_use_highmem = false;
+	phys_addr_t lowmem_limit = 0;
 
 	/*
 	 * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1172,43 +1171,18 @@
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
-		phys_addr_t size_limit = reg->size;
 
-		if (reg->base >= vmalloc_limit)
-			highmem = 1;
-		else
-			size_limit = vmalloc_limit - reg->base;
-
-
-		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
-
-			if (highmem) {
-				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-					  &block_start, &block_end);
-				memblock_remove(reg->base, reg->size);
-				should_use_highmem = true;
-				continue;
-			}
-
-			if (reg->size > size_limit) {
-				phys_addr_t overlap_size = reg->size - size_limit;
-
-				pr_notice("Truncating RAM at %pa-%pa",
-					  &block_start, &block_end);
-				block_end = vmalloc_limit;
-				pr_cont(" to -%pa", &block_end);
-				memblock_remove(vmalloc_limit, overlap_size);
-				should_use_highmem = true;
-			}
-		}
-
-		if (!highmem) {
-			if (block_end > arm_lowmem_limit) {
-				if (reg->size > size_limit)
-					arm_lowmem_limit = vmalloc_limit;
-				else
-					arm_lowmem_limit = block_end;
-			}
+		if (reg->base < vmalloc_limit) {
+			if (block_end > lowmem_limit)
+				/*
+				 * Compare as u64 to ensure vmalloc_limit does
+				 * not get truncated. block_end should always
+				 * fit in phys_addr_t so there should be no
+				 * issue with assignment.
+				 */
+				lowmem_limit = min_t(u64,
+							 vmalloc_limit,
+							 block_end);
 
 			/*
 			 * Find the first non-pmd-aligned page, and point
@@ -1227,14 +1201,13 @@
 				if (!IS_ALIGNED(block_start, PMD_SIZE))
 					memblock_limit = block_start;
 				else if (!IS_ALIGNED(block_end, PMD_SIZE))
-					memblock_limit = arm_lowmem_limit;
+					memblock_limit = lowmem_limit;
 			}
 
 		}
 	}
 
-	if (should_use_highmem)
-		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+	arm_lowmem_limit = lowmem_limit;
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
@@ -1248,6 +1221,18 @@
 	if (!memblock_limit)
 		memblock_limit = arm_lowmem_limit;
 
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
+			phys_addr_t end = memblock_end_of_DRAM();
+
+			pr_notice("Ignoring RAM at %pa-%pa\n",
+				  &memblock_limit, &end);
+			pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
+			memblock_remove(memblock_limit, end - memblock_limit);
+		}
+	}
+
 	memblock_set_current_limit(memblock_limit);
 }
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 2740967..13a25d6 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -85,7 +85,7 @@
 }
 
 /* MPU initialisation functions */
-void __init sanity_check_meminfo_mpu(void)
+void __init adjust_lowmem_bounds_mpu(void)
 {
 	phys_addr_t phys_offset = PHYS_OFFSET;
 	phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
@@ -274,7 +274,7 @@
 	}
 }
 #else
-static void sanity_check_meminfo_mpu(void) {}
+static void adjust_lowmem_bounds_mpu(void) {}
 static void __init mpu_setup(void) {}
 #endif /* CONFIG_ARM_MPU */
 
@@ -295,10 +295,10 @@
 #endif
 }
 
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 	phys_addr_t end;
-	sanity_check_meminfo_mpu();
+	adjust_lowmem_bounds_mpu();
 	end = memblock_end_of_DRAM();
 	high_memory = __va(end - 1) + 1;
 	memblock_set_current_limit(end);
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 7ad029a..64b250d 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -10,6 +10,7 @@
 	sdm845-v2-mtp.dtb \
 	sdm845-v2-cdp.dtb \
 	sdm845-qrd.dtb \
+	sdm845-v2-qrd.dtb \
 	sdm845-4k-panel-mtp.dtb \
 	sdm845-4k-panel-cdp.dtb \
 	sdm845-4k-panel-qrd.dtb
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
index aa52083..c2b054a 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -50,6 +50,7 @@
 			15 01 00 00 00 00 02 ff f0
 			15 01 00 00 00 00 02 92 01
 			15 01 00 00 00 00 02 ff 10
+			15 01 00 00 00 00 02 35 00 /* enable TE generation */
 			05 01 00 00 28 00 01 29];
 		qcom,mdss-dsi-off-command = [
 			05 01 00 00 10 00 01 28
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
new file mode 100644
index 0000000..8e5d854
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm670.dtsi
@@ -0,0 +1,316 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+&soc {
+	kgsl_smmu: arm,smmu-kgsl@5040000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5040000 0x10000>;
+		#iommu-cells = <1>;
+		qcom,dynamic;
+		qcom,use-3-lvl-tables;
+		#global-interrupts = <2>;
+		qcom,regulator-names = "vdd";
+		vdd-supply = <&gpu_cx_gdsc>;
+		interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+		clock-names = "gcc_ddrss_gpu_axi_clk",
+				"gcc_gpu_memnoc_gfx_clk",
+				"gpu_cc_ahb_clk",
+				"gpu_cc_cx_gmu_clk";
+		clocks = <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+			<&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+			<&clock_gpucc GPU_CC_AHB_CLK>,
+			<&clock_gpucc GPU_CC_CX_GMU_CLK>;
+		attach-impl-defs =
+				<0x6000 0x2378>,
+				<0x6060 0x1055>,
+				<0x678c 0x8>,
+				<0x6794 0x28>,
+				<0x6800 0x6>,
+				<0x6900 0x3ff>,
+				<0x6924 0x204>,
+				<0x6928 0x11000>,
+				<0x6930 0x800>,
+				<0x6960 0xffffffff>,
+				<0x6b64 0x1a5551>,
+				<0x6b68 0x9a82a382>;
+	};
+
+	apps_smmu: apps-smmu@0x15000000 {
+		compatible = "qcom,qsmmu-v500";
+		reg = <0x15000000 0x80000>,
+			<0x150c2000 0x18>;
+		reg-names = "base", "tcu-base";
+		#iommu-cells = <2>;
+		qcom,skip-init;
+		qcom,use-3-lvl-tables;
+		#global-interrupts = <1>;
+		#size-cells = <1>;
+		#address-cells = <1>;
+		ranges;
+		interrupts =	<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,msm-bus,name = "apps_smmu";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,active-only;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_GNOC_SNOC>,
+			<MSM_BUS_SLAVE_IMEM_CFG>,
+			<0 0>,
+			<MSM_BUS_MASTER_GNOC_SNOC>,
+			<MSM_BUS_SLAVE_IMEM_CFG>,
+			<0 1000>;
+
+		anoc_1_tbu: anoc_1_tbu@0x150c5000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150c5000 0x1000>,
+				<0x150c2200 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x0 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>;
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 0>,
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 1000>;
+		};
+
+		anoc_2_tbu: anoc_2_tbu@0x150c9000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150c9000 0x1000>,
+				<0x150c2208 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x400 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>;
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 0>,
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 1000>;
+		};
+
+		mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x150cd000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150cd000 0x1000>,
+				<0x150c2210 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x800 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
+			qcom,msm-bus,name = "mnoc_hf_0_tbu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_MDP_PORT0>,
+				<MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+				<0 0>,
+				<MSM_BUS_MASTER_MDP_PORT0>,
+				<MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+				<0 1000>;
+		};
+
+		mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x150d1000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d1000 0x1000>,
+				<0x150c2218 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0xc00 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
+			qcom,msm-bus,name = "mnoc_hf_1_tbu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_MDP_PORT0>,
+				<MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+				<0 0>,
+				<MSM_BUS_MASTER_MDP_PORT0>,
+				<MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>,
+				<0 1000>;
+		};
+
+		mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x150d5000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d5000 0x1000>,
+				<0x150c2220 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1000 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+			qcom,msm-bus,name = "mnoc_sf_0_tbu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_CAMNOC_SF>,
+				<MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>,
+				<0 0>,
+				<MSM_BUS_MASTER_CAMNOC_SF>,
+				<MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>,
+				<0 1000>;
+		};
+
+		compute_dsp_tbu: compute_dsp_tbu@0x150d9000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d9000 0x1000>,
+				<0x150c2228 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1400 0x400>;
+			/* No GDSC */
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 0>,
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 1000>;
+		};
+
+		adsp_tbu: adsp_tbu@0x150dd000 {
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150dd000 0x1000>,
+				<0x150c2230 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,stream-id-range = <0x1800 0x400>;
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>;
+			qcom,msm-bus,name = "apps_smmu";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 0>,
+				<MSM_BUS_MASTER_GNOC_SNOC>,
+				<MSM_BUS_SLAVE_IMEM_CFG>,
+				<0 1000>;
+		};
+
+	};
+
+	kgsl_iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 0x7 isn't a valid sid, but should pass the sid sanity check.
+		 * We just need _something_ here to get this node recognized by
+		 * the SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyways, so using a dummy value is ok.
+		 */
+		iommus = <&kgsl_smmu 0x7>;
+	};
+
+	apps_iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * This SID belongs to QUP1-GSI. We can't use a fake SID for
+		 * the apps_smmu device.
+		 */
+		iommus = <&apps_smmu 0x16 0x0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 442434d..1a2ca5b 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -330,10 +330,10 @@
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
-		 * This SID belongs to QUP1-GSI. We can't use a fake SID for
+		 * This SID belongs to TSIF. We can't use a fake SID for
 		 * the apps_smmu device.
 		 */
-		iommus = <&apps_smmu 0x16 0>;
+		iommus = <&apps_smmu 0x20 0>;
 	};
 
 	apps_iommu_coherent_test_device {
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 655f447..bc0b118 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -377,14 +377,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36864>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36864>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -395,14 +396,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36865>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36865>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -413,14 +415,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36880>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36880>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -431,14 +434,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36881>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_sec_tdm_tx_0: qcom,msm-dai-q6-tdm-sec-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36881>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -449,14 +453,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36896>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_tert_tdm_rx_0: qcom,msm-dai-q6-tdm-tert-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36896>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -467,14 +472,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36897 >;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_tert_tdm_tx_0: qcom,msm-dai-q6-tdm-tert-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36897 >;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -485,14 +491,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36912>;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_quat_tdm_rx_0: qcom,msm-dai-q6-tdm-quat-rx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36912>;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
@@ -503,14 +510,15 @@
 		qcom,msm-cpudai-tdm-group-num-ports = <1>;
 		qcom,msm-cpudai-tdm-group-port-id = <36913 >;
 		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
 		dai_quat_tdm_tx_0: qcom,msm-dai-q6-tdm-quat-tx-0 {
 			compatible = "qcom,msm-dai-q6-tdm";
 			qcom,msm-cpudai-tdm-dev-id = <36913 >;
-			qcom,msm-cpudai-tdm-sync-mode = <1>;
-			qcom,msm-cpudai-tdm-sync-src = <1>;
-			qcom,msm-cpudai-tdm-data-out = <0>;
-			qcom,msm-cpudai-tdm-invert-sync = <1>;
-			qcom,msm-cpudai-tdm-data-delay = <1>;
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
new file mode 100644
index 0000000..ef8fe1b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -0,0 +1,631 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+&spmi_bus {
+	qcom,pm660@0 {
+		compatible ="qcom,spmi-pmic";
+		reg = <0x0 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm660_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+			qcom,fab-id-valid;
+		};
+
+		pm660_misc: qcom,misc@900 {
+			compatible = "qcom,qpnp-misc";
+			reg = <0x900 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			interrupts = <0x0 0x8 0x0 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x1 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x4 IRQ_TYPE_NONE>,
+				     <0x0 0x8 0x5 IRQ_TYPE_NONE>;
+			interrupt-names = "kpdpwr", "resin",
+					"resin-bark", "kpdpwr-resin-bark";
+			qcom,pon-dbc-delay = <15625>;
+			qcom,kpdpwr-sw-debounce;
+			qcom,system-reset;
+			qcom,store-hard-reset-reason;
+
+			qcom,pon_1 {
+				qcom,pon-type = <0>;
+				qcom,pull-up = <1>;
+				linux,code = <116>;
+			};
+
+			qcom,pon_2 {
+				qcom,pon-type = <1>;
+				qcom,pull-up = <1>;
+				linux,code = <114>;
+			};
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm660_tz";
+			qcom,channel-num = <6>;
+			qcom,temp_alarm-vadc = <&pm660_vadc>;
+		};
+
+		pm660_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm660-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+			gpio@cc00 {
+				reg = <0xcc00 0x100>;
+				qcom,pin-num = <13>;
+				status = "disabled";
+			};
+		};
+
+		pm660_coincell: qcom,coincell@2800 {
+			compatible = "qcom,qpnp-coincell";
+			reg = <0x2800 0x100>;
+		};
+
+		pm660_rtc: qcom,pm660_rtc {
+			compatible = "qcom,qpnp-rtc";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,qpnp-rtc-write = <0>;
+			qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+			qcom,pm660_rtc_rw@6000 {
+				reg = <0x6000 0x100>;
+			};
+			qcom,pm660_rtc_alarm@6100 {
+				reg = <0x6100 0x100>;
+				interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
+			};
+		};
+
+		pm660_vadc: vadc@3100 {
+			compatible = "qcom,qpnp-vadc-hc";
+			reg = <0x3100 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+
+			chan@6 {
+				label = "die_temp";
+				reg = <6>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@0 {
+				label = "ref_gnd";
+				reg = <0>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@1 {
+				label = "ref_1250v";
+				reg = <1>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+
+			chan@83 {
+				label = "vph_pwr";
+				reg = <0x83>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@85 {
+				label = "vcoin";
+				reg = <0x85>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@4c {
+				label = "xo_therm";
+				reg = <0x4c>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@4d {
+				label = "msm_therm";
+				reg = <0x4d>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@51 {
+				label = "quiet_therm";
+				reg = <0x51>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+			};
+
+			chan@4e {
+				label = "emmc_therm";
+				reg = <0x4e>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@4f {
+				label = "pa_therm0";
+				reg = <0x4f>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,fast-avg-setup = <0>;
+				qcom,vadc-thermal-node;
+			};
+
+			chan@1d {
+				label = "drax_temp";
+				reg = <0x1d>;
+				qcom,decimation = <2>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <3>;
+				qcom,hw-settle-time = <0>;
+				qcom,fast-avg-setup = <0>;
+				qcom,cal-val = <0>;
+			};
+		};
+
+		pm660_charger: qcom,qpnp-smb2 {
+			compatible = "qcom,qpnp-smb2";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,pmic-revid = <&pm660_revid>;
+
+			io-channels = <&pm660_rradc 8>,
+				      <&pm660_rradc 10>,
+				      <&pm660_rradc 3>,
+				      <&pm660_rradc 4>;
+			io-channel-names = "charger_temp",
+					   "charger_temp_max",
+					   "usbin_i",
+					   "usbin_v";
+
+			qcom,wipower-max-uw = <5000000>;
+			dpdm-supply = <&qusb_phy0>;
+
+			qcom,thermal-mitigation
+					= <3000000 2500000 2000000 1500000
+						1000000 500000>;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =
+					<0x0 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "chg-error",
+						  "chg-state-change",
+						  "step-chg-state-change",
+						  "step-chg-soc-update-fail",
+						  "step-chg-soc-update-request";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts = <0x0 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "otg-fail",
+						  "otg-overcurrent",
+						  "otg-oc-dis-sw-sts",
+						  "testmode-change-detect";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =
+					<0x0 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "bat-temp",
+						  "bat-ocp",
+						  "bat-ov",
+						  "bat-low",
+						  "bat-therm-or-id-missing",
+						  "bat-terminal-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =
+					<0x0 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "usbin-collapse",
+						  "usbin-lt-3p6v",
+						  "usbin-uv",
+						  "usbin-ov",
+						  "usbin-plugin",
+						  "usbin-src-change",
+						  "usbin-icl-change",
+						  "type-c-change";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =
+					<0x0 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "dcin-collapse",
+						  "dcin-lt-3p6v",
+						  "dcin-uv",
+						  "dcin-ov",
+						  "dcin-plugin",
+						  "div2-en-dg",
+						  "dcin-icl-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =
+					<0x0 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x0 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x0 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+					<0x0 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "wdog-snarl",
+						  "wdog-bark",
+						  "aicl-fail",
+						  "aicl-done",
+						  "high-duty-cycle",
+						  "input-current-limiting",
+						  "temperature-change",
+						  "switcher-power-ok";
+			};
+		};
+
+		pm660_pdphy: qcom,usb-pdphy@1700 {
+			compatible = "qcom,qpnp-pdphy";
+			reg = <0x1700 0x100>;
+			vdd-pdphy-supply = <&pm660l_l7>;
+			vbus-supply = <&smb2_vbus>;
+			vconn-supply = <&smb2_vconn>;
+			interrupts = <0x0 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x0 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-names = "sig-tx",
+					  "sig-rx",
+					  "msg-tx",
+					  "msg-rx",
+					  "msg-tx-failed",
+					  "msg-tx-discarded",
+					  "msg-rx-discarded";
+
+			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+						 <9000 3000>; /* 9V @ 3A */
+		};
+
+		pm660_adc_tm: vadc@3400 {
+			compatible = "qcom,qpnp-adc-tm-hc";
+			reg = <0x3400 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0x0 0x34 0x0 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "eoc-int-en-set";
+			qcom,adc-bit-resolution = <15>;
+			qcom,adc-vdd-reference = <1875>;
+			qcom,adc_tm-vadc = <&pm660_vadc>;
+			qcom,decimation = <0>;
+			qcom,fast-avg-setup = <0>;
+
+			chan@83 {
+				label = "vph_pwr";
+				reg = <0x83>;
+				qcom,pre-div-channel-scaling = <1>;
+				qcom,calibration-type = "absolute";
+				qcom,scale-function = <0>;
+				qcom,hw-settle-time = <0>;
+				qcom,btm-channel-number = <0x60>;
+			};
+
+			chan@4d {
+				label = "msm_therm";
+				reg = <0x4d>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,btm-channel-number = <0x68>;
+				qcom,thermal-node;
+			};
+
+			chan@51 {
+				label = "quiet_therm";
+				reg = <0x51>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <2>;
+				qcom,hw-settle-time = <2>;
+				qcom,btm-channel-number = <0x70>;
+				qcom,thermal-node;
+			};
+
+			chan@4c {
+				label = "xo_therm";
+				reg = <0x4c>;
+				qcom,pre-div-channel-scaling = <0>;
+				qcom,calibration-type = "ratiometric";
+				qcom,scale-function = <4>;
+				qcom,hw-settle-time = <2>;
+				qcom,btm-channel-number = <0x78>;
+				qcom,thermal-node;
+			};
+		};
+
+		pm660_rradc: rradc@4500 {
+			compatible = "qcom,rradc";
+			reg = <0x4500 0x100>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			#io-channel-cells = <1>;
+			qcom,pmic-revid = <&pm660_revid>;
+		};
+
+		pm660_fg: qpnp,fg {
+			compatible = "qcom,fg-gen3";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pm660_revid>;
+			io-channels = <&pm660_rradc 0>,
+				      <&pm660_rradc 7>;
+			io-channel-names = "rradc_batt_id",
+					   "rradc_die_temp";
+			qcom,rradc-base = <0x4500>;
+			qcom,fg-esr-timer-awake = <96 96>;
+			qcom,fg-esr-timer-asleep = <256 256>;
+			qcom,fg-esr-timer-charging = <0 96>;
+			qcom,cycle-counter-en;
+			status = "okay";
+
+			qcom,fg-batt-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts = <0x0 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x40 0x2
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x0 0x40 0x3
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x0 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x40 0x5
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x0 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "soc-update",
+						  "soc-ready",
+						  "bsoc-delta",
+						  "msoc-delta",
+						  "msoc-low",
+						  "msoc-empty",
+						  "msoc-high",
+						  "msoc-full";
+			};
+
+			qcom,fg-batt-info@4100 {
+				status = "okay";
+				reg = <0x4100 0x100>;
+				interrupts = <0x0 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "vbatt-pred-delta",
+						  "vbatt-low",
+						  "esr-delta",
+						  "batt-missing",
+						  "batt-temp-delta";
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts = <0x0 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x0 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "ima-rdy",
+						  "mem-xcp",
+						  "dma-grant";
+			};
+		};
+
+		bcl@4200 {
+			compatible = "qcom,msm-bcl-lmh";
+			reg = <0x4200 0xff>,
+				<0x4300 0xff>;
+			reg-names = "fg_user_adc",
+					"fg_lmh";
+			interrupts = <0x0 0x42 0x0 IRQ_TYPE_NONE>,
+					<0x0 0x42 0x2 IRQ_TYPE_NONE>;
+			interrupt-names = "bcl-high-ibat-int",
+					"bcl-low-vbat-int";
+			qcom,vbat-polling-delay-ms = <100>;
+			qcom,ibat-polling-delay-ms = <100>;
+		};
+	};
+
+	qcom,pm660@1 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x1 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pm660a.dtsi b/arch/arm64/boot/dts/qcom/pm660a.dtsi
new file mode 100644
index 0000000..bfe1b5a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm660a.dtsi
@@ -0,0 +1,29 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Disable WLED */
+&pm660l_wled {
+	status = "disabled";
+};
+
+/* disable LCDB */
+&pm660l_lcdb {
+	status = "disabled";
+};
+
+&pm660a_oledb {
+	status = "okay";
+};
+
+&pm660a_labibb {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/pm660l.dtsi b/arch/arm64/boot/dts/qcom/pm660l.dtsi
new file mode 100644
index 0000000..0f18ba5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm660l.dtsi
@@ -0,0 +1,470 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+#include <dt-bindings/msm/power-on.h>
+
+&spmi_bus {
+	qcom,pm660l@2 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x2 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm660l_revid: qcom,revid@100 {
+			compatible = "qcom,qpnp-revid";
+			reg = <0x100 0x100>;
+		};
+
+		pm660l_pbs: qcom,pbs@7300 {
+			compatible = "qcom,qpnp-pbs";
+			reg = <0x7300 0x100>;
+		};
+
+		qcom,power-on@800 {
+			compatible = "qcom,qpnp-power-on";
+			reg = <0x800 0x100>;
+			qcom,secondary-pon-reset;
+			qcom,hard-reset-poweroff-type =
+				<PON_POWER_OFF_SHUTDOWN>;
+		};
+
+		qcom,temp-alarm@2400 {
+			compatible = "qcom,qpnp-temp-alarm";
+			reg = <0x2400 0x100>;
+			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+			label = "pm660l_tz";
+		};
+
+		pm660l_gpios: gpios {
+			compatible = "qcom,qpnp-pin";
+			gpio-controller;
+			#gpio-cells = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			label = "pm660l-gpio";
+
+			gpio@c000 {
+				reg = <0xc000 0x100>;
+				qcom,pin-num = <1>;
+				status = "disabled";
+			};
+
+			gpio@c100 {
+				reg = <0xc100 0x100>;
+				qcom,pin-num = <2>;
+				status = "disabled";
+			};
+
+			gpio@c200 {
+				reg = <0xc200 0x100>;
+				qcom,pin-num = <3>;
+				status = "disabled";
+			};
+
+			gpio@c300 {
+				reg = <0xc300 0x100>;
+				qcom,pin-num = <4>;
+				status = "disabled";
+			};
+
+			gpio@c400 {
+				reg = <0xc400 0x100>;
+				qcom,pin-num = <5>;
+				status = "disabled";
+			};
+
+			gpio@c500 {
+				reg = <0xc500 0x100>;
+				qcom,pin-num = <6>;
+				status = "disabled";
+			};
+
+			gpio@c600 {
+				reg = <0xc600 0x100>;
+				qcom,pin-num = <7>;
+				status = "disabled";
+			};
+
+			gpio@c700 {
+				reg = <0xc700 0x100>;
+				qcom,pin-num = <8>;
+				status = "disabled";
+			};
+
+			gpio@c800 {
+				reg = <0xc800 0x100>;
+				qcom,pin-num = <9>;
+				status = "disabled";
+			};
+
+			gpio@c900 {
+				reg = <0xc900 0x100>;
+				qcom,pin-num = <10>;
+				status = "disabled";
+			};
+
+			gpio@ca00 {
+				reg = <0xca00 0x100>;
+				qcom,pin-num = <11>;
+				status = "disabled";
+			};
+
+			gpio@cb00 {
+				reg = <0xcb00 0x100>;
+				qcom,pin-num = <12>;
+				status = "disabled";
+			};
+
+		};
+	};
+
+	pm660l_3: qcom,pm660l@3 {
+		compatible = "qcom,spmi-pmic";
+		reg = <0x3 SPMI_USID>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		pm660l_pwm_1: pwm@b100 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb100 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					"qpnp-lpg-lut-base";
+			qcom,channel-id = <1>;
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <0>;
+			#pwm-cells = <2>;
+		};
+
+		pm660l_pwm_2: pwm@b200 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb200 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,channel-id = <2>;
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <1>;
+			#pwm-cells = <2>;
+		};
+
+		pm660l_pwm_3: pwm@b300 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb300 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,channel-id = <3>;
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <2>;
+			#pwm-cells = <2>;
+			qcom,period = <6000000>;
+
+			qcom,lpg {
+				label = "lpg";
+				cell-index = <0>;
+				qcom,duty-percents =
+					<0x01 0x0a 0x14 0x1e 0x28 0x32 0x3c
+					0x46 0x50 0x5a 0x64
+					0x64 0x5a 0x50 0x46 0x3c 0x32 0x28 0x1e
+					0x14 0x0a 0x01>;
+			};
+		};
+
+		pm660l_pwm_4: pwm@b400 {
+			compatible = "qcom,qpnp-pwm";
+			reg = <0xb400 0x100>,
+			      <0xb042 0x7e>;
+			reg-names = "qpnp-lpg-channel-base",
+					 "qpnp-lpg-lut-base";
+			qcom,channel-id = <4>;
+			qcom,lpg-lut-size = <0x7e>;
+			qcom,supported-sizes = <6>, <9>;
+			qcom,ramp-index = <3>;
+			#pwm-cells = <2>;
+			status = "disabled";
+		};
+
+		qcom,leds@d000 {
+			compatible = "qcom,leds-qpnp";
+			reg = <0xd000 0x100>;
+			label = "rgb";
+
+			red_led: qcom,rgb_0 {
+				label = "rgb";
+				qcom,id = <3>;
+				qcom,mode = "pwm";
+				pwms = <&pm660l_pwm_3 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "red";
+				qcom,start-idx = <0>;
+				qcom,idx-len = <22>;
+				qcom,duty-pcts =
+					[01 0a 14 1e 28 32 3c 46 50 5a 64
+					64 5a 50 46 3c 32 28 1e 14 0a 01];
+				qcom,use-blink;
+			};
+
+			green_led: qcom,rgb_1 {
+				label = "rgb";
+				qcom,id = <4>;
+				qcom,mode = "pwm";
+				pwms = <&pm660l_pwm_2 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "green";
+			};
+
+			blue_led: qcom,rgb_2 {
+				label = "rgb";
+				qcom,id = <5>;
+				qcom,mode = "pwm";
+				pwms = <&pm660l_pwm_1 0 0>;
+				qcom,pwm-us = <1000>;
+				qcom,max-current = <12>;
+				qcom,default-state = "off";
+				linux,name = "blue";
+			};
+		};
+
+		pm660l_wled: qcom,leds@d800 {
+			compatible = "qcom,qpnp-wled";
+			reg = <0xd800 0x100>,
+				<0xd900 0x100>;
+			reg-names = "qpnp-wled-ctrl-base",
+					"qpnp-wled-sink-base";
+			interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "ovp-irq", "sc-irq";
+			linux,name = "wled";
+			linux,default-trigger = "bkl-trigger";
+			qcom,fdbk-output = "auto";
+			qcom,vref-uv = <127500>;
+			qcom,switch-freq-khz = <800>;
+			qcom,ovp-mv = <29600>;
+			qcom,ilim-ma = <970>;
+			qcom,boost-duty-ns = <26>;
+			qcom,mod-freq-khz = <9600>;
+			qcom,dim-mode = "hybrid";
+			qcom,hyb-thres = <625>;
+			qcom,sync-dly-us = <800>;
+			qcom,fs-curr-ua = <25000>;
+			qcom,cons-sync-write-delay-us = <1000>;
+			qcom,led-strings-list = [00 01 02];
+			qcom,en-ext-pfet-sc-pro;
+			qcom,loop-auto-gm-en;
+			qcom,pmic-revid = <&pm660l_revid>;
+			status = "ok";
+		};
+
+		flash_led: qcom,leds@d300 {
+			compatible = "qcom,qpnp-flash-led-v2";
+			reg = <0xd300 0x100>;
+			label = "flash";
+			interrupts = <0x3 0xd3 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x3 0xd3 0x4 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "led-fault-irq",
+					"all-ramp-down-done-irq",
+					"all-ramp-up-done-irq";
+			qcom,hdrm-auto-mode;
+			qcom,short-circuit-det;
+			qcom,open-circuit-det;
+			qcom,vph-droop-det;
+			qcom,thermal-derate-en;
+			qcom,thermal-derate-current = <200 500 1000>;
+			qcom,isc-delay = <192>;
+			qcom,pmic-revid = <&pm660l_revid>;
+
+			pm660l_flash0: qcom,flash_0 {
+				label = "flash";
+				qcom,led-name = "led:flash_0";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_flash1: qcom,flash_1 {
+				label = "flash";
+				qcom,led-name = "led:flash_1";
+				qcom,max-current = <1500>;
+				qcom,default-led-trigger = "flash1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <1000>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_flash2: qcom,flash_2 {
+				label = "flash";
+				qcom,led-name = "led:flash_2";
+				qcom,max-current = <750>;
+				qcom,default-led-trigger = "flash2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <500>;
+				qcom,duration-ms = <1280>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_torch0: qcom,torch_0 {
+				label = "torch";
+				qcom,led-name = "led:torch_0";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch0_trigger";
+				qcom,id = <0>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_torch1: qcom,torch_1 {
+				label = "torch";
+				qcom,led-name = "led:torch_1";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch1_trigger";
+				qcom,id = <1>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_torch2: qcom,torch_2 {
+				label = "torch";
+				qcom,led-name = "led:torch_2";
+				qcom,max-current = <500>;
+				qcom,default-led-trigger = "torch2_trigger";
+				qcom,id = <2>;
+				qcom,current-ma = <300>;
+				qcom,ires-ua = <12500>;
+				qcom,hdrm-voltage-mv = <325>;
+				qcom,hdrm-vol-hi-lo-win-mv = <100>;
+			};
+
+			pm660l_switch0: qcom,led_switch_0 {
+				label = "switch";
+				qcom,led-name = "led:switch_0";
+				qcom,led-mask = <3>;
+				qcom,default-led-trigger = "switch0_trigger";
+			};
+
+			pm660l_switch1: qcom,led_switch_1 {
+				label = "switch";
+				qcom,led-name = "led:switch_1";
+				qcom,led-mask = <4>;
+				qcom,default-led-trigger = "switch1_trigger";
+			};
+		};
+
+		pm660l_lcdb: qpnp-lcdb@ec00 {
+			compatible = "qcom,qpnp-lcdb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0xec00 0x100>;
+			interrupts = <0x3 0xec 0x1 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "sc-irq";
+
+			qcom,pmic-revid = <&pm660l_revid>;
+
+			lcdb_ldo_vreg: ldo {
+				label = "ldo";
+				regulator-name = "lcdb_ldo";
+				regulator-min-microvolt = <4000000>;
+				regulator-max-microvolt = <6000000>;
+			};
+
+			lcdb_ncp_vreg: ncp {
+				label = "ncp";
+				regulator-name = "lcdb_ncp";
+				regulator-min-microvolt = <4000000>;
+				regulator-max-microvolt = <6000000>;
+			};
+		};
+
+		pm660a_oledb: qpnp-oledb@e000 {
+			compatible = "qcom,qpnp-oledb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pm660l_revid>;
+			reg = <0xe000 0x100>;
+			qcom,pbs-client = <&pm660l_pbs>;
+
+			label = "oledb";
+			regulator-name = "regulator-oledb";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <8100000>;
+
+			qcom,swire-control;
+			qcom,ext-pin-control;
+			status = "disabled";
+		};
+
+		pm660a_labibb: qpnp-labibb-regulator {
+			compatible = "qcom,qpnp-labibb-regulator";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pm660l_revid>;
+			qcom,swire-control;
+			status = "disabled";
+
+			ibb_regulator: qcom,ibb@dc00 {
+				reg = <0xdc00 0x100>;
+				reg-names = "ibb_reg";
+				regulator-name = "ibb_reg";
+
+				regulator-min-microvolt = <4000000>;
+				regulator-max-microvolt = <6300000>;
+
+				qcom,qpnp-ibb-min-voltage = <1400000>;
+				qcom,qpnp-ibb-step-size = <100000>;
+				qcom,qpnp-ibb-slew-rate = <2000000>;
+				qcom,qpnp-ibb-init-voltage = <4000000>;
+				qcom,qpnp-ibb-init-amoled-voltage = <4000000>;
+			};
+
+			lab_regulator: qcom,lab@de00 {
+				reg = <0xde00 0x100>;
+				reg-names = "lab";
+				regulator-name = "lab_reg";
+
+				regulator-min-microvolt = <4600000>;
+				regulator-max-microvolt = <6100000>;
+
+				qcom,qpnp-lab-min-voltage = <4600000>;
+				qcom,qpnp-lab-step-size = <100000>;
+				qcom,qpnp-lab-slew-rate = <5000>;
+				qcom,qpnp-lab-init-voltage = <4600000>;
+				qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+
+				qcom,notify-lab-vreg-ok-sts;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 660dac5..71eee1f 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -26,16 +26,22 @@
 			qcom,fab-id-valid;
 		};
 
+		pmi8998_misc: qcom,misc@900 {
+			compatible = "qcom,qpnp-misc";
+			reg = <0x900 0x100>;
+		};
+
 		qcom,power-on@800 {
 			compatible = "qcom,qpnp-power-on";
 			reg = <0x800 0x100>;
 		};
 
 		pmi8998_tz: qcom,temp-alarm@2400 {
-			compatible = "qcom,qpnp-temp-alarm";
+			compatible = "qcom,spmi-temp-alarm";
 			reg = <0x2400 0x100>;
 			interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
-			label = "pmi8998_tz";
+			io-channels = <&pmi8998_rradc 7>;
+			io-channel-names = "thermal";
 			#thermal-sensor-cells = <0>;
 		};
 
@@ -671,6 +677,28 @@
 				qcom,default-led-trigger = "switch1_trigger";
 			};
 		};
+
+		pmi8998_haptics: qcom,haptics@c000 {
+			compatible = "qcom,qpnp-haptics";
+			reg = <0xc000 0x100>;
+			interrupts = <0x3 0xc0 0x0 IRQ_TYPE_EDGE_BOTH>,
+				     <0x3 0xc0 0x1 IRQ_TYPE_EDGE_BOTH>;
+			interrupt-names = "hap-sc-irq", "hap-play-irq";
+			qcom,pmic-revid = <&pmi8998_revid>;
+			qcom,pmic-misc = <&pmi8998_misc>;
+			qcom,misc-clk-trim-error-reg = <0xf3>;
+			qcom,actuator-type = <0>;
+			qcom,play-mode = "direct";
+			qcom,vmax-mv = <3200>;
+			qcom,ilim-ma = <800>;
+			qcom,sc-dbc-cycles = <8>;
+			qcom,wave-play-rate-us = <6667>;
+			qcom,en-brake;
+			qcom,lra-high-z = "opt1";
+			qcom,lra-auto-res-mode = "qwd";
+			qcom,lra-res-cal-period = <4>;
+			status = "disabled";
+		};
 	};
 };
 
@@ -720,19 +748,27 @@
 		cooling-maps {
 			vbat_cpu4 {
 				trip = <&low_vbat>;
-				cooling-device = <&CPU4 22 22>;
+				cooling-device =
+					<&CPU4 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			vbat_cpu5 {
 				trip = <&low_vbat>;
-				cooling-device = <&CPU5 22 22>;
+				cooling-device =
+					<&CPU5 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			vbat_map6 {
 				trip = <&low_vbat>;
-				cooling-device = <&CPU6 22 22>;
+				cooling-device =
+					<&CPU6 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			vbat_map7 {
 				trip = <&low_vbat>;
-				cooling-device = <&CPU7 22 22>;
+				cooling-device =
+					<&CPU7 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 		};
 	};
@@ -783,19 +819,27 @@
 		cooling-maps {
 			soc_cpu4 {
 				trip = <&low_soc>;
-				cooling-device = <&CPU4 22 22>;
+				cooling-device =
+					<&CPU4 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			soc_cpu5 {
 				trip = <&low_soc>;
-				cooling-device = <&CPU5 22 22>;
+				cooling-device =
+					<&CPU5 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			soc_map6 {
 				trip = <&low_soc>;
-				cooling-device = <&CPU6 22 22>;
+				cooling-device =
+					<&CPU6 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 			soc_map7 {
 				trip = <&low_soc>;
-				cooling-device = <&CPU7 22 22>;
+				cooling-device =
+					<&CPU7 THERMAL_MAX_LIMIT
+						THERMAL_MAX_LIMIT>;
 			};
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
new file mode 100644
index 0000000..61ef7ff
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@22 { /* ADSP HEAP */
+			reg = <22>;
+			memory-region = <&adsp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@13 { /* SPSS HEAP */
+			reg = <13>;
+			memory-region = <&sp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
+
+		qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
new file mode 100644
index 0000000..a1dc261
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -0,0 +1,363 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/* Stub regulators */
+
+/ {
+	pm660_s4: regulator-pm660-s4 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_s4";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <2040000>;
+		regulator-max-microvolt = <2040000>;
+	};
+
+	/* pm660 S5 - VDD_MODEM supply */
+	pm660_s5_level: regulator-pm660-s5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_s5_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660_s6: regulator-pm660-s6 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_s6";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+	};
+
+	/* pm660l S1 - VDD_MX supply */
+	pm660l_s1_level: regulator-pm660l-s1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s1_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_s1_floor_level: regulator-pm660l-s1-floor-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s1_floor_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_s1_level_ao: regulator-pm660l-s1-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s1_level_ao";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	/* pm660l S2 - VDD_GFX supply */
+	pm660l_s2_level: regulator-pm660l-s2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s2_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	/* pm660l S3 + S4 - VDD_CX supply */
+	pm660l_s3_level: regulator-pm660l-s3-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s3_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_s3_floor_level: regulator-pm660l-s3-floor-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s3_floor_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_s3_level_ao: regulator-pm660l-s3-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_s3_level_ao";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660_l1: regulator-pm660-l1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l1";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1250000>;
+	};
+
+	pm660_l2: regulator-pm660-l2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l2";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <1000000>;
+	};
+
+	pm660_l3: regulator-pm660-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l3";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <1000000>;
+	};
+
+	pm660_l5: regulator-pm660-l5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l5";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <800000>;
+	};
+
+	pm660_l6: regulator-pm660-l6 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l6";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1304000>;
+		regulator-max-microvolt = <1304000>;
+	};
+
+	pm660_l7: regulator-pm660-l7 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l7";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+
+	pm660_l8: regulator-pm660-l8 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l8";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l9: regulator-pm660-l9 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l9";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l10: regulator-pm660-l10 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l10";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l11: regulator-pm660-l11 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l11";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l12: regulator-pm660-l12 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l12";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l13: regulator-pm660-l13 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l13";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l14: regulator-pm660-l14 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l14";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pm660_l15: regulator-pm660-l15 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l15";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2950000>;
+	};
+
+	pm660_l16: regulator-pm660-l16 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l16";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2700000>;
+		regulator-max-microvolt = <2700000>;
+	};
+
+	pm660_l17: regulator-pm660-l17 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l17";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2950000>;
+	};
+
+	pm660_l19: regulator-pm660-l19 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660_l19";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3312000>;
+		regulator-max-microvolt = <3312000>;
+	};
+
+	pm660l_l1: regulator-pm660l-l1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l1";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <880000>;
+		regulator-max-microvolt = <900000>;
+	};
+
+	pm660l_l2: regulator-pm660l-l2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l2";
+		qcom,hpm-min-load = <5000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pm660l_l3: regulator-pm660l-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l3";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2850000>;
+		regulator-max-microvolt = <3008000>;
+	};
+
+	pm660l_l4: regulator-pm660l-l4 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l4";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2960000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pm660l_l5: regulator-pm660l-l5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l5";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2960000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pm660l_l6: regulator-pm660l-l6 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l6";
+		qcom,hpm-min-load = <5000>;
+		regulator-min-microvolt = <3008000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	pm660l_l7: regulator-pm660l-l7 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l7";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3088000>;
+		regulator-max-microvolt = <3100000>;
+	};
+
+	pm660l_l8: regulator-pm660l-l8 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l8";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3312000>;
+	};
+
+	/* pm660l L9 - VDD_LPI_CX supply */
+	pm660l_l9_level: regulator-pm660l-l9-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l9_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_l9_floor_level: regulator-pm660l-l9-floor-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l9_floor_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	/* pm660l L10 - VDD_LPI_MX supply */
+	pm660l_l10_level: regulator-pm660l-l10-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l10_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_l10_floor_level: regulator-pm660l-l10-floor-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_l10_floor_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm660l_bob: regulator-pm660l-bob {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm660l_bob";
+		regulator-min-microvolt = <3312000>;
+		regulator-max-microvolt = <3312000>;
+	};
+
+	apc0_pwrcl_vreg: regulator-pwrcl {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc0_pwrcl_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <7>;
+	};
+
+	apc0_l3_vreg: regulator-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc0_l3_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <7>;
+	};
+
+	apc1_perfcl_vreg: regulator-perfcl {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc1_perfcl_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <7>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 2cbb990..927e0b2 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -12,6 +12,14 @@
 
 #include "skeleton64.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670";
@@ -279,9 +287,113 @@
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
+
+		removed_regions: removed_regions@85700000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85700000 0 0x3800000>;
+		};
+
+		pil_camera_mem: camera_region@8ab00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8ab00000 0 0x500000>;
+		};
+
+		pil_modem_mem: modem_region@8b000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8b000000 0 0x7e00000>;
+		};
+
+		pil_video_mem: pil_video_region@92e00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x92e00000 0 0x500000>;
+		};
+
+		pil_cdsp_mem: cdsp_regions@93300000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93300000 0 0x600000>;
+		};
+
+		pil_mba_mem: pil_mba_region@93900000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93900000 0 0x200000>;
+		};
+
+		pil_adsp_mem: pil_adsp_region@93b00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93b00000 0 0x1e00000>;
+		};
+
+		pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95900000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95910000 0 0x5000>;
+		};
+
+		pil_gpu_mem: pil_gpu_region@95915000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95915000 0 0x1000>;
+		};
+
+		adsp_mem: adsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0xc00000>;
+		};
+
+		qseecom_mem: qseecom_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+		};
+
+		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		secure_display_memory: secure_display_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x5c00000>;
+		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x2000000>;
+			linux,cma-default;
+		};
 	};
 };
 
+#include "sdm670-ion.dtsi"
+
 &soc {
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -308,6 +420,11 @@
 		clock-frequency = <19200000>;
 	};
 
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,pipe-attr-ee;
+	};
+
 	timer@0x17c90000{
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -374,6 +491,54 @@
 		reg-names = "pshold-base", "tcsr-boot-misc-detect";
 	};
 
+	clock_rpmh: qcom,rpmhclk {
+		compatible = "qcom,dummycc";
+		clock-output-names = "rpmh_clocks";
+		#clock-cells = <1>;
+	};
+
+	clock_gcc: qcom,gcc@100000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_videocc: qcom,videocc@ab00000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "videocc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "camcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_dispcc: qcom,dispcc@af00000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "dispcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpucc: qcom,gpucc@5090000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gpucc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gfx: qcom,gfxcc@5090000 {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gfxcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
 	clock_cpucc: qcom,cpucc {
 		compatible = "qcom,dummycc";
 		clock-output-names = "cpucc_clocks";
@@ -491,6 +656,14 @@
 			qcom,dump-node = <&L1_D_700>;
 			qcom,dump-id = <0x87>;
 		};
+		qcom,llcc1_d_cache {
+			qcom,dump-node = <&LLCC_1>;
+			qcom,dump-id = <0x140>;
+		};
+		qcom,llcc2_d_cache {
+			qcom,dump-node = <&LLCC_2>;
+			qcom,dump-id = <0x141>;
+		};
 	};
 
 	kryo3xx-erp {
@@ -535,6 +708,39 @@
 		interrupts = <0 17 0>;
 	};
 
+	qcom,llcc@1100000 {
+		compatible = "qcom,llcc-core", "syscon", "simple-mfd";
+		reg = <0x1100000 0x250000>;
+		reg-names = "llcc_base";
+		qcom,llcc-banks-off = <0x0 0x80000>;
+		qcom,llcc-broadcast-off = <0x200000>;
+
+		llcc: qcom,sdm670-llcc {
+			compatible = "qcom,sdm670-llcc";
+			#cache-cells = <1>;
+			max-slices = <32>;
+			qcom,dump-size = <0x80000>;
+		};
+
+		qcom,llcc-erp {
+			compatible = "qcom,llcc-erp";
+			interrupt-names = "ecc_irq";
+			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		qcom,llcc-amon {
+			compatible = "qcom,llcc-amon";
+		};
+
+		LLCC_1: llcc_1_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
+
+		LLCC_2: llcc_2_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
+	};
+
 	dcc: dcc_v2@10a2000 {
 		compatible = "qcom,dcc_v2";
 		reg = <0x10a2000 0x1000>,
@@ -545,3 +751,80 @@
 };
 
 #include "sdm670-pinctrl.dtsi"
+#include "msm-arm-smmu-sdm670.dtsi"
+#include "msm-gdsc-sdm845.dtsi"
+
+&usb30_prim_gdsc {
+	status = "ok";
+};
+
+&ufs_phy_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_tbu1_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_tbu2_gdsc {
+	status = "ok";
+};
+
+&bps_gdsc {
+	status = "ok";
+};
+
+&ife_0_gdsc {
+	status = "ok";
+};
+
+&ife_1_gdsc {
+	status = "ok";
+};
+
+&ipe_0_gdsc {
+	status = "ok";
+};
+
+&ipe_1_gdsc {
+	status = "ok";
+};
+
+&titan_top_gdsc {
+	status = "ok";
+};
+
+&mdss_core_gdsc {
+	status = "ok";
+};
+
+&gpu_cx_gdsc {
+	status = "ok";
+};
+
+&gpu_gx_gdsc {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
+	qcom,force-enable-root-clk;
+	status = "ok";
+};
+
+&vcodec0_gdsc {
+	qcom,support-hw-trigger;
+	status = "ok";
+};
+
+&vcodec1_gdsc {
+	qcom,support-hw-trigger;
+	status = "ok";
+};
+
+&venus_gdsc {
+	status = "ok";
+};
+
+#include "sdm670-regulator.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index e9b71b9..4b7a680 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,7 +22,7 @@
 	qcom,board-id = <1 1>;
 };
 
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 73df071..67c3bcd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,12 +22,12 @@
 	qcom,board-id = <8 1>;
 };
 
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb>;
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
 };
 
 &dsi_sharp_4k_dsc_video {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index 709c89d..ad046e9 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -56,6 +56,10 @@
 			"hifi amp", "LINEOUT2",
 			"AMIC2", "MIC BIAS2",
 			"MIC BIAS2", "Headset Mic",
+			"AMIC3", "MIC BIAS2",
+			"MIC BIAS2", "ANCRight Headset Mic",
+			"AMIC4", "MIC BIAS2",
+			"MIC BIAS2", "ANCLeft Headset Mic",
 			"AMIC5", "MIC BIAS3",
 			"MIC BIAS3", "Handset Mic",
 			"DMIC0", "MIC BIAS1",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index b1c91bf..e26f888 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -25,12 +25,14 @@
 			<0x1380000 0x40000>,
 			<0x1740000 0x40000>,
 			<0x1620000 0x40000>,
+			<0x1620000 0x40000>,
 			<0x1620000 0x40000>;
 
 		reg-names = "aggre1_noc-base", "aggre2_noc-base",
 			"config_noc-base", "dc_noc-base",
 			"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
-			"mmss_noc-base", "system_noc-base", "ipa_virt-base";
+			"mmss_noc-base", "system_noc-base", "ipa_virt-base",
+			"camnoc_virt-base";
 
 		mbox-names = "apps_rsc", "disp_rsc";
 		mboxes = <&apps_rsc 0 &disp_rsc 0>;
@@ -368,6 +370,15 @@
 			clocks = <>;
 		};
 
+		fab_camnoc_virt: fab-camnoc_virt {
+			cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+			label = "fab-camnoc_virt";
+			qcom,fab-dev;
+			qcom,base-name = "camnoc_virt-base";
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
 		fab_config_noc: fab-config_noc {
 			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
 			label = "fab-config_noc";
@@ -654,6 +665,33 @@
 			qcom,bus-dev = <&fab_aggre2_noc>;
 		};
 
+		mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+			label = "mas-qxm-camnoc-hf0-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
+		mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+			label = "mas-qxm-camnoc-hf1-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
+		mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+			label = "mas-qxm-camnoc-sf-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
 		mas_qhm_spdm: mas-qhm-spdm {
 			cell-id = <MSM_BUS_MASTER_SPDM>;
 			label = "mas-qhm-spdm";
@@ -900,12 +938,23 @@
 			qcom,bus-dev = <&fab_mmss_noc>;
 		};
 
-		mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
-			cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
-			label = "mas-qxm-camnoc-hf";
+		mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0>;
+			label = "mas-qxm-camnoc-hf0";
 			qcom,buswidth = <32>;
-			qcom,agg-ports = <2>;
-			qcom,qport = <1 2>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1>;
+			label = "mas-qxm-camnoc-hf1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
 			qcom,connections = <&slv_qns_mem_noc_hf>;
 			qcom,bus-dev = <&fab_mmss_noc>;
 			qcom,bcms = <&bcm_mm1>;
@@ -1193,6 +1242,15 @@
 			qcom,bcms = <&bcm_sn11>;
 		};
 
+		slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+			cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+			label = "slv-qns-camnoc-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
 		slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
 			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
 			label = "slv-qhs-a1-noc-cfg";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -73,7 +73,7 @@
 	};
 };
 
-&cci {
+&cam_cci {
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -73,7 +73,7 @@
 	};
 };
 
-&cci {
+&cam_cci {
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index cb20e0f..91b8738 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -17,7 +17,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac65000 {
+	cam_csiphy0: qcom,csiphy@ac65000 {
 		cell-index = <0>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0x0ac65000 0x1000>;
@@ -53,7 +53,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac66000{
+	cam_csiphy1: qcom,csiphy@ac66000{
 		cell-index = <1>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac66000 0x1000>;
@@ -90,7 +90,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac67000 {
+	cam_csiphy2: qcom,csiphy@ac67000 {
 		cell-index = <2>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac67000 0x1000>;
@@ -126,7 +126,7 @@
 		status = "ok";
 	};
 
-	cci: qcom,cci@ac4a000 {
+	cam_cci: qcom,cci@ac4a000 {
 		cell-index = <0>;
 		compatible = "qcom,cci";
 		reg = <0xac4a000 0x4000>;
@@ -343,17 +343,17 @@
 		clock-names = "gcc_ahb_clk",
 			"gcc_axi_clk",
 			"soc_ahb_clk",
-			"cpas_ahb_clk",
 			"slow_ahb_clk_src",
+			"cpas_ahb_clk",
 			"camnoc_axi_clk";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 			<&clock_gcc GCC_CAMERA_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
-			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		src-clock-name = "slow_ahb_clk_src";
-		clock-rates = <0 0 0 0 80000000 0>;
+		clock-rates = <0 0 0 80000000 0 0>;
 		qcom,msm-bus,name = "cam_ahb";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <1>;
@@ -366,6 +366,21 @@
 			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
+			RPMH_REGULATOR_LEVEL_RETENTION
+			RPMH_REGULATOR_LEVEL_MIN_SVS
+			RPMH_REGULATOR_LEVEL_LOW_SVS
+			RPMH_REGULATOR_LEVEL_SVS
+			RPMH_REGULATOR_LEVEL_SVS_L1
+			RPMH_REGULATOR_LEVEL_NOM
+			RPMH_REGULATOR_LEVEL_NOM_L1
+			RPMH_REGULATOR_LEVEL_NOM_L2
+			RPMH_REGULATOR_LEVEL_TURBO
+			RPMH_REGULATOR_LEVEL_TURBO_L1>;
+		vdd-corner-ahb-mapping = "suspend", "suspend",
+			"svs", "svs", "svs", "svs",
+			"nominal", "nominal", "nominal",
+			"turbo", "turbo";
 		client-id-based;
 		client-names =
 			"csiphy0", "csiphy1", "csiphy2", "cci0",
@@ -389,10 +404,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
 					qcom,msm-bus,name = "cam_hf_1_camnoc";
@@ -400,10 +415,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 			qcom,axi-port2 {
@@ -414,21 +429,21 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
-					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus,name = "cam_hf_2_camnoc";
 					qcom,msm-bus-vector-dyn-vote;
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 			qcom,axi-port3 {
@@ -439,10 +454,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
 					qcom,msm-bus,name = "cam_sf_1_camnoc";
@@ -450,10 +465,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 		};
@@ -503,7 +518,7 @@
 		status = "ok";
 	};
 
-	qcom,csid0@acb3000 {
+	cam_csid0: qcom,csid0@acb3000 {
 		cell-index = <0>;
 		compatible = "qcom,csid170";
 		reg-names = "csid";
@@ -545,7 +560,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe0@acaf000 {
+	cam_vfe0: qcom,vfe0@acaf000 {
 		cell-index = <0>;
 		compatible = "qcom,vfe170";
 		reg-names = "ife";
@@ -582,7 +597,7 @@
 		status = "ok";
 	};
 
-	qcom,csid1@acba000 {
+	cam_csid1: qcom,csid1@acba000 {
 		cell-index = <1>;
 		compatible = "qcom,csid170";
 		reg-names = "csid";
@@ -624,7 +639,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe1@acb6000 {
+	cam_vfe1: qcom,vfe1@acb6000 {
 		cell-index = <1>;
 		compatible = "qcom,vfe170";
 		reg-names = "ife";
@@ -661,7 +676,7 @@
 		status = "ok";
 	};
 
-	qcom,csid-lite@acc8000 {
+	cam_csid_lite: qcom,csid-lite@acc8000 {
 		cell-index = <2>;
 		compatible = "qcom,csid-lite170";
 		reg-names = "csid-lite";
@@ -700,7 +715,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe-lite@acc4000 {
+	cam_vfe_lite: qcom,vfe-lite@acc4000 {
 		cell-index = <2>;
 		compatible = "qcom,vfe-lite170";
 		reg-names = "ife-lite";
@@ -743,7 +758,7 @@
 		status = "ok";
 	};
 
-	qcom,a5@ac00000 {
+	cam_a5: qcom,a5@ac00000 {
 		cell-index = <0>;
 		compatible = "qcom,cam_a5";
 		reg = <0xac00000 0x6000>,
@@ -757,6 +772,7 @@
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_cam_ahb_clk",
 			"gcc_cam_axi_clk",
+			"soc_fast_ahb",
 			"soc_ahb_clk",
 			"cpas_ahb_clk",
 			"camnoc_axi_clk",
@@ -765,6 +781,7 @@
 			"icp_clk_src";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 				<&clock_gcc GCC_CAMERA_AXI_CLK>,
+				<&clock_camcc CAM_CC_FAST_AHB_CLK_SRC>,
 				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
@@ -772,12 +789,12 @@
 				<&clock_camcc CAM_CC_ICP_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 0 80000000 0 0 0 600000000>;
+		clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
 	};
 
-	qcom,ipe0 {
+	cam_ipe0: qcom,ipe0 {
 		cell-index = <0>;
 		compatible = "qcom,cam_ipe";
 		regulator-names = "ipe0-vdd";
@@ -793,11 +810,11 @@
 				<&clock_camcc CAM_CC_IPE_0_CLK>,
 				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 
-	qcom,ipe1 {
+	cam_ipe1: qcom,ipe1 {
 		cell-index = <1>;
 		compatible = "qcom,cam_ipe";
 		regulator-names = "ipe1-vdd";
@@ -813,11 +830,11 @@
 				<&clock_camcc CAM_CC_IPE_1_CLK>,
 				<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 
-	qcom,bps {
+	cam_bps: qcom,bps {
 		cell-index = <0>;
 		compatible = "qcom,cam_bps";
 		regulator-names = "bps-vdd";
@@ -833,7 +850,7 @@
 				<&clock_camcc CAM_CC_BPS_CLK>,
 				<&clock_camcc CAM_CC_BPS_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index fff9160..7d7c9cf 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -13,7 +13,11 @@
 /dts-v1/;
 /plugin/;
 
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "sdm845-cdp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index a1e0e4f..c8f84fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -155,6 +155,8 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -213,14 +215,6 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&usb2_vbus_boost_default>;
 	};
-
-aliases {
-		serial0 = &qupv3_se9_2uart;
-		spi0 = &qupv3_se8_spi;
-		i2c0 = &qupv3_se10_i2c;
-		i2c1 = &qupv3_se3_i2c;
-		hsuart0 = &qupv3_se6_4uart;
-	};
 };
 
 &qupv3_se9_2uart {
@@ -274,6 +268,14 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
+&dsi_sharp_1080_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
 &dsi_sim_vid {
 	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
@@ -298,7 +300,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -307,6 +309,12 @@
 	qcom,led-strings-list = [01 02];
 };
 
+&pmi8998_haptics {
+	qcom,vmax-mv = <2400>;
+	qcom,lra-auto-mode;
+	status = "okay";
+};
+
 &qupv3_se8_spi {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 92f8586..1ce68e1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -219,6 +219,7 @@
 
 		qcom,secure_align_mask = <0xfff>;
 		qcom,global_pt;
+		qcom,hyp_secure_alloc;
 
 		gfx3d_user: gfx3d_user {
 			compatible = "qcom,smmu-kgsl-cb";
@@ -277,7 +278,7 @@
 
 			qcom,gmu-pwrlevel@1 {
 				reg = <1>;
-				qcom,gmu-freq = <19200000>;
+				qcom,gmu-freq = <200000000>;
 			};
 
 			qcom,gmu-pwrlevel@2 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index 79fa580..e299744 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -13,7 +13,11 @@
 /dts-v1/;
 /plugin/;
 
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "sdm845-mtp.dtsi"
 
 / {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 85bec57..c75eb48 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -150,7 +150,7 @@
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
 
-&dsi_nt35597_truly_dsc_cmd_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	qcom,dsi-display-active;
 };
 
@@ -159,6 +159,12 @@
 	qcom,led-strings-list = [01 02];
 };
 
+&pmi8998_haptics {
+	qcom,vmax-mv = <2400>;
+	qcom,lra-auto-mode;
+	status = "okay";
+};
+
 &mdss_mdp {
 	#cooling-cells = <2>;
 };
@@ -188,6 +194,17 @@
 	status = "ok";
 };
 
+&extcon_storage_cd {
+	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+	debounce-ms = <200>;
+	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&storage_cd>;
+
+	status = "ok";
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -210,6 +227,8 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -230,6 +249,8 @@
 				50000000 100000000 200000000>;
 	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -264,16 +285,6 @@
 	status = "ok";
 };
 
-/ {
-aliases {
-		serial0 = &qupv3_se9_2uart;
-		spi0 = &qupv3_se8_spi;
-		i2c0 = &qupv3_se10_i2c;
-		i2c1 = &qupv3_se3_i2c;
-		hsuart0 = &qupv3_se6_4uart;
-	};
-};
-
 &qupv3_se9_2uart {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index bba95a3..c2fbed5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -168,6 +168,17 @@
 	status = "ok";
 };
 
+&extcon_storage_cd {
+	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+	debounce-ms = <200>;
+	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&storage_cd>;
+
+	status = "ok";
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -190,6 +201,30 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index bf58da6..726a63f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -153,8 +153,8 @@
 		label = "dsi_sharp_1080_cmd_display";
 		qcom,display-type = "primary";
 
-		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
-		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
 		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
 			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
@@ -368,10 +368,106 @@
 		cell-index = <0>;
 		label = "wb_display";
 	};
+
+	sde_dp: qcom,dp_display@0{
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+
+		reg =	<0xae90000 0xa84>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea030 0x10>,
+			<0x0aee1000 0x034>;
+		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"hdcp_physical";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk",
+			"ctrl_link_iface_clk", "ctrl_crypto_clk",
+			"ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+
+		qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+
+		qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
+};
+
+&sde_dp {
+	pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+	pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+	pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+	qcom,aux-en-gpio = <&tlmm 43 0>;
+	qcom,aux-sel-gpio = <&tlmm 51 0>;
+	qcom,usbplug-cc-gpio = <&tlmm 38 0>;
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb>;
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
 };
 
 &dsi_dual_nt35597_truly_video {
@@ -397,7 +493,8 @@
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
 	qcom,display-topology = <1 1 1>,
-				<2 2 1>;
+				<2 2 1>, /* dsc merge */
+				<2 1 1>; /* 3d mux */
 	qcom,default-topology-index = <0>;
 };
 
@@ -406,7 +503,8 @@
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
 	qcom,display-topology = <1 1 1>,
-				<2 2 1>;
+				<2 2 1>, /* dsc merge */
+				<2 1 1>; /* 3d mux */
 	qcom,default-topology-index = <0>;
 };
 
@@ -436,8 +534,10 @@
 };
 
 &dsi_sharp_1080_cmd {
-	qcom,display-topology = <2 0 2>,
-				<1 0 2>;
+	qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07 07 04 03 04 00];
+	qcom,mdss-dsi-t-clk-post = <0x0c>;
+	qcom,mdss-dsi-t-clk-pre = <0x29>;
+	qcom,display-topology = <1 0 1>;
 	qcom,default-topology-index = <0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index 168f2a9..b9eac3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -64,4 +64,46 @@
 			};
 		};
 	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+		compatible = "qcom,mdss_dp_pll_10nm";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0x088ea000 0x200>,
+		      <0x088eaa00 0x200>,
+		      <0x088ea200 0x200>,
+		      <0x088ea600 0x200>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "ln_tx0_base",
+			"ln_tx1_base", "gdsc_base";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+		clock-names = "iface_clk", "ref_clk_src", "ref_clk",
+			"cfg_ahb_clk", "pipe_clk";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index c350800..2ae3832 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -86,8 +86,6 @@
 		qcom,sde-dither-version = <0x00010000>;
 		qcom,sde-dither-size = <0x20>;
 
-		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
-
 		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
 					"dma", "dma", "dma", "dma";
 
@@ -136,11 +134,48 @@
 		qcom,sde-vbif-off = <0>;
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
+		qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+		qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
 
 		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
 		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
 
+		qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+			0x00000000>;
+		qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+		qcom,sde-qos-lut-linear =
+			<4 0x00000000 0x00000357>,
+			<5 0x00000000 0x00003357>,
+			<6 0x00000000 0x00023357>,
+			<7 0x00000000 0x00223357>,
+			<8 0x00000000 0x02223357>,
+			<9 0x00000000 0x22223357>,
+			<10 0x00000002 0x22223357>,
+			<11 0x00000022 0x22223357>,
+			<12 0x00000222 0x22223357>,
+			<13 0x00002222 0x22223357>,
+			<14 0x00012222 0x22223357>,
+			<0 0x00112222 0x22223357>;
+		qcom,sde-qos-lut-macrotile =
+			<10 0x00000003 0x44556677>,
+			<11 0x00000033 0x44556677>,
+			<12 0x00000233 0x44556677>,
+			<13 0x00002233 0x44556677>,
+			<14 0x00012233 0x44556677>,
+			<0 0x00112233 0x44556677>;
+		qcom,sde-qos-lut-nrt =
+			<0 0x00000000 0x00000000>;
+		qcom,sde-qos-lut-cwb =
+			<0 0x75300000 0x00000000>;
+
+		qcom,sde-cdp-setting = <1 1>, <1 0>;
+
 		qcom,sde-inline-rotator = <&mdss_rotator 0>;
+		qcom,sde-inline-rot-xin = <10 11>;
+		qcom,sde-inline-rot-xin-type = "sspp", "wb";
+
+		/* offsets are relative to "mdp_phys + qcom,sde-off" */
+		qcom,sde-inline-rot-clk-ctrl = <0x2bc 0x8>, <0x2bc 0xc>;
 
 		qcom,sde-reg-dma-off = <0>;
 		qcom,sde-reg-dma-version = <0x1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 3870d8f..df6ffad 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -25,13 +25,15 @@
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <0 489 0>, <0 130 0>, <0 486 0>;
-		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
+		interrupts = <0 489 0>, <0 130 0>, <0 486 0>, <0 488 0>;
+		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+				"ss_phy_irq", "dm_hs_phy_irq";
 
 		USB3_GDSC-supply = <&usb30_prim_gdsc>;
 		qcom,usb-dbm = <&dbm_1p5>;
 		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
 		qcom,num-gsi-evt-buffs = <0x3>;
+		qcom,use-pdc-interrupts;
 		extcon = <&pmi8998_pdphy>, <&pmi8998_pdphy>, <&eud>;
 
 		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
@@ -322,11 +324,13 @@
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <0 491 0>, <0 135 0>, <0 487 0>;
-		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
+		interrupts = <0 491 0>, <0 135 0>, <0 487 0>, <0 490 0>;
+		interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+				"ss_phy_irq", "dm_hs_phy_irq";
 
 		USB3_GDSC-supply = <&usb30_sec_gdsc>;
 		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+		qcom,use-pdc-interrupts;
 
 		clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
 			 <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
new file mode 100644
index 0000000..8a9a544
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845-v2.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 V2 QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index efd8c32..bf72741 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -20,3 +20,19 @@
 &spmi_debug_bus {
 	status = "ok";
 };
+
+&clock_gcc {
+	compatible = "qcom,gcc-sdm845-v2";
+};
+
+&clock_camcc {
+	compatible = "qcom,cam_cc-sdm845-v2";
+};
+
+&clock_dispcc {
+	compatible = "qcom,dispcc-sdm845-v2";
+};
+
+&clock_videocc {
+	compatible = "qcom,video_cc-sdm845-v2";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 4fe9282..af12224 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -27,6 +27,10 @@
 		qcom,max-secure-instances = <5>;
 		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
 
+		/* LLCC Info */
+		cache-slice-names = "vidsc0", "vidsc1";
+		cache-slices = <&llcc 2>, <&llcc 3>;
+
 		/* Supply */
 		venus-supply = <&venus_gdsc>;
 		venus-core0-supply = <&vcodec0_gdsc>;
@@ -91,6 +95,14 @@
 			qcom,bus-governor = "performance";
 			qcom,bus-range-kbps = <1000 1000>;
 		};
+		venus_bus_llcc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-llcc";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <17000 125700>;
+		};
 
 		/* MMUs */
 		non_secure_cb {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index a38ac59..7ea200e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -39,6 +39,14 @@
 		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
 	};
 
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+		hsuart0 = &qupv3_se6_4uart;
+	};
+
 	cpus {
 		#address-cells = <2>;
 		#size-cells = <0>;
@@ -337,25 +345,31 @@
 	};
 
 	energy-costs {
+		compatible = "sched-energy";
+
 		CPU_COST_0: core-cost0 {
 			busy-cost-data = <
-				 92   34 /*  300000 */
-				129   40 /*  422400 */
-				153   43 /*  499200 */
-				177   48 /*  576000 */
-				200   52 /*  652800 */
-				230   58 /*  748800 */
-				253   64 /*  825600 */
-				277   70 /*  902400 */
-				301   76 /*  979200 */
-				324   83 /* 1056000 */
-				348   90 /* 1132800 */
-				371   98 /* 1209600 */
-				395  105 /* 1286400 */
-				419  114 /* 1363200 */
-				442  123 /* 1440000 */
-				466  135 /* 1516800 */
-				490  152 /* 1593600 */
+				 300000   31
+				 422400   38
+				 499200   42
+				 576000   46
+				 652800   51
+				 748800   58
+				 825600   64
+				 902400   70
+				 979200   76
+				1056000   83
+				1132800   90
+				1209600   97
+				1286400  105
+				1363200  114
+				1440000  124
+				1516800  136
+				1593600  152
+				1651200  167 /* speedbin 0,1 */
+				1670400  173 /* speedbin 2 */
+				1708800  186 /* speedbin 0,1 */
+				1747200  201 /* speedbin 2 */
 			>;
 			idle-cost-data = <
 				22 18 14 12
@@ -363,28 +377,32 @@
 		};
 		CPU_COST_1: core-cost1 {
 			busy-cost-data = <
-				156  240 /*  300000 */
-				220  247 /*  422400 */
-				261  252 /*  499200 */
-				301  257 /*  576000 */
-				341  264 /*  652800 */
-				381  272 /*  729600 */
-				421  281 /*  806400 */
-				461  292 /*  883200 */
-				501  306 /*  960000 */
-				542  324 /* 1036800 */
-				582  346 /* 1113600 */
-				622  373 /* 1190400 */
-				662  407 /* 1267200 */
-				702  450 /* 1344000 */
-				742  504 /* 1420800 */
-				783  570 /* 1497600 */
-				823  649 /* 1574400 */
-				863  743 /* 1651200 */
-				903  849 /* 1728000 */
-				943  960 /* 1804800 */
-				983 1062 /* 1881600 */
-			       1024 1131 /* 1958400 */
+				300000   258
+				422400   260
+				499200   261
+				576000   263
+				652800   267
+				729600   272
+				806400   280
+				883200   291
+				960000   305
+			       1036800   324
+			       1113600   348
+			       1190400   378
+			       1267200   415
+			       1344000   460
+			       1420800   513
+			       1497600   576
+			       1574400   649
+			       1651200   732
+			       1728000   824
+			       1804800   923
+			       1881600  1027
+			       1958400  1131
+			       2035000  1228 /* speedbin 1,2 */
+			       2092000  1290 /* speedbin 1 */
+			       2112000  1308 /* speedbin 2 */
+			       2208000  1363 /* speedbin 2 */
 			>;
 			idle-cost-data = <
 				100 80 60 40
@@ -392,23 +410,27 @@
 		};
 		CLUSTER_COST_0: cluster-cost0 {
 			busy-cost-data = <
-				 92   3 /*  300000 */
-				129   4 /*  422400 */
-				153   4 /*  499200 */
-				177   4 /*  576000 */
-				200   5 /*  652800 */
-				230   5 /*  748800 */
-				253   6 /*  825600 */
-				277   7 /*  902400 */
-				301   7 /*  979200 */
-				324   8 /* 1056000 */
-				348   9 /* 1132800 */
-				371   9 /* 1209600 */
-				395  10 /* 1286400 */
-				419  11 /* 1363200 */
-				442  12 /* 1440000 */
-				466  13 /* 1516800 */
-				490  15 /* 1593600 */
+				 300000   3
+				 422400   4
+				 499200   4
+				 576000   4
+				 652800   5
+				 748800   5
+				 825600   6
+				 902400   7
+				 979200   7
+				1056000   8
+				1132800   9
+				1209600   9
+				1286400  10
+				1363200  11
+				1440000  12
+				1516800  13
+				1593600  15
+				1651200  17 /* speedbin 0,1 */
+				1670400  19 /* speedbin 2 */
+				1708800  21 /* speedbin 0,1 */
+				1747200  23 /* speedbin 2 */
 			>;
 			idle-cost-data = <
 				4 3 2 1
@@ -416,28 +438,32 @@
 		};
 		CLUSTER_COST_1: cluster-cost1 {
 			busy-cost-data = <
-				156  24 /*  300000 */
-				220  24 /*  422400 */
-				261  25 /*  499200 */
-				301  25 /*  576000 */
-				341  26 /*  652800 */
-				381  27 /*  729600 */
-				421  28 /*  806400 */
-				461  29 /*  883200 */
-				501  30 /*  960000 */
-				542  32 /* 1036800 */
-				582  34 /* 1113600 */
-				622  37 /* 1190400 */
-				662  40 /* 1267200 */
-				702  45 /* 1344000 */
-				742  50 /* 1420800 */
-				783  57 /* 1497600 */
-				823  64 /* 1574400 */
-				863  74 /* 1651200 */
-				903  84 /* 1728000 */
-				943  96 /* 1804800 */
-				983 106 /* 1881600 */
-			       1024 113 /* 1958400 */
+				300000  24
+				422400  24
+				499200  25
+				576000  25
+				652800  26
+				729600  27
+				806400  28
+				883200  29
+				960000  30
+			       1036800  32
+			       1113600  34
+			       1190400  37
+			       1267200  40
+			       1344000  45
+			       1420800  50
+			       1497600  57
+			       1574400  64
+			       1651200  74
+			       1728000  84
+			       1804800  96
+			       1881600 106
+			       1958400 113
+			       2035000 120 /* speedbin 1,2 */
+			       2092000 125 /* speedbin 1 */
+			       2112000 127 /* speedbin 2 */
+			       2208000 130 /* speedbin 2 */
 			>;
 			idle-cost-data = <
 				4 3 2 1
@@ -459,14 +485,35 @@
 		compatible = "simple-bus";
 	};
 
+	firmware: firmware {
+		android {
+			compatible = "android,firmware";
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait,slotselect";
+				};
+			};
+		};
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
 
-		removed_regions: removed_regions@85800000 {
+		removed_region1: removed_region1@85700000 {
 			no-map;
-			reg = <0 0x85800000 0 0x3700000>;
+			reg = <0 0x85700000 0 0x800000>;
+		};
+
+		removed_region2: removed_region2@85fc0000 {
+			no-map;
+			reg = <0 0x85fc0000 0 0x2f40000>;
 		};
 
 		pil_camera_mem: camera_region@8ab00000 {
@@ -755,79 +802,19 @@
 		};
 	};
 
-	msm_cpufreq: qcom,msm-cpufreq {
-		compatible = "qcom,msm-cpufreq";
-		clock-names = "cpu0_clk", "cpu4_clk";
-		clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
-			 <&clock_cpucc CPU4_PERFCL_CLK>;
-
-		qcom,governor-per-policy;
-
-		qcom,cpufreq-table-0 =
-			<  300000 >,
-			<  422400 >,
-			<  499200 >,
-			<  576000 >,
-			<  652800 >,
-			<  748800 >,
-			<  825600 >,
-			<  902400 >,
-			<  979200 >,
-			< 1056000 >,
-			< 1132800 >,
-			< 1209600 >,
-			< 1286400 >,
-			< 1363200 >,
-			< 1440000 >,
-			< 1516800 >,
-			< 1593600 >,
-			< 1651200 >,
-			< 1708800 >;
-
-		qcom,cpufreq-table-4 =
-			<  300000 >,
-			<  422400 >,
-			<  499200 >,
-			<  576000 >,
-			<  652800 >,
-			<  729600 >,
-			<  806400 >,
-			<  883200 >,
-			<  960000 >,
-			< 1036800 >,
-			< 1113600 >,
-			< 1190400 >,
-			< 1267200 >,
-			< 1344000 >,
-			< 1420800 >,
-			< 1497600 >,
-			< 1574400 >,
-			< 1651200 >,
-			< 1728000 >,
-			< 1804800 >,
-			< 1881600 >,
-			< 1958400 >,
-			< 2035200 >,
-			< 2092800 >,
-			< 2208000 >;
-	};
-
 	cpubw: qcom,cpubw {
 		compatible = "qcom,devbw";
 		governor = "performance";
-		qcom,src-dst-ports = <1 512>;
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
 		qcom,active-only;
 		qcom,bw-tbl =
-			<  762 /*  200 MHz */ >,
-			< 1144 /*  300 MHz */ >,
-			< 1720 /*  451 MHz */ >,
-			< 2086 /*  547 MHz */ >,
-			< 2597 /*  681 MHz */ >,
-			< 2929 /*  768 MHz */ >,
-			< 3879 /* 1017 MHz */ >,
-			< 4943 /* 1296 MHz */ >,
-			< 5931 /* 1555 MHz */ >,
-			< 6881 /* 1804 MHz */ >;
+			<  2288 /* 150 MHz */ >,
+			<  4577 /* 300 MHz */ >,
+			<  6500 /* 426 MHz */ >,
+			<  8132 /* 533 MHz */ >,
+			<  9155 /* 600 MHz */ >,
+			< 10681 /* 700 MHz */ >;
 	};
 
 	bwmon: qcom,cpu-bwmon {
@@ -844,7 +831,7 @@
 		compatible = "qcom,devbw";
 		governor = "powersave";
 		qcom,src-dst-ports =
-			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_EBI_CH0>;
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
 		qcom,active-only;
 		qcom,bw-tbl =
 			<  762 /*  200 MHz */ >,
@@ -1084,6 +1071,20 @@
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
 		vdd_mx-supply = <&pm8998_s6_level>;
+		qcom,cam_cc_csi0phytimer_clk_src-opp-handle = <&cam_csiphy0>;
+		qcom,cam_cc_csi1phytimer_clk_src-opp-handle = <&cam_csiphy1>;
+		qcom,cam_cc_csi2phytimer_clk_src-opp-handle = <&cam_csiphy2>;
+		qcom,cam_cc_cci_clk_src-opp-handle = <&cam_cci>;
+		qcom,cam_cc_ife_0_csid_clk_src-opp-handle = <&cam_csid0>;
+		qcom,cam_cc_ife_0_clk_src-opp-handle = <&cam_vfe0>;
+		qcom,cam_cc_ife_1_csid_clk_src-opp-handle = <&cam_csid1>;
+		qcom,cam_cc_ife_1_clk_src-opp-handle = <&cam_vfe1>;
+		qcom,cam_cc_ife_lite_csid_clk_src-opp-handle = <&cam_csid_lite>;
+		qcom,cam_cc_ife_lite_clk_src-opp-handle = <&cam_vfe_lite>;
+		qcom,cam_cc_icp_clk_src-opp-handle = <&cam_a5>;
+		qcom,cam_cc_ipe_0_clk_src-opp-handle = <&cam_ipe0>;
+		qcom,cam_cc_ipe_1_clk_src-opp-handle = <&cam_ipe1>;
+		qcom,cam_cc_bps_clk_src-opp-handle = <&cam_bps>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -3264,44 +3265,48 @@
 			};
 		};
 
-		gpu0-step {
+		gpu-virt-max-step {
 			polling-delay-passive = <10>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 11>;
+			polling-delay = <100>;
 			thermal-governor = "step_wise";
 			trips {
-				gpu0_trip: gpu0-trip {
+				gpu_trip0: gpu-trip0 {
 					temperature = <95000>;
 					hysteresis = <0>;
 					type = "passive";
 				};
 			};
 			cooling-maps {
-				gpu0_cdev {
-					trip = <&gpu0_trip>;
+				gpu_cdev0 {
+					trip = <&gpu_trip0>;
 					cooling-device =
-						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+						<&msm_gpu 0 THERMAL_NO_LIMIT>;
 				};
 			};
 		};
 
-		gpu1-step {
-			polling-delay-passive = <10>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 12>;
-			thermal-governor = "step_wise";
+		silver-virt-max-usr {
+			polling-delay-passive = <100>;
+			polling-delay = <100>;
+			thermal-governor = "user_space";
 			trips {
-				gpu1_trip: gpu1-trip {
-					temperature = <95000>;
+				silver-trip {
+					temperature = <120000>;
 					hysteresis = <0>;
 					type = "passive";
 				};
 			};
-			cooling-maps {
-				gpu1_cdev {
-					trip = <&gpu1_trip>;
-					cooling-device =
-						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+		};
+
+		gold-virt-max-usr {
+			polling-delay-passive = <100>;
+			polling-delay = <100>;
+			thermal-governor = "user_space";
+			trips {
+				gold-trip {
+					temperature = <120000>;
+					hysteresis = <0>;
+					type = "passive";
 				};
 			};
 		};
@@ -3319,10 +3324,29 @@
 				};
 			};
 			cooling-maps {
-				pop_cdev {
+				pop_cdev4 {
 					trip = <&pop_trip>;
 					cooling-device =
-						<&CPU4 1 THERMAL_NO_LIMIT>;
+						<&CPU4 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-1)>;
+				};
+				pop_cdev5 {
+					trip = <&pop_trip>;
+					cooling-device =
+						<&CPU5 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-1)>;
+				};
+				pop_cdev6 {
+					trip = <&pop_trip>;
+					cooling-device =
+						<&CPU6 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-1)>;
+				};
+				pop_cdev7 {
+					trip = <&pop_trip>;
+					cooling-device =
+						<&CPU7 THERMAL_NO_LIMIT
+							(THERMAL_MAX_LIMIT-1)>;
 				};
 			};
 		};
@@ -3343,15 +3367,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&aoss0_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&aoss0_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&aoss0_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3372,15 +3396,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpu0_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpu0_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpu0_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3401,15 +3425,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpu1_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpu1_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpu1_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3430,15 +3454,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpu2_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpu2_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpu2_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3459,15 +3483,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpu3_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpu3_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpu3_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3488,15 +3512,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&l3_0_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&l3_0_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&l3_0_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3517,15 +3541,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&l3_1_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&l3_1_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&l3_1_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3546,15 +3570,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpug0_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpug0_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpug0_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3575,15 +3599,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpug1_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpug1_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpug1_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3604,15 +3628,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpug2_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpug2_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpug2_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3633,15 +3657,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&cpug3_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&cpug3_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&cpug3_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3662,15 +3686,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&gpu0_trip_l>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&gpu0_trip_l>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&gpu0_trip_l>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3691,15 +3715,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&gpu1_trip_l>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&gpu1_trip_l>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&gpu1_trip_l>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3720,15 +3744,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&aoss1_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&aoss1_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&aoss1_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3749,15 +3773,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&dsp_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&dsp_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&dsp_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3778,15 +3802,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&ddr_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&ddr_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&ddr_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3807,15 +3831,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&wlan_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&wlan_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&wlan_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3836,15 +3860,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&hvx_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&hvx_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&hvx_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3865,15 +3889,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&camera_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&camera_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&camera_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3894,15 +3918,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&mmss_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&mmss_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&mmss_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -3923,15 +3947,15 @@
 			cooling-maps {
 				cpu0_vdd_cdev {
 					trip = <&mdm_trip>;
-					cooling-device = <&CPU0 12 12>;
+					cooling-device = <&CPU0 4 4>;
 				};
 				cpu4_vdd_cdev {
 					trip = <&mdm_trip>;
-					cooling-device = <&CPU4 12 12>;
+					cooling-device = <&CPU4 9 9>;
 				};
 				gpu_vdd_cdev {
 					trip = <&mdm_trip>;
-					cooling-device = <&msm_gpu 4 4>;
+					cooling-device = <&msm_gpu 1 1>;
 				};
 			};
 		};
@@ -4203,10 +4227,12 @@
 };
 
 &vcodec0_gdsc {
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &vcodec1_gdsc {
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
@@ -4236,39 +4262,86 @@
 	cooling-maps {
 		trip0_cpu0 {
 			trip = <&pm8998_trip0>;
-			cooling-device = <&CPU0 21 21>;
+			cooling-device =
+				<&CPU0 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu1 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU1 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu2 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU2 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu3 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU3 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
 		};
 		trip0_cpu4 {
 			trip = <&pm8998_trip0>;
-			cooling-device = <&CPU4 21 21>;
+			cooling-device =
+				<&CPU4 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu5 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU5 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu6 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU6 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
+		};
+		trip0_cpu7 {
+			trip = <&pm8998_trip0>;
+			cooling-device =
+				<&CPU7 (THERMAL_MAX_LIMIT-1)
+					(THERMAL_MAX_LIMIT-1)>;
 		};
 		trip1_cpu1 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU1 22 22>;
+			cooling-device =
+				<&CPU1 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu2 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU2 22 22>;
+			cooling-device =
+				<&CPU2 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu3 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU3 22 22>;
+			cooling-device =
+				<&CPU3 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu4 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU4 22 22>;
+			cooling-device =
+				<&CPU4 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu5 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU5 22 22>;
+			cooling-device =
+				<&CPU5 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu6 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU6 22 22>;
+			cooling-device =
+				<&CPU6 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 		trip1_cpu7 {
 			trip = <&pm8998_trip1>;
-			cooling-device = <&CPU7 22 22>;
+			cooling-device =
+				<&CPU7 THERMAL_MAX_LIMIT THERMAL_MAX_LIMIT>;
 		};
 	};
 };
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 8c20b3f..c69e015 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -9,6 +9,8 @@
 CONFIG_SCHED_WALT=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -81,7 +83,6 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -317,11 +318,13 @@
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
 CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -400,6 +403,7 @@
 CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -412,6 +416,7 @@
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 0bebc63b..cc111b2 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -12,6 +12,8 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -87,7 +89,6 @@
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_BOOST=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_CPU_FREQ_MSM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -326,11 +327,13 @@
 CONFIG_THERMAL_GOV_LOW_LIMITS=y
 CONFIG_CPU_THERMAL=y
 CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
 CONFIG_THERMAL_QPNP=y
 CONFIG_THERMAL_QPNP_ADC_TM=y
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -408,6 +411,8 @@
 CONFIG_USB_CONFIGFS_F_GSI=y
 CONFIG_USB_CONFIGFS_F_QDSS=y
 CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_RING_BUFFER=y
 CONFIG_MMC_CLKGATE=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
@@ -420,6 +425,7 @@
 CONFIG_LEDS_QPNP=y
 CONFIG_LEDS_QPNP_FLASH_V2=y
 CONFIG_LEDS_QPNP_WLED=y
+CONFIG_LEDS_QPNP_HAPTICS=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 0000000..be2d234
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+	.macro	clear_address_tag, dst, addr
+	tst	\addr, #(1 << 55)
+	bic	\dst, \addr, #(0xff << 56)
+	csel	\dst, \dst, \addr, eq
+	.endm
+
+#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index b89a7f3..0f2704c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -65,8 +65,6 @@
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
 {
-	if (!dev)
-		return false;
 	return dev->archdata.dma_coherent;
 }
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6c80b36..7393cc7 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -94,6 +94,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
+			 (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 61e032f2..b58f429 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -41,6 +41,9 @@
 #define arch_scale_cpu_capacity scale_cpu_capacity
 extern unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu);
 
+#define arch_update_cpu_capacity update_cpu_power_capacity
+extern void update_cpu_power_capacity(int cpu);
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 21934d1..2df5d5f 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -110,9 +110,9 @@
 })
 
 /*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
  */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1f0cea7..c44a933 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -32,6 +32,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -428,12 +429,13 @@
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x3, far_el1
 	enable_dbg
 	// re-enable interrupts if they were enabled in the aborted context
 	tbnz	x23, #7, 1f			// PSR_I_BIT
 	enable_irq
 1:
+	clear_address_tag x0, x3
 	mov	x2, sp				// struct pt_regs
 	bl	do_mem_abort
 
@@ -594,7 +596,7 @@
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
 	ct_user_exit
-	bic	x0, x26, #(0xff << 56)
+	clear_address_tag x0, x26
 	mov	x1, x25
 	mov	x2, sp
 	bl	do_mem_abort
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 1b3c747..fb0082a 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
+#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 852548c..bb24b4e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -29,6 +29,8 @@
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
+static DEFINE_PER_CPU(bool, is_hotplugging);
+
 /*
  * ARMv8 PMUv3 Performance Events handling code.
  * Common event types (some are defined in asm/perf_event.h).
@@ -982,6 +984,9 @@
 	if (!cpu_pmu)
 		return;
 
+	if (__this_cpu_read(is_hotplugging))
+		return;
+
 	hw_events = this_cpu_ptr(cpu_pmu->hw_events);
 
 	if (!hw_events)
@@ -1031,14 +1036,13 @@
 
 	pmu_idle_nb->cpu_pmu = cpu_pmu;
 	pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
-	idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
 
 	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
 				    __armv8pmu_probe_pmu,
 				    cpu_pmu, 1);
 
-	if (ret)
-		idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+	if (!ret)
+		idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
 
 	return ret;
 }
@@ -1140,6 +1144,37 @@
 	{},
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int perf_event_hotplug_coming_up(unsigned int cpu)
+{
+	per_cpu(is_hotplugging, cpu) = false;
+	return 0;
+}
+
+static int perf_event_hotplug_going_down(unsigned int cpu)
+{
+	per_cpu(is_hotplugging, cpu) = true;
+	return 0;
+}
+
+static int perf_event_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_NOTIFY_ONLINE,
+				"PERF_EVENT/CPUHP_AP_NOTIFY_ONLINE",
+				perf_event_hotplug_coming_up,
+				perf_event_hotplug_going_down);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf_event.c could not be registered: %d\n",
+		       ret);
+
+	return ret;
+}
+#else
+static int perf_event_cpu_hp_init(void) { return 0; }
+#endif
+
 /*
  * Non DT systems have their micro/arch events probed at run-time.
  * A fairly complete list of generic events are provided and ones that
@@ -1152,6 +1187,16 @@
 
 static int armv8_pmu_device_probe(struct platform_device *pdev)
 {
+	int ret, cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(is_hotplugging, cpu) = false;
+
+	ret = perf_event_cpu_hp_init();
+
+	if (ret)
+		return ret;
+
 	if (acpi_disabled)
 		return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids,
 					    NULL);
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
index 1f0b74a..d87106d 100644
--- a/arch/arm64/kernel/perf_trace_counters.c
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -59,7 +59,7 @@
 {
 	u32 cnten_val;
 	int current_pid;
-	u32 cpu = task_thread_info(next)->cpu;
+	u32 cpu = task_cpu(next);
 
 	if (tp_pid_state != 1)
 		return;
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index aaf4bd7..7b670f1 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -449,6 +449,12 @@
 		cpu, arch_scale_cpu_capacity(NULL, cpu));
 }
 
+void update_cpu_power_capacity(int cpu)
+{
+	update_cpu_power(cpu);
+	update_cpu_capacity(cpu);
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -510,8 +516,6 @@
 
 topology_populated:
 	update_siblings_masks(cpuid);
-	update_cpu_power(cpuid);
-	update_cpu_capacity(cpuid);
 }
 
 static void __init reset_cpu_topology(void)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d84c7d0..d8253fb 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -470,7 +470,7 @@
 }
 
 #define __user_cache_maint(insn, address, res)			\
-	if (untagged_addr(address) >= user_addr_max()) {	\
+	if (address >= user_addr_max()) {			\
 		res = -EFAULT;					\
 	} else {						\
 		uaccess_ttbr0_enable();				\
@@ -496,7 +496,7 @@
 	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 	int ret = 0;
 
-	address = (rt == 31) ? 0 : regs->regs[rt];
+	address = (rt == 31) ? 0 : untagged_addr(regs->regs[rt]);
 
 	switch (crm) {
 	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 6b29d3d..4bbff90 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -102,10 +102,13 @@
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 75088c00..acbe515 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -157,11 +157,6 @@
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	if (dev == NULL) {
-		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
-		return NULL;
-	}
-
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
@@ -201,10 +196,6 @@
 	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
 
 	size = PAGE_ALIGN(size);
-	if (dev == NULL) {
-		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
-		return;
-	}
 
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
 	    (attrs & DMA_ATTR_STRONGLY_ORDERED))
@@ -1153,16 +1144,6 @@
 		set_bit(PG_dcache_clean, &page->flags);
 }
 
-static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
-
 /* IOMMU */
 
 static void __dma_clear_buffer(struct page *page, size_t size,
@@ -1810,10 +1791,8 @@
 						mapping->domain, iova));
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
-	bool iova_coherent = iommu_is_iova_coherent(mapping->domain,
-							handle);
 
-	if (!(iova_coherent ||
+	if (!(is_dma_coherent(dev, attrs) ||
 	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
@@ -1913,7 +1892,6 @@
 	.map_resource		= arm_iommu_dma_map_resource,
 	.unmap_resource		= arm_iommu_dma_unmap_resource,
 
-	.set_dma_mask		= arm_dma_set_mask,
 	.mapping_error		= arm_iommu_mapping_error,
 };
 
@@ -2006,6 +1984,7 @@
 	int err;
 	int s1_bypass = 0, is_fast = 0;
 	struct iommu_group *group;
+	dma_addr_t iova_end;
 
 	group = dev->iommu_group;
 	if (!group) {
@@ -2018,6 +1997,13 @@
 		return -EINVAL;
 	}
 
+	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
+	if (iova_end > dma_get_mask(dev)) {
+		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
+			dma_get_mask(dev), &mapping->base, &iova_end);
+		return -EINVAL;
+	}
+
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
 	if (is_fast)
 		return fast_smmu_attach_device(dev, mapping);
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 9c4b57a..d8199e1 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -252,8 +252,9 @@
 	 */
 	off = offsetof(struct bpf_array, ptrs);
 	emit_a64_mov_i64(tmp, off, ctx);
-	emit(A64_LDR64(tmp, r2, tmp), ctx);
-	emit(A64_LDR64(prg, tmp, r3), ctx);
+	emit(A64_ADD(1, tmp, r2, tmp), ctx);
+	emit(A64_LSL(1, prg, r3, 3), ctx);
+	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
 	/* goto *(prog->bpf_func + prologue_size); */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 1fd147f..5f10f9b 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index afbc98f0..ed960d3 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -90,5 +90,7 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 0018fad..9790d13 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -99,4 +99,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5fe42fc..ad25676 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2027240a..2f106d0 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -108,4 +108,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5129f23..69f9618 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 9c935d7..b96a193 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -89,4 +89,6 @@
 
 #define SO_CNX_ADVICE		0x402E
 
+#define SO_COOKIE		0x4032
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b..3297715 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 1672e33..e78550f 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif	/* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c716473..b249c2f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1659,6 +1659,7 @@
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1667,6 +1668,7 @@
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1678,6 +1680,7 @@
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index a12be60..ada71be 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -595,7 +595,7 @@
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -606,7 +606,7 @@
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e84d8fb..378c37a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+		ret = hash_page(ea,
+				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+				0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 76ec104..c0a0947 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -147,6 +148,7 @@
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b4..6afddae 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 41b51c2..04fe908 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -96,4 +96,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 165ecdd..b27e48e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -187,9 +187,9 @@
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0db..83b36a5 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)	\
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be67..349dd23 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,13 +17,8 @@
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -74,8 +69,9 @@
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -121,7 +117,6 @@
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +126,7 @@
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f569..cf19072 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 /*
  * In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 26693703..522b43d 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1..be0cc1b 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@
  */
 extern unsigned char boot_cpu_id;
 
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 extern int serial_console;
 static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6c..9dca7a8 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@
 	int			compat_len;
 
 	u64			dev_no;
+	u64			id;
 
 	unsigned long		channel_id;
 
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 31aede3..de15f0a 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -86,6 +86,8 @@
 
 #define SO_CNX_ADVICE		0x0037
 
+#define SO_COOKIE		0x003b
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT	0x5002
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff69..cec54dc 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,18 +130,17 @@
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return parent + 8UL;
 
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
+		return parent + 8UL;
+
 	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
 				     frame_pointer, NULL) == -EBUSY)
 		return parent + 8UL;
 
-	trace.func = self_addr;
-
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
-		return parent + 8UL;
-	}
-
 	return return_hooker;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 34a7930..e1b1ce6 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure the mondo block is 64-byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551..6ae1e77 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 8182f7c..d5807d2 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -963,37 +963,6 @@
 	preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d568c82..395ec18 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -470,13 +470,16 @@
 	.type	copy_tsb,#function
 copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 * %o4=page_size_shift
 			 */
 	sethi		%uhi(TSB_PASS_BITS), %g7
 	srlx		%o3, 4, %o3
-	add		%o0, %o1, %g1	/* end of old tsb */
+	add		%o0, %o1, %o1	/* end of old tsb */
 	sllx		%g7, 32, %g7
 	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
 
+	mov		%o4, %g1	/* page_size_shift */
+
 661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word		661b
@@ -501,9 +504,9 @@
 	/* This can definitely be computed faster... */
 	srlx		%o0, 4, %o5	/* Build index */
 	and		%o5, 511, %o5	/* Mask index */
-	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx		%o5, %g1, %o5	/* Put into vaddr position */
 	or		%o4, %o5, %o4	/* Full VADDR. */
-	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx		%o4, %g1, %o4	/* Shift down to create index */
 	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -511,7 +514,7 @@
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
 
 80:	add		%o0, 16, %o0
-	cmp		%o0, %g1
+	cmp		%o0, %o1
 	bne,pt		%xcc, 90b
 	 nop
 
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa..170ead6 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:       BTRAP(0x44)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index f6bb857..075d389 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -302,13 +302,16 @@
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}
 
 	vdev->dev.parent = parent;
@@ -351,27 +354,84 @@
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);
 
-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;
 
-	return 0;
+	return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;
 
-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
 				vio_md_node_match);
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
 
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 69912d2..07c03e7 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -15,6 +15,7 @@
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644
index 0000000..d6b6c97
--- /dev/null
+++ b/arch/sparc/lib/multi3.S
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.align	4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+	mov	%o1, %g1
+	srl	%o3, 0, %g4
+	mulx	%g4, %g1, %o1
+	srlx	%g1, 0x20, %g3
+	mulx	%g3, %g4, %g5
+	sllx	%g5, 0x20, %o5
+	srl	%g1, 0, %g4
+	sub	%o1, %o5, %o5
+	srlx	%o5, 0x20, %o5
+	addcc	%g5, %o5, %g5
+	srlx	%o3, 0x20, %o5
+	mulx	%g4, %o5, %g4
+	mulx	%g3, %o5, %o5
+	sethi	%hi(0x80000000), %g3
+	addcc	%g5, %g4, %g5
+	srlx	%g5, 0x20, %g5
+	add	%g3, %g3, %g3
+	movcc	%xcc, %g0, %g3
+	addcc	%o5, %g5, %o5
+	sllx	%g4, 0x20, %g4
+	add	%o1, %g4, %o1
+	add	%o5, %g3, %g2
+	mulx	%g1, %o2, %g1
+	add	%g1, %g2, %g1
+	mulx	%o0, %o3, %o0
+	retl
+	 add	%g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb82871..3b7092d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@
 
 
 	/* Saves us work later. */
-	memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index bd7e2aa..57154c6 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -658,10 +658,58 @@
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm added into per_cpu_secondary_mm
+	 * goes through the get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Update the context version to current on those CPUs that had a
+	 * valid secondary context.
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will take the get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -677,48 +725,30 @@
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;
 
 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* A wrap may have happened; recheck whether our context is valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index e20fbba..84cd593 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -451,7 +451,8 @@
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -459,7 +460,9 @@
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			tsb_index == MM_TSB_BASE ?
+			PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}
 
 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6c..fcf4d27 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@
 	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry
 
-	.globl		xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl		xcall_kgdb_capture
 xcall_kgdb_capture:
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 34d9e15..4669b3a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
 	for obj in $(filter %.o,$^); do \
-		readelf -S $$obj | grep -qF .rel.local && { \
+		${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
 			echo "error: $$obj has data relocations!" >&2; \
 			exit 1; \
 		} || true; \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9bd7ff5..70c9cc3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -257,6 +257,7 @@
 #endif
 
 int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 22cda29..8ca5f8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,16 +598,14 @@
 	}
 }
 
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD) {
 		/* ErrCodeExt[20:16] */
 		u8 xec = (m->status >> 16) & 0x1f;
 
 		return (xec == 0x0 || xec == 0x8);
-	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
+	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 		 *
@@ -628,6 +626,7 @@
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
 DEFINE_PER_CPU(unsigned, mce_poll_count);
 
@@ -691,7 +690,7 @@
 
 		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 
-		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+		if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
 			if (m.status & MCI_STATUS_ADDRV)
 				m.severity = severity;
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc8..9cf697c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -162,8 +162,8 @@
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 967e459..649d8f2 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -765,18 +765,20 @@
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;
 
 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d9c7e98..5f24127 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3489,12 +3489,15 @@
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;
 
+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3510,7 +3513,7 @@
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index ddc56e9..c92834c5 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -75,6 +75,7 @@
 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 81bba3c..62cde4f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8444,8 +8444,7 @@
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index cdfe8c6..393a0c0 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -358,6 +358,9 @@
 		free_bootmem_late(start, size);
 	}
 
+	if (!num_entries)
+		return;
+
 	new_size = efi.memmap.desc_size * num_entries;
 	new_phys = efi_memmap_alloc(num_entries);
 	if (!new_phys) {
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 81435d9..fc7ca28 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -101,4 +101,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif	/* _XTENSA_SOCKET_H */
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 19b1d9c..df02faf 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -35,37 +35,19 @@
 {
 	unsigned int *map = set->mq_map;
 	unsigned int nr_queues = set->nr_hw_queues;
-	const struct cpumask *online_mask = cpu_online_mask;
-	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
+	unsigned int i, queue, first_sibling;
 	cpumask_var_t cpus;
 
-	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
-		return -ENOMEM;
-
-	cpumask_clear(cpus);
-	nr_cpus = nr_uniq_cpus = 0;
-	for_each_cpu(i, online_mask) {
-		nr_cpus++;
-		first_sibling = get_first_sibling(i);
-		if (!cpumask_test_cpu(first_sibling, cpus))
-			nr_uniq_cpus++;
-		cpumask_set_cpu(i, cpus);
-	}
-
 	queue = 0;
 	for_each_possible_cpu(i) {
-		if (!cpumask_test_cpu(i, online_mask)) {
-			map[i] = 0;
-			continue;
-		}
-
 		/*
 		 * Easy case - we have equal or more hardware queues. Or
 		 * there are no thread siblings to take into account. Do
 		 * 1:1 if enough, or sequential mapping if less.
 		 */
-		if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) {
-			map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue);
+		if (nr_queues >= nr_cpu_ids) {
+			map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
+						queue);
 			queue++;
 			continue;
 		}
@@ -77,7 +59,7 @@
 		 */
 		first_sibling = get_first_sibling(i);
 		if (first_sibling == i) {
-			map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues,
+			map[i] = cpu_to_queue_index(nr_cpu_ids, nr_queues,
 							queue);
 			queue++;
 		} else
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7b597ec..a7db634 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1713,10 +1713,6 @@
 		INIT_LIST_HEAD(&__ctx->rq_list);
 		__ctx->queue = q;
 
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
-			continue;
-
 		hctx = blk_mq_map_queue(q, i);
 
 		/*
@@ -1750,14 +1746,11 @@
 	 * Map software to hardware queues
 	 */
 	for_each_possible_cpu(i) {
-		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpumask_test_cpu(i, online_mask))
-			continue;
-
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
-		cpumask_set_cpu(i, hctx->cpumask);
+		if (cpumask_test_cpu(i, online_mask))
+			cpumask_set_cpu(i, hctx->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
@@ -1793,9 +1786,16 @@
 
 		/*
 		 * Initialize batch roundrobin counts
+		 * Set next_cpu only for those hctxs that have an online CPU
+		 * in their cpumask field. For an hctx that maps to a mix of
+		 * online and offline CPUs, this always picks one of the
+		 * online CPUs. For hctxs whose CPUs are all offline, the
+		 * cpumask will be updated in reinit_notify.
 		 */
-		hctx->next_cpu = cpumask_first(hctx->cpumask);
-		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+		if (cpumask_first(hctx->cpumask) < nr_cpu_ids) {
+			hctx->next_cpu = cpumask_first(hctx->cpumask);
+			hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+		}
 	}
 }
 
@@ -2067,50 +2067,20 @@
 	blk_mq_sysfs_register(q);
 }
 
-/*
- * New online cpumask which is going to be set in this hotplug event.
- * Declare this cpumasks as global as cpu-hotplug operation is invoked
- * one-by-one and dynamically allocating this could result in a failure.
- */
-static struct cpumask cpuhp_online_new;
-
-static void blk_mq_queue_reinit_work(void)
-{
-	struct request_queue *q;
-
-	mutex_lock(&all_q_mutex);
-	/*
-	 * We need to freeze and reinit all existing queues.  Freezing
-	 * involves synchronous wait for an RCU grace period and doing it
-	 * one by one may take a long time.  Start freezing all queues in
-	 * one swoop and then wait for the completions so that freezing can
-	 * take place in parallel.
-	 */
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_freeze_queue_start(q);
-	list_for_each_entry(q, &all_q_list, all_q_node) {
-		blk_mq_freeze_queue_wait(q);
-
-		/*
-		 * timeout handler can't touch hw queue during the
-		 * reinitialization
-		 */
-		del_timer_sync(&q->timeout);
-	}
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &cpuhp_online_new);
-
-	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_unfreeze_queue(q);
-
-	mutex_unlock(&all_q_mutex);
-}
-
 static int blk_mq_queue_reinit_dead(unsigned int cpu)
 {
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	blk_mq_queue_reinit_work();
+	struct request_queue *q;
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	mutex_lock(&all_q_mutex);
+	list_for_each_entry(q, &all_q_list, all_q_node) {
+		queue_for_each_hw_ctx(q, hctx, i) {
+			cpumask_clear_cpu(cpu, hctx->cpumask);
+		}
+	}
+	mutex_unlock(&all_q_mutex);
+
 	return 0;
 }
 
@@ -2132,9 +2102,17 @@
  */
 static int blk_mq_queue_reinit_prepare(unsigned int cpu)
 {
-	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
-	cpumask_set_cpu(cpu, &cpuhp_online_new);
-	blk_mq_queue_reinit_work();
+	struct request_queue *q;
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
+	mutex_lock(&all_q_mutex);
+	list_for_each_entry(q, &all_q_list, all_q_node) {
+		queue_for_each_hw_ctx(q, hctx, i) {
+			cpumask_set_cpu(cpu, hctx->cpumask);
+		}
+	}
+	mutex_unlock(&all_q_mutex);
 	return 0;
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3ab6807..c7c3d4e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -36,9 +36,13 @@
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1370,6 +1374,14 @@
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1389,7 +1401,8 @@
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index fd76b5f..4955eb6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -140,7 +140,7 @@
 	 * signature and returns that to us.
 	 */
 	ret = crypto_akcipher_verify(req);
-	if (ret == -EINPROGRESS) {
+	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
 		wait_for_completion(&compl.completion);
 		ret = compl.err;
 	}
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 053035b..123d211 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1768,9 +1768,8 @@
 			break;
 		case -EINPROGRESS:
 		case -EBUSY:
-			ret = wait_for_completion_interruptible(
-				&drbg->ctr_completion);
-			if (!ret && !drbg->ctr_async_err) {
+			wait_for_completion(&drbg->ctr_completion);
+			if (!drbg->ctr_async_err) {
 				reinit_completion(&drbg->ctr_completion);
 				break;
 			}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index f624ac9..dd33fbd 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@
 
 	err = crypto_skcipher_encrypt(&data->req);
 	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
+		wait_for_completion(&data->result.completion);
+		err = data->result.err;
 	}
 
 	if (err)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018..93110d7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -221,6 +221,44 @@
 	return 0;
 }
 
+static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	u8 *buffer, *alignbuffer;
+	unsigned long absize;
+	int ret;
+
+	absize = keylen + alignmask;
+	buffer = kmalloc(absize, GFP_ATOMIC);
+	if (!buffer)
+		return -ENOMEM;
+
+	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+	memcpy(alignbuffer, key, keylen);
+	ret = cipher->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
+	return ret;
+}
+
+static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+
+	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if ((unsigned long)key & alignmask)
+		return skcipher_setkey_unaligned(tfm, key, keylen);
+
+	return cipher->setkey(tfm, key, keylen);
+}
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -241,7 +279,7 @@
 	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
 		return crypto_init_skcipher_ops_ablkcipher(tfm);
 
-	skcipher->setkey = alg->setkey;
+	skcipher->setkey = skcipher_setkey;
 	skcipher->encrypt = alg->encrypt;
 	skcipher->decrypt = alg->decrypt;
 	skcipher->ivsize = alg->ivsize;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5a8c1..e19f530 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -113,7 +113,7 @@
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e5ce81c..e25787a 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -26,7 +26,7 @@
 	struct nfit_spa *nfit_spa;
 
 	/* We only care about memory errors */
-	if (!(mce->status & MCACOD))
+	if (!mce_is_memory_error(mce))
 		return NOTIFY_DONE;
 
 	/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 74f4c66..c940382 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1362,6 +1362,40 @@
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+				    struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		{
+			.ident = "Acer Switch Alpha 12",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+			},
+		},
+		{ }
+	};
+
+	if (dmi_check_system(sysids)) {
+		dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+		if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+			hpriv->port_map = 0x7;
+			hpriv->cap = 0xC734FF02;
+		}
+	}
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1597,6 +1631,10 @@
 			 "online status unreliable, applying workaround\n");
 	}
 
+
+	/* Acer SA5-271 workaround modifies private_data */
+	acer_sa5_271_workaround(hpriv, pdev);
+
 	/* CAP.NP sometimes indicate the index of the last enabled
 	 * port, at other times, that of the last possible port, so
 	 * determining the maximum port number requires looking at
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4609244..624f069 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -182,8 +182,8 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static ssize_t show_cpu_isolated(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t isolate_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	ssize_t rc;
@@ -195,31 +195,7 @@
 	return rc;
 }
 
-static ssize_t __ref store_cpu_isolated(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct cpu *cpu = container_of(dev, struct cpu, dev);
-	int err;
-	int cpuid = cpu->dev.id;
-	unsigned int isolated;
-
-	err = kstrtouint(strstrip((char *)buf), 0, &isolated);
-	if (err)
-		return err;
-
-	if (isolated > 1)
-		return -EINVAL;
-
-	if (isolated)
-		sched_isolate_cpu(cpuid);
-	else
-		sched_unisolate_cpu(cpuid);
-
-	return count;
-}
-
-static DEVICE_ATTR(isolate, 0644, show_cpu_isolated, store_cpu_isolated);
+static DEVICE_ATTR_RO(isolate);
 
 static struct attribute *cpu_isolated_attrs[] = {
 	&dev_attr_isolate.attr,
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 8ea7c31..9102df7 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1381,24 +1381,14 @@
 			goto bail;
 	}
 
-	PERF(fl->profile, fl->perf.invargs,
-	if (!fl->sctx->smmu.coherent) {
+	if (!fl->sctx->smmu.coherent)
 		inv_args_pre(ctx);
-		if (mode == FASTRPC_MODE_SERIAL)
-			inv_args(ctx);
-	}
-	PERF_END);
-
 	PERF(fl->profile, fl->perf.link,
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
 	PERF_END);
 
 	if (err)
 		goto bail;
-	PERF(fl->profile, fl->perf.invargs,
-	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
-		inv_args(ctx);
-	PERF_END);
  wait:
 	if (kernel)
 		wait_for_completion(&ctx->work);
@@ -1408,6 +1398,12 @@
 		if (err)
 			goto bail;
 	}
+
+	PERF(fl->profile, fl->perf.invargs,
+	if (!fl->sctx->smmu.coherent)
+		inv_args(ctx);
+	PERF_END);
+
 	VERIFY(err, 0 == (err = ctx->retval));
 	if (err)
 		goto bail;
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index e907d0d..8f0597f 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2849,6 +2849,8 @@
 		new_entry->num_buffers = 1;
 		break;
 	}
+
+	new_entry->buffers = NULL;
 	new_entry->real_time = MODE_REALTIME;
 	new_entry->in_service = 0;
 	INIT_LIST_HEAD(&new_entry->list_write_buf);
@@ -2919,7 +2921,8 @@
 
 fail_alloc:
 	if (new_entry) {
-		for (i = 0; i < new_entry->num_buffers; i++) {
+		for (i = 0; ((i < new_entry->num_buffers) &&
+			new_entry->buffers); i++) {
 			proc_buf = &new_entry->buffers[i];
 			if (proc_buf) {
 				mutex_destroy(&proc_buf->health_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6e0cbe0..593a881 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@
 	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
 	/* It's illegal to wrap around the end of the physical address space. */
-	if (offset + (phys_addr_t)size < offset)
+	if (offset + (phys_addr_t)size - 1 < offset)
 		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7..a7de8ae 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@
 
 	rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
@@ -387,7 +387,7 @@
 	for (i = 0; i < bytes_to_write; i++) {
 		rc = wait_for_bulk_out_ready(dev);
 		if (rc <= 0) {
-			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
 			       rc);
 			DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 			if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@
 	rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 89201e2..7cdf45b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -10,6 +10,8 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
+#define pr_fmt(fmt) "clk: " fmt
+
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index adbabea..9ccef91 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -87,7 +89,7 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -278,6 +280,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -316,7 +319,6 @@
 	{ }
 };
 
-
 static struct clk_rcg2 cam_cc_cci_clk_src = {
 	.cmd_rcgr = 0xb0d8,
 	.mnd_width = 8,
@@ -341,7 +343,13 @@
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
 	F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
-	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	{ }
 };
 
@@ -429,7 +437,27 @@
 	},
 };
 
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+	.cmd_rcgr = 0x5070,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = NULL,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi3phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
 static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
@@ -468,6 +496,15 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_fd_core_clk_src = {
 	.cmd_rcgr = 0xb0b0,
 	.mnd_width = 0,
@@ -490,13 +527,31 @@
 	},
 };
 
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_icp_clk_src = {
 	.cmd_rcgr = 0xb088,
 	.mnd_width = 0,
 	.hid_width = 5,
 	.enable_safe_config = true,
 	.parent_map = cam_cc_parent_map_0,
-	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.freq_tbl = ftbl_cam_cc_icp_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_icp_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -513,6 +568,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -544,6 +600,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
 	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
@@ -655,6 +712,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -733,6 +791,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
 	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
@@ -741,6 +800,16 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_lrme_clk_src = {
 	.cmd_rcgr = 0xb0f8,
 	.mnd_width = 0,
@@ -1059,6 +1128,24 @@
 	},
 };
 
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+	.halt_reg = 0x5088,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5088,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi3phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi3phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch cam_cc_csiphy0_clk = {
 	.halt_reg = 0x5020,
 	.halt_check = BRANCH_HALT,
@@ -1116,6 +1203,25 @@
 	},
 };
 
+static struct clk_branch cam_cc_csiphy3_clk = {
+	.halt_reg = 0x508c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x508c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
 static struct clk_branch cam_cc_fd_core_clk = {
 	.halt_reg = 0xb0c8,
 	.halt_check = BRANCH_HALT,
@@ -1749,9 +1855,12 @@
 	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
 	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
 	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSI3PHYTIMER_CLK] = NULL,
+	[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL,
 	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
 	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
 	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_CSIPHY3_CLK] = NULL,
 	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
 	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
 	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
@@ -1860,10 +1969,49 @@
 
 static const struct of_device_id cam_cc_sdm845_match_table[] = {
 	{ .compatible = "qcom,cam_cc-sdm845" },
+	{ .compatible = "qcom,cam_cc-sdm845-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table);
 
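+/*
+ * SDM845 v2 camera clock controller differences are patched in at probe
+ * time: the CSIPHY3/CSI3PHYTIMER clocks (left NULL in the base table for
+ * v1) are added to the clock list, and several RCGs pick up v2-specific
+ * frequency tables and voltage fmax values.
+ */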
+static void cam_cc_sdm845_fixup_sdm845v2(void)
+{
+	cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK] =
+		&cam_cc_csi3phytimer_clk.clkr;
+	cam_cc_sdm845_clocks[CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr;
+	cam_cc_sdm845_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] =
+		&cam_cc_csi3phytimer_clk_src.clkr;
+	cam_cc_cphy_rx_clk_src.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src_sdm845_v2;
+	cam_cc_cphy_rx_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
+	cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_sdm845_v2;
+	cam_cc_fd_core_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
+	cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_icp_clk_src_sdm845_v2;
+	cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 384000000;
+	cam_cc_icp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 600000000;
+	cam_cc_ipe_0_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+	cam_cc_ipe_1_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 600000000;
+	cam_cc_lrme_clk_src.freq_tbl = ftbl_cam_cc_lrme_clk_src_sdm845_v2;
+	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 269333333;
+	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] = 320000000;
+	cam_cc_lrme_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] = 400000000;
+	cam_cc_slow_ahb_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] = 80000000;
+}
+
+static int cam_cc_sdm845_fixup(struct platform_device *pdev)
+{
+	const char *compat = NULL;
+	int compatlen = 0;
+
+	compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+	if (!compat || (compatlen <= 0))
+		return -EINVAL;
+
+	if (!strcmp(compat, "qcom,cam_cc-sdm845-v2"))
+		cam_cc_sdm845_fixup_sdm845v2();
+
+	return 0;
+}
+
 static int cam_cc_sdm845_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
@@ -1891,6 +2039,10 @@
 		return PTR_ERR(vdd_mx.regulator[0]);
 	}
 
+	ret = cam_cc_sdm845_fixup(pdev);
+	if (ret)
+		return ret;
+
 	clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
 	clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
 	clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index fd3617b..4d3b427 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/clk-provider.h>
@@ -100,7 +102,7 @@
 		udelay(1);
 	}
 
-	WARN(1, "%s failed to %s!\n", name, action);
+	WARN(1, "clk: %s failed to %s!\n", name, action);
 	return -ETIMEDOUT;
 }
 
@@ -635,7 +637,7 @@
 	udelay(1);
 	regmap_read(pll->clkr.regmap, off + PLL_MODE, &regval);
 	if (!(regval & FABIA_PLL_ACK_LATCH)) {
-		WARN(1, "PLL latch failed. Output may be unstable!\n");
+		WARN(1, "clk: PLL latch failed. Output may be unstable!\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
index f698a55..f6aeb19 100644
--- a/drivers/clk/qcom/clk-aop-qmp.c
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -11,7 +11,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
 
 #include <linux/clk-provider.h>
 #include <linux/clk.h>
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 5c4ddcc..3ca8e1c 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -97,7 +99,7 @@
 				return 0;
 			udelay(1);
 		}
-		WARN(1, "%s status stuck at 'o%s'", name,
+		WARN(1, "clk: %s status stuck at 'o%s'", name,
 				enabling ? "ff" : "n");
 		return -EBUSY;
 	}
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 1e71204..7aef887 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -11,7 +11,7 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s: " fmt, __func__
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
@@ -33,6 +33,8 @@
 #include <linux/regmap.h>
 #include <linux/uaccess.h>
 #include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
 #include <soc/qcom/scm.h>
 #include <dt-bindings/clock/qcom,cpucc-sdm845.h>
 
@@ -42,6 +44,7 @@
 #include "clk-voter.h"
 #include "clk-debug.h"
 
+#define OSM_INIT_RATE			300000000UL
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
 #define MAX_CLUSTER_CNT			3
@@ -923,6 +926,193 @@
 	return cpu_clk_map[cpu];
 }
 
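+/*
+ * Build the cpufreq policy for a CPU: unless per-core DCVS is enabled,
+ * all CPUs hanging off the same OSM clock domain are grouped into one
+ * policy, and the clk_osm of core 0 in that domain is returned to drive
+ * the policy.
+ */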
+static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
+{
+	int cpu;
+	struct clk_hw *parent, *c_parent;
+	struct clk_osm *first;
+	struct clk_osm *c, *n;
+
+	c = logical_cpu_to_clk(policy->cpu);
+	if (!c)
+		return NULL;
+
+	c_parent = clk_hw_get_parent(&c->hw);
+	if (!c_parent)
+		return NULL;
+
+	/*
+	 * Don't put any other CPUs into the policy if we're doing
+	 * per_core_dcvs
+	 */
+	if (to_clk_osm(c_parent)->per_core_dcvs)
+		return c;
+
+	first = c;
+	/* Find CPUs that share the same clock domain */
+	for_each_possible_cpu(cpu) {
+		n = logical_cpu_to_clk(cpu);
+		if (!n)
+			continue;
+
+		parent = clk_hw_get_parent(&n->hw);
+		if (!parent)
+			return NULL;
+		if (parent != c_parent)
+			continue;
+
+		cpumask_set_cpu(cpu, policy->cpus);
+		if (n->core_num == 0)
+			first = n;
+	}
+
+	return first;
+}
+
+static void
+osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+{
+	clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(c, OSM_BASE);
+}
+
+static int
+osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
+{
+	struct clk_osm *c = policy->driver_data;
+
+	osm_set_index(c, index, c->core_num);
+	return 0;
+}
+
+static unsigned int osm_cpufreq_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+	struct clk_osm *c;
+	u32 index;
+
+	if (!policy)
+		return 0;
+
+	c = policy->driver_data;
+	index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
+
+	return policy->freq_table[index].frequency;
+}
+
+static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *table;
+	struct clk_osm *c, *parent;
+	struct clk_hw *p_hw;
+	int ret;
+	unsigned int i;
+	unsigned int xo_kHz;
+
+	c = osm_configure_policy(policy);
+	if (!c) {
+		pr_err("no clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+
+	p_hw = clk_hw_get_parent(&c->hw);
+	if (!p_hw) {
+		pr_err("no parent clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+
+	parent = to_clk_osm(p_hw);
+	c->vbases[OSM_BASE] = parent->vbases[OSM_BASE];
+
+	p_hw = clk_hw_get_parent(p_hw);
+	if (!p_hw) {
+		pr_err("no xo clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+	xo_kHz = clk_hw_get_rate(p_hw) / 1000;
+
+	table = kcalloc(OSM_TABLE_SIZE + 1, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
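+	/*
+	 * Each OSM LUT entry encodes the clock source select in bits 31:30,
+	 * the divider in bits 29:28 and the PLL L-value in bits 7:0.  A zero
+	 * source means the entry runs at the 300 MHz init rate; otherwise the
+	 * frequency is L times the XO rate.  Entries with a reduced core
+	 * count are marked invalid and, if last, re-exposed as a boost
+	 * frequency.
+	 */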
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		u32 data, src, div, lval, core_count;
+
+		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+		src = (data & GENMASK(31, 30)) >> 30;
+		div = (data & GENMASK(29, 28)) >> 28;
+		lval = data & GENMASK(7, 0);
+		core_count = CORE_COUNT_VAL(data);
+
+		if (!src)
+			table[i].frequency = OSM_INIT_RATE / 1000;
+		else
+			table[i].frequency = xo_kHz * lval;
+		table[i].driver_data = table[i].frequency;
+
+		if (core_count != MAX_CORE_COUNT)
+			table[i].frequency = CPUFREQ_ENTRY_INVALID;
+
+		/* Two of the same frequencies means end of table */
+		if (i > 0 && table[i - 1].driver_data == table[i].driver_data) {
+			struct cpufreq_frequency_table *prev = &table[i - 1];
+
+			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+				prev->flags = CPUFREQ_BOOST_FREQ;
+				prev->frequency = prev->driver_data;
+			}
+
+			break;
+		}
+	}
+	table[i].frequency = CPUFREQ_TABLE_END;
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("invalid frequency table: %d\n", ret);
+		goto err;
+	}
+
+	policy->driver_data = c;
+
+	clk_osm_enable(&parent->hw);
+	udelay(300);
+
+	return 0;
+
+err:
+	kfree(table);
+	return ret;
+}
+
+static int osm_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	kfree(policy->freq_table);
+	policy->freq_table = NULL;
+	return 0;
+}
+
+static struct freq_attr *osm_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	&cpufreq_freq_attr_scaling_boost_freqs,
+	NULL
+};
+
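+/*
+ * The OSM hardware sequences the actual frequency switch; the driver only
+ * programs the desired performance state index in target_index and reads
+ * it back in osm_cpufreq_get().
+ */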
+static struct cpufreq_driver qcom_osm_cpufreq_driver = {
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= osm_cpufreq_target_index,
+	.get		= osm_cpufreq_get,
+	.init		= osm_cpufreq_cpu_init,
+	.exit		= osm_cpufreq_cpu_exit,
+	.name		= "osm-cpufreq",
+	.attr		= osm_cpufreq_attr,
+	.boost_enabled	= true,
+};
+
 static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
 {
 	u64 temp;
@@ -2890,16 +3080,13 @@
 	return 0;
 }
 
-static unsigned long init_rate = 300000000;
-
 static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
 {
-	int rc = 0, cpu, i;
+	int rc = 0, i;
 	int pvs_ver = 0;
 	u32 pte_efuse, val;
 	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
 	struct clk *ext_xo_clk, *clk;
-	struct clk_osm *c, *parent;
 	struct device *dev = &pdev->dev;
 	struct clk_onecell_data *clk_data;
 	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
@@ -3207,53 +3394,22 @@
 	get_online_cpus();
 
 	/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
-	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
 			rc);
 		goto provider_err;
 	}
 	WARN(clk_prepare_enable(l3_clk.hw.clk),
-		     "Failed to enable clock for L3\n");
+		     "clk: Failed to enable clock for L3\n");
 	udelay(300);
 
-	/* Set CPU clocks to run off GPLL0 and enable OSM for both domains */
-	for_each_online_cpu(cpu) {
-		c = logical_cpu_to_clk(cpu);
-		if (!c) {
-			pr_err("no clock device for CPU=%d\n", cpu);
-			return -EINVAL;
-		}
-
-		parent = to_clk_osm(clk_hw_get_parent(&c->hw));
-		if (!parent->per_core_dcvs) {
-			if (cpu >= 0 && cpu <= 3)
-				c = logical_cpu_to_clk(0);
-			else if (cpu >= 4 && cpu <= 7)
-				c = logical_cpu_to_clk(4);
-			if (!c)
-				return -EINVAL;
-		}
-
-		rc = clk_set_rate(c->hw.clk, init_rate);
-		if (rc) {
-			dev_err(&pdev->dev, "Unable to set init rate on %s, rc=%d\n",
-					clk_hw_get_name(&parent->hw), rc);
-			goto provider_err;
-		}
-		WARN(clk_prepare_enable(c->hw.clk),
-					"Failed to enable OSM for %s\n",
-					clk_hw_get_name(&parent->hw));
-		udelay(300);
+	/* Configure default rate to lowest frequency */
+	for (i = 0; i < MAX_CORE_COUNT; i++) {
+		osm_set_index(&pwrcl_clk, 0, i);
+		osm_set_index(&perfcl_clk, 0, i);
 	}
 
-	/*
-	 * Add always-on votes for the CPU cluster clocks since we do not want
-	 * to re-enable OSM at any point.
-	 */
-	clk_prepare_enable(pwrcl_clk.hw.clk);
-	clk_prepare_enable(perfcl_clk.hw.clk);
-
 	populate_opp_table(pdev);
 	populate_debugfs_dir(&l3_clk);
 	populate_debugfs_dir(&pwrcl_clk);
@@ -3261,18 +3417,24 @@
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	register_cpu_cycle_counter_cb(&cb);
-	pr_info("OSM driver inited\n");
 	put_online_cpus();
 
+	rc = cpufreq_register_driver(&qcom_osm_cpufreq_driver);
+	if (rc)
+		goto provider_err;
+
+	pr_info("OSM CPUFreq driver initialized\n");
 	return 0;
+
 provider_err:
 	if (clk_data)
 		devm_kfree(&pdev->dev, clk_data->clks);
 clk_err:
 	devm_kfree(&pdev->dev, clk_data);
 exit:
-	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
-	panic("Unable to Setup OSM");
+	dev_err(&pdev->dev, "OSM CPUFreq driver failed to initialize, rc=%d\n",
+		rc);
+	panic("Unable to set up OSM CPUFreq");
 }
 
 static const struct of_device_id match_table[] = {
diff --git a/drivers/clk/qcom/clk-debug.c b/drivers/clk/qcom/clk-debug.c
index fcc2493..d366ad4 100644
--- a/drivers/clk/qcom/clk-debug.c
+++ b/drivers/clk/qcom/clk-debug.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/clk.h>
 #include <linux/export.h>
 #include <linux/module.h>
@@ -141,7 +143,7 @@
 	for (i = 0; i < num_parents; i++) {
 		if (!strcmp(meas->parent[i].parents,
 					clk_hw_get_name(hw_clk))) {
-			pr_debug("%s: clock parent - %s, index %d\n", __func__,
+			pr_debug("clock parent - %s, index %d\n",
 					meas->parent[i].parents, i);
 			return i;
 		}
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
index 3435999..07991b1 100644
--- a/drivers/clk/qcom/clk-dummy.c
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -68,14 +70,14 @@
 static int dummy_reset_assert(struct reset_controller_dev *rcdev,
 				unsigned long id)
 {
-	pr_debug("%s\n", __func__);
+	pr_debug("\n");
 	return 0;
 }
 
 static int dummy_reset_deassert(struct reset_controller_dev *rcdev,
 				unsigned long id)
 {
-	pr_debug("%s\n", __func__);
+	pr_debug("\n");
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/clk-qpnp-div.c b/drivers/clk/qcom/clk-qpnp-div.c
index 1c3eacb..2ee1c18 100644
--- a/drivers/clk/qcom/clk-qpnp-div.c
+++ b/drivers/clk/qcom/clk-qpnp-div.c
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 6bdea53..b63c3c3 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -99,8 +101,8 @@
 			return i;
 
 err:
-	pr_debug("%s: Clock %s has invalid parent, using default.\n",
-		 __func__, clk_hw_get_name(hw));
+	pr_debug("Clock %s has invalid parent, using default.\n",
+		 clk_hw_get_name(hw));
 	return 0;
 }
 
@@ -126,7 +128,7 @@
 		udelay(1);
 	}
 
-	WARN(1, "%s: rcg didn't update its configuration.", name);
+	WARN(1, "clk: %s: rcg didn't update its configuration.", name);
 	return 0;
 }
 
@@ -164,7 +166,7 @@
 		udelay(1);
 	}
 
-	WARN(1, "%s: rcg didn't turn on.", clk_hw_get_name(hw));
+	WARN(1, "clk: %s: rcg didn't turn on.", clk_hw_get_name(hw));
 	return ret;
 }
 
@@ -1220,8 +1222,8 @@
 		if (cfg == rcg->parent_map[i].cfg)
 			return i;
 err:
-	pr_debug("%s: Clock %s has invalid parent, using default.\n",
-		 __func__, clk_hw_get_name(hw));
+	pr_debug("Clock %s has invalid parent, using default.\n",
+		 clk_hw_get_name(hw));
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 5e11485..89bae2e 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -243,7 +245,7 @@
 
 	if (ret) {
 		c->state = c->valid_state_mask;
-		WARN(1, "%s failed to disable\n", c->res_name);
+		WARN(1, "clk: %s failed to disable\n", c->res_name);
 	}
 
 out:
diff --git a/drivers/clk/qcom/clk-voter.c b/drivers/clk/qcom/clk-voter.c
index b0c7e4a..1a8f0ca 100644
--- a/drivers/clk/qcom/clk-voter.c
+++ b/drivers/clk/qcom/clk-voter.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/clk.h>
 
 #include "clk-voter.h"
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index b2ff04a..d426691 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/regmap.h>
@@ -181,7 +183,7 @@
 	unsigned int idx = clkspec->args[0];
 
 	if (idx >= cc->num_rclks) {
-		pr_err("%s: invalid index %u\n", __func__, idx);
+		pr_err("invalid index %u\n", idx);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index 44c5b81..10b71ff 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -45,6 +47,7 @@
 	"cam_cc_csiphy0_clk",
 	"cam_cc_csiphy1_clk",
 	"cam_cc_csiphy2_clk",
+	"cam_cc_csiphy3_clk",
 	"cam_cc_fd_core_clk",
 	"cam_cc_fd_core_uar_clk",
 	"cam_cc_icp_apb_clk",
@@ -104,14 +107,6 @@
 	"disp_cc_mdss_rot_clk",
 	"disp_cc_mdss_rscc_ahb_clk",
 	"disp_cc_mdss_rscc_vsync_clk",
-	"disp_cc_mdss_spdm_debug_clk",
-	"disp_cc_mdss_spdm_dp_crypto_clk",
-	"disp_cc_mdss_spdm_dp_pixel1_clk",
-	"disp_cc_mdss_spdm_dp_pixel_clk",
-	"disp_cc_mdss_spdm_mdp_clk",
-	"disp_cc_mdss_spdm_pclk0_clk",
-	"disp_cc_mdss_spdm_pclk1_clk",
-	"disp_cc_mdss_spdm_rot_clk",
 	"disp_cc_mdss_vsync_clk",
 	"measure_only_snoc_clk",
 	"measure_only_cnoc_clk",
@@ -254,13 +249,13 @@
 	"gpu_cc_cxo_aon_clk",
 	"gpu_cc_cxo_clk",
 	"gpu_cc_gx_cxo_clk",
+	"gpu_cc_gx_gfx3d_clk",
 	"gpu_cc_gx_gmu_clk",
 	"gpu_cc_gx_qdss_tsctr_clk",
 	"gpu_cc_gx_vsense_clk",
 	"gpu_cc_rbcpr_ahb_clk",
 	"gpu_cc_rbcpr_clk",
 	"gpu_cc_sleep_clk",
-	"gpu_cc_spdm_gx_gfx3d_div_clk",
 	"video_cc_apb_clk",
 	"video_cc_at_clk",
 	"video_cc_qdss_trig_clk",
@@ -315,6 +310,8 @@
 			0x8, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
 		{ "cam_cc_csiphy2_clk", 0x46, 4, CAM_CC,
 			0xA, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
+		{ "cam_cc_csiphy3_clk", 0x46, 4, CAM_CC,
+			0x36, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
 		{ "cam_cc_fd_core_clk", 0x46, 4, CAM_CC,
 			0x28, 0xFF, 0, 0x3, 0, 1, 0xC000, 0xC004, 0xC008 },
 		{ "cam_cc_fd_core_uar_clk", 0x46, 4, CAM_CC,
@@ -433,22 +430,6 @@
 			0x17, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC,
 			0x18, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_debug_clk", 0x47, 4, DISP_CC,
-			0x20, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_crypto_clk", 0x47, 4, DISP_CC,
-			0x1D, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_pixel1_clk", 0x47, 4, DISP_CC,
-			0x1F, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_pixel_clk", 0x47, 4, DISP_CC,
-			0x1E, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_mdp_clk", 0x47, 4, DISP_CC,
-			0x1B, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_pclk0_clk", 0x47, 4, DISP_CC,
-			0x19, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_pclk1_clk", 0x47, 4, DISP_CC,
-			0x1A, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_rot_clk", 0x47, 4, DISP_CC,
-			0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
 			0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "measure_only_snoc_clk", 0x7, 4, GCC,
@@ -733,6 +714,8 @@
 			0xA, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC,
 			0xF, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_gfx3d_clk", 0x144, 4, GPU_CC,
+			0xC, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC,
 			0x10, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
@@ -745,8 +728,6 @@
 			0x1C, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_sleep_clk", 0x144, 4, GPU_CC,
 			0x17, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_spdm_gx_gfx3d_div_clk", 0x144, 4, GPU_CC,
-			0x1E, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "video_cc_apb_clk", 0x48, 4, VIDEO_CC,
 			0x8, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
 		{ "video_cc_at_clk", 0x48, 4, VIDEO_CC,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index d3a28e6..6acab9f 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -126,13 +128,18 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
 static const struct pll_config disp_cc_pll0_config = {
+	.l = 0x15,
+	.frac = 0x7c00,
+};
+
+static const struct pll_config disp_cc_pll0_config_v2 = {
 	.l = 0x2c,
-	.frac = 0xcaab,
+	.frac = 0xcaaa,
 };
 
 static struct clk_alpha_pll disp_cc_pll0 = {
@@ -365,6 +372,19 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
 	.cmd_rcgr = 0x2088,
 	.mnd_width = 0,
@@ -434,6 +454,15 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src_sdm845_v2[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
 	.cmd_rcgr = 0x20a0,
 	.mnd_width = 0,
@@ -986,10 +1015,73 @@
 
 static const struct of_device_id disp_cc_sdm845_match_table[] = {
 	{ .compatible = "qcom,dispcc-sdm845" },
+	{ .compatible = "qcom,dispcc-sdm845-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, disp_cc_sdm845_match_table);
 
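+/*
+ * The v2 display fixup reprograms disp_cc_pll0 with the v2 L/frac values
+ * (hence the regmap argument) and updates the MDP, rotator, byte, pclk
+ * and DP pixel clock sources with v2 frequency tables and fmax limits.
+ */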
+static void disp_cc_sdm845_fixup_sdm845v2(struct regmap *regmap)
+{
+	clk_fabia_pll_configure(&disp_cc_pll0, regmap,
+					&disp_cc_pll0_config_v2);
+	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		180000000;
+	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		275000000;
+	disp_cc_mdss_byte0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
+	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		180000000;
+	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		275000000;
+	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		358000000;
+	disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		337500000;
+	disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		337500000;
+	disp_cc_mdss_mdp_clk_src.freq_tbl =
+		ftbl_disp_cc_mdss_mdp_clk_src_sdm845_v2;
+	disp_cc_mdss_mdp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		171428571;
+	disp_cc_mdss_mdp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		344000000;
+	disp_cc_mdss_mdp_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		430000000;
+	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		280000000;
+	disp_cc_mdss_pclk0_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		430000000;
+	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		280000000;
+	disp_cc_mdss_pclk1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
+		430000000;
+	disp_cc_mdss_rot_clk_src.freq_tbl =
+		ftbl_disp_cc_mdss_rot_clk_src_sdm845_v2;
+	disp_cc_mdss_rot_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
+		171428571;
+	disp_cc_mdss_rot_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		344000000;
+	disp_cc_mdss_rot_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		430000000;
+}
+
+static int disp_cc_sdm845_fixup(struct platform_device *pdev,
+						struct regmap *regmap)
+{
+	const char *compat = NULL;
+	int compatlen = 0;
+
+	compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+	if (!compat || (compatlen <= 0))
+		return -EINVAL;
+
+	if (!strcmp(compat, "qcom,dispcc-sdm845-v2"))
+		disp_cc_sdm845_fixup_sdm845v2(regmap);
+
+	return 0;
+}
+
 static int disp_cc_sdm845_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
@@ -1014,6 +1106,10 @@
 	/* Enable clock gating for DSI and MDP clocks */
 	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x7f0, 0x7f0);
 
+	ret = disp_cc_sdm845_fixup(pdev, regmap);
+	if (ret)
+		return ret;
+
 	ret = qcom_cc_really_probe(pdev, &disp_cc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 2742ab3..13de253 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -197,7 +199,7 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -466,6 +468,25 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2[] = {
+	F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+	F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625),
+	F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75),
+	F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25),
+	F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75),
+	F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15),
+	F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25),
+	F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+	F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375),
+	F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75),
+	F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625),
+	F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0),
+	F(128000000, P_GPLL0_OUT_MAIN, 1, 16, 75),
+	{ }
+};
+
 static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
 	.cmd_rcgr = 0x17034,
 	.mnd_width = 16,
@@ -790,8 +811,8 @@
 	F(400000, P_BI_TCXO, 12, 1, 4),
 	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
-	F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
-	F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
 	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
 	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
 	{ }
@@ -879,6 +900,15 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2[] = {
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 gcc_ufs_card_axi_clk_src = {
 	.cmd_rcgr = 0x7501c,
 	.mnd_width = 8,
@@ -3542,10 +3572,132 @@
 
 static const struct of_device_id gcc_sdm845_match_table[] = {
 	{ .compatible = "qcom,gcc-sdm845" },
+	{ .compatible = "qcom,gcc-sdm845-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, gcc_sdm845_match_table);
 
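+/*
+ * All sixteen QUP serial-engine clock sources share the same v2 frequency
+ * table, and the UFS AXI sources gain a 240 MHz operating point, so the
+ * v2 fixup only swaps freq_tbl pointers and fmax entries in place.
+ */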
+static void gcc_sdm845_fixup_sdm845v2(void)
+{
+	gcc_qupv3_wrap0_s0_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s0_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s1_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s1_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s2_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s2_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s2_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s3_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s3_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s3_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s4_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s4_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s4_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s5_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s5_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s5_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s6_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s6_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s6_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap0_s7_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap0_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap0_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s0_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s0_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s0_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s1_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s1_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s1_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s2_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s2_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s2_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s3_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s3_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s3_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s4_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s4_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s4_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s5_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s5_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s5_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s6_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s6_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s6_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_qupv3_wrap1_s7_clk_src.freq_tbl =
+		ftbl_gcc_qupv3_wrap0_s0_clk_src_sdm845_v2;
+	gcc_qupv3_wrap1_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_MIN] =
+		50000000;
+	gcc_qupv3_wrap1_s7_clk_src.clkr.hw.init->rate_max[VDD_CX_NOMINAL] =
+		128000000;
+	gcc_ufs_card_axi_clk_src.freq_tbl =
+		ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
+	gcc_ufs_card_axi_clk_src.clkr.hw.init->rate_max[VDD_CX_HIGH] =
+		240000000;
+	gcc_ufs_phy_axi_clk_src.freq_tbl =
+		ftbl_gcc_ufs_card_axi_clk_src_sdm845_v2;
+}
+
+static int gcc_sdm845_fixup(struct platform_device *pdev)
+{
+	const char *compat = NULL;
+	int compatlen = 0;
+
+	compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+	if (!compat || (compatlen <= 0))
+		return -EINVAL;
+
+	if (!strcmp(compat, "qcom,gcc-sdm845-v2"))
+		gcc_sdm845_fixup_sdm845v2();
+
+	return 0;
+}
+
 static int gcc_sdm845_probe(struct platform_device *pdev)
 {
 	struct clk *clk;
@@ -3580,6 +3732,10 @@
 		return PTR_ERR(vdd_cx_ao.regulator[0]);
 	}
 
+	ret = gcc_sdm845_fixup(pdev);
+	if (ret)
+		return ret;
+
 	/* Register the dummy measurement clocks */
 	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
 		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
index 90c76e6..0899138 100644
--- a/drivers/clk/qcom/gdsc-regulator.c
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "gdsc: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/io.h>
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index f2fa577..8442890 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index d183393..87feee6 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,3 +1,6 @@
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
new file mode 100644
index 0000000..eb2092a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
@@ -0,0 +1,766 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+#define DP_PHY_REVISION_ID0			0x0000
+#define DP_PHY_REVISION_ID1			0x0004
+#define DP_PHY_REVISION_ID2			0x0008
+#define DP_PHY_REVISION_ID3			0x000C
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_PD_CTL				0x0018
+#define DP_PHY_MODE				0x001C
+
+#define DP_PHY_AUX_CFG0				0x0020
+#define DP_PHY_AUX_CFG1				0x0024
+#define DP_PHY_AUX_CFG2				0x0028
+#define DP_PHY_AUX_CFG3				0x002C
+#define DP_PHY_AUX_CFG4				0x0030
+#define DP_PHY_AUX_CFG5				0x0034
+#define DP_PHY_AUX_CFG6				0x0038
+#define DP_PHY_AUX_CFG7				0x003C
+#define DP_PHY_AUX_CFG8				0x0040
+#define DP_PHY_AUX_CFG9				0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
+#define DP_PHY_AUX_BIST_CFG			0x0050
+
+#define DP_PHY_VCO_DIV				0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
+#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
+
+#define DP_PHY_SPARE0				0x00AC
+#define DP_PHY_STATUS				0x00C0
+
+/* Tx registers */
+#define TXn_BIST_MODE_LANENO			0x0000
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x001C
+
+#define TXn_RESET_TSYNC_EN			0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
+#define TXn_TX_BAND				0x002C
+#define TXn_SLEW_CNTL				0x0030
+#define TXn_INTERFACE_SELECT			0x0034
+
+#define TXn_RES_CODE_LANE_TX			0x003C
+#define TXn_RES_CODE_LANE_RX			0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
+
+#define TXn_DEBUG_BUS_SEL			0x0058
+#define TXn_TRANSCEIVER_BIAS_EN			0x005C
+#define TXn_HIGHZ_DRVR_EN			0x0060
+#define TXn_TX_POL_INV				0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
+
+#define TXn_LANE_MODE_1				0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00C0
+#define TXn_TX_INTERFACE_MODE			0x00C4
+
+#define TXn_VMODE_CTRL1				0x00F0
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1			0x0000
+#define QSERDES_COM_ATB_SEL2			0x0004
+#define QSERDES_COM_FREQ_UPDATE			0x0008
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_SSC_EN_CENTER		0x0010
+#define QSERDES_COM_SSC_ADJ_PER1		0x0014
+#define QSERDES_COM_SSC_ADJ_PER2		0x0018
+#define QSERDES_COM_SSC_PER1			0x001C
+#define QSERDES_COM_SSC_PER2			0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
+#define QSERDES_COM_POST_DIV			0x002C
+#define QSERDES_COM_POST_DIV_MUX		0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
+#define QSERDES_COM_CLK_ENABLE1			0x0038
+#define QSERDES_COM_SYS_CLK_CTRL		0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
+#define QSERDES_COM_PLL_EN			0x0044
+#define QSERDES_COM_PLL_IVCO			0x0048
+#define QSERDES_COM_CMN_IETRIM			0x004C
+#define QSERDES_COM_CMN_IPTRIM			0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0060
+#define QSERDES_COM_CP_CTRL_MODE1		0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
+#define QSERDES_COM_PLL_CNTRL			0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
+#define QSERDES_COM_RESETSM_CNTRL		0x0088
+#define QSERDES_COM_RESETSM_CNTRL2		0x008C
+#define QSERDES_COM_LOCK_CMP_EN			0x0090
+#define QSERDES_COM_LOCK_CMP_CFG		0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0		0x00B0
+#define QSERDES_COM_DEC_START_MODE1		0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
+#define QSERDES_COM_INTEGLOOP_EN		0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
+
+#define QSERDES_COM_CMN_STATUS			0x0124
+#define QSERDES_COM_RESET_SM_STATUS		0x0128
+
+#define QSERDES_COM_CLK_SEL			0x0138
+#define QSERDES_COM_HSCLK_SEL			0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
+
+#define QSERDES_COM_SW_RESET			0x0150
+#define QSERDES_COM_CORE_CLK_EN			0x0154
+#define QSERDES_COM_C_READY_STATUS		0x0158
+#define QSERDES_COM_CMN_CONFIG			0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
+
+#define DP_PHY_PLL_POLL_SLEEP_US		500
+#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
+int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
+{
+	struct mdss_pll_resources *dp_res = context;
+	int rc;
+	u32 auxclk_div;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;	/* bits 0 to 1 */
+
+	if (val == 0) /* mux parent index = 0 */
+		auxclk_div |= 1;
+	else if (val == 1) /* mux parent index = 1 */
+		auxclk_div |= 2;
+	else if (val == 2) /* mux parent index = 2 */
+		auxclk_div |= 0;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY registers writes are done */
+	wmb();
+	pr_debug("mux=%d auxclk_div=%x\n", val, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return 0;
+}
+
+int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
+{
+	int rc;
+	u32 auxclk_div = 0;
+	struct mdss_pll_resources *dp_res = context;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	if (auxclk_div == 1) /* Default divider */
+		*val = 0;
+	else if (auxclk_div == 2)
+		*val = 1;
+	else if (auxclk_div == 0)
+		*val = 2;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("auxclk_div=%d, val=%d\n", auxclk_div, *val);
+
+	return 0;
+}
+
+static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dp_res = pdb->pll;
+	u32 spare_value = 0;
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	pdb->lane_cnt = spare_value & 0x0F;
+	pdb->orientation = (spare_value & 0xF0) >> 4;
+
+	pr_debug("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+			spare_value, pdb->lane_cnt, pdb->orientation);
+
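+	/*
+	 * Each supported DP link rate maps to a fixed set of PLL divider and
+	 * lock-comparator values: RBR and HBR run the VCO at 9.72/10.8 GHz,
+	 * HBR2 reuses the 10.8 GHz VCO with a different post divider, and
+	 * HBR3 runs the VCO at the 8.1 GHz link rate directly.
+	 */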
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
+		pdb->hsclk_sel = 0x0c;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x6f;
+		pdb->lock_cmp2_mode0 = 0x08;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x04;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x0f;
+		pdb->lock_cmp2_mode0 = 0x0e;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x00;
+		pdb->dec_start_mode0 = 0x8c;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x00;
+		pdb->div_frac_start3_mode0 = 0x0a;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x1f;
+		pdb->lock_cmp2_mode0 = 0x1c;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x2;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
+		pdb->hsclk_sel = 0x03;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x2f;
+		pdb->lock_cmp2_mode0 = 0x2a;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x0;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	int res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+	res = dp_vco_pll_init_db_10nm(pdb, rate);
+	if (res) {
+		pr_err("VCO Init DB failed\n");
+		return res;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	/* Different for each clock rates */
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	if (pdb->orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	/* TX Lane configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	/* TX-0 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+	/* TX-1 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	/* dependent on the vco frequency */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+	return res;
+}
+
+static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL lock status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("C_READY status is not high. Status=%x\n", status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool phy_ready = true;
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & (BIT(1))) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("Phy_ready is not high. Status=%x\n", status);
+		phy_ready = false;
+	}
+
+	return phy_ready;
+}
+
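+/*
+ * Enable sequence: configure the AUX block, pulse the PHY_CFG reset bits,
+ * kick the PLL state machine and wait for C_READY, then bring up the TX
+ * lanes (bias/driver enables depend on lane count and plug orientation)
+ * and program the default drive and pre-emphasis levels once the PHY
+ * reports ready.
+ */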
+static int dp_pll_enable_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+	u32 bias_en, drvr_en;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+	wmb(); /* Make sure the PHY register writes are done */
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	wmb();	/* Make sure the PLL register writes are done */
+
+	if (!dp_10nm_pll_lock_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("%s: PLL is locked\n", __func__);
+
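+	/* bias/driver enable values depend on the configured lane count */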
+	if (pdb->lane_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return rc;
+}
+
+
+int dp_vco_prepare_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	pr_debug("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
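+	/* re-apply the VCO configuration if a matching rate was cached */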
+	if ((dp_res->vco_cached_rate != 0)
+		&& (dp_res->vco_cached_rate == vco->rate)) {
+		rc = vco->hw.init->ops->set_rate(hw,
+			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				dp_res->index, rc);
+			mdss_pll_resource_enable(dp_res, false);
+			goto error;
+		}
+	}
+
+	rc = dp_pll_enable_10nm(hw);
+	if (rc) {
+		mdss_pll_resource_enable(dp_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare_10nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	if (!dp_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!dp_res->pll_on &&
+		mdss_pll_resource_enable(dp_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+	dp_res->vco_cached_rate = vco->rate;
+	dp_pll_disable_10nm(hw);
+
+	dp_res->handoff_resources = false;
+	mdss_pll_resource_enable(dp_res, false);
+	dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate_10nm(vco, rate);
+	if (rc)
+		pr_err("%s: Failed to set clk rate\n", __func__);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	vco->rate = rate;
+
+	return 0;
+}
+
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	int rc;
+	u32 div, hsclk_div, link_clk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+		return rc;
+	}
+
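+	/* read back the HSCLK divider to infer the current VCO rate */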
+	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
+	if (div == 12)
+		hsclk_div = 6; /* Default */
+	else if (div == 4)
+		hsclk_div = 4;
+	else if (div == 0)
+		hsclk_div = 2;
+	else if (div == 3)
+		hsclk_div = 1;
+	else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
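+	/* link clock divider is derived from DP_PHY_AUX_CFG2 bits [3:2] */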
+	div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+	div >>= 2;
+
+	if ((div & 0x3) == 0)
+		link_clk_div = 5;
+	else if ((div & 0x3) == 1)
+		link_clk_div = 10;
+	else if ((div & 0x3) == 2)
+		link_clk_div = 20;
+	else
+		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+	if (link_clk_div == 20) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 6)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 4)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else if (hsclk_div == 2)
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	dp_res->vco_cached_rate = vco->rate = vco_rate;
+	return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+			unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+	*parent_rate = rrate;
+	return rrate;
+}
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
new file mode 100644
index 0000000..e30ef82
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ *		+------------------------------+
+ *		|         DP_VCO_CLK           |
+ *		|                              |
+ *		|    +-------------------+     |
+ *		|    |   (DP PLL/VCO)    |     |
+ *		|    +---------+---------+     |
+ *		|              v               |
+ *		|   +----------+-----------+   |
+ *		|   | hsclk_divsel_clk_src |   |
+ *		|   +----------+-----------+   |
+ *		+------------------------------+
+ *				|
+ *	 +------------<---------v------------>----------+
+ *	 |                                              |
+ * +-----v------------+                                 |
+ * | dp_link_clk_src  |                                 |
+ * |    divsel_ten    |                                 |
+ * +---------+--------+                                 |
+ *	|                                               |
+ *	|                                               |
+ *	v                                               v
+ * Input to DISPCC block                                |
+ * for link clk, crypto clk                             |
+ * and interface clock                                  |
+ *							|
+ *							|
+ *	+--------<------------+-----------------+---<---+
+ *	|                     |                 |
+ * +-------v------+  +--------v-----+  +--------v------+
+ * | vco_divided  |  | vco_divided  |  | vco_divided   |
+ * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
+ * |              |  |              |  |               |
+ * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
+ * +-------+------+  +-----+--------+  +--------+------+
+ *         |	           |		        |
+ *	v------->----------v-------------<------v
+ *                         |
+ *		+----------+---------+
+ *		|   vco_divided_clk  |
+ *		|       _src_mux     |
+ *		+---------+----------+
+ *                        |
+ *                        v
+ *              Input to DISPCC block
+ *              for DP pixel clock
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_10nm_cfg = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x910,
+};
+
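+/* regmap bus backed by the DP mux parent get/set helpers */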
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+	.reg_write = dp_mux_set_parent_10nm,
+	.reg_read = dp_mux_get_parent_10nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_10nm_vco_clk_ops = {
+	.recalc_rate = dp_vco_recalc_rate_10nm,
+	.set_rate = dp_vco_set_rate_10nm,
+	.round_rate = dp_vco_round_rate_10nm,
+	.prepare = dp_vco_prepare_10nm,
+	.unprepare = dp_vco_unprepare_10nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_clk",
+		.parent_names = (const char *[]){ "xo_board" },
+		.num_parents = 1,
+		.ops = &dp_10nm_vco_clk_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+	.div = 10,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_link_clk_divsel_ten",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_two_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+	.div = 4,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_four_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+	.div = 6,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_six_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+				     struct clk_rate_request *req)
+{
+	int ret = 0;
+
+	ret = __clk_mux_determine_rate_closest(hw, req);
+	if (ret)
+		return ret;
+
+	/* Set the new parent of mux if there is a new valid parent */
+	if (hw->clk && req->best_parent_hw && req->best_parent_hw->clk)
+		clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+	return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk *div_clk = NULL, *vco_clk = NULL;
+	struct dp_pll_vco_clk *vco = NULL;
+
+	div_clk = clk_get_parent(hw->clk);
+	if (!div_clk)
+		return 0;
+
+	vco_clk = clk_get_parent(div_clk);
+	if (!vco_clk)
+		return 0;
+
+	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+	if (!vco)
+		return 0;
+
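+	/* pixel divider: div-6 for 8.1G, div-4 for 5.4G, div-2 otherwise */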
+	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+		return (vco->rate / 6);
+	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		return (vco->rate / 4);
+	else
+		return (vco->rate / 2);
+}
+
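+/* mux selecting the divided VCO output used as the DP pixel clock */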
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+	.reg = 0x64,
+	.shift = 0,
+	.width = 2,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dp_vco_divided_clk_src_mux",
+			.parent_names =
+				(const char *[]){"dp_vco_divsel_two_clk_src",
+					"dp_vco_divsel_four_clk_src",
+					"dp_vco_divsel_six_clk_src"},
+			.num_parents = 3,
+			.ops = &mux_clk_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dp_pllcc_10nm[] = {
+	[DP_VCO_CLK] = &dp_vco_clk.hw,
+	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP, i = 0;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base ||
+		!pll_res->ln_tx0_base || !pll_res->ln_tx1_base) {
+		pr_err("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		devm_kfree(&pdev->dev, clk_data);
+		return -ENOMEM;
+	}
+	clk_data->clk_num = num_clks;
+
+	pll_res->priv = &dp_pdb;
+	dp_pdb.pll = pll_res;
+
+	/* Set client data for vco, mux and div clocks */
+	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+			pll_res, &dp_pll_10nm_cfg);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
+	mux_clk_ops = clk_regmap_mux_closest_ops;
+	mux_clk_ops.determine_rate = clk_mux_determine_rate;
+	mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+	dp_vco_clk.priv = pll_res;
+
+	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+		clk = devm_clk_register(&pdev->dev,
+				mdss_dp_pllcc_10nm[i]);
+		if (IS_ERR(clk)) {
+			pr_err("clk registration failed for DP: %d\n",
+					pll_res->index);
+			rc = -EINVAL;
+			goto clk_reg_fail;
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node,
+			of_clk_src_onecell_get, clk_data);
+	if (rc) {
+		pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		pr_debug("%s SUCCESS\n", __func__);
+	}
+	return rc;
+clk_reg_fail:
+	devm_kfree(&pdev->dev, clk_data->clks);
+	devm_kfree(&pdev->dev, clk_data);
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
new file mode 100644
index 0000000..c3b5635
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_10NM_H
+#define __MDSS_DP_PLL_10NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
+
+struct dp_pll_db {
+	struct mdss_pll_resources *pll;
+
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+
+	/* COM PHY settings */
+	u32 hsclk_sel;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 vco_tune_map;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+	u32 lock_cmp_en;
+
+	/* PHY vco divider */
+	u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+				unsigned long parent_rate);
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int dp_vco_prepare_10nm(struct clk_hw *hw);
+void dp_vco_unprepare_10nm(struct clk_hw *hw);
+int dp_mux_set_parent_10nm(void *context,
+				unsigned int reg, unsigned int val);
+int dp_mux_get_parent_10nm(void *context,
+				unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_10NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
deleted file mode 100644
index a3ed8a8..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-int link2xclk_divsel_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	u32 link2xclk_div_tx0, link2xclk_div_tx1;
-	u32 phy_mode;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
-				QSERDES_TX0_OFFSET + TXn_TX_BAND);
-	link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
-				QSERDES_TX1_OFFSET + TXn_TX_BAND);
-
-	link2xclk_div_tx0 &= ~0x07;	/* bits 0 to 2 */
-	link2xclk_div_tx1 &= ~0x07;	/* bits 0 to 2 */
-
-	/* Configure TX band Mux */
-	link2xclk_div_tx0 |= 0x4;
-	link2xclk_div_tx1 |= 0x4;
-
-	/*configure DP PHY MODE */
-	phy_mode = 0x58;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_BAND,
-			link2xclk_div_tx0);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_BAND,
-			link2xclk_div_tx1);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_MODE, phy_mode);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
-			__func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return rc;
-}
-
-int link2xclk_divsel_get_div(struct div_clk *clk)
-{
-	int rc;
-	u32 div = 0, phy_mode;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
-
-	if (phy_mode & 0x48)
-		pr_err("%s: DP PAR Rate not correct\n", __func__);
-
-	if ((phy_mode & 0x3) == 1)
-		div = 10;
-	else if ((phy_mode & 0x3) == 0)
-		div = 5;
-	else
-		pr_err("%s: unsupported div: %d\n", __func__, phy_mode);
-
-	mdss_pll_resource_enable(dp_res, false);
-	pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
-						phy_mode, div);
-
-	return div;
-}
-
-int vco_divided_clk_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	u32 auxclk_div;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= ~0x03;	/* bits 0 to 1 */
-
-	auxclk_div |= 1; /* Default divider */
-
-	if (div == 4)
-		auxclk_div |= 2;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_VCO_DIV, auxclk_div);
-	/* Make sure the PHY registers writes are done */
-	wmb();
-	pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return rc;
-}
-
-
-enum handoff vco_divided_clk_handoff(struct clk *c)
-{
-	/*
-	 * Since cont-splash is not enabled, disable handoff
-	 * for vco_divider_clk.
-	 */
-	return HANDOFF_DISABLED_CLK;
-}
-
-int vco_divided_clk_get_div(struct div_clk *clk)
-{
-	int rc;
-	u32 div, auxclk_div;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= 0x03;
-
-	div = 2; /* Default divider */
-	if (auxclk_div == 2)
-		div = 4;
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
-
-	return div;
-}
-
-int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
-{
-	u32 res = 0;
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_PD_CTL, 0x3d);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CLK_ENABLE1, 0x0e);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CLK_SEL, 0x30);
-
-	/* Different for each clock rates */
-	if (rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_8100MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x02);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x2c);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x04);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x69);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x42);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else if (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_8100MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x06);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x84);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x08);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x69);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x02);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_10800MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x06);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x08);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x8c);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x12);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else {
-		pr_err("%s: unsupported rate: %ld\n", __func__, rate);
-		return -EINVAL;
-	}
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	if ((rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000)
-	    || (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)) {
-		MDSS_PLL_REG_W(dp_res->phy_base,
-				DP_PHY_VCO_DIV, 0x1);
-	} else {
-		MDSS_PLL_REG_W(dp_res->phy_base,
-				DP_PHY_VCO_DIV, 0x2);
-	}
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_VCO_TUNE_MAP, 0x00);
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BG_TIMER, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BG_TIMER, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CP_CTRL_MODE0, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_IVCO, 0x07);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CORE_CLK_EN, 0x0f);
-
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_MODE, 0x58);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x1a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x1a);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
-			0x40);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
-			0x40);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
-			0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
-			0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
-			0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
-			0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
-			0x00);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_BAND,
-			0x4);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_BAND,
-			0x4);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	return res;
-}
-
-static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: C_READY status is not high. Status=%x\n",
-				__func__, status);
-		pll_locked = false;
-	} else if (readl_poll_timeout_atomic((dp_res->pll_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-
-static int dp_pll_enable(struct clk *c)
-{
-	int rc = 0;
-	u32 status;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x09);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_RESETSM_CNTRL, 0x20);
-	/* Make sure the PLL register writes are done */
-	wmb();
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: C_READY status is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	pr_debug("%s: PLL is locked\n", __func__);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
-			0x10);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
-			0x10);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
-			0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
-			0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x18);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
-			0xf6);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
-			0xf6);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x1f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x1f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x09);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-	udelay(2000);
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
-			0x2a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
-			0x2a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
-			0x20);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
-			0x20);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
-			0x11);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-lock_err:
-	return rc;
-}
-
-static int dp_pll_disable(struct clk *c)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	/* Assert DP PHY power down */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_PD_CTL, 0x2);
-	/*
-	 * Make sure all the register writes to disable PLL are
-	 * completed before doing any other operation
-	 */
-	wmb();
-
-	return rc;
-}
-
-
-int dp_vco_prepare(struct clk *c)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_pll_res = vco->priv;
-
-	DEV_DBG("rate=%ld\n", vco->rate);
-	rc = mdss_pll_resource_enable(dp_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll resources\n");
-		goto error;
-	}
-
-	rc = dp_pll_enable(c);
-	if (rc) {
-		mdss_pll_resource_enable(dp_pll_res, false);
-		pr_err("ndx=%d failed to enable dsi pll\n",
-					dp_pll_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(dp_pll_res, false);
-error:
-	return rc;
-}
-
-void dp_vco_unprepare(struct clk *c)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (!io) {
-		DEV_ERR("Invalid input parameter\n");
-		return;
-	}
-
-	if (!io->pll_on &&
-		mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return;
-	}
-	dp_pll_disable(c);
-
-	io->handoff_resources = false;
-	mdss_pll_resource_enable(io, false);
-	io->pll_on = false;
-}
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	int rc;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	DEV_DBG("DP lane CLK rate=%ld\n", rate);
-
-	rc = dp_config_vco_rate(vco, rate);
-	if (rc)
-		DEV_ERR("%s: Failed to set clk rate\n", __func__);
-
-	mdss_pll_resource_enable(io, false);
-
-	vco->rate = rate;
-
-	return 0;
-}
-
-unsigned long dp_vco_get_rate(struct clk *c)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	int rc;
-	u32 div, hsclk_div, link2xclk_div;
-	u64 vco_rate;
-	struct mdss_pll_resources *pll = vco->priv;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
-	div &= 0x0f;
-
-	if (div == 12)
-		hsclk_div = 5; /* Default */
-	else if (div == 4)
-		hsclk_div = 3;
-	else if (div == 0)
-		hsclk_div = 2;
-	else {
-		pr_debug("unknown divider. forcing to default\n");
-		hsclk_div = 5;
-	}
-
-	div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
-
-	if (div & 0x58)
-		pr_err("%s: DP PAR Rate not correct\n", __func__);
-
-	if ((div & 0x3) == 1)
-		link2xclk_div = 10;
-	else if ((div & 0x3) == 0)
-		link2xclk_div = 5;
-	else
-		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
-
-	if (link2xclk_div == 10) {
-		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	} else {
-		if (hsclk_div == 5)
-			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
-		else if (hsclk_div == 3)
-			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-		else
-			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-	}
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return (unsigned long)vco_rate;
-}
-
-long dp_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-
-	if (rate <= vco->min_rate)
-		rrate = vco->min_rate;
-	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else
-		rrate = vco->max_rate;
-
-	pr_debug("%s: rrate=%ld\n", __func__, rrate);
-
-	return rrate;
-}
-
-enum handoff dp_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	if (dp_pll_lock_status(io)) {
-		io->pll_on = true;
-		c->rate = dp_vco_get_rate(c);
-		io->handoff_resources = true;
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		io->handoff_resources = false;
-		mdss_pll_resource_enable(io, false);
-		DEV_DBG("%s: PLL not locked\n", __func__);
-	}
-
-	DEV_DBG("done, ret=%d\n", ret);
-	return ret;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
deleted file mode 100644
index 6a49d15..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-/*
- ***************************************************************************
- ******** Display Port PLL driver block diagram for branch clocks **********
- ***************************************************************************
-
-			+--------------------------+
-			|       DP_VCO_CLK         |
-			|			   |
-			|  +-------------------+   |
-			|  |   (DP PLL/VCO)    |   |
-			|  +---------+---------+   |
-			|	     v		   |
-			| +----------+-----------+ |
-			| | hsclk_divsel_clk_src | |
-			| +----------+-----------+ |
-			+--------------------------+
-				     |
-				     v
-	   +------------<------------|------------>-------------+
-	   |                         |                          |
-+----------v----------+	  +----------v----------+    +----------v----------+
-|   dp_link_2x_clk    |	  | vco_divided_clk_src	|    | vco_divided_clk_src |
-|     divsel_five     |	  |			|    |			   |
-v----------+----------v	  |	divsel_two	|    |	   divsel_four	   |
-	   |		  +----------+----------+    +----------+----------+
-	   |                         |                          |
-	   v			     v				v
-				     |	+---------------------+	|
-  Input to MMSSCC block		     |	|    (aux_clk_ops)    |	|
-  for link clk, crypto clk	     +-->   vco_divided_clk   <-+
-  and interface clock			|	_src_mux      |
-					+----------+----------+
-						   |
-						   v
-					 Input to MMSSCC block
-					 for DP pixel clock
-
- ******************************************************************************
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-static const struct clk_ops clk_ops_vco_divided_clk_src_c;
-static const struct clk_ops clk_ops_link_2x_clk_div_c;
-static const struct clk_ops clk_ops_gen_mux_dp;
-
-static struct clk_div_ops link2xclk_divsel_ops = {
-	.set_div = link2xclk_divsel_set_div,
-	.get_div = link2xclk_divsel_get_div,
-};
-
-static struct clk_div_ops vco_divided_clk_ops = {
-	.set_div = vco_divided_clk_set_div,
-	.get_div = vco_divided_clk_get_div,
-};
-
-static const struct clk_ops dp_8998_vco_clk_ops = {
-	.set_rate = dp_vco_set_rate,
-	.round_rate = dp_vco_round_rate,
-	.prepare = dp_vco_prepare,
-	.unprepare = dp_vco_unprepare,
-	.handoff = dp_vco_handoff,
-};
-
-static struct clk_mux_ops mdss_mux_ops = {
-	.set_mux_sel = mdss_set_mux_sel,
-	.get_mux_sel = mdss_get_mux_sel,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
-	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
-	.max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
-	.c = {
-		.dbg_name = "dp_vco_clk",
-		.ops = &dp_8998_vco_clk_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dp_vco_clk.c),
-	},
-};
-
-static struct div_clk dp_link_2x_clk_divsel_five = {
-	.data = {
-		.div = 5,
-		.min_div = 5,
-		.max_div = 5,
-	},
-	.ops = &link2xclk_divsel_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "dp_link_2x_clk_divsel_five",
-		.ops = &clk_ops_link_2x_clk_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dp_link_2x_clk_divsel_five.c),
-	},
-};
-
-static struct div_clk vco_divsel_four_clk_src = {
-	.data = {
-		.div = 4,
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.ops = &vco_divided_clk_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "vco_divsel_four_clk_src",
-		.ops = &clk_ops_vco_divided_clk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divsel_four_clk_src.c),
-	},
-};
-
-static struct div_clk vco_divsel_two_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.ops = &vco_divided_clk_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "vco_divsel_two_clk_src",
-		.ops = &clk_ops_vco_divided_clk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divsel_two_clk_src.c),
-	},
-};
-
-static struct mux_clk vco_divided_clk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&vco_divsel_two_clk_src.c, 0},
-		{&vco_divsel_four_clk_src.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &vco_divsel_two_clk_src.c,
-		.dbg_name = "vco_divided_clk_src_mux",
-		.ops = &clk_ops_gen_mux_dp,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divided_clk_src_mux.c),
-	}
-};
-
-static struct clk_lookup dp_pllcc_8998[] = {
-	CLK_LIST(dp_vco_clk),
-	CLK_LIST(dp_link_2x_clk_divsel_five),
-	CLK_LIST(vco_divsel_four_clk_src),
-	CLK_LIST(vco_divsel_two_clk_src),
-	CLK_LIST(vco_divided_clk_src_mux),
-};
-
-int dp_pll_clock_register_8998(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP;
-
-	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
-		DEV_ERR("%s: Invalid input parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Set client data for vco, mux and div clocks */
-	dp_vco_clk.priv = pll_res;
-	vco_divided_clk_src_mux.priv = pll_res;
-	vco_divsel_two_clk_src.priv = pll_res;
-	vco_divsel_four_clk_src.priv = pll_res;
-	dp_link_2x_clk_divsel_five.priv = pll_res;
-
-	clk_ops_link_2x_clk_div_c = clk_ops_div;
-	clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
-
-	/*
-	 * Set the ops for the divider in the pixel clock tree to the
-	 * slave_div to ensure that a set rate on this divider clock will not
-	 * be propagated to it's parent. This is needed ensure that when we set
-	 * the rate for pixel clock, the vco is not reconfigured
-	 */
-	clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
-	clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
-	clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
-
-	clk_ops_gen_mux_dp = clk_ops_gen_mux;
-	clk_ops_gen_mux_dp.get_rate = parent_get_rate;
-
-	/* We can select different clock ops for future versions */
-	dp_vco_clk.c.ops = &dp_8998_vco_clk_ops;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_8998,
-					ARRAY_SIZE(dp_pllcc_8998));
-	if (rc) {
-		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		DEV_DBG("%s SUCCESS\n", __func__);
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
deleted file mode 100644
index 11d5ddc..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MDSS_DP_PLL_8998_H
-#define __MDSS_DP_PLL_8998_H
-
-#define DP_PHY_REVISION_ID0			0x0000
-#define DP_PHY_REVISION_ID1			0x0004
-#define DP_PHY_REVISION_ID2			0x0008
-#define DP_PHY_REVISION_ID3			0x000C
-
-#define DP_PHY_CFG				0x0010
-#define DP_PHY_PD_CTL				0x0014
-#define DP_PHY_MODE				0x0018
-
-#define DP_PHY_AUX_CFG0				0x001C
-#define DP_PHY_AUX_CFG1				0x0020
-#define DP_PHY_AUX_CFG2				0x0024
-#define DP_PHY_AUX_CFG3				0x0028
-#define DP_PHY_AUX_CFG4				0x002C
-#define DP_PHY_AUX_CFG5				0x0030
-#define DP_PHY_AUX_CFG6				0x0034
-#define DP_PHY_AUX_CFG7				0x0038
-#define DP_PHY_AUX_CFG8				0x003C
-#define DP_PHY_AUX_CFG9				0x0040
-#define DP_PHY_AUX_INTERRUPT_MASK		0x0044
-#define DP_PHY_AUX_INTERRUPT_CLEAR		0x0048
-#define DP_PHY_AUX_BIST_CFG			0x004C
-
-#define DP_PHY_VCO_DIV				0x0064
-#define DP_PHY_TX0_TX1_LANE_CTL			0x0068
-
-#define DP_PHY_TX2_TX3_LANE_CTL			0x0084
-#define DP_PHY_STATUS				0x00BC
-
-/* Tx registers */
-#define QSERDES_TX0_OFFSET			0x0400
-#define QSERDES_TX1_OFFSET			0x0800
-
-#define TXn_BIST_MODE_LANENO			0x0000
-#define TXn_CLKBUF_ENABLE			0x0008
-#define TXn_TX_EMP_POST1_LVL			0x000C
-
-#define TXn_TX_DRV_LVL				0x001C
-
-#define TXn_RESET_TSYNC_EN			0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
-#define TXn_TX_BAND				0x002C
-#define TXn_SLEW_CNTL				0x0030
-#define TXn_INTERFACE_SELECT			0x0034
-
-#define TXn_RES_CODE_LANE_TX			0x003C
-#define TXn_RES_CODE_LANE_RX			0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
-
-#define TXn_DEBUG_BUS_SEL			0x0058
-#define TXn_TRANSCEIVER_BIAS_EN			0x005C
-#define TXn_HIGHZ_DRVR_EN			0x0060
-#define TXn_TX_POL_INV				0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
-
-#define TXn_LANE_MODE_1				0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN			0x00C0
-#define TXn_TX_INTERFACE_MODE			0x00C4
-
-#define TXn_VMODE_CTRL1				0x00F0
-
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1			0x0000
-#define QSERDES_COM_ATB_SEL2			0x0004
-#define QSERDES_COM_FREQ_UPDATE			0x0008
-#define QSERDES_COM_BG_TIMER			0x000C
-#define QSERDES_COM_SSC_EN_CENTER		0x0010
-#define QSERDES_COM_SSC_ADJ_PER1		0x0014
-#define QSERDES_COM_SSC_ADJ_PER2		0x0018
-#define QSERDES_COM_SSC_PER1			0x001C
-#define QSERDES_COM_SSC_PER2			0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
-#define QSERDES_COM_POST_DIV			0x002C
-#define QSERDES_COM_POST_DIV_MUX		0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
-#define QSERDES_COM_CLK_ENABLE1			0x0038
-#define QSERDES_COM_SYS_CLK_CTRL		0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
-#define QSERDES_COM_PLL_EN			0x0044
-#define QSERDES_COM_PLL_IVCO			0x0048
-#define QSERDES_COM_CMN_IETRIM			0x004C
-#define QSERDES_COM_CMN_IPTRIM			0x0050
-
-#define QSERDES_COM_CP_CTRL_MODE0		0x0060
-#define QSERDES_COM_CP_CTRL_MODE1		0x0064
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
-#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
-#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
-#define QSERDES_COM_PLL_CNTRL			0x0078
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
-#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
-#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
-#define QSERDES_COM_RESETSM_CNTRL		0x0088
-#define QSERDES_COM_RESETSM_CNTRL2		0x008C
-#define QSERDES_COM_LOCK_CMP_EN			0x0090
-#define QSERDES_COM_LOCK_CMP_CFG		0x0094
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
-#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
-
-#define QSERDES_COM_DEC_START_MODE0		0x00B0
-#define QSERDES_COM_DEC_START_MODE1		0x00B4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
-#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
-#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
-#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
-#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
-#define QSERDES_COM_INTEGLOOP_EN		0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
-#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
-#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
-
-#define QSERDES_COM_CMN_STATUS			0x0124
-#define QSERDES_COM_RESET_SM_STATUS		0x0128
-
-#define QSERDES_COM_CLK_SEL			0x0138
-#define QSERDES_COM_HSCLK_SEL			0x013C
-
-#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
-
-#define QSERDES_COM_SW_RESET			0x0150
-#define QSERDES_COM_CORE_CLK_EN			0x0154
-#define QSERDES_COM_C_READY_STATUS		0x0158
-#define QSERDES_COM_CMN_CONFIG			0x015C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
-
-#define DP_PLL_POLL_SLEEP_US			500
-#define DP_PLL_POLL_TIMEOUT_US			10000
-
-#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
-#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate);
-unsigned long dp_vco_get_rate(struct clk *c);
-long dp_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff dp_vco_handoff(struct clk *c);
-enum handoff vco_divided_clk_handoff(struct clk *c);
-int dp_vco_prepare(struct clk *c);
-void dp_vco_unprepare(struct clk *c);
-int hsclk_divsel_set_div(struct div_clk *clk, int div);
-int hsclk_divsel_get_div(struct div_clk *clk);
-int link2xclk_divsel_set_div(struct div_clk *clk, int div);
-int link2xclk_divsel_get_div(struct div_clk *clk);
-int vco_divided_clk_set_div(struct div_clk *clk, int div);
-int vco_divided_clk_get_div(struct div_clk *clk);
-
-#endif /* __MDSS_DP_PLL_8998_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
index 2805ff9..2b1d70e 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,21 +15,19 @@
 #define __MDSS_DP_PLL_H
 
 struct dp_pll_vco_clk {
+	struct clk_hw hw;
 	unsigned long	rate;		/* current vco rate */
 	u64		min_rate;	/* min vco rate */
 	u64		max_rate;	/* max vco rate */
 	void		*priv;
-
-	struct clk	c;
 };
 
-static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
 {
-	return container_of(clk, struct dp_pll_vco_clk, c);
+	return container_of(hw, struct dp_pll_vco_clk, hw);
 }
 
-int dp_pll_clock_register_8998(struct platform_device *pdev,
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
 				struct mdss_pll_resources *pll_res);
 
-
 #endif /* __MDSS_DP_PLL_H */
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 7f82fda..e292ef8 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -21,6 +21,7 @@
 #include <linux/iopoll.h>
 #include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
+#include "mdss-dp-pll.h"
 
 int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
 {
@@ -126,6 +127,8 @@
 
 	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
 		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
 	else
 		goto err;
 
@@ -151,6 +154,9 @@
 	case MDSS_DSI_PLL_10NM:
 		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
 		break;
+	case MDSS_DP_PLL_10NM:
+		rc = dp_pll_clock_register_10nm(pdev, pll_res);
+		break;
 	case MDSS_UNKNOWN_PLL:
 	default:
 		rc = -EINVAL;
@@ -171,6 +177,7 @@
 	const char *label;
 	struct resource *pll_base_reg;
 	struct resource *phy_base_reg;
+	struct resource *tx0_base_reg, *tx1_base_reg;
 	struct resource *dynamic_pll_base_reg;
 	struct resource *gdsc_base_reg;
 	struct mdss_pll_resources *pll_res;
@@ -272,6 +279,30 @@
 		}
 	}
 
+	tx0_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ln_tx0_base");
+	if (tx0_base_reg) {
+		pll_res->ln_tx0_base = ioremap(tx0_base_reg->start,
+				resource_size(tx0_base_reg));
+		if (!pll_res->ln_tx0_base) {
+			pr_err("Unable to remap Lane TX0 base resources\n");
+			rc = -ENOMEM;
+			goto tx0_io_error;
+		}
+	}
+
+	tx1_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ln_tx1_base");
+	if (tx1_base_reg) {
+		pll_res->ln_tx1_base = ioremap(tx1_base_reg->start,
+				resource_size(tx1_base_reg));
+		if (!pll_res->ln_tx1_base) {
+			pr_err("Unable to remap Lane TX1 base resources\n");
+			rc = -ENOMEM;
+			goto tx1_io_error;
+		}
+	}
+
 	gdsc_base_reg = platform_get_resource_byname(pdev,
 					IORESOURCE_MEM, "gdsc_base");
 	if (!gdsc_base_reg) {
@@ -309,6 +340,12 @@
 	if (pll_res->gdsc_base)
 		iounmap(pll_res->gdsc_base);
 gdsc_io_error:
+	if (pll_res->ln_tx1_base)
+		iounmap(pll_res->ln_tx1_base);
+tx1_io_error:
+	if (pll_res->ln_tx0_base)
+		iounmap(pll_res->ln_tx0_base);
+tx0_io_error:
 	if (pll_res->dyn_pll_base)
 		iounmap(pll_res->dyn_pll_base);
 dyn_pll_io_error:
@@ -347,6 +384,7 @@
 
 static const struct of_device_id mdss_pll_dt_match[] = {
 	{.compatible = "qcom,mdss_dsi_pll_10nm"},
+	{.compatible = "qcom,mdss_dp_pll_10nm"},
 	{}
 };
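A note on the ln_tx0/ln_tx1 handling added above: each named MEM resource is optional, but once the DT provides it the remap must succeed, and every successful remap is unwound on the error path. A minimal sketch of that idiom, with a hypothetical helper name that is not part of the patch:

static int map_optional_region(struct platform_device *pdev,
			       const char *name, void __iomem **out)
{
	struct resource *res;

	/* A missing DT entry is tolerated; the region simply stays unmapped. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return 0;

	/* A present but unmappable region is a hard failure. */
	*out = ioremap(res->start, resource_size(res));
	if (!*out)
		return -ENOMEM;

	return 0;
}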
 
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index ee91e11..033462d 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -37,6 +37,7 @@
 
 enum {
 	MDSS_DSI_PLL_10NM,
+	MDSS_DP_PLL_10NM,
 	MDSS_UNKNOWN_PLL,
 };
 
@@ -81,6 +82,8 @@
 	 */
 	void __iomem	*pll_base;
 	void __iomem	*phy_base;
+	void __iomem	*ln_tx0_base;
+	void __iomem	*ln_tx1_base;
 	void __iomem	*gdsc_base;
 	void __iomem	*dyn_pll_base;
 
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 4eb8a04..14a9cff 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -11,6 +11,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/err.h>
@@ -63,7 +65,7 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -102,6 +104,16 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_video_cc_venus_clk_src_sdm845_v2[] = {
+	F(100000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+	F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(330000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(404000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(444000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(533000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 video_cc_venus_clk_src = {
 	.cmd_rcgr = 0x7f0,
 	.mnd_width = 0,
@@ -324,10 +336,34 @@
 
 static const struct of_device_id video_cc_sdm845_match_table[] = {
 	{ .compatible = "qcom,video_cc-sdm845" },
+	{ .compatible = "qcom,video_cc-sdm845-v2" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, video_cc_sdm845_match_table);
 
+static void video_cc_sdm845_fixup_sdm845v2(void)
+{
+	video_cc_venus_clk_src.freq_tbl = ftbl_video_cc_venus_clk_src_sdm845_v2;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 330000000;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		404000000;
+}
+
+static int video_cc_sdm845_fixup(struct platform_device *pdev)
+{
+	const char *compat = NULL;
+	int compatlen = 0;
+
+	compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+	if (!compat || (compatlen <= 0))
+		return -EINVAL;
+
+	if (!strcmp(compat, "qcom,video_cc-sdm845-v2"))
+		video_cc_sdm845_fixup_sdm845v2();
+
+	return 0;
+}
+
 static int video_cc_sdm845_probe(struct platform_device *pdev)
 {
 	struct regmap *regmap;
@@ -347,6 +383,10 @@
 		return PTR_ERR(vdd_cx.regulator[0]);
 	}
 
+	ret = video_cc_sdm845_fixup(pdev);
+	if (ret)
+		return ret;
+
 	clk_fabia_pll_configure(&video_pll0, regmap, &video_pll0_config);
 
 	ret = qcom_cc_really_probe(pdev, &video_cc_sdm845_desc, regmap);
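For reference, the explicit of_get_property()/strcmp() check in video_cc_sdm845_fixup() could also be expressed with of_device_is_compatible(), which matches against the node's compatible list directly. This is only a sketch of an equivalent check, not what the patch uses:

static int video_cc_sdm845_fixup(struct platform_device *pdev)
{
	/* Apply the v2 frequency table and rate limits only on v2 hardware. */
	if (of_device_is_compatible(pdev->dev.of_node,
				    "qcom,video_cc-sdm845-v2"))
		video_cc_sdm845_fixup_sdm845v2();

	return 0;
}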
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b315236..062d297 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2550,6 +2550,7 @@
 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
 	    list_empty(&cpufreq_policy_list)) {
 		/* if all ->init() calls failed, unregister */
+		ret = -ENODEV;
 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
 			 driver_data->name);
 		goto err_if_unreg;
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 2404e17..ed239c4 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -25,6 +25,7 @@
 enum lpm_type {
 	IDLE = 0,
 	SUSPEND,
+	LATENCY,
 	LPM_TYPE_NR
 };
 
@@ -36,6 +37,7 @@
 static const struct lpm_type_str lpm_types[] = {
 	{IDLE, "idle_enabled"},
 	{SUSPEND, "suspend_enabled"},
+	{LATENCY, "latency_us"},
 };
 
 static DEFINE_PER_CPU(uint32_t *, max_residency);
@@ -67,6 +69,9 @@
 	else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str))
 		avail = container_of(attr, struct lpm_level_avail,
 					suspend_enabled_attr);
+	else if (!strcmp(attr->attr.name, lpm_types[LATENCY].str))
+		avail = container_of(attr, struct lpm_level_avail,
+					latency_attr);
 
 	return avail;
 }
@@ -163,6 +168,28 @@
 {
 	return per_cpu(min_residency, cpu);
 }
+
+static ssize_t lpm_latency_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct kernel_param kp;
+	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);
+
+	if (!avail)
+		return -EINVAL;
+
+	kp.arg = &avail->latency_us;
+
+	ret = param_get_uint(buf, &kp);
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
 ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr,
 				char *buf)
 {
@@ -239,9 +266,16 @@
 	avail->suspend_enabled_attr.show = lpm_enable_show;
 	avail->suspend_enabled_attr.store = lpm_enable_store;
 
+	sysfs_attr_init(&avail->latency_attr.attr);
+	avail->latency_attr.attr.name = lpm_types[LATENCY].str;
+	avail->latency_attr.attr.mode = 0444;
+	avail->latency_attr.show = lpm_latency_show;
+	avail->latency_attr.store = NULL;
+
 	attr[0] = &avail->idle_enabled_attr.attr;
 	attr[1] = &avail->suspend_enabled_attr.attr;
-	attr[2] = NULL;
+	attr[2] = &avail->latency_attr.attr;
+	attr[3] = NULL;
 	attr_group->attrs = attr;
 
 	ret = sysfs_create_group(kobj, attr_group);
@@ -301,6 +335,7 @@
 		 */
 		for (i = 1; i < p->cpu->nlevels; i++) {
 
+			level_list[i].latency_us = p->levels[i].pwr.latency_us;
 			ret = create_lvl_avail_nodes(p->cpu->levels[i].name,
 					cpu_kobj[cpu_idx], &level_list[i],
 					(void *)p->cpu, cpu, true);
@@ -336,6 +371,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < p->nlevels; i++) {
+		p->levels[i].available.latency_us = p->levels[i].pwr.latency_us;
 		ret = create_lvl_avail_nodes(p->levels[i].level_name,
 				cluster_kobj, &p->levels[i].available,
 				(void *)p, 0, false);
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 6c9a50b..3d35ae9 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,9 +57,11 @@
 struct lpm_level_avail {
 	bool idle_enabled;
 	bool suspend_enabled;
+	uint32_t latency_us;
 	struct kobject *kobj;
 	struct kobj_attribute idle_enabled_attr;
 	struct kobj_attribute suspend_enabled_attr;
+	struct kobj_attribute latency_attr;
 	void *data;
 	int idx;
 	bool cpu_node;
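The new latency_us node is read-only, so its show() callback only needs to format avail->latency_us into the sysfs buffer. A minimal sketch using scnprintf() directly, instead of the kernel_param helpers used in the patch (hypothetical simplification):

static ssize_t lpm_latency_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct lpm_level_avail *avail = get_avail_ptr(kobj, attr);

	if (!avail)
		return -EINVAL;

	/* latency_us was copied from the level's pwr.latency_us at node creation. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", avail->latency_us);
}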
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8af73ac..d9ebe113 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -4877,15 +4877,12 @@
 	if (handle == NULL)
 		return -ENODEV;
 
-	qce_enable_clk(pce_dev);
-
 	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
 	sps_disconnect(sps_pipe_info);
 
 	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
 	sps_disconnect(sps_pipe_info);
 
-	qce_disable_clk(pce_dev);
 	return 0;
 }
 
@@ -4899,8 +4896,6 @@
 	if (handle == NULL)
 		return -ENODEV;
 
-	qce_enable_clk(pce_dev);
-
 	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
 	sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
 	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
@@ -4923,7 +4918,6 @@
 	if (rc)
 		pr_err("Producer callback registration failed rc = %d\n", rc);
 
-	qce_disable_clk(pce_dev);
 	return rc;
 }
 
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index ff64631..56fbb94 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -5371,8 +5371,11 @@
 	spin_unlock_irqrestore(&cp->lock, flags);
 	if (ret)
 		return ret;
-	if (qce_pm_table.suspend)
+	if (qce_pm_table.suspend) {
+		qcrypto_ce_set_bus(pengine, true);
 		qce_pm_table.suspend(pengine->qce);
+		qcrypto_ce_set_bus(pengine, false);
+	}
 	return 0;
 }
 
@@ -5393,9 +5396,11 @@
 	spin_lock_irqsave(&cp->lock, flags);
 	if (pengine->bw_state == BUS_SUSPENDED) {
 		spin_unlock_irqrestore(&cp->lock, flags);
-		if (qce_pm_table.resume)
+		if (qce_pm_table.resume) {
+			qcrypto_ce_set_bus(pengine, true);
 			qce_pm_table.resume(pengine->qce);
-
+			qcrypto_ce_set_bus(pengine, false);
+		}
 		spin_lock_irqsave(&cp->lock, flags);
 		pengine->bw_state = BUS_NO_BANDWIDTH;
 		pengine->active_seq++;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index d37e8dd..ec24059 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -201,6 +201,7 @@
 	struct dma_device	dma_dev;
 	bool			m2m;
 	int			(*hw_setup)(struct ep93xx_dma_chan *);
+	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
 	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
 	void			(*hw_submit)(struct ep93xx_dma_chan *);
 	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@
 		| M2P_CONTROL_ENABLE;
 	m2p_set_control(edmac, control);
 
+	edmac->buffer = 0;
+
 	return 0;
 }
 
@@ -331,21 +334,27 @@
 	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
 
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+	unsigned long flags;
 	u32 control;
 
+	spin_lock_irqsave(&edmac->lock, flags);
 	control = readl(edmac->regs + M2P_CONTROL);
 	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 	m2p_set_control(edmac, control);
+	spin_unlock_irqrestore(&edmac->lock, flags);
 
 	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-		cpu_relax();
+		schedule();
+}
 
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
 	m2p_set_control(edmac, 0);
 
-	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-		cpu_relax();
+	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1161,6 +1170,26 @@
 }
 
 /**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+	if (edmac->edma->hw_synchronize)
+		edmac->edma->hw_synchronize(edmac);
+}
+
+/**
  * ep93xx_dma_terminate_all - terminate all transactions
  * @chan: channel
  *
@@ -1323,6 +1352,7 @@
 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
 	dma_dev->device_config = ep93xx_dma_slave_config;
+	dma_dev->device_synchronize = ep93xx_dma_synchronize;
 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@
 	} else {
 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
 
+		edma->hw_synchronize = m2p_hw_synchronize;
 		edma->hw_setup = m2p_hw_setup;
 		edma->hw_shutdown = m2p_hw_shutdown;
 		edma->hw_submit = m2p_hw_submit;
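From a client's point of view, the new hw_synchronize hook is reached through the generic dmaengine wrappers; the sequence below is a sketch of the intended usage (standard dmaengine API assumed, not code from this patch):

#include <linux/dmaengine.h>

static void client_stop_channel(struct dma_chan *chan)
{
	/* Ask the channel to stop; complete callbacks may still be running. */
	dmaengine_terminate_async(chan);

	/*
	 * Blocks until ->device_synchronize() reports the channel quiesced;
	 * only after this may descriptor memory and callback data be freed.
	 */
	dmaengine_synchronize(chan);
}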
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index a28a01f..f3e211f 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -161,6 +161,7 @@
 	struct mv_xor_v2_sw_desc *sw_desq;
 	int desc_size;
 	unsigned int npendings;
+	unsigned int hw_queue_idx;
 };
 
 /**
@@ -214,18 +215,6 @@
 }
 
 /*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-	/* read the index for the next available descriptor in the DESQ */
-	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
-/*
  * notify the engine of new descriptors, and update the available index.
  */
 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
@@ -257,22 +246,6 @@
 	return MV_XOR_V2_EXT_DESC_SIZE;
 }
 
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-	u32 reg;
-
-	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-
-	reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-	reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-
-	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
-
 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 {
 	struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@
 	if (!ndescs)
 		return IRQ_NONE;
 
-	/*
-	 * Update IMSG threshold, to disable new IMSG interrupts until
-	 * end of the tasklet
-	 */
-	mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
-
 	/* schedule a tasklet to handle descriptors callbacks */
 	tasklet_schedule(&xor_dev->irq_tasklet);
 
@@ -306,7 +273,6 @@
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	int desq_ptr;
 	void *dest_hw_desc;
 	dma_cookie_t cookie;
 	struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@
 	spin_lock_bh(&xor_dev->lock);
 	cookie = dma_cookie_assign(tx);
 
-	/* get the next available slot in the DESQ */
-	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
 	/* copy the HW descriptor from the SW descriptor to the DESQ */
-	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
 	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
 	xor_dev->npendings++;
+	xor_dev->hw_queue_idx++;
+	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+		xor_dev->hw_queue_idx = 0;
 
 	spin_unlock_bh(&xor_dev->lock);
 
@@ -344,6 +310,7 @@
 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 {
 	struct mv_xor_v2_sw_desc *sw_desc;
+	bool found = false;
 
 	/* Lock the channel */
 	spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@
 		return NULL;
 	}
 
-	/* get a free SW descriptor from the SW DESQ */
-	sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-				   struct mv_xor_v2_sw_desc, free_list);
+	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+		if (async_tx_test_ack(&sw_desc->async_tx)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		spin_unlock_bh(&xor_dev->lock);
+		return NULL;
+	}
+
 	list_del(&sw_desc->free_list);
 
 	/* Release the channel */
 	spin_unlock_bh(&xor_dev->lock);
 
-	/* set the async tx descriptor */
-	dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-	sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-	async_tx_ack(&sw_desc->async_tx);
-
 	return sw_desc;
 }
 
@@ -389,6 +360,8 @@
 		__func__, len, &src, &dest, flags);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	sw_desc->async_tx.flags = flags;
 
@@ -443,6 +416,8 @@
 		__func__, src_cnt, len, &dest, flags);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	sw_desc->async_tx.flags = flags;
 
@@ -491,6 +466,8 @@
 		container_of(chan, struct mv_xor_v2_device, dmachan);
 
 	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+	if (!sw_desc)
+		return NULL;
 
 	/* set the HW descriptor */
 	hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@
 {
 	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
 	int pending_ptr, num_of_pending, i;
-	struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
 	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 
 	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@
 	/* get the pending descriptors parameters */
 	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 
-	/* next HW descriptor */
-	next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
-
 	/* loop over free descriptors */
 	for (i = 0; i < num_of_pending; i++) {
-
-		if (pending_ptr > MV_XOR_V2_DESC_NUM)
-			pending_ptr = 0;
-
-		if (next_pending_sw_desc != NULL)
-			next_pending_hw_desc++;
+		struct mv_xor_v2_descriptor *next_pending_hw_desc =
+			xor_dev->hw_desq_virt + pending_ptr;
 
 		/* get the SW descriptor related to the HW descriptor */
 		next_pending_sw_desc =
@@ -608,15 +577,14 @@
 
 		/* increment the next descriptor */
 		pending_ptr++;
+		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+			pending_ptr = 0;
 	}
 
 	if (num_of_pending != 0) {
 		/* free the descriptores */
 		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
 	}
-
-	/* Update IMSG threshold, to enable new IMSG interrupts */
-	mv_xor_v2_set_imsg_thrd(xor_dev, 0);
 }
 
 /*
@@ -648,9 +616,6 @@
 	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
-	/* enable the DMA engine */
-	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
 	/*
 	 * This is a temporary solution, until we activate the
 	 * SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@
 	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
 	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 
+	/* enable the DMA engine */
+	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
 	return 0;
 }
 
@@ -725,6 +693,10 @@
 
 	platform_set_drvdata(pdev, xor_dev);
 
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+	if (ret)
+		return ret;
+
 	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
@@ -785,8 +757,15 @@
 
 	/* add all SW descriptors to the free list */
 	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-		xor_dev->sw_desq[i].idx = i;
-		list_add(&xor_dev->sw_desq[i].free_list,
+		struct mv_xor_v2_sw_desc *sw_desc =
+			xor_dev->sw_desq + i;
+		sw_desc->idx = i;
+		dma_async_tx_descriptor_init(&sw_desc->async_tx,
+					     &xor_dev->dmachan);
+		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+		async_tx_ack(&sw_desc->async_tx);
+
+		list_add(&sw_desc->free_list,
 			 &xor_dev->free_sw_desc);
 	}
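Both the submit path (hw_queue_idx) and the completion tasklet (pending_ptr) now advance a software ring index and wrap it at MV_XOR_V2_DESC_NUM. The invariant, reduced to a sketch:

static unsigned int ring_next(unsigned int idx, unsigned int ring_size)
{
	/* Advance one slot and wrap; ring_size is MV_XOR_V2_DESC_NUM here. */
	if (++idx >= ring_size)
		idx = 0;

	return idx;
}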
 
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 06ecdc3..6682b3e 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@
 #define USB_DMASWR			0x0008
 #define USB_DMASWR_SWR			(1 << 0)
 #define USB_DMAOR			0x0060
-#define USB_DMAOR_AE			(1 << 2)
+#define USB_DMAOR_AE			(1 << 1)
 #define USB_DMAOR_DME			(1 << 0)
 
 #define USB_DMASAR			0x0000
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 4403f86..a8ec359 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -103,7 +103,7 @@
 
 struct erp_drvdata {
 	struct regmap *llcc_map;
-	phys_addr_t *llcc_banks;
+	u32 *llcc_banks;
 	u32 ecc_irq;
 	u32 num_banks;
 	u32 b_off;
@@ -353,12 +353,13 @@
 	struct erp_drvdata *drv;
 	struct edac_device_ctl_info *edev_ctl;
 	struct device *dev = &pdev->dev;
-	u32 *banks;
-	u32 i;
 
 	/* Allocate edac control info */
 	edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
-			NULL, 1, 1, NULL, 0, edac_device_alloc_index());
+			NULL, 0, 1, NULL, 0, edac_device_alloc_index());
+
+	if (!edev_ctl)
+		return -ENOMEM;
 
 	edev_ctl->dev = dev;
 	edev_ctl->mod_name = dev_name(dev);
@@ -404,20 +405,15 @@
 	drv->num_banks >>= LLCC_LB_CNT_SHIFT;
 
 	drv->llcc_banks = devm_kzalloc(&pdev->dev,
-		sizeof(phys_addr_t) * drv->num_banks, GFP_KERNEL);
+		sizeof(u32) * drv->num_banks, GFP_KERNEL);
 
-	if (!drv->num_banks) {
+	if (!drv->llcc_banks) {
 		dev_err(dev, "Cannot allocate memory for llcc_banks\n");
 		return -ENOMEM;
 	}
 
-	banks = devm_kzalloc(&pdev->dev,
-		sizeof(u32) * drv->num_banks, GFP_KERNEL);
-	if (!banks)
-		return -ENOMEM;
-
 	rc = of_property_read_u32_array(dev->parent->of_node,
-			"qcom,llcc-banks-off", banks, drv->num_banks);
+			"qcom,llcc-banks-off", drv->llcc_banks, drv->num_banks);
 	if (rc) {
 		dev_err(dev, "Cannot read llcc-banks-off property\n");
 		return -EINVAL;
@@ -430,9 +426,6 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; i < drv->num_banks; i++)
-		drv->llcc_banks[i] = banks[i];
-
 	platform_set_drvdata(pdev, edev_ctl);
 
 	rc = edac_device_add_device(edev_ctl);
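Since the bank-offset array is now a plain u32 array read straight from the DT property, the allocation could equally use devm_kcalloc(), which makes the element count explicit and guards against multiplication overflow. A sketch of the equivalent call (the patch keeps devm_kzalloc with an explicit multiply):

	drv->llcc_banks = devm_kcalloc(&pdev->dev, drv->num_banks,
				       sizeof(u32), GFP_KERNEL);
	if (!drv->llcc_banks)
		return -ENOMEM;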
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5be788b..1679727 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -900,6 +900,12 @@
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (amdgpu_dpm_get_vrefresh(adev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6efdba4..0f2fa90 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -379,7 +379,12 @@
 void drm_unplug_dev(struct drm_device *dev)
 {
 	/* for a USB device */
-	drm_dev_unregister(dev);
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		drm_modeset_unregister_all(dev);
+
+	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+	drm_minor_unregister(dev, DRM_MINOR_RENDER);
+	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
 	mutex_lock(&drm_global_mutex);
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index fd7c912..79e9d36 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -774,20 +774,23 @@
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
 			    drm_mode_duplicate(dev, scan);
+			DRM_DEBUG_KMS("Using mode from DDC\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? do we need this? */
-	if (mode_dev->vbt_mode)
+	if (dev_priv->lfp_lvds_vbt_mode) {
 		mode_dev->panel_fixed_mode =
-		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-	if (!mode_dev->panel_fixed_mode)
-		if (dev_priv->lfp_lvds_vbt_mode)
-			mode_dev->panel_fixed_mode =
-				drm_mode_duplicate(dev,
-					dev_priv->lfp_lvds_vbt_mode);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using mode from VBT\n");
+			goto out;
+		}
+	}
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
@@ -804,6 +807,7 @@
 		if (mode_dev->panel_fixed_mode) {
 			mode_dev->panel_fixed_mode->type |=
 			    DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using pre-programmed mode\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 923150d..ca6efb6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -573,9 +573,7 @@
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
-	ret = intel_bios_init(dev_priv);
-	if (ret)
-		DRM_INFO("failed to find VBIOS tables\n");
+	intel_bios_init(dev_priv);
 
 	/* If we have > 1 VGA cards, then we need to arbitrate access
 	 * to the common VGA resources.
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e0d7245..36a665f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3584,7 +3584,7 @@
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_bios.c */
-int intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index cf25607..4ac36e3 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1332,6 +1332,7 @@
 	return;
 }
 
+/* Common defaults which may be overridden by VBT. */
 static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
@@ -1368,6 +1369,18 @@
 			&dev_priv->vbt.ddi_port_info[port];
 
 		info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
+	}
+}
+
+/* Defaults to initialize only if there is no VBT. */
+static void
+init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+{
+	enum port port;
+
+	for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+		struct ddi_vbt_port_info *info =
+			&dev_priv->vbt.ddi_port_info[port];
 
 		info->supports_dvi = (port != PORT_A && port != PORT_E);
 		info->supports_hdmi = info->supports_dvi;
@@ -1450,36 +1463,35 @@
  * intel_bios_init - find VBT and initialize settings from the BIOS
  * @dev_priv: i915 device instance
  *
- * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
- * to appropriate values.
- *
- * Returns 0 on success, nonzero on failure.
+ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
+ * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
+ * initialize some defaults if the VBT is not present at all.
  */
-int
-intel_bios_init(struct drm_i915_private *dev_priv)
+void intel_bios_init(struct drm_i915_private *dev_priv)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
 	const struct vbt_header *vbt = dev_priv->opregion.vbt;
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
 
-	if (HAS_PCH_NOP(dev_priv))
-		return -ENODEV;
+	if (HAS_PCH_NOP(dev_priv)) {
+		DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+		return;
+	}
 
 	init_vbt_defaults(dev_priv);
 
+	/* If the OpRegion does not have VBT, look in PCI ROM. */
 	if (!vbt) {
 		size_t size;
 
 		bios = pci_map_rom(pdev, &size);
 		if (!bios)
-			return -1;
+			goto out;
 
 		vbt = find_vbt(bios, size);
-		if (!vbt) {
-			pci_unmap_rom(pdev, bios);
-			return -1;
-		}
+		if (!vbt)
+			goto out;
 
 		DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
 	}
@@ -1504,10 +1516,14 @@
 	parse_mipi_sequence(dev_priv, bdb);
 	parse_ddi_ports(dev_priv, bdb);
 
+out:
+	if (!vbt) {
+		DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+		init_vbt_missing_defaults(dev_priv);
+	}
+
 	if (bios)
 		pci_unmap_rom(pdev, bios);
-
-	return 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index a79a9c9..70581e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,14 +20,8 @@
 
 #define DP_AUX_ENUM_STR(x)		#x
 
-struct aux_buf {
-	u8 *start;      /* buffer start addr */
-	u8 *end;	/* buffer end addr */
-	u8 *data;       /* data pou32er */
-	u32 size;       /* size of buffer */
-	u32 len;	/* dara length */
-	u8 trans_num;   /* transaction number */
-	enum aux_tx_mode tx_mode;
+enum {
+	DP_AUX_DATA_INDEX_WRITE = BIT(31),
 };
 
 struct dp_aux_private {
@@ -38,15 +32,12 @@
 	struct mutex mutex;
 	struct completion comp;
 
-	struct aux_cmd *cmds;
-	struct aux_buf txp;
-	struct aux_buf rxp;
-
 	u32 aux_error_num;
 	bool cmd_busy;
+	bool native;
+	bool read;
 
-	u8 txbuf[256];
-	u8 rxbuf[256];
+	struct drm_dp_aux drm_aux;
 };
 
 static char *dp_aux_get_error(u32 aux_error)
@@ -69,159 +60,104 @@
 	}
 }
 
-static void dp_aux_buf_init(struct aux_buf *buf, u8 *data, u32 size)
+static u32 dp_aux_write(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
 {
-	buf->start     = data;
-	buf->size      = size;
-	buf->data      = buf->start;
-	buf->end       = buf->start + buf->size;
-	buf->len       = 0;
-	buf->trans_num = 0;
-	buf->tx_mode   = AUX_NATIVE;
-}
+	u32 data[4], reg, len;
+	u8 *msgdata = msg->buffer;
+	int const aux_cmd_fifo_len = 128;
+	int i = 0;
 
-static void dp_aux_buf_set(struct dp_aux_private *aux)
-{
-	init_completion(&aux->comp);
-	aux->cmd_busy = false;
-	mutex_init(&aux->mutex);
-
-	dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
-	dp_aux_buf_init(&aux->rxp, aux->rxbuf, sizeof(aux->rxbuf));
-}
-
-static void dp_aux_buf_reset(struct aux_buf *buf)
-{
-	buf->data      = buf->start;
-	buf->len       = 0;
-	buf->trans_num = 0;
-	buf->tx_mode   = AUX_NATIVE;
-
-	memset(buf->start, 0x0, 256);
-}
-
-static void dp_aux_buf_push(struct aux_buf *buf, u32 len)
-{
-	buf->data += len;
-	buf->len  += len;
-}
-
-static u32 dp_aux_buf_trailing(struct aux_buf *buf)
-{
-	return (u32)(buf->end - buf->data);
-}
-
-static u32 dp_aux_add_cmd(struct aux_buf *buf, struct aux_cmd *cmd)
-{
-	u8 data;
-	u8 *bp, *cp;
-	u32 i, len;
-
-	if (cmd->ex_mode == AUX_READ)
+	if (aux->read)
 		len = 4;
 	else
-		len = cmd->len + 4;
-
-	if (dp_aux_buf_trailing(buf) < len) {
-		pr_err("buf trailing error\n");
-		return 0;
-	}
+		len = msg->size + 4;
 
 	/*
 	 * cmd fifo only has depth of 144 bytes
 	 * limit buf length to 128 bytes here
 	 */
-	if ((buf->len + len) > 128) {
+	if (len > aux_cmd_fifo_len) {
 		pr_err("buf len error\n");
 		return 0;
 	}
 
-	bp = buf->data;
-	data = cmd->addr >> 16;
-	data &= 0x0f;  /* 4 addr bits */
+	/* Pack cmd and write to HW */
+	data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+	if (aux->read)
+		data[0] |=  BIT(4); /* R/W */
 
-	if (cmd->ex_mode == AUX_READ)
-		data |=  BIT(4);
+	data[1] = (msg->address >> 8) & 0xff;	/* addr[15:8] */
+	data[2] = msg->address & 0xff;		/* addr[7:0] */
+	data[3] = (msg->size - 1) & 0xff;	/* len[7:0] */
 
-	*bp++ = data;
-	*bp++ = cmd->addr >> 8;
-	*bp++ = cmd->addr;
-	*bp++ = cmd->len - 1;
-
-	if (cmd->ex_mode == AUX_WRITE) {
-		cp = cmd->buf;
-
-		for (i = 0; i < cmd->len; i++)
-			*bp++ = *cp++;
-	}
-
-	dp_aux_buf_push(buf, len);
-
-	buf->tx_mode = cmd->tx_mode;
-
-	buf->trans_num++;
-
-	return cmd->len - 1;
-}
-
-static u32 dp_aux_cmd_fifo_tx(struct dp_aux_private *aux)
-{
-	u8 *dp;
-	u32 data, len, cnt;
-	struct aux_buf *tp = &aux->txp;
-
-	len = tp->len;
-	if (len == 0) {
-		pr_err("invalid len\n");
-		return 0;
-	}
-
-	cnt = 0;
-	dp = tp->start;
-
-	while (cnt < len) {
-		data = *dp;
-		data <<= 8;
-		data &= 0x00ff00;
-		if (cnt == 0)
-			data |= BIT(31);
-
-		aux->catalog->data = data;
+	for (i = 0; i < len; i++) {
+		reg = (i < 4) ? data[i] : msgdata[i - 4];
+		reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
+		if (i == 0)
+			reg |= DP_AUX_DATA_INDEX_WRITE;
+		aux->catalog->data = reg;
 		aux->catalog->write_data(aux->catalog);
-
-		cnt++;
-		dp++;
 	}
 
-	data = (tp->trans_num - 1);
-	if (tp->tx_mode == AUX_I2C) {
-		data |= BIT(8); /* I2C */
-		data |= BIT(10); /* NO SEND ADDR */
-		data |= BIT(11); /* NO SEND STOP */
-	}
+	reg = 0; /* Transaction number == 1 */
+	if (!aux->native) /* i2c */
+		reg |= (BIT(8) | BIT(10) | BIT(11));
 
-	data |= BIT(9); /* GO */
-	aux->catalog->data = data;
+	reg |= BIT(9);
+	aux->catalog->data = reg;
 	aux->catalog->write_trans(aux->catalog);
 
-	return tp->len;
+	return len;
 }
 
-static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
+{
+	u32 ret = 0, len = 0, timeout;
+	int const aux_timeout_ms = HZ/4;
+
+	reinit_completion(&aux->comp);
+
+	len = dp_aux_write(aux, msg);
+	if (len == 0) {
+		pr_err("DP AUX write failed\n");
+		return -EINVAL;
+	}
+
+	timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+	if (!timeout) {
+		pr_err("aux write timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	pr_debug("aux status %s\n",
+		dp_aux_get_error(aux->aux_error_num));
+
+	if (aux->aux_error_num == DP_AUX_ERR_NONE)
+		ret = len;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
 {
 	u32 data;
 	u8 *dp;
 	u32 i;
-	struct aux_buf *rp = &aux->rxp;
+	u32 len = msg->size;
 
 	data = 0;
-	data |= BIT(31); /* INDEX_WRITE */
+	data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
 	data |= BIT(0);  /* read */
 
 	aux->catalog->data = data;
 	aux->catalog->write_data(aux->catalog);
 
-	dp = rp->data;
+	dp = msg->buffer;
 
 	/* discard first byte */
 	data = aux->catalog->read_data(aux->catalog);
@@ -230,9 +166,6 @@
 		data = aux->catalog->read_data(aux->catalog);
 		*dp++ = (u8)((data >> 8) & 0xff);
 	}
-
-	rp->len = len;
-	return len;
 }
 
 static void dp_aux_native_handler(struct dp_aux_private *aux)
@@ -292,219 +225,76 @@
 	if (!aux->cmd_busy)
 		return;
 
-	if (aux->cmds->tx_mode == AUX_NATIVE)
+	if (aux->native)
 		dp_aux_native_handler(aux);
 	else
 		dp_aux_i2c_handler(aux);
 }
 
-
-
-static int dp_aux_write(struct dp_aux_private *aux)
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel through the catalog reset hook if the
+ * wait for transaction completion times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
+		struct drm_dp_aux_msg *msg)
 {
-	struct aux_cmd *cm;
-	struct aux_buf *tp;
-	u32 len, ret, timeout;
+	ssize_t ret;
+	int const aux_cmd_native_max = 16;
+	int const aux_cmd_i2c_max = 128;
+	struct dp_aux_private *aux = container_of(drm_aux,
+		struct dp_aux_private, drm_aux);
 
 	mutex_lock(&aux->mutex);
 
-	tp = &aux->txp;
-	dp_aux_buf_reset(tp);
-
-	cm = aux->cmds;
-	while (cm) {
-		ret = dp_aux_add_cmd(tp, cm);
-		if (ret <= 0)
-			break;
-
-		if (!cm->next)
-			break;
-		cm++;
-	}
-
-	reinit_completion(&aux->comp);
+	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
 	aux->cmd_busy = true;
 
-	len = dp_aux_cmd_fifo_tx(aux);
-
-	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
-	if (!timeout)
-		pr_err("aux write timeout\n");
-
-	pr_debug("aux status %s\n",
-		dp_aux_get_error(aux->aux_error_num));
-
-	if (aux->aux_error_num == DP_AUX_ERR_NONE)
-		ret = len;
-	else
-		ret = aux->aux_error_num;
-
-	aux->cmd_busy = false;
-	mutex_unlock(&aux->mutex);
-	return  ret;
-}
-
-static int dp_aux_read(struct dp_aux_private *aux)
-{
-	struct aux_cmd *cm;
-	struct aux_buf *tp, *rp;
-	u32 len, ret, timeout;
-
-	mutex_lock(&aux->mutex);
-
-	tp = &aux->txp;
-	rp = &aux->rxp;
-
-	dp_aux_buf_reset(tp);
-	dp_aux_buf_reset(rp);
-
-	cm = aux->cmds;
-	len = 0;
-
-	while (cm) {
-		ret = dp_aux_add_cmd(tp, cm);
-		len += cm->len;
-
-		if (ret <= 0)
-			break;
-
-		if (!cm->next)
-			break;
-		cm++;
+	/* Ignore address only message */
+	if ((msg->size == 0) || (msg->buffer == NULL)) {
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+		ret = msg->size;
+		goto unlock_exit;
 	}
 
-	reinit_completion(&aux->comp);
-	aux->cmd_busy = true;
+	/* msg sanity check */
+	if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+		(msg->size > aux_cmd_i2c_max)) {
+		pr_err("%s: invalid msg: size(%zu), request(%x)\n",
+			__func__, msg->size, msg->request);
+		ret = -EINVAL;
+		goto unlock_exit;
+	}
 
-	dp_aux_cmd_fifo_tx(aux);
+	ret = dp_aux_cmd_fifo_tx(aux, msg);
+	if (ret < 0) {
+		aux->catalog->reset(aux->catalog); /* reset aux */
+		goto unlock_exit;
+	}
 
-	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
-	if (!timeout)
-		pr_err("aux read timeout\n");
+	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+		if (aux->read)
+			dp_aux_cmd_fifo_rx(aux, msg);
 
-	pr_debug("aux status %s\n",
-		dp_aux_get_error(aux->aux_error_num));
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+	} else {
+		/* Reply defer to retry */
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+	}
 
-	if (aux->aux_error_num == DP_AUX_ERR_NONE)
-		ret = dp_cmd_fifo_rx(aux, len);
-	else
-		ret = aux->aux_error_num;
+	/* Return requested size for success or retry */
+	ret = msg->size;
 
-	aux->cmds->buf = rp->data;
+unlock_exit:
 	aux->cmd_busy = false;
-
 	mutex_unlock(&aux->mutex);
-
 	return ret;
 }
 
-static int dp_aux_write_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
-				enum aux_tx_mode mode, u8 *buf)
-{
-	struct aux_cmd cmd = {0};
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !len) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	cmd.ex_mode = AUX_WRITE;
-	cmd.tx_mode = mode;
-	cmd.addr    = addr;
-	cmd.len     = len;
-	cmd.buf     = buf;
-
-	aux->cmds = &cmd;
-
-	return dp_aux_write(aux);
-}
-
-static int dp_aux_read_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
-				enum aux_tx_mode mode, u8 **buf)
-{
-	int rc = 0;
-	struct aux_cmd cmd = {0};
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !len) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	cmd.ex_mode = AUX_READ;
-	cmd.tx_mode = mode;
-	cmd.addr    = addr;
-	cmd.len     = len;
-
-	aux->cmds = &cmd;
-
-	rc = dp_aux_read(aux);
-	if (rc <= 0) {
-		rc = -EINVAL;
-		goto end;
-	}
-
-	*buf = cmd.buf;
-end:
-	return rc;
-}
-
-static int dp_aux_process(struct dp_aux *dp_aux, struct aux_cmd *cmds)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !cmds) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	aux->cmds = cmds;
-
-	if (cmds->ex_mode == AUX_READ)
-		return dp_aux_read(aux);
-	else
-		return dp_aux_write(aux);
-}
-
-static bool dp_aux_ready(struct dp_aux *dp_aux)
-{
-	u8 data = 0;
-	int count, ret;
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		goto error;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	for (count = 5; count; count--) {
-		ret = dp_aux_write_ex(dp_aux, 0x50, 1, AUX_I2C, &data);
-		if (ret >= 0)
-			break;
-
-		msleep(100);
-	}
-
-	if (count <= 0) {
-		pr_err("aux chan NOT ready\n");
-		goto error;
-	}
-
-	return true;
-error:
-	return false;
-}
-
 static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
 {
 	struct dp_aux_private *aux;
@@ -535,6 +325,45 @@
 	aux->catalog->enable(aux->catalog, false);
 }
 
+static int dp_aux_register(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+	int ret = 0;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->drm_aux.name = "sde_dp_aux";
+	aux->drm_aux.dev = aux->dev;
+	aux->drm_aux.transfer = dp_aux_transfer;
+	ret = drm_dp_aux_register(&aux->drm_aux);
+	if (ret) {
+		pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
+		goto exit;
+	}
+	dp_aux->drm_aux = &aux->drm_aux;
+exit:
+	return ret;
+}
+
+static void dp_aux_deregister(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+	drm_dp_aux_unregister(&aux->drm_aux);
+}
+
 struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
 {
 	int rc = 0;
@@ -553,21 +382,19 @@
 		goto error;
 	}
 
+	init_completion(&aux->comp);
+	aux->cmd_busy = false;
+	mutex_init(&aux->mutex);
+
 	aux->dev = dev;
-
-	dp_aux_buf_set(aux);
-
 	aux->catalog = catalog;
-
 	dp_aux = &aux->dp_aux;
 
-	dp_aux->process = dp_aux_process;
-	dp_aux->read    = dp_aux_read_ex;
-	dp_aux->write   = dp_aux_write_ex;
-	dp_aux->ready   = dp_aux_ready;
 	dp_aux->isr     = dp_aux_isr;
 	dp_aux->init    = dp_aux_init;
 	dp_aux->deinit  = dp_aux_deinit;
+	dp_aux->drm_aux_register = dp_aux_register;
+	dp_aux->drm_aux_deregister = dp_aux_deregister;
 
 	return dp_aux;
 error:
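With the drm_dp_aux registered, callers no longer go through the removed read/write/process hooks; DPCD access uses the standard drm_dp_helper entry points, which end up in dp_aux_transfer() above. A sketch of a typical caller (standard helper API assumed; the function name is hypothetical):

static int dp_read_dpcd_rev(struct drm_dp_aux *drm_aux)
{
	u8 rev;
	ssize_t len;

	/* Single-byte native AUX read of DPCD offset 0x000. */
	len = drm_dp_dpcd_readb(drm_aux, DP_DPCD_REV, &rev);
	if (len < 0)
		return len;

	pr_debug("DPCD revision 0x%x\n", rev);
	return 0;
}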
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 0603c15..f08c12b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,6 +16,7 @@
 #define _DP_AUX_H_
 
 #include "dp_catalog.h"
+#include "drm_dp_helper.h"
 
 enum dp_aux_error {
 	DP_AUX_ERR_NONE	= 0,
@@ -26,32 +27,10 @@
 	DP_AUX_ERR_NACK_DEFER	= -5,
 };
 
-enum aux_tx_mode {
-	AUX_NATIVE,
-	AUX_I2C,
-};
-
-enum aux_exe_mode {
-	AUX_WRITE,
-	AUX_READ,
-};
-
-struct aux_cmd {
-	enum aux_exe_mode ex_mode;
-	enum aux_tx_mode tx_mode;
-	u32 addr;
-	u32 len;
-	u8 *buf;
-	bool next;
-};
-
 struct dp_aux {
-	int (*process)(struct dp_aux *aux, struct aux_cmd *cmd);
-	int (*write)(struct dp_aux *aux, u32 addr, u32 len,
-			enum aux_tx_mode mode, u8 *buf);
-	int (*read)(struct dp_aux *aux, u32 addr, u32 len,
-			enum aux_tx_mode mode, u8 **buf);
-	bool (*ready)(struct dp_aux *aux);
+	struct drm_dp_aux *drm_aux;
+	int (*drm_aux_register)(struct dp_aux *aux);
+	void (*drm_aux_deregister)(struct dp_aux *aux);
 	void (*isr)(struct dp_aux *aux);
 	void (*init)(struct dp_aux *aux, u32 *aux_cfg);
 	void (*deinit)(struct dp_aux *aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index ca55d16..9361b52 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -177,8 +177,6 @@
 
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
 
-#define EDID_START_ADDRESS			0x50
-
 /* DP MMSS_CC registers */
 #define MMSS_DP_LINK_CMD_RCGR			(0x0138)
 #define MMSS_DP_LINK_CFG_RCGR			(0x013C)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 56f6052..954a2fa 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -20,14 +20,9 @@
 
 #include "dp_ctrl.h"
 
-#define DP_LINK_RATE_MULTIPLIER	27000000
 #define DP_KHZ_TO_HZ 1000
 #define DP_CRYPTO_CLK_RATE_KHZ 180000
 
-/* sink power state  */
-#define SINK_POWER_ON		1
-#define SINK_POWER_OFF		2
-
 #define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
 #define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
 
@@ -71,12 +66,7 @@
 	struct completion video_comp;
 	struct completion irq_comp;
 
-	bool hpd_irq_on;
-	bool power_on;
-	bool sink_info_read;
-	bool cont_splash;
 	bool psm_enabled;
-	bool initialized;
 	bool orientation;
 
 	u32 pixel_rate;
@@ -103,14 +93,6 @@
 	complete(&ctrl->video_comp);
 }
 
-static void dp_ctrl_set_sink_power_state(struct dp_ctrl_private *ctrl,
-		u8 power_state)
-{
-	const int len = 1;
-
-	ctrl->aux->write(ctrl->aux, 0x600, len, AUX_NATIVE, &power_state);
-}
-
 static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
 {
 	ctrl->catalog->state_ctrl(ctrl->catalog, state);
@@ -128,7 +110,7 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_OFF);
+	drm_dp_link_power_down(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
 
 	reinit_completion(&ctrl->idle_comp);
 	dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
@@ -143,12 +125,13 @@
 static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
 {
 	u32 config = 0, tbd;
+	u8 *dpcd = ctrl->panel->dpcd;
 
 	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
 	config |= (0 << 11); /* RGB */
 
 	/* Scrambler reset enable */
-	if (ctrl->panel->dpcd.scrambler_reset)
+	if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
 		config |= (1 << 10);
 
 	tbd = ctrl->link->get_test_bits_depth(ctrl->link,
@@ -158,7 +141,7 @@
 	/* Num of Lanes */
 	config |= ((ctrl->link->lane_count - 1) << 4);
 
-	if (ctrl->panel->dpcd.enhanced_frame)
+	if (drm_dp_enhanced_frame_cap(dpcd))
 		config |= 0x40;
 
 	config |= 0x04; /* progressive video */
@@ -327,7 +310,7 @@
 	even_distribution = 0;
 	min_hblank = 0;
 
-	lclk = link_rate * DP_LINK_RATE_MULTIPLIER;
+	lclk = drm_dp_bw_code_to_link_rate(link_rate) * DP_KHZ_TO_HZ;
 
 	pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
 						pclk, lwidth, h_blank);
@@ -724,9 +707,6 @@
 {
 	int ret = 0;
 
-	if (ctrl->cont_splash)
-		return ret;
-
 	ret = wait_for_completion_timeout(&ctrl->video_comp, HZ / 2);
 	if (ret <= 0) {
 		pr_err("Link Train timedout\n");
@@ -763,7 +743,7 @@
 		buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
 
 	pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
-	return ctrl->aux->write(ctrl->aux, 0x103, 4, AUX_NATIVE, buf);
+	return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
 }
 
 static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
@@ -778,25 +758,6 @@
 	dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
 }
 
-static void dp_ctrl_cap_lane_rate_set(struct dp_ctrl_private *ctrl)
-{
-	u8 buf[4];
-	struct dp_panel_dpcd *cap;
-
-	cap = &ctrl->panel->dpcd;
-
-	pr_debug("bw=%x lane=%d\n", ctrl->link->link_rate,
-		ctrl->link->lane_count);
-
-	buf[0] = ctrl->link->link_rate;
-	buf[1] = ctrl->link->lane_count;
-
-	if (cap->enhanced_frame)
-		buf[1] |= 0x80;
-
-	ctrl->aux->write(ctrl->aux, 0x100, 2, AUX_NATIVE, buf);
-}
-
 static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
 		u8 pattern)
 {
@@ -805,33 +766,39 @@
 	pr_debug("pattern=%x\n", pattern);
 
 	buf[0] = pattern;
-	ctrl->aux->write(ctrl->aux, 0x102, 1, AUX_NATIVE, buf);
+	drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
 }
 
 static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
 {
-	int tries, old_v_level;
-	int ret = 0;
-	int usleep_time;
+	int tries, old_v_level, ret = 0, len = 0;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 	int const maximum_retries = 5;
 
 	dp_ctrl_state_ctrl(ctrl, 0);
-
 	/* Make sure to clear the current pattern before starting a new one */
 	wmb();
 
 	ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
-	dp_ctrl_cap_lane_rate_set(ctrl);
-	dp_ctrl_train_pattern_set(ctrl, 0x21); /* train_1 */
+	dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+		DP_RECOVERED_CLOCK_OUT_EN); /* train_1 */
 	dp_ctrl_update_vx_px(ctrl);
 
 	tries = 0;
 	old_v_level = ctrl->link->v_level;
 	while (1) {
-		usleep_time = ctrl->panel->dpcd.training_read_interval;
-		usleep_range(usleep_time, usleep_time * 2);
+		drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
 
-		if (ctrl->link->clock_recovery(ctrl->link)) {
+		len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+			link_status);
+		if (len < DP_LINK_STATUS_SIZE) {
+			pr_err("[%s]: DP link status read failed\n", __func__);
+			ret = -1;
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status,
+			ctrl->link->lane_count)) {
 			ret = 0;
 			break;
 		}
@@ -852,8 +819,7 @@
 			old_v_level = ctrl->link->v_level;
 		}
 
-		ctrl->link->adjust_levels(ctrl->link);
-
+		ctrl->link->adjust_levels(ctrl->link, link_status);
 		dp_ctrl_update_vx_px(ctrl);
 	}
 
@@ -869,15 +835,15 @@
 
 	switch (ctrl->link->link_rate) {
 	case DP_LINK_RATE_810:
-		ctrl->link->link_rate = DP_LINK_RATE_540;
+		ctrl->link->link_rate = DP_LINK_BW_5_4;
 		break;
-	case DP_LINK_RATE_540:
-		ctrl->link->link_rate = DP_LINK_RATE_270;
+	case DP_LINK_BW_5_4:
+		ctrl->link->link_rate = DP_LINK_BW_2_7;
 		break;
-	case DP_LINK_RATE_270:
-		ctrl->link->link_rate = DP_LINK_RATE_162;
+	case DP_LINK_BW_2_7:
+		ctrl->link->link_rate = DP_LINK_BW_1_62;
 		break;
-	case DP_LINK_RATE_162:
+	case DP_LINK_BW_1_62:
 	default:
 		ret = -EINVAL;
 		break;
@@ -890,36 +856,38 @@
 
 static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
 {
-	int usleep_time;
-
 	dp_ctrl_train_pattern_set(ctrl, 0);
-
-	usleep_time = ctrl->panel->dpcd.training_read_interval;
-	usleep_range(usleep_time, usleep_time * 2);
+	drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
 }
 
 static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
 {
-	int tries = 0;
-	int ret = 0;
-	int usleep_time;
+	int tries = 0, ret = 0, len = 0;
 	char pattern;
 	int const maximum_retries = 5;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 
-	if (ctrl->panel->dpcd.flags & DPCD_TPS3)
-		pattern = 0x03;
+	if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+		pattern = DP_TRAINING_PATTERN_3;
 	else
-		pattern = 0x02;
+		pattern = DP_TRAINING_PATTERN_2;
 
 	dp_ctrl_update_vx_px(ctrl);
 	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
-	dp_ctrl_train_pattern_set(ctrl, pattern | 0x20);
+	dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
 
 	do  {
-		usleep_time = ctrl->panel->dpcd.training_read_interval;
-		usleep_range(usleep_time, usleep_time * 2);
+		drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
 
-		if (ctrl->link->channel_equalization(ctrl->link)) {
+		len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+			link_status);
+		if (len < DP_LINK_STATUS_SIZE) {
+			pr_err("[%s]: DP link status read failed\n", __func__);
+			ret = -1;
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count)) {
 			ret = 0;
 			break;
 		}
@@ -930,8 +898,7 @@
 		}
 		tries++;
 
-		ctrl->link->adjust_levels(ctrl->link);
-
+		ctrl->link->adjust_levels(ctrl->link, link_status);
 		dp_ctrl_update_vx_px(ctrl);
 	} while (1);
 
@@ -941,12 +908,7 @@
 static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
 {
 	int ret = 0;
-
-	ret = ctrl->aux->ready(ctrl->aux);
-	if (!ret) {
-		pr_err("aux chan NOT ready\n");
-		return ret;
-	}
+	struct drm_dp_link dp_link;
 
 	ctrl->link->p_level = 0;
 	ctrl->link->v_level = 0;
@@ -954,6 +916,11 @@
 	dp_ctrl_config_ctrl(ctrl);
 	dp_ctrl_state_ctrl(ctrl, 0);
 
+	dp_link.num_lanes = ctrl->link->lane_count;
+	dp_link.rate = ctrl->link->link_rate;
+	dp_link.capabilities = ctrl->panel->dp_link.capabilities;
+	drm_dp_link_configure(ctrl->aux->drm_aux, &dp_link);
+
 	ret = dp_ctrl_link_train_1(ctrl);
 	if (ret < 0) {
 		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
@@ -1007,7 +974,7 @@
 
 	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
 
-	dp_ctrl_set_sink_power_state(ctrl, SINK_POWER_ON);
+	drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
 
 	if (ctrl->link->phy_pattern_requested(ctrl->link))
 		goto end;
@@ -1065,8 +1032,7 @@
 	ctrl->power->set_pixel_clk_parent(ctrl->power);
 
 	dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
-		(ctrl->link->link_rate * DP_LINK_RATE_MULTIPLIER) /
-			DP_KHZ_TO_HZ);
+		drm_dp_bw_code_to_link_rate(ctrl->link->link_rate));
 
 	dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
 
@@ -1098,11 +1064,6 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	if (ctrl->initialized) {
-		pr_debug("host init done already\n");
-		return 0;
-	}
-
 	ctrl->orientation = flip;
 	catalog = ctrl->catalog;
 
@@ -1110,8 +1071,6 @@
 	catalog->phy_reset(ctrl->catalog);
 	catalog->enable_irq(ctrl->catalog, true);
 
-	ctrl->initialized = true;
-
 	return 0;
 }
 
@@ -1133,11 +1092,6 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	if (!ctrl->initialized) {
-		pr_debug("host deinit done already\n");
-		return;
-	}
-
 	ctrl->catalog->enable_irq(ctrl->catalog, false);
 	ctrl->catalog->reset(ctrl->catalog);
 
@@ -1146,7 +1100,6 @@
 
 	dp_ctrl_disable_mainlink_clocks(ctrl);
 
-	ctrl->initialized = false;
 	pr_debug("Host deinitialized successfully\n");
 }
 
@@ -1180,8 +1133,6 @@
 
 		reinit_completion(&ctrl->idle_comp);
 
-		ctrl->power_on = true;
-
 		if (ctrl->psm_enabled) {
 			ret = ctrl->link->send_psm_request(ctrl->link, false);
 			if (ret) {
@@ -1201,14 +1152,11 @@
 {
 	int ret = 0;
 
-	if (ctrl->cont_splash)
-		goto link_training;
-
 	ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
 	ctrl->catalog->hpd_config(ctrl->catalog, true);
 
 	ctrl->link->link_rate  = ctrl->panel->get_link_rate(ctrl->panel);
-	ctrl->link->lane_count = ctrl->panel->dpcd.max_lane_count;
+	ctrl->link->lane_count = ctrl->panel->dp_link.num_lanes;
 	ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
 
 	pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
@@ -1228,28 +1176,18 @@
 
 	if (ctrl->psm_enabled)
 		ret = ctrl->link->send_psm_request(ctrl->link, false);
-link_training:
-	ctrl->power_on = true;
 
 	while (-EAGAIN == dp_ctrl_setup_main_link(ctrl, true))
 		pr_debug("MAIN LINK TRAINING RETRY\n");
 
-	ctrl->cont_splash = 0;
-
-	ctrl->power_on = true;
 	pr_debug("End-\n");
 
 exit:
 	return ret;
 }
 
-static int dp_ctrl_off_irq(struct dp_ctrl_private *ctrl)
+static void dp_ctrl_off_irq(struct dp_ctrl_private *ctrl)
 {
-	if (!ctrl->power_on) {
-		pr_debug("ctrl already powered off\n");
-		return 0;
-	}
-
 	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
 
 	/* Make sure DP mainlink and audio engines are disabled */
@@ -1257,28 +1195,15 @@
 
 	complete_all(&ctrl->irq_comp);
 	pr_debug("end\n");
-
-	return 0;
 }
 
-static int dp_ctrl_off_hpd(struct dp_ctrl_private *ctrl)
+static void dp_ctrl_off_hpd(struct dp_ctrl_private *ctrl)
 {
-	if (!ctrl->power_on) {
-		pr_debug("panel already powered off\n");
-		return 0;
-	}
-
 	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
-
-	ctrl->power_on = false;
-	ctrl->sink_info_read = false;
-
 	pr_debug("DP off done\n");
-
-	return 0;
 }
 
-static int dp_ctrl_on(struct dp_ctrl *dp_ctrl)
+static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool hpd_irq)
 {
 	int rc = 0;
 	struct dp_ctrl_private *ctrl;
@@ -1290,7 +1215,7 @@
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	if (ctrl->hpd_irq_on)
+	if (hpd_irq)
 		rc = dp_ctrl_on_irq(ctrl, false);
 	else
 		rc = dp_ctrl_on_hpd(ctrl);
@@ -1298,24 +1223,19 @@
 	return rc;
 }
 
-static int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+static void dp_ctrl_off(struct dp_ctrl *dp_ctrl, bool hpd_irq)
 {
-	int rc = 0;
 	struct dp_ctrl_private *ctrl;
 
-	if (!dp_ctrl) {
-		rc = -EINVAL;
-		goto end;
-	}
+	if (!dp_ctrl)
+		return;
 
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
-	if (ctrl->hpd_irq_on)
-		rc = dp_ctrl_off_irq(ctrl);
+	if (hpd_irq)
+		dp_ctrl_off_irq(ctrl);
 	else
-		rc = dp_ctrl_off_hpd(ctrl);
-end:
-	return rc;
+		dp_ctrl_off_hpd(ctrl);
 }
 
 static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 5efe505..474e0ad 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -25,8 +25,8 @@
 struct dp_ctrl {
 	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
 	void (*deinit)(struct dp_ctrl *dp_ctrl);
-	int (*on)(struct dp_ctrl *dp_ctrl);
-	int (*off)(struct dp_ctrl *dp_ctrl);
+	int (*on)(struct dp_ctrl *dp_ctrl, bool hpd_irq);
+	void (*off)(struct dp_ctrl *dp_ctrl, bool hpd_irq);
 	void (*push_idle)(struct dp_ctrl *dp_ctrl);
 	void (*isr)(struct dp_ctrl *dp_ctrl);
 };
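The controller now stores DP_LINK_BW_* codes instead of raw multiples of 27 MHz, so the clock programming in the on() path relies on the drm helper to convert a bandwidth code into a link rate in kHz. A small sketch of that conversion (assumed standard drm_dp_helper API, not part of the patch):

#include <drm/drm_dp_helper.h>

static unsigned long dp_link_clk_rate_khz(u8 bw_code)
{
	/*
	 * DP_LINK_BW_1_62 -> 162000, DP_LINK_BW_2_7 -> 270000,
	 * DP_LINK_BW_5_4 -> 540000 (all in kHz).
	 */
	return drm_dp_bw_code_to_link_rate(bw_code);
}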
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 850acbf..a3c6f58 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -38,6 +38,11 @@
 	char *name;
 	int irq;
 
+	/* state variables */
+	bool core_initialized;
+	bool power_on;
+	bool hpd_irq_on;
+
 	struct platform_device *pdev;
 	struct dentry *root;
 	struct mutex lock;
@@ -195,6 +200,18 @@
 		goto end;
 	}
 
+	rc = dp->aux->drm_aux_register(dp->aux);
+	if (rc) {
+		pr_err("DRM DP AUX register failed\n");
+		goto end;
+	}
+
+	rc = dp->panel->sde_edid_register(dp->panel);
+	if (rc) {
+		pr_err("DRM DP EDID register failed\n");
+		goto end;
+	}
+
 	rc = dp->power->power_client_init(dp->power, &priv->phandle);
 	if (rc) {
 		pr_err("Power client create failed\n");
@@ -227,6 +244,10 @@
 
 	(void)dp->power->power_client_deinit(dp->power);
 
+	(void) dp->panel->sde_edid_deregister(dp->panel);
+
+	(void) dp->aux->drm_aux_deregister(dp->aux);
+
 	(void)dp_display_debugfs_deinit(dp);
 
 	mutex_unlock(&dp->lock);
@@ -239,30 +260,63 @@
 
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
-	int rc;
+	int rc = 0;
 
 	rc = dp->panel->read_dpcd(dp->panel);
 	if (rc)
-		goto end;
+		return rc;
 
-	rc = dp->panel->read_edid(dp->panel);
-	if (rc)
-		goto end;
+	sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
+		(void **)&dp->panel->edid_ctrl);
 
-	return 0;
-end:
+	dp->dp_display.is_connected = true;
+	drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+
 	return rc;
 }
 
-static int dp_display_process_hpd_low(struct dp_display_private *dp)
+static void dp_display_host_init(struct dp_display_private *dp)
 {
-	return 0;
+	bool flip = false;
+
+	if (dp->core_initialized) {
+		pr_debug("DP core already initialized\n");
+		return;
+	}
+
+	if (dp->usbpd->orientation == ORIENTATION_CC2)
+		flip = true;
+
+	dp->power->init(dp->power, flip);
+	dp->ctrl->init(dp->ctrl, flip);
+	dp->aux->init(dp->aux, dp->parser->aux_cfg);
+	enable_irq(dp->irq);
+	dp->core_initialized = true;
+}
+
+static void dp_display_host_deinit(struct dp_display_private *dp)
+{
+	if (!dp->core_initialized) {
+		pr_debug("DP core already off\n");
+		return;
+	}
+
+	dp->aux->deinit(dp->aux);
+	dp->ctrl->deinit(dp->ctrl);
+	dp->power->deinit(dp->power);
+	disable_irq(dp->irq);
+	dp->core_initialized = false;
+}
+
+static void dp_display_process_hpd_low(struct dp_display_private *dp)
+{
+	dp->dp_display.is_connected = false;
+	drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
 }
 
 static int dp_display_usbpd_configure_cb(struct device *dev)
 {
 	int rc = 0;
-	bool flip = false;
 	struct dp_display_private *dp;
 
 	if (!dev) {
@@ -279,18 +333,9 @@
 	}
 
 	mutex_lock(&dp->lock);
-
-	if (dp->usbpd->orientation == ORIENTATION_CC2)
-		flip = true;
-
-	dp->power->init(dp->power, flip);
-	dp->ctrl->init(dp->ctrl, flip);
-	dp->aux->init(dp->aux, dp->parser->aux_cfg);
-	enable_irq(dp->irq);
-
+	dp_display_host_init(dp);
 	if (dp->usbpd->hpd_high)
 		dp_display_process_hpd_high(dp);
-
 	mutex_unlock(&dp->lock);
 end:
 	return rc;
@@ -315,9 +360,23 @@
 	}
 
 	mutex_lock(&dp->lock);
-	disable_irq(dp->irq);
-	mutex_unlock(&dp->lock);
 
+	dp->dp_display.is_connected = false;
+	drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+
+	/*
+	 * If a cable/dongle is connected to the TX device but
+	 * no sink device is connected, we call host
+	 * initialization where orientation settings are
+	 * configured. When the cable/dongle is disconnected,
+	 * call host de-initialization to make sure
+	 * we re-configure the orientation settings during
+	 * the next connect event.
+	 */
+	if (!dp->power_on && dp->core_initialized)
+		dp_display_host_deinit(dp);
+
+	mutex_unlock(&dp->lock);
 end:
 	return rc;
 }
@@ -329,31 +388,36 @@
 
 	if (!dev) {
 		pr_err("invalid dev\n");
-		rc = -EINVAL;
-		goto end;
+		return -EINVAL;
 	}
 
 	dp = dev_get_drvdata(dev);
 	if (!dp) {
 		pr_err("no driver data found\n");
-		rc = -ENODEV;
-		goto end;
+		return -ENODEV;
 	}
 
 	mutex_lock(&dp->lock);
 
 	if (dp->usbpd->hpd_irq) {
-		if (!dp->link->process_request(dp->link))
+		dp->hpd_irq_on = true;
+		rc = dp->link->process_request(dp->link);
+		dp->hpd_irq_on = false;
+		if (!rc)
 			goto end;
 	}
 
-	if (dp->usbpd->hpd_high)
-		dp_display_process_hpd_high(dp);
-	else
+	if (!dp->usbpd->hpd_high) {
 		dp_display_process_hpd_low(dp);
+		goto end;
+	}
 
-	mutex_unlock(&dp->lock);
+	if (dp->usbpd->alt_mode_cfg_done) {
+		dp_display_host_init(dp);
+		dp_display_process_hpd_high(dp);
+	}
 end:
+	mutex_unlock(&dp->lock);
 	return rc;
 }
 
@@ -474,7 +538,9 @@
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
 	mutex_lock(&dp->lock);
-	dp->ctrl->on(dp->ctrl);
+	rc = dp->ctrl->on(dp->ctrl, dp->hpd_irq_on);
+	if (!rc)
+		dp->power_on = true;
 	mutex_unlock(&dp->lock);
 error:
 	return rc;
@@ -499,9 +565,7 @@
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
 	mutex_lock(&dp->lock);
-
-	dp->ctrl->off(dp->ctrl);
-
+	dp->ctrl->push_idle(dp->ctrl);
 	mutex_unlock(&dp->lock);
 error:
 	return rc;
@@ -521,11 +585,9 @@
 	dp = container_of(dp_display, struct dp_display_private, dp_display);
 
 	mutex_lock(&dp->lock);
-
-	dp->aux->deinit(dp->aux);
-	dp->ctrl->deinit(dp->ctrl);
-	dp->power->deinit(dp->power);
-
+	dp->ctrl->off(dp->ctrl, dp->hpd_irq_on);
+	dp_display_host_deinit(dp);
+	dp->power_on = false;
 	mutex_unlock(&dp->lock);
 error:
 	return rc;
@@ -573,33 +635,17 @@
 	return 0;
 }
 
-static int dp_display_get_modes(struct dp_display *dp,
-	struct dp_display_mode *modes, u32 *count)
+static int dp_display_get_modes(struct dp_display *dp)
 {
-	*count = 1;
+	int ret = 0;
+	struct dp_display_private *dp_display;
 
-	if (modes) {
-		modes->timing.h_active = 1920;
-		modes->timing.v_active = 1080;
-		modes->timing.h_back_porch = 148;
-		modes->timing.h_front_porch = 88;
-		modes->timing.h_sync_width = 44;
-		modes->timing.h_active_low = 0;
-		modes->timing.v_back_porch = 36;
-		modes->timing.v_front_porch = 4;
-		modes->timing.v_sync_width = 5;
-		modes->timing.v_active_low = 0;
-		modes->timing.h_skew = 0;
-		modes->timing.refresh_rate = 60;
-		modes->timing.pixel_clk_khz = 148500;
-	}
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
 
-	return 0;
-}
+	ret = _sde_edid_update_modes(dp->connector,
+		dp_display->panel->edid_ctrl);
 
-static int dp_display_detect(struct dp_display *dp)
-{
-	return 0;
+	return ret;
 }
 
 static int dp_display_probe(struct platform_device *pdev)
@@ -637,7 +683,6 @@
 	g_dp_display->set_mode      = dp_display_set_mode;
 	g_dp_display->validate_mode = dp_display_validate_mode;
 	g_dp_display->get_modes     = dp_display_get_modes;
-	g_dp_display->detect        = dp_display_detect;
 	g_dp_display->prepare       = dp_display_prepare;
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
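The dp_display changes above consolidate bring-up into guarded dp_display_host_init()/dp_display_host_deinit() helpers tracked by the new core_initialized and power_on flags, so a dongle-only connect can be unwound cleanly on disconnect. A minimal standalone sketch of that state handling is shown below; the struct and helper names are illustrative stand-ins, not driver symbols.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's private state flags. */
struct dp_state {
	bool core_initialized;
	bool power_on;
};

static void host_init(struct dp_state *dp)
{
	if (dp->core_initialized)
		return;		/* guarded, like dp_display_host_init() */
	/* power/ctrl/aux init would happen here */
	dp->core_initialized = true;
}

static void host_deinit(struct dp_state *dp)
{
	if (!dp->core_initialized)
		return;
	dp->core_initialized = false;
}

static void on_disconnect(struct dp_state *dp)
{
	/*
	 * Mirror of the disconnect callback: if the host was brought up
	 * for a cable/dongle but the panel never powered on, tear the
	 * host down so orientation is re-evaluated on the next connect.
	 */
	if (!dp->power_on && dp->core_initialized)
		host_deinit(dp);
}

int main(void)
{
	struct dp_state dp = { 0 };

	host_init(&dp);		/* cable plugged, no sink powered */
	on_disconnect(&dp);	/* cable removed before power-on */
	printf("core_initialized=%d power_on=%d\n",
	       dp.core_initialized, dp.power_on);
	return 0;
}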
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index e684854..877287a 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -27,6 +27,8 @@
 struct dp_display {
 	struct drm_device *drm_dev;
 	struct dp_bridge *bridge;
+	struct drm_connector *connector;
+	bool is_connected;
 
 	int (*enable)(struct dp_display *dp_display);
 	int (*post_enable)(struct dp_display *dp_display);
@@ -38,11 +40,7 @@
 			struct dp_display_mode *mode);
 	int (*validate_mode)(struct dp_display *dp_display,
 			struct dp_display_mode *mode);
-	int (*get_modes)(struct dp_display *dp_display,
-		struct dp_display_mode *modes, u32 *count);
-
-	int (*detect)(struct dp_display *dp_display);
-
+	int (*get_modes)(struct dp_display *dp_display);
 	int (*prepare)(struct dp_display *dp_display);
 	int (*unprepare)(struct dp_display *dp_display);
 	int (*request_irq)(struct dp_display *dp_display);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 0f6e36f..78c04c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -26,8 +26,10 @@
 #define to_dp_bridge(x)     container_of((x), struct dp_bridge, base)
 
 static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
-				struct dp_display_mode *dp_mode)
+			struct dp_display_mode *dp_mode, struct dp_display *dp)
 {
+	const u32 num_components = 3;
+
 	memset(dp_mode, 0, sizeof(*dp_mode));
 
 	dp_mode->timing.h_active = drm_mode->hdisplay;
@@ -45,6 +47,7 @@
 
 	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
 					 drm_mode->vdisplay;
+	dp_mode->timing.bpp = dp->connector->display_info.bpc * num_components;
 
 	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
 
@@ -235,7 +238,7 @@
 	dp = bridge->display;
 
 	memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
-	convert_to_dp_mode(adjusted_mode, &bridge->dp_mode);
+	convert_to_dp_mode(adjusted_mode, &bridge->dp_mode, dp);
 }
 
 static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
@@ -257,7 +260,7 @@
 	bridge = to_dp_bridge(drm_bridge);
 	dp = bridge->display;
 
-	convert_to_dp_mode(mode, &dp_mode);
+	convert_to_dp_mode(mode, &dp_mode, dp);
 
 	rc = dp->validate_mode(dp, &dp_mode);
 	if (rc) {
@@ -289,6 +292,7 @@
 	if (!info || !dp_display)
 		return -EINVAL;
 
+	dp_display->connector = connector;
 	return 0;
 }
 
@@ -315,7 +319,7 @@
 
 int dp_connector_get_info(struct msm_display_info *info, void *data)
 {
-	struct dsi_display *display = data;
+	struct dp_display *display = data;
 
 	if (!info || !display) {
 		pr_err("invalid params\n");
@@ -326,17 +330,10 @@
 
 	info->num_of_h_tiles = 1;
 	info->h_tile_instance[0] = 0;
-
-	info->is_connected = true;
-	info->frame_rate = 60;
-	info->width_mm = 160;
-	info->height_mm = 90;
-	info->max_width = 1920;
-	info->max_height = 1080;
-	info->vtotal = 1125;
-	info->is_primary = true;
+	info->is_connected = display->is_connected;
 	info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
-	info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+	info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
+		MSM_DISPLAY_CAP_HOT_PLUG;
 
 	return 0;
 }
@@ -375,60 +372,23 @@
 int dp_connector_get_modes(struct drm_connector *connector,
 		void *display)
 {
-	u32 count = 0;
-	u32 size = 0;
-	struct dp_display_mode *modes;
-	struct drm_display_mode drm_mode;
+	int rc = 0;
 	struct dp_display *dp;
-	int rc, i;
 
-	if (!connector || !display || sde_connector_get_panel(connector))
-		goto end;
+	if (!connector || !display)
+		return -EINVAL;
 
 	dp = display;
-
-	rc = dp->get_modes(dp, NULL, &count);
-	if (rc) {
-		pr_err("failed to get num of modes, rc=%d\n", rc);
-		goto end;
+	/* pluggable case assumes EDID is read when HPD is asserted */
+	if (dp->is_connected) {
+		rc = dp->get_modes(dp);
+		if (!rc)
+			pr_err("failed to get DP sink modes, rc=%d\n", rc);
+	} else {
+		pr_err("No sink connected\n");
 	}
 
-	size = count * sizeof(*modes);
-	modes = kzalloc(size,  GFP_KERNEL);
-	if (!modes) {
-		count = 0;
-		goto end;
-	}
-
-	rc = dp->get_modes(dp, modes, &count);
-	if (rc) {
-		pr_err("failed to get modes, rc=%d\n", rc);
-		count = 0;
-		goto error;
-	}
-
-	for (i = 0; i < count; i++) {
-		struct drm_display_mode *m;
-
-		memset(&drm_mode, 0x0, sizeof(drm_mode));
-		convert_to_drm_mode(&modes[i], &drm_mode);
-		m = drm_mode_duplicate(connector->dev, &drm_mode);
-		if (!m) {
-			pr_err("failed to add mode %ux%u\n",
-			       drm_mode.hdisplay,
-			       drm_mode.vdisplay);
-			count = -ENOMEM;
-			goto error;
-		}
-		m->width_mm = connector->display_info.width_mm;
-		m->height_mm = connector->display_info.height_mm;
-		drm_mode_probed_add(connector, m);
-	}
-error:
-	kfree(modes);
-end:
-	pr_debug("MODE COUNT =%d\n\n", count);
-	return count;
+	return 0;
 }
 
 int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
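convert_to_dp_mode() now derives timing.bpp from the connector's reported bits per component instead of assuming a fixed value. A tiny worked example of that calculation (the bpc value is hypothetical):

#include <stdio.h>

int main(void)
{
	const unsigned int num_components = 3;	/* RGB */
	unsigned int bpc = 8;			/* e.g. display_info.bpc from the sink */
	unsigned int bpp = bpc * num_components;

	printf("bpc=%u -> bpp=%u\n", bpc, bpp);	/* 8 -> 24 */
	return 0;
}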
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index e9955a9..741acfca 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -114,18 +114,6 @@
 	bool cp_ready;
 };
 
-struct dp_link_status {
-	u8 lane_01_status;
-	u8 lane_23_status;
-	u8 interlane_align_done;
-	u8 downstream_port_status_changed;
-	u8 link_status_updated;
-	u8 port_0_in_sync;
-	u8 port_1_in_sync;
-	u8 req_voltage_swing[4];
-	u8 req_pre_emphasis[4];
-};
-
 struct dp_link_private {
 	struct device *dev;
 	struct dp_aux *aux;
@@ -133,7 +121,7 @@
 
 	struct dp_link_request request;
 	struct dp_link_sink_count sink_count;
-	struct dp_link_status link_status;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 };
 
 /**
@@ -232,13 +220,12 @@
 	int ret = 0;
 	u8 *bp;
 	u8 data;
-	int rlen;
 	u32 const param_len = 0x1;
 	u32 const max_audio_period = 0xA;
 
 	/* TEST_AUDIO_PERIOD_CH_XX */
-	rlen = link->aux->read(link->aux, addr, param_len, AUX_NATIVE, &bp);
-	if (rlen < param_len) {
+	if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp,
+		param_len) < param_len) {
 		pr_err("failed to read test_audio_period (0x%x)\n", addr);
 		ret = -EINVAL;
 		goto exit;
@@ -350,8 +337,8 @@
 	int const max_audio_pattern_type = 0x1;
 
 	/* Read the requested audio pattern type (Byte 0x272). */
-	rlen = link->aux->read(link->aux, test_audio_pattern_type_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+		test_audio_pattern_type_addr, &bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read link audio mode data\n");
 		ret = -EINVAL;
@@ -387,8 +374,8 @@
 	int channel_count = 0x0;
 
 	/* Read the requested audio mode (Byte 0x271). */
-	rlen = link->aux->read(link->aux, test_audio_mode_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_audio_mode_addr,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read link audio mode data\n");
 		ret = -EINVAL;
@@ -555,7 +542,7 @@
 		return -EINVAL;
 
 	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
 	if (rlen < len) {
 		pr_err("failed to read 0x%x\n", addr);
 		return -EINVAL;
@@ -576,7 +563,7 @@
 		return -EINVAL;
 
 	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
 	if (rlen < len) {
 		pr_err("failed to read 0x%x\n", addr);
 		return -EINVAL;
@@ -596,7 +583,7 @@
 	int rlen;
 
 	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
 	if (rlen < 1) {
 		pr_err("failed to read 0x%x\n", addr);
 		return -EINVAL;
@@ -625,8 +612,8 @@
 	int const test_misc_addr = 0x232;
 
 	/* Read the requested video link pattern (Byte 0x221). */
-	rlen = link->aux->read(link->aux, test_video_pattern_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_video_pattern_addr,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read link video pattern\n");
 		ret = -EINVAL;
@@ -647,8 +634,8 @@
 			link->request.test_video_pattern));
 
 	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
-	rlen = link->aux->read(link->aux, test_misc_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_misc_addr,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read link bit depth\n");
 		ret = -EINVAL;
@@ -780,9 +767,9 @@
  */
 static bool dp_link_is_link_rate_valid(u32 link_rate)
 {
-	return ((link_rate == DP_LINK_RATE_162) ||
-		(link_rate == DP_LINK_RATE_270) ||
-		(link_rate == DP_LINK_RATE_540) ||
+	return ((link_rate == DP_LINK_BW_1_62) ||
+		(link_rate == DP_LINK_BW_2_7) ||
+		(link_rate == DP_LINK_BW_5_4) ||
 		(link_rate == DP_LINK_RATE_810));
 }
 
@@ -814,12 +801,10 @@
 	int ret = 0;
 	int rlen;
 	int const param_len = 0x1;
-	int const test_link_rate_addr = 0x219;
-	int const test_lane_count_addr = 0x220;
 
 	/* Read the requested link rate (Byte 0x219). */
-	rlen = link->aux->read(link->aux, test_link_rate_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read link rate\n");
 		ret = -EINVAL;
@@ -837,8 +822,8 @@
 	pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
 
 	/* Read the requested lane count (Byte 0x220). */
-	rlen = link->aux->read(link->aux, test_lane_count_addr,
-			param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read lane count\n");
 		ret = -EINVAL;
@@ -890,8 +875,8 @@
 	int const phy_test_pattern_addr = 0x248;
 	int ret = 0;
 
-	rlen = link->aux->read(link->aux, phy_test_pattern_addr,
-				param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, phy_test_pattern_addr,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read phy link pattern\n");
 		ret = -EINVAL;
@@ -965,16 +950,14 @@
 	u8 data;
 	int rlen;
 	u32 const param_len = 0x1;
-	u32 const device_service_irq_addr = 0x201;
-	u32 const test_request_addr = 0x218;
 	u8 buf[4];
 
 	/**
 	 * Read the device service IRQ vector (Byte 0x201) to determine
 	 * whether an automated link has been requested by the sink.
 	 */
-	rlen = link->aux->read(link->aux, device_service_irq_addr,
-				param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+		DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len);
 	if (rlen < param_len) {
 		pr_err("aux read failed\n");
 		ret = -EINVAL;
@@ -994,8 +977,8 @@
 	 * Read the link request byte (Byte 0x218) to determine what type
 	 * of automated link has been requested by the sink.
 	 */
-	rlen = link->aux->read(link->aux, test_request_addr,
-				param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("aux read failed\n");
 		ret = -EINVAL;
@@ -1033,7 +1016,7 @@
 end:
 	/* clear the link request IRQ */
 	buf[0] = 1;
-	link->aux->write(link->aux, test_request_addr, 1, AUX_NATIVE, buf);
+	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_REQUEST, buf, 1);
 
 	/**
 	 * Send a TEST_ACK if all link parameters are valid, otherwise send
@@ -1060,10 +1043,9 @@
 	u8 data;
 	int rlen;
 	int const param_len = 0x1;
-	int const sink_count_addr = 0x200;
 
-	rlen = link->aux->read(link->aux, sink_count_addr,
-				param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT,
+			&bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed to read sink count\n");
 		return;
@@ -1080,67 +1062,16 @@
 		link->sink_count.count, link->sink_count.cp_ready);
 }
 
-static int dp_link_link_status_read(struct dp_link_private *link)
-{
-	u8 *bp;
-	u8 data;
-	int rlen, ret = 0;
-	int const addr = 0x202;
-	int const len = 6;
-	struct dp_link_status *sp;
-
-	rlen = link->aux->read(link->aux, addr, len, AUX_NATIVE, &bp);
-	if (rlen < len) {
-		pr_err("edp aux read failed\n");
-		ret = -EINVAL;
-		goto error;
-	}
-
-	sp = &link->link_status;
-
-	data = *bp++; /* byte 0x202 */
-	sp->lane_01_status = data; /* lane 0, 1 */
-
-	data = *bp++; /* byte 0x203 */
-	sp->lane_23_status = data; /* lane 2, 3 */
-
-	data = *bp++; /* byte 0x204 */
-	sp->interlane_align_done = (data & BIT(0));
-	sp->downstream_port_status_changed = (data & BIT(6));
-	sp->link_status_updated = (data & BIT(7));
-
-	data = *bp++; /* byte 0x205 */
-	sp->port_0_in_sync = (data & BIT(0));
-	sp->port_1_in_sync = (data & BIT(1));
-
-	data = *bp++; /* byte 0x206 */
-	sp->req_voltage_swing[0] = data & 0x03;
-	data >>= 2;
-	sp->req_pre_emphasis[0] = data & 0x03;
-	data >>= 2;
-	sp->req_voltage_swing[1] = data & 0x03;
-	data >>= 2;
-	sp->req_pre_emphasis[1] = data & 0x03;
-
-	data = *bp++; /* byte 0x207 */
-	sp->req_voltage_swing[2] = data & 0x03;
-	data >>= 2;
-	sp->req_pre_emphasis[2] = data & 0x03;
-	data >>= 2;
-	sp->req_voltage_swing[3] = data & 0x03;
-	data >>= 2;
-	sp->req_pre_emphasis[3] = data & 0x03;
-
-	return 0;
-error:
-	return ret;
-}
-
 static void dp_link_parse_sink_status_field(struct dp_link_private *link)
 {
+	int len = 0;
+
 	dp_link_parse_sink_count(link);
 	dp_link_parse_request(link);
-	dp_link_link_status_read(link);
+	len = drm_dp_dpcd_read_link_status(link->aux->drm_aux,
+		link->link_status);
+	if (len < DP_LINK_STATUS_SIZE)
+		pr_err("DP link status read failed\n");
 }
 
 static bool dp_link_is_link_training_requested(struct dp_link_private *link)
@@ -1196,7 +1127,7 @@
 
 	pr_debug("\n");
 
-	rlen = link->aux->read(link->aux, addr1, param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr1, &bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed reading lanes 0/1\n");
 		ret = -EINVAL;
@@ -1217,7 +1148,7 @@
 	p1 = data & 0x3;
 	data = data >> 2;
 
-	rlen = link->aux->read(link->aux, addr2, param_len, AUX_NATIVE, &bp);
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr2, &bp, param_len);
 	if (rlen < param_len) {
 		pr_err("failed reading lanes 2/3\n");
 		ret = -EINVAL;
@@ -1294,76 +1225,6 @@
 	return 0;
 }
 
-static bool dp_link_is_link_status_updated(struct dp_link_private *link)
-{
-	return link->link_status.link_status_updated;
-}
-
-static bool dp_link_channel_eq_done(struct dp_link_private *link)
-{
-	u32 mask, data;
-	struct dp_link *dp_link = &link->dp_link;
-
-	pr_debug("\n");
-
-	dp_link_link_status_read(link);
-
-	if (!link->link_status.interlane_align_done) { /* not align */
-		pr_err("interlane align failed\n");
-		return 0;
-	}
-
-	if (dp_link->lane_count == 1) {
-		mask = 0x7;
-		data = link->link_status.lane_01_status;
-	} else if (dp_link->lane_count == 2) {
-		mask = 0x77;
-		data = link->link_status.lane_01_status;
-	} else {
-		mask = 0x7777;
-		data = link->link_status.lane_23_status;
-		data <<= 8;
-		data |= link->link_status.lane_01_status;
-	}
-
-	data &= mask;
-	pr_debug("data=%x mask=%x\n", data, mask);
-
-	if (data == mask)/* all done */
-		return true;
-
-	return false;
-}
-
-static bool dp_link_clock_recovery_done(struct dp_link_private *link)
-{
-	u32 mask, data;
-	struct dp_link *dp_link = &link->dp_link;
-
-	dp_link_link_status_read(link);
-
-	if (dp_link->lane_count == 1) {
-		mask = 0x01;	/* lane 0 */
-		data = link->link_status.lane_01_status;
-	} else if (dp_link->lane_count == 2) {
-		mask = 0x011; /*B lane 0, 1 */
-		data = link->link_status.lane_01_status;
-	} else {
-		mask = 0x01111; /*B lane 0, 1 */
-		data = link->link_status.lane_23_status;
-		data <<= 8;
-		data |= link->link_status.lane_01_status;
-	}
-
-	data &= mask;
-	pr_debug("data=%x mask=%x\n", data, mask);
-
-	if (data == mask) /* all done */
-		return true;
-
-	return false;
-}
-
 /**
  * dp_link_process_link_status_update() - processes link status updates
  * @link: Display Port link module data
@@ -1377,21 +1238,25 @@
  */
 static int dp_link_process_link_status_update(struct dp_link_private *link)
 {
-	if (!dp_link_is_link_status_updated(link) ||
-	    (dp_link_channel_eq_done(link) &&
-	     dp_link_clock_recovery_done(link)))
+	if (!(link->link_status[2] & BIT(7)) || /* link status updated */
+		(drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.lane_count) &&
+	     drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.lane_count)))
 		return -EINVAL;
 
 	pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
-			dp_link_channel_eq_done(link),
-			dp_link_clock_recovery_done(link));
+			drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.lane_count),
+			drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.lane_count));
 
 	return 0;
 }
 
 static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
 {
-	return link->link_status.downstream_port_status_changed;
+	return (link->link_status[2] & BIT(6)); /* port status changed */
 }
 
 /**
@@ -1562,37 +1427,6 @@
 	return ret;
 }
 
-static u8 *dp_link_get_voltage_swing(struct dp_link *dp_link)
-
-{
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	return link->link_status.req_voltage_swing;
-}
-
-static u8 *dp_link_get_pre_emphasis(struct dp_link *dp_link)
-
-{
-	struct dp_link_private *link;
-
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	return link->link_status.req_pre_emphasis;
-}
-
 static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
 {
 	u32 cc;
@@ -1625,38 +1459,11 @@
 	return cc;
 }
 
-static bool dp_link_clock_recovery(struct dp_link *dp_link)
-{
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	return dp_link_clock_recovery_done(link);
-}
-
-static bool dp_link_channel_equalization(struct dp_link *dp_link)
-{
-	struct dp_link_private *link;
-
-	if (!dp_link) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	link = container_of(dp_link, struct dp_link_private, dp_link);
-
-	return dp_link_channel_eq_done(link);
-}
-
-static int dp_link_adjust_levels(struct dp_link *dp_link)
+static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
 {
 	int i;
 	int max = 0;
+	u8 data;
 	struct dp_link_private *link;
 
 	if (!dp_link) {
@@ -1668,24 +1475,24 @@
 
 	/* use the max level across lanes */
 	for (i = 0; i < dp_link->lane_count; i++) {
-		pr_debug("lane=%d req_voltage_swing=%d\n",
-			i, link->link_status.req_voltage_swing[i]);
-		if (max < link->link_status.req_voltage_swing[i])
-			max = link->link_status.req_voltage_swing[i];
+		data = drm_dp_get_adjust_request_voltage(link_status, i);
+		pr_debug("lane=%d req_voltage_swing=%d\n", i, data);
+		if (max < data)
+			max = data;
 	}
 
-	dp_link->v_level = max;
+	dp_link->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
 
 	/* use the max level across lanes */
 	max = 0;
 	for (i = 0; i < dp_link->lane_count; i++) {
-		pr_debug("lane=%d req_pre_emphasis=%d\n",
-			i, link->link_status.req_pre_emphasis[i]);
-		if (max < link->link_status.req_pre_emphasis[i])
-			max = link->link_status.req_pre_emphasis[i];
+		data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+		pr_debug("lane=%d req_pre_emphasis=%d\n", i, data);
+		if (max < data)
+			max = data;
 	}
 
-	dp_link->p_level = max;
+	dp_link->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
 
 	/**
 	 * Adjust the voltage swing and pre-emphasis level combination to within
@@ -1781,12 +1588,8 @@
 	dp_link = &link->dp_link;
 
 	dp_link->process_request        = dp_link_process_request;
-	dp_link->get_voltage_swing      = dp_link_get_voltage_swing;
 	dp_link->get_test_bits_depth    = dp_link_get_test_bits_depth;
-	dp_link->get_pre_emphasis       = dp_link_get_pre_emphasis;
 	dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
-	dp_link->clock_recovery         = dp_link_clock_recovery;
-	dp_link->channel_equalization   = dp_link_channel_equalization;
 	dp_link->adjust_levels          = dp_link_adjust_levels;
 	dp_link->send_psm_request       = dp_link_send_psm_request;
 	dp_link->phy_pattern_requested  = dp_link_phy_pattern_requested;
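The dp_link rework stores the raw six-byte DPCD status block (0x202-0x207) and leans on the DRM helpers (drm_dp_dpcd_read_link_status, drm_dp_channel_eq_ok, drm_dp_clock_recovery_ok, drm_dp_get_adjust_request_voltage/_pre_emphasis) instead of the hand-rolled dp_link_status parsing. The standalone sketch below decodes the adjust-request bytes the same way, assuming the standard DPCD layout; the buffer contents are made up, and unlike the kernel helpers it returns the raw 0-3 levels directly rather than pre-shifted DP_TRAIN_* values.

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_STATUS_SIZE	6	/* bytes 0x202..0x207 */

/* Simplified re-implementation of what the DRM adjust-request helpers compute. */
static uint8_t adjust_request_voltage(const uint8_t *status, int lane)
{
	uint8_t byte = status[4 + (lane >> 1)];	/* 0x206 for lanes 0/1, 0x207 for 2/3 */
	int shift = (lane & 1) ? 4 : 0;

	return (byte >> shift) & 0x3;
}

static uint8_t adjust_request_pre_emphasis(const uint8_t *status, int lane)
{
	uint8_t byte = status[4 + (lane >> 1)];
	int shift = (lane & 1) ? 6 : 2;

	return (byte >> shift) & 0x3;
}

int main(void)
{
	/* Made-up status block: every lane requests swing 2 / pre-emphasis 1. */
	uint8_t status[DP_LINK_STATUS_SIZE] = { 0x77, 0x77, 0x81, 0x00, 0x66, 0x66 };
	int lane, lane_count = 4;
	uint8_t v_max = 0, p_max = 0;

	/* take the max level across lanes, as dp_link_adjust_levels() does */
	for (lane = 0; lane < lane_count; lane++) {
		uint8_t v = adjust_request_voltage(status, lane);
		uint8_t p = adjust_request_pre_emphasis(status, lane);

		if (v > v_max)
			v_max = v;
		if (p > p_max)
			p_max = p;
	}
	printf("v_level=%u p_level=%u\n", v_max, p_max);	/* 2, 1 */
	return 0;
}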
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index de10e9a..26249d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -54,15 +54,11 @@
 	u32 v_level;
 	u32 p_level;
 
-	u8 *(*get_voltage_swing)(struct dp_link *dp_link);
-	u8 *(*get_pre_emphasis)(struct dp_link *dp_link);
 	u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
 	int (*process_request)(struct dp_link *dp_link);
 	int (*get_colorimetry_config)(struct dp_link *dp_link);
-	int (*adjust_levels)(struct dp_link *dp_link);
+	int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
 	int (*send_psm_request)(struct dp_link *dp_link, bool req);
-	bool (*clock_recovery)(struct dp_link *dp_link);
-	bool (*channel_equalization)(struct dp_link *dp_link);
 	bool (*phy_pattern_requested)(struct dp_link *dp_link);
 };
 
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index f9616c4..fed1dbb 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -16,7 +16,9 @@
 
 #include "dp_panel.h"
 
-#define DP_LINK_RATE_MULTIPLIER	27000000
+enum {
+	DP_LINK_RATE_MULTIPLIER = 27000000,
+};
 
 struct dp_panel_private {
 	struct device *dev;
@@ -27,13 +29,10 @@
 
 static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
 {
-	u8 *bp;
-	u8 data;
-	u32 const addr = 0x0;
-	u32 const len = 16;
 	int rlen, rc = 0;
 	struct dp_panel_private *panel;
-	struct dp_panel_dpcd *cap;
+	struct drm_dp_link *dp_link;
+	u8 major = 0, minor = 0;
 
 	if (!dp_panel) {
 		pr_err("invalid input\n");
@@ -41,236 +40,38 @@
 		goto end;
 	}
 
-	cap = &dp_panel->dpcd;
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	dp_link = &dp_panel->dp_link;
 
-	rlen = panel->aux->read(panel->aux, addr, len, AUX_NATIVE, &bp);
-	if (rlen != len) {
+	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+		dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
 		pr_err("dpcd read failed, rlen=%d\n", rlen);
 		rc = -EINVAL;
 		goto end;
 	}
 
-	memset(cap, 0, sizeof(*cap));
+	dp_link->revision = dp_panel->dpcd[DP_DPCD_REV];
 
-	data = *bp++; /* byte 0 */
-	cap->major = (data >> 4) & 0x0f;
-	cap->minor = data & 0x0f;
-	pr_debug("version: %d.%d\n", cap->major, cap->minor);
+	major = (dp_link->revision >> 4) & 0x0f;
+	minor = dp_link->revision & 0x0f;
+	pr_debug("version: %d.%d\n", major, minor);
 
-	data = *bp++; /* byte 1 */
-	/* 162, 270, 540, 810 MB, symbol rate, NOT bit rate */
-	cap->max_link_rate = data;
-	pr_debug("link_rate=%d\n", cap->max_link_rate);
+	dp_link->rate =
+		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+	pr_debug("link_rate=%d\n", dp_link->rate);
 
-	data = *bp++; /* byte 2 */
-	if (data & BIT(7))
-		cap->enhanced_frame++;
+	dp_link->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
+			DP_MAX_LANE_COUNT_MASK;
+	pr_debug("lane_count=%d\n", dp_link->num_lanes);
 
-	if (data & 0x40) {
-		cap->flags |=  DPCD_TPS3;
-		pr_debug("pattern 3 supported\n");
-	} else {
-		pr_debug("pattern 3 not supported\n");
-	}
+	if (dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+		dp_link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
 
-	data &= 0x0f;
-	cap->max_lane_count = data;
-	pr_debug("lane_count=%d\n", cap->max_lane_count);
-
-	data = *bp++; /* byte 3 */
-	if (data & BIT(0)) {
-		cap->flags |= DPCD_MAX_DOWNSPREAD_0_5;
-		pr_debug("max_downspread\n");
-	}
-
-	if (data & BIT(6)) {
-		cap->flags |= DPCD_NO_AUX_HANDSHAKE;
-		pr_debug("NO Link Training\n");
-	}
-
-	data = *bp++; /* byte 4 */
-	cap->num_rx_port = (data & BIT(0)) + 1;
-	pr_debug("rx_ports=%d", cap->num_rx_port);
-
-	data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
-	cap->downstream_port.dfp_present = data & BIT(0);
-	cap->downstream_port.dfp_type = data & 0x6;
-	cap->downstream_port.format_conversion = data & BIT(3);
-	cap->downstream_port.detailed_cap_info_available = data & BIT(4);
-	pr_debug("dfp_present = %d, dfp_type = %d\n",
-			cap->downstream_port.dfp_present,
-			cap->downstream_port.dfp_type);
-	pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
-			cap->downstream_port.format_conversion,
-			cap->downstream_port.detailed_cap_info_available);
-
-	bp += 1;	/* Skip Byte 6 */
-	rlen -= 1;
-
-	data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
-	cap->downstream_port.dfp_count = data & 0x7;
-	cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
-	cap->downstream_port.oui_support = data & BIT(7);
-	pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
-			cap->downstream_port.dfp_count,
-			cap->downstream_port.msa_timing_par_ignored);
-	pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
-
-	data = *bp++; /* byte 8 */
-	if (data & BIT(1)) {
-		cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
-		pr_debug("edid presented\n");
-	}
-
-	data = *bp++; /* byte 9 */
-	cap->rx_port0_buf_size = (data + 1) * 32;
-	pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
-
-	bp += 2; /* skip 10, 11 port1 capability */
-	rlen -= 2;
-
-	data = *bp++;	/* byte 12 */
-	cap->i2c_speed_ctrl = data;
-	if (cap->i2c_speed_ctrl > 0)
-		pr_debug("i2c_rate=%d", cap->i2c_speed_ctrl);
-
-	data = *bp++;	/* byte 13 */
-	cap->scrambler_reset = data & BIT(0);
-	pr_debug("scrambler_reset=%d\n", cap->scrambler_reset);
-
-	if (data & BIT(1))
-		cap->enhanced_frame++;
-
-	pr_debug("enhanced_framing=%d\n", cap->enhanced_frame);
-
-	data = *bp++; /* byte 14 */
-	if (data == 0)
-		cap->training_read_interval = 4000; /* us */
-	else
-		cap->training_read_interval = 4000 * data; /* us */
-	pr_debug("training_interval=%d\n", cap->training_read_interval);
 end:
 	return rc;
 }
 
-/*
- * edid standard header bytes
- */
-static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
-
-static bool dp_panel_is_edid_header_valid(u8 *buf)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
-		if (buf[i] != edid_hdr[i])
-			return false;
-	}
-
-	return true;
-}
-
-static int dp_panel_validate_edid(u8 *bp, int len)
-{
-	int i;
-	u8 csum = 0;
-	u32 const size = 128;
-
-	if (len < size) {
-		pr_err("Error: len=%x\n", len);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < size; i++)
-		csum += *bp++;
-
-	if (csum != 0) {
-		pr_err("error: csum=0x%x\n", csum);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int dp_panel_read_edid(struct dp_panel *dp_panel)
-{
-	u8 *edid_buf;
-	u32 checksum = 0;
-	int rlen, ret = 0;
-	int edid_blk = 0, blk_num = 0, retries = 10;
-	u32 const segment_addr = 0x30;
-	bool edid_parsing_done = false;
-	struct dp_panel_private *panel;
-
-	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
-	ret = panel->aux->ready(panel->aux);
-	if (!ret) {
-		pr_err("aux chan NOT ready\n");
-		goto end;
-	}
-
-	do {
-		u8 segment;
-
-
-		/*
-		 * Write the segment first.
-		 * Segment = 0, for blocks 0 and 1
-		 * Segment = 1, for blocks 2 and 3
-		 * Segment = 2, for blocks 3 and 4
-		 * and so on ...
-		 */
-		segment = blk_num >> 1;
-
-		panel->aux->write(panel->aux, segment_addr, 1, AUX_I2C,
-					&segment);
-
-		rlen = panel->aux->read(panel->aux, EDID_START_ADDRESS +
-				(blk_num * EDID_BLOCK_SIZE),
-				EDID_BLOCK_SIZE, AUX_I2C, &edid_buf);
-		if (rlen != EDID_BLOCK_SIZE) {
-			pr_err("invalid edid len: %d\n", rlen);
-			continue;
-		}
-
-		pr_debug("=== EDID data ===\n");
-		print_hex_dump(KERN_DEBUG, "EDID: ", DUMP_PREFIX_NONE, 16, 1,
-			edid_buf, EDID_BLOCK_SIZE, false);
-
-		pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
-
-		if (dp_panel_is_edid_header_valid(edid_buf)) {
-			ret = dp_panel_validate_edid(edid_buf, rlen);
-			if (ret) {
-				pr_err("corrupt edid block detected\n");
-				goto end;
-			}
-
-			if (edid_parsing_done) {
-				blk_num++;
-				continue;
-			}
-
-			dp_panel->edid.ext_block_cnt = edid_buf[0x7E];
-			edid_parsing_done = true;
-			checksum = edid_buf[rlen - 1];
-		} else {
-			edid_blk++;
-			blk_num++;
-		}
-
-		memcpy(dp_panel->edid.buf + (edid_blk * EDID_BLOCK_SIZE),
-			edid_buf, EDID_BLOCK_SIZE);
-
-		if (edid_blk == dp_panel->edid.ext_block_cnt)
-			goto end;
-	} while (retries--);
-end:
-	return ret;
-}
-
 static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
 {
 	int rc = 0;
@@ -334,6 +135,36 @@
 	return rc;
 }
 
+static int dp_panel_edid_register(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_panel->edid_ctrl = sde_edid_init();
+	if (!dp_panel->edid_ctrl) {
+		pr_err("sde edid init for DP failed\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+end:
+	return rc;
+}
+
+static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+{
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+}
+
 static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
 {
 	int rc = 0;
@@ -350,12 +181,12 @@
 	return rc;
 }
 
-static u8 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+static u32 dp_panel_get_link_rate(struct dp_panel *dp_panel)
 {
 	const u32 encoding_factx10 = 8;
 	const u32 ln_to_link_ratio = 10;
 	u32 min_link_rate, reminder = 0;
-	u8 calc_link_rate = 0, lane_cnt;
+	u32 calc_link_rate = 0, lane_cnt, max_rate = 0;
 	struct dp_panel_private *panel;
 	struct dp_panel_info *pinfo;
 
@@ -366,11 +197,10 @@
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	lane_cnt = dp_panel->dpcd.max_lane_count;
+	lane_cnt = dp_panel->dp_link.num_lanes;
+	max_rate = drm_dp_link_rate_to_bw_code(dp_panel->dp_link.rate);
 	pinfo = &dp_panel->pinfo;
 
-	pinfo->bpp = 24;
-
 	/*
 	 * The max pixel clock supported is 675Mhz. The
 	 * current calculations below will make sure
@@ -393,12 +223,12 @@
 		min_link_rate += 1;
 	pr_debug("min_link_rate = %d\n", min_link_rate);
 
-	if (min_link_rate <= DP_LINK_RATE_162)
-		calc_link_rate = DP_LINK_RATE_162;
-	else if (min_link_rate <= DP_LINK_RATE_270)
-		calc_link_rate = DP_LINK_RATE_270;
-	else if (min_link_rate <= DP_LINK_RATE_540)
-		calc_link_rate = DP_LINK_RATE_540;
+	if (min_link_rate <= DP_LINK_BW_1_62)
+		calc_link_rate = DP_LINK_BW_1_62;
+	else if (min_link_rate <= DP_LINK_BW_2_7)
+		calc_link_rate = DP_LINK_BW_2_7;
+	else if (min_link_rate <= DP_LINK_BW_5_4)
+		calc_link_rate = DP_LINK_BW_5_4;
 	else if (min_link_rate <= DP_LINK_RATE_810)
 		calc_link_rate = DP_LINK_RATE_810;
 	else {
@@ -407,8 +237,8 @@
 		calc_link_rate = DP_LINK_RATE_810;
 	}
 
-	if (calc_link_rate > dp_panel->dpcd.max_link_rate)
-		calc_link_rate = dp_panel->dpcd.max_link_rate;
+	if (calc_link_rate > max_rate)
+		calc_link_rate = max_rate;
 
 	pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
 end:
@@ -440,12 +270,10 @@
 
 	dp_panel = &panel->dp_panel;
 
-	dp_panel->edid.buf = devm_kzalloc(dev,
-				sizeof(EDID_BLOCK_SIZE) * 4, GFP_KERNEL);
-
+	dp_panel->sde_edid_register = dp_panel_edid_register;
+	dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
 	dp_panel->init_info = dp_panel_init_panel_info;
 	dp_panel->timing_cfg = dp_panel_timing_cfg;
-	dp_panel->read_edid = dp_panel_read_edid;
 	dp_panel->read_dpcd = dp_panel_read_dpcd;
 	dp_panel->get_link_rate = dp_panel_get_link_rate;
 
@@ -463,6 +291,5 @@
 
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
-	devm_kfree(panel->dev, dp_panel->edid.buf);
 	devm_kfree(panel->dev, panel);
 }
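dp_panel_get_link_rate() now works in DRM bandwidth codes (DP_LINK_BW_1_62/2_7/5_4) and clamps against the rate advertised in the DPCD. The standalone sketch below reproduces the underlying bound — required pixel bandwidth versus what the lanes carry per bandwidth-code unit — using the driver's 27 MHz multiplier and 8b/10b data ratio; it is a simplified illustration, not the driver's exact arithmetic, and the mode values are hypothetical.

#include <stdio.h>

/* DP bandwidth codes: 6 = 1.62Gbps, 10 = 2.7Gbps, 20 = 5.4Gbps, 30 = 8.1Gbps */
int main(void)
{
	const unsigned long long symbols_per_code = 27000000ULL; /* 27M symbols/s per bw-code unit */
	const unsigned long long data_bits_per_symbol = 8;	  /* 8b/10b: 8 data bits per symbol */
	unsigned long long pixel_clk_khz = 148500;		  /* hypothetical 1080p60 mode */
	unsigned long long bpp = 24, lanes = 4;
	unsigned long long need_bps, per_code_bps, min_code;

	need_bps = pixel_clk_khz * 1000ULL * bpp;
	per_code_bps = lanes * symbols_per_code * data_bits_per_symbol;
	min_code = (need_bps + per_code_bps - 1) / per_code_bps;  /* round up */

	/* snap to the next standard code, like the driver's if/else ladder */
	if (min_code <= 6)
		min_code = 6;
	else if (min_code <= 10)
		min_code = 10;
	else if (min_code <= 20)
		min_code = 20;
	else
		min_code = 30;

	printf("min link bw code = %llu (x 270 Mbps)\n", min_code);	/* 6 here */
	return 0;
}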
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 5c145eb..5852c70 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -16,66 +16,9 @@
 #define _DP_PANEL_H_
 
 #include "dp_aux.h"
+#include "sde_edid_parser.h"
 
-#define DPCD_ENHANCED_FRAME     BIT(0)
-#define DPCD_TPS3               BIT(1)
-#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
-#define DPCD_NO_AUX_HANDSHAKE   BIT(3)
-#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
-
-#define EDID_START_ADDRESS	0x50
-#define EDID_BLOCK_SIZE		0x80
-
-
-#define DP_LINK_RATE_162	6	/* 1.62G = 270M * 6 */
-#define DP_LINK_RATE_270	10	/* 2.70G = 270M * 10 */
-#define DP_LINK_RATE_540	20	/* 5.40G = 270M * 20 */
 #define DP_LINK_RATE_810	30	/* 8.10G = 270M * 30 */
-#define DP_LINK_RATE_MAX	DP_LINK_RATE_810
-
-struct downstream_port_config {
-	/* Byte 02205h */
-	bool dfp_present;
-	u32 dfp_type;
-	bool format_conversion;
-	bool detailed_cap_info_available;
-	/* Byte 02207h */
-	u32 dfp_count;
-	bool msa_timing_par_ignored;
-	bool oui_support;
-};
-
-struct dp_panel_dpcd {
-	u8 major;
-	u8 minor;
-	u8 max_lane_count;
-	u8 num_rx_port;
-	u8 i2c_speed_ctrl;
-	u8 scrambler_reset;
-	u8 enhanced_frame;
-	u32 max_link_rate;  /* 162, 270 and 540 Mb, divided by 10 */
-	u32 flags;
-	u32 rx_port0_buf_size;
-	u32 training_read_interval;/* us */
-	struct downstream_port_config downstream_port;
-};
-
-struct dp_panel_edid {
-	u8 *buf;
-	u8 id_name[4];
-	u8 id_product;
-	u8 version;
-	u8 revision;
-	u8 video_intf;	/* dp == 0x5 */
-	u8 color_depth;	/* 6, 8, 10, 12 and 14 bits */
-	u8 color_format;	/* RGB 4:4:4, YCrCb 4:4:4, Ycrcb 4:2:2 */
-	u8 dpm;		/* display power management */
-	u8 sync_digital;	/* 1 = digital */
-	u8 sync_separate;	/* 1 = separate */
-	u8 vsync_pol;		/* 0 = negative, 1 = positive */
-	u8 hsync_pol;		/* 0 = negative, 1 = positive */
-	u8 ext_block_cnt;
-};
 
 struct dp_panel_info {
 	u32 h_active;
@@ -95,17 +38,21 @@
 };
 
 struct dp_panel {
-	struct dp_panel_dpcd dpcd;
-	struct dp_panel_edid edid;
+	/* dpcd raw data */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+	struct drm_dp_link dp_link;
+
+	struct sde_edid_ctrl *edid_ctrl;
 	struct dp_panel_info pinfo;
 
 	u32 vic;
 
+	int (*sde_edid_register)(struct dp_panel *dp_panel);
+	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
 	int (*init_info)(struct dp_panel *dp_panel);
 	int (*timing_cfg)(struct dp_panel *dp_panel);
-	int (*read_edid)(struct dp_panel *dp_panel);
 	int (*read_dpcd)(struct dp_panel *dp_panel);
-	u8 (*get_link_rate)(struct dp_panel *dp_panel);
+	u32 (*get_link_rate)(struct dp_panel *dp_panel);
 };
 
 struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index 089177f..6ef8266 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -345,8 +345,12 @@
 		pd->vdo = *vdos;
 		dp_usbpd_get_status(pd);
 
-		if (pd->dp_cb && pd->dp_cb->attention)
+		if (pd->dp_cb && pd->dp_cb->attention) {
 			pd->dp_cb->attention(pd->dev);
+
+			if (!pd->dp_usbpd.alt_mode_cfg_done)
+				dp_usbpd_send_event(pd, DP_USBPD_EVT_CONFIGURE);
+		}
 		break;
 	case DP_USBPD_VDM_STATUS:
 		pd->vdo = *vdos;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index 133dc93..3dd4950 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1371,21 +1371,21 @@
 			goto error_disable_cmd_engine;
 		}
 	}
-	return rc;
 
-put_iova:
-	msm_gem_put_iova(display->tx_cmd_buf, 0);
-free_gem:
-	msm_gem_free_object(display->tx_cmd_buf);
 error_disable_cmd_engine:
 	(void)dsi_display_cmd_engine_disable(display);
 error_disable_clks:
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_ALL_CLKS, DSI_CLK_OFF);
 	if (rc) {
-		pr_err("[%s] failed to enable all DSI clocks, rc=%d\n",
+		pr_err("[%s] failed to disable all DSI clocks, rc=%d\n",
 		       display->name, rc);
 	}
+	return rc;
+put_iova:
+	msm_gem_put_iova(display->tx_cmd_buf, 0);
+free_gem:
+	msm_gem_free_object(display->tx_cmd_buf);
 error:
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 83bf997..5e67e8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -218,9 +218,10 @@
 
 	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
 			sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
 
-	if (mdp5_state && mdp5_state->base.fb)
-		drm_framebuffer_reference(mdp5_state->base.fb);
+	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
 	mdp5_state->mode_changed = false;
 	mdp5_state->pending = false;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f75be8a..747d9a6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -59,12 +59,68 @@
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5
 
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
+	struct drm_connector *connector;
+	char *event_string;
+	char const *connector_name;
+	char *envp[2];
+
+	if (!dev) {
+		DRM_ERROR("hotplug_event failed, invalid input\n");
+		return;
+	}
+
+	if (!dev->mode_config.poll_enabled)
+		return;
+
+	event_string = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!event_string) {
+		DRM_ERROR("failed to allocate event string\n");
+		return;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		connector->status = connector->funcs->detect(connector, false);
+
+		if (connector->name)
+			connector_name = connector->name;
+		else
+			connector_name = "unknown";
+
+		snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+			connector_name,
+			drm_get_connector_status_name(connector->status));
+		DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+		envp[0] = event_string;
+		envp[1] = NULL;
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+				envp);
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+	kfree(event_string);
+}
+
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_private *priv = NULL;
+
+	if (!dev) {
+		DRM_ERROR("output_poll_changed failed, invalid input\n");
+		return;
+	}
+
+	priv = dev->dev_private;
 
 	if (priv->fbdev)
 		drm_fb_helper_hotplug_event(priv->fbdev);
+	else
+		msm_drm_helper_hotplug_event(dev);
 }
 
 int msm_atomic_check(struct drm_device *dev,
@@ -1519,6 +1575,7 @@
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2cd9aa1..77dde55 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -206,6 +206,16 @@
 };
 
 /**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
+ */
+enum msm_event_wait {
+	MSM_ENC_COMMIT_DONE = 0,
+	MSM_ENC_TX_COMPLETE,
+};
+
+/**
  * struct msm_roi_alignment - region of interest alignment restrictions
  * @xstart_pix_align: left x offset alignment restriction
  * @width_pix_align: width alignment restriction
@@ -652,6 +662,7 @@
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_evtlog.c b/drivers/gpu/drm/msm/msm_evtlog.c
deleted file mode 100644
index dbe9b88..0000000
--- a/drivers/gpu/drm/msm/msm_evtlog.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	"msm_evtlog:[%s] " fmt, __func__
-
-#include "msm_evtlog.h"
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <asm-generic/current.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_trace.h"
-
-#define SIZE_MASK(x) (x - 1)
-
-static int msm_evtlog_debugfs_dump(struct seq_file *s, void *data)
-{
-	struct msm_evtlog *log = s->private;
-	unsigned long cnt;	/* # of samples since clear */
-	unsigned long n;	/* # of samples to print, also head index */
-	unsigned long i;
-	struct timespec timespec;
-
-	/**
-	 * Prints in chronological order, oldest -> newest
-	 * Note due to lock-less design, the first few printed entries
-	 * may be corrupted by new writer not oldest.
-	 * This is a tradeoff for speed of sampling
-	 */
-	cnt = atomic_read(&log->cnt);
-	if (!cnt)
-		return 0;
-
-	n = cnt & SIZE_MASK(log->size);
-
-	/**
-	 * If not full, print from first log
-	 * (which is index 1 since atomic_inc_return is prefix operator)
-	 */
-	i = (cnt < log->size) ? 0 : n;
-
-	seq_puts(s, "time_ns, pid, func, line, val1, val2, msg\n");
-	do {
-		i = (i + 1) & SIZE_MASK(log->size);
-		timespec = ktime_to_timespec(log->events[i].ktime);
-		seq_printf(s, "[%5lu.%06lu], %d, %s, %d, %llu, %llu, %s\n",
-				timespec.tv_sec,
-				timespec.tv_nsec / 1000,
-				log->events[i].pid,
-				log->events[i].func,
-				log->events[i].line,
-				log->events[i].val1,
-				log->events[i].val2,
-				log->events[i].msg);
-	} while (i != n);
-
-	return 0;
-}
-
-static int msm_evtlog_debugfs_open_dump(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_evtlog_debugfs_dump, inode->i_private);
-}
-
-static ssize_t msm_evtlog_debugfs_write(
-		struct file *file,
-		const char __user *user_buf,
-		size_t size,
-		loff_t *ppos)
-{
-	struct seq_file *s = file->private_data;
-	struct msm_evtlog *log = s->private;
-	char buf[64];
-	int buf_size;
-
-	buf_size = min(size, (sizeof(buf) - 1));
-	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
-		return -EFAULT;
-	buf[buf_size] = 0;
-
-	if (strcmp(buf, "0") == 0)
-		atomic_set(&log->cnt, 0);
-
-	return size;
-
-}
-
-static const struct file_operations msm_evtlog_fops = {
-	.open =		msm_evtlog_debugfs_open_dump,
-	.read =		seq_read,
-	.write =	msm_evtlog_debugfs_write,
-	.llseek =	seq_lseek,
-	.release =	single_release,
-};
-
-int msm_evtlog_init(
-		struct msm_evtlog *log,
-		int size,
-		struct dentry *parent)
-{
-	if (!log || size < 1) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	memset(log, 0, sizeof(*log));
-	log->size = roundup_pow_of_two(size);
-	log->events = kcalloc(log->size, sizeof(struct msm_evtlog_evt),
-			GFP_KERNEL);
-
-	if (!log->events) {
-		pr_err("Insufficient memory\n");
-		return -ENOMEM;
-	}
-
-	atomic_set(&log->cnt, 0);
-
-	log->dentry = debugfs_create_file("evtlog", 0644, parent,
-			log, &msm_evtlog_fops);
-
-	if (IS_ERR_OR_NULL(log->dentry)) {
-		int rc = PTR_ERR(log->dentry);
-
-		pr_err("debugfs create file failed, rc=%d\n", rc);
-		kfree(log->events);
-		return rc;
-	}
-
-	return 0;
-}
-
-void msm_evtlog_destroy(struct msm_evtlog *log)
-{
-	debugfs_remove(log->dentry);
-
-	/* Caller needs to make sure that log sampling has stopped */
-	kfree(log->events);
-
-}
-
-void msm_evtlog_sample(
-		struct msm_evtlog *log,
-		const char *func,
-		const char *msg,
-		uint64_t val1,
-		uint64_t val2,
-		uint32_t line)
-{
-	unsigned long i;
-
-	/**
-	 * Since array sized with pow of 2, roll to 0 when cnt overflows
-	 * mod the value with the size to get current idx into array
-	 */
-	i = (unsigned long)(atomic_inc_return(&log->cnt)) &
-			SIZE_MASK(log->size);
-	log->events[i].ktime = ktime_get();
-	log->events[i].func = func;
-	log->events[i].msg = msg;
-	log->events[i].val1 = val1;
-	log->events[i].val2 = val2;
-	log->events[i].line = line;
-	log->events[i].pid = current->pid;
-
-	trace_sde_evtlog(func, line, val1, val2);
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 60bb290..13403c6 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	return msm_obj->resv;
+}
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index aa1b090..d8ac407 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -62,6 +62,9 @@
 	/* functions to wait for atomic commit completed on each CRTC */
 	void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
 					struct drm_crtc *crtc);
+	/* function pointer to wait for pixel transfer to panel to complete */
+	void (*wait_for_tx_complete)(struct msm_kms *kms,
+					struct drm_crtc *crtc);
 	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
 	const struct msm_format *(*get_format)(struct msm_kms *kms,
 					const uint32_t format,
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 4d45898..7fbcff4 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -27,6 +27,7 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_mmu.h"
+#include "sde_dbg.h"
 
 #ifndef SZ_4G
 #define SZ_4G	(((size_t) SZ_1G) * 4)
@@ -238,6 +239,13 @@
 		return -ENOMEM;
 	}
 
+	if (sgt && sgt->sgl) {
+		DRM_DEBUG("%pad/0x%x/0x%x/0x%lx\n", &sgt->sgl->dma_address,
+				sgt->sgl->dma_length, dir, attrs);
+		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
+				dir, attrs);
+	}
+
 	return 0;
 }
 
@@ -248,6 +256,12 @@
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
 
+	if (sgt && sgt->sgl) {
+		DRM_DEBUG("%pad/0x%x/0x%x\n", &sgt->sgl->dma_address,
+				sgt->sgl->dma_length, dir);
+		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length, dir);
+	}
+
 	msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
 }
 
@@ -386,6 +400,37 @@
 	return &smmu->base;
 }
 
+static int msm_smmu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova,
+		int flags, void *token)
+{
+	struct msm_smmu_client *client;
+	int rc = -EINVAL;
+
+	if (!token) {
+		DRM_ERROR("Error: token is NULL\n");
+		return -EINVAL;
+	}
+
+	client = (struct msm_smmu_client *)token;
+
+	/* see iommu.h for fault flags definition */
+	SDE_EVT32(iova, flags);
+	DRM_ERROR("trigger dump, iova=0x%08lx, flags=0x%x\n", iova, flags);
+	DRM_ERROR("SMMU device:%s", client->dev ? client->dev->kobj.name : "");
+
+	/* generate dump, but no panic */
+	SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
+			"dsi1_phy", "vbif", "dbg_bus",
+			"vbif_dbg_bus");
+
+	/*
+	 * return an error so the smmu driver can also dump out useful
+	 * debug info.
+	 */
+	return rc;
+}
+
 static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
 	const struct msm_smmu_domain *domain)
 {
@@ -411,6 +456,9 @@
 		}
 	}
 
+	iommu_set_fault_handler(client->mmu_mapping->domain,
+			msm_smmu_fault_handler, (void *)client);
+
 	DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
 			domain->label, domain->va_start, domain->va_size,
 			domain->secure);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index f13c6c9..6551257 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -856,7 +856,7 @@
 
 	sde_connector = to_sde_connector(connector);
 
-	if (!debugfs_create_bool("fb_kmap", 0644, connector->debugfs_entry,
+	if (!debugfs_create_bool("fb_kmap", 0600, connector->debugfs_entry,
 			&sde_connector->fb_kmap)) {
 		SDE_ERROR("failed to create connector fb_kmap\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 1b40161..cec2b5f 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -195,6 +195,12 @@
 			!sde_kms->hw_intr->ops.get_interrupt_status)
 		return 0;
 
+	if (irq_idx < 0) {
+		SDE_ERROR("[%pS] invalid irq_idx=%d\n",
+				__builtin_return_address(0), irq_idx);
+		return 0;
+	}
+
 	return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
 			irq_idx, clear);
 }
@@ -323,7 +329,7 @@
 int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
 		struct dentry *parent)
 {
-	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0644,
+	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
 			parent, &sde_kms->irq_obj,
 			&sde_debugfs_core_irq_fops);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index f42e510..b1f8b0f 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -104,13 +104,14 @@
 	return intf_connected;
 }
 
-static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
+		struct drm_crtc *crtc,
 		struct drm_crtc_state *state,
 		struct sde_core_perf_params *perf)
 {
 	struct sde_crtc_state *sde_cstate;
 
-	if (!crtc || !state || !perf) {
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
 		SDE_ERROR("invalid parameters\n");
 		return;
 	}
@@ -124,6 +125,20 @@
 	perf->core_clk_rate =
 			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
 
+	if (!sde_cstate->bw_control) {
+		perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
+		perf->max_per_pipe_ib = perf->bw_ctl;
+		perf->core_clk_rate = kms->perf.max_core_clk_rate;
+	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
+		perf->bw_ctl = 0;
+		perf->max_per_pipe_ib = 0;
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
+		perf->bw_ctl = kms->perf.fix_core_ab_vote;
+		perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	}
+
 	SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
 			crtc->base.id, perf->core_clk_rate,
 			perf->max_per_pipe_ib, perf->bw_ctl);
@@ -157,9 +172,8 @@
 
 	sde_cstate = to_sde_crtc_state(state);
 
-	/* swap state and obtain new values */
-	sde_cstate->cur_perf = sde_cstate->new_perf;
-	_sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+	/* obtain new values */
+	_sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
 
 	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
 	curr_client_type = sde_crtc_get_client_type(crtc);
@@ -189,11 +203,9 @@
 	if (!sde_cstate->bw_control) {
 		SDE_DEBUG("bypass bandwidth check\n");
 	} else if (!threshold) {
-		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("no bandwidth limits specified\n");
 		return -E2BIG;
 	} else if (bw > threshold) {
-		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
 		return -E2BIG;
 	}
@@ -332,6 +344,7 @@
 void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 {
 	struct drm_crtc *tmp_crtc;
+	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_cstate;
 	struct sde_kms *kms;
 
@@ -346,6 +359,7 @@
 		return;
 	}
 
+	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
 	/* only do this for command mode rt client (non-rsc client) */
@@ -368,8 +382,7 @@
 	/* Release the bandwidth */
 	if (kms->perf.enable_bw_release) {
 		trace_sde_cmd_release_bw(crtc->base.id);
-		sde_cstate->cur_perf.bw_ctl = 0;
-		sde_cstate->new_perf.bw_ctl = 0;
+		sde_crtc->cur_perf.bw_ctl = 0;
 		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
 		_sde_core_perf_crtc_update_bus(kms, crtc);
 	}
@@ -432,19 +445,21 @@
 	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
 			crtc->base.id, stop_req, kms->perf.core_clk_rate);
 
-	old = &sde_cstate->cur_perf;
+	old = &sde_crtc->cur_perf;
 	new = &sde_cstate->new_perf;
 
 	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
 		/*
 		 * cases for bus bandwidth update.
-		 * 1. new bandwidth vote or writeback output vote
-		 *    are higher than current vote for update request.
-		 * 2. new bandwidth vote or writeback output vote are
-		 *    lower than current vote at end of commit or stop.
+		 * 1. new bandwidth vote - "ab or ib vote" is higher
+		 *    than current vote for update request.
+		 * 2. new bandwidth vote - "ab or ib vote" is lower
+		 *    than current vote at end of commit or stop.
 		 */
-		if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
-		    (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+			  (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+			  (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
 			SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
 				crtc->base.id, params_changed, new->bw_ctl,
 				old->bw_ctl);
@@ -458,7 +473,8 @@
 				get_sde_rsc_current_state(SDE_RSC_INDEX) ==
 							    SDE_RSC_CMD_STATE) {
 			/* update new bandwdith in all cases */
-			if (params_changed && new->bw_ctl != old->bw_ctl) {
+			if (params_changed && ((new->bw_ctl != old->bw_ctl) ||
+			      (new->max_per_pipe_ib != old->max_per_pipe_ib))) {
 				old->bw_ctl = new->bw_ctl;
 				old->max_per_pipe_ib = new->max_per_pipe_ib;
 				update_bus = 1;
@@ -618,27 +634,27 @@
 		return -EINVAL;
 	}
 
-	debugfs_create_u64("max_core_clk_rate", 0644, perf->debugfs_root,
+	debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
 			&perf->max_core_clk_rate);
-	debugfs_create_u64("core_clk_rate", 0644, perf->debugfs_root,
+	debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
 			&perf->core_clk_rate);
-	debugfs_create_u32("enable_bw_release", 0644, perf->debugfs_root,
+	debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
 			(u32 *)&perf->enable_bw_release);
-	debugfs_create_u32("threshold_low", 0644, perf->debugfs_root,
+	debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
 			(u32 *)&catalog->perf.max_bw_low);
-	debugfs_create_u32("threshold_high", 0644, perf->debugfs_root,
+	debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
 			(u32 *)&catalog->perf.max_bw_high);
-	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
+	debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
 			(u32 *)perf, &sde_core_perf_mode_fops);
 	debugfs_create_u32("bw_vote_mode", 0600, perf->debugfs_root,
 			&perf->bw_vote_mode);
 	debugfs_create_bool("bw_vote_mode_updated", 0600, perf->debugfs_root,
 			&perf->bw_vote_mode_updated);
-	debugfs_create_u64("fix_core_clk_rate", 0644, perf->debugfs_root,
+	debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
 			&perf->fix_core_clk_rate);
-	debugfs_create_u64("fix_core_ib_vote", 0644, perf->debugfs_root,
+	debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
 			&perf->fix_core_ib_vote);
-	debugfs_create_u64("fix_core_ab_vote", 0644, perf->debugfs_root,
+	debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
 			&perf->fix_core_ab_vote);
 
 	return 0;
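
The sde_core_perf.c changes above fold the bandwidth/clock vote selection into _sde_core_perf_calc_crtc: a CRTC that has not opted into bw/clk control gets the catalog maximums, the "minimum" perf-tune mode zeroes the votes, and the "fixed" mode uses the debugfs-provided ab/ib/clock values; the bus is then re-voted whenever the ab or ib vote rises on an update request or falls at end of commit or stop. Below is a minimal standalone sketch of that selection, using simplified stand-in types rather than the driver's structures.

#include <stdbool.h>

/* Illustrative sketch only; simplified stand-ins for the sde perf types. */
enum perf_tune_mode { PERF_MODE_NORMAL, PERF_MODE_MINIMUM, PERF_MODE_FIXED };

struct perf_vote {
	unsigned long long bw_ctl;		/* average bandwidth (ab) */
	unsigned long long max_per_pipe_ib;	/* instantaneous bandwidth (ib) */
	unsigned long long core_clk_rate;
};

static void pick_crtc_vote(struct perf_vote *v, bool bw_control,
		enum perf_tune_mode mode, unsigned long long max_bw_high,
		unsigned long long max_core_clk, const struct perf_vote *fixed)
{
	if (!bw_control) {
		/* no userspace control yet: vote the catalog maximums */
		v->bw_ctl = max_bw_high * 1000ULL;	/* scaled as in the driver */
		v->max_per_pipe_ib = v->bw_ctl;
		v->core_clk_rate = max_core_clk;
	} else if (mode == PERF_MODE_MINIMUM) {
		v->bw_ctl = 0;
		v->max_per_pipe_ib = 0;
		v->core_clk_rate = 0;
	} else if (mode == PERF_MODE_FIXED) {
		*v = *fixed;	/* fixed ab/ib/clk values from debugfs */
	}
	/* PERF_MODE_NORMAL keeps the values derived from CRTC properties */
}
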
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cf0cc56..e708290 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -735,6 +735,25 @@
 	return 0;
 }
 
+static bool _sde_crtc_setup_is_3dmux_dsc(struct drm_crtc_state *state)
+{
+	int i;
+	struct sde_crtc_state *cstate;
+	bool is_3dmux_dsc = false;
+
+	cstate = to_sde_crtc_state(state);
+
+	for (i = 0; i < cstate->num_connectors; i++) {
+		struct drm_connector *conn = cstate->connectors[i];
+
+		if (sde_connector_get_topology_name(conn) ==
+				SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC)
+			is_3dmux_dsc = true;
+	}
+
+	return is_3dmux_dsc;
+}
+
 static int _sde_crtc_set_crtc_roi(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -768,6 +787,12 @@
 
 		sde_conn_state = to_sde_connector_state(conn_state);
 
+		/*
+		 * the current driver only supports matching connector and crtc
+		 * sizes, but if support for different sizes is added, the
+		 * driver needs to check the connector roi here to make sure it
+		 * is full screen for the dsc 3d-mux topology, which doesn't
+		 * support partial update.
+		 */
 		if (memcmp(&sde_conn_state->rois, &crtc_state->user_roi_list,
 				sizeof(crtc_state->user_roi_list))) {
 			SDE_ERROR("%s: crtc -> conn roi scaling unsupported\n",
@@ -778,6 +803,23 @@
 
 	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
 
+	/*
+	 * for 3dmux dsc, make sure it is a full ROI, since the current driver
+	 * doesn't support partial update for this configuration.
+	 */
+	if (!sde_kms_rect_is_null(crtc_roi) &&
+		_sde_crtc_setup_is_3dmux_dsc(state)) {
+		struct drm_display_mode *adj_mode = &state->adjusted_mode;
+
+		if (crtc_roi->w != adj_mode->hdisplay ||
+			crtc_roi->h != adj_mode->vdisplay) {
+			SDE_ERROR("%s: unsupported top roi[%d %d] wxh[%d %d]\n",
+				sde_crtc->name, crtc_roi->w, crtc_roi->h,
+				adj_mode->hdisplay, adj_mode->vdisplay);
+			return -EINVAL;
+		}
+	}
+
 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
 
@@ -1112,12 +1154,11 @@
 	struct sde_hw_stage_cfg *stage_cfg;
 	struct sde_rect plane_crtc_roi;
 
-	u32 flush_mask = 0;
-	uint32_t lm_idx = LEFT_MIXER, stage_idx;
-	bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
-	int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
+	u32 flush_mask, flush_sbuf, flush_tmp;
+	uint32_t stage_idx, lm_idx;
+	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
 	int i;
-	bool sbuf_mode = false;
+	bool bg_alpha_enable = false;
 	u32 prefill = 0;
 
 	if (!sde_crtc || !mixer) {
@@ -1129,6 +1170,10 @@
 	lm = mixer->hw_lm;
 	stage_cfg = &sde_crtc->stage_cfg;
 	cstate = to_sde_crtc_state(crtc->state);
+	flush_sbuf = 0x0;
+
+	cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+	cstate->sbuf_prefill_line = 0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		state = plane->state;
@@ -1144,10 +1189,16 @@
 		fb = state->fb;
 
 		if (sde_plane_is_sbuf_mode(plane, &prefill))
-			sbuf_mode = true;
+			cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+		if (prefill > cstate->sbuf_prefill_line)
+			cstate->sbuf_prefill_line = prefill;
 
-		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
+		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
 
+		/* persist rotator flush bit(s) for one more commit */
+		flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
+		flush_sbuf |= flush_tmp;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
@@ -1157,57 +1208,44 @@
 				state->fb ? state->fb->base.id : -1);
 
 		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+		if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
+			bg_alpha_enable = true;
 
 		SDE_EVT32(DRMID(crtc), DRMID(plane),
 				state->fb ? state->fb->base.id : -1,
 				state->src_x >> 16, state->src_y >> 16,
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_x, state->crtc_y,
-				state->crtc_w, state->crtc_h);
+				state->crtc_w, state->crtc_h,
+				cstate->sbuf_cfg.rot_op_mode);
 
-		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-			struct sde_rect intersect;
-
-			/* skip if the roi doesn't fall within LM's bounds */
-			sde_kms_rect_intersect(&plane_crtc_roi,
-					&cstate->lm_bounds[lm_idx],
-					&intersect);
-			if (sde_kms_rect_is_null(&intersect))
-				continue;
-
-			stage_idx = zpos_cnt[lm_idx][pstate->stage]++;
-			stage_cfg->stage[lm_idx][pstate->stage][stage_idx] =
+		stage_idx = zpos_cnt[pstate->stage]++;
+		stage_cfg->stage[pstate->stage][stage_idx] =
 					sde_plane_pipe(plane);
-			stage_cfg->multirect_index
-					[lm_idx][pstate->stage][stage_idx] =
+		stage_cfg->multirect_index[pstate->stage][stage_idx] =
 					pstate->multirect_index;
 
+		SDE_EVT32(DRMID(crtc), DRMID(plane), stage_idx,
+			sde_plane_pipe(plane) - SSPP_VIG0, pstate->stage,
+			pstate->multirect_index, pstate->multirect_mode,
+			format->base.pixel_format, fb ? fb->modifier[0] : 0);
+
+		/* blend config update */
+		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
+			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
+								format);
 			mixer[lm_idx].flush_mask |= flush_mask;
 
-
-			SDE_EVT32(DRMID(plane), DRMID(crtc), lm_idx, stage_idx,
-				pstate->stage, pstate->multirect_index,
-				pstate->multirect_mode,
-				format->base.pixel_format,
-				fb ? fb->modifier[0] : 0);
-
-			/* blend config update */
-			if (pstate->stage != SDE_STAGE_BASE) {
-				_sde_crtc_setup_blend_cfg(mixer + lm_idx,
-						pstate, format);
-
-				if (bg_alpha_enable[lm_idx] &&
-						!format->alpha_enable)
-					mixer[lm_idx].mixer_op_mode = 0;
-				else
-					mixer[lm_idx].mixer_op_mode |=
+			if (bg_alpha_enable && !format->alpha_enable)
+				mixer[lm_idx].mixer_op_mode = 0;
+			else
+				mixer[lm_idx].mixer_op_mode |=
 						1 << pstate->stage;
-			} else if (format->alpha_enable) {
-				bg_alpha_enable[lm_idx] = true;
-			}
 		}
 	}
 
+	cstate->sbuf_flush_mask = flush_sbuf;
+
 	if (lm && lm->ops.setup_dim_layer) {
 		cstate = to_sde_crtc_state(crtc->state);
 		for (i = 0; i < cstate->num_dim_layers; i++)
@@ -1215,20 +1253,8 @@
 					mixer, &cstate->dim_layer[i]);
 	}
 
-	if (ctl->ops.setup_sbuf_cfg) {
-		cstate = to_sde_crtc_state(crtc->state);
-		if (!sbuf_mode) {
-			cstate->sbuf_cfg.rot_op_mode =
-					SDE_CTL_ROT_OP_MODE_OFFLINE;
-			cstate->sbuf_prefill_line = 0;
-		} else {
-			cstate->sbuf_cfg.rot_op_mode =
-					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
-			cstate->sbuf_prefill_line = prefill;
-		}
-
+	if (ctl->ops.setup_sbuf_cfg)
 		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
-	}
 
 	_sde_crtc_program_lm_output_roi(crtc);
 }
@@ -1377,7 +1403,7 @@
 			mixer[i].flush_mask);
 
 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
-			&sde_crtc->stage_cfg, i);
+			&sde_crtc->stage_cfg);
 	}
 
 	_sde_crtc_program_lm_output_roi(crtc);
@@ -2117,6 +2143,7 @@
 		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
 		SDE_EVT32(DRMID(crtc), 2);
 	}
+	sde_crtc->play_count++;
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
@@ -2453,6 +2480,9 @@
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 
+	/* disable clk & bw control until clk & bw properties are set */
+	cstate->bw_control = false;
+
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
 		ret = 0;
@@ -2799,43 +2829,67 @@
 		goto end;
 	}
 
-	/*
-	 * enforce pipe priority restrictions
+	/*
+	 * validate source split:
 	 * use pstates sorted by stage to check planes on same stage
 	 * we assume that all pipes are in source split so its valid to compare
 	 * without taking into account left/right mixer placement
 	 */
 	for (i = 1; i < cnt; i++) {
 		struct plane_state *prv_pstate, *cur_pstate;
-		int32_t prv_x, cur_x, prv_id, cur_id;
+		struct sde_rect left_rect, right_rect;
+		int32_t left_pid, right_pid;
+		int32_t stage;
 
 		prv_pstate = &pstates[i - 1];
 		cur_pstate = &pstates[i];
 		if (prv_pstate->stage != cur_pstate->stage)
 			continue;
 
-		prv_x = prv_pstate->drm_pstate->crtc_x;
-		cur_x = cur_pstate->drm_pstate->crtc_x;
-		prv_id = prv_pstate->sde_pstate->base.plane->base.id;
-		cur_id = cur_pstate->sde_pstate->base.plane->base.id;
+		stage = cur_pstate->stage;
 
-		/*
-		 * Planes are enumerated in pipe-priority order such that planes
-		 * with lower drm_id must be left-most in a shared blend-stage
-		 * when using source split.
+		left_pid = prv_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
+			prv_pstate->drm_pstate->crtc_y,
+			prv_pstate->drm_pstate->crtc_w,
+			prv_pstate->drm_pstate->crtc_h, false);
+
+		right_pid = cur_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
+			cur_pstate->drm_pstate->crtc_y,
+			cur_pstate->drm_pstate->crtc_w,
+			cur_pstate->drm_pstate->crtc_h, false);
+
+		if (right_rect.x < left_rect.x) {
+			swap(left_pid, right_pid);
+			swap(left_rect, right_rect);
+		}
+
+		/*
+		 * - planes are enumerated in pipe-priority order such that
+		 *   planes with lower drm_id must be left-most in a shared
+		 *   blend-stage when using source split.
+		 * - planes in source split must be contiguous in width
+		 * - planes in source split must have same dest yoff and height
 		 */
-		if (cur_x > prv_x && cur_id < prv_id) {
+		if (right_pid < left_pid) {
 			SDE_ERROR(
-				"shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
-				cur_pstate->stage, cur_id, cur_x,
-				prv_id, prv_x);
+				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+				stage, left_pid, right_pid);
 			rc = -EINVAL;
 			goto end;
-		} else if (cur_x < prv_x && cur_id > prv_id) {
+		} else if (right_rect.x != (left_rect.x + left_rect.w)) {
 			SDE_ERROR(
-				"shared z_pos %d lower id plane%d @ x%d should be left of plane%d @ x %d\n",
-				cur_pstate->stage, prv_id, prv_x,
-				cur_id, cur_x);
+				"non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n",
+				stage, left_rect.x, left_rect.w,
+				right_rect.x, right_rect.w);
+			rc = -EINVAL;
+			goto end;
+		} else if ((left_rect.y != right_rect.y) ||
+				(left_rect.h != right_rect.h)) {
+			SDE_ERROR(
+				"source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n",
+				stage, left_rect.y, right_rect.y,
+				left_rect.h, right_rect.h);
 			rc = -EINVAL;
 			goto end;
 		}
@@ -3020,7 +3074,7 @@
 			catalog->perf.min_prefill_lines);
 
 	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
-			info->data, info->len, CRTC_PROP_INFO);
+			info->data, SDE_KMS_INFO_DATALEN(info), CRTC_PROP_INFO);
 
 	kfree(info);
 }
@@ -3063,6 +3117,7 @@
 			case CRTC_PROP_ROI_V1:
 				ret = _sde_crtc_set_roi_v1(state, (void *)val);
 				break;
+			case CRTC_PROP_CORE_CLK:
 			case CRTC_PROP_CORE_AB:
 			case CRTC_PROP_CORE_IB:
 			case CRTC_PROP_MEM_AB:
@@ -3278,10 +3333,9 @@
 				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;
 
 		seq_printf(s,
-			"vblank fps:%lld count:%u total:%llums\n",
-				fps,
-				sde_crtc->vblank_cb_count,
-				ktime_to_ms(diff));
+			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+				fps, sde_crtc->vblank_cb_count,
+				ktime_to_ms(diff), sde_crtc->play_count);
 
 		/* reset time & count for next measurement */
 		sde_crtc->vblank_cb_count = 0;
@@ -3418,16 +3472,18 @@
 static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
 	struct sde_crtc_res *res;
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
 	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
-	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
-	seq_printf(s, "core_clk_rate: %llu\n", cstate->cur_perf.core_clk_rate);
+	seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+	seq_printf(s, "core_clk_rate: %llu\n",
+			sde_crtc->cur_perf.core_clk_rate);
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
-			cstate->cur_perf.max_per_pipe_ib);
+			sde_crtc->cur_perf.max_per_pipe_ib);
 
 	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
 	list_for_each_entry(res, &cstate->rp.res_list, list)
@@ -3471,14 +3527,14 @@
 		return -ENOMEM;
 
 	/* don't error check these */
-	debugfs_create_file("status", 0444,
+	debugfs_create_file("status", 0400,
 			sde_crtc->debugfs_root,
 			sde_crtc, &debugfs_status_fops);
-	debugfs_create_file("state", 0644,
+	debugfs_create_file("state", 0600,
 			sde_crtc->debugfs_root,
 			&sde_crtc->base,
 			&sde_crtc_debugfs_state_fops);
-	debugfs_create_file("misr_data", 0644, sde_crtc->debugfs_root,
+	debugfs_create_file("misr_data", 0600, sde_crtc->debugfs_root,
 					sde_crtc, &debugfs_misr_fops);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 4b3c814..38311c1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -121,6 +121,7 @@
  * @stage_cfg     : H/w mixer stage configuration
  * @debugfs_root  : Parent of debugfs node
  * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count    : frame count between crtc enable and disable
  * @vblank_cb_time  : ktime at vblank count reset
  * @vblank_refcount : reference count for vblank enable request
  * @suspend         : whether or not a suspend operation is in progress
@@ -141,6 +142,7 @@
  * @event_lock    : Spinlock around event handling code
  * @misr_enable   : boolean entry indicates misr enable/disable status.
  * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -166,6 +168,7 @@
 	struct dentry *debugfs_root;
 
 	u32 vblank_cb_count;
+	u64 play_count;
 	ktime_t vblank_cb_time;
 	atomic_t vblank_refcount;
 	bool suspend;
@@ -193,6 +196,8 @@
 	bool misr_enable;
 
 	struct sde_power_event *power_event;
+
+	struct sde_core_perf_params cur_perf;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -255,7 +260,7 @@
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
  * @is_ppsplit    : Whether current topology requires PPSplit special handling
- * @bw_control    : true if bw controlled by bw properties
+ * @bw_control    : true if bw/clk controlled by bw/clk properties
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -268,10 +273,10 @@
  * @property_blobs: Reference pointers for blob properties
  * @num_dim_layers: Number of dim layers
  * @dim_layer: Dim layer configs
- * @cur_perf: current performance state
- * @new_perf: new performance state
+ * @new_perf: new performance state being requested
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
+ * @sbuf_flush_mask: flush mask for inline rotator
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -295,10 +300,10 @@
 	uint32_t num_dim_layers;
 	struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
 
-	struct sde_core_perf_params cur_perf;
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
-	u64 sbuf_prefill_line;
+	u32 sbuf_prefill_line;
+	u32 sbuf_flush_mask;
 
 	struct sde_crtc_respool rp;
 };
@@ -433,10 +438,14 @@
  */
 static inline u32 sde_crtc_get_inline_prefill(struct drm_crtc *crtc)
 {
+	struct sde_crtc_state *cstate;
+
 	if (!crtc || !crtc->state)
 		return 0;
 
-	return to_sde_crtc_state(crtc->state)->sbuf_prefill_line;
+	cstate = to_sde_crtc_state(crtc->state);
+	return cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE ?
+		cstate->sbuf_prefill_line : 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 5ccd385..e1caeaf 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -36,6 +36,7 @@
 #include "sde_hw_dsc.h"
 #include "sde_crtc.h"
 #include "sde_trace.h"
+#include "sde_core_irq.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -43,6 +44,18 @@
 #define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
 
+#define SDE_DEBUG_PHYS(p, fmt, ...) SDE_DEBUG("enc%d intf%d pp%d " fmt,\
+		(p) ? (p)->parent->base.id : -1, \
+		(p) ? (p)->intf_idx - INTF_0 : -1, \
+		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+		##__VA_ARGS__)
+
+#define SDE_ERROR_PHYS(p, fmt, ...) SDE_ERROR("enc%d intf%d pp%d " fmt,\
+		(p) ? (p)->parent->base.id : -1, \
+		(p) ? (p)->intf_idx - INTF_0 : -1, \
+		(p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+		##__VA_ARGS__)
+
 /* timeout in frames waiting for frame done */
 #define SDE_ENCODER_FRAME_DONE_TIMEOUT	60
 
@@ -278,6 +291,174 @@
 									enable);
 }
 
+void sde_encoder_helper_report_irq_timeout(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx)
+{
+	SDE_EVT32(DRMID(phys_enc->parent),
+			phys_enc->intf_idx - INTF_0,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			intr_idx);
+	SDE_ERROR_PHYS(phys_enc, "irq %d timeout\n", intr_idx);
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_ERROR);
+}
+
+int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx,
+		struct sde_encoder_wait_info *wait_info)
+{
+	struct sde_encoder_irq *irq;
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	/* note: do master / slave checking outside */
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_PHYS(phys_enc, "encoder is disabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	if (irq->irq_idx < 0) {
+		SDE_DEBUG_PHYS(phys_enc, "irq %s hw %d disabled, skip wait\n",
+				irq->name, irq->hw_idx);
+		SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+				irq->irq_idx);
+		return 0;
+	}
+
+	SDE_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
+			atomic_read(wait_info->atomic_cnt));
+	SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx,
+			atomic_read(wait_info->atomic_cnt),
+			SDE_EVTLOG_FUNC_ENTRY);
+
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			irq->hw_idx,
+			wait_info);
+
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				irq->irq_idx, true);
+		if (irq_status) {
+			unsigned long flags;
+
+			SDE_EVT32(DRMID(phys_enc->parent),
+					irq->hw_idx,
+					atomic_read(wait_info->atomic_cnt));
+			SDE_DEBUG_PHYS(phys_enc,
+					"done but irq %d not triggered\n",
+					irq->irq_idx);
+			local_irq_save(flags);
+			irq->cb.func(phys_enc, irq->irq_idx);
+			local_irq_restore(flags);
+			ret = 0;
+		} else {
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), irq->hw_idx, ret,
+			SDE_EVTLOG_FUNC_EXIT);
+
+	return ret;
+}
+
+int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx)
+{
+	struct sde_encoder_irq *irq;
+	int ret = 0;
+
+	if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	if (irq->irq_idx >= 0) {
+		SDE_ERROR_PHYS(phys_enc,
+				"skipping already registered irq %s type %d\n",
+				irq->name, irq->intr_type);
+		return 0;
+	}
+
+	irq->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			irq->intr_type, irq->hw_idx);
+	if (irq->irq_idx < 0) {
+		SDE_ERROR_PHYS(phys_enc,
+			"failed to lookup IRQ index for %s type:%d\n",
+			irq->name, irq->intr_type);
+		return -EINVAL;
+	}
+
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms, irq->irq_idx,
+			&irq->cb);
+	if (ret) {
+		SDE_ERROR_PHYS(phys_enc,
+			"failed to register IRQ callback for %s\n",
+			irq->name);
+		irq->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &irq->irq_idx, 1);
+	if (ret) {
+		SDE_ERROR_PHYS(phys_enc,
+			"enable IRQ for intr:%s failed, irq_idx %d\n",
+			irq->name, irq->irq_idx);
+
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				irq->irq_idx, &irq->cb);
+		irq->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
+	SDE_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
+			irq->name, irq->irq_idx);
+
+	return ret;
+}
+
+int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx)
+{
+	struct sde_encoder_irq *irq;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	irq = &phys_enc->irq[intr_idx];
+
+	/* silently skip irqs that weren't registered */
+	if (irq->irq_idx < 0)
+		return 0;
+
+	sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
+			&irq->cb);
+	irq->irq_idx = -EINVAL;
+
+	SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
+	SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
+
+	return 0;
+}
+
 void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 		struct sde_encoder_hw_resources *hw_res,
 		struct drm_connector_state *conn_state)
@@ -651,7 +832,7 @@
 	sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
 }
 
-static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 {
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
@@ -692,6 +873,7 @@
 
 	return 0;
 }
+
 static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -892,7 +1074,7 @@
 		return -EINVAL;
 	}
 
-	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_DEBUG_ENC(sde_enc, "topology:%d\n", topology);
 	SDE_EVT32(DRMID(&sde_enc->base));
 
 	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
@@ -901,7 +1083,8 @@
 
 	switch (topology) {
 	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
-		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
+	case SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC:
+		ret = _sde_encoder_dsc_n_lm_1_enc_1_intf(sde_enc);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
 		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
@@ -1416,6 +1599,9 @@
 		hw_mdptop->ops.setup_vsync_sel(hw_mdptop, &te_cfg,
 				sde_enc->disp_info.is_te_using_watchdog_timer);
 	}
+
+	memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
+	memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
 }
 
 void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
@@ -1782,23 +1968,23 @@
 int sde_encoder_helper_wait_event_timeout(
 		int32_t drm_id,
 		int32_t hw_id,
-		wait_queue_head_t *wq,
-		atomic_t *cnt,
-		s64 timeout_ms)
+		struct sde_encoder_wait_info *info)
 {
 	int rc = 0;
-	s64 expected_time = ktime_to_ms(ktime_get()) + timeout_ms;
-	s64 jiffies = msecs_to_jiffies(timeout_ms);
+	s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+	s64 jiffies = msecs_to_jiffies(info->timeout_ms);
 	s64 time;
 
 	do {
-		rc = wait_event_timeout(*wq, atomic_read(cnt) == 0, jiffies);
+		rc = wait_event_timeout(*(info->wq),
+				atomic_read(info->atomic_cnt) == 0, jiffies);
 		time = ktime_to_ms(ktime_get());
 
 		SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
-				atomic_read(cnt));
+				atomic_read(info->atomic_cnt));
 	/* If we timed out, counter is valid and time is less, wait again */
-	} while (atomic_read(cnt) && (rc == 0) && (time < expected_time));
+	} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+			(time < expected_time));
 
 	return rc;
 }
@@ -2217,8 +2403,7 @@
 		/* only enable border color on LM */
 		if (phys_enc->hw_ctl->ops.setup_blendstage)
 			phys_enc->hw_ctl->ops.setup_blendstage(
-					phys_enc->hw_ctl,
-					hw_lm->idx, 0, 0);
+					phys_enc->hw_ctl, hw_lm->idx, NULL);
 	}
 
 	if (!lm_valid) {
@@ -2228,6 +2413,25 @@
 	return 0;
 }
 
+void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.prepare_commit)
+			phys->ops.prepare_commit(phys);
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int _sde_encoder_status_show(struct seq_file *s, void *data)
 {
@@ -2425,10 +2629,10 @@
 		return -ENOMEM;
 
 	/* don't error check these */
-	debugfs_create_file("status", 0644,
+	debugfs_create_file("status", 0600,
 		sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
 
-	debugfs_create_file("misr_data", 0644,
+	debugfs_create_file("misr_data", 0600,
 		sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
 
 	for (i = 0; i < sde_enc->num_phys_encs; i++)
@@ -2789,8 +2993,10 @@
 	return ERR_PTR(ret);
 }
 
-int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
+	enum msm_event_wait event)
 {
+	int (*fn_wait)(struct sde_encoder_phys *phys_enc) = NULL;
 	struct sde_encoder_virt *sde_enc = NULL;
 	int i, ret = 0;
 
@@ -2804,8 +3010,17 @@
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
-		if (phys && phys->ops.wait_for_commit_done) {
-			ret = phys->ops.wait_for_commit_done(phys);
+		if (!phys)
+			continue;
+
+		switch (event) {
+		case MSM_ENC_COMMIT_DONE:
+			fn_wait = phys->ops.wait_for_commit_done;
+			break;
+		case MSM_ENC_TX_COMPLETE:
+			fn_wait = phys->ops.wait_for_tx_complete;
+			break;
+		}
+
+		if (fn_wait) {
+			ret = fn_wait(phys);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index d3a9bb4..0b14a58 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -128,14 +128,24 @@
 void sde_encoder_kickoff(struct drm_encoder *encoder);
 
 /**
- * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the
- *	current pending frames to hardware at a vblank or ctl_start
- *	Encoders will map this differently depending on irqs
- *	vid mode -> vsync_irq
+ * sde_encoder_wait_for_event - Waits for encoder events
  * @encoder:	encoder pointer
+ * @event:      event to wait for
+ * MSM_ENC_COMMIT_DONE -  Wait for hardware to have flushed the current pending
+ *                        frames to hardware at a vblank or ctl_start.
+ *                        Encoders will map this differently depending on the
+ *                        panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE -  Wait for the hardware to transfer all the pixels to
+ *                        the panel. Encoders will map this differently
+ *                        depending on the panel type.
+ *                        vid mode -> vsync_irq
+ *                        cmd mode -> pp_done
  * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
  */
-int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+int sde_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+						enum msm_event_wait event);
 
 /*
  * sde_encoder_get_intf_mode - get interface mode of the given encoder
@@ -179,4 +189,11 @@
  */
 void sde_encoder_destroy(struct drm_encoder *drm_enc);
 
+/**
+ * sde_encoder_prepare_commit - prepare encoder at the very beginning of an
+ *	atomic commit, before any registers are written
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+void sde_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
 #endif /* __SDE_ENCODER_H__ */
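
As the kerneldoc above spells out, MSM_ENC_COMMIT_DONE returns once the hardware has taken the flush (vsync for video mode, ctl_start for command mode), while MSM_ENC_TX_COMPLETE returns once the frame has actually been sent to the panel (vsync for video mode, pp_done for command mode). A caller-side sketch is shown below; it assumes the declarations in this header and the usual kernel headers, and the wrapper name and its error handling are illustrative, not part of the driver.

/* Illustrative sketch only: how a commit path might use the two wait events. */
static int example_wait_for_frame(struct drm_encoder *enc, bool wait_for_panel)
{
	int rc;

	/* block until the current flush has been latched by the hardware */
	rc = sde_encoder_wait_for_event(enc, MSM_ENC_COMMIT_DONE);
	if (rc && rc != -EWOULDBLOCK)
		return rc;

	/* optionally also block until the pixels have reached the panel */
	if (wait_for_panel)
		rc = sde_encoder_wait_for_event(enc, MSM_ENC_TX_COMPLETE);

	return (rc == -EWOULDBLOCK) ? 0 : rc;
}
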
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 3d6dc32..6e6960a 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -94,6 +94,7 @@
  * struct sde_encoder_phys_ops - Interface the physical encoders provide to
  *	the containing virtual encoder.
  * @late_register:		DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit:		MSM Atomic Call, start of atomic commit sequence
  * @is_master:			Whether this phys_enc is the current master
  *				encoder. Can be switched at enable time. Based
  *				on split_role and current mode (CMD/VID).
@@ -110,6 +111,8 @@
  * @control_vblank_irq		Register/Deregister for VBLANK IRQ
  * @wait_for_commit_done:	Wait for hardware to have flushed the
  *				current pending frames to hardware
+ * @wait_for_tx_complete:	Wait for hardware to transfer the pixels
+ *				to the panel
  * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
  *				For CMD encoder, may wait for previous tx done
  * @handle_post_kickoff:	Do any work necessary post-kickoff work
@@ -127,6 +130,7 @@
 struct sde_encoder_phys_ops {
 	int (*late_register)(struct sde_encoder_phys *encoder,
 			struct dentry *debugfs_root);
+	void (*prepare_commit)(struct sde_encoder_phys *encoder);
 	bool (*is_master)(struct sde_encoder_phys *encoder);
 	bool (*mode_fixup)(struct sde_encoder_phys *encoder,
 			const struct drm_display_mode *mode,
@@ -145,6 +149,7 @@
 			struct drm_connector_state *conn_state);
 	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
 	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
+	int (*wait_for_tx_complete)(struct sde_encoder_phys *phys_enc);
 	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc,
 			struct sde_encoder_kickoff_params *params);
 	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
@@ -178,6 +183,25 @@
 };
 
 /**
+ * struct sde_encoder_irq - tracking structure for interrupts
+ * @name:		string name of interrupt
+ * @intr_type:		Encoder interrupt type
+ * @intr_idx:		Encoder interrupt enumeration
+ * @hw_idx:		HW Block ID
+ * @irq_idx:		IRQ interface lookup index from SDE IRQ framework
+ *			will be -EINVAL if IRQ is not registered
+ * @cb:			interrupt callback
+ */
+struct sde_encoder_irq {
+	const char *name;
+	enum sde_intr_type intr_type;
+	enum sde_intr_idx intr_idx;
+	int hw_idx;
+	int irq_idx;
+	struct sde_irq_callback cb;
+};
+
+/**
  * struct sde_encoder_phys - physical encoder that drives a single INTF block
  *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
  *	phys_vid or phys_cmd for video mode or command mode encs respectively.
@@ -209,6 +233,7 @@
  * @pending_ctlstart_cnt:	Atomic counter tracking the number of ctl start
  *                              pending.
  * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @irq:			IRQ tracking structures
  */
 struct sde_encoder_phys {
 	struct drm_encoder *parent;
@@ -234,6 +259,7 @@
 	atomic_t pending_ctlstart_cnt;
 	atomic_t pending_kickoff_cnt;
 	wait_queue_head_t pending_kickoff_wq;
+	struct sde_encoder_irq irq[INTR_IDX_MAX];
 };
 
 static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
@@ -246,16 +272,12 @@
  * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
  *	mode specific operations
  * @base:	Baseclass physical encoder structure
- * @irq_idx:	IRQ interface lookup index
- * @irq_cb:	interrupt callback
  * @hw_intf:	Hardware interface to the intf registers
  * @timing_params: Current timing parameter
  * @rot_prefill_line: number of line to prefill for inline rotation; 0 disable
  */
 struct sde_encoder_phys_vid {
 	struct sde_encoder_phys base;
-	int irq_idx[INTR_IDX_MAX];
-	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 	struct sde_hw_intf *hw_intf;
 	struct intf_timing_params timing_params;
 	u64 rot_prefill_line;
@@ -267,10 +289,6 @@
  * @base:	Baseclass physical encoder structure
  * @intf_idx:	Intf Block index used by this phys encoder
  * @stream_sel:	Stream selection for multi-stream interfaces
- * @pp_rd_ptr_irq_idx:	IRQ signifying panel's frame read pointer
- *			For CMD encoders, VBLANK is driven by the PP RD Done IRQ
- * @pp_tx_done_irq_idx:	IRQ signifying frame transmission to panel complete
- * @irq_cb:		interrupt callback
  * @serialize_wait4pp:	serialize wait4pp feature waits for pp_done interrupt
  *			after ctl_start instead of before next frame kickoff
  * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
@@ -278,8 +296,6 @@
 struct sde_encoder_phys_cmd {
 	struct sde_encoder_phys base;
 	int stream_sel;
-	int irq_idx[INTR_IDX_MAX];
-	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
 	bool serialize_wait4pp;
 	int pp_timeout_report_cnt;
 };
@@ -294,6 +310,7 @@
  * @bypass_irqreg:	Bypass irq register/unregister if non-zero
  * @wbdone_complete:	for wbdone irq synchronization
  * @wb_cfg:		Writeback hardware configuration
+ * @cdp_cfg:		Writeback CDP configuration
  * @intf_cfg:		Interface hardware configuration
  * @wb_roi:		Writeback region-of-interest
  * @wb_fmt:		Writeback pixel format
@@ -315,6 +332,7 @@
 	u32 bypass_irqreg;
 	struct completion wbdone_complete;
 	struct sde_hw_wb_cfg wb_cfg;
+	struct sde_hw_wb_cdp_cfg cdp_cfg;
 	struct sde_hw_intf_cfg intf_cfg;
 	struct sde_rect wb_roi;
 	const struct sde_format *wb_fmt;
@@ -351,6 +369,18 @@
 };
 
 /**
+ * struct sde_encoder_wait_info - container for encoder irq wait arguments
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct sde_encoder_wait_info {
+	wait_queue_head_t *wq;
+	atomic_t *atomic_cnt;
+	s64 timeout_ms;
+};
+
+/**
  * sde_encoder_phys_vid_init - Construct a new video mode physical encoder
  * @p:	Pointer to init params structure
  * Return: Error code or newly allocated encoder
@@ -402,16 +432,12 @@
  *	making sure that elapsed time during wait is valid.
  * @drm_id: drm object id for logging
  * @hw_id: hw instance id for logging
- * @wq: wait queue structure
- * @cnt: atomic counter to wait on
- * @timeout_ms: timeout value in milliseconds
+ * @info: wait info structure
  */
 int sde_encoder_helper_wait_event_timeout(
 		int32_t drm_id,
 		int32_t hw_id,
-		wait_queue_head_t *wq,
-		atomic_t *cnt,
-		s64 timeout_ms);
+		struct sde_encoder_wait_info *info);
 
 /**
  * sde_encoder_helper_hw_reset - issue ctl hw reset
@@ -432,7 +458,8 @@
 
 	topology = sde_connector_get_topology_name(phys_enc->connector);
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+			(topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE ||
+			 topology == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC))
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
@@ -458,4 +485,43 @@
 int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb);
 
+/**
+ * sde_encoder_helper_report_irq_timeout - utility to report error that irq has
+ *	timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void sde_encoder_helper_report_irq_timeout(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx);
+
+/**
+ * sde_encoder_helper_wait_for_irq - utility to wait on an irq.
+ *	note: if the wait times out but the interrupt status shows it fired,
+ *	the registered callback is invoked inline and 0 is returned
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * @Return: 0 or -ERROR
+ */
+int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx,
+		struct sde_encoder_wait_info *wait_info);
+
+/**
+ * sde_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @Return: 0 or -ERROR
+ */
+int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx);
+
+/**
+ * sde_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @Return: 0 or -ERROR
+ */
+int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
+		enum sde_intr_idx intr_idx);
+
 #endif /* __sde_encoder_phys_H__ */
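
The helpers declared above replace the per-encoder open-coded irq handling with a common flow: the physical encoder describes each interrupt once in its phys_enc->irq[] table, sde_encoder_helper_register_irq looks up and enables it, sde_encoder_helper_wait_for_irq blocks on a sde_encoder_wait_info, and sde_encoder_helper_unregister_irq disables it again. A condensed sketch of one such wait is below; it assumes the INTR_IDX_PINGPONG table entry was filled in at init time, as the command-mode encoder code in this patch does, and that KICKOFF_TIMEOUT_MS is the timeout used there. The wrapper name is illustrative.

/* Illustrative sketch only: waiting on the pingpong-done interrupt. */
static int example_wait_pp_done(struct sde_encoder_phys *phys_enc)
{
	struct sde_encoder_wait_info wait_info;
	int rc;

	rc = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
	if (rc)
		return rc;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	rc = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
			&wait_info);
	if (rc == -ETIMEDOUT)
		sde_encoder_helper_report_irq_timeout(phys_enc,
				INTR_IDX_PINGPONG);

	sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);

	return rc;
}
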
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 7adab09..447fdcc 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -99,6 +99,98 @@
 	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
 }
 
+static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+	unsigned long lock_flags;
+	int new_cnt;
+
+	if (!phys_enc)
+		return;
+
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+	struct sde_hw_ctl *ctl;
+
+	if (!phys_enc)
+		return;
+
+	if (!phys_enc->hw_ctl)
+		return;
+
+	ctl = phys_enc->hw_ctl;
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
+	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+	/* Signal any waiting ctl start interrupt */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+
+	if (!phys_enc)
+		return;
+
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+static void _sde_encoder_phys_cmd_setup_irq_hw_idx(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_irq *irq;
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->hw_idx = phys_enc->hw_ctl->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->hw_idx = phys_enc->hw_pp->idx;
+	irq->irq_idx = -EINVAL;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
+}
 
 static void sde_encoder_phys_cmd_mode_set(
 		struct sde_encoder_phys *phys_enc,
@@ -112,8 +204,7 @@
 	int i, instance;
 
 	if (!phys_enc || !mode || !adj_mode) {
-		SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
-				phys_enc != 0, mode != 0, adj_mode != 0);
+		SDE_ERROR("invalid args\n");
 		return;
 	}
 	phys_enc->cached_mode = *adj_mode;
@@ -135,71 +226,8 @@
 		phys_enc->hw_ctl = NULL;
 		return;
 	}
-}
 
-static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys_cmd *cmd_enc = arg;
-	struct sde_encoder_phys *phys_enc;
-	unsigned long lock_flags;
-	int new_cnt;
-
-	if (!cmd_enc)
-		return;
-
-	phys_enc = &cmd_enc->base;
-
-	/* notify all synchronous clients first, then asynchronous clients */
-	if (phys_enc->parent_ops.handle_frame_done)
-		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
-				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
-
-	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
-	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
-	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
-
-	/* Signal any waiting atomic commit thread */
-	wake_up_all(&phys_enc->pending_kickoff_wq);
-}
-
-static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys_cmd *cmd_enc = arg;
-	struct sde_encoder_phys *phys_enc = &cmd_enc->base;
-
-	if (!cmd_enc)
-		return;
-
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0, 0xfff);
-
-	if (phys_enc->parent_ops.handle_vblank_virt)
-		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
-			phys_enc);
-}
-
-static void sde_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys_cmd *cmd_enc = arg;
-	struct sde_encoder_phys *phys_enc;
-	struct sde_hw_ctl *ctl;
-
-	if (!cmd_enc)
-		return;
-
-	phys_enc = &cmd_enc->base;
-	if (!phys_enc->hw_ctl)
-		return;
-
-	ctl = phys_enc->hw_ctl;
-	SDE_EVT32_IRQ(DRMID(phys_enc->parent), ctl->idx - CTL_0, 0xfff);
-	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
-
-	/* Signal any waiting ctl start interrupt */
-	wake_up_all(&phys_enc->pending_kickoff_wq);
+	_sde_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
 }
 
 static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
@@ -279,7 +307,7 @@
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
 			to_sde_encoder_phys_cmd(phys_enc);
-	u32 irq_status;
+	struct sde_encoder_wait_info wait_info;
 	int ret;
 
 	if (!phys_enc) {
@@ -287,154 +315,24 @@
 		return -EINVAL;
 	}
 
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
 	/* slave encoder doesn't enable for ppsplit */
 	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
 		return 0;
 
-	/* return EWOULDBLOCK since we know the wait isn't necessary */
-	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
-		SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
-		return -EWOULDBLOCK;
-	}
-
-	/* wait for previous kickoff to complete */
-	ret = sde_encoder_helper_wait_event_timeout(
-			DRMID(phys_enc->parent),
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			&phys_enc->pending_kickoff_wq,
-			&phys_enc->pending_kickoff_cnt,
-			KICKOFF_TIMEOUT_MS);
-	if (ret <= 0) {
-		/* read and clear interrupt */
-		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				cmd_enc->irq_idx[INTR_IDX_PINGPONG], true);
-		if (irq_status) {
-			unsigned long flags;
-			SDE_EVT32(DRMID(phys_enc->parent),
-					phys_enc->hw_pp->idx - PINGPONG_0);
-			SDE_DEBUG_CMDENC(cmd_enc,
-					"pp:%d done but irq not triggered\n",
-					phys_enc->hw_pp->idx - PINGPONG_0);
-			local_irq_save(flags);
-			sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
-					INTR_IDX_PINGPONG);
-			local_irq_restore(flags);
-			ret = 0;
-		} else {
-			ret = _sde_encoder_phys_cmd_handle_ppdone_timeout(
-					phys_enc);
-		}
-	} else {
-		ret = 0;
-	}
-
-	if (!ret)
+	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+			&wait_info);
+	if (ret == -ETIMEDOUT)
+		_sde_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+	else if (!ret)
 		cmd_enc->pp_timeout_report_cnt = 0;
 
 	return ret;
 }
 
-static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
-{
-	struct sde_encoder_phys_cmd *cmd_enc = arg;
-	struct sde_encoder_phys *phys_enc;
-
-	if (!cmd_enc)
-		return;
-
-	phys_enc = &cmd_enc->base;
-	if (phys_enc->parent_ops.handle_underrun_virt)
-		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
-			phys_enc);
-}
-
-static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
-	enum sde_intr_type intr_type, int idx,
-	void (*irq_func)(void *, int), const char *irq_name)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-	int idx_lookup = 0;
-	int ret = 0;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	if (intr_type == SDE_IRQ_TYPE_INTF_UNDER_RUN)
-		idx_lookup = phys_enc->intf_idx;
-	else if (intr_type == SDE_IRQ_TYPE_CTL_START)
-		idx_lookup = phys_enc->hw_ctl ? phys_enc->hw_ctl->idx : -1;
-	else
-		idx_lookup = phys_enc->hw_pp->idx;
-
-	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			intr_type, idx_lookup);
-	if (cmd_enc->irq_idx[idx] < 0) {
-		SDE_ERROR_CMDENC(cmd_enc,
-			"failed to lookup IRQ index for %s with pp=%d\n",
-			irq_name,
-			phys_enc->hw_pp->idx - PINGPONG_0);
-		return -EINVAL;
-	}
-
-	cmd_enc->irq_cb[idx].func = irq_func;
-	cmd_enc->irq_cb[idx].arg = cmd_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
-			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
-	if (ret) {
-		SDE_ERROR_CMDENC(cmd_enc,
-				"failed to register IRQ callback %s\n",
-				irq_name);
-		return ret;
-	}
-
-	ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
-	if (ret) {
-		SDE_ERROR_CMDENC(cmd_enc,
-			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
-			irq_name,
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			cmd_enc->irq_idx[idx]);
-		cmd_enc->irq_idx[idx] = -EINVAL;
-
-		/* Unregister callback on IRQ enable failure */
-		sde_core_irq_unregister_callback(phys_enc->sde_kms,
-				cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
-		return ret;
-	}
-
-	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
-			irq_name,
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			cmd_enc->irq_idx[idx]);
-
-	return ret;
-}
-
-static int sde_encoder_phys_cmd_unregister_irq(
-		struct sde_encoder_phys *phys_enc, int idx)
-{
-	struct sde_encoder_phys_cmd *cmd_enc =
-			to_sde_encoder_phys_cmd(phys_enc);
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
-
-	sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
-	sde_core_irq_unregister_callback(phys_enc->sde_kms,
-			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
-
-	SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
-			phys_enc->hw_pp->idx - PINGPONG_0,
-			cmd_enc->irq_idx[idx]);
-
-	return 0;
-}
-
 static int sde_encoder_phys_cmd_control_vblank_irq(
 		struct sde_encoder_phys *phys_enc,
 		bool enable)
@@ -460,13 +358,9 @@
 			enable, atomic_read(&phys_enc->vblank_refcount));
 
 	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
-		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
-				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
-				INTR_IDX_RDPTR,
-				sde_encoder_phys_cmd_pp_rd_ptr_irq,
-				"pp_rd_ptr");
+		ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
 	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
-		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+		ret = sde_encoder_helper_unregister_irq(phys_enc,
 				INTR_IDX_RDPTR);
 
 end:
@@ -489,35 +383,22 @@
 	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
 
 	if (enable) {
-		sde_encoder_phys_cmd_register_irq(phys_enc,
-				SDE_IRQ_TYPE_PING_PONG_COMP,
-				INTR_IDX_PINGPONG,
-				sde_encoder_phys_cmd_pp_tx_done_irq,
-				"pp_tx_done");
-
+		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+		sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
 
-		sde_encoder_phys_cmd_register_irq(phys_enc,
-				SDE_IRQ_TYPE_INTF_UNDER_RUN,
-				INTR_IDX_UNDERRUN,
-				sde_encoder_phys_cmd_underrun_irq,
-				"underrun");
+		if (sde_encoder_phys_cmd_is_master(phys_enc))
+			sde_encoder_helper_register_irq(phys_enc,
+					INTR_IDX_CTL_START);
+	} else {
 
 		if (sde_encoder_phys_cmd_is_master(phys_enc))
-			sde_encoder_phys_cmd_register_irq(phys_enc,
-				SDE_IRQ_TYPE_CTL_START,
-				INTR_IDX_CTL_START,
-				sde_encoder_phys_cmd_ctl_start_irq,
-				"ctl_start");
-	} else {
-		if (sde_encoder_phys_cmd_is_master(phys_enc))
-			sde_encoder_phys_cmd_unregister_irq(
-				phys_enc, INTR_IDX_CTL_START);
-		sde_encoder_phys_cmd_unregister_irq(
-				phys_enc, INTR_IDX_UNDERRUN);
+			sde_encoder_helper_unregister_irq(phys_enc,
+					INTR_IDX_CTL_START);
+
+		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
 		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
-		sde_encoder_phys_cmd_unregister_irq(
-				phys_enc, INTR_IDX_PINGPONG);
+		sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
 	}
 }
 
@@ -644,9 +525,20 @@
 
 	_sde_encoder_phys_cmd_pingpong_config(phys_enc);
 
+	/*
+	 * For pp-split, skip setting the flush bit for the slave intf, since
+	 * both intfs use the same ctl and HW will only flush the master.
+	 */
+	if (_sde_encoder_phys_is_ppsplit(phys_enc) &&
+		!sde_encoder_phys_cmd_is_master(phys_enc))
+		goto skip_flush;
+
 	ctl = phys_enc->hw_ctl;
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+	return;
 }
 
 static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
@@ -765,45 +657,51 @@
 static int _sde_encoder_phys_cmd_wait_for_ctl_start(
 		struct sde_encoder_phys *phys_enc)
 {
-	int rc = 0;
-	struct sde_hw_ctl *ctl;
-	u32 irq_status;
-	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_encoder_wait_info wait_info;
+	int ret;
 
-	if (!phys_enc->hw_ctl) {
-		SDE_ERROR("invalid ctl\n");
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
 		return -EINVAL;
 	}
 
-	ctl = phys_enc->hw_ctl;
-	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
-	rc = sde_encoder_helper_wait_event_timeout(DRMID(phys_enc->parent),
-			ctl->idx - CTL_0,
-			&phys_enc->pending_kickoff_wq,
-			&phys_enc->pending_ctlstart_cnt,
-			CTL_START_TIMEOUT_MS);
-	if (rc <= 0) {
-		/* read and clear interrupt */
-		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				cmd_enc->irq_idx[INTR_IDX_CTL_START], true);
-		if (irq_status) {
-			unsigned long flags;
+	wait_info.wq = &phys_enc->pending_kickoff_wq;
+	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+	wait_info.timeout_ms = CTL_START_TIMEOUT_MS;
 
-			SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
-			SDE_DEBUG_CMDENC(cmd_enc,
-					"ctl:%d start done but irq not triggered\n",
-					ctl->idx - CTL_0);
-			local_irq_save(flags);
-			sde_encoder_phys_cmd_ctl_start_irq(cmd_enc,
-					INTR_IDX_CTL_START);
-			local_irq_restore(flags);
-			rc = 0;
-		} else {
-			SDE_ERROR("ctl start interrupt wait failed\n");
-			rc = -EINVAL;
-		}
-	} else {
-		rc = 0;
+	/* slave encoder doesn't enable for ppsplit */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return 0;
+
+	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+			&wait_info);
+	if (ret == -ETIMEDOUT) {
+		SDE_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+		ret = -EINVAL;
+	} else if (!ret)
+		ret = 0;
+
+	return ret;
+}
+
+static int sde_encoder_phys_cmd_wait_for_tx_complete(
+		struct sde_encoder_phys *phys_enc)
+{
+	int rc;
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	rc = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (rc) {
+		SDE_EVT32(DRMID(phys_enc->parent),
+				phys_enc->intf_idx - INTF_0);
+		SDE_ERROR("failed wait_for_idle: %d\n", rc);
 	}
 
 	return rc;
@@ -864,9 +762,15 @@
 	_sde_encoder_phys_cmd_update_flush_mask(phys_enc);
 }
 
+static void sde_encoder_phys_cmd_prepare_commit(
+		struct sde_encoder_phys *phys_enc)
+{
+}
+
 static void sde_encoder_phys_cmd_init_ops(
 		struct sde_encoder_phys_ops *ops)
 {
+	ops->prepare_commit = sde_encoder_phys_cmd_prepare_commit;
 	ops->is_master = sde_encoder_phys_cmd_is_master;
 	ops->mode_set = sde_encoder_phys_cmd_mode_set;
 	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
@@ -877,6 +781,7 @@
 	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
 	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
 	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
+	ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete;
 	ops->trigger_start = sde_encoder_helper_trigger_start;
 	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
@@ -891,6 +796,7 @@
 	struct sde_encoder_phys *phys_enc = NULL;
 	struct sde_encoder_phys_cmd *cmd_enc = NULL;
 	struct sde_hw_mdp *hw_mdp;
+	struct sde_encoder_irq *irq;
 	int i, ret = 0;
 
 	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
@@ -922,8 +828,38 @@
 	cmd_enc->stream_sel = 0;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 	phys_enc->comp_type = p->comp_type;
-	for (i = 0; i < INTR_IDX_MAX; i++)
-		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_CTL_START];
+	irq->name = "ctl_start";
+	irq->intr_type = SDE_IRQ_TYPE_CTL_START;
+	irq->intr_idx = INTR_IDX_CTL_START;
+	irq->cb.func = sde_encoder_phys_cmd_ctl_start_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+	irq->name = "pp_done";
+	irq->intr_type = SDE_IRQ_TYPE_PING_PONG_COMP;
+	irq->intr_idx = INTR_IDX_PINGPONG;
+	irq->cb.func = sde_encoder_phys_cmd_pp_tx_done_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_RDPTR];
+	irq->name = "pp_rd_ptr";
+	irq->intr_type = SDE_IRQ_TYPE_PING_PONG_RD_PTR;
+	irq->intr_idx = INTR_IDX_RDPTR;
+	irq->cb.func = sde_encoder_phys_cmd_pp_rd_ptr_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = sde_encoder_phys_cmd_underrun_irq;
+
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 5cb84b4..007738a6 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -222,7 +222,7 @@
  * @rot_fetch_lines: number of line to prefill, or 0 to disable
  */
 static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
-		u64 rot_fetch_lines)
+		u32 rot_fetch_lines)
 {
 	struct sde_encoder_phys_vid *vid_enc =
 		to_sde_encoder_phys_vid(phys_enc);
@@ -232,9 +232,12 @@
 	u32 horiz_total = 0;
 	u32 vert_total = 0;
 	u32 rot_fetch_start_vsync_counter = 0;
+	u32 flush_mask = 0;
 	unsigned long lock_flags;
 
-	if (!phys_enc || !vid_enc->hw_intf ||
+	if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.get_bitmask_intf ||
+			!phys_enc->hw_ctl->ops.update_pending_flush ||
 			!vid_enc->hw_intf->ops.setup_rot_start)
 		return;
 
@@ -253,9 +256,14 @@
 	}
 
 	SDE_DEBUG_VIDENC(vid_enc,
-		"rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+		"rot_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
 		rot_fetch_lines, rot_fetch_start_vsync_counter);
 
+	phys_enc->hw_ctl->ops.get_bitmask_intf(
+			phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
+	phys_enc->hw_ctl->ops.update_pending_flush(
+			phys_enc->hw_ctl, flush_mask);
+
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -337,17 +345,17 @@
 
 static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 {
-	struct sde_encoder_phys_vid *vid_enc = arg;
-	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys *phys_enc = arg;
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
 	struct sde_hw_ctl *hw_ctl;
 	unsigned long lock_flags;
 	u32 flush_register = 0;
 	int new_cnt = -1, old_cnt = -1;
 
-	if (!vid_enc)
+	if (!phys_enc)
 		return;
 
-	phys_enc = &vid_enc->base;
 	hw_ctl = phys_enc->hw_ctl;
 
 	if (phys_enc->parent_ops.handle_vblank_virt)
@@ -379,13 +387,11 @@
 
 static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
 {
-	struct sde_encoder_phys_vid *vid_enc = arg;
-	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys *phys_enc = arg;
 
-	if (!vid_enc)
+	if (!phys_enc)
 		return;
 
-	phys_enc = &vid_enc->base;
 	if (phys_enc->parent_ops.handle_underrun_virt)
 		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
 			phys_enc);
@@ -411,77 +417,18 @@
 	return phys_enc && _sde_encoder_phys_is_ppsplit(phys_enc);
 }
 
-static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
-	enum sde_intr_type intr_type, int idx,
-	void (*irq_func)(void *, int), const char *irq_name)
+static void _sde_encoder_phys_vid_setup_irq_hw_idx(
+		struct sde_encoder_phys *phys_enc)
 {
-	struct sde_encoder_phys_vid *vid_enc;
-	int ret = 0;
+	struct sde_encoder_irq *irq;
 
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		return -EINVAL;
-	}
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
 
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	vid_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
-			intr_type, vid_enc->hw_intf->idx);
-	if (vid_enc->irq_idx[idx] < 0) {
-		SDE_ERROR_VIDENC(vid_enc,
-			"failed to lookup IRQ index for %s type:%d\n", irq_name,
-			intr_type);
-		return -EINVAL;
-	}
-
-	vid_enc->irq_cb[idx].func = irq_func;
-	vid_enc->irq_cb[idx].arg = vid_enc;
-	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
-			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
-	if (ret) {
-		SDE_ERROR_VIDENC(vid_enc,
-			"failed to register IRQ callback for %s\n", irq_name);
-		return ret;
-	}
-
-	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
-	if (ret) {
-		SDE_ERROR_VIDENC(vid_enc,
-			"enable IRQ for intr:%s failed, irq_idx %d\n",
-			irq_name, vid_enc->irq_idx[idx]);
-		vid_enc->irq_idx[idx] = -EINVAL;
-
-		/* unregister callback on IRQ enable failure */
-		sde_core_irq_unregister_callback(phys_enc->sde_kms,
-				vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
-		return ret;
-	}
-
-	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
-			irq_name, vid_enc->irq_idx[idx]);
-
-	return ret;
-}
-
-static int sde_encoder_phys_vid_unregister_irq(
-	struct sde_encoder_phys *phys_enc, int idx)
-{
-	struct sde_encoder_phys_vid *vid_enc;
-
-	if (!phys_enc) {
-		SDE_ERROR("invalid encoder\n");
-		goto end;
-	}
-
-	vid_enc = to_sde_encoder_phys_vid(phys_enc);
-	sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
-
-	sde_core_irq_unregister_callback(phys_enc->sde_kms,
-			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
-
-	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", vid_enc->irq_idx[idx]);
-
-end:
-	return 0;
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->hw_idx = phys_enc->intf_idx;
+	irq->irq_idx = -EINVAL;
 }
 
 static void sde_encoder_phys_vid_mode_set(
@@ -519,6 +466,8 @@
 		phys_enc->hw_ctl = NULL;
 		return;
 	}
+
+	_sde_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
 }
 
 static int sde_encoder_phys_vid_control_vblank_irq(
@@ -547,13 +496,10 @@
 			atomic_read(&phys_enc->vblank_refcount));
 
 	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
-		ret = sde_encoder_phys_vid_register_irq(phys_enc,
-			SDE_IRQ_TYPE_INTF_VSYNC,
-			INTR_IDX_VSYNC,
-			sde_encoder_phys_vid_vblank_irq, "vsync_irq");
+		ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
 	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
-		ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
-			INTR_IDX_VSYNC);
+		ret = sde_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_VSYNC);
 
 	if (ret)
 		SDE_ERROR_VIDENC(vid_enc,
@@ -600,18 +546,24 @@
 	if (ret)
 		goto end;
 
-	ret = sde_encoder_phys_vid_register_irq(phys_enc,
-		SDE_IRQ_TYPE_INTF_UNDER_RUN,
-		INTR_IDX_UNDERRUN,
-		sde_encoder_phys_vid_underrun_irq, "underrun");
+	ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
 	if (ret) {
 		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
 		goto end;
 	}
 
+	/*
+	 * For pp-split, skip setting the flush bit for the slave intf, since
+	 * both intfs use the same ctl and HW will only flush the master.
+	 */
+	if (_sde_encoder_phys_is_ppsplit(phys_enc) &&
+		!sde_encoder_phys_vid_is_master(phys_enc))
+		goto skip_flush;
+
 	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
 	ctl->ops.update_pending_flush(ctl, flush_mask);
 
+skip_flush:
 	SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
 		ctl->idx - CTL_0, flush_mask);
 
@@ -663,68 +615,35 @@
 static int sde_encoder_phys_vid_wait_for_vblank(
 		struct sde_encoder_phys *phys_enc, bool notify)
 {
-	struct sde_encoder_phys_vid *vid_enc =
-			to_sde_encoder_phys_vid(phys_enc);
-	u32 irq_status;
+	struct sde_encoder_wait_info wait_info = {
+		.wq = &phys_enc->pending_kickoff_wq,
+		.atomic_cnt = &phys_enc->pending_kickoff_cnt,
+		.timeout_ms = KICKOFF_TIMEOUT_MS,
+	};
 	int ret;
 
 	if (!sde_encoder_phys_vid_is_master(phys_enc)) {
-		/* always signal done for slave video encoder */
-		if (notify && phys_enc->parent_ops.handle_frame_done)
+		/* signal done for slave video encoder, unless it is pp-split */
+		if (!_sde_encoder_phys_is_ppsplit(phys_enc) &&
+			notify && phys_enc->parent_ops.handle_frame_done)
 			phys_enc->parent_ops.handle_frame_done(
 					phys_enc->parent, phys_enc,
 					SDE_ENCODER_FRAME_EVENT_DONE);
 		return 0;
 	}
 
-	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
-		SDE_ERROR("encoder not enabled\n");
-		return -EWOULDBLOCK;
-	}
-
-	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
-			SDE_EVTLOG_FUNC_ENTRY);
-
 	/* Wait for kickoff to complete */
-	ret = sde_encoder_helper_wait_event_timeout(
-			DRMID(phys_enc->parent),
-			vid_enc->hw_intf->idx - INTF_0,
-			&phys_enc->pending_kickoff_wq,
-			&phys_enc->pending_kickoff_cnt,
-			KICKOFF_TIMEOUT_MS);
-	if (ret <= 0) {
-		irq_status = sde_core_irq_read(phys_enc->sde_kms,
-				vid_enc->irq_idx[INTR_IDX_VSYNC], true);
-		if (irq_status) {
-			SDE_EVT32(DRMID(phys_enc->parent),
-					vid_enc->hw_intf->idx - INTF_0);
-			SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n");
-			if (notify && phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-						phys_enc->parent, phys_enc,
-						SDE_ENCODER_FRAME_EVENT_DONE);
-			sde_encoder_phys_vid_vblank_irq(vid_enc,
-					INTR_IDX_VSYNC);
-			ret = 0;
-		} else {
-			SDE_EVT32(DRMID(phys_enc->parent),
-					vid_enc->hw_intf->idx - INTF_0);
-			SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n");
-			if (notify && phys_enc->parent_ops.handle_frame_done)
-				phys_enc->parent_ops.handle_frame_done(
-						phys_enc->parent, phys_enc,
-						SDE_ENCODER_FRAME_EVENT_ERROR);
-			ret = -ETIMEDOUT;
-		}
-	} else {
-		if (notify && phys_enc->parent_ops.handle_frame_done)
-			phys_enc->parent_ops.handle_frame_done(
-					phys_enc->parent, phys_enc,
-					SDE_ENCODER_FRAME_EVENT_DONE);
-		ret = 0;
-	}
+	ret = sde_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+			&wait_info);
 
-	return 0;
+	if (ret == -ETIMEDOUT) {
+		sde_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+	} else if (!ret && notify && phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_DONE);
+
+	return ret;
 }
 
 static int sde_encoder_phys_vid_wait_for_commit_done(
@@ -827,6 +746,8 @@
 		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
 	}
 
+	sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+
 	if (atomic_read(&phys_enc->vblank_refcount))
 		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
 				atomic_read(&phys_enc->vblank_refcount));
@@ -914,6 +835,7 @@
 	struct sde_encoder_phys_vid *vid_enc = NULL;
 	struct sde_rm_hw_iter iter;
 	struct sde_hw_mdp *hw_mdp;
+	struct sde_encoder_irq *irq;
 	int i, ret = 0;
 
 	if (!p) {
@@ -968,8 +890,26 @@
 	phys_enc->intf_mode = INTF_MODE_VIDEO;
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	phys_enc->comp_type = p->comp_type;
-	for (i = 0; i < INTR_IDX_MAX; i++)
-		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
+	for (i = 0; i < INTR_IDX_MAX; i++) {
+		irq = &phys_enc->irq[i];
+		INIT_LIST_HEAD(&irq->cb.list);
+		irq->irq_idx = -EINVAL;
+		irq->hw_idx = -EINVAL;
+		irq->cb.arg = phys_enc;
+	}
+
+	irq = &phys_enc->irq[INTR_IDX_VSYNC];
+	irq->name = "vsync_irq";
+	irq->intr_type = SDE_IRQ_TYPE_INTF_VSYNC;
+	irq->intr_idx = INTR_IDX_VSYNC;
+	irq->cb.func = sde_encoder_phys_vid_vblank_irq;
+
+	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+	irq->name = "underrun";
+	irq->intr_type = SDE_IRQ_TYPE_INTF_UNDER_RUN;
+	irq->intr_idx = INTR_IDX_UNDERRUN;
+	irq->cb.func = sde_encoder_phys_vid_underrun_irq;
+
 	atomic_set(&phys_enc->vblank_refcount, 0);
 	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
 	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 385c610..54c1397 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -248,16 +248,18 @@
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb;
 	struct sde_hw_wb_cfg *wb_cfg;
+	struct sde_hw_wb_cdp_cfg *cdp_cfg;
 	const struct msm_format *format;
 	int ret, mmu_id;
 
-	if (!phys_enc) {
+	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
 	hw_wb = wb_enc->hw_wb;
 	wb_cfg = &wb_enc->wb_cfg;
+	cdp_cfg = &wb_enc->cdp_cfg;
 	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
 
 	wb_cfg->intf_mode = phys_enc->intf_mode;
@@ -325,8 +327,35 @@
 	if (hw_wb->ops.setup_outformat)
 		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
 
-	if (hw_wb->ops.setup_outaddress)
+	if (hw_wb->ops.setup_cdp) {
+		memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
+
+		cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
+				[SDE_PERF_CDP_USAGE_NRT].wr_enable;
+		cdp_cfg->ubwc_meta_enable =
+				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
+		cdp_cfg->tile_amortize_enable =
+				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+				SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
+		cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+		hw_wb->ops.setup_cdp(hw_wb, cdp_cfg);
+	}
+
+	if (hw_wb->ops.setup_outaddress) {
+		SDE_EVT32(hw_wb->idx,
+				wb_cfg->dest.width,
+				wb_cfg->dest.height,
+				wb_cfg->dest.plane_addr[0],
+				wb_cfg->dest.plane_size[0],
+				wb_cfg->dest.plane_addr[1],
+				wb_cfg->dest.plane_size[1],
+				wb_cfg->dest.plane_addr[2],
+				wb_cfg->dest.plane_size[2],
+				wb_cfg->dest.plane_addr[3],
+				wb_cfg->dest.plane_size[3]);
 		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+	}
 }
 
 /**
@@ -1104,13 +1133,13 @@
 	if (!phys_enc || !wb_enc->hw_wb || !debugfs_root)
 		return -EINVAL;
 
-	if (!debugfs_create_u32("wbdone_timeout", 0644,
+	if (!debugfs_create_u32("wbdone_timeout", 0600,
 			debugfs_root, &wb_enc->wbdone_timeout)) {
 		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
 		return -ENOMEM;
 	}
 
-	if (!debugfs_create_u32("bypass_irqreg", 0644,
+	if (!debugfs_create_u32("bypass_irqreg", 0600,
 			debugfs_root, &wb_enc->bypass_irqreg)) {
 		SDE_ERROR("failed to create debugfs/bypass_irqreg\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index c83472a..bd9fdac 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -138,12 +138,16 @@
 {
 	struct sde_fence *f = to_sde_fence(fence);
 	struct sde_fence *fc, *next;
-	struct sde_fence_context *ctx = f->ctx;
+	struct sde_fence_context *ctx;
 	bool release_kref = false;
 
+	if (!fence || !f->ctx)
+		return;
+
+	ctx = f->ctx;
+
 	spin_lock(&ctx->list_lock);
-	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
-				 fence_list) {
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
 		/* fence release called before signal */
 		if (f == fc) {
 			list_del_init(&fc->fence_list);
@@ -156,20 +160,25 @@
 	/* keep kput outside spin_lock because it may release ctx */
 	if (release_kref)
 		kref_put(&ctx->kref, sde_fence_destroy);
-	kfree_rcu(f, base.rcu);
+	kfree(f);
 }
 
-static void sde_fence_value_str(struct fence *fence,
-				    char *str, int size)
+static void sde_fence_value_str(struct fence *fence, char *str, int size)
 {
+	if (!fence || !str)
+		return;
+
 	snprintf(str, size, "%d", fence->seqno);
 }
 
-static void sde_fence_timeline_value_str(struct fence *fence,
-					     char *str, int size)
+static void sde_fence_timeline_value_str(struct fence *fence, char *str,
+		int size)
 {
 	struct sde_fence *f = to_sde_fence(fence);
 
+	if (!fence || !f->ctx || !str)
+		return;
+
 	snprintf(str, size, "%d", f->ctx->done_count);
 }
 
@@ -226,6 +235,7 @@
 	sync_file = sync_file_create(&sde_fence->base);
 	if (sync_file == NULL) {
 		put_unused_fd(fd);
+		fd = -EINVAL;
 		fence_put(&sde_fence->base);
 		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
 		goto exit;
@@ -244,16 +254,15 @@
 }
 
 int sde_fence_init(struct sde_fence_context *ctx,
-		const char *name,
-		uint32_t drm_id)
+		const char *name, uint32_t drm_id)
 {
-	if (!ctx) {
+	if (!ctx || !name) {
 		SDE_ERROR("invalid argument(s)\n");
 		return -EINVAL;
 	}
 	memset(ctx, 0, sizeof(*ctx));
 
-	strlcpy(ctx->name, name, SDE_FENCE_NAME_SIZE);
+	strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
 	ctx->drm_id = drm_id;
 	kref_init(&ctx->kref);
 	ctx->context = fence_context_alloc(1);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 9f1b6cb..1cbbe1e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -136,7 +136,6 @@
 	QSEED_TYPE,
 	CSC_TYPE,
 	PANIC_PER_PIPE,
-	CDP,
 	SRC_SPLIT,
 	DIM_LAYER,
 	SMART_DMA_REV,
@@ -165,6 +164,7 @@
 	PERF_QOS_LUT_MACROTILE,
 	PERF_QOS_LUT_NRT,
 	PERF_QOS_LUT_CWB,
+	PERF_CDP_SETTING,
 	PERF_PROP_MAX,
 };
 
@@ -290,6 +290,8 @@
 	VBIF_DYNAMIC_OT_WR_LIMIT,
 	VBIF_QOS_RT_REMAP,
 	VBIF_QOS_NRT_REMAP,
+	VBIF_MEMTYPE_0,
+	VBIF_MEMTYPE_1,
 	VBIF_PROP_MAX,
 };
 
@@ -300,6 +302,13 @@
 	REG_DMA_PROP_MAX
 };
 
+enum {
+	INLINE_ROT_XIN,
+	INLINE_ROT_XIN_TYPE,
+	INLINE_ROT_CLK_CTRL,
+	INLINE_ROT_PROP_MAX
+};
+
 /*************************************************************
  * dts property definition
  *************************************************************/
@@ -349,7 +358,6 @@
 	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
 	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
 	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
-	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
 	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
 	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
 	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
@@ -391,6 +399,8 @@
 			PROP_TYPE_U32_ARRAY},
 	{PERF_QOS_LUT_CWB, "qcom,sde-qos-lut-cwb", false,
 			PROP_TYPE_U32_ARRAY},
+	{PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
+			PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -529,6 +539,8 @@
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -541,6 +553,15 @@
 		PROP_TYPE_U32},
 };
 
+static struct sde_prop_type inline_rot_prop[INLINE_ROT_PROP_MAX] = {
+	{INLINE_ROT_XIN, "qcom,sde-inline-rot-xin", false,
+							PROP_TYPE_U32_ARRAY},
+	{INLINE_ROT_XIN_TYPE, "qcom,sde-inline-rot-xin-type", false,
+							PROP_TYPE_STRING_ARRAY},
+	{INLINE_ROT_CLK_CTRL, "qcom,sde-inline-rot-clk-ctrl", false,
+						PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
 /*************************************************************
  * static API list
  *************************************************************/
@@ -1061,6 +1082,9 @@
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
 
+		if (sde_cfg->has_cdp)
+			set_bit(SDE_SSPP_CDP, &sspp->features);
+
 		if (sde_cfg->ts_prefill_rev == 1) {
 			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
 		} else if (sde_cfg->ts_prefill_rev == 2) {
@@ -1399,6 +1423,8 @@
 			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
 
 		intf->prog_fetch_lines_worst_case =
+				!prop_exists[INTF_PREFETCH] ?
+				sde_cfg->perf.min_prefill_lines :
 				PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
 
 		of_property_read_string_index(np,
@@ -1500,6 +1526,9 @@
 		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
 		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
 
+		if (sde_cfg->has_cdp)
+			set_bit(SDE_WB_CDP, &wb->features);
+
 		set_bit(SDE_WB_QOS, &wb->features);
 		if (sde_cfg->vbif_qos_nlvl == 8)
 			set_bit(SDE_WB_QOS_8LVL, &wb->features);
@@ -1637,6 +1666,87 @@
 	}
 }
 
+static void _sde_inline_rot_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg, struct sde_rot_cfg *rot)
+{
+	int rc, prop_count[INLINE_ROT_PROP_MAX], i, j, index;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[INLINE_ROT_PROP_MAX];
+	u32 off_count, sspp_count = 0, wb_count = 0;
+	const char *type;
+
+	prop_value = kzalloc(INLINE_ROT_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value)
+		return;
+
+	rc = _validate_dt_entry(np, inline_rot_prop,
+			ARRAY_SIZE(inline_rot_prop), prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, inline_rot_prop, ARRAY_SIZE(inline_rot_prop),
+			prop_count, prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		rot->vbif_cfg[i].xin_id = PROP_VALUE_ACCESS(prop_value,
+							INLINE_ROT_XIN, i);
+		of_property_read_string_index(np,
+				inline_rot_prop[INLINE_ROT_XIN_TYPE].prop_name,
+				i, &type);
+
+		if (!strcmp(type, "sspp")) {
+			rot->vbif_cfg[i].num = INLINE_ROT0_SSPP + sspp_count;
+			rot->vbif_cfg[i].is_read = true;
+			rot->vbif_cfg[i].clk_ctrl =
+					SDE_CLK_CTRL_INLINE_ROT0_SSPP
+					+ sspp_count;
+			sspp_count++;
+		} else if (!strcmp(type, "wb")) {
+			rot->vbif_cfg[i].num = INLINE_ROT0_WB + wb_count;
+			rot->vbif_cfg[i].is_read = false;
+			rot->vbif_cfg[i].clk_ctrl =
+					SDE_CLK_CTRL_INLINE_ROT0_WB
+					+ wb_count;
+			wb_count++;
+		} else {
+			SDE_ERROR("invalid rotator vbif type:%s\n", type);
+			goto end;
+		}
+
+		index = rot->vbif_cfg[i].clk_ctrl;
+		if (index < 0 || index >= SDE_CLK_CTRL_MAX) {
+			SDE_ERROR("invalid clk_ctrl enum:%d\n", index);
+			goto end;
+		}
+
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[index].reg_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						INLINE_ROT_CLK_CTRL, i, 0);
+			sde_cfg->mdp[j].clk_ctrls[index].bit_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+						INLINE_ROT_CLK_CTRL, i, 1);
+		}
+
+		SDE_DEBUG("rot- xin:%d, num:%d, rd:%d, clk:%d:0x%x/%d\n",
+				rot->vbif_cfg[i].xin_id,
+				rot->vbif_cfg[i].num,
+				rot->vbif_cfg[i].is_read,
+				rot->vbif_cfg[i].clk_ctrl,
+				sde_cfg->mdp[0].clk_ctrls[index].reg_off,
+				sde_cfg->mdp[0].clk_ctrls[index].bit_off);
+	}
+
+	rot->vbif_idx = VBIF_RT;
+	rot->xin_count = off_count;
+
+end:
+	kfree(prop_value);
+}
+
 static int sde_rot_parse_dt(struct device_node *np,
 		struct sde_mdss_cfg *sde_cfg)
 {
@@ -1682,10 +1792,11 @@
 				rot->slice_size = llcc_get_slice_size(slice);
 				rot->pdev = pdev;
 				llcc_slice_putd(slice);
-				sde_cfg->rot_count++;
 				SDE_DEBUG("rot:%d scid:%d slice_size:%zukb\n",
 						rot->id, rot->scid,
 						rot->slice_size);
+				_sde_inline_rot_parse_dt(np, sde_cfg, rot);
+				sde_cfg->rot_count++;
 			}
 		} else {
 			rot->pdev = NULL;
@@ -1970,6 +2081,16 @@
 	if (rc)
 		goto end;
 
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+			&prop_count[VBIF_MEMTYPE_0], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+			&prop_count[VBIF_MEMTYPE_1], NULL);
+	if (rc)
+		goto end;
+
 	sde_cfg->vbif_count = off_count;
 
 	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
@@ -2118,6 +2239,19 @@
 		if (vbif->qos_rt_tbl.npriority_lvl ||
 				vbif->qos_nrt_tbl.npriority_lvl)
 			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
+
+		vbif->memtype_count = prop_count[VBIF_MEMTYPE_0] +
+					prop_count[VBIF_MEMTYPE_1];
+		if (vbif->memtype_count > MAX_XIN_COUNT) {
+			vbif->memtype_count = 0;
+			SDE_ERROR("too many memtype defs, ignoring entries\n");
+		}
+		for (j = 0, k = 0; j < prop_count[VBIF_MEMTYPE_0]; j++)
+			vbif->memtype[k++] = PROP_VALUE_ACCESS(
+					prop_value, VBIF_MEMTYPE_0, j);
+		for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
+			vbif->memtype[k++] = PROP_VALUE_ACCESS(
+					prop_value, VBIF_MEMTYPE_1, j);
 	}
 
 end:
@@ -2420,6 +2554,11 @@
 	if (rc)
 		goto freeprop;
 
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_CDP_SETTING], 1,
+			&prop_count[PERF_CDP_SETTING], NULL);
+	if (rc)
+		goto freeprop;
+
 	rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
 			prop_count, prop_exists, prop_value);
 	if (rc)
@@ -2559,6 +2698,27 @@
 		cfg->perf.qos_lut_tbl[j].nentry = count;
 	}
 
+	if (prop_exists[PERF_CDP_SETTING]) {
+		const u32 prop_size = 2;
+		u32 count = prop_count[PERF_CDP_SETTING] / prop_size;
+
+		count = min_t(u32, count, SDE_PERF_CDP_USAGE_MAX);
+
+		for (j = 0; j < count; j++) {
+			cfg->perf.cdp_cfg[j].rd_enable =
+					PROP_VALUE_ACCESS(prop_value,
+					PERF_CDP_SETTING, j * prop_size);
+			cfg->perf.cdp_cfg[j].wr_enable =
+					PROP_VALUE_ACCESS(prop_value,
+					PERF_CDP_SETTING, j * prop_size + 1);
+			SDE_DEBUG("cdp usage:%d rd:%d wr:%d\n",
+				j, cfg->perf.cdp_cfg[j].rd_enable,
+				cfg->perf.cdp_cfg[j].wr_enable);
+		}
+
+		cfg->has_cdp = true;
+	}
+
 freeprop:
 	kfree(prop_value);
 end:
@@ -2760,6 +2920,10 @@
 	if (rc)
 		goto end;
 
+	rc = sde_perf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = sde_rot_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -2810,10 +2974,6 @@
 	if (rc)
 		goto end;
 
-	rc = sde_perf_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
 	return sde_cfg;
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 7d1c180..74fa8f9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -60,6 +60,8 @@
 #define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
 #define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
 
+#define MAX_XIN_COUNT 16
+
 /**
  * Supported UBWC feature versions
  */
@@ -79,7 +81,6 @@
  * @SDE_MDP_UBWC_1_0,      This chipsets supports Universal Bandwidth
  *                         compression initial revision
  * @SDE_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
- * @SDE_MDP_CDP,           Client driven prefetch
  * @SDE_MDP_MAX            Maximum value
 
  */
@@ -89,7 +90,6 @@
 	SDE_MDP_BWC,
 	SDE_MDP_UBWC_1_0,
 	SDE_MDP_UBWC_1_5,
-	SDE_MDP_CDP,
 	SDE_MDP_MAX
 };
 
@@ -114,6 +114,7 @@
  * @SDE_SSPP_SBUF,           SSPP support inline stream buffer
  * @SDE_SSPP_TS_PREFILL      Supports prefill with traffic shaper
  * @SDE_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @SDE_SSPP_CDP             Supports client driven prefetch
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -136,6 +137,7 @@
 	SDE_SSPP_SBUF,
 	SDE_SSPP_TS_PREFILL,
 	SDE_SSPP_TS_PREFILL_REC1,
+	SDE_SSPP_CDP,
 	SDE_SSPP_MAX
 };
 
@@ -245,6 +247,7 @@
  *                          the destination image
  * @SDE_WB_QOS,             Writeback supports QoS control, danger/safe/creq
  * @SDE_WB_QOS_8LVL,        Writeback supports 8-level QoS control
+ * @SDE_WB_CDP,             Writeback supports client driven prefetch
  * @SDE_WB_MAX              maximum value
  */
 enum {
@@ -262,6 +265,7 @@
 	SDE_WB_XY_ROI_OFFSET,
 	SDE_WB_QOS,
 	SDE_WB_QOS_8LVL,
+	SDE_WB_CDP,
 	SDE_WB_MAX
 };
 
@@ -484,6 +488,8 @@
 	SDE_CLK_CTRL_WB0,
 	SDE_CLK_CTRL_WB1,
 	SDE_CLK_CTRL_WB2,
+	SDE_CLK_CTRL_INLINE_ROT0_SSPP,
+	SDE_CLK_CTRL_INLINE_ROT0_WB,
 	SDE_CLK_CTRL_MAX,
 };
 
@@ -644,6 +650,20 @@
 };
 
 /**
+ * struct sde_rot_vbif_cfg - inline rotator vbif configs
+ * @xin_id             xin client id
+ * @num                enum identifying this block
+ * @is_read            indicates read/write client
+ * @clk_ctrl           index to clk control
+ */
+struct sde_rot_vbif_cfg {
+	u32 xin_id;
+	u32 num;
+	bool is_read;
+	enum sde_clk_ctrl_type clk_ctrl;
+};
+
+/**
  * struct sde_rot_cfg - information of rotator blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -652,12 +672,19 @@
  * @pdev               private device handle
  * @scid               subcache identifier
  * @slice_size         subcache slice size
+ * @vbif_idx           vbif identifier
+ * @xin_count          number of xin clients
+ * @vbif_cfg           vbif settings related to rotator
  */
 struct sde_rot_cfg {
 	SDE_HW_BLK_INFO;
 	void *pdev;
 	int scid;
 	size_t slice_size;
+	u32 vbif_idx;
+
+	u32 xin_count;
+	struct sde_rot_vbif_cfg vbif_cfg[MAX_BLOCKS];
 };
 
 /**
@@ -703,6 +730,8 @@
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
  * @qos_rt_tbl         real-time QoS priority table
  * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
  */
 struct sde_vbif_cfg {
 	SDE_HW_BLK_INFO;
@@ -713,6 +742,8 @@
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
 	struct sde_vbif_qos_tbl qos_rt_tbl;
 	struct sde_vbif_qos_tbl qos_nrt_tbl;
+	u32 memtype_count;
+	u32 memtype[MAX_XIN_COUNT];
 };
 /**
  * struct sde_reg_dma_cfg - information of lut dma blocks
@@ -729,6 +760,27 @@
 };
 
 /**
+ * Define CDP use cases
+ * @SDE_PERF_CDP_USAGE_RT: real-time use cases
+ * @SDE_PERF_CDP_USAGE_NRT: non-real-time use cases such as WFD
+ */
+enum {
+	SDE_PERF_CDP_USAGE_RT,
+	SDE_PERF_CDP_USAGE_NRT,
+	SDE_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct sde_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct sde_perf_cdp_cfg {
+	bool rd_enable;
+	bool wr_enable;
+};
+
+/**
  * struct sde_perf_cfg - performance control settings
  * @max_bw_low         low threshold of maximum bandwidth (kbps)
  * @max_bw_high        high threshold of maximum bandwidth (kbps)
@@ -748,6 +800,7 @@
  * @safe_lut_tbl: LUT tables for safe signals
  * @danger_lut_tbl: LUT tables for danger signals
  * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
  */
 struct sde_perf_cfg {
 	u32 max_bw_low;
@@ -768,6 +821,7 @@
 	u32 safe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
 	u32 danger_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
 	struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+	struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
 };
 
 /**
@@ -785,7 +839,7 @@
  * @csc_type           csc or csc_10bit support.
  * @smart_dma_rev      Supported version of SmartDMA feature.
  * @has_src_split      source split feature status
- * @has_cdp            Client driver prefetch feature status
+ * @has_cdp            Client driven prefetch feature status
  * @has_wb_ubwc        UBWC feature supported on WB
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index ad2910e..304106d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -15,6 +15,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_cdm.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define CDM_CSC_10_OPMODE                  0x000
 #define CDM_CSC_10_BASE                    0x004
@@ -267,6 +268,11 @@
 	ops->disable = sde_hw_cdm_disable;
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m,
@@ -274,6 +280,7 @@
 {
 	struct sde_hw_cdm *c;
 	struct sde_cdm_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -286,12 +293,19 @@
 	}
 
 	c->idx = idx;
-	c->cdm_hw_cap = cfg;
-	_setup_cdm_ops(&c->ops, c->cdm_hw_cap->features);
+	c->caps = cfg;
+	_setup_cdm_ops(&c->ops, c->caps->features);
 	c->hw_mdp = hw_mdp;
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CDM, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
 	/*
 	 * Perform any default initialization for the chroma down module
 	 * @setup default csc coefficients
@@ -299,9 +313,16 @@
 	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm)
 {
+	if (cdm)
+		sde_hw_blk_destroy(&cdm->base);
 	kfree(cdm);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
index a0afd89..2b3683d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
@@ -15,6 +15,7 @@
 
 #include "sde_hw_mdss.h"
 #include "sde_hw_top.h"
+#include "sde_hw_blk.h"
 
 struct sde_hw_cdm;
 
@@ -92,11 +93,11 @@
 };
 
 struct sde_hw_cdm {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* chroma down */
-	const struct sde_cdm_cfg   *cdm_hw_cap;
+	const struct sde_cdm_cfg *caps;
 	enum  sde_cdm  idx;
 
 	/* mdp top hw driver */
@@ -107,6 +108,16 @@
 };
 
 /**
+ * to_sde_hw_cdm - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_cdm *to_sde_hw_cdm(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_cdm, base);
+}
+
+/**
  * sde_hw_cdm_init - initializes the cdm hw driver object.
  * should be called once before accessing every cdm.
  * @idx:  cdm index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index a62aa6e..ba55086 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -14,6 +14,7 @@
 #include "sde_hwio.h"
 #include "sde_hw_ctl.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define   CTL_LAYER(lm)                 \
 	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -354,7 +355,7 @@
 }
 
 static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
-	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
@@ -363,9 +364,6 @@
 	u8 stages;
 	int pipes_per_stage;
 
-	if (index >= CRTC_DUAL_MIXERS)
-		return;
-
 	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
 	if (stages < 0)
 		return;
@@ -388,9 +386,9 @@
 
 		for (j = 0 ; j < pipes_per_stage; j++) {
 			enum sde_sspp_multirect_index rect_index =
-				stage_cfg->multirect_index[index][i][j];
+				stage_cfg->multirect_index[i][j];
 
-			switch (stage_cfg->stage[index][i][j]) {
+			switch (stage_cfg->stage[i][j]) {
 			case SSPP_VIG0:
 				if (rect_index == SDE_SSPP_RECT_1) {
 					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
@@ -562,12 +560,18 @@
 	}
 };
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m)
 {
 	struct sde_hw_ctl *c;
 	struct sde_ctl_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -586,13 +590,26 @@
 	c->mixer_count = m->mixer_count;
 	c->mixer_hw_caps = m->mixer;
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_CTL, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
 {
+	if (ctx)
+		sde_hw_blk_destroy(&ctx->base);
 	kfree(ctx);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index ace05e8..a111916 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -17,6 +17,7 @@
 #include "sde_hw_util.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_sspp.h"
+#include "sde_hw_blk.h"
 
 /**
  * sde_ctl_mode_sel: Interface mode selection
@@ -49,8 +50,8 @@
  * @multirect_index: index of the rectangle of SSPP.
  */
 struct sde_hw_stage_cfg {
-	enum sde_sspp stage[CRTC_DUAL_MIXERS][SDE_STAGE_MAX][PIPES_PER_STAGE];
-	enum sde_sspp_multirect_index multirect_index[CRTC_DUAL_MIXERS]
+	enum sde_sspp stage[SDE_STAGE_MAX][PIPES_PER_STAGE];
+	enum sde_sspp_multirect_index multirect_index
 					[SDE_STAGE_MAX][PIPES_PER_STAGE];
 };
 
@@ -201,7 +202,7 @@
 	 * @cfg       : blend stage configuration
 	 */
 	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
-		enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+		enum sde_lm lm, struct sde_hw_stage_cfg *cfg);
 
 	void (*setup_sbuf_cfg)(struct sde_hw_ctl *ctx,
 		struct sde_ctl_sbuf_cfg *cfg);
@@ -209,16 +210,17 @@
 
 /**
  * struct sde_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
  * @hw: block register map object
  * @idx: control path index
- * @ctl_hw_caps: control path capabilities
+ * @caps: control path capabilities
  * @mixer_count: number of mixers
  * @mixer_hw_caps: mixer hardware capabilities
  * @pending_flush_mask: storage for pending ctl_flush managed via ops
  * @ops: operation list
  */
 struct sde_hw_ctl {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* ctl path */
@@ -233,6 +235,16 @@
 };
 
 /**
+ * to_sde_hw_ctl - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_ctl *to_sde_hw_ctl(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_ctl, base);
+}
+
+/**
  * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
  * should be called before accessing every ctl path registers.
  * @idx:  ctl_path index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
index 62193f9..1a346f0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -16,6 +16,7 @@
 #include "sde_hw_dsc.h"
 #include "sde_hw_pingpong.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define DSC_COMMON_MODE	                0x000
 #define DSC_ENC                         0X004
@@ -200,12 +201,18 @@
 	ops->dsc_config_thresh = sde_hw_dsc_config_thresh;
 };
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_dsc *sde_hw_dsc_init(enum sde_dsc idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m)
 {
 	struct sde_hw_dsc *c;
 	struct sde_dsc_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -218,16 +225,29 @@
 	}
 
 	c->idx = idx;
-	c->dsc_hw_cap = cfg;
-	_setup_dsc_ops(&c->ops, c->dsc_hw_cap->features);
+	c->caps = cfg;
+	_setup_dsc_ops(&c->ops, c->caps->features);
+
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_DSC, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 		c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_dsc_destroy(struct sde_hw_dsc *dsc)
 {
+	if (dsc)
+		sde_hw_blk_destroy(&dsc->base);
 	kfree(dsc);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.h b/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
index 0703531..d1678f4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
@@ -13,6 +13,11 @@
 #ifndef _SDE_HW_DSC_H
 #define _SDE_HW_DSC_H
 
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_blk.h"
+
 struct sde_hw_dsc;
 struct msm_display_dsc_info;
 
@@ -52,18 +57,28 @@
 };
 
 struct sde_hw_dsc {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* dsc */
 	enum sde_dsc idx;
-	const struct sde_dsc_cfg *dsc_hw_cap;
+	const struct sde_dsc_cfg *caps;
 
 	/* ops */
 	struct sde_hw_dsc_ops ops;
 };
 
 /**
+ * to_sde_hw_dsc - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_dsc *to_sde_hw_dsc(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_dsc, base);
+}
+
+/**
  * sde_hw_dsc_init - initializes the dsc block for the passed
  *                   dsc idx.
  * @idx:  DSC index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 586d1f1..e766cdb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -17,6 +17,7 @@
 #include "sde_hw_color_processing.h"
 #include "sde_dbg.h"
 #include "sde_ad4.h"
+#include "sde_kms.h"
 
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
@@ -118,12 +119,18 @@
 	}
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
 			void __iomem *addr,
 			struct sde_mdss_cfg *m)
 {
 	struct sde_hw_dspp *c;
 	struct sde_dspp_cfg *cfg;
+	int rc;
 
 	if (!addr || !m)
 		return ERR_PTR(-EINVAL);
@@ -143,15 +150,28 @@
 	c->cap = cfg;
 	_setup_dspp_ops(c, c->cap->features);
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_DSPP, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp)
 {
-	if (dspp)
+	if (dspp) {
 		reg_dmav1_deinit_dspp_ops(dspp->idx);
+		sde_hw_blk_destroy(&dspp->base);
+	}
 	kfree(dspp);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 70b3e56..0baa970 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -13,6 +13,8 @@
 #ifndef _SDE_HW_DSPP_H
 #define _SDE_HW_DSPP_H
 
+#include "sde_hw_blk.h"
+
 struct sde_hw_dspp;
 
 /**
@@ -166,17 +168,14 @@
 
 /**
  * struct sde_hw_dspp - dspp description
- * @base_off:     MDP register mapped offset
- * @blk_off:      DSPP offset relative to mdss offset
- * @length        Length of register block offset
- * @hwversion     Mdss hw version number
- * @idx:          DSPP index
- * @dspp_hw_cap:  Pointer to layer_cfg
- * @highest_bank_bit:
- * @ops:          Pointer to operations possible for this dspp
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to layer_cfg
+ * @ops: Pointer to operations possible for this DSPP
  */
 struct sde_hw_dspp {
-	/* base */
+	struct sde_hw_blk base;
 	 struct sde_hw_blk_reg_map hw;
 
 	/* dspp */
@@ -188,6 +187,16 @@
 };
 
 /**
+ * to_sde_hw_dspp - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_dspp *to_sde_hw_dspp(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_dspp, base);
+}
+
+/**
  * sde_hw_dspp_init - initializes the dspp hw driver object.
  * should be called once before accessing every dspp.
  * @idx:  DSPP index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 53a48c8..8c3d4fc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -974,6 +974,11 @@
 	if (!intr)
 		return 0;
 
+	if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
 	spin_lock_irqsave(&intr->mask_lock, irq_flags);
 
 	reg_idx = sde_irq_map[irq_idx].reg_idx;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index be83afe..35f1800 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -14,6 +14,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_intf.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define INTF_TIMING_ENGINE_EN           0x000
 #define INTF_CONFIG                     0x004
@@ -301,12 +302,18 @@
 		ops->setup_rot_start = sde_hw_intf_setup_rot_start;
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m)
 {
 	struct sde_hw_intf *c;
 	struct sde_intf_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -327,14 +334,27 @@
 	c->mdss = m;
 	_setup_intf_ops(&c->ops, c->cap->features);
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_INTF, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_intf_destroy(struct sde_hw_intf *intf)
 {
+	if (intf)
+		sde_hw_blk_destroy(&intf->base);
 	kfree(intf);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index d24e83a..83e206d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -16,6 +16,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
+#include "sde_hw_blk.h"
 
 struct sde_hw_intf;
 
@@ -86,7 +87,7 @@
 };
 
 struct sde_hw_intf {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* intf */
@@ -99,6 +100,16 @@
 };
 
 /**
+ * to_sde_hw_intf - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_intf *to_sde_hw_intf(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_intf, base);
+}
+
+/**
  * sde_hw_intf_init(): Initializes the intf driver for the passed
  * interface idx.
  * @idx:  interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index fedc72c..3d282ee 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -16,6 +16,7 @@
 #include "sde_hw_lm.h"
 #include "sde_hw_mdss.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define LM_OP_MODE                        0x00
 #define LM_OUT_SIZE                       0x04
@@ -277,12 +278,18 @@
 	}
 };
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m)
 {
 	struct sde_hw_mixer *c;
 	struct sde_lm_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -299,13 +306,26 @@
 	c->cap = cfg;
 	_setup_mixer_ops(m, &c->ops, c->cap->features);
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_LM, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_lm_destroy(struct sde_hw_mixer *lm)
 {
+	if (lm)
+		sde_hw_blk_destroy(&lm->base);
 	kfree(lm);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 45c0fc9..8a146bd 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -15,6 +15,7 @@
 
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
+#include "sde_hw_blk.h"
 
 struct sde_hw_mixer;
 
@@ -89,7 +90,7 @@
 };
 
 struct sde_hw_mixer {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* lm */
@@ -106,6 +107,16 @@
 };
 
 /**
+ * to_sde_hw_mixer - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_mixer *to_sde_hw_mixer(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_mixer, base);
+}
+
+/**
  * sde_hw_lm_init(): Initializes the mixer hw driver object.
  * should be called once before accessing every mixer.
  * @idx:  mixer index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 31aa031..582ab5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -283,6 +283,13 @@
 	ROT_MAX
 };
 
+enum sde_inline_rot {
+	INLINE_ROT_NONE,
+	INLINE_ROT0_SSPP,
+	INLINE_ROT0_WB,
+	INLINE_ROT_MAX
+};
+
 /**
  * SDE HW,Component order color map
  */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index a77b8d3..37b74df 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -10,11 +10,14 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/iopoll.h>
+
 #include "sde_hw_mdss.h"
 #include "sde_hwio.h"
 #include "sde_hw_catalog.h"
 #include "sde_hw_pingpong.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define PP_TEAR_CHECK_EN                0x000
 #define PP_SYNC_CONFIG_VSYNC            0x004
@@ -84,38 +87,76 @@
 	return 0;
 }
 
-int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
+static int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
 		struct sde_hw_autorefresh *cfg)
 {
-	struct sde_hw_blk_reg_map *c = &pp->hw;
+	struct sde_hw_blk_reg_map *c;
 	u32 refresh_cfg;
 
+	if (!pp || !cfg)
+		return -EINVAL;
+	c = &pp->hw;
+
 	if (cfg->enable)
 		refresh_cfg = BIT(31) | cfg->frame_count;
 	else
 		refresh_cfg = 0;
 
-	SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG,
-			refresh_cfg);
+	SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG, refresh_cfg);
+	SDE_EVT32(pp->idx - PINGPONG_0, refresh_cfg);
 
 	return 0;
 }
 
-void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)
+static int sde_hw_pp_get_autorefresh_config(struct sde_hw_pingpong *pp,
+		struct sde_hw_autorefresh *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 val;
+
+	if (!pp || !cfg)
+		return -EINVAL;
+
+	c = &pp->hw;
+	val = SDE_REG_READ(c, PP_AUTOREFRESH_CONFIG);
+	cfg->enable = (val & BIT(31)) >> 31;
+	cfg->frame_count = val & 0xffff;
+
+	return 0;
+}
+
+static int sde_hw_pp_poll_timeout_wr_ptr(struct sde_hw_pingpong *pp,
+		u32 timeout_us)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 val;
+	int rc;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+			val, (val & 0xffff) >= 1, 10, timeout_us);
+
+	return rc;
+}
+
+static void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
 
 	SDE_REG_WRITE(c, PP_DSC_MODE, 1);
 }
 
-void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
+static void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
 
 	SDE_REG_WRITE(c, PP_DSC_MODE, 0);
 }
 
-int sde_hw_pp_setup_dsc(struct sde_hw_pingpong *pp)
+static int sde_hw_pp_setup_dsc(struct sde_hw_pingpong *pp)
 {
 	struct sde_hw_blk_reg_map *pp_c = &pp->hw;
 	int data;
@@ -126,7 +167,7 @@
 	return 0;
 }
 
-int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
+static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
 
@@ -134,18 +175,44 @@
 	return 0;
 }
 
-int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
+static int sde_hw_pp_connect_external_te(struct sde_hw_pingpong *pp,
+		bool enable_external_te)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 cfg;
+	int orig;
+
+	if (!pp)
+		return -EINVAL;
+
+	c = &pp->hw;
+	cfg = SDE_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+	orig = (bool)(cfg & BIT(20));
+	if (enable_external_te)
+		cfg |= BIT(20);
+	else
+		cfg &= ~BIT(20);
+	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	SDE_EVT32(pp->idx - PINGPONG_0, cfg);
+
+	return orig;
+}
+
+static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
 		struct sde_hw_pp_vsync_info *info)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
 	u32 val;
 
 	val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL);
-	info->init_val = val & 0xffff;
+	info->rd_ptr_init_val = val & 0xffff;
 
 	val = SDE_REG_READ(c, PP_INT_COUNT_VAL);
-	info->vsync_count = (val & 0xffff0000) >> 16;
-	info->line_count = val & 0xffff;
+	info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+	info->rd_ptr_line_count = val & 0xffff;
+
+	val = SDE_REG_READ(c, PP_LINE_COUNT);
+	info->wr_ptr_line_count = val & 0xffff;
 
 	return 0;
 }
@@ -155,11 +222,19 @@
 {
 	ops->setup_tearcheck = sde_hw_pp_setup_te_config;
 	ops->enable_tearcheck = sde_hw_pp_enable_te;
+	ops->connect_external_te = sde_hw_pp_connect_external_te;
 	ops->get_vsync_info = sde_hw_pp_get_vsync_info;
 	ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
 	ops->setup_dsc = sde_hw_pp_setup_dsc;
 	ops->enable_dsc = sde_hw_pp_dsc_enable;
 	ops->disable_dsc = sde_hw_pp_dsc_disable;
+	ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
+	ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
+};
+
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
 };
 
 struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
@@ -168,6 +243,7 @@
 {
 	struct sde_hw_pingpong *c;
 	struct sde_pingpong_cfg *cfg;
+	int rc;
 
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
@@ -180,16 +256,29 @@
 	}
 
 	c->idx = idx;
-	c->pingpong_hw_cap = cfg;
-	_setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features);
+	c->caps = cfg;
+	_setup_pingpong_ops(&c->ops, c->caps->features);
+
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_PINGPONG, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp)
 {
+	if (pp)
+		sde_hw_blk_destroy(&pp->base);
 	kfree(pp);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index 90f6171..6dbf4aa 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -13,6 +13,11 @@
 #ifndef _SDE_HW_PINGPONG_H
 #define _SDE_HW_PINGPONG_H
 
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_blk.h"
+
 struct sde_hw_pingpong;
 
 struct sde_hw_tear_check {
@@ -36,9 +41,10 @@
 };
 
 struct sde_hw_pp_vsync_info {
-	u32 init_val; /* value of rd pointer at vsync edge */
-	u32 vsync_count;    /* mdp clocks to complete one line */
-	u32 line_count;   /* current line count */
+	u32 rd_ptr_init_val;	/* value of rd pointer at vsync edge */
+	u32 rd_ptr_frame_count;	/* num frames sent since enabling interface */
+	u32 rd_ptr_line_count;	/* current line on panel (rd ptr) */
+	u32 wr_ptr_line_count;	/* current line within pp fifo (wr ptr) */
 };
 
 struct sde_hw_dsc_cfg {
@@ -72,6 +78,13 @@
 			bool enable);
 
 	/**
+	 * read-modify-write to either set or clear listening to external TE
+	 * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+	 */
+	int (*connect_external_te)(struct sde_hw_pingpong *pp,
+			bool enable_external_te);
+
+	/**
 	 * provides the programmed and current
 	 * line_count
 	 */
@@ -85,6 +98,18 @@
 			struct sde_hw_autorefresh *cfg);
 
 	/**
+	 * retrieve autorefresh config from hardware
+	 */
+	int (*get_autorefresh)(struct sde_hw_pingpong *pp,
+			struct sde_hw_autorefresh *cfg);
+
+	/**
+	 * poll until write pointer transmission starts
+	 * @Return: 0 on success, -ETIMEDOUT on timeout
+	 */
+	int (*poll_timeout_wr_ptr)(struct sde_hw_pingpong *pp, u32 timeout_us);
+
+	/**
 	 * Program the dsc compression block
 	 */
 	int (*setup_dsc)(struct sde_hw_pingpong *pp);
@@ -101,18 +126,28 @@
 };
 
 struct sde_hw_pingpong {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
 	/* pingpong */
 	enum sde_pingpong idx;
-	const struct sde_pingpong_cfg *pingpong_hw_cap;
+	const struct sde_pingpong_cfg *caps;
 
 	/* ops */
 	struct sde_hw_pingpong_ops ops;
 };
 
 /**
+ * to_sde_hw_pingpong - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_pingpong *to_sde_hw_pingpong(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_pingpong, base);
+}
+
+/**
  * sde_hw_pingpong_init - initializes the pingpong driver for the passed
  *	pingpong idx.
  * @idx:  Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index d5f03a6a..bbd5931 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -900,8 +900,7 @@
 	c->caps = cfg;
 	_setup_rot_ops(&c->ops, c->caps->features);
 
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx,
-			&sde_hw_rot_ops);
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx, &sde_hw_rot_ops);
 	if (rc) {
 		SDE_ERROR("failed to init hw blk %d\n", rc);
 		goto blk_init_error;
@@ -922,9 +921,11 @@
  */
 void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
 {
-	sde_hw_blk_destroy(&hw_rot->base);
-	kfree(hw_rot->downscale_caps);
-	kfree(hw_rot->format_caps);
+	if (hw_rot) {
+		sde_hw_blk_destroy(&hw_rot->base);
+		kfree(hw_rot->downscale_caps);
+		kfree(hw_rot->format_caps);
+	}
 	kfree(hw_rot);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a9a493f..bc1b1e7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -16,6 +16,7 @@
 #include "sde_hw_sspp.h"
 #include "sde_hw_color_processing.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define SDE_FETCH_CONFIG_RESET_VALUE   0x00000087
 
@@ -82,6 +83,7 @@
 #define SSPP_SW_PIX_EXT_C3_TB              0x124
 #define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
 #define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
 #define SSPP_UBWC_ERROR_STATUS             0x138
 #define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
 #define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
@@ -1102,6 +1104,30 @@
 	SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
 }
 
+static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cdp_cfg *cfg)
+{
+	u32 idx;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->tile_amortize_enable)
+		cdp_cntl |= BIT(2);
+	if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
 static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
@@ -1163,6 +1189,9 @@
 		c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
 		c->ops.get_sbuf_status = sde_hw_sspp_get_sbuf_status;
 	}
+
+	if (test_bit(SDE_SSPP_CDP, &features))
+		c->ops.setup_cdp = sde_hw_sspp_setup_cdp;
 }
 
 static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
@@ -1188,12 +1217,18 @@
 	return ERR_PTR(-ENOMEM);
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
 			void __iomem *addr,
 			struct sde_mdss_cfg *catalog)
 {
 	struct sde_hw_pipe *hw_pipe;
 	struct sde_sspp_cfg *cfg;
+	int rc;
 
 	if (!addr || !catalog)
 		return ERR_PTR(-EINVAL);
@@ -1215,6 +1250,12 @@
 	hw_pipe->cap = cfg;
 	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
 
+	rc = sde_hw_blk_init(&hw_pipe->base, SDE_HW_BLK_SSPP, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
 			hw_pipe->hw.blk_off,
 			hw_pipe->hw.blk_off + hw_pipe->hw.length,
@@ -1229,10 +1270,17 @@
 			hw_pipe->hw.xin_id);
 
 	return hw_pipe;
+
+blk_init_error:
+	kzfree(hw_pipe);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
 {
+	if (ctx)
+		sde_hw_blk_destroy(&ctx->base);
 	kfree(ctx);
 }
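
The new setup_cdp op is meant to be driven per-plane; a sketch of how a caller fills the configuration, mirroring the sde_plane change later in this patch (pipe_hw and fmt are assumed to be valid):

	struct sde_hw_pipe_cdp_cfg cdp_cfg;

	memset(&cdp_cfg, 0, sizeof(cdp_cfg));
	cdp_cfg.enable = true;					/* BIT(0) */
	cdp_cfg.ubwc_meta_enable = SDE_FORMAT_IS_UBWC(fmt);	/* BIT(1) */
	cdp_cfg.tile_amortize_enable = SDE_FORMAT_IS_UBWC(fmt) ||
			SDE_FORMAT_IS_TILE(fmt);		/* BIT(2) */
	cdp_cfg.preload_ahead = SDE_SSPP_CDP_PRELOAD_AHEAD_64;	/* BIT(3) */

	if (pipe_hw->ops.setup_cdp)
		pipe_hw->ops.setup_cdp(pipe_hw, &cdp_cfg);
	/* with all four fields set, SSPP_CDP_CNTL is programmed to 0xF */
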
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 44b7ea9..e4be055 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -16,6 +16,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
+#include "sde_hw_blk.h"
 #include "sde_formats.h"
 #include "sde_color_processing.h"
 
@@ -308,6 +309,30 @@
 };
 
 /**
+ * enum CDP preload ahead address size
+ */
+enum {
+	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+	SDE_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ *	SDE_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_pipe_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
  * enum system cache rotation operation mode
  */
 enum {
@@ -574,24 +599,28 @@
 	void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_ts_cfg *cfg,
 			enum sde_sspp_multirect_index index);
+
+	/**
+	 * setup_cdp - setup client driven prefetch
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to cdp configuration
+	 */
+	void (*setup_cdp)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cdp_cfg *cfg);
 };
 
 /**
  * struct sde_hw_pipe - pipe description
- * @base_off:     mdp register mapped offset
- * @blk_off:      pipe offset relative to mdss offset
- * @length        length of register block offset
- * @hwversion     mdss hw version number
- * @catalog:      back pointer to catalog
- * @mdp:          pointer to associated mdp portion of the catalog
- * @idx:          pipe index
- * @type :        pipe type, VIG/DMA/RGB/CURSOR, certain operations are not
- *                supported for each pipe type
- * @pipe_hw_cap:  pointer to layer_cfg
- * @ops:          pointer to operations possible for this pipe
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
  */
 struct sde_hw_pipe {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 	struct sde_mdss_cfg *catalog;
 	struct sde_mdp_cfg *mdp;
@@ -605,6 +634,16 @@
 };
 
 /**
+ * to_sde_hw_pipe - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_pipe *to_sde_hw_pipe(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_pipe, base);
+}
+
+/**
  * sde_hw_sspp_init - initializes the sspp hw driver object.
  * Should be called once before accessing every pipe.
  * @idx:  Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 19f999e..b773187 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -14,6 +14,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_top.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define SSPP_SPARE                        0x28
 #define UBWC_STATIC                       0x144
@@ -152,8 +153,8 @@
 	if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
 		return false;
 
-	reg_off = mdp->cap->clk_ctrls[clk_ctrl].reg_off;
-	bit_off = mdp->cap->clk_ctrls[clk_ctrl].bit_off;
+	reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
 
 	reg_val = SDE_REG_READ(c, reg_off);
 
@@ -337,12 +338,18 @@
 	return ERR_PTR(-EINVAL);
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
 		void __iomem *addr,
 		const struct sde_mdss_cfg *m)
 {
 	struct sde_hw_mdp *mdp;
 	const struct sde_mdp_cfg *cfg;
+	int rc;
 
 	if (!addr || !m)
 		return ERR_PTR(-EINVAL);
@@ -361,8 +368,14 @@
 	 * Assign ops
 	 */
 	mdp->idx = idx;
-	mdp->cap = cfg;
-	_setup_mdp_ops(&mdp->ops, mdp->cap->features);
+	mdp->caps = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+	rc = sde_hw_blk_init(&mdp->base, SDE_HW_BLK_TOP, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
 			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
@@ -370,10 +383,17 @@
 	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
 
 	return mdp;
+
+blk_init_error:
+	kzfree(mdp);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
 {
+	if (mdp)
+		sde_hw_blk_destroy(&mdp->base);
 	kfree(mdp);
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index faf25c7..573780e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -16,6 +16,7 @@
 #include "sde_hw_catalog.h"
 #include "sde_hw_mdss.h"
 #include "sde_hw_util.h"
+#include "sde_hw_blk.h"
 
 struct sde_hw_mdp;
 
@@ -179,19 +180,29 @@
 };
 
 struct sde_hw_mdp {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 
-	/* intf */
+	/* top */
 	enum sde_mdp idx;
-	const struct sde_mdp_cfg *cap;
+	const struct sde_mdp_cfg *caps;
 
 	/* ops */
 	struct sde_hw_mdp_ops ops;
 };
 
 /**
- * sde_hw_intf_init - initializes the intf driver for the passed interface idx
+ * to_sde_hw_mdp - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_mdp *to_sde_hw_mdp(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_mdp, base);
+}
+
+/**
+ * sde_hw_mdptop_init - initializes the top driver for the passed idx
  * @idx:  Interface index for which driver object is required
  * @addr: Mapped register io address of MDP
  * @m:    Pointer to mdss catalog data
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 9b9763a..b5c273a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -31,11 +31,43 @@
 #define VBIF_IN_WR_LIM_CONF2		0x00C8
 #define VBIF_OUT_RD_LIM_CONF0		0x00D0
 #define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
 #define VBIF_XIN_HALT_CTRL0		0x0200
 #define VBIF_XIN_HALT_CTRL1		0x0204
 #define VBIF_XINL_QOS_RP_REMAP_000	0x0550
 #define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
 
+static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
+		u32 xin_id, u32 value)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg_off;
+	u32 bit_off;
+	u32 reg_val;
+
+	/*
+	 * Assume 4 bits per field and 8 fields per 32-bit register, giving
+	 * a maximum of 16 fields across the two registers
+	 */
+	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+		return;
+
+	c = &vbif->hw;
+
+	if (xin_id >= 8) {
+		xin_id -= 8;
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+	} else {
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+	}
+	bit_off = (xin_id & 0x7) * 4;
+	reg_val = SDE_REG_READ(c, reg_off);
+	reg_val &= ~(0x7 << bit_off);
+	reg_val |= (value & 0x7) << bit_off;
+	SDE_REG_WRITE(c, reg_off, reg_val);
+}
+
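
For reference, the xin_id to register/field mapping above works out as follows (worked example, not part of the patch):

	/*
	 * Example: xin_id = 10, value = 3
	 *   xin_id >= 8  ->  VBIF_OUT_AXI_AMEMTYPE_CONF1, xin_id becomes 2
	 *   bit_off      =   (2 & 0x7) * 4 = 8
	 *   reg_val      =   (old & ~(0x7 << 8)) | ((3 & 0x7) << 8)
	 * Each client owns a 4-bit slot; the low 3 bits of the slot carry
	 * the memory type value.
	 */
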
 static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
 		u32 xin_id, bool rd, u32 limit)
 {
@@ -144,6 +176,7 @@
 	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
 	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
 		ops->set_qos_remap = sde_hw_set_qos_remap;
+	ops->set_mem_type = sde_hw_set_mem_type;
 }
 
 static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index c67738b..80a9e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -71,6 +71,15 @@
 	 */
 	void (*set_qos_remap)(struct sde_hw_vbif *vbif,
 			u32 xin_id, u32 level, u32 remap_level);
+
+	/**
+	 * set_mem_type - set memory type
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @value: memory type value
+	 */
+	void (*set_mem_type)(struct sde_hw_vbif *vbif,
+			u32 xin_id, u32 value);
 };
 
 struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 5dbd794..e1bd841 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -16,6 +16,7 @@
 #include "sde_hw_wb.h"
 #include "sde_formats.h"
 #include "sde_dbg.h"
+#include "sde_kms.h"
 
 #define WB_DST_FORMAT			0x000
 #define WB_DST_OP_MODE			0x004
@@ -49,7 +50,7 @@
 #define WB_UBWC_STATIC_CTRL		0x144
 #define WB_CSC_BASE			0x260
 #define WB_DST_ADDR_SW_STATUS		0x2B0
-#define WB_CDP_CTRL			0x2B4
+#define WB_CDP_CNTL			0x2B4
 #define WB_OUT_IMAGE_SIZE		0x2C0
 #define WB_OUT_XY			0x2C4
 
@@ -96,7 +97,6 @@
 	u32 write_config = 0;
 	u32 opmode = 0;
 	u32 dst_addr_sw = 0;
-	u32 cdp_settings = 0x0;
 
 	chroma_samp = fmt->chroma_sample;
 
@@ -165,18 +165,6 @@
 	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
 	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
 	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
-
-	/* Enable CDP */
-	cdp_settings = BIT(0);
-
-	if (!SDE_FORMAT_IS_LINEAR(fmt))
-		cdp_settings |= BIT(1);
-
-	/* Enable 64 transactions if line mode*/
-	if (data->intf_mode == INTF_MODE_WB_LINE)
-		cdp_settings |= BIT(3);
-
-	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
 }
 
 static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
@@ -234,6 +222,27 @@
 	SDE_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
 }
 
+static void sde_hw_wb_setup_cdp(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cdp_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->preload_ahead == SDE_WB_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	SDE_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
 static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
 	unsigned long features)
 {
@@ -249,8 +258,16 @@
 		ops->setup_creq_lut = sde_hw_wb_setup_creq_lut;
 		ops->setup_qos_ctrl = sde_hw_wb_setup_qos_ctrl;
 	}
+
+	if (test_bit(SDE_WB_CDP, &features))
+		ops->setup_cdp = sde_hw_wb_setup_cdp;
 }
 
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
 struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
 		void __iomem *addr,
 		struct sde_mdss_cfg *m,
@@ -258,6 +275,7 @@
 {
 	struct sde_hw_wb *c;
 	struct sde_wb_cfg *cfg;
+	int rc;
 
 	if (!addr || !m || !hw_mdp)
 		return ERR_PTR(-EINVAL);
@@ -281,13 +299,26 @@
 	_setup_wb_ops(&c->ops, c->caps->features);
 	c->hw_mdp = hw_mdp;
 
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_WB, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
 
 	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
 }
 
 void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb)
 {
+	if (hw_wb)
+		sde_hw_blk_destroy(&hw_wb->base);
 	kfree(hw_wb);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index caf574e..70fe8a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -29,6 +29,30 @@
 };
 
 /**
+ * enum CDP preload ahead address size
+ */
+enum {
+	SDE_WB_CDP_PRELOAD_AHEAD_32,
+	SDE_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_wb_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	SDE_WB_CDP_PRELOAD_AHEAD_32,
+ *	SDE_WB_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_wb_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
  * struct sde_hw_wb_qos_cfg : Writeback pipe QoS configuration
  * @danger_lut: LUT for generate danger level based on fill level
  * @safe_lut: LUT for generate safe level based on fill level
@@ -95,20 +119,29 @@
 	 */
 	void (*setup_qos_ctrl)(struct sde_hw_wb *ctx,
 			struct sde_hw_wb_qos_cfg *cfg);
+
+	/**
+	 * setup_cdp - setup CDP
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe CDP configuration
+	 */
+	void (*setup_cdp)(struct sde_hw_wb *ctx,
+			struct sde_hw_wb_cdp_cfg *cfg);
 };
 
 /**
  * struct sde_hw_wb : WB driver object
- * @struct sde_hw_blk_reg_map *hw;
+ * @base: hardware block base structure
+ * @hw: block hardware details
  * @catalog: back pointer to catalog
- * @mdp:          pointer to associated mdp portion of the catalog
- * @idx
- * @wb_hw_caps
- * @ops
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: hardware index number within type
+ * @wb_hw_caps: hardware capabilities
+ * @ops: function pointers
  * @hw_mdp: MDP top level hardware block
  */
 struct sde_hw_wb {
-	/* base */
+	struct sde_hw_blk base;
 	struct sde_hw_blk_reg_map hw;
 	struct sde_mdss_cfg *catalog;
 	struct sde_mdp_cfg *mdp;
@@ -124,6 +157,16 @@
 };
 
 /**
+ * to_sde_hw_wb - convert base object sde_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct sde_hw_wb *to_sde_hw_wb(struct sde_hw_blk *hw)
+{
+	return container_of(hw, struct sde_hw_wb, base);
+}
+
+/**
  * sde_hw_wb_init(): Initializes and return writeback hw driver object.
  * @idx:  wb_path index for which driver object is required
  * @addr: mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index a7d6ecf..bda89cf 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -178,9 +178,9 @@
 		return -EINVAL;
 	}
 
-	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
+	debugfs_create_file("danger_status", 0600, sde_kms->debugfs_danger,
 			sde_kms, &sde_debugfs_danger_stats_fops);
-	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
+	debugfs_create_file("safe_status", 0600, sde_kms->debugfs_danger,
 			sde_kms, &sde_debugfs_safe_stats_fops);
 
 	return 0;
@@ -303,7 +303,7 @@
 		return -EINVAL;
 
 	/* allow debugfs_root to be NULL */
-	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0644, debugfs_root, p);
+	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);
 
 	(void) sde_debugfs_danger_init(sde_kms, debugfs_root);
 	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
@@ -353,16 +353,24 @@
 {
 	struct sde_kms *sde_kms;
 	struct msm_drm_private *priv;
+	struct drm_device *dev;
+	struct drm_encoder *encoder;
 
 	if (!kms)
 		return;
 	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
 
-	if (!sde_kms->dev || !sde_kms->dev->dev_private)
+	if (!dev || !dev->dev_private)
 		return;
-	priv = sde_kms->dev->dev_private;
+	priv = dev->dev_private;
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc != NULL)
+			sde_encoder_prepare_commit(encoder);
+
 }
 
 static void sde_kms_commit(struct msm_kms *kms,
@@ -404,6 +412,49 @@
 	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
 }
 
+static void sde_kms_wait_for_tx_complete(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state || !crtc->dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	dev = crtc->dev;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Video Mode - Wait for VSYNC
+		 * Cmd Mode   - Wait for PP_DONE. Will be no-op if transfer is
+		 *              complete
+		 */
+		SDE_EVT32_VERBOSE(DRMID(crtc));
+		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
+		if (ret && ret != -EWOULDBLOCK) {
+			SDE_ERROR(
+			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
+			crtc->base.id, encoder->base.id, ret);
+			break;
+		}
+	}
+}
+
 static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
 		struct drm_crtc *crtc)
 {
@@ -435,7 +486,7 @@
 		 * mode panels. This may be a no-op for command mode panels.
 		 */
 		SDE_EVT32_VERBOSE(DRMID(crtc));
-		ret = sde_encoder_wait_for_commit_done(encoder);
+		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
 		if (ret && ret != -EWOULDBLOCK) {
 			SDE_ERROR("wait for commit done returned %d\n", ret);
 			break;
@@ -1238,6 +1289,10 @@
 		sde_hw_intr_destroy(sde_kms->hw_intr);
 	sde_kms->hw_intr = NULL;
 
+	if (sde_kms->power_event)
+		sde_power_handle_unregister_event(
+				&priv->phandle, sde_kms->power_event);
+
 	_sde_kms_release_displays(sde_kms);
 
 	/* safe to call these more than once during shutdown */
@@ -1328,6 +1383,7 @@
 	.commit          = sde_kms_commit,
 	.complete_commit = sde_kms_complete_commit,
 	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
+	.wait_for_tx_complete = sde_kms_wait_for_tx_complete,
 	.enable_vblank   = sde_kms_enable_vblank,
 	.disable_vblank  = sde_kms_disable_vblank,
 	.check_modified_format = sde_format_check_modified_format,
@@ -1443,6 +1499,16 @@
 	return ptr;
 }
 
+static void sde_kms_handle_power_event(u32 event_type, void *usr)
+{
+	struct sde_kms *sde_kms = usr;
+
+	if (!sde_kms)
+		return;
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+		sde_vbif_init_memtypes(sde_kms);
+}
 
 static int sde_kms_hw_init(struct msm_kms *kms)
 {
@@ -1660,6 +1726,14 @@
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
+	/*
+	 * Handle (re)initializations during power enable
+	 */
+	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
+	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
+			SDE_POWER_EVENT_POST_ENABLE,
+			sde_kms_handle_power_event, sde_kms, "kms");
+
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 	return 0;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index d20af9f..058f19b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -159,6 +159,7 @@
 	struct sde_power_client *core_client;
 
 	struct ion_client *iclient;
+	struct sde_power_event *power_event;
 
 	/* directory entry for debugfs */
 	struct dentry *debugfs_danger;
@@ -312,10 +313,12 @@
 
 /**
  * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
+ *			(includes one extra byte for the null terminator)
  * @S: Pointer to sde_kms_info structure
  * Returns: Size of available byte data
  */
-#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len : 0)
+#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len + 1 \
+							: 0)
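
The extra byte matters when the accumulated key/value text is published as a property blob, as done for the plane info and rotator capability blobs later in this patch. A usage sketch; the property handles and key are illustrative:

	struct sde_kms_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return -ENOMEM;

	sde_kms_info_reset(info);
	sde_kms_info_add_keyint(info, "max_per_pipe_bw", 4500000);
	/*
	 * info->len counts only the formatted characters; DATALEN adds one
	 * so the terminating '\0' is copied into the blob as well.
	 */
	msm_property_set_blob(&psde->property_info, &psde->blob_info,
			info->data, SDE_KMS_INFO_DATALEN(info), PLANE_PROP_INFO);
	kfree(info);
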
 
 /**
  * sde_kms_info_reset - reset sde_kms_info structure
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 88b0543..6b8a9b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -94,6 +94,25 @@
 	SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
 };
 
+/**
+ * enum sde_plane_sclcheck_state - User scaler data status
+ *
+ * @SDE_PLANE_SCLCHECK_NONE: No user data provided
+ * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided
+ * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data
+ * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data
+ */
+enum sde_plane_sclcheck_state {
+	SDE_PLANE_SCLCHECK_NONE,
+	SDE_PLANE_SCLCHECK_INVALID,
+	SDE_PLANE_SCLCHECK_SCALER_V1,
+	SDE_PLANE_SCLCHECK_SCALER_V1_CHECK,
+	SDE_PLANE_SCLCHECK_SCALER_V2,
+	SDE_PLANE_SCLCHECK_SCALER_V2_CHECK,
+};
+
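
Roughly, the new states are consumed as follows (illustrative summary of this patch, not code added by it):

	/*
	 * SDE_PLANE_SCLCHECK_NONE             no user data; defaults are used
	 * SDE_PLANE_SCLCHECK_SCALER_V1        v1 data copied from user space
	 * SDE_PLANE_SCLCHECK_SCALER_V2_CHECK  v2 data copied, not yet checked
	 * SDE_PLANE_SCLCHECK_SCALER_V2        _sde_plane_validate_scaler_v2()
	 *                                     passed; data may be programmed
	 * SDE_PLANE_SCLCHECK_INVALID          validation failed; atomic_check
	 *                                     rejects the plane state
	 */
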
 /*
  * struct sde_plane - local sde plane structure
  * @csc_cfg: Decoded user configuration for csc
@@ -104,6 +123,7 @@
  * @sbuf_mode: force stream buffer mode if set
  * @sbuf_writeback: force stream buffer writeback if set
  * @revalidate: force revalidation of all the plane properties
+ * @scaler_check_state: Indicates status of user-provided pixel extension data
  * @blob_rot_caps: Pointer to rotator capability blob
  */
 struct sde_plane {
@@ -134,7 +154,7 @@
 	bool revalidate;
 
 	struct sde_hw_pixel_ext pixel_ext;
-	bool pixel_ext_usr;
+	enum sde_plane_sclcheck_state scaler_check_state;
 
 	struct sde_csc_cfg csc_cfg;
 	struct sde_csc_cfg *csc_usr_ptr;
@@ -627,10 +647,11 @@
 	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
 	qos_params.is_rt = psde->is_rt_pipe;
 
-	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d\n",
+	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
 			plane->base.id, qos_params.num,
 			qos_params.vbif_idx,
-			qos_params.xin_id, qos_params.is_rt);
+			qos_params.xin_id, qos_params.is_rt,
+			qos_params.clk_ctrl);
 
 	sde_vbif_set_qos_remap(sde_kms, &qos_params);
 }
@@ -704,6 +725,90 @@
 	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
 }
 
+/**
+ * _sde_plane_inline_rot_set_ot_limit - set OT limit for the given inline
+ * rotation xin client
+ * @plane: pointer to drm plane
+ * @crtc: pointer to drm crtc
+ * @cfg: pointer to rotator vbif config
+ * @rect_w: rotator frame width
+ * @rect_h: rotator frame height
+ */
+static void _sde_plane_inline_rot_set_ot_limit(struct drm_plane *plane,
+		struct drm_crtc *crtc, const struct sde_rot_vbif_cfg *cfg,
+		u32 rect_w, u32 rect_h)
+{
+	struct sde_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = cfg->xin_id;
+	ot_params.num = cfg->num;
+	ot_params.width = rect_w;
+	ot_params.height = rect_h;
+	ot_params.is_wfd = false;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = cfg->clk_ctrl;
+	ot_params.rd = cfg->is_read;
+
+	sde_vbif_set_ot_limit(sde_kms, &ot_params);
+}
+
+/**
+ * _sde_plane_inline_rot_set_qos_remap - set vbif QoS for the given inline
+ * rotation xin client
+ * @plane: Pointer to drm plane
+ * @cfg: Pointer to rotator vbif cfg
+ */
+static void _sde_plane_inline_rot_set_qos_remap(struct drm_plane *plane,
+		const struct sde_rot_vbif_cfg *cfg)
+{
+	struct sde_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.xin_id = cfg->xin_id;
+	qos_params.clk_ctrl = cfg->clk_ctrl;
+	qos_params.num = cfg->num;
+	qos_params.is_rt = true;
+
+	SDE_DEBUG("vbif:%d xin:%d num:%d rt:%d clk_ctrl:%d\n",
+			qos_params.vbif_idx, qos_params.xin_id,
+			qos_params.num, qos_params.is_rt, qos_params.clk_ctrl);
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
 int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
 {
 	struct sde_plane *psde;
@@ -788,9 +893,22 @@
 		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
 	else if (ret)
 		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
-	else if (psde->pipe_hw->ops.setup_sourceaddress)
+	else if (psde->pipe_hw->ops.setup_sourceaddress) {
+		SDE_EVT32(psde->pipe_hw->idx,
+				pipe_cfg->layout.width,
+				pipe_cfg->layout.height,
+				pipe_cfg->layout.plane_addr[0],
+				pipe_cfg->layout.plane_size[0],
+				pipe_cfg->layout.plane_addr[1],
+				pipe_cfg->layout.plane_size[1],
+				pipe_cfg->layout.plane_addr[2],
+				pipe_cfg->layout.plane_size[2],
+				pipe_cfg->layout.plane_addr[3],
+				pipe_cfg->layout.plane_size[3],
+				pstate->multirect_index);
 		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg,
 						pstate->multirect_index);
+	}
 }
 
 static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
@@ -1184,8 +1302,9 @@
 		int error;
 
 		error = _sde_plane_setup_scaler3_lut(psde, pstate);
-		if (error || !psde->pixel_ext_usr ||
-				psde->debugfs_default_scale) {
+		if (error || psde->debugfs_default_scale ||
+			psde->scaler_check_state !=
+				SDE_PLANE_SCLCHECK_SCALER_V2) {
 			/* calculate default config for QSEED3 */
 			_sde_plane_setup_scaler3(psde,
 					psde->pipe_cfg.src_rect.w,
@@ -1195,8 +1314,8 @@
 					psde->scaler3_cfg, fmt,
 					chroma_subsmpl_h, chroma_subsmpl_v);
 		}
-	} else if (!psde->pixel_ext_usr || !pstate ||
-			psde->debugfs_default_scale) {
+	} else if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 ||
+			!pstate || psde->debugfs_default_scale) {
 		uint32_t deci_dim, i;
 
 		/* calculate default configuration for QSEED2 */
@@ -1368,33 +1487,31 @@
 static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
 {
 	struct drm_plane_state *state;
-	struct drm_crtc_state *cstate;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_kms *sde_kms;
 	u32 blocksize = 128;
 	u32 prefill_line = 0;
 
-	if (!plane || !plane->state || !plane->state->fb ||
-			!plane->state->crtc || !plane->state->crtc->state) {
+	if (!plane || !plane->state || !plane->state->fb) {
 		SDE_ERROR("invalid parameters\n");
 		return 0;
 	}
 
 	sde_kms = _sde_plane_get_kms(plane);
 	state = plane->state;
-	cstate = state->crtc->state;
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
-	if (!rstate->rot_hw || !rstate->rot_hw->caps || !rstate->out_src_h ||
-			!sde_kms || !sde_kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
+	if (!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid kms\n");
 		return 0;
 	}
 
-	sde_format_get_block_size(rstate->out_fb_format, &blocksize,
-			&blocksize);
+	if (rstate->out_fb_format)
+		sde_format_get_block_size(rstate->out_fb_format,
+				&blocksize, &blocksize);
+
 	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
 
 	SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
@@ -1416,7 +1533,7 @@
 	struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
 	bool sbuf_mode = rstate ? rstate->out_sbuf : false;
 
-	if (prefill && sbuf_mode)
+	if (prefill)
 		*prefill = sde_plane_rot_calc_prefill(plane);
 
 	return sbuf_mode;
@@ -1695,6 +1812,24 @@
 			rot_cmd->dst_len[i] = layout.plane_size[i];
 		}
 		rot_cmd->dst_planes = layout.num_planes;
+
+		/* VBIF remapper settings */
+		for (i = 0; i < rstate->rot_hw->caps->xin_count; i++) {
+			const struct sde_rot_vbif_cfg *cfg =
+					&rstate->rot_hw->caps->vbif_cfg[i];
+
+			_sde_plane_inline_rot_set_qos_remap(plane, cfg);
+
+			if (cfg->is_read) {
+				_sde_plane_inline_rot_set_ot_limit(plane,
+					state->crtc, cfg, rot_cmd->src_rect_w,
+					rot_cmd->src_rect_h);
+			} else {
+				_sde_plane_inline_rot_set_ot_limit(plane,
+					state->crtc, cfg, rot_cmd->dst_rect_w,
+					rot_cmd->dst_rect_h);
+			}
+		}
 	}
 
 	ret = rstate->rot_hw->ops.commit(rstate->rot_hw, rot_cmd, hw_cmd);
@@ -2274,7 +2409,8 @@
 				rot_hw->ops.get_maxlinewidth(rot_hw));
 
 	msm_property_set_blob(&psde->property_info, &psde->blob_rot_caps,
-			info->data, info->len, PLANE_PROP_ROT_CAPS_V1);
+			info->data, SDE_KMS_INFO_DATALEN(info),
+			PLANE_PROP_ROT_CAPS_V1);
 
 	sde_hw_rot_put(rot_hw);
 error_rot:
@@ -2402,16 +2538,9 @@
 
 	/* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
 	if (parallel_fetch_qualified) {
-		if (dst[R0].x <= dst[R1].x) {
-			pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
-			pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
-		} else {
-			pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
-			pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
-		}
-
 		pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
 		pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_PARALLEL;
+
 		goto done;
 	}
 
@@ -2419,12 +2548,10 @@
 	if (SDE_FORMAT_IS_UBWC(fmt[R0]))
 		buffer_lines = 2 * fmt[R0]->tile_height;
 
-	if (dst[R1].y >= dst[R0].y + dst[R0].h + buffer_lines) {
-		pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
-		pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
-	} else if (dst[R0].y >= dst[R1].y + dst[R1].h + buffer_lines) {
-		pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
-		pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
+	if ((dst[R1].y >= dst[R0].y + dst[R0].h + buffer_lines) ||
+		(dst[R0].y >= dst[R1].y + dst[R1].h + buffer_lines)) {
+		pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
+		pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
 	} else {
 		SDE_ERROR(
 			"No multirect mode possible for the planes (%d - %d)\n",
@@ -2433,9 +2560,15 @@
 		return -EINVAL;
 	}
 
-	pstate[R0]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
-	pstate[R1]->multirect_mode = SDE_SSPP_MULTIRECT_TIME_MX;
 done:
+	if (sde_plane[R0]->is_virtual) {
+		pstate[R0]->multirect_index = SDE_SSPP_RECT_1;
+		pstate[R1]->multirect_index = SDE_SSPP_RECT_0;
+	} else {
+		pstate[R0]->multirect_index = SDE_SSPP_RECT_0;
+		pstate[R1]->multirect_index = SDE_SSPP_RECT_1;
+	}
+
 	SDE_DEBUG_PLANE(sde_plane[R0], "R0: %d - %d\n",
 		pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
 	SDE_DEBUG_PLANE(sde_plane[R1], "R1: %d - %d\n",
@@ -2447,16 +2580,16 @@
  * sde_plane_get_ctl_flush - get control flush for the given plane
  * @plane: Pointer to drm plane structure
  * @ctl: Pointer to hardware control driver
- * @flush: Pointer to flush control word
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
  */
 void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		u32 *flush)
+		u32 *flush_sspp, u32 *flush_rot)
 {
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
-	u32 bitmask;
 
-	if (!plane || !flush) {
+	if (!plane || !flush_sspp) {
 		SDE_ERROR("invalid parameters\n");
 		return;
 	}
@@ -2464,13 +2597,15 @@
 	pstate = to_sde_plane_state(plane->state);
 	rstate = &pstate->rot;
 
-	bitmask = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+	*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
 
+	if (!flush_rot)
+		return;
+
+	*flush_rot = 0x0;
 	if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
 			ctl->ops.get_bitmask_rot)
-		ctl->ops.get_bitmask_rot(ctl, &bitmask, rstate->rot_hw->idx);
-
-	*flush = bitmask;
+		ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
 }
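
With the flush word split in two, callers decide whether they need the rotator mask; a minimal usage sketch (plane and ctl come from the caller):

	u32 flush_sspp = 0, flush_rot = 0;

	/* both masks requested */
	sde_plane_get_ctl_flush(plane, ctl, &flush_sspp, &flush_rot);

	/* rotator mask not needed: the last argument may simply be NULL */
	sde_plane_get_ctl_flush(plane, ctl, &flush_sspp, NULL);
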
 
 static int sde_plane_prepare_fb(struct drm_plane *plane,
@@ -2479,6 +2614,7 @@
 	struct drm_framebuffer *fb = new_state->fb;
 	struct sde_plane *psde = to_sde_plane(plane);
 	struct sde_plane_rot_state *new_rstate;
+	struct sde_hw_fmt_layout layout;
 	int ret;
 
 	if (!new_state->fb)
@@ -2500,6 +2636,14 @@
 		return ret;
 	}
 
+	/* validate framebuffer layout before commit */
+	ret = sde_format_populate_layout(new_rstate->mmu_id,
+			new_rstate->out_fb, &layout);
+	if (ret) {
+		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -2619,6 +2763,101 @@
 	}
 }
 
+static int _sde_plane_validate_scaler_v2(struct sde_plane *psde,
+		const struct sde_format *fmt,
+		uint32_t img_w, uint32_t img_h,
+		uint32_t src_w, uint32_t src_h,
+		uint32_t deci_w, uint32_t deci_h)
+{
+	int i;
+
+	if (!psde || !fmt) {
+		SDE_ERROR_PLANE(psde, "invalid arguments\n");
+		return -EINVAL;
+	}
+
+	/* don't run checks unless scaler data was changed */
+	if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK ||
+			!psde->scaler3_cfg)
+		return 0;
+
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID;
+
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		uint32_t hor_req_pixels, hor_fetch_pixels;
+		uint32_t vert_req_pixels, vert_fetch_pixels;
+		uint32_t src_w_tmp, src_h_tmp;
+
+		/* re-use color plane 1's config for plane 2 */
+		if (i == 2)
+			continue;
+
+		src_w_tmp = src_w;
+		src_h_tmp = src_h;
+
+		/*
+		 * For the chroma plane, width is halved for the sub-sampled
+		 * formats below, except in the case of decimation, where the
+		 * hardware avoids one line of decimation instead of
+		 * downsampling.
+		 */
+		if (i == 1) {
+			if (!deci_w &&
+					(fmt->chroma_sample == SDE_CHROMA_420 ||
+					 fmt->chroma_sample == SDE_CHROMA_H2V1))
+				src_w_tmp >>= 1;
+			if (!deci_h &&
+					(fmt->chroma_sample == SDE_CHROMA_420 ||
+					 fmt->chroma_sample == SDE_CHROMA_H1V2))
+				src_h_tmp >>= 1;
+		}
+
+		hor_req_pixels = psde->pixel_ext.roi_w[i];
+		vert_req_pixels = psde->pixel_ext.roi_h[i];
+
+		hor_fetch_pixels = DECIMATED_DIMENSION(src_w_tmp +
+				(int8_t)(psde->pixel_ext.left_ftch[i] & 0xFF) +
+				(int8_t)(psde->pixel_ext.right_ftch[i] & 0xFF),
+				deci_w);
+		vert_fetch_pixels = DECIMATED_DIMENSION(src_h_tmp +
+				(int8_t)(psde->pixel_ext.top_ftch[i] & 0xFF) +
+				(int8_t)(psde->pixel_ext.btm_ftch[i] & 0xFF),
+				deci_h);
+
+		if ((hor_req_pixels != hor_fetch_pixels) ||
+			(hor_fetch_pixels > img_w) ||
+			(vert_req_pixels != vert_fetch_pixels) ||
+			(vert_fetch_pixels > img_h)) {
+			SDE_ERROR_PLANE(psde,
+					"req %d/%d, fetch %d/%d, src %dx%d\n",
+					hor_req_pixels, vert_req_pixels,
+					hor_fetch_pixels, vert_fetch_pixels,
+					src_w, src_h);
+			return -EINVAL;
+		}
+
+		/*
+		 * Alpha plane can only be scaled using bilinear or pixel
+		 * repeat/drop, src_width and src_height are only specified
+		 * for Y and UV plane
+		 */
+		if (i != 3 &&
+			(hor_req_pixels != psde->scaler3_cfg->src_width[i] ||
+			vert_req_pixels != psde->scaler3_cfg->src_height[i])) {
+			SDE_ERROR_PLANE(psde,
+				"roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n",
+				i, psde->pixel_ext.roi_w[i],
+				psde->pixel_ext.roi_h[i],
+				psde->scaler3_cfg->src_width[i],
+				psde->scaler3_cfg->src_height[i],
+				src_w, src_h);
+			return -EINVAL;
+		}
+	}
+
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2;
+	return 0;
+}
+
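
A concrete instance of the fetch/req check above for the common no-decimation case (numbers are illustrative; DECIMATED_DIMENSION(x, 0) is assumed to evaluate to x):

	/*
	 * src_w = 128, left_ftch = 2, right_ftch = 2, deci_w = 0:
	 *   hor_fetch_pixels = 128 + 2 + 2 = 132
	 * The user-provided pixel extension must then report roi_w[i] == 132,
	 * and 132 must not exceed the framebuffer width, otherwise the plane
	 * state is rejected with -EINVAL.
	 */
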
 static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
 		struct drm_plane_state *state)
 {
@@ -2754,10 +2993,15 @@
 			"too much scaling requested %ux%u->%ux%u\n",
 			src_deci_w, src_deci_h, dst.w, dst.h);
 		ret = -E2BIG;
+	} else if (_sde_plane_validate_scaler_v2(psde, fmt,
+				rstate->out_fb_width,
+				rstate->out_fb_height,
+				src.w, src.h, deci_w, deci_h)) {
+		ret = -EINVAL;
 	}
 
 	/* check excl rect configs */
-	if (pstate->excl_rect.w && pstate->excl_rect.h) {
+	if (!ret && pstate->excl_rect.w && pstate->excl_rect.h) {
 		struct sde_rect intersect;
 
 		/*
@@ -2931,6 +3175,9 @@
 		switch (idx) {
 		case PLANE_PROP_SCALER_V1:
 		case PLANE_PROP_SCALER_V2:
+		case PLANE_PROP_SCALER_LUT_ED:
+		case PLANE_PROP_SCALER_LUT_CIR:
+		case PLANE_PROP_SCALER_LUT_SEP:
 		case PLANE_PROP_H_DECIMATE:
 		case PLANE_PROP_V_DECIMATE:
 		case PLANE_PROP_SRC_CONFIG:
@@ -3049,7 +3296,8 @@
 					pstate->multirect_index);
 		}
 
-		if (psde->pipe_hw->ops.setup_pe)
+		if (psde->pipe_hw->ops.setup_pe &&
+				(pstate->multirect_index != SDE_SSPP_RECT_1))
 			psde->pipe_hw->ops.setup_pe(psde->pipe_hw,
 					&psde->pixel_ext);
 
@@ -3090,6 +3338,23 @@
 		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
 				pstate->multirect_index);
 
+		if (psde->pipe_hw->ops.setup_cdp) {
+			struct sde_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+			memset(cdp_cfg, 0, sizeof(struct sde_hw_pipe_cdp_cfg));
+
+			cdp_cfg->enable = psde->catalog->perf.cdp_cfg
+					[SDE_PERF_CDP_USAGE_RT].rd_enable;
+			cdp_cfg->ubwc_meta_enable =
+					SDE_FORMAT_IS_UBWC(fmt);
+			cdp_cfg->tile_amortize_enable =
+					SDE_FORMAT_IS_UBWC(fmt) ||
+					SDE_FORMAT_IS_TILE(fmt);
+			cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+		}
+
 		if (psde->pipe_hw->ops.setup_sys_cache) {
 			if (rstate->out_sbuf) {
 				if (rstate->nplane < 2)
@@ -3407,7 +3672,8 @@
 	sde_kms_info_add_keyint(info, "max_per_pipe_bw",
 			psde->pipe_sblk->max_per_pipe_bw * 1000LL);
 	msm_property_set_blob(&psde->property_info, &psde->blob_info,
-			info->data, info->len, PLANE_PROP_INFO);
+			info->data, SDE_KMS_INFO_DATALEN(info),
+			PLANE_PROP_INFO);
 
 	kfree(info);
 	kfree(virt_format_list);
@@ -3477,7 +3743,7 @@
 		return;
 	}
 
-	psde->pixel_ext_usr = false;
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
 	if (!usr) {
 		SDE_DEBUG_PLANE(psde, "scale data removed\n");
 		return;
@@ -3514,8 +3780,9 @@
 		pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
 	}
 
-	psde->pixel_ext_usr = true;
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1;
 
+	SDE_EVT32_VERBOSE(DRMID(&psde->base));
 	SDE_DEBUG_PLANE(psde, "user property data copied\n");
 }
 
@@ -3527,13 +3794,13 @@
 	int i;
 	struct sde_hw_scaler3_cfg *cfg;
 
-	if (!psde) {
+	if (!psde || !psde->scaler3_cfg) {
 		SDE_ERROR("invalid plane\n");
 		return;
 	}
 
 	cfg = psde->scaler3_cfg;
-	psde->pixel_ext_usr = false;
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
 	if (!usr) {
 		SDE_DEBUG_PLANE(psde, "scale data removed\n");
 		return;
@@ -3544,6 +3811,12 @@
 		return;
 	}
 
+	/* detach/ignore user data if 'disabled' */
+	if (!scale_v2.enable) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
 	/* populate from user space */
 	pe = &(psde->pixel_ext);
 	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
@@ -3603,8 +3876,11 @@
 		pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
 		pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
 	}
-	psde->pixel_ext_usr = true;
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK;
 
+	SDE_EVT32_VERBOSE(DRMID(&psde->base), cfg->enable, cfg->de.enable,
+			cfg->src_width[0], cfg->src_height[0],
+			cfg->dst_width, cfg->dst_height);
 	SDE_DEBUG_PLANE(psde, "user property data copied\n");
 }
 
@@ -3996,7 +4272,7 @@
 		return -ENOMEM;
 
 	/* don't error check these */
-	debugfs_create_x32("features", 0644,
+	debugfs_create_x32("features", 0600,
 			psde->debugfs_root, &psde->features);
 
 	/* add register dump support */
@@ -4004,7 +4280,7 @@
 			sblk->src_blk.base + cfg->base,
 			sblk->src_blk.len,
 			kms);
-	sde_debugfs_create_regset32("src_blk", 0444,
+	sde_debugfs_create_regset32("src_blk", 0400,
 			psde->debugfs_root, &psde->debugfs_src);
 
 	if (cfg->features & BIT(SDE_SSPP_SCALER_QSEED3) ||
@@ -4013,11 +4289,11 @@
 				sblk->scaler_blk.base + cfg->base,
 				sblk->scaler_blk.len,
 				kms);
-		sde_debugfs_create_regset32("scaler_blk", 0444,
+		sde_debugfs_create_regset32("scaler_blk", 0400,
 				psde->debugfs_root,
 				&psde->debugfs_scaler);
 		debugfs_create_bool("default_scaling",
-				0644,
+				0600,
 				psde->debugfs_root,
 				&psde->debugfs_default_scale);
 	}
@@ -4028,36 +4304,36 @@
 				sblk->csc_blk.base + cfg->base,
 				sblk->csc_blk.len,
 				kms);
-		sde_debugfs_create_regset32("csc_blk", 0444,
+		sde_debugfs_create_regset32("csc_blk", 0400,
 				psde->debugfs_root, &psde->debugfs_csc);
 	}
 
 	debugfs_create_u32("xin_id",
-			0444,
+			0400,
 			psde->debugfs_root,
 			(u32 *) &cfg->xin_id);
 	debugfs_create_u32("clk_ctrl",
-			0444,
+			0400,
 			psde->debugfs_root,
 			(u32 *) &cfg->clk_ctrl);
 	debugfs_create_x32("creq_vblank",
-			0644,
+			0600,
 			psde->debugfs_root,
 			(u32 *) &sblk->creq_vblank);
 	debugfs_create_x32("danger_vblank",
-			0644,
+			0600,
 			psde->debugfs_root,
 			(u32 *) &sblk->danger_vblank);
 
 	debugfs_create_file("disable_danger",
-			0644,
+			0600,
 			psde->debugfs_root,
 			kms, &sde_plane_danger_enable);
 	debugfs_create_u32("sbuf_mode",
-			0644,
+			0600,
 			psde->debugfs_root, &psde->sbuf_mode);
 	debugfs_create_u32("sbuf_writeback",
-			0644,
+			0600,
 			psde->debugfs_root,
 			&psde->sbuf_writeback);
 
@@ -4174,6 +4450,7 @@
 	psde->pipe = pipe;
 	psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
 	psde->is_virtual = (master_plane_id != 0);
+	psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE;
 	INIT_LIST_HEAD(&psde->mplane_list);
 	master_plane = drm_plane_find(dev, master_plane_id);
 	if (master_plane) {
@@ -4274,7 +4551,8 @@
 
 	mutex_init(&psde->lock);
 
-	SDE_DEBUG("%s created for pipe %u\n", psde->pipe_name, pipe);
+	SDE_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", psde->pipe_name,
+					pipe, plane->base.id, master_plane_id);
 	return plane;
 
 clean_sspp:
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 47611d1..f83a891 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -110,6 +110,7 @@
  * @multirect_index: index of the rectangle of SSPP
  * @multirect_mode: parallel or time multiplex multirect mode
  * @pending:	whether the current update is still pending
+ * @cdp_cfg:	CDP configuration
  */
 struct sde_plane_state {
 	struct drm_plane_state base;
@@ -126,6 +127,8 @@
 	/* @sc_cfg: system_cache configuration */
 	struct sde_hw_pipe_sc_cfg sc_cfg;
 	struct sde_plane_rot_state rot;
+
+	struct sde_hw_pipe_cdp_cfg cdp_cfg;
 };
 
 /**
@@ -169,10 +172,11 @@
  * sde_plane_get_ctl_flush - get control flush mask
  * @plane:   Pointer to DRM plane object
  * @ctl: Pointer to control hardware
- * @flush: Pointer to updated flush mask
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
  */
 void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		u32 *flush);
+		u32 *flush_sspp, u32 *flush_rot);
 
 /**
  * sde_plane_is_sbuf_mode - return status of stream buffer mode
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 427a93b..0382ed0 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -106,10 +106,8 @@
 	struct sde_rm_rsvp *rsvp;
 	struct sde_rm_rsvp *rsvp_nxt;
 	enum sde_hw_blk_type type;
-	const char *type_name;
 	uint32_t id;
-	void *catalog;
-	void *hw;
+	struct sde_hw_blk *hw;
 };
 
 /**
@@ -143,12 +141,12 @@
 			if (!blk->rsvp && !blk->rsvp_nxt)
 				continue;
 
-			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %s %d\n", stage,
+			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
 				(blk->rsvp) ? blk->rsvp->seq : 0,
 				(blk->rsvp) ? blk->rsvp->enc_id : 0,
 				(blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
 				(blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
-				blk->type_name, blk->id);
+				blk->type, blk->id);
 
 			SDE_EVT32(stage,
 				(blk->rsvp) ? blk->rsvp->seq : 0,
@@ -175,7 +173,7 @@
 	iter->type = type;
 }
 
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
 {
 	struct list_head *blk_list;
 
@@ -205,9 +203,8 @@
 
 		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
 			i->hw = i->blk->hw;
-			SDE_DEBUG("found type %d %s id %d for enc %d\n",
-					i->type, i->blk->type_name, i->blk->id,
-					i->enc_id);
+			SDE_DEBUG("found type %d id %d for enc %d\n",
+					i->type, i->blk->id, i->enc_id);
 			return true;
 		}
 	}
@@ -217,6 +214,17 @@
 	return false;
 }
 
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _sde_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
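
External callers keep using sde_rm_get_hw(), which now takes rm_lock for the duration of each step, while the reservation paths below switch to the _locked variant because they already hold the lock. Typical external iteration (encoder and block type are examples):

	struct sde_rm_hw_iter iter;

	sde_rm_init_hw_iter(&iter, drm_enc->base.id, SDE_HW_BLK_CTL);
	while (sde_rm_get_hw(rm, &iter)) {
		struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.hw);

		/* only blocks reserved for this encoder are visited */
	}
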
 static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
 {
 	switch (type) {
@@ -288,6 +296,8 @@
 	sde_hw_mdp_destroy(rm->hw_mdp);
 	rm->hw_mdp = NULL;
 
+	mutex_destroy(&rm->rm_lock);
+
 	return 0;
 }
 
@@ -301,7 +311,6 @@
 {
 	struct sde_rm_hw_blk *blk;
 	struct sde_hw_mdp *hw_mdp;
-	const char *name;
 	void *hw;
 
 	hw_mdp = rm->hw_mdp;
@@ -309,39 +318,30 @@
 	switch (type) {
 	case SDE_HW_BLK_LM:
 		hw = sde_hw_lm_init(id, mmio, cat);
-		name = "lm";
 		break;
 	case SDE_HW_BLK_DSPP:
 		hw = sde_hw_dspp_init(id, mmio, cat);
-		name = "dspp";
 		break;
 	case SDE_HW_BLK_CTL:
 		hw = sde_hw_ctl_init(id, mmio, cat);
-		name = "ctl";
 		break;
 	case SDE_HW_BLK_CDM:
 		hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
-		name = "cdm";
 		break;
 	case SDE_HW_BLK_PINGPONG:
 		hw = sde_hw_pingpong_init(id, mmio, cat);
-		name = "pp";
 		break;
 	case SDE_HW_BLK_INTF:
 		hw = sde_hw_intf_init(id, mmio, cat);
-		name = "intf";
 		break;
 	case SDE_HW_BLK_WB:
 		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
-		name = "wb";
 		break;
 	case SDE_HW_BLK_DSC:
 		hw = sde_hw_dsc_init(id, mmio, cat);
-		name = "dsc";
 		break;
 	case SDE_HW_BLK_ROT:
 		hw = sde_hw_rot_init(id, mmio, cat);
-		name = "rot";
 		break;
 	case SDE_HW_BLK_SSPP:
 		/* SSPPs are not managed by the resource manager */
@@ -365,10 +365,8 @@
 		return -ENOMEM;
 	}
 
-	blk->type_name = name;
 	blk->type = type;
 	blk->id = id;
-	blk->catalog = hw_catalog_info;
 	blk->hw = hw;
 	list_add_tail(&blk->list, &rm->hw_blks[type]);
 
@@ -390,6 +388,9 @@
 
 	/* Clear, setup lists */
 	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
 	INIT_LIST_HEAD(&rm->rsvps);
 	for (type = 0; type < SDE_HW_BLK_MAX; type++)
 		INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -545,8 +546,8 @@
 		struct sde_rm_hw_blk **pp,
 		struct sde_rm_hw_blk *primary_lm)
 {
-	struct sde_lm_cfg *lm_cfg = (struct sde_lm_cfg *)lm->catalog;
-	struct sde_pingpong_cfg *pp_cfg;
+	const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
+	const struct sde_pingpong_cfg *pp_cfg;
 	struct sde_rm_hw_iter iter;
 
 	*dspp = NULL;
@@ -557,8 +558,8 @@
 
 	/* Check if this layer mixer is a peer of the proposed primary LM */
 	if (primary_lm) {
-		struct sde_lm_cfg *prim_lm_cfg =
-				(struct sde_lm_cfg *)primary_lm->catalog;
+		const struct sde_lm_cfg *prim_lm_cfg =
+				to_sde_hw_mixer(primary_lm->hw)->cap;
 
 		if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
 			SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
@@ -584,7 +585,7 @@
 
 	if (lm_cfg->dspp != DSPP_MAX) {
 		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
-		while (sde_rm_get_hw(rm, &iter)) {
+		while (_sde_rm_get_hw_locked(rm, &iter)) {
 			if (iter.blk->id == lm_cfg->dspp) {
 				*dspp = iter.blk;
 				break;
@@ -605,7 +606,7 @@
 	}
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (iter.blk->id == lm_cfg->pingpong) {
 			*pp = iter.blk;
 			break;
@@ -624,7 +625,7 @@
 		return false;
 	}
 
-	pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
+	pp_cfg = to_sde_hw_pingpong((*pp)->hw)->caps;
 	if ((reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
 			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
 		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
@@ -656,7 +657,7 @@
 	/* Find a primary mixer */
 	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
 	while (lm_count != reqs->topology->num_lm &&
-			sde_rm_get_hw(rm, &iter_i)) {
+			_sde_rm_get_hw_locked(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
 		memset(&pp, 0, sizeof(pp));
@@ -675,7 +676,7 @@
 		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
 
 		while (lm_count != reqs->topology->num_lm &&
-				sde_rm_get_hw(rm, &iter_j)) {
+				_sde_rm_get_hw_locked(rm, &iter_j)) {
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
@@ -711,10 +712,10 @@
 		/* reserve a free PINGPONG_SLAVE block */
 		rc = -ENAVAIL;
 		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
-		while (sde_rm_get_hw(rm, &iter_i)) {
-			struct sde_pingpong_cfg *pp_cfg =
-				(struct sde_pingpong_cfg *)
-				(iter_i.blk->catalog);
+		while (_sde_rm_get_hw_locked(rm, &iter_i)) {
+			const struct sde_hw_pingpong *pp =
+					to_sde_hw_pingpong(iter_i.blk->hw);
+			const struct sde_pingpong_cfg *pp_cfg = pp->caps;
 
 			if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
 				continue;
@@ -742,18 +743,18 @@
 	memset(&ctls, 0, sizeof(ctls));
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
-	while (sde_rm_get_hw(rm, &iter)) {
-		unsigned long caps;
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		const struct sde_hw_ctl *ctl = to_sde_hw_ctl(iter.blk->hw);
+		unsigned long features = ctl->caps->features;
 		bool has_split_display, has_ppsplit;
 
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
 			continue;
 
-		caps = ((struct sde_ctl_cfg *)iter.blk->catalog)->features;
-		has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & caps;
-		has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & caps;
+		has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & features;
+		has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & features;
 
-		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+		SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
 
 		if (top->needs_split_display != has_split_display)
 			continue;
@@ -793,7 +794,7 @@
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
 
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
 			continue;
 
@@ -817,24 +818,23 @@
 		enum sde_hw_blk_type type)
 {
 	struct sde_rm_hw_iter iter;
-	struct sde_cdm_cfg *cdm;
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
+		const struct sde_hw_cdm *cdm = to_sde_hw_cdm(iter.blk->hw);
+		const struct sde_cdm_cfg *caps = cdm->caps;
 		bool match = false;
 
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
 			continue;
 
-		cdm = (struct sde_cdm_cfg *)(iter.blk->catalog);
-
 		if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
-			match = test_bit(id, &cdm->intf_connect);
+			match = test_bit(id, &caps->intf_connect);
 		else if (type == SDE_HW_BLK_WB && id != WB_MAX)
-			match = test_bit(id, &cdm->wb_connect);
+			match = test_bit(id, &caps->wb_connect);
 
 		SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
-				type, id, cdm->intf_connect, cdm->wb_connect,
+				type, id, caps->intf_connect, caps->wb_connect,
 				match);
 
 		if (!match)
@@ -865,7 +865,7 @@
 
 	/* Find the block entry in the rm, and note the reservation */
 	sde_rm_init_hw_iter(&iter, 0, type);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (iter.blk->id != id)
 			continue;
 
@@ -1071,7 +1071,7 @@
  * @rm:	KMS handle
  * @rsvp:	RSVP pointer to release and release resources for
  */
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
 		struct drm_connector *conn)
@@ -1096,15 +1096,15 @@
 		list_for_each_entry(blk, &rm->hw_blks[type], list) {
 			if (blk->rsvp == rsvp) {
 				blk->rsvp = NULL;
-				SDE_DEBUG("rel rsvp %d enc %d %s %d\n",
+				SDE_DEBUG("rel rsvp %d enc %d %d %d\n",
 						rsvp->seq, rsvp->enc_id,
-						blk->type_name, blk->id);
+						blk->type, blk->id);
 			}
 			if (blk->rsvp_nxt == rsvp) {
 				blk->rsvp_nxt = NULL;
-				SDE_DEBUG("rel rsvp_nxt %d enc %d %s %d\n",
+				SDE_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
 						rsvp->seq, rsvp->enc_id,
-						blk->type_name, blk->id);
+						blk->type, blk->id);
 			}
 		}
 	}
@@ -1123,16 +1123,18 @@
 		return;
 	}
 
+	mutex_lock(&rm->rm_lock);
+
 	rsvp = _sde_rm_get_rsvp(rm, enc);
 	if (!rsvp) {
 		SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
-		return;
+		goto end;
 	}
 
 	conn = _sde_rm_get_connector(enc);
 	if (!conn) {
 		SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
-		return;
+		goto end;
 	}
 
 	top_ctrl = sde_connector_get_property(conn->state,
@@ -1152,6 +1154,9 @@
 				CONNECTOR_PROP_TOPOLOGY_NAME,
 				SDE_RM_TOPOLOGY_NONE);
 	}
+
+end:
+	mutex_unlock(&rm->rm_lock);
 }
 
 static int _sde_rm_commit_rsvp(
@@ -1219,13 +1224,15 @@
 			crtc_state->crtc->base.id, test_only);
 	SDE_EVT32(enc->base.id, conn_state->connector->base.id);
 
+	mutex_lock(&rm->rm_lock);
+
 	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
 
 	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
 			conn_state, &reqs);
 	if (ret) {
 		SDE_ERROR("failed to populate hw requirements\n");
-		return ret;
+		goto end;
 	}
 
 	/*
@@ -1240,8 +1247,10 @@
 	 * replace the current with the next.
 	 */
 	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
-	if (!rsvp_nxt)
-		return -ENOMEM;
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	rsvp_cur = _sde_rm_get_rsvp(rm, enc);
 
@@ -1293,5 +1302,8 @@
 
 	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
 
+end:
+	mutex_unlock(&rm->rm_lock);
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 059952a..b4a801a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -72,6 +72,7 @@
  * @hw_mdp: hardware object for mdp_top
  * @lm_max_width: cached layer mixer maximum width
  * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
  */
 struct sde_rm {
 	struct drm_device *dev;
@@ -80,6 +81,7 @@
 	struct sde_hw_mdp *hw_mdp;
 	uint32_t lm_max_width;
 	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index f731a30..6962bef 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -159,25 +159,37 @@
 			__get_str(counter_name), __entry->value)
 )
 
+#define SDE_TRACE_EVTLOG_SIZE	15
 TRACE_EVENT(sde_evtlog,
-	TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
-	TP_ARGS(tag, tag_id, value1, value2),
+	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 data[]),
+	TP_ARGS(tag, tag_id, cnt, data),
 	TP_STRUCT__entry(
 			__field(int, pid)
 			__string(evtlog_tag, tag)
 			__field(u32, tag_id)
-			__field(u64, value1)
-			__field(u64, value2)
+			__array(u32, data, SDE_TRACE_EVTLOG_SIZE)
 	),
 	TP_fast_assign(
 			__entry->pid = current->tgid;
 			__assign_str(evtlog_tag, tag);
 			__entry->tag_id = tag_id;
-			__entry->value1 = value1;
-			__entry->value2 = value2;
+			if (cnt > SDE_TRACE_EVTLOG_SIZE)
+				cnt = SDE_TRACE_EVTLOG_SIZE;
+			memcpy(__entry->data, data, cnt * sizeof(u32));
+			memset(&__entry->data[cnt], 0,
+				(SDE_TRACE_EVTLOG_SIZE - cnt) * sizeof(u32));
 	),
-	TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
-			__entry->tag_id, __entry->value1, __entry->value2)
+	TP_printk("%d|%s:%d|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u",
+			__entry->pid, __get_str(evtlog_tag),
+			__entry->tag_id,
+			__entry->data[0], __entry->data[1],
+			__entry->data[2], __entry->data[3],
+			__entry->data[4], __entry->data[5],
+			__entry->data[6], __entry->data[7],
+			__entry->data[8], __entry->data[9],
+			__entry->data[10], __entry->data[11],
+			__entry->data[12], __entry->data[13],
+			__entry->data[14])
 )
 
 TRACE_EVENT(sde_perf_crtc_update,
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c675216..847572b 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -265,6 +265,26 @@
 		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
 }
 
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
+{
+	struct sde_hw_vbif *vbif;
+	int i, j;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid argument\n");
+		return;
+	}
+
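+	/* program every xin port on each VBIF with its configured memory type */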
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		vbif = sde_kms->hw_vbif[i];
+		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+			for (j = 0; j < vbif->cap->memtype_count; j++)
+				vbif->ops.set_mem_type(
+						vbif, j, vbif->cap->memtype[j]);
+		}
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
 {
@@ -292,16 +312,16 @@
 		debugfs_vbif = debugfs_create_dir(vbif_name,
 				sde_kms->debugfs_vbif);
 
-		debugfs_create_u32("features", 0644, debugfs_vbif,
+		debugfs_create_u32("features", 0600, debugfs_vbif,
 			(u32 *)&vbif->features);
 
-		debugfs_create_u32("xin_halt_timeout", 0444, debugfs_vbif,
+		debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
 			(u32 *)&vbif->xin_halt_timeout);
 
-		debugfs_create_u32("default_rd_ot_limit", 0444, debugfs_vbif,
+		debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
 			(u32 *)&vbif->default_ot_rd_limit);
 
-		debugfs_create_u32("default_wr_ot_limit", 0444, debugfs_vbif,
+		debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
 			(u32 *)&vbif->default_ot_wr_limit);
 
 		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
@@ -310,11 +330,11 @@
 
 			snprintf(vbif_name, sizeof(vbif_name),
 					"dynamic_ot_rd_%d_pps", j);
-			debugfs_create_u64(vbif_name, 0444, debugfs_vbif,
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
 					(u64 *)&cfg->pps);
 			snprintf(vbif_name, sizeof(vbif_name),
 					"dynamic_ot_rd_%d_ot_limit", j);
-			debugfs_create_u32(vbif_name, 0444, debugfs_vbif,
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
 					(u32 *)&cfg->ot_limit);
 		}
 
@@ -324,11 +344,11 @@
 
 			snprintf(vbif_name, sizeof(vbif_name),
 					"dynamic_ot_wr_%d_pps", j);
-			debugfs_create_u64(vbif_name, 0444, debugfs_vbif,
+			debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
 					(u64 *)&cfg->pps);
 			snprintf(vbif_name, sizeof(vbif_name),
 					"dynamic_ot_wr_%d_ot_limit", j);
-			debugfs_create_u32(vbif_name, 0444, debugfs_vbif,
+			debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
 					(u32 *)&cfg->ot_limit);
 		}
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index d05c2e0..f1da68b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -27,6 +27,13 @@
 	u32 clk_ctrl;
 };
 
+struct sde_vbif_set_memtype_params {
+	u32 xin_id;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+	bool is_cacheable;
+};
+
 /**
  * struct sde_vbif_set_qos_params - QoS remapper parameter
  * @vbif_idx: vbif identifier
@@ -59,6 +66,12 @@
 void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
 		struct sde_vbif_set_qos_params *params);
 
+/**
+ * sde_vbif_init_memtypes - initialize xin memory types for vbif
+ * @sde_kms:	SDE handler
+ */
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
+
 #ifdef CONFIG_DEBUG_FS
 int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index a4b918e..a420ffb 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -2723,6 +2723,9 @@
 	if (off > dbg->max_offset)
 		return -EINVAL;
 
+	if (off % sizeof(u32))
+		return -EINVAL;
+
 	if (cnt > (dbg->max_offset - off))
 		cnt = dbg->max_offset - off;
 
@@ -2754,6 +2757,9 @@
 	if (*ppos)
 		return 0;	/* the end */
 
+	if (dbg->off % sizeof(u32))
+		return -EFAULT;
+
 	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
 	if (len < 0 || len >= sizeof(buf))
 		return 0;
@@ -2797,6 +2803,9 @@
 	if (cnt < 2)
 		return -EFAULT;
 
+	if (off % sizeof(u32))
+		return -EFAULT;
+
 	if (off >= dbg->max_offset)
 		return -EFAULT;
 
@@ -2841,6 +2850,9 @@
 		if (!dbg->buf)
 			return -ENOMEM;
 
+		if (dbg->off % sizeof(u32))
+			return -EFAULT;
+
 		ptr = dbg->base + dbg->off;
 		tot = 0;
 
@@ -2904,16 +2916,16 @@
 	if (!debugfs_root)
 		return -EINVAL;
 
-	debugfs_create_file("dump", 0644, debugfs_root, NULL,
+	debugfs_create_file("dump", 0600, debugfs_root, NULL,
 			&sde_evtlog_fops);
-	debugfs_create_u32("enable", 0644, debugfs_root,
+	debugfs_create_u32("enable", 0600, debugfs_root,
 			&(sde_dbg_base.evtlog->enable));
-	debugfs_create_file("filter", 0644, debugfs_root,
+	debugfs_create_file("filter", 0600, debugfs_root,
 			sde_dbg_base.evtlog,
 			&sde_evtlog_filter_fops);
-	debugfs_create_u32("panic", 0644, debugfs_root,
+	debugfs_create_u32("panic", 0600, debugfs_root,
 			&sde_dbg_base.panic_on_err);
-	debugfs_create_u32("reg_dump", 0644, debugfs_root,
+	debugfs_create_u32("reg_dump", 0600, debugfs_root,
 			&sde_dbg_base.enable_reg_dump);
 
 	if (dbg->dbgbus_sde.entries) {
@@ -2921,7 +2933,7 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_sde.cmn.name);
 		dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
-		debugfs_create_u32(debug_name, 0644, debugfs_root,
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
 				&dbg->dbgbus_sde.cmn.enable_mask);
 	}
 
@@ -2930,19 +2942,19 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_vbif_rt.cmn.name);
 		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-		debugfs_create_u32(debug_name, 0644, debugfs_root,
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
 				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
 	}
 
 	list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
 		snprintf(debug_name, sizeof(debug_name), "%s_off",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
+		debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
 				&sde_off_fops);
 
 		snprintf(debug_name, sizeof(debug_name), "%s_reg",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
+		debugfs_create_file(debug_name, 0600, debugfs_root, blk_base,
 				&sde_reg_fops);
 	}
 
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 699396f..67c664f 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -99,8 +99,7 @@
 	evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
 	evtlog->last++;
 
-	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
-			i > 1 ? log->data[1] : 0);
+	trace_sde_evtlog(name, line, log->data_cnt, log->data);
 exit:
 	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
 }
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index fb7f85c..452a3be 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -368,7 +368,6 @@
 		u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
 		u32 total_axi_port_cnt = pdbus->axi_port_cnt;
 		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
-		int match_cnt = 0;
 
 		if (!bw_table || !total_axi_port_cnt ||
 		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
@@ -408,20 +407,6 @@
 			}
 		}
 
-		for (i = 0; i < total_axi_port_cnt; i++) {
-			vect = &bw_table->usecase
-				[pdbus->curr_bw_uc_idx].vectors[i];
-			/* avoid performing updates for small changes */
-			if ((ab_quota[i] == vect->ab) &&
-				(ib_quota[i] == vect->ib))
-				match_cnt++;
-		}
-
-		if (match_cnt == total_axi_port_cnt) {
-			pr_debug("skip BW vote\n");
-			return 0;
-		}
-
 		new_uc_idx = (pdbus->curr_bw_uc_idx %
 			(bw_table->num_usecases - 1)) + 1;
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 7bf2211..caa8cdf 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -31,16 +31,16 @@
 #include "sde_dbg.h"
 
 /* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */
-#define TCS_CASE_EXECUTION_TIME				1064000
+#define SINGLE_TCS_EXECUTION_TIME				1064000
 
 /* this time is ~1ms - only wake tcs in any mode */
-#define RSC_BACKOFF_TIME_NS		 (TCS_CASE_EXECUTION_TIME + 100)
+#define RSC_BACKOFF_TIME_NS		 (SINGLE_TCS_EXECUTION_TIME + 100)
 
 /* this time is ~1ms - only wake TCS in mode-0 */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS	((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+#define RSC_MODE_THRESHOLD_TIME_IN_NS	(SINGLE_TCS_EXECUTION_TIME + 100)
 
 /* this time is ~2ms - sleep+ wake TCS in mode-1 */
-#define RSC_TIME_SLOT_0_NS		((TCS_CASE_EXECUTION_TIME * 2) + 100)
+#define RSC_TIME_SLOT_0_NS		((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER		5
@@ -996,13 +996,13 @@
 		return;
 
 	/* don't error check these */
-	debugfs_create_file("status", 0444, rsc->debugfs_root, rsc,
+	debugfs_create_file("status", 0400, rsc->debugfs_root, rsc,
 							&debugfs_status_fops);
-	debugfs_create_file("mode_control", 0644, rsc->debugfs_root, rsc,
+	debugfs_create_file("mode_control", 0600, rsc->debugfs_root, rsc,
 							&mode_control_fops);
-	debugfs_create_file("vsync_mode", 0644, rsc->debugfs_root, rsc,
+	debugfs_create_file("vsync_mode", 0600, rsc->debugfs_root, rsc,
 							&vsync_status_fops);
-	debugfs_create_x32("debug_mode", 0644, rsc->debugfs_root,
+	debugfs_create_x32("debug_mode", 0600, rsc->debugfs_root,
 							&rsc->debug_mode);
 }
 
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 3332a05..e5ae0ad 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -206,7 +206,7 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
 						0xa7e9a920, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x002079e7, rsc->debug_mode);
+						0x002089e7, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 82d3e28..7e4f24a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
 	struct list_head head;
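+	/* entry on the local completed list; func() may re-arm via head */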
+	struct list_head exec;
 	u64 timestamp;
 	void (*func)(struct nvkm_alarm *);
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index f2a86ea..2437f7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -50,7 +50,8 @@
 		/* Move to completed list.  We'll drop the lock before
 		 * executing the callback so it can reschedule itself.
 		 */
-		list_move_tail(&alarm->head, &exec);
+		list_del_init(&alarm->head);
+		list_add(&alarm->exec, &exec);
 	}
 
 	/* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@
 	spin_unlock_irqrestore(&tmr->lock, flags);
 
 	/* Execute completed callbacks. */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
+	list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+		list_del(&alarm->exec);
 		alarm->func(alarm);
 	}
 }
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba4508..ea36dc4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f6ff41a..edee6a5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7416,7 +7416,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -7446,7 +7446,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b6b576..6068b8a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4933,7 +4933,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -4964,7 +4964,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a951881..f2eac6b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3995,7 +3995,7 @@
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
 		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e0c143b..30bd4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	48
+#define KMS_DRIVER_MINOR	49
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511..3168567 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-	args->vram_size = rdev->mc.real_vram_size;
-	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	args->vram_size = (u64)man->size << PAGE_SHIFT;
+	args->vram_visible = rdev->mc.visible_vram_size;
 	args->vram_visible -= rdev->vram_pin_size;
 	args->gart_size = rdev->mc.gtt_size;
 	args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 877af4a..3333e8a 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6330,7 +6330,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -6361,7 +6361,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 35cc16f..c18fc31 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1602,7 +1602,14 @@
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool interruptible, bool no_wait)
 {
-	long timeout = no_wait ? 0 : 15 * HZ;
+	long timeout = 15 * HZ;
+
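+	/* with no_wait, just report whether all fences have already signalled */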
+	if (no_wait) {
+		if (reservation_object_test_signaled_rcu(bo->resv, true))
+			return 0;
+		else
+			return -EBUSY;
+	}
 
 	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
 						      interruptible, timeout);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..a1c68e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@
 				return fifo_state->static_buffer;
 			else {
 				fifo_state->dynamic_buffer = vmalloc(bytes);
+				if (!fifo_state->dynamic_buffer)
+					goto out_err;
 				return fifo_state->dynamic_buffer;
 			}
 		}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 05fa092..56b8033 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1275,11 +1275,14 @@
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	uint32_t backup_handle;
+	uint32_t backup_handle = 0;
 
 	if (req->multisample_count != 0)
 		return -EINVAL;
 
+	if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;
@@ -1315,12 +1318,16 @@
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup,
 					     &user_srf->backup_base);
-		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-		    res->backup_size) {
-			DRM_ERROR("Surface backup buffer is too small.\n");
-			vmw_dmabuf_unreference(&res->backup);
-			ret = -EINVAL;
-			goto out_unlock;
+		if (ret == 0) {
+			if (res->backup->base.num_pages * PAGE_SIZE <
+			    res->backup_size) {
+				DRM_ERROR("Surface backup buffer is too small.\n");
+				vmw_dmabuf_unreference(&res->backup);
+				ret = -EINVAL;
+				goto out_unlock;
+			} else {
+				backup_handle = req->buffer_handle;
+			}
 		}
 	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index dbacb20..58ef5ee 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -798,8 +798,19 @@
 #define A6XX_GMU_CM3_CFG			0x1F82D
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE	0x1F840
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0	0x1F841
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1	0x1F842
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L	0x1F844
 #define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H	0x1F845
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L	0x1F846
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H	0x1F847
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L	0x1F848
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H	0x1F849
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L	0x1F84A
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H	0x1F84B
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L	0x1F84C
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H	0x1F84D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L	0x1F84E
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H	0x1F84F
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
@@ -852,6 +863,7 @@
 #define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL      0x23B0A
 #define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL       0x23B0B
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2	0x23B0D
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK	0x23B0E
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 9a44f34..3c47762 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -326,7 +326,8 @@
 		.major = 3,
 		.minor = 0,
 		.patchid = ANY_ID,
-		.features = ADRENO_64BIT | ADRENO_RPMH,
+		.features = ADRENO_64BIT | ADRENO_RPMH |
+			ADRENO_CONTENT_PROTECTION,
 		.sqefw_name = "a630_sqe.fw",
 		.zap_name = "a630_zap",
 		.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index f581cff..3672273 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2855,8 +2855,14 @@
 			gpu_busy += adj;
 		}
 
-		stats->busy_time = adreno_ticks_to_us(gpu_busy,
-			kgsl_pwrctrl_active_freq(pwr));
+		if (kgsl_gmu_isenabled(device)) {
+			/* clock sourced from XO */
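+			/* XO runs at 19.2MHz, so ticks * 10 / 192 gives microseconds */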
+			stats->busy_time = gpu_busy * 10 / 192;
+		} else {
+			/* clock sourced from GFX3D */
+			stats->busy_time = adreno_ticks_to_us(gpu_busy,
+				kgsl_pwrctrl_active_freq(pwr));
+		}
 	}
 
 	if (device->pwrctrl.bus_control) {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 26c5505..7a6581c 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -641,6 +641,7 @@
 	ADRENO_REG_GMU_HOST2GMU_INTR_SET,
 	ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
 	ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
+	ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
 	ADRENO_REG_REGISTER_MAX,
 };
 
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 314ac85a..13c36e6 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -3017,6 +3017,8 @@
 				A5XX_VBIF_XIN_HALT_CTRL1),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
 				A5XX_VBIF_VERSION),
+	ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
+				A5XX_GPMU_POWER_COUNTER_ENABLE),
 };
 
 static const struct adreno_reg_offsets a5xx_reg_offsets = {
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 314b2d8..33854ea 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -1202,7 +1202,7 @@
 			OOB_BOOT_SLUMBER_CLEAR_MASK);
 
 	if (ret)
-		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+		dev_err(&gmu->pdev->dev, "Boot OOB timed out\n");
 
 	return ret;
 }
@@ -1222,6 +1222,9 @@
 	int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
 	int ret, state;
 
+	/* Disable the power counter so that the GMU is not busy */
+	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
 	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
 		return ret;
@@ -1238,7 +1241,7 @@
 	a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
 
 	if (ret)
-		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
+		dev_err(&gmu->pdev->dev, "Notify slumber OOB timed out\n");
 	else {
 		kgsl_gmu_regread(device,
 			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
@@ -1286,6 +1289,9 @@
 	/* Turn on the HM and SPTP head switches */
 	ret = a6xx_hm_sptprac_enable(device);
 
+	/* Enable the power counter because it was disabled before slumber */
+	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
 	return ret;
 error_rsc:
 	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
@@ -1466,7 +1472,7 @@
 		OOB_DCVS_CLEAR_MASK);
 
 	if (ret) {
-		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+		dev_err(&gmu->pdev->dev, "DCVS OOB timed out\n");
 		goto done;
 	}
 
@@ -1495,10 +1501,17 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
+	unsigned int status, status2;
 
 	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
 			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
-		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+		kgsl_gmu_regread(device,
+				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &status);
+		kgsl_gmu_regread(device,
+				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
+		dev_err(&gmu->pdev->dev,
+				"GMU not idling: status=0x%x, status2=0x%x\n",
+				status, status2);
 		return -ETIMEDOUT;
 	}
 
@@ -1566,6 +1579,8 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	unsigned int reg;
+	unsigned long time;
+	bool vbif_acked = false;
 
 	/*
 	 * For the soft reset case with GMU enabled this part is done
@@ -1584,12 +1599,19 @@
 	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
 	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
 
-	/* Check VBIF status after reset */
-	if (timed_poll_check(device,
-			A6XX_RBBM_VBIF_GX_RESET_STATUS,
-			VBIF_RESET_ACK_MASK,
-			VBIF_RESET_ACK_TIMEOUT,
-			VBIF_RESET_ACK_MASK))
+	/* Wait for the VBIF reset ack to complete */
+	time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
+
+	do {
+		kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
+		if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
+			vbif_acked = true;
+			break;
+		}
+		cpu_relax();
+	} while (!time_after(jiffies, time));
+
+	if (!vbif_acked)
 		return -ETIMEDOUT;
 
 	a6xx_sptprac_enable(adreno_dev);
@@ -2431,12 +2453,47 @@
 		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
 };
 
+static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
+	/*
+	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
+	 * busy count (see the PWR group above). Mark it as broken
+	 * so it's not re-used.
+	 */
+	{ KGSL_PERFCOUNTER_BROKEN, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+};
+
 #define A6XX_PERFCOUNTER_GROUP(offset, name) \
 	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
 
 #define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
 	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
 
+#define A6XX_POWER_COUNTER_GROUP(offset, name) \
+	ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
+
 static struct adreno_perfcount_group a6xx_perfcounter_groups
 				[KGSL_PERFCOUNTER_GROUP_MAX] = {
 	A6XX_PERFCOUNTER_GROUP(CP, cp),
@@ -2462,6 +2519,7 @@
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
 };
 
 static struct adreno_perfcounters a6xx_perfcounters = {
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index bca3dd0..54acd73 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -206,8 +206,38 @@
 };
 
 static const unsigned int a6xx_gmu_registers[] = {
-	/* GMU */
+	/* GMU GX */
+	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
+	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
+	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
+	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
+	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
+	/* GMU TCM */
 	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+	/* GMU CX */
+	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
+	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
+	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
+	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
+	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
+	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
+	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
+	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
+	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
+	/* GPU RSCC */
+	0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
+	0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
+	0x23944, 0x2397F,
+	/* GMU AO */
+	0x23B00, 0x23B16, 0x23C00, 0x23C00,
+	/* GPU CC */
+	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
+	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
+	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
+	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
+	0x26000, 0x26002,
+	/* GPU CC ACD */
+	0x26400, 0x26416, 0x26420, 0x26427,
 };
 
 static const struct adreno_vbif_snapshot_registers
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index cd95003..0da4da9 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -654,7 +654,7 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
-	unsigned int shift = counter << 3;
+	unsigned int shift = (counter << 3) % (sizeof(unsigned int) * 8);
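+	/* select registers pack 8-bit countable fields; keep the shift within a 32-bit register */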
 
 	if (adreno_is_a530(adreno_dev)) {
 		if (countable > 43)
@@ -662,13 +662,16 @@
 	} else if (adreno_is_a540(adreno_dev)) {
 		if (countable > 47)
 			return;
+	} else if (adreno_is_a6xx(adreno_dev)) {
+		if (countable > 34)
+			return;
 	} else
 		/* return on platforms that have no GPMU */
 		return;
 
 	reg = &counters->groups[group].regs[counter];
 	kgsl_regrmw(device, reg->select, 0xff << shift, countable << shift);
-	kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+	adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
 	reg->value = 0;
 }
 
@@ -684,7 +687,7 @@
 
 	reg = &counters->groups[group].regs[counter];
 	kgsl_regwrite(device, reg->select, countable);
-	kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+	adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
 	reg->value = 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 6a39792..6bd212d 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -43,7 +43,6 @@
 #include "kgsl_sync.h"
 #include "kgsl_compat.h"
 #include "kgsl_pool.h"
-#include "adreno.h"
 
 #undef MODULE_PARAM_PREFIX
 #define MODULE_PARAM_PREFIX "kgsl."
@@ -1054,10 +1053,7 @@
 	int result = 0;
 
 	mutex_lock(&device->mutex);
-
-	if (!adreno_is_a6xx(ADRENO_DEVICE(device)))
-		device->open_count--;
-
+	device->open_count--;
 	if (device->open_count == 0) {
 
 		/* Wait for the active count to go to 0 */
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index f87e4da..2a6e7dd 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1238,19 +1238,38 @@
 	return ret;
 }
 
+#define CX_GDSC_TIMEOUT	10	/* ms */
 static int gmu_disable_gdsc(struct gmu_device *gmu)
 {
 	int ret;
+	unsigned long t;
 
 	if (IS_ERR_OR_NULL(gmu->cx_gdsc))
 		return 0;
 
 	ret = regulator_disable(gmu->cx_gdsc);
-	if (ret)
+	if (ret) {
 		dev_err(&gmu->pdev->dev,
 			"Failed to disable GMU CX gdsc, error %d\n", ret);
+		return ret;
+	}
 
-	return ret;
+	/*
+	 * After the GX GDSC turns off, the CX GDSC must turn off as well.
+	 * Voting it off from the GPU driver alone cannot guarantee that the
+	 * CX GDSC is actually off, so poll its state with a 10ms timeout
+	 * to make sure.
+	 */
+	t = jiffies + msecs_to_jiffies(CX_GDSC_TIMEOUT);
+	do {
+		if (!regulator_is_enabled(gmu->cx_gdsc))
+			return 0;
+		udelay(100);
+
+	} while (!(time_after(jiffies, t)));
+
+	dev_err(&gmu->pdev->dev, "GMU CX gdsc off timeout\n");
+	return -ETIMEDOUT;
 }
 
 static int gmu_fast_boot(struct kgsl_device *device)
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 938c96d..b3d02e6 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -22,7 +22,6 @@
 #include <linux/of_platform.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
-#include <stddef.h>
 #include <linux/compat.h>
 
 #include "kgsl.h"
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 4dd7b8e..5c53a05c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2740,7 +2740,8 @@
 	int ret = 0;
 
 	if ((device->state == KGSL_STATE_NONE) ||
-			(device->state == KGSL_STATE_INIT))
+			(device->state == KGSL_STATE_INIT) ||
+			(device->state == KGSL_STATE_SUSPEND))
 		return ret;
 
 	/* drain to prevent from more commands being submitted */
@@ -2807,6 +2808,7 @@
 		break;
 	case KGSL_STATE_SUSPEND:
 		status = _suspend(device);
+		break;
 	case KGSL_STATE_RESET:
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET);
 		break;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 73e6c53..eb67657 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -560,16 +560,74 @@
 }
 #endif
 
+static inline void _cache_op(unsigned int op,
+			const void *start, const void *end)
+{
+	/*
+	 * The dmac_xxx_range functions handle addresses and sizes that
+	 * are not aligned to the cacheline size correctly.
+	 */
+	switch (_fixup_cache_range_op(op)) {
+	case KGSL_CACHE_OP_FLUSH:
+		dmac_flush_range(start, end);
+		break;
+	case KGSL_CACHE_OP_CLEAN:
+		dmac_clean_range(start, end);
+		break;
+	case KGSL_CACHE_OP_INV:
+		dmac_inv_range(start, end);
+		break;
+	}
+}
+
+static int kgsl_do_cache_op(struct page *page, void *addr,
+		uint64_t offset, uint64_t size, unsigned int op)
+{
+	if (page != NULL) {
+		unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+		/*
+		 * page_address() returns the kernel virtual address of a page.
+		 * For high memory, a kernel virtual address exists only while
+		 * the page is mapped, so use kmap_atomic() rather than
+		 * page_address() for high-memory pages.
+		 */
+		if (PageHighMem(page)) {
+			offset &= ~PAGE_MASK;
+
+			do {
+				unsigned int len = size;
+
+				if (len + offset > PAGE_SIZE)
+					len = PAGE_SIZE - offset;
+
+				page = pfn_to_page(pfn++);
+				addr = kmap_atomic(page);
+				_cache_op(op, addr + offset,
+						addr + offset + len);
+				kunmap_atomic(addr);
+
+				size -= len;
+				offset = 0;
+			} while (size);
+
+			return 0;
+		}
+
+		addr = page_address(page);
+	}
+
+	_cache_op(op, addr + offset, addr + offset + (size_t) size);
+	return 0;
+}
+
 int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 		uint64_t size, unsigned int op)
 {
-	/*
-	 * If the buffer is mapped in the kernel operate on that address
-	 * otherwise use the user address
-	 */
-
-	void *addr = (memdesc->hostptr) ?
-		memdesc->hostptr : (void *) memdesc->useraddr;
+	void *addr = NULL;
+	struct sg_table *sgt = NULL;
+	struct scatterlist *sg;
+	unsigned int i, pos = 0;
+	int ret = 0;
 
 	if (size == 0 || size > UINT_MAX)
 		return -EINVAL;
@@ -578,38 +636,57 @@
 	if ((offset + size < offset) || (offset + size < size))
 		return -ERANGE;
 
-	/* Make sure the offset + size do not overflow the address */
-	if (addr + ((size_t) offset + (size_t) size) < addr)
-		return -ERANGE;
-
 	/* Check that offset+length does not exceed memdesc->size */
 	if (offset + size > memdesc->size)
 		return -ERANGE;
 
-	/* Return quietly if the buffer isn't mapped on the CPU */
-	if (addr == NULL)
-		return 0;
+	if (memdesc->hostptr) {
+		addr = memdesc->hostptr;
+		/* Make sure the offset + size do not overflow the address */
+		if (addr + ((size_t) offset + (size_t) size) < addr)
+			return -ERANGE;
 
-	addr = addr + offset;
-
-	/*
-	 * The dmac_xxx_range functions handle addresses and sizes that
-	 * are not aligned to the cacheline size correctly.
-	 */
-
-	switch (_fixup_cache_range_op(op)) {
-	case KGSL_CACHE_OP_FLUSH:
-		dmac_flush_range(addr, addr + (size_t) size);
-		break;
-	case KGSL_CACHE_OP_CLEAN:
-		dmac_clean_range(addr, addr + (size_t) size);
-		break;
-	case KGSL_CACHE_OP_INV:
-		dmac_inv_range(addr, addr + (size_t) size);
-		break;
+		ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+		return ret;
 	}
 
-	return 0;
+	/*
+	 * If the buffer is not mapped in the kernel, walk its pages and
+	 * perform the cache operations after mapping each one.
+	 */
+	if (memdesc->sgt != NULL)
+		sgt = memdesc->sgt;
+	else {
+		if (memdesc->pages == NULL)
+			return ret;
+
+		sgt = kgsl_alloc_sgt_from_pages(memdesc);
+		if (IS_ERR(sgt))
+			return PTR_ERR(sgt);
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		uint64_t sg_offset, sg_left;
+
+		if (offset >= (pos + sg->length)) {
+			pos += sg->length;
+			continue;
+		}
+		sg_offset = offset > pos ? offset - pos : 0;
+		sg_left = (sg->length - sg_offset > size) ? size :
+					sg->length - sg_offset;
+		ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
+							sg_left, op);
+		size -= sg_left;
+		if (size == 0)
+			break;
+		pos += sg->length;
+	}
+
+	if (memdesc->sgt == NULL)
+		kgsl_free_sgt(sgt);
+
+	return ret;
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
 
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 7f8ff39..e46f656 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,6 +28,8 @@
 #define UHID_NAME	"uhid"
 #define UHID_BUFSIZE	32
 
+static DEFINE_MUTEX(uhid_open_mutex);
+
 struct uhid_device {
 	struct mutex devlock;
 	bool running;
@@ -142,15 +144,26 @@
 static int uhid_hid_open(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
+	int retval = 0;
 
-	return uhid_queue_event(uhid, UHID_OPEN);
+	mutex_lock(&uhid_open_mutex);
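+	/* only forward UHID_OPEN to userspace for the first opener */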
+	if (!hid->open++) {
+		retval = uhid_queue_event(uhid, UHID_OPEN);
+		if (retval)
+			hid->open--;
+	}
+	mutex_unlock(&uhid_open_mutex);
+	return retval;
 }
 
 static void uhid_hid_close(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
 
-	uhid_queue_event(uhid, UHID_CLOSE);
+	mutex_lock(&uhid_open_mutex);
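+	/* only forward UHID_CLOSE once the last opener has gone */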
+	if (!--hid->open)
+		uhid_queue_event(uhid, UHID_CLOSE);
+	mutex_unlock(&uhid_open_mutex);
 }
 
 static int uhid_hid_parse(struct hid_device *hid)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0e07a76..c6a922e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1400,37 +1400,38 @@
 {
 	unsigned char *data = wacom->data;
 
-	if (wacom->pen_input)
+	if (wacom->pen_input) {
 		dev_dbg(wacom->pen_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
-	else if (wacom->touch_input)
+
+		if (len == WACOM_PKGLEN_PENABLED ||
+		    data[0] == WACOM_REPORT_PENABLED)
+			return wacom_tpc_pen(wacom);
+	} else if (wacom->touch_input) {
 		dev_dbg(wacom->touch_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
 
-	switch (len) {
-	case WACOM_PKGLEN_TPC1FG:
-		return wacom_tpc_single_touch(wacom, len);
-
-	case WACOM_PKGLEN_TPC2FG:
-		return wacom_tpc_mt_touch(wacom);
-
-	case WACOM_PKGLEN_PENABLED:
-		return wacom_tpc_pen(wacom);
-
-	default:
-		switch (data[0]) {
-		case WACOM_REPORT_TPC1FG:
-		case WACOM_REPORT_TPCHID:
-		case WACOM_REPORT_TPCST:
-		case WACOM_REPORT_TPC1FGE:
+		switch (len) {
+		case WACOM_PKGLEN_TPC1FG:
 			return wacom_tpc_single_touch(wacom, len);
 
-		case WACOM_REPORT_TPCMT:
-		case WACOM_REPORT_TPCMT2:
-			return wacom_mt_touch(wacom);
+		case WACOM_PKGLEN_TPC2FG:
+			return wacom_tpc_mt_touch(wacom);
 
-		case WACOM_REPORT_PENABLED:
-			return wacom_tpc_pen(wacom);
+		default:
+			switch (data[0]) {
+			case WACOM_REPORT_TPC1FG:
+			case WACOM_REPORT_TPCHID:
+			case WACOM_REPORT_TPCST:
+			case WACOM_REPORT_TPC1FGE:
+				return wacom_tpc_single_touch(wacom, len);
+
+			case WACOM_REPORT_TPCMT:
+			case WACOM_REPORT_TPCMT2:
+				return wacom_mt_touch(wacom);
+
+			}
 		}
 	}
 
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index 8a57ed2..d26e0d0 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -64,7 +64,7 @@
 #define ITCHIN			(0xEF4)
 #define ITTRIGIN		(0xEF8)
 
-#define CTI_MAX_TRIGGERS	(8)
+#define CTI_MAX_TRIGGERS	(32)
 #define CTI_MAX_CHANNELS	(4)
 #define AFFINITY_LEVEL_L2	1
 
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 9bdde0b..966a988 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -940,11 +940,12 @@
 int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
 {
 	unsigned long flags;
+	void *vaddr = NULL;
 
 	/* config types are set a boot time and never change */
 	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
 		return -EINVAL;
-
+	mutex_lock(&drvdata->mem_lock);
 	spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	/* RE-enable the TMC if need be */
@@ -957,12 +958,16 @@
 		 */
 		tmc_etr_enable_hw(drvdata);
 	} else {
-		tmc_etr_free_mem(drvdata);
+		vaddr = drvdata->vaddr;
 		drvdata->buf = NULL;
 	}
 
 	drvdata->reading = false;
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
+	if (vaddr)
+		tmc_etr_free_mem(drvdata);
+
+	mutex_unlock(&drvdata->mem_lock);
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 989af91..051ab8e 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -25,10 +25,12 @@
 #include <linux/pm_runtime.h>
 #include <linux/dma-mapping.h>
 #include <linux/qcom-geni-se.h>
+#include <linux/ipc_logging.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
 #define SE_I2C_RX_TRANS_LEN		(0x270)
 #define SE_I2C_SCL_COUNTERS		(0x278)
+#define SE_GENI_IOS			(0x908)
 
 #define SE_I2C_ERR  (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
 			M_GP_IRQ_1_EN | M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
@@ -53,6 +55,21 @@
 #define SLV_ADDR_SHFT		(9)
 
 #define I2C_CORE2X_VOTE		(10000)
+#define GP_IRQ0			0
+#define GP_IRQ1			1
+#define GP_IRQ2			2
+#define GP_IRQ3			3
+#define GP_IRQ4			4
+#define GP_IRQ5			5
+#define GENI_OVERRUN		6
+#define GENI_ILLEGAL_CMD	7
+#define GENI_ABORT_DONE		8
+#define GENI_TIMEOUT		9
+
+#define I2C_NACK		GP_IRQ1
+#define I2C_BUS_PROTO		GP_IRQ3
+#define I2C_ARB_LOST		GP_IRQ4
+#define DM_I2C_RX_ERR		((GP_IRQ1 | GP_IRQ3 | GP_IRQ4) >> 4)
 
 struct geni_i2c_dev {
 	struct device *dev;
@@ -67,6 +84,29 @@
 	int cur_wr;
 	int cur_rd;
 	struct device *wrapper_dev;
+	void *ipcl;
+};
+
+struct geni_i2c_err_log {
+	int err;
+	const char *msg;
+};
+
+static struct geni_i2c_err_log gi2c_log[] = {
+	[GP_IRQ0] = {-EINVAL, "Unknown I2C err GP_IRQ0"},
+	[I2C_NACK] = {-ENOTCONN,
+			"NACK: slv unresponsive, check its power/reset-ln"},
+	[GP_IRQ2] = {-EINVAL, "Unknown I2C err GP IRQ2"},
+	[I2C_BUS_PROTO] = {-EPROTO,
+				"Bus proto err, noisy/unexpected start/stop"},
+	[I2C_ARB_LOST] = {-EBUSY,
+				"Bus arbitration lost, clock line undriveable"},
+	[GP_IRQ5] = {-EINVAL, "Unknown I2C err GP IRQ5"},
+	[GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"},
+	[GENI_ILLEGAL_CMD] = {-EILSEQ,
+				"Illegal cmd, check GENI cmd-state machine"},
+	[GENI_ABORT_DONE] = {-ETIMEDOUT, "Abort after timeout successful"},
+	[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
 };
 
 static inline void qcom_geni_i2c_conf(void __iomem *base, int dfs, int div)
@@ -81,25 +121,67 @@
 	mb();
 }
 
+static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
+{
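+	/* snapshot the GENI command/FIFO status so the error is logged with context */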
+	u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
+	u32 rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
+	u32 tx_st = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
+	u32 m_cmd = readl_relaxed(gi2c->base + SE_GENI_M_CMD0);
+	u32 geni_s = readl_relaxed(gi2c->base + SE_GENI_STATUS);
+	u32 geni_ios = readl_relaxed(gi2c->base + SE_GENI_IOS);
+
+	if (err == I2C_NACK || err == GENI_ABORT_DONE) {
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n",
+			    gi2c_log[err].msg);
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			     "m_stat:0x%x, tx_stat:0x%x, rx_stat:0x%x, ",
+			     m_stat, tx_st, rx_st);
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			     "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
+			     m_cmd, geni_s, geni_ios);
+	} else {
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, "%s\n",
+			     gi2c_log[err].msg);
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+			     "m_stat:0x%x, tx_stat:0x%x, rx_stat:0x%x, ",
+			     m_stat, tx_st, rx_st);
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+			     "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
+			     m_cmd, geni_s, geni_ios);
+	}
+	gi2c->err = gi2c_log[err].err;
+}
+
 static irqreturn_t geni_i2c_irq(int irq, void *dev)
 {
 	struct geni_i2c_dev *gi2c = dev;
 	int i, j;
 	u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
-	u32 tx_stat = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
-	u32 rx_stat = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
+	u32 rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
 	u32 dm_tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
 	u32 dm_rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
 	u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
 	struct i2c_msg *cur = gi2c->cur;
 
-	dev_dbg(gi2c->dev,
-		"got i2c irq:%d, stat:0x%x, tx stat:0x%x, rx stat:0x%x\n",
-		irq, m_stat, tx_stat, rx_stat);
-	if (!cur || (m_stat & SE_I2C_ERR) || (dm_tx_st & TX_SBE) ||
-		    (dm_rx_st & RX_SBE)) {
-		dev_err(gi2c->dev, "i2c err:st:0x%x, dm_t: 0x%x, dm_r: 0x%x\n",
-				   m_stat, dm_tx_st, dm_tx_st);
+	if (!cur || (m_stat & M_CMD_FAILURE_EN) ||
+		    (dm_rx_st & (DM_I2C_RX_ERR)) ||
+		    (m_stat & M_CMD_ABORT_EN)) {
+
+		if (m_stat & M_GP_IRQ_1_EN)
+			geni_i2c_err(gi2c, I2C_NACK);
+		if (m_stat & M_GP_IRQ_3_EN)
+			geni_i2c_err(gi2c, I2C_BUS_PROTO);
+		if (m_stat & M_GP_IRQ_4_EN)
+			geni_i2c_err(gi2c, I2C_ARB_LOST);
+		if (m_stat & M_CMD_OVERRUN_EN)
+			geni_i2c_err(gi2c, GENI_OVERRUN);
+		if (m_stat & M_ILLEGAL_CMD_EN)
+			geni_i2c_err(gi2c, GENI_ILLEGAL_CMD);
+		if (m_stat & M_CMD_ABORT_EN)
+			geni_i2c_err(gi2c, GENI_ABORT_DONE);
+		if (m_stat & M_GP_IRQ_0_EN)
+			geni_i2c_err(gi2c, GP_IRQ0);
+
 		if (!dma)
 			writel_relaxed(0, (gi2c->base +
 					   SE_GENI_TX_WATERMARK_REG));
@@ -115,7 +197,7 @@
 
 	if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
 		(m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
-		u32 rxcnt = rx_stat & RX_FIFO_WC_MSK;
+		u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
 
 		for (j = 0; j < rxcnt; j++) {
 			u32 temp;
@@ -131,7 +213,6 @@
 					i, temp);
 				break;
 			}
-			dev_dbg(gi2c->dev, "FIFO i: %d, read 0x%x\n", i, temp);
 		}
 	} else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
 					!(cur->flags & I2C_M_RD)) {
@@ -188,7 +269,8 @@
 	reinit_completion(&gi2c->xfer);
 	ret = pm_runtime_get_sync(gi2c->dev);
 	if (ret < 0) {
-		dev_err(gi2c->dev, "error turning SE resources:%d\n", ret);
+		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
+			    "error turning SE resources:%d\n", ret);
 		pm_runtime_put_noidle(gi2c->dev);
 		/* Set device in suspended since resume failed */
 		pm_runtime_set_suspended(gi2c->dev);
@@ -259,7 +341,7 @@
 		mb();
 		timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
 		if (!timeout) {
-			gi2c->err = -ETIMEDOUT;
+			geni_i2c_err(gi2c, GENI_TIMEOUT);
 			gi2c->cur = NULL;
 			geni_abort_m_cmd(gi2c->base);
 			timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
@@ -432,6 +514,8 @@
 
 	pm_runtime_disable(gi2c->dev);
 	i2c_del_adapter(&gi2c->adap);
+	if (gi2c->ipcl)
+		ipc_log_context_destroy(gi2c->ipcl);
 	return 0;
 }
 
@@ -455,6 +539,12 @@
 	int ret;
 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
 
+	if (!gi2c->ipcl) {
+		char ipc_name[I2C_NAME_SIZE];
+
+		snprintf(ipc_name, I2C_NAME_SIZE, "i2c-%d", gi2c->adap.nr);
+		gi2c->ipcl = ipc_log_context_create(2, ipc_name, 0);
+	}
 	ret = se_geni_resources_on(&gi2c->i2c_rsc);
 	if (ret)
 		return ret;
@@ -465,6 +555,8 @@
 		gi2c->tx_wm = gi2c_tx_depth - 1;
 		geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
 		se_config_packing(gi2c->base, 8, 4, true);
+		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
+			    "i2c fifo depth:%d\n", gi2c_tx_depth);
 	}
 	enable_irq(gi2c->irq);
 	return 0;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77ee..a2e3dd7 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@
 		    int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
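+	/* USB needs a DMA-capable buffer; bounce the data through a kmalloc'ed copy */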
+	void *dmadata = kmalloc(len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-			       USB_DIR_IN, value, index, data, len, 2000);
+			       USB_DIR_IN, value, index, dmadata, len, 2000);
+
+	memcpy(data, dmadata, len);
+	kfree(dmadata);
+	return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
 		     int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
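+	/* duplicate the data into a DMA-capable buffer for the USB transfer */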
+	void *dmadata = kmemdup(data, len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-			       value, index, data, len, 2000);
+			       value, index, dmadata, len, 2000);
+
+	kfree(dmadata);
+	return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 21d38c8..7f4f9c4 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -143,7 +143,7 @@
 	iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
 }
 
-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
 {
 	u32 channel_intr_status;
 	u32 intr_status;
@@ -167,7 +167,7 @@
 	return IRQ_NONE;
 }
 
-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
 {
 	irqreturn_t retval = IRQ_NONE;
 	struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@
 	adc_priv = iio_priv(indio_dev);
 
 	regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
-	dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+	dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
 			intr_status);
 
 	intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@
 	}
 
 	ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
-				iproc_adc_interrupt_thread,
 				iproc_adc_interrupt_handler,
+				iproc_adc_interrupt_thread,
 				IRQF_SHARED, "iproc-adc", indio_dev);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error %d\n", ret);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 3afc53a..c298fd8 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@
 static const struct reg_field reg_field_it =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
 static const struct reg_field reg_field_als_intr =
-				REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
 				REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+				REG_FIELD(LTR501_INTR, 0, 0);
 static const struct reg_field reg_field_als_rate =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
 static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 0204595..268210e 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
 #define AS3935_AFE_PWR_BIT	BIT(0)
 
 #define AS3935_INT		0x03
-#define AS3935_INT_MASK		0x07
+#define AS3935_INT_MASK		0x0f
 #define AS3935_EVENT_INT	BIT(3)
-#define AS3935_NOISE_INT	BIT(1)
+#define AS3935_NOISE_INT	BIT(0)
 
 #define AS3935_DATA		0x07
 #define AS3935_DATA_MASK	0x3F
@@ -215,7 +215,7 @@
 
 	st->buffer[0] = val & AS3935_DATA_MASK;
 	iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
-					   pf->timestamp);
+					   iio_get_time_ns(indio_dev));
 err_read:
 	iio_trigger_notify_done(indio_dev->trig);
 
@@ -244,7 +244,7 @@
 
 	switch (val) {
 	case AS3935_EVENT_INT:
-		iio_trigger_poll(st->trig);
+		iio_trigger_poll_chained(st->trig);
 		break;
 	case AS3935_NOISE_INT:
 		dev_warn(&st->spi->dev, "noise level is too high\n");
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8..4bd5b5c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2366,8 +2366,11 @@
 		ret = hfi1_rvt_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			/* peer will send again */
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
 		goto send_last;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512..f3fe787 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2067,8 +2067,10 @@
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
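+			/* peer will send again */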
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 7826994..cd834da 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1118,8 +1118,10 @@
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu LIFEBOOK E557   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
@@ -1525,6 +1527,13 @@
 		},
 	},
 	{
+		/* Fujitsu LIFEBOOK E546  does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+		},
+	},
+	{
 		/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1546,6 +1555,13 @@
 		},
 	},
 	{
+		/* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+		},
+	},
+	{
 		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 6725c21..1d5c514 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -166,6 +166,7 @@
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
 #define SMR_VALID			(1 << 31)
 #define SMR_MASK_SHIFT			16
+#define SMR_MASK_MASK			0x7FFF
 #define SMR_ID_SHIFT			0
 
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
@@ -335,10 +336,12 @@
 	enum arm_smmu_s2cr_type		type;
 	enum arm_smmu_s2cr_privcfg	privcfg;
 	u8				cbndx;
+	bool				cb_handoff;
 };
 
 #define s2cr_init_val (struct arm_smmu_s2cr){				\
 	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
+	.cb_handoff = false,						\
 }
 
 struct arm_smmu_smr {
@@ -553,6 +556,10 @@
 
 static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
 
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu,
+				struct device *dev);
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
@@ -1615,14 +1622,11 @@
 	if (is_iommu_pt_coherent(smmu_domain))
 		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
 
-	/* Dynamic domains must set cbndx through domain attribute */
-	if (!dynamic) {
-		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
-				      smmu->num_context_banks);
-		if (ret < 0)
-			goto out_unlock;
-		cfg->cbndx = ret;
-	}
+	ret = arm_smmu_alloc_cb(domain, smmu, dev);
+	if (ret < 0)
+		goto out_unlock;
+	cfg->cbndx = ret;
+
 	if (smmu->version < ARM_SMMU_V2) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
@@ -2243,14 +2247,18 @@
 	return ret;
 }
 
+#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
 static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			   struct scatterlist *sg, unsigned int nents, int prot)
 {
 	int ret;
-	size_t size;
+	size_t size, batch_size, size_to_unmap = 0;
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	unsigned int idx_start, idx_end;
+	struct scatterlist *sg_start, *sg_end;
+	unsigned long __saved_iova_start;
 
 	if (!ops)
 		return -ENODEV;
@@ -2259,17 +2267,45 @@
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	__saved_iova_start = iova;
+	idx_start = idx_end = 0;
+	sg_start = sg_end = sg;
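+	/*
+	 * Map the scatterlist in batches of at most MAX_MAP_SG_BATCH_SIZE so
+	 * that the page table lock is not held across the entire list.
+	 */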
+	while (idx_end < nents) {
+		batch_size = sg_end->length;
+		sg_end = sg_next(sg_end);
+		idx_end++;
+		while ((idx_end < nents) &&
+		       (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
 
-	if (!ret)
-		arm_smmu_unmap(domain, iova, size);
+			batch_size += sg_end->length;
+			sg_end = sg_next(sg_end);
+			idx_end++;
+		}
 
-	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+		spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+		ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
+				  prot, &size);
+		spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+		/* Returns 0 on error */
+		if (!ret) {
+			size_to_unmap = iova + size - __saved_iova_start;
+			goto out;
+		}
+
+		iova += batch_size;
+		idx_start = idx_end;
+		sg_start = sg_end;
+	}
+
+out:
 	arm_smmu_assign_table(smmu_domain);
 
-	return ret;
+	if (size_to_unmap) {
+		arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
+		iova = __saved_iova_start;
+	}
+	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+	return iova - __saved_iova_start;
 }
 
 static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -3249,6 +3285,91 @@
 	}
 }
 
+
+/*
+ * Some context banks need to be transferred from the bootloader to HLOS in a
+ * way that allows ongoing traffic. The current expectation is that these
+ * context banks operate in bypass mode.
+ * Additionally, there must be exactly one device in the devicetree with
+ * stream IDs overlapping those used by the bootloader.
+ */
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu,
+				struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	u32 i, idx;
+	int cb = -EINVAL;
+	bool dynamic;
+
+	/* Dynamic domains must set cbndx through domain attribute */
+	dynamic = is_dynamic_domain(domain);
+	if (dynamic)
+		return INVALID_CBNDX;
+
+	mutex_lock(&smmu->stream_map_mutex);
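+	/*
+	 * If any of this master's stream mapping entries was handed off by
+	 * the bootloader, reuse the context bank it points at.
+	 */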
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (smmu->s2crs[idx].cb_handoff)
+			cb = smmu->s2crs[idx].cbndx;
+	}
+
+	if (cb < 0) {
+		mutex_unlock(&smmu->stream_map_mutex);
+		return __arm_smmu_alloc_bitmap(smmu->context_map,
+						smmu->num_s2_context_banks,
+						smmu->num_context_banks);
+	}
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		if (smmu->s2crs[i].cb_handoff && smmu->s2crs[i].cbndx == cb) {
+			smmu->s2crs[i].cb_handoff = false;
+			smmu->s2crs[i].count -= 1;
+		}
+	}
+	mutex_unlock(&smmu->stream_map_mutex);
+
+	return cb;
+}
+
+static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
+{
+	u32 i, raw_smr, raw_s2cr;
+	struct arm_smmu_smr smr;
+	struct arm_smmu_s2cr s2cr;
+
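+	/*
+	 * Record bootloader-programmed SMR/S2CR pairs that point at a
+	 * translation context bank and reserve those context banks in the
+	 * context map.
+	 */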
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+					ARM_SMMU_GR0_SMR(i));
+		if (!(raw_smr & SMR_VALID))
+			continue;
+
+		smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+		smr.id = (u16)raw_smr;
+		smr.valid = true;
+
+		raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+					ARM_SMMU_GR0_S2CR(i));
+		s2cr.group = NULL;
+		s2cr.count = 1;
+		s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+		s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
+				S2CR_PRIVCFG_MASK;
+		s2cr.cbndx = (u8)raw_s2cr;
+		s2cr.cb_handoff = true;
+
+		if (s2cr.type != S2CR_TYPE_TRANS)
+			continue;
+
+		smmu->smrs[i] = smr;
+		smmu->s2crs[i] = s2cr;
+		bitmap_set(smmu->context_map, s2cr.cbndx, 1);
+		dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
+			raw_smr, raw_s2cr, s2cr.cbndx);
+	}
+
+	return 0;
+}
+
 static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
 {
 	struct device *dev = smmu->dev;
@@ -3508,6 +3629,7 @@
 	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
 		u32 smr;
+		int i;
 
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
 		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
@@ -3522,14 +3644,25 @@
 		 * bits are set, so check each one separately. We can reject
 		 * masters later if they try to claim IDs outside these masks.
 		 */
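+		/*
+		 * Probe an SMR that is not currently marked valid so that
+		 * entries handed off from the bootloader are left untouched.
+		 */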
+		for (i = 0; i < size; i++) {
+			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+			if (!(smr & SMR_VALID))
+				break;
+		}
+		if (i == size) {
+			dev_err(smmu->dev,
+				"Unable to compute streamid_masks\n");
+			return -ENODEV;
+		}
+
 		smr = smmu->streamid_mask << SMR_ID_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
 		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
 
 		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
 		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
 
 		/* Zero-initialised to mark as invalid */
@@ -3821,6 +3954,10 @@
 	if (err)
 		goto out_power_off;
 
+	err = arm_smmu_handoff_cbs(smmu);
+	if (err)
+		goto out_power_off;
+
 	err = arm_smmu_parse_impl_def_registers(smmu);
 	if (err)
 		goto out_power_off;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ac3059d..b5e817b 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -25,6 +25,13 @@
 #define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
 #define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
 #define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
+#define FAST_MAIR_ATTR_IDX_CACHE	1
+#define FAST_PTE_ATTRINDX_SHIFT		2
+#define FAST_PTE_ATTRINDX_MASK		0x7
+#define FAST_PTE_SH_SHIFT		8
+#define FAST_PTE_SH_MASK	   (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
+#define FAST_PTE_SH_OS             (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
+#define FAST_PTE_SH_IS             (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)
 
 static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 				 bool coherent)
@@ -56,6 +63,36 @@
 		dmac_clean_range(start, end);
 }
 
+static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
+{
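+	/*
+	 * Treat the mapping as IO coherent when the PTE uses the cacheable
+	 * memory attribute index and is inner- or outer-shareable.
+	 */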
+	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
+			FAST_PTE_ATTRINDX_SHIFT)) >>
+			FAST_PTE_ATTRINDX_SHIFT;
+
+	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
+		(((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
+		  (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
+		return true;
+
+	return false;
+}
+
+static bool is_dma_coherent(struct device *dev, unsigned long attrs)
+{
+	bool is_coherent;
+
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		is_coherent = true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+	else
+		is_coherent = false;
+
+	return is_coherent;
+}
+
 /*
  * Checks if the allocated range (ending at @end) covered the upcoming
  * stale bit.  We don't need to know exactly where the range starts since
@@ -313,7 +350,7 @@
 	int nptes = len >> FAST_PAGE_SHIFT;
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
 	int prot = __fast_dma_direction_to_prot(dir);
-	bool is_coherent = is_device_dma_coherent(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
 
 	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
 
@@ -357,7 +394,7 @@
 	int nptes = len >> FAST_PAGE_SHIFT;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
-	bool is_coherent = is_device_dma_coherent(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
 
 	if (!skip_sync && !is_coherent)
 		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
@@ -377,7 +414,7 @@
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
-	if (!is_device_dma_coherent(dev))
+	if (!__fast_is_pte_coherent(pmd))
 		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -389,7 +426,7 @@
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
-	if (!is_device_dma_coherent(dev))
+	if (!__fast_is_pte_coherent(pmd))
 		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -469,7 +506,7 @@
 	struct sg_mapping_iter miter;
 	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
 	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
-	bool is_coherent = is_device_dma_coherent(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
 	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	struct page **pages;
 
@@ -591,7 +628,7 @@
 	unsigned long uaddr = vma->vm_start;
 	struct page **pages;
 	int i, nr_pages, ret = 0;
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     coherent);
@@ -611,6 +648,21 @@
 	return ret;
 }
 
+static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
+				void *cpu_addr, dma_addr_t dma_addr,
+				size_t size, unsigned long attrs)
+{
+	unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct vm_struct *area;
+
+	area = find_vm_area(cpu_addr);
+	if (!area || !area->pages)
+		return -EINVAL;
+
+	return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0, size,
+					GFP_KERNEL);
+}
+
 static dma_addr_t fast_smmu_dma_map_resource(
 			struct device *dev, phys_addr_t phys_addr,
 			size_t size, enum dma_data_direction dir,
@@ -659,12 +711,6 @@
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-
-static int fast_smmu_dma_supported(struct device *dev, u64 mask)
-{
-	return mask <= 0xffffffff;
-}
-
 static int fast_smmu_mapping_error(struct device *dev,
 				   dma_addr_t dma_addr)
 {
@@ -708,6 +754,7 @@
 	.alloc = fast_smmu_alloc,
 	.free = fast_smmu_free,
 	.mmap = fast_smmu_mmap_attrs,
+	.get_sgtable = fast_smmu_get_sgtable,
 	.map_page = fast_smmu_map_page,
 	.unmap_page = fast_smmu_unmap_page,
 	.sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
@@ -718,7 +765,6 @@
 	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
 	.map_resource = fast_smmu_dma_map_resource,
 	.unmap_resource = fast_smmu_dma_unmap_resource,
-	.dma_supported = fast_smmu_dma_supported,
 	.mapping_error = fast_smmu_mapping_error,
 };
 
@@ -765,6 +811,51 @@
 	return ERR_PTR(-ENOMEM);
 }
 
+/*
+ * Based off of similar code from dma-iommu.c, but modified to use a different
+ * iova allocator
+ */
+static void fast_smmu_reserve_pci_windows(struct device *dev,
+			    struct dma_fast_smmu_mapping *mapping)
+{
+	struct pci_host_bridge *bridge;
+	struct resource_entry *window;
+	phys_addr_t start, end;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+
+	if (!dev_is_pci(dev))
+		return;
+
+	pci_dev = to_pci_dev(dev);
+	bridge = pci_find_host_bridge(pci_dev->bus);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM &&
+		    resource_type(window->res) != IORESOURCE_IO)
+			continue;
+
+		start = round_down(window->res->start - window->offset,
+				FAST_PAGE_SIZE);
+		end = round_up(window->res->end - window->offset,
+				FAST_PAGE_SIZE);
+		start = max_t(unsigned long, mapping->base, start);
+		end = min_t(unsigned long, mapping->base + mapping->size, end);
+		if (start >= end)
+			continue;
+
+		dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
+				&start, &end);
+
+		start = (start - mapping->base) >> FAST_PAGE_SHIFT;
+		end = (end - mapping->base) >> FAST_PAGE_SHIFT;
+		bitmap_set(mapping->bitmap, start, end - start);
+	}
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+
 /**
  * fast_smmu_attach_device
  * @dev: valid struct device pointer
@@ -798,6 +889,8 @@
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;
 
+	fast_smmu_reserve_pci_windows(dev, mapping->fast);
+
 	group = dev->iommu_group;
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index fa5069e..2ef496d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -89,6 +89,7 @@
 #define ARM_LPAE_PTE_TYPE_TABLE		3
 #define ARM_LPAE_PTE_TYPE_PAGE		3
 
+#define ARM_LPAE_PTE_SH_MASK		(((arm_lpae_iopte)0x3) << 8)
 #define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
 #define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
 #define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
@@ -735,7 +736,8 @@
 		arm_lpae_iopte *table_base = table;
 		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
 		int entry_size = ARM_LPAE_GRANULE(data);
-		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >>
+				data->pg_shift;
 		int entries = min_t(int, size / entry_size,
 			max_entries - tl_offset);
 		int table_len = entries * sizeof(*table);
@@ -893,8 +895,9 @@
 					ARM_LPAE_PTE_ATTRINDX_SHIFT)) >>
 					ARM_LPAE_PTE_ATTRINDX_SHIFT;
 		if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) &&
-		    ((*ptep & ARM_LPAE_PTE_SH_IS) ||
-		     (*ptep & ARM_LPAE_PTE_SH_OS)))
+		    (((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_IS) ||
+		     ((*ptep & ARM_LPAE_PTE_SH_MASK) == ARM_LPAE_PTE_SH_OS)))
 			return true;
 	} else {
 		if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB)
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 9b13fce..2db06b0 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -395,11 +395,16 @@
 	for (i = 0; i < 4; ++i) {
 		for (j = 0; j < 512; ++j) {
 			av8l_fast_iopte pte, *pudp;
+			void *addr;
 
 			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 			if (!page)
 				goto err_free_pages;
 			pages[pg++] = page;
+
+			addr = page_address(page);
+			dmac_clean_range(addr, addr + SZ_4K);
+
 			pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
 			pudp = data->puds[i] + j;
 			*pudp = pte;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 048ae92..c98d8c2 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1554,10 +1554,14 @@
 	memset(buf, 0, 100);
 
 	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
-	if (!phys)
+	if (!phys) {
 		strlcpy(buf, "FAIL\n", 100);
-	else
+		phys = iommu_iova_to_phys(ddev->domain, ddev->iova);
+		dev_err(ddev->dev, "ATOS for %pa failed. Software walk returned: %pa\n",
+			&ddev->iova, &phys);
+	} else {
 		snprintf(buf, 100, "%pa\n", &phys);
+	}
 
 	buflen = strlen(buf);
 	if (copy_to_user(ubuf, buf, buflen)) {
@@ -1712,6 +1716,7 @@
  *				0: normal mapping
  *				1: force coherent mapping
  *				2: force non-cohernet mapping
+ *				3: use system cache
  */
 static ssize_t iommu_debug_dma_map_write(struct file *file,
 		const char __user *ubuf, size_t count, loff_t *offset)
@@ -1782,6 +1787,8 @@
 		dma_attrs = DMA_ATTR_FORCE_COHERENT;
 	else if (attr == 2)
 		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+	else if (attr == 3)
+		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
 	else
 		goto invalid_format;
 
@@ -1803,7 +1810,7 @@
 	return retval;
 
 invalid_format:
-	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n");
+	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-coherent\n3: use system cache\n");
 	return retval;
 
 invalid_addr:
@@ -1984,6 +1991,8 @@
 		dma_attrs = DMA_ATTR_FORCE_COHERENT;
 	else if (attr == 2)
 		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+	else if (attr == 3)
+		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
 	else
 		goto invalid_format;
 
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 5f144a6..400839d 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -696,6 +696,16 @@
 	  variable brightness.  It also supports outputting the Avdd supply for
 	  AMOLED displays.
 
+config LEDS_QPNP_HAPTICS
+	tristate "Haptics support for QPNP PMIC"
+	depends on LEDS_CLASS && MFD_SPMI_PMIC
+	help
+	  This option enables device driver support for the haptics peripheral
+	  found on Qualcomm Technologies, Inc. QPNP PMICs.  The haptic
+	  peripheral is capable of driving both LRA and ERM vibrators.  This
+	  module provides haptic feedback for user actions such as a long press
+	  on the touch screen.
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e0ca2e8..ba9bb8d 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -75,6 +75,7 @@
 obj-$(CONFIG_LEDS_QPNP_FLASH)		+= leds-qpnp-flash.o
 obj-$(CONFIG_LEDS_QPNP_FLASH_V2)	+= leds-qpnp-flash-v2.o
 obj-$(CONFIG_LEDS_QPNP_WLED)		+= leds-qpnp-wled.o
+obj-$(CONFIG_LEDS_QPNP_HAPTICS)	+= leds-qpnp-haptics.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/leds-qpnp-haptics.c b/drivers/leds/leds-qpnp-haptics.c
new file mode 100644
index 0000000..1eaa652
--- /dev/null
+++ b/drivers/leds/leds-qpnp-haptics.c
@@ -0,0 +1,2497 @@
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"haptics: %s: " fmt, __func__
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/qpnp/qpnp-misc.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* Register definitions */
+#define HAP_STATUS_1_REG(chip)		(chip->base + 0x0A)
+#define HAP_BUSY_BIT			BIT(1)
+#define SC_FLAG_BIT			BIT(3)
+#define AUTO_RES_ERROR_BIT		BIT(4)
+
+#define HAP_LRA_AUTO_RES_LO_REG(chip)	(chip->base + 0x0B)
+#define HAP_LRA_AUTO_RES_HI_REG(chip)	(chip->base + 0x0C)
+
+#define HAP_INT_RT_STS_REG(chip)	(chip->base + 0x10)
+#define SC_INT_RT_STS_BIT		BIT(0)
+#define PLAY_INT_RT_STS_BIT		BIT(1)
+
+#define HAP_EN_CTL_REG(chip)		(chip->base + 0x46)
+#define HAP_EN_BIT			BIT(7)
+
+#define HAP_EN_CTL2_REG(chip)		(chip->base + 0x48)
+#define BRAKE_EN_BIT			BIT(0)
+
+#define HAP_AUTO_RES_CTRL_REG(chip)	(chip->base + 0x4B)
+#define AUTO_RES_EN_BIT			BIT(7)
+#define AUTO_RES_ERR_RECOVERY_BIT	BIT(3)
+
+#define HAP_CFG1_REG(chip)		(chip->base + 0x4C)
+#define HAP_ACT_TYPE_MASK		BIT(0)
+#define HAP_LRA				0
+#define HAP_ERM				1
+
+#define HAP_CFG2_REG(chip)		(chip->base + 0x4D)
+#define HAP_WAVE_SINE			0
+#define HAP_WAVE_SQUARE			1
+#define HAP_LRA_RES_TYPE_MASK		BIT(0)
+
+#define HAP_SEL_REG(chip)		(chip->base + 0x4E)
+#define HAP_WF_SOURCE_MASK		GENMASK(5, 4)
+#define HAP_WF_SOURCE_SHIFT		4
+
+#define HAP_LRA_AUTO_RES_REG(chip)	(chip->base + 0x4F)
+/* For pmi8998 */
+#define LRA_AUTO_RES_MODE_MASK		GENMASK(6, 4)
+#define LRA_AUTO_RES_MODE_SHIFT		4
+#define LRA_HIGH_Z_MASK			GENMASK(3, 2)
+#define LRA_HIGH_Z_SHIFT		2
+#define LRA_RES_CAL_MASK		GENMASK(1, 0)
+#define HAP_RES_CAL_PERIOD_MIN		4
+#define HAP_RES_CAL_PERIOD_MAX		32
+/* For pm660 */
+#define PM660_AUTO_RES_MODE_BIT		BIT(7)
+#define PM660_AUTO_RES_MODE_SHIFT	7
+#define PM660_CAL_DURATION_MASK		GENMASK(6, 5)
+#define PM660_CAL_DURATION_SHIFT	5
+#define PM660_QWD_DRIVE_DURATION_BIT	BIT(4)
+#define PM660_QWD_DRIVE_DURATION_SHIFT	4
+#define PM660_CAL_EOP_BIT		BIT(3)
+#define PM660_CAL_EOP_SHIFT		3
+#define PM660_LRA_RES_CAL_MASK		GENMASK(2, 0)
+#define HAP_PM660_RES_CAL_PERIOD_MAX	256
+
+#define HAP_VMAX_CFG_REG(chip)		(chip->base + 0x51)
+#define HAP_VMAX_OVD_BIT		BIT(6)
+#define HAP_VMAX_MASK			GENMASK(5, 1)
+#define HAP_VMAX_SHIFT			1
+#define HAP_VMAX_MIN_MV			116
+#define HAP_VMAX_MAX_MV			3596
+
+#define HAP_ILIM_CFG_REG(chip)		(chip->base + 0x52)
+#define HAP_ILIM_SEL_MASK		BIT(0)
+#define HAP_ILIM_400_MA			0
+#define HAP_ILIM_800_MA			1
+
+#define HAP_SC_DEB_REG(chip)		(chip->base + 0x53)
+#define HAP_SC_DEB_MASK			GENMASK(2, 0)
+#define HAP_SC_DEB_CYCLES_MIN		0
+#define HAP_DEF_SC_DEB_CYCLES		8
+#define HAP_SC_DEB_CYCLES_MAX		32
+
+#define HAP_RATE_CFG1_REG(chip)		(chip->base + 0x54)
+#define HAP_RATE_CFG1_MASK		GENMASK(7, 0)
+
+#define HAP_RATE_CFG2_REG(chip)		(chip->base + 0x55)
+#define HAP_RATE_CFG2_MASK		GENMASK(3, 0)
+/* Shift to extract the drive period code upper bits [11:8] */
+#define HAP_RATE_CFG2_SHIFT		8
+
+#define HAP_INT_PWM_REG(chip)		(chip->base + 0x56)
+#define INT_PWM_FREQ_SEL_MASK		GENMASK(1, 0)
+#define INT_PWM_FREQ_253_KHZ		0
+#define INT_PWM_FREQ_505_KHZ		1
+#define INT_PWM_FREQ_739_KHZ		2
+#define INT_PWM_FREQ_1076_KHZ		3
+
+#define HAP_EXT_PWM_REG(chip)		(chip->base + 0x57)
+#define EXT_PWM_FREQ_SEL_MASK		GENMASK(1, 0)
+#define EXT_PWM_FREQ_25_KHZ		0
+#define EXT_PWM_FREQ_50_KHZ		1
+#define EXT_PWM_FREQ_75_KHZ		2
+#define EXT_PWM_FREQ_100_KHZ		3
+
+#define HAP_PWM_CAP_REG(chip)		(chip->base + 0x58)
+
+#define HAP_SC_CLR_REG(chip)		(chip->base + 0x59)
+#define SC_CLR_BIT			BIT(0)
+
+#define HAP_BRAKE_REG(chip)		(chip->base + 0x5C)
+#define HAP_BRAKE_PAT_MASK		0x3
+
+#define HAP_WF_REPEAT_REG(chip)		(chip->base + 0x5E)
+#define WF_REPEAT_MASK			GENMASK(6, 4)
+#define WF_REPEAT_SHIFT			4
+#define WF_REPEAT_MIN			1
+#define WF_REPEAT_MAX			128
+#define WF_S_REPEAT_MASK		GENMASK(1, 0)
+#define WF_S_REPEAT_MIN			1
+#define WF_S_REPEAT_MAX			8
+
+#define HAP_WF_S1_REG(chip)		(chip->base + 0x60)
+#define HAP_WF_SIGN_BIT			BIT(7)
+#define HAP_WF_OVD_BIT			BIT(6)
+#define HAP_WF_SAMP_MAX			GENMASK(5, 1)
+#define HAP_WF_SAMPLE_LEN		8
+
+#define HAP_PLAY_REG(chip)		(chip->base + 0x70)
+#define PLAY_BIT			BIT(7)
+#define PAUSE_BIT			BIT(0)
+
+#define HAP_SEC_ACCESS_REG(chip)	(chip->base + 0xD0)
+
+#define HAP_TEST2_REG(chip)		(chip->base + 0xE3)
+#define HAP_EXT_PWM_DTEST_MASK		GENMASK(6, 4)
+#define HAP_EXT_PWM_DTEST_SHIFT		4
+#define PWM_MAX_DTEST_LINES		4
+#define HAP_EXT_PWM_PEAK_DATA		0x7F
+#define HAP_EXT_PWM_HALF_DUTY		50
+#define HAP_EXT_PWM_FULL_DUTY		100
+#define HAP_EXT_PWM_DATA_FACTOR		39
+
+/* Other definitions */
+#define HAP_BRAKE_PAT_LEN		4
+#define HAP_WAVE_SAMP_LEN		8
+#define NUM_WF_SET			4
+#define HAP_WAVE_SAMP_SET_LEN		(HAP_WAVE_SAMP_LEN * NUM_WF_SET)
+#define HAP_RATE_CFG_STEP_US		5
+#define HAP_WAVE_PLAY_RATE_US_MIN	0
+#define HAP_DEF_WAVE_PLAY_RATE_US	5715
+#define HAP_WAVE_PLAY_RATE_US_MAX	20475
+#define HAP_MAX_PLAY_TIME_MS		15000
+
+enum hap_brake_pat {
+	NO_BRAKE = 0,
+	BRAKE_VMAX_4,
+	BRAKE_VMAX_2,
+	BRAKE_VMAX,
+};
+
+enum hap_auto_res_mode {
+	HAP_AUTO_RES_NONE,
+	HAP_AUTO_RES_ZXD,
+	HAP_AUTO_RES_QWD,
+	HAP_AUTO_RES_MAX_QWD,
+	HAP_AUTO_RES_ZXD_EOP,
+};
+
+enum hap_pm660_auto_res_mode {
+	HAP_PM660_AUTO_RES_ZXD,
+	HAP_PM660_AUTO_RES_QWD,
+};
+
+/* high Z option lines */
+enum hap_high_z {
+	HAP_LRA_HIGH_Z_NONE, /* opt0 for PM660 */
+	HAP_LRA_HIGH_Z_OPT1,
+	HAP_LRA_HIGH_Z_OPT2,
+	HAP_LRA_HIGH_Z_OPT3,
+};
+
+/* play modes */
+enum hap_mode {
+	HAP_DIRECT,
+	HAP_BUFFER,
+	HAP_AUDIO,
+	HAP_PWM,
+};
+
+/* wave/sample repeat */
+enum hap_rep_type {
+	HAP_WAVE_REPEAT = 1,
+	HAP_WAVE_SAMP_REPEAT,
+};
+
+/* status flags */
+enum hap_status {
+	AUTO_RESONANCE_ENABLED = BIT(0),
+};
+
+enum hap_play_control {
+	HAP_STOP,
+	HAP_PAUSE,
+	HAP_PLAY,
+};
+
+/* pwm channel parameters */
+struct pwm_param {
+	struct pwm_device	*pwm_dev;
+	u32			duty_us;
+	u32			period_us;
+};
+
+/*
+ *  hap_lra_ares_param - Haptic auto_resonance parameters
+ *  @ lra_qwd_drive_duration - LRA QWD drive duration
+ *  @ calibrate_at_eop - Calibrate at EOP
+ *  @ lra_res_cal_period - LRA resonance calibration period
+ *  @ auto_res_mode - auto resonance mode
+ *  @ lra_high_z - high z option line
+ */
+struct hap_lra_ares_param {
+	int				lra_qwd_drive_duration;
+	int				calibrate_at_eop;
+	enum hap_high_z			lra_high_z;
+	u16				lra_res_cal_period;
+	u8				auto_res_mode;
+};
+
+/*
+ *  hap_chip - Haptics data structure
+ *  @ pdev - platform device pointer
+ *  @ regmap - regmap pointer
+ *  @ bus_lock - spin lock for bus read/write
+ *  @ play_lock - mutex lock for haptics play/enable control
+ *  @ haptics_work - haptics worker
+ *  @ stop_timer - hrtimer for stopping haptics
+ *  @ auto_res_err_poll_timer - hrtimer for auto-resonance error
+ *  @ base - base address
+ *  @ play_irq - irq for play
+ *  @ sc_irq - irq for short circuit
+ *  @ pwm_data - pwm configuration
+ *  @ ares_cfg - auto resonance configuration
+ *  @ play_time_ms - play time set by the user in ms
+ *  @ max_play_time_ms - max play time in ms
+ *  @ vmax_mv - max voltage in mv
+ *  @ ilim_ma - limiting current in ma
+ *  @ sc_deb_cycles - short circuit debounce cycles
+ *  @ wave_play_rate_us - play rate for waveform
+ *  @ last_rate_cfg - Last rate config updated
+ *  @ wave_rep_cnt - waveform repeat count
+ *  @ wave_s_rep_cnt - waveform sample repeat count
+ *  @ ext_pwm_freq_khz - external pwm frequency in KHz
+ *  @ ext_pwm_dtest_line - DTEST line for external pwm
+ *  @ status_flags - status
+ *  @ play_mode - play mode
+ *  @ act_type - actuator type
+ *  @ wave_shape - waveform shape
+ *  @ wave_samp_idx - wave sample index used to refer to the start of a sample set
+ *  @ wave_samp - array of wave samples
+ *  @ brake_pat - pattern for active braking
+ *  @ en_brake - brake state
+ *  @ misc_clk_trim_error_reg - MISC clock trim error register if present
+ *  @ clk_trim_error_code - MISC clock trim error code
+ *  @ drive_period_code_max_limit - calculated drive period code with
+ *    percentage variation on the higher side
+ *  @ drive_period_code_min_limit - calculated drive period code with
+ *    percentage variation on the lower side
+ *  @ drive_period_code_max_var_pct - maximum limit of percentage variation of
+ *    drive period code
+ *  @ drive_period_code_min_var_pct - minimum limit of percentage variation of
+ *    drive period code
+ *  @ last_sc_time - Last time short circuit was detected
+ *  @ sc_count - counter to determine the duration of short circuit
+ *    condition
+ *  @ perm_disable - Flag to disable module permanently
+ *  @ state - current state of haptics
+ *  @ module_en - module enable status of haptics
+ *  @ lra_auto_mode - Auto mode selection
+ *  @ play_irq_en - Play interrupt enable status
+ *  @ auto_res_err_recovery_hw - Enable auto resonance error recovery by HW
+ */
+struct hap_chip {
+	struct platform_device		*pdev;
+	struct regmap			*regmap;
+	struct pmic_revid_data		*revid;
+	struct led_classdev		cdev;
+	spinlock_t			bus_lock;
+	struct mutex			play_lock;
+	struct mutex			param_lock;
+	struct work_struct		haptics_work;
+	struct hrtimer			stop_timer;
+	struct hrtimer			auto_res_err_poll_timer;
+	u16				base;
+	int				play_irq;
+	int				sc_irq;
+	struct pwm_param		pwm_data;
+	struct hap_lra_ares_param	ares_cfg;
+	u32				play_time_ms;
+	u32				max_play_time_ms;
+	u32				vmax_mv;
+	u8				ilim_ma;
+	u32				sc_deb_cycles;
+	u32				wave_play_rate_us;
+	u16				last_rate_cfg;
+	u32				wave_rep_cnt;
+	u32				wave_s_rep_cnt;
+	u32				ext_pwm_freq_khz;
+	u8				ext_pwm_dtest_line;
+	u32				status_flags;
+	enum hap_mode			play_mode;
+	u8				act_type;
+	u8				wave_shape;
+	u8				wave_samp_idx;
+	u32				wave_samp[HAP_WAVE_SAMP_SET_LEN];
+	u32				brake_pat[HAP_BRAKE_PAT_LEN];
+	bool				en_brake;
+	u32				misc_clk_trim_error_reg;
+	u8				clk_trim_error_code;
+	u16				drive_period_code_max_limit;
+	u16				drive_period_code_min_limit;
+	u8				drive_period_code_max_var_pct;
+	u8				drive_period_code_min_var_pct;
+	ktime_t				last_sc_time;
+	u8				sc_count;
+	bool				perm_disable;
+	atomic_t			state;
+	bool				module_en;
+	bool				lra_auto_mode;
+	bool				play_irq_en;
+	bool				auto_res_err_recovery_hw;
+};
+
+static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip);
+static int qpnp_haptics_parse_pwm_dt(struct hap_chip *chip);
+
+static int qpnp_haptics_read_reg(struct hap_chip *chip, u16 addr, u8 *val,
+				int len)
+{
+	int rc;
+
+	rc = regmap_bulk_read(chip->regmap, addr, val, len);
+	if (rc < 0)
+		pr_err("Error reading address: 0x%x - rc %d\n", addr, rc);
+
+	return rc;
+}
+
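+/*
+ * Registers at offsets above 0xD0 are secured; every write to them must be
+ * preceded by writing the 0xA5 unlock code to the SEC_ACCESS register.
+ */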
+static inline bool is_secure(u16 addr)
+{
+	return ((addr & 0xFF) > 0xD0);
+}
+
+static int qpnp_haptics_write_reg(struct hap_chip *chip, u16 addr, u8 *val,
+				int len)
+{
+	unsigned long flags;
+	unsigned int unlock = 0xA5;
+	int rc = 0, i;
+
+	spin_lock_irqsave(&chip->bus_lock, flags);
+
+	if (is_secure(addr)) {
+		for (i = 0; i < len; i++) {
+			rc = regmap_write(chip->regmap,
+					HAP_SEC_ACCESS_REG(chip), unlock);
+			if (rc < 0) {
+				pr_err("Error writing unlock code - rc %d\n",
+					rc);
+				goto out;
+			}
+
+			rc = regmap_write(chip->regmap, addr + i, val[i]);
+			if (rc < 0) {
+				pr_err("Error writing address 0x%x - rc %d\n",
+					addr + i, rc);
+				goto out;
+			}
+		}
+	} else {
+		if (len > 1)
+			rc = regmap_bulk_write(chip->regmap, addr, val, len);
+		else
+			rc = regmap_write(chip->regmap, addr, *val);
+	}
+
+	if (rc < 0)
+		pr_err("Error writing address: 0x%x - rc %d\n", addr, rc);
+
+out:
+	spin_unlock_irqrestore(&chip->bus_lock, flags);
+	return rc;
+}
+
+static int qpnp_haptics_masked_write_reg(struct hap_chip *chip, u16 addr,
+					u8 mask, u8 val)
+{
+	unsigned long flags;
+	unsigned int unlock = 0xA5;
+	int rc;
+
+	spin_lock_irqsave(&chip->bus_lock, flags);
+	if (is_secure(addr)) {
+		rc = regmap_write(chip->regmap, HAP_SEC_ACCESS_REG(chip),
+				unlock);
+		if (rc < 0) {
+			pr_err("Error writing unlock code - rc %d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, addr, mask, val);
+	if (rc < 0)
+		pr_err("Error writing address: 0x%x - rc %d\n", addr, rc);
+
+	if (!rc)
+		pr_debug("wrote to address 0x%x = 0x%x\n", addr, val);
+out:
+	spin_unlock_irqrestore(&chip->bus_lock, flags);
+	return rc;
+}
+
+static bool is_sw_lra_auto_resonance_control(struct hap_chip *chip)
+{
+	if (chip->act_type != HAP_LRA)
+		return false;
+
+	if (chip->auto_res_err_recovery_hw)
+		return false;
+
+	/*
+	 * For short pattern in auto mode, we use buffer mode and auto
+	 * resonance is not needed.
+	 */
+	if (chip->lra_auto_mode && chip->play_mode == HAP_BUFFER)
+		return false;
+
+	return true;
+}
+
+#define HAPTICS_BACK_EMF_DELAY_US	20000
+static int qpnp_haptics_auto_res_enable(struct hap_chip *chip, bool enable)
+{
+	int rc = 0;
+	u32 delay_us = HAPTICS_BACK_EMF_DELAY_US;
+	u8 val, auto_res_mode_qwd;
+
+	if (chip->act_type != HAP_LRA)
+		return 0;
+
+	if (chip->revid->pmic_subtype == PM660_SUBTYPE)
+		auto_res_mode_qwd = (chip->ares_cfg.auto_res_mode ==
+						HAP_PM660_AUTO_RES_QWD);
+	else
+		auto_res_mode_qwd = (chip->ares_cfg.auto_res_mode ==
+							HAP_AUTO_RES_QWD);
+
+	/*
+	 * Do not enable auto resonance if auto mode is enabled and auto
+	 * resonance mode is QWD, meaning short pattern.
+	 */
+	if (chip->lra_auto_mode && auto_res_mode_qwd && enable) {
+		pr_debug("auto_mode enabled, not enabling auto_res\n");
+		return 0;
+	}
+
+	/*
+	 * For auto resonance detection to work properly, sufficient back-emf
+	 * has to be generated. In general, back-emf takes some time to build
+	 * up. When the auto resonance mode is chosen as QWD, high-z will be
+	 * applied for every LRA cycle and hence there won't be enough back-emf
+	 * at start-up. Hence, the motor needs to vibrate for a few LRA cycles
+	 * after the PLAY bit is asserted, so auto resonance is enabled only
+	 * after HAPTICS_BACK_EMF_DELAY_US has elapsed.
+	 */
+
+	if (auto_res_mode_qwd && enable)
+		usleep_range(delay_us, delay_us + 1);
+
+	val = enable ? AUTO_RES_EN_BIT : 0;
+
+	if (chip->revid->pmic_subtype == PM660_SUBTYPE)
+		rc = qpnp_haptics_masked_write_reg(chip,
+				HAP_AUTO_RES_CTRL_REG(chip),
+				AUTO_RES_EN_BIT, val);
+	else
+		rc = qpnp_haptics_masked_write_reg(chip, HAP_TEST2_REG(chip),
+				AUTO_RES_EN_BIT, val);
+	if (rc < 0)
+		return rc;
+
+	if (enable)
+		chip->status_flags |= AUTO_RESONANCE_ENABLED;
+	else
+		chip->status_flags &= ~AUTO_RESONANCE_ENABLED;
+
+	pr_debug("auto_res %sabled\n", enable ? "en" : "dis");
+	return rc;
+}
+
+static int qpnp_haptics_update_rate_cfg(struct hap_chip *chip, u16 play_rate)
+{
+	int rc;
+	u8 val[2];
+
+	if (chip->last_rate_cfg == play_rate) {
+		pr_debug("Same rate_cfg %x\n", play_rate);
+		return 0;
+	}
+
+	val[0] = play_rate & HAP_RATE_CFG1_MASK;
+	val[1] = (play_rate >> HAP_RATE_CFG2_SHIFT) & HAP_RATE_CFG2_MASK;
+	rc = qpnp_haptics_write_reg(chip, HAP_RATE_CFG1_REG(chip), val, 2);
+	if (rc < 0)
+		return rc;
+
+	pr_debug("Play rate code 0x%x\n", play_rate);
+	chip->last_rate_cfg = play_rate;
+	return 0;
+}
+
+static void qpnp_haptics_update_lra_frequency(struct hap_chip *chip)
+{
+	u8 lra_auto_res[2], val;
+	u32 play_rate_code;
+	u16 rate_cfg;
+	int rc;
+
+	rc = qpnp_haptics_read_reg(chip, HAP_LRA_AUTO_RES_LO_REG(chip),
+				lra_auto_res, 2);
+	if (rc < 0) {
+		pr_err("Error in reading LRA_AUTO_RES_LO/HI, rc=%d\n", rc);
+		return;
+	}
+
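+	/* AUTO_RES_HI[7:4] holds drive period code bits [11:8] */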
+	play_rate_code =
+		 (lra_auto_res[1] & 0xF0) << 4 | (lra_auto_res[0] & 0xFF);
+
+	pr_debug("lra_auto_res_lo = 0x%x lra_auto_res_hi = 0x%x play_rate_code = 0x%x\n",
+		lra_auto_res[0], lra_auto_res[1], play_rate_code);
+
+	rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1);
+	if (rc < 0)
+		return;
+
+	/*
+	 * If the drive period code read from AUTO_RES_LO and AUTO_RES_HI
+	 * registers is more than the max limit percent variation or less
+	 * than the min limit percent variation specified through DT, then
+	 * auto-resonance is disabled.
+	 */
+
+	if ((val & AUTO_RES_ERROR_BIT) ||
+		((play_rate_code <= chip->drive_period_code_min_limit) ||
+		(play_rate_code >= chip->drive_period_code_max_limit))) {
+		if (val & AUTO_RES_ERROR_BIT)
+			pr_debug("Auto-resonance error %x\n", val);
+		else
+			pr_debug("play rate %x out of bounds [min: 0x%x, max: 0x%x]\n",
+				play_rate_code,
+				chip->drive_period_code_min_limit,
+				chip->drive_period_code_max_limit);
+		rc = qpnp_haptics_auto_res_enable(chip, false);
+		if (rc < 0)
+			pr_debug("Auto-resonance disable failed\n");
+		return;
+	}
+
+	/*
+	 * bits[7:4] of AUTO_RES_HI should be written to bits[3:0] of RATE_CFG2
+	 */
+	lra_auto_res[1] >>= 4;
+	rate_cfg = lra_auto_res[1] << 8 | lra_auto_res[0];
+	rc = qpnp_haptics_update_rate_cfg(chip, rate_cfg);
+	if (rc < 0)
+		pr_debug("Error in updating rate_cfg\n");
+}
+
+#define MAX_RETRIES	5
+#define HAP_CYCLES	4
+static bool is_haptics_idle(struct hap_chip *chip)
+{
+	unsigned long wait_time_us;
+	int rc, i;
+	u8 val;
+
+	rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1);
+	if (rc < 0)
+		return false;
+
+	if (!(val & HAP_BUSY_BIT))
+		return true;
+
+	if (chip->play_time_ms <= 20)
+		wait_time_us = chip->play_time_ms * 1000;
+	else
+		wait_time_us = chip->wave_play_rate_us * HAP_CYCLES;
+
+	for (i = 0; i < MAX_RETRIES; i++) {
+		/* wait for play_rate cycles */
+		usleep_range(wait_time_us, wait_time_us + 1);
+
+		if (chip->play_mode == HAP_DIRECT ||
+				chip->play_mode == HAP_PWM)
+			return true;
+
+		rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val,
+					1);
+		if (rc < 0)
+			return false;
+
+		if (!(val & HAP_BUSY_BIT))
+			return true;
+	}
+
+	if (i >= MAX_RETRIES && (val & HAP_BUSY_BIT)) {
+		pr_debug("Haptics Busy after %d retries\n", i);
+		return false;
+	}
+
+	return true;
+}
+
+static int qpnp_haptics_mod_enable(struct hap_chip *chip, bool enable)
+{
+	u8 val;
+	int rc;
+
+	if (chip->module_en == enable)
+		return 0;
+
+	if (!enable) {
+		if (!is_haptics_idle(chip))
+			pr_debug("Disabling module forcibly\n");
+	}
+
+	val = enable ? HAP_EN_BIT : 0;
+	rc = qpnp_haptics_write_reg(chip, HAP_EN_CTL_REG(chip), &val, 1);
+	if (rc < 0)
+		return rc;
+
+	chip->module_en = enable;
+	return 0;
+}
+
+static int qpnp_haptics_play_control(struct hap_chip *chip,
+					enum hap_play_control ctrl)
+{
+	u8 val;
+	int rc;
+
+	switch (ctrl) {
+	case HAP_STOP:
+		val = 0;
+		break;
+	case HAP_PAUSE:
+		val = PAUSE_BIT;
+		break;
+	case HAP_PLAY:
+		val = PLAY_BIT;
+		break;
+	default:
+		return 0;
+	}
+
+	rc = qpnp_haptics_write_reg(chip, HAP_PLAY_REG(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing to PLAY_REG, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("haptics play ctrl: %d\n", ctrl);
+	return rc;
+}
+
+#define AUTO_RES_ERR_POLL_TIME_NS	(20 * NSEC_PER_MSEC)
+static int qpnp_haptics_play(struct hap_chip *chip, bool enable)
+{
+	int rc = 0, time_ms = chip->play_time_ms;
+
+	if (chip->perm_disable && enable)
+		return 0;
+
+	mutex_lock(&chip->play_lock);
+
+	if (enable) {
+		if (chip->play_mode == HAP_PWM) {
+			rc = pwm_enable(chip->pwm_data.pwm_dev);
+			if (rc < 0) {
+				pr_err("Error in enabling PWM, rc=%d\n", rc);
+				goto out;
+			}
+		}
+
+		rc = qpnp_haptics_auto_res_enable(chip, false);
+		if (rc < 0) {
+			pr_err("Error in disabling auto_res, rc=%d\n", rc);
+			goto out;
+		}
+
+		rc = qpnp_haptics_mod_enable(chip, true);
+		if (rc < 0) {
+			pr_err("Error in enabling module, rc=%d\n", rc);
+			goto out;
+		}
+
+		rc = qpnp_haptics_play_control(chip, HAP_PLAY);
+		if (rc < 0) {
+			pr_err("Error in enabling play, rc=%d\n", rc);
+			goto out;
+		}
+
+		if (chip->play_mode != HAP_BUFFER)
+			hrtimer_start(&chip->stop_timer,
+				ktime_set(time_ms / MSEC_PER_SEC,
+				(time_ms % MSEC_PER_SEC) * NSEC_PER_MSEC),
+				HRTIMER_MODE_REL);
+
+		rc = qpnp_haptics_auto_res_enable(chip, true);
+		if (rc < 0) {
+			pr_err("Error in enabling auto_res, rc=%d\n", rc);
+			goto out;
+		}
+
+		if (is_sw_lra_auto_resonance_control(chip))
+			hrtimer_start(&chip->auto_res_err_poll_timer,
+				ktime_set(0, AUTO_RES_ERR_POLL_TIME_NS),
+				HRTIMER_MODE_REL);
+	} else {
+		rc = qpnp_haptics_play_control(chip, HAP_STOP);
+		if (rc < 0) {
+			pr_err("Error in disabling play, rc=%d\n", rc);
+			goto out;
+		}
+
+		if (is_sw_lra_auto_resonance_control(chip)) {
+			if (chip->status_flags & AUTO_RESONANCE_ENABLED)
+				qpnp_haptics_update_lra_frequency(chip);
+			hrtimer_cancel(&chip->auto_res_err_poll_timer);
+		}
+
+		if (chip->play_mode == HAP_PWM)
+			pwm_disable(chip->pwm_data.pwm_dev);
+	}
+
+out:
+	mutex_unlock(&chip->play_lock);
+	return rc;
+}
+
+static void qpnp_haptics_work(struct work_struct *work)
+{
+	struct hap_chip *chip = container_of(work, struct hap_chip,
+						haptics_work);
+	int rc;
+	bool enable;
+
+	enable = atomic_read(&chip->state);
+	pr_debug("state: %d\n", enable);
+	rc = qpnp_haptics_play(chip, enable);
+	if (rc < 0)
+		pr_err("Error in %sing haptics, rc=%d\n",
+			enable ? "play" : "stopp", rc);
+}
+
+static enum hrtimer_restart hap_stop_timer(struct hrtimer *timer)
+{
+	struct hap_chip *chip = container_of(timer, struct hap_chip,
+					stop_timer);
+
+	atomic_set(&chip->state, 0);
+	schedule_work(&chip->haptics_work);
+
+	return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart hap_auto_res_err_poll_timer(struct hrtimer *timer)
+{
+	struct hap_chip *chip = container_of(timer, struct hap_chip,
+					auto_res_err_poll_timer);
+
+	if (!(chip->status_flags & AUTO_RESONANCE_ENABLED))
+		return HRTIMER_NORESTART;
+
+	qpnp_haptics_update_lra_frequency(chip);
+	hrtimer_forward(&chip->auto_res_err_poll_timer, ktime_get(),
+			ktime_set(0, AUTO_RES_ERR_POLL_TIME_NS));
+
+	return HRTIMER_NORESTART;
+}
+
+static int qpnp_haptics_suspend(struct device *dev)
+{
+	struct hap_chip *chip = dev_get_drvdata(dev);
+	int rc;
+
+	rc = qpnp_haptics_play(chip, false);
+	if (rc < 0)
+		pr_err("Error in stopping haptics, rc=%d\n", rc);
+
+	rc = qpnp_haptics_mod_enable(chip, false);
+	if (rc < 0)
+		pr_err("Error in disabling module, rc=%d\n", rc);
+
+	return 0;
+}
+
+static int qpnp_haptics_wave_rep_config(struct hap_chip *chip,
+					enum hap_rep_type type)
+{
+	int rc;
+	u8 val = 0, mask = 0;
+
+	if (type & HAP_WAVE_REPEAT) {
+		if (chip->wave_rep_cnt < WF_REPEAT_MIN)
+			chip->wave_rep_cnt = WF_REPEAT_MIN;
+		else if (chip->wave_rep_cnt > WF_REPEAT_MAX)
+			chip->wave_rep_cnt = WF_REPEAT_MAX;
+		mask = WF_REPEAT_MASK;
+		val = ilog2(chip->wave_rep_cnt) << WF_REPEAT_SHIFT;
+	}
+
+	if (type & HAP_WAVE_SAMP_REPEAT) {
+		if (chip->wave_s_rep_cnt < WF_S_REPEAT_MIN)
+			chip->wave_s_rep_cnt = WF_S_REPEAT_MIN;
+		else if (chip->wave_s_rep_cnt > WF_S_REPEAT_MAX)
+			chip->wave_s_rep_cnt = WF_S_REPEAT_MAX;
+		mask |= WF_S_REPEAT_MASK;
+		val |= ilog2(chip->wave_s_rep_cnt);
+	}
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_WF_REPEAT_REG(chip),
+			mask, val);
+	return rc;
+}
+
+/* configuration api for buffer mode */
+static int qpnp_haptics_buffer_config(struct hap_chip *chip, u32 *wave_samp,
+				bool overdrive)
+{
+	u8 buf[HAP_WAVE_SAMP_LEN];
+	u32 *ptr;
+	int rc, i;
+
+	if (wave_samp) {
+		ptr = wave_samp;
+	} else {
+		if (chip->wave_samp_idx >= ARRAY_SIZE(chip->wave_samp)) {
+			pr_err("Incorrect wave_samp_idx %d\n",
+				chip->wave_samp_idx);
+			return -EINVAL;
+		}
+
+		ptr = &chip->wave_samp[chip->wave_samp_idx];
+	}
+
+	/* Don't set override bit in waveform sample for PM660 */
+	if (chip->revid->pmic_subtype == PM660_SUBTYPE)
+		overdrive = false;
+
+	/* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */
+	for (i = 0; i < HAP_WAVE_SAMP_LEN; i++) {
+		buf[i] = ptr[i];
+		if (buf[i])
+			buf[i] |= (overdrive ? HAP_WF_OVD_BIT : 0);
+	}
+
+	rc = qpnp_haptics_write_reg(chip, HAP_WF_S1_REG(chip), buf,
+			HAP_WAVE_SAMP_LEN);
+	return rc;
+}
+
+/* configuration api for pwm */
+static int qpnp_haptics_pwm_config(struct hap_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	if (chip->ext_pwm_freq_khz == 0)
+		return 0;
+
+	/* Configure the EXTERNAL_PWM register */
+	if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_25_KHZ) {
+		chip->ext_pwm_freq_khz = EXT_PWM_FREQ_25_KHZ;
+		val = 0;
+	} else if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_50_KHZ) {
+		chip->ext_pwm_freq_khz = EXT_PWM_FREQ_50_KHZ;
+		val = 1;
+	} else if (chip->ext_pwm_freq_khz <= EXT_PWM_FREQ_75_KHZ) {
+		chip->ext_pwm_freq_khz = EXT_PWM_FREQ_75_KHZ;
+		val = 2;
+	} else {
+		chip->ext_pwm_freq_khz = EXT_PWM_FREQ_100_KHZ;
+		val = 3;
+	}
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_EXT_PWM_REG(chip),
+			EXT_PWM_FREQ_SEL_MASK, val);
+	if (rc < 0)
+		return rc;
+
+	if (chip->ext_pwm_dtest_line < 0 ||
+			chip->ext_pwm_dtest_line > PWM_MAX_DTEST_LINES) {
+		pr_err("invalid dtest line\n");
+		return -EINVAL;
+	}
+
+	if (chip->ext_pwm_dtest_line > 0) {
+		/* disable auto res for PWM mode */
+		val = chip->ext_pwm_dtest_line << HAP_EXT_PWM_DTEST_SHIFT;
+		rc = qpnp_haptics_masked_write_reg(chip, HAP_TEST2_REG(chip),
+			HAP_EXT_PWM_DTEST_MASK | AUTO_RES_EN_BIT, val);
+		if (rc < 0)
+			return rc;
+	}
+
+	rc = pwm_config(chip->pwm_data.pwm_dev,
+			chip->pwm_data.duty_us * NSEC_PER_USEC,
+			chip->pwm_data.period_us * NSEC_PER_USEC);
+	if (rc < 0) {
+		pr_err("pwm_config failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qpnp_haptics_lra_auto_res_config(struct hap_chip *chip,
+					struct hap_lra_ares_param *tmp_cfg)
+{
+	struct hap_lra_ares_param *ares_cfg;
+	int rc;
+	u8 val = 0, mask = 0;
+
+	/* disable auto resonance for ERM */
+	if (chip->act_type == HAP_ERM) {
+		val = 0x00;
+		rc = qpnp_haptics_write_reg(chip, HAP_LRA_AUTO_RES_REG(chip),
+					&val, 1);
+		return rc;
+	}
+
+	if (chip->auto_res_err_recovery_hw) {
+		rc = qpnp_haptics_masked_write_reg(chip,
+			HAP_AUTO_RES_CTRL_REG(chip),
+			AUTO_RES_ERR_RECOVERY_BIT, AUTO_RES_ERR_RECOVERY_BIT);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (tmp_cfg)
+		ares_cfg = tmp_cfg;
+	else
+		ares_cfg = &chip->ares_cfg;
+
+	if (ares_cfg->lra_res_cal_period < HAP_RES_CAL_PERIOD_MIN)
+		ares_cfg->lra_res_cal_period = HAP_RES_CAL_PERIOD_MIN;
+
+	if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+		if (ares_cfg->lra_res_cal_period >
+				HAP_PM660_RES_CAL_PERIOD_MAX)
+			ares_cfg->lra_res_cal_period =
+				HAP_PM660_RES_CAL_PERIOD_MAX;
+
+		if (ares_cfg->auto_res_mode == HAP_PM660_AUTO_RES_QWD)
+			ares_cfg->lra_res_cal_period = 0;
+
+		if (ares_cfg->lra_res_cal_period)
+			val = ilog2(ares_cfg->lra_res_cal_period /
+					HAP_RES_CAL_PERIOD_MIN) + 1;
+	} else {
+		if (ares_cfg->lra_res_cal_period > HAP_RES_CAL_PERIOD_MAX)
+			ares_cfg->lra_res_cal_period =
+				HAP_RES_CAL_PERIOD_MAX;
+
+		if (ares_cfg->lra_res_cal_period)
+			val = ilog2(ares_cfg->lra_res_cal_period /
+					HAP_RES_CAL_PERIOD_MIN);
+	}
+
+	if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+		val |= ares_cfg->auto_res_mode << PM660_AUTO_RES_MODE_SHIFT;
+		mask = PM660_AUTO_RES_MODE_BIT;
+		val |= ares_cfg->lra_high_z << PM660_CAL_DURATION_SHIFT;
+		mask |= PM660_CAL_DURATION_MASK;
+		if (ares_cfg->lra_qwd_drive_duration != -EINVAL) {
+			val |= ares_cfg->lra_qwd_drive_duration <<
+				PM660_QWD_DRIVE_DURATION_SHIFT;
+			mask |= PM660_QWD_DRIVE_DURATION_BIT;
+		}
+		if (ares_cfg->calibrate_at_eop != -EINVAL) {
+			val |= ares_cfg->calibrate_at_eop <<
+				PM660_CAL_EOP_SHIFT;
+			mask |= PM660_CAL_EOP_BIT;
+		}
+		mask |= PM660_LRA_RES_CAL_MASK;
+	} else {
+		val |= (ares_cfg->auto_res_mode << LRA_AUTO_RES_MODE_SHIFT);
+		val |= (ares_cfg->lra_high_z << LRA_HIGH_Z_SHIFT);
+		mask = LRA_AUTO_RES_MODE_MASK | LRA_HIGH_Z_MASK |
+			LRA_RES_CAL_MASK;
+	}
+
+	pr_debug("mode: %d hi_z period: %d cal_period: %d\n",
+		ares_cfg->auto_res_mode, ares_cfg->lra_high_z,
+		ares_cfg->lra_res_cal_period);
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_LRA_AUTO_RES_REG(chip),
+			mask, val);
+	return rc;
+}
+
+/* configuration api for play mode */
+static int qpnp_haptics_play_mode_config(struct hap_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	if (!is_haptics_idle(chip))
+		return -EBUSY;
+
+	val = chip->play_mode << HAP_WF_SOURCE_SHIFT;
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_SEL_REG(chip),
+			HAP_WF_SOURCE_MASK, val);
+	if (!rc) {
+		if (chip->play_mode == HAP_BUFFER && !chip->play_irq_en) {
+			enable_irq(chip->play_irq);
+			chip->play_irq_en = true;
+		} else if (chip->play_mode != HAP_BUFFER && chip->play_irq_en) {
+			disable_irq(chip->play_irq);
+			chip->play_irq_en = false;
+		}
+	}
+	return rc;
+}
+
+/* configuration api for max voltage */
+static int qpnp_haptics_vmax_config(struct hap_chip *chip, int vmax_mv,
+				bool overdrive)
+{
+	u8 val = 0;
+	int rc;
+
+	if (vmax_mv < 0)
+		return -EINVAL;
+
+	/* Allow setting override bit in VMAX_CFG only for PM660 */
+	if (chip->revid->pmic_subtype != PM660_SUBTYPE)
+		overdrive = false;
+
+	if (vmax_mv < HAP_VMAX_MIN_MV)
+		vmax_mv = HAP_VMAX_MIN_MV;
+	else if (vmax_mv > HAP_VMAX_MAX_MV)
+		vmax_mv = HAP_VMAX_MAX_MV;
+
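+	/* VMAX_CFG is programmed in steps of HAP_VMAX_MIN_MV (116 mV) */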
+	val = DIV_ROUND_CLOSEST(vmax_mv, HAP_VMAX_MIN_MV);
+	val <<= HAP_VMAX_SHIFT;
+	if (overdrive)
+		val |= HAP_VMAX_OVD_BIT;
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_VMAX_CFG_REG(chip),
+			HAP_VMAX_MASK | HAP_VMAX_OVD_BIT, val);
+	return rc;
+}
+
+/* configuration api for ilim */
+static int qpnp_haptics_ilim_config(struct hap_chip *chip)
+{
+	int rc;
+
+	if (chip->ilim_ma < HAP_ILIM_400_MA)
+		chip->ilim_ma = HAP_ILIM_400_MA;
+	else if (chip->ilim_ma > HAP_ILIM_800_MA)
+		chip->ilim_ma = HAP_ILIM_800_MA;
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_ILIM_CFG_REG(chip),
+			HAP_ILIM_SEL_MASK, chip->ilim_ma);
+	return rc;
+}
+
+/* configuration api for short circuit debounce */
+static int qpnp_haptics_sc_deb_config(struct hap_chip *chip)
+{
+	u8 val = 0;
+	int rc;
+
+	if (chip->sc_deb_cycles < HAP_SC_DEB_CYCLES_MIN)
+		chip->sc_deb_cycles = HAP_SC_DEB_CYCLES_MIN;
+	else if (chip->sc_deb_cycles > HAP_SC_DEB_CYCLES_MAX)
+		chip->sc_deb_cycles = HAP_SC_DEB_CYCLES_MAX;
+
+	if (chip->sc_deb_cycles != HAP_SC_DEB_CYCLES_MIN)
+		val = ilog2(chip->sc_deb_cycles /
+			HAP_DEF_SC_DEB_CYCLES) + 1;
+	else
+		val = HAP_SC_DEB_CYCLES_MIN;
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_SC_DEB_REG(chip),
+			HAP_SC_DEB_MASK, val);
+
+	return rc;
+}
+
+static int qpnp_haptics_brake_config(struct hap_chip *chip, u32 *brake_pat)
+{
+	int rc, i;
+	u32 temp, *ptr;
+	u8 val;
+
+	/* Configure BRAKE register */
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_EN_CTL2_REG(chip),
+			BRAKE_EN_BIT, (u8)chip->en_brake);
+	if (rc < 0)
+		return rc;
+
+	/* If braking is not enabled, skip configuring brake pattern */
+	if (!chip->en_brake)
+		return 0;
+
+	if (!brake_pat)
+		ptr = chip->brake_pat;
+	else
+		ptr = brake_pat;
+
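+	/* Pack the four 2-bit brake pattern entries into a single byte */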
+	for (i = HAP_BRAKE_PAT_LEN - 1, val = 0; i >= 0; i--) {
+		ptr[i] &= HAP_BRAKE_PAT_MASK;
+		temp = i << 1;
+		val |= ptr[i] << temp;
+	}
+
+	rc = qpnp_haptics_write_reg(chip, HAP_BRAKE_REG(chip), &val, 1);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
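+/*
+ * Auto mode uses buffer mode with overdriven maximum-amplitude samples for
+ * short (<= 20 ms) vibrations and direct mode at the configured vmax for
+ * longer ones.
+ */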
+static int qpnp_haptics_auto_mode_config(struct hap_chip *chip, int time_ms)
+{
+	struct hap_lra_ares_param ares_cfg;
+	enum hap_mode old_play_mode;
+	u8 old_ares_mode;
+	u32 brake_pat[HAP_BRAKE_PAT_LEN] = {0};
+	u32 wave_samp[HAP_WAVE_SAMP_LEN] = {0};
+	int rc, vmax_mv;
+
+	if (!chip->lra_auto_mode)
+		return false;
+
+	/* For now, this is for LRA only */
+	if (chip->act_type == HAP_ERM)
+		return 0;
+
+	old_ares_mode = chip->ares_cfg.auto_res_mode;
+	old_play_mode = chip->play_mode;
+	pr_debug("auto_mode, time_ms: %d\n", time_ms);
+	if (time_ms <= 20) {
+		wave_samp[0] = HAP_WF_SAMP_MAX;
+		wave_samp[1] = HAP_WF_SAMP_MAX;
+		if (time_ms > 15)
+			wave_samp[2] = HAP_WF_SAMP_MAX;
+
+		/* short pattern */
+		rc = qpnp_haptics_parse_buffer_dt(chip);
+		if (!rc) {
+			rc = qpnp_haptics_wave_rep_config(chip,
+				HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT);
+			if (rc < 0) {
+				pr_err("Error in configuring wave_rep config %d\n",
+					rc);
+				return rc;
+			}
+
+			rc = qpnp_haptics_buffer_config(chip, wave_samp, true);
+			if (rc < 0) {
+				pr_err("Error in configuring buffer mode %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT1;
+		ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MIN;
+		if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+			ares_cfg.auto_res_mode = HAP_PM660_AUTO_RES_QWD;
+			ares_cfg.lra_qwd_drive_duration = 0;
+			ares_cfg.calibrate_at_eop = 0;
+		} else {
+			ares_cfg.auto_res_mode = HAP_AUTO_RES_QWD;
+			ares_cfg.lra_qwd_drive_duration = -EINVAL;
+			ares_cfg.calibrate_at_eop = -EINVAL;
+		}
+
+		vmax_mv = HAP_VMAX_MAX_MV;
+		rc = qpnp_haptics_vmax_config(chip, vmax_mv, true);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_haptics_brake_config(chip, brake_pat);
+		if (rc < 0)
+			return rc;
+
+		/* enable play_irq for buffer mode */
+		if (chip->play_irq >= 0 && !chip->play_irq_en) {
+			enable_irq(chip->play_irq);
+			chip->play_irq_en = true;
+		}
+
+		chip->play_mode = HAP_BUFFER;
+		chip->wave_shape = HAP_WAVE_SQUARE;
+	} else {
+		/* long pattern */
+		ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT1;
+		if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+			ares_cfg.auto_res_mode = HAP_PM660_AUTO_RES_ZXD;
+			ares_cfg.lra_res_cal_period =
+				HAP_PM660_RES_CAL_PERIOD_MAX;
+			ares_cfg.lra_qwd_drive_duration = 0;
+			ares_cfg.calibrate_at_eop = 1;
+		} else {
+			ares_cfg.auto_res_mode = HAP_AUTO_RES_ZXD_EOP;
+			ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MAX;
+			ares_cfg.lra_qwd_drive_duration = -EINVAL;
+			ares_cfg.calibrate_at_eop = -EINVAL;
+		}
+
+		vmax_mv = chip->vmax_mv;
+		rc = qpnp_haptics_vmax_config(chip, vmax_mv, false);
+		if (rc < 0)
+			return rc;
+
+		brake_pat[0] = 0x3;
+		rc = qpnp_haptics_brake_config(chip, brake_pat);
+		if (rc < 0)
+			return rc;
+
+		/* disable play_irq for direct mode */
+		if (chip->play_irq >= 0 && chip->play_irq_en) {
+			disable_irq(chip->play_irq);
+			chip->play_irq_en = false;
+		}
+
+		chip->play_mode = HAP_DIRECT;
+		chip->wave_shape = HAP_WAVE_SINE;
+	}
+
+	chip->ares_cfg.auto_res_mode = ares_cfg.auto_res_mode;
+	rc = qpnp_haptics_lra_auto_res_config(chip, &ares_cfg);
+	if (rc < 0) {
+		chip->ares_cfg.auto_res_mode = old_ares_mode;
+		return rc;
+	}
+
+	rc = qpnp_haptics_play_mode_config(chip);
+	if (rc < 0) {
+		chip->play_mode = old_play_mode;
+		return rc;
+	}
+
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG2_REG(chip),
+			HAP_LRA_RES_TYPE_MASK, chip->wave_shape);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static irqreturn_t qpnp_haptics_play_irq_handler(int irq, void *data)
+{
+	struct hap_chip *chip = data;
+	int rc;
+
+	if (chip->play_mode != HAP_BUFFER)
+		goto irq_handled;
+
+	if (chip->wave_samp[chip->wave_samp_idx + HAP_WAVE_SAMP_LEN] > 0) {
+		chip->wave_samp_idx += HAP_WAVE_SAMP_LEN;
+		if (chip->wave_samp_idx >= ARRAY_SIZE(chip->wave_samp)) {
+			pr_debug("Samples over\n");
+			/* fall through to stop playing */
+		} else {
+			pr_debug("moving to next sample set %d\n",
+				chip->wave_samp_idx);
+
+			rc = qpnp_haptics_buffer_config(chip, NULL, false);
+			if (rc < 0) {
+				pr_err("Error in configuring buffer, rc=%d\n",
+					rc);
+				goto irq_handled;
+			}
+
+			/*
+			 * Moving to next set of wave sample. No need to stop
+			 * or change the play control. Just return.
+			 */
+			goto irq_handled;
+		}
+	}
+
+	rc = qpnp_haptics_play_control(chip, HAP_STOP);
+	if (rc < 0) {
+		pr_err("Error in disabling play, rc=%d\n", rc);
+		goto irq_handled;
+	}
+	chip->wave_samp_idx = 0;
+
+irq_handled:
+	return IRQ_HANDLED;
+}
+
+#define SC_MAX_COUNT		5
+#define SC_COUNT_RST_DELAY_US	1000000
+static irqreturn_t qpnp_haptics_sc_irq_handler(int irq, void *data)
+{
+	struct hap_chip *chip = data;
+	int rc;
+	u8 val;
+	s64 sc_delta_time_us;
+	ktime_t temp;
+
+	rc = qpnp_haptics_read_reg(chip, HAP_STATUS_1_REG(chip), &val, 1);
+	if (rc < 0)
+		goto irq_handled;
+
+	if (!(val & SC_FLAG_BIT)) {
+		chip->sc_count = 0;
+		goto irq_handled;
+	}
+
+	pr_debug("SC irq fired\n");
+	temp = ktime_get();
+	sc_delta_time_us = ktime_us_delta(temp, chip->last_sc_time);
+	chip->last_sc_time = temp;
+
+	if (sc_delta_time_us > SC_COUNT_RST_DELAY_US)
+		chip->sc_count = 0;
+	else
+		chip->sc_count++;
+
+	val = SC_CLR_BIT;
+	rc = qpnp_haptics_write_reg(chip, HAP_SC_CLR_REG(chip), &val, 1);
+	if (rc < 0) {
+		pr_err("Error in writing to SC_CLR_REG, rc=%d\n", rc);
+		goto irq_handled;
+	}
+
+	/* Permanently disable module if SC condition persists */
+	if (chip->sc_count > SC_MAX_COUNT) {
+		pr_crit("SC persists, permanently disabling haptics\n");
+		rc = qpnp_haptics_mod_enable(chip, false);
+		if (rc < 0) {
+			pr_err("Error in disabling module, rc=%d\n", rc);
+			goto irq_handled;
+		}
+		chip->perm_disable = true;
+	}
+
+irq_handled:
+	return IRQ_HANDLED;
+}
+
+/* All sysfs show/store functions below */
+
+#define HAP_STR_SIZE	128
+static int parse_string(const char *in_buf, char *out_buf)
+{
+	int i;
+
+	if (snprintf(out_buf, HAP_STR_SIZE, "%s", in_buf) >= HAP_STR_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < strlen(out_buf); i++) {
+		if (out_buf[i] == ' ' || out_buf[i] == '\n' ||
+			out_buf[i] == '\t') {
+			out_buf[i] = '\0';
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static ssize_t qpnp_haptics_show_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->module_en);
+}
+
+static ssize_t qpnp_haptics_store_state(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	/* At present, nothing to do with setting state */
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_duration(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	ktime_t time_rem;
+	s64 time_us = 0;
+
+	if (hrtimer_active(&chip->stop_timer)) {
+		time_rem = hrtimer_get_remaining(&chip->stop_timer);
+		time_us = ktime_to_us(time_rem);
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%lld\n", time_us / 1000);
+}
+
+static ssize_t qpnp_haptics_store_duration(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	u32 val;
+	int rc;
+
+	rc = kstrtouint(buf, 0, &val);
+	if (rc < 0)
+		return rc;
+
+	/* setting 0 on duration is NOP for now */
+	if (val <= 0)
+		return count;
+
+	if (val > chip->max_play_time_ms)
+		return -EINVAL;
+
+	mutex_lock(&chip->param_lock);
+	rc = qpnp_haptics_auto_mode_config(chip, val);
+	if (rc < 0) {
+		pr_err("Unable to do auto mode config\n");
+		mutex_unlock(&chip->param_lock);
+		return rc;
+	}
+
+	chip->play_time_ms = val;
+	mutex_unlock(&chip->param_lock);
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_activate(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	/* For now nothing to show */
+	return snprintf(buf, PAGE_SIZE, "%d\n", 0);
+}
+
+static ssize_t qpnp_haptics_store_activate(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	u32 val;
+	int rc;
+
+	rc = kstrtouint(buf, 0, &val);
+	if (rc < 0)
+		return rc;
+
+	if (val != 0 && val != 1)
+		return count;
+
+	if (val) {
+		hrtimer_cancel(&chip->stop_timer);
+		if (is_sw_lra_auto_resonance_control(chip))
+			hrtimer_cancel(&chip->auto_res_err_poll_timer);
+		cancel_work_sync(&chip->haptics_work);
+
+		atomic_set(&chip->state, 1);
+		schedule_work(&chip->haptics_work);
+	} else {
+		rc = qpnp_haptics_mod_enable(chip, false);
+		if (rc < 0) {
+			pr_err("Error in disabling module, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_play_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	char *str;
+
+	if (chip->play_mode == HAP_BUFFER)
+		str = "buffer";
+	else if (chip->play_mode == HAP_DIRECT)
+		str = "direct";
+	else if (chip->play_mode == HAP_AUDIO)
+		str = "audio";
+	else if (chip->play_mode == HAP_PWM)
+		str = "pwm";
+	else
+		return -EINVAL;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+static ssize_t qpnp_haptics_store_play_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	char str[HAP_STR_SIZE + 1];
+	int rc = 0, temp, old_mode;
+
+	rc = parse_string(buf, str);
+	if (rc < 0)
+		return rc;
+
+	if (strcmp(str, "buffer") == 0)
+		temp = HAP_BUFFER;
+	else if (strcmp(str, "direct") == 0)
+		temp = HAP_DIRECT;
+	else if (strcmp(str, "audio") == 0)
+		temp = HAP_AUDIO;
+	else if (strcmp(str, "pwm") == 0)
+		temp = HAP_PWM;
+	else
+		return -EINVAL;
+
+	if (temp == chip->play_mode)
+		return count;
+
+	if (temp == HAP_BUFFER) {
+		rc = qpnp_haptics_parse_buffer_dt(chip);
+		if (!rc) {
+			rc = qpnp_haptics_wave_rep_config(chip,
+				HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT);
+			if (rc < 0) {
+				pr_err("Error in configuring wave_rep config %d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		rc = qpnp_haptics_buffer_config(chip, NULL, true);
+	} else if (temp == HAP_PWM) {
+		rc = qpnp_haptics_parse_pwm_dt(chip);
+		if (!rc)
+			rc = qpnp_haptics_pwm_config(chip);
+	}
+
+	if (rc < 0)
+		return rc;
+
+	rc = qpnp_haptics_mod_enable(chip, false);
+	if (rc < 0)
+		return rc;
+
+	old_mode = chip->play_mode;
+	chip->play_mode = temp;
+	rc = qpnp_haptics_play_mode_config(chip);
+	if (rc < 0) {
+		chip->play_mode = old_mode;
+		return rc;
+	}
+
+	if (chip->play_mode == HAP_AUDIO) {
+		rc = qpnp_haptics_mod_enable(chip, true);
+		if (rc < 0) {
+			chip->play_mode = old_mode;
+			return rc;
+		}
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_wf_samp(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	char str[HAP_STR_SIZE + 1];
+	char *ptr = str;
+	int i, len = 0;
+
+	for (i = 0; i < ARRAY_SIZE(chip->wave_samp); i++) {
+		/* bound each write to the space remaining in str */
+		len = scnprintf(ptr, HAP_STR_SIZE - (ptr - str), "%x ",
+				chip->wave_samp[i]);
+		ptr += len;
+	}
+	*ptr = '\0';
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
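+/*
+ * wf_samp accepts up to HAP_WAVE_SAMP_SET_LEN space-separated hex sample
+ * values; bit 0 of each value is ignored since it is unused in WF_Sx, and
+ * samples that are not supplied are cleared to 0. Illustrative usage
+ * (path and sample values depend on the platform):
+ *   echo "7e 3e 1c 0e 06 00 00 00" > /sys/class/leds/vibrator/wf_samp
+ */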
+static ssize_t qpnp_haptics_store_wf_samp(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	u8 samp[HAP_WAVE_SAMP_SET_LEN] = {0};
+	int bytes_read, rc;
+	unsigned int data, pos = 0, i = 0;
+
+	while (pos < count && i < ARRAY_SIZE(samp) &&
+		sscanf(buf + pos, "%x%n", &data, &bytes_read) == 1) {
+		/* bit 0 is not used in WF_Sx */
+		samp[i++] = data & GENMASK(7, 1);
+		pos += bytes_read;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(chip->wave_samp); i++)
+		chip->wave_samp[i] = samp[i];
+
+	rc = qpnp_haptics_buffer_config(chip, NULL, false);
+	if (rc < 0) {
+		pr_err("Error in configuring buffer mode %d\n", rc);
+		return rc;
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_wf_rep_count(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->wave_rep_cnt);
+}
+
+static ssize_t qpnp_haptics_store_wf_rep_count(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	int data, rc, old_wave_rep_cnt;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc < 0)
+		return rc;
+
+	old_wave_rep_cnt = chip->wave_rep_cnt;
+	chip->wave_rep_cnt = data;
+	rc = qpnp_haptics_wave_rep_config(chip, HAP_WAVE_REPEAT);
+	if (rc < 0) {
+		chip->wave_rep_cnt = old_wave_rep_cnt;
+		return rc;
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_wf_s_rep_count(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->wave_s_rep_cnt);
+}
+
+static ssize_t qpnp_haptics_store_wf_s_rep_count(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	int data, rc, old_wave_s_rep_cnt;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc < 0)
+		return rc;
+
+	old_wave_s_rep_cnt = chip->wave_s_rep_cnt;
+	chip->wave_s_rep_cnt = data;
+	rc = qpnp_haptics_wave_rep_config(chip, HAP_WAVE_SAMP_REPEAT);
+	if (rc < 0) {
+		chip->wave_s_rep_cnt = old_wave_s_rep_cnt;
+		return rc;
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_vmax(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->vmax_mv);
+}
+
+static ssize_t qpnp_haptics_store_vmax(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	int data, rc, old_vmax_mv;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc < 0)
+		return rc;
+
+	old_vmax_mv = chip->vmax_mv;
+	chip->vmax_mv = data;
+	rc = qpnp_haptics_vmax_config(chip, chip->vmax_mv, false);
+	if (rc < 0) {
+		chip->vmax_mv = old_vmax_mv;
+		return rc;
+	}
+
+	return count;
+}
+
+static ssize_t qpnp_haptics_show_lra_auto_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->lra_auto_mode);
+}
+
+static ssize_t qpnp_haptics_store_lra_auto_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct led_classdev *cdev = dev_get_drvdata(dev);
+	struct hap_chip *chip = container_of(cdev, struct hap_chip, cdev);
+	int rc, data;
+
+	rc = kstrtoint(buf, 10, &data);
+	if (rc < 0)
+		return rc;
+
+	if (data != 0 && data != 1)
+		return count;
+
+	chip->lra_auto_mode = !!data;
+	return count;
+}
+
+static struct device_attribute qpnp_haptics_attrs[] = {
+	__ATTR(state, 0664, qpnp_haptics_show_state, qpnp_haptics_store_state),
+	__ATTR(duration, 0664, qpnp_haptics_show_duration,
+		qpnp_haptics_store_duration),
+	__ATTR(activate, 0664, qpnp_haptics_show_activate,
+		qpnp_haptics_store_activate),
+	__ATTR(play_mode, 0664, qpnp_haptics_show_play_mode,
+		qpnp_haptics_store_play_mode),
+	__ATTR(wf_samp, 0664, qpnp_haptics_show_wf_samp,
+		qpnp_haptics_store_wf_samp),
+	__ATTR(wf_rep_count, 0664, qpnp_haptics_show_wf_rep_count,
+		qpnp_haptics_store_wf_rep_count),
+	__ATTR(wf_s_rep_count, 0664, qpnp_haptics_show_wf_s_rep_count,
+		qpnp_haptics_store_wf_s_rep_count),
+	__ATTR(vmax_mv, 0664, qpnp_haptics_show_vmax, qpnp_haptics_store_vmax),
+	__ATTR(lra_auto_mode, 0664, qpnp_haptics_show_lra_auto_mode,
+		qpnp_haptics_store_lra_auto_mode),
+};
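+/*
+ * The attributes above are created on the "vibrator" LED class device and
+ * typically show up under /sys/class/leds/vibrator/. A minimal
+ * illustrative vibration sequence from userspace:
+ *   echo 20 > /sys/class/leds/vibrator/duration   (play time in ms)
+ *   echo 1  > /sys/class/leds/vibrator/activate   (start playback)
+ *   echo 0  > /sys/class/leds/vibrator/activate   (stop early, optional)
+ */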
+
+/* Dummy functions for brightness */
+static
+enum led_brightness qpnp_haptics_brightness_get(struct led_classdev *cdev)
+{
+	return 0;
+}
+
+static void qpnp_haptics_brightness_set(struct led_classdev *cdev,
+					enum led_brightness level)
+{
+}
+
+static int qpnp_haptics_config(struct hap_chip *chip)
+{
+	u8 rc_clk_err_deci_pct;
+	u16 play_rate = 0;
+	int rc;
+
+	/* Configure the CFG1 register for actuator type */
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG1_REG(chip),
+			HAP_ACT_TYPE_MASK, chip->act_type);
+	if (rc < 0)
+		return rc;
+
+	/* Configure auto resonance parameters */
+	rc = qpnp_haptics_lra_auto_res_config(chip, NULL);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the PLAY MODE register */
+	rc = qpnp_haptics_play_mode_config(chip);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the VMAX register */
+	rc = qpnp_haptics_vmax_config(chip, chip->vmax_mv, false);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the ILIM register */
+	rc = qpnp_haptics_ilim_config(chip);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the short circuit debounce register */
+	rc = qpnp_haptics_sc_deb_config(chip);
+	if (rc < 0)
+		return rc;
+
+	/* Configure the WAVE SHAPE register */
+	rc = qpnp_haptics_masked_write_reg(chip, HAP_CFG2_REG(chip),
+			HAP_LRA_RES_TYPE_MASK, chip->wave_shape);
+	if (rc < 0)
+		return rc;
+
+	play_rate = chip->wave_play_rate_us / HAP_RATE_CFG_STEP_US;
+
+	/*
+	 * The frequency of the 19.2 MHz RC clock is subject to variation.
+	 * Some PMI chips expose the MISC_TRIM_ERROR_RC19P2_CLK register in
+	 * the MISC peripheral; it holds the trim error of the RC clock.
+	 */
+	if (chip->act_type == HAP_LRA && chip->misc_clk_trim_error_reg) {
+		/*
+		 * Error is available in bits[3:0] and each LSB is 0.7%.
+		 * Bit 7 is the sign bit for error code. If it is set, then a
+		 * negative error correction needs to be made. Otherwise, a
+		 * positive error correction needs to be made.
+		 */
+		rc_clk_err_deci_pct = (chip->clk_trim_error_code & 0x0F) * 7;
+		if (chip->clk_trim_error_code & BIT(7))
+			play_rate = (play_rate *
+					(1000 - rc_clk_err_deci_pct)) / 1000;
+		else
+			play_rate = (play_rate *
+					(1000 + rc_clk_err_deci_pct)) / 1000;
+
+		pr_debug("TRIM register = 0x%x, play_rate=%d\n",
+			chip->clk_trim_error_code, play_rate);
+	}
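+	/*
+	 * Worked example (illustrative): a trim code of 0x83 encodes an
+	 * error of 3 LSBs (3 * 0.7% = 2.1%) with the sign bit set, so
+	 * play_rate is scaled by (1000 - 21) / 1000, i.e. reduced by 2.1%.
+	 */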
+
+	/*
+	 * Configure RATE_CFG1 and RATE_CFG2 registers.
+	 * Note: For ERM these registers act as play rate and
+	 * for LRA these represent resonance period
+	 */
+	rc = qpnp_haptics_update_rate_cfg(chip, play_rate);
+	if (rc < 0)
+		return rc;
+
+	if (chip->act_type == HAP_LRA) {
+		chip->drive_period_code_max_limit = (play_rate *
+			(100 + chip->drive_period_code_max_var_pct)) / 100;
+		chip->drive_period_code_min_limit = (play_rate *
+			(100 - chip->drive_period_code_min_var_pct)) / 100;
+		pr_debug("Drive period code max limit %x min limit %x\n",
+			chip->drive_period_code_max_limit,
+			chip->drive_period_code_min_limit);
+	}
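+	/*
+	 * Example (illustrative): with a RATE_CFG code of 1333 and the
+	 * default 25% variation, the limits above come out to 1666 (max)
+	 * and 999 (min).
+	 */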
+
+	rc = qpnp_haptics_brake_config(chip, NULL);
+	if (rc < 0)
+		return rc;
+
+	if (chip->play_mode == HAP_BUFFER) {
+		rc = qpnp_haptics_wave_rep_config(chip,
+			HAP_WAVE_REPEAT | HAP_WAVE_SAMP_REPEAT);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_haptics_buffer_config(chip, NULL, false);
+	} else if (chip->play_mode == HAP_PWM) {
+		rc = qpnp_haptics_pwm_config(chip);
+	} else if (chip->play_mode == HAP_AUDIO) {
+		rc = qpnp_haptics_mod_enable(chip, true);
+	}
+
+	if (rc < 0)
+		return rc;
+
+	/* setup play irq */
+	if (chip->play_irq >= 0) {
+		rc = devm_request_threaded_irq(&chip->pdev->dev, chip->play_irq,
+			NULL, qpnp_haptics_play_irq_handler, IRQF_ONESHOT,
+			"haptics_play_irq", chip);
+		if (rc < 0) {
+			pr_err("Unable to request play(%d) IRQ(err:%d)\n",
+				chip->play_irq, rc);
+			return rc;
+		}
+
+		/* use play_irq only for buffer mode */
+		if (chip->play_mode != HAP_BUFFER) {
+			disable_irq(chip->play_irq);
+			chip->play_irq_en = false;
+		}
+	}
+
+	/* setup short circuit irq */
+	if (chip->sc_irq >= 0) {
+		rc = devm_request_threaded_irq(&chip->pdev->dev, chip->sc_irq,
+			NULL, qpnp_haptics_sc_irq_handler, IRQF_ONESHOT,
+			"haptics_sc_irq", chip);
+		if (rc < 0) {
+			pr_err("Unable to request sc(%d) IRQ(err:%d)\n",
+				chip->sc_irq, rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int qpnp_haptics_parse_buffer_dt(struct hap_chip *chip)
+{
+	struct device_node *node = chip->pdev->dev.of_node;
+	u32 temp;
+	int rc, i, wf_samp_len;
+
+	if (chip->wave_rep_cnt > 0 || chip->wave_s_rep_cnt > 0)
+		return 0;
+
+	chip->wave_rep_cnt = WF_REPEAT_MIN;
+	rc = of_property_read_u32(node, "qcom,wave-rep-cnt", &temp);
+	if (!rc) {
+		chip->wave_rep_cnt = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read rep cnt rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->wave_s_rep_cnt = WF_S_REPEAT_MIN;
+	rc = of_property_read_u32(node,
+			"qcom,wave-samp-rep-cnt", &temp);
+	if (!rc) {
+		chip->wave_s_rep_cnt = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read samp rep cnt rc=%d\n", rc);
+		return rc;
+	}
+
+	wf_samp_len = of_property_count_elems_of_size(node,
+			"qcom,wave-samples", sizeof(u32));
+	if (wf_samp_len > 0) {
+		if (wf_samp_len > HAP_WAVE_SAMP_SET_LEN) {
+			pr_err("Invalid length for wave samples\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(node, "qcom,wave-samples",
+				chip->wave_samp, wf_samp_len);
+		if (rc < 0) {
+			pr_err("Error in reading qcom,wave-samples, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else {
+		/* Use default values */
+		for (i = 0; i < HAP_WAVE_SAMP_LEN; i++)
+			chip->wave_samp[i] = HAP_WF_SAMP_MAX;
+	}
+
+	return 0;
+}
+
+static int qpnp_haptics_parse_pwm_dt(struct hap_chip *chip)
+{
+	struct device_node *node = chip->pdev->dev.of_node;
+	u32 temp;
+	int rc;
+
+	if (chip->pwm_data.period_us > 0 && chip->pwm_data.duty_us > 0)
+		return 0;
+
+	chip->pwm_data.pwm_dev = of_pwm_get(node, NULL);
+	if (IS_ERR(chip->pwm_data.pwm_dev)) {
+		rc = PTR_ERR(chip->pwm_data.pwm_dev);
+		pr_err("Cannot get PWM device rc=%d\n", rc);
+		chip->pwm_data.pwm_dev = NULL;
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,period-us", &temp);
+	if (!rc) {
+		chip->pwm_data.period_us = temp;
+	} else {
+		pr_err("Cannot read PWM period rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,duty-us", &temp);
+	if (!rc) {
+		chip->pwm_data.duty_us = temp;
+	} else {
+		pr_err("Cannot read PWM duty rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = of_property_read_u32(node, "qcom,ext-pwm-dtest-line", &temp);
+	if (!rc)
+		chip->ext_pwm_dtest_line = temp;
+
+	rc = of_property_read_u32(node, "qcom,ext-pwm-freq-khz", &temp);
+	if (!rc) {
+		chip->ext_pwm_freq_khz = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ext pwm freq rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
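+/*
+ * Illustrative DT node consumed by qpnp_haptics_parse_dt(). The property
+ * values below are examples only; the DT bindings and the checks in this
+ * driver constrain the actual values:
+ *
+ *	qcom,haptics@c000 {
+ *		compatible = "qcom,qpnp-haptics";
+ *		reg = <0xc000 0x100>;
+ *		interrupt-names = "hap-sc-irq", "hap-play-irq";
+ *		qcom,pmic-revid = <&pmic_revid>;
+ *		qcom,play-mode = "direct";
+ *		qcom,vmax-mv = <3200>;
+ *		qcom,wave-play-rate-us = <6667>;
+ *		qcom,wave-shape = "square";
+ *		qcom,lra-auto-res-mode = "qwd";
+ *		qcom,lra-high-z = "opt1";
+ *	};
+ */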
+static int qpnp_haptics_parse_dt(struct hap_chip *chip)
+{
+	struct device_node *node = chip->pdev->dev.of_node;
+	struct device_node *revid_node, *misc_node;
+	const char *temp_str;
+	int rc, temp;
+
+	rc = of_property_read_u32(node, "reg", &temp);
+	if (rc < 0) {
+		pr_err("Couldn't find reg in node = %s rc = %d\n",
+			node->full_name, rc);
+		return rc;
+	}
+
+	if (temp <= 0) {
+		pr_err("Invalid base address %x\n", temp);
+		return -EINVAL;
+	}
+	chip->base = (u16)temp;
+
+	revid_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+	if (!revid_node) {
+		pr_err("Missing qcom,pmic-revid property\n");
+		return -EINVAL;
+	}
+
+	chip->revid = get_revid_data(revid_node);
+	of_node_put(revid_node);
+	if (IS_ERR_OR_NULL(chip->revid)) {
+		pr_err("Unable to get pmic_revid rc=%ld\n",
+			PTR_ERR(chip->revid));
+		/*
+		 * the revid peripheral must be registered, any failure
+		 * here only indicates that the rev-id module has not
+		 * probed yet.
+		 */
+		return -EPROBE_DEFER;
+	}
+
+	if (of_find_property(node, "qcom,pmic-misc", NULL)) {
+		misc_node = of_parse_phandle(node, "qcom,pmic-misc", 0);
+		if (!misc_node)
+			return -EINVAL;
+
+		rc = of_property_read_u32(node, "qcom,misc-clk-trim-error-reg",
+				&chip->misc_clk_trim_error_reg);
+		if (rc < 0 || !chip->misc_clk_trim_error_reg) {
+			pr_err("Invalid or missing misc-clk-trim-error-reg\n");
+			of_node_put(misc_node);
+			return rc;
+		}
+
+		rc = qpnp_misc_read_reg(misc_node,
+				chip->misc_clk_trim_error_reg,
+				&chip->clk_trim_error_code);
+		if (rc < 0) {
+			pr_err("Couldn't get clk_trim_error_code, rc=%d\n", rc);
+			of_node_put(misc_node);
+			return -EPROBE_DEFER;
+		}
+		of_node_put(misc_node);
+	}
+
+	chip->play_irq = platform_get_irq_byname(chip->pdev, "hap-play-irq");
+	if (chip->play_irq < 0) {
+		pr_err("Unable to get play irq\n");
+		return chip->play_irq;
+	}
+
+	chip->sc_irq = platform_get_irq_byname(chip->pdev, "hap-sc-irq");
+	if (chip->sc_irq < 0) {
+		pr_err("Unable to get sc irq\n");
+		return chip->sc_irq;
+	}
+
+	chip->act_type = HAP_LRA;
+	rc = of_property_read_u32(node, "qcom,actuator-type", &temp);
+	if (!rc) {
+		if (temp != HAP_LRA && temp != HAP_ERM) {
+			pr_err("Incorrect actuator type\n");
+			return -EINVAL;
+		}
+		chip->act_type = temp;
+	}
+
+	chip->lra_auto_mode = of_property_read_bool(node, "qcom,lra-auto-mode");
+
+	rc = of_property_read_string(node, "qcom,play-mode", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "direct") == 0)
+			chip->play_mode = HAP_DIRECT;
+		else if (strcmp(temp_str, "buffer") == 0)
+			chip->play_mode = HAP_BUFFER;
+		else if (strcmp(temp_str, "pwm") == 0)
+			chip->play_mode = HAP_PWM;
+		else if (strcmp(temp_str, "audio") == 0)
+			chip->play_mode = HAP_AUDIO;
+		else {
+			pr_err("Invalid play mode\n");
+			return -EINVAL;
+		}
+	} else {
+		if (rc == -EINVAL && chip->act_type == HAP_LRA) {
+			pr_info("Play mode not specified, using auto mode\n");
+			chip->lra_auto_mode = true;
+		} else {
+			pr_err("Unable to read play mode\n");
+			return rc;
+		}
+	}
+
+	chip->max_play_time_ms = HAP_MAX_PLAY_TIME_MS;
+	rc = of_property_read_u32(node, "qcom,max-play-time-ms", &temp);
+	if (!rc) {
+		chip->max_play_time_ms = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read max-play-time rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->vmax_mv = HAP_VMAX_MAX_MV;
+	rc = of_property_read_u32(node, "qcom,vmax-mv", &temp);
+	if (!rc) {
+		chip->vmax_mv = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read Vmax rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->ilim_ma = HAP_ILIM_400_MA;
+	rc = of_property_read_u32(node, "qcom,ilim-ma", &temp);
+	if (!rc) {
+		chip->ilim_ma = (u8)temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read ILIM rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->sc_deb_cycles = HAP_DEF_SC_DEB_CYCLES;
+	rc = of_property_read_u32(node, "qcom,sc-dbc-cycles", &temp);
+	if (!rc) {
+		chip->sc_deb_cycles = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read sc debounce rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->wave_shape = HAP_WAVE_SQUARE;
+	rc = of_property_read_string(node, "qcom,wave-shape", &temp_str);
+	if (!rc) {
+		if (strcmp(temp_str, "sine") == 0)
+			chip->wave_shape = HAP_WAVE_SINE;
+		else if (strcmp(temp_str, "square") == 0)
+			chip->wave_shape = HAP_WAVE_SQUARE;
+		else {
+			pr_err("Unsupported wave shape\n");
+			return -EINVAL;
+		}
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read wave shape rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->wave_play_rate_us = HAP_DEF_WAVE_PLAY_RATE_US;
+	rc = of_property_read_u32(node,
+			"qcom,wave-play-rate-us", &temp);
+	if (!rc) {
+		chip->wave_play_rate_us = temp;
+	} else if (rc != -EINVAL) {
+		pr_err("Unable to read play rate rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->wave_play_rate_us < HAP_WAVE_PLAY_RATE_US_MIN)
+		chip->wave_play_rate_us = HAP_WAVE_PLAY_RATE_US_MIN;
+	else if (chip->wave_play_rate_us > HAP_WAVE_PLAY_RATE_US_MAX)
+		chip->wave_play_rate_us = HAP_WAVE_PLAY_RATE_US_MAX;
+
+	chip->en_brake = of_property_read_bool(node, "qcom,en-brake");
+
+	rc = of_property_count_elems_of_size(node,
+			"qcom,brake-pattern", sizeof(u32));
+	if (rc > 0) {
+		if (rc != HAP_BRAKE_PAT_LEN) {
+			pr_err("Invalid length for brake pattern\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(node, "qcom,brake-pattern",
+				chip->brake_pat, HAP_BRAKE_PAT_LEN);
+		if (rc < 0) {
+			pr_err("Error in reading qcom,brake-pattern, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	/* Read the following properties only for LRA */
+	if (chip->act_type == HAP_LRA) {
+		rc = of_property_read_string(node, "qcom,lra-auto-res-mode",
+					&temp_str);
+		if (!rc) {
+			if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+				chip->ares_cfg.auto_res_mode =
+						HAP_PM660_AUTO_RES_QWD;
+				if (strcmp(temp_str, "zxd") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_PM660_AUTO_RES_ZXD;
+				else if (strcmp(temp_str, "qwd") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_PM660_AUTO_RES_QWD;
+			} else {
+				chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_ZXD_EOP;
+				if (strcmp(temp_str, "none") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_NONE;
+				else if (strcmp(temp_str, "zxd") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_ZXD;
+				else if (strcmp(temp_str, "qwd") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_QWD;
+				else if (strcmp(temp_str, "max-qwd") == 0)
+					chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_MAX_QWD;
+				else
+					chip->ares_cfg.auto_res_mode =
+						HAP_AUTO_RES_ZXD_EOP;
+			}
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read auto res mode rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->ares_cfg.lra_high_z = HAP_LRA_HIGH_Z_OPT3;
+		rc = of_property_read_string(node, "qcom,lra-high-z",
+					&temp_str);
+		if (!rc) {
+			if (strcmp(temp_str, "none") == 0)
+				chip->ares_cfg.lra_high_z =
+					HAP_LRA_HIGH_Z_NONE;
+			else if (strcmp(temp_str, "opt1") == 0)
+				chip->ares_cfg.lra_high_z =
+					HAP_LRA_HIGH_Z_OPT1;
+			else if (strcmp(temp_str, "opt2") == 0)
+				chip->ares_cfg.lra_high_z =
+					HAP_LRA_HIGH_Z_OPT2;
+			else
+				chip->ares_cfg.lra_high_z =
+					HAP_LRA_HIGH_Z_OPT3;
+			if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+				if (strcmp(temp_str, "opt0") == 0)
+					chip->ares_cfg.lra_high_z =
+						HAP_LRA_HIGH_Z_NONE;
+			}
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read LRA high-z rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->ares_cfg.lra_res_cal_period = HAP_RES_CAL_PERIOD_MAX;
+		rc = of_property_read_u32(node,
+				"qcom,lra-res-cal-period", &temp);
+		if (!rc) {
+			chip->ares_cfg.lra_res_cal_period = temp;
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read cal period rc=%d\n", rc);
+			return rc;
+		}
+
+		chip->ares_cfg.lra_qwd_drive_duration = -EINVAL;
+		chip->ares_cfg.calibrate_at_eop = -EINVAL;
+		if (chip->revid->pmic_subtype == PM660_SUBTYPE) {
+			rc = of_property_read_u32(node,
+					"qcom,lra-qwd-drive-duration",
+					&chip->ares_cfg.lra_qwd_drive_duration);
+			if (rc && rc != -EINVAL) {
+				pr_err("Unable to read LRA QWD drive duration rc=%d\n",
+					rc);
+				return rc;
+			}
+
+			rc = of_property_read_u32(node,
+					"qcom,lra-calibrate-at-eop",
+					&chip->ares_cfg.calibrate_at_eop);
+			if (rc && rc != -EINVAL) {
+				pr_err("Unable to read Calibrate at EOP rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+
+		chip->drive_period_code_max_var_pct = 25;
+		rc = of_property_read_u32(node,
+			"qcom,drive-period-code-max-variation-pct", &temp);
+		if (!rc) {
+			if (temp > 0 && temp < 100)
+				chip->drive_period_code_max_var_pct = (u8)temp;
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read drive period code max var pct rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		chip->drive_period_code_min_var_pct = 25;
+		rc = of_property_read_u32(node,
+			"qcom,drive-period-code-min-variation-pct", &temp);
+		if (!rc) {
+			if (temp > 0 && temp < 100)
+				chip->drive_period_code_min_var_pct = (u8)temp;
+		} else if (rc != -EINVAL) {
+			pr_err("Unable to read drive period code min var pct rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		chip->auto_res_err_recovery_hw =
+			of_property_read_bool(node,
+				"qcom,auto-res-err-recovery-hw");
+
+		if (chip->revid->pmic_subtype != PM660_SUBTYPE)
+			chip->auto_res_err_recovery_hw = false;
+	}
+
+	if (rc == -EINVAL)
+		rc = 0;
+
+	if (chip->play_mode == HAP_BUFFER)
+		rc = qpnp_haptics_parse_buffer_dt(chip);
+	else if (chip->play_mode == HAP_PWM)
+		rc = qpnp_haptics_parse_pwm_dt(chip);
+
+	return rc;
+}
+
+static int qpnp_haptics_probe(struct platform_device *pdev)
+{
+	struct hap_chip *chip;
+	int rc, i;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chip->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	chip->pdev = pdev;
+	rc = qpnp_haptics_parse_dt(chip);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Error in parsing DT parameters, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	spin_lock_init(&chip->bus_lock);
+	mutex_init(&chip->play_lock);
+	mutex_init(&chip->param_lock);
+	INIT_WORK(&chip->haptics_work, qpnp_haptics_work);
+
+	rc = qpnp_haptics_config(chip);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Error in configuring haptics, rc=%d\n",
+			rc);
+		goto fail;
+	}
+
+	hrtimer_init(&chip->stop_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	chip->stop_timer.function = hap_stop_timer;
+	hrtimer_init(&chip->auto_res_err_poll_timer, CLOCK_MONOTONIC,
+			HRTIMER_MODE_REL);
+	chip->auto_res_err_poll_timer.function = hap_auto_res_err_poll_timer;
+	dev_set_drvdata(&pdev->dev, chip);
+
+	chip->cdev.name = "vibrator";
+	chip->cdev.brightness_get = qpnp_haptics_brightness_get;
+	chip->cdev.brightness_set = qpnp_haptics_brightness_set;
+	chip->cdev.max_brightness = 100;
+	rc = devm_led_classdev_register(&pdev->dev, &chip->cdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Error in registering led class device, rc=%d\n",
+			rc);
+		goto register_fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(qpnp_haptics_attrs); i++) {
+		rc = sysfs_create_file(&chip->cdev.dev->kobj,
+				&qpnp_haptics_attrs[i].attr);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "Error in creating sysfs file, rc=%d\n",
+				rc);
+			goto sysfs_fail;
+		}
+	}
+
+	return 0;
+
+sysfs_fail:
+	for (--i; i >= 0; i--)
+		sysfs_remove_file(&chip->cdev.dev->kobj,
+				&qpnp_haptics_attrs[i].attr);
+register_fail:
+	cancel_work_sync(&chip->haptics_work);
+	hrtimer_cancel(&chip->auto_res_err_poll_timer);
+	hrtimer_cancel(&chip->stop_timer);
+fail:
+	mutex_destroy(&chip->play_lock);
+	mutex_destroy(&chip->param_lock);
+	if (chip->pwm_data.pwm_dev)
+		pwm_put(chip->pwm_data.pwm_dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+	return rc;
+}
+
+static int qpnp_haptics_remove(struct platform_device *pdev)
+{
+	struct hap_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	cancel_work_sync(&chip->haptics_work);
+	hrtimer_cancel(&chip->auto_res_err_poll_timer);
+	hrtimer_cancel(&chip->stop_timer);
+	mutex_destroy(&chip->play_lock);
+	mutex_destroy(&chip->param_lock);
+	if (chip->pwm_data.pwm_dev)
+		pwm_put(chip->pwm_data.pwm_dev);
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+
+static const struct dev_pm_ops qpnp_haptics_pm_ops = {
+	.suspend	= qpnp_haptics_suspend,
+};
+
+static const struct of_device_id hap_match_table[] = {
+	{ .compatible = "qcom,qpnp-haptics" },
+	{ },
+};
+
+static struct platform_driver qpnp_haptics_driver = {
+	.driver		= {
+		.name		= "qcom,qpnp-haptics",
+		.of_match_table	= hap_match_table,
+		.pm		= &qpnp_haptics_pm_ops,
+	},
+	.probe		= qpnp_haptics_probe,
+	.remove		= qpnp_haptics_remove,
+};
+module_platform_driver(qpnp_haptics_driver);
+
+MODULE_DESCRIPTION("QPNP haptics driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6c7f6c4..d2cb1e8 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -60,6 +60,7 @@
 	void *data;
 	int err = -EBUSY;
 
+again:
 	spin_lock_irqsave(&chan->lock, flags);
 
 	if (!chan->msg_count || chan->active_req)
@@ -85,6 +86,16 @@
 exit:
 	spin_unlock_irqrestore(&chan->lock, flags);
 
+	/*
+	 * If the controller returns -EAGAIN, our spinlock is preventing the
+	 * controller from servicing the interrupt that would clear the
+	 * channels currently blocked waiting on a response. Unlock and
+	 * retry.
+	 */
+	if (err == -EAGAIN)
+		goto again;
+
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
 		/* kick start the timer immediately to avoid delays */
 		hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index b328a2a..43343a06 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -28,7 +28,6 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
-#include <linux/workqueue.h>
 
 #include <asm-generic/io.h>
 
@@ -95,10 +94,10 @@
 	struct mbox_chan *chan;
 	struct tcs_mbox_msg *msg;
 	u32 m; /* m-th TCS */
-	struct tasklet_struct tasklet;
 	int err;
 	int idx;
 	bool in_use;
+	struct list_head list;
 };
 
 struct tcs_response_pool {
@@ -122,16 +121,18 @@
 
 /* One per MBOX controller */
 struct tcs_drv {
+	struct mbox_controller mbox;
 	const char *name;
-	void *base; /* start address of the RSC's registers */
-	void *reg_base; /* start address for DRV specific register */
+	void __iomem *base; /* start address of the RSC's registers */
+	void __iomem *reg_base; /* start address for DRV specific register */
 	int drv_id;
 	struct platform_device *pdev;
-	struct mbox_controller mbox;
 	struct tcs_mbox tcs[TCS_TYPE_NR];
 	int num_assigned;
 	int num_tcs;
-	struct workqueue_struct *wq;
+	struct tasklet_struct tasklet;
+	struct list_head response_pending;
+	spinlock_t drv_lock;
 	struct tcs_response_pool *resp_pool;
 	atomic_t tcs_in_use[MAX_POOL_SIZE];
 	/* Debug info */
@@ -141,8 +142,6 @@
 	atomic_t tcs_irq_count[MAX_POOL_SIZE];
 };
 
-static void tcs_notify_tx_done(unsigned long data);
-
 static int tcs_response_pool_init(struct tcs_drv *drv)
 {
 	struct tcs_response_pool *pool;
@@ -153,11 +152,10 @@
 		return -ENOMEM;
 
 	for (i = 0; i < MAX_POOL_SIZE; i++) {
-		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
-						(unsigned long) &pool->resp[i]);
 		pool->resp[i].drv = drv;
 		pool->resp[i].idx = i;
 		pool->resp[i].m = TCS_M_INIT;
+		INIT_LIST_HEAD(&pool->resp[i].list);
 	}
 
 	spin_lock_init(&pool->lock);
@@ -188,6 +186,9 @@
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
+	if (pos == MAX_POOL_SIZE)
+		pr_err("response pool is full\n");
+
 	return resp;
 }
 
@@ -240,11 +241,11 @@
 		return;
 
 	msg = resp->msg;
-	pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+	pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
 			resp->idx, resp->m, resp->in_use);
-	pr_info("Msg: state=%d\n", msg->state);
+	pr_debug("Msg: state=%d\n", msg->state);
 	for (i = 0; i < msg->num_payload; i++)
-		pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+		pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
 				msg->payload[i].addr,
 				msg->payload[i].data,
 				msg->payload[i].complete);
@@ -364,7 +365,15 @@
 
 static inline void send_tcs_response(struct tcs_response *resp)
 {
-	tasklet_schedule(&resp->tasklet);
+	struct tcs_drv *drv = resp->drv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drv->drv_lock, flags);
+	INIT_LIST_HEAD(&resp->list);
+	list_add_tail(&resp->list, &drv->response_pending);
+	spin_unlock_irqrestore(&drv->drv_lock, flags);
+
+	tasklet_schedule(&drv->tasklet);
 }
 
 static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
@@ -455,12 +464,12 @@
 		/* Clear the TCS IRQ status */
 		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
 
+		/* Notify the client that this request is completed. */
+		atomic_set(&drv->tcs_in_use[m], 0);
+
 		/* Clean up response object and notify mbox in tasklet */
 		if (resp)
 			send_tcs_response(resp);
-
-		/* Notify the client that this request is completed. */
-		atomic_set(&drv->tcs_in_use[m], 0);
 	}
 
 	return IRQ_HANDLED;
@@ -475,19 +484,38 @@
 	mbox_chan_txdone(chan, err);
 }
 
-/**
- * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
- */
-static void tcs_notify_tx_done(unsigned long data)
+static void respond_tx_done(struct tcs_response *resp)
 {
-	struct tcs_response *resp = (struct tcs_response *) data;
 	struct mbox_chan *chan = resp->chan;
 	struct tcs_mbox_msg *msg = resp->msg;
 	int err = resp->err;
 	int m = resp->m;
 
-	mbox_notify_tx_done(chan, msg, m, err);
 	free_response(resp);
+	mbox_notify_tx_done(chan, msg, m, err);
+}
+
+/**
+ * tcs_notify_tx_done: tasklet callback that drains the response_pending
+ * list and notifies the mbox core of TX done for each queued response
+ */
+static void tcs_notify_tx_done(unsigned long data)
+{
+	struct tcs_drv *drv = (struct tcs_drv *)data;
+	struct tcs_response *resp;
+	unsigned long flags;
+
+	do {
+		spin_lock_irqsave(&drv->drv_lock, flags);
+		if (list_empty(&drv->response_pending)) {
+			spin_unlock_irqrestore(&drv->drv_lock, flags);
+			break;
+		}
+		resp = list_first_entry(&drv->response_pending,
+					struct tcs_response, list);
+		list_del(&resp->list);
+		spin_unlock_irqrestore(&drv->drv_lock, flags);
+		respond_tx_done(resp);
+	} while (1);
 }
 
 static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -673,8 +701,11 @@
 	if (IS_ERR(tcs))
 		return PTR_ERR(tcs);
 
-	if (trigger)
+	if (trigger) {
 		resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+		if (IS_ERR_OR_NULL(resp))
+			return -EBUSY;
+	}
 
 	/* Identify the sequential slots that we can write to */
 	spin_lock_irqsave(&tcs->tcs_lock, flags);
@@ -686,28 +717,21 @@
 		return slot;
 	}
 
-	if (trigger) {
-		ret = check_for_req_inflight(drv, tcs, msg);
-		if (ret) {
-			spin_unlock_irqrestore(&tcs->tcs_lock, flags);
-			return ret;
-		}
-	}
-
-	/* Mark the slots as in-use, before we unlock */
-	if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
-		bitmap_set(tcs->slots, slot, msg->num_payload);
-
-	/* Copy the addresses of the resources over to the slots */
-	for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
-		tcs->cmd_addr[slot + i] = msg->payload[i].addr;
-
+	/* Figure out the TCS-m and CMD-n to write to */
 	offset = slot / tcs->ncpt;
 	m = offset + tcs->tcs_offset;
 	n = slot % tcs->ncpt;
 
-	/* Block, if we have an address from the msg in flight */
 	if (trigger) {
+		/* Block, if we have an address from the msg in flight */
+		ret = check_for_req_inflight(drv, tcs, msg);
+		if (ret) {
+			spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+			if (resp)
+				free_response(resp);
+			return ret;
+		}
+
 		resp->m = m;
 		/* Mark the TCS as busy */
 		atomic_set(&drv->tcs_in_use[m], 1);
@@ -716,6 +740,14 @@
 		if (tcs->type != ACTIVE_TCS)
 			enable_tcs_irq(drv, m, true);
 		drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
+	} else {
+		/* Mark the slots as in-use, before we unlock */
+		if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
+			bitmap_set(tcs->slots, slot, msg->num_payload);
+
+		/* Copy the addresses of the resources over to the slots */
+		for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
+			tcs->cmd_addr[slot + i] = msg->payload[i].addr;
 	}
 
 	/* Write to the TCS or AMC */
@@ -758,6 +790,32 @@
 	return 0;
 }
 
+static void print_tcs_regs(struct tcs_drv *drv, int m)
+{
+	int n;
+	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+	void __iomem *base = drv->reg_base;
+	u32 enable, addr, data, msgid;
+
+	if (!tcs || tcs_is_free(drv, m))
+		return;
+
+	enable = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+	if (!enable)
+		return;
+
+	pr_debug("TCS-%d contents:\n", m);
+	for (n = 0; n < tcs->ncpt; n++) {
+		if (!(enable & BIT(n)))
+			continue;
+		addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
+		data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
+		msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
+		pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
+						n, addr, data, msgid);
+	}
+}
+
 static void dump_tcs_stats(struct tcs_drv *drv)
 {
 	int i;
@@ -766,12 +824,13 @@
 	for (i = 0; i < drv->num_tcs; i++) {
 		if (!atomic_read(&drv->tcs_in_use[i]))
 			continue;
-		pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+		pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
 				curr, i,
 				atomic_read(&drv->tcs_send_count[i]),
 				drv->tcs_last_sent_ts[i],
 				atomic_read(&drv->tcs_irq_count[i]),
 				drv->tcs_last_recv_ts[i]);
+		print_tcs_regs(drv, i);
 		print_response(drv, i);
 	}
 }
@@ -793,7 +852,6 @@
 	struct tcs_mbox_msg *msg = data;
 	const struct device *dev = chan->cl->dev;
 	int ret = 0;
-	int count = 0;
 
 	if (!msg) {
 		dev_err(dev, "Payload error\n");
@@ -835,12 +893,7 @@
 		tcs_mbox_invalidate(chan);
 
 	/* Post the message to the TCS and trigger */
-	do {
-		ret = tcs_mbox_write(chan, msg, true);
-		if (ret != -EBUSY)
-			break;
-		udelay(100);
-	} while (++count < 10);
+	ret = tcs_mbox_write(chan, msg, true);
 
 tx_fail:
 	/* If there was an error in the request, schedule a response */
@@ -849,14 +902,16 @@
 				drv, msg, chan, TCS_M_INIT, ret);
 
 		dev_err(dev, "Error sending RPMH message %d\n", ret);
-		send_tcs_response(resp);
+		if (resp)
+			send_tcs_response(resp);
 		ret = 0;
 	}
 
 	/* If we were just busy waiting for TCS, dump the state and return */
 	if (ret == -EBUSY) {
-		dev_err(dev, "TCS Busy, retrying RPMH message send\n");
+		pr_info("TCS Busy, retrying RPMH message send\n");
 		dump_tcs_stats(drv);
+		ret = -EAGAIN;
 	}
 
 	return ret;
@@ -967,6 +1022,7 @@
 	}
 
 	chan = &mbox->chans[drv->num_assigned++];
+	chan->con_priv = drv;
 
 	return chan;
 }
@@ -1108,6 +1164,9 @@
 	drv->mbox.is_idle = tcs_drv_is_idle;
 	drv->num_tcs = st;
 	drv->pdev = pdev;
+	INIT_LIST_HEAD(&drv->response_pending);
+	spin_lock_init(&drv->drv_lock);
+	tasklet_init(&drv->tasklet, tcs_notify_tx_done, (unsigned long)drv);
 
 	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
 	if (!drv->name)
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08..ed25f30 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -80,8 +80,6 @@
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
-
 /*
  * To check the return value from dm_table_find_target().
  */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
index 7f2c455..1105d2c 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -692,7 +692,19 @@
 
 	CDM_CDBG("Waiting for CDM HW resetdone\n");
 	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
-			msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+		msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+
+	/*
+	 * Workaround for a HW issue: the CDM HW sometimes raises the irq
+	 * with an invalid status for the HW reset command, so the wait
+	 * above can time out spuriously. Log the timeout, force time_left
+	 * to a non-zero value and proceed instead of failing the reset.
+	 */
+	if (time_left <= 0) {
+		pr_err("CDM HW reset Wait failed time_left=%ld\n", time_left);
+		time_left = 1;
+	}
+
 	if (time_left <= 0) {
 		pr_err("CDM HW reset Wait failed rc=%d\n", rc);
 		goto disable_return;
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index 034c782..3d258b4 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -318,37 +318,6 @@
 	cdm_write_genirq,
 };
 
-void cam_cdm_data_alignement_check(void)
-{
-	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
-		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI)));
-	BUILD_BUG_ON(sizeof(struct cdm_regcontinuous_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)));
-	BUILD_BUG_ON(sizeof(struct cdm_regrandom_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
-	BUILD_BUG_ON(sizeof(struct cdm_indirect_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT)));
-	BUILD_BUG_ON(sizeof(struct cdm_genirq_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ)));
-	BUILD_BUG_ON(sizeof(struct cdm_wait_event_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_WAIT_EVENT)));
-	BUILD_BUG_ON(sizeof(struct cdm_changebase_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE)));
-	BUILD_BUG_ON(sizeof(struct  cdm_perf_ctrl_cmd) !=
-		(CAM_CDM_DWORD *
-		cdm_get_cmd_header_size(CAM_CDM_CMD_PERF_CTRL)));
-	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
-		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_32)));
-	BUILD_BUG_ON(sizeof(struct cdm_dmi_cmd) !=
-		(CAM_CDM_DWORD * cdm_get_cmd_header_size(CAM_CDM_CMD_DMI_64)));
-}
-
 int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
 	uint32_t base_array_size,
 	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 4f246e1..9a30d64 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -628,9 +628,46 @@
 	return rc;
 }
 
-static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
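+/*
+ * Map a client's dynamic frequency vote to an AHB vote level: the requested
+ * frequency is rounded up to the nearest OPP, the regulator corner of that
+ * OPP is looked up, and the corner is matched against the vdd-corners /
+ * vdd-corner-ahb-mapping tables parsed from DT. A corner with no mapping
+ * falls back to CAM_SVS_VOTE.
+ */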
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+	struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct dev_pm_opp *opp;
+	unsigned int corner;
+	enum cam_vote_level level = CAM_SVS_VOTE;
+	unsigned long corner_freq = freq;
+	int i;
+
+	if (!dev || !req_level) {
+		pr_err("Invalid params %pK, %pK\n", dev, req_level);
+		return -EINVAL;
+	}
+
+	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+	if (IS_ERR(opp)) {
+		pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+		return -EINVAL;
+	}
+
+	corner = dev_pm_opp_get_voltage(opp);
+
+	for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+		if (corner == soc_private->vdd_ahb[i].vdd_corner)
+			level = soc_private->vdd_ahb[i].ahb_level;
+
+	CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+		freq, corner_freq, corner, level);
+
+	*req_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
 {
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
 	enum cam_vote_level required_level;
 	enum cam_vote_level highest_level;
@@ -642,12 +679,14 @@
 	}
 
 	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
-		pr_err("Dynamic AHB vote not supported\n");
-		return -EINVAL;
+		rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+			ahb_vote->vote.freq, &required_level);
+		if (rc)
+			return rc;
+	} else {
+		required_level = ahb_vote->vote.level;
 	}
 
-	required_level = ahb_vote->vote.level;
-
 	if (cpas_client->ahb_level == required_level)
 		return 0;
 
@@ -708,7 +747,7 @@
 		ahb_vote->vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
 		cpas_core->cpas_client[client_indx], ahb_vote);
 
 unlock_client:
@@ -780,7 +819,7 @@
 	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
 		client_indx, ahb_vote->type, ahb_vote->vote.level,
 		cpas_client->ahb_level);
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		ahb_vote);
 	if (rc)
 		goto done;
@@ -800,8 +839,8 @@
 			goto done;
 		}
 
-		if (cpas_core->internal_ops.power_on_settings) {
-			rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+		if (cpas_core->internal_ops.power_on) {
+			rc = cpas_core->internal_ops.power_on(cpas_hw);
 			if (rc) {
 				cam_cpas_soc_disable_resources(
 					&cpas_hw->soc_info);
@@ -873,6 +912,15 @@
 	cpas_core->streamon_clients--;
 
 	if (cpas_core->streamon_clients == 0) {
+		if (cpas_core->internal_ops.power_off) {
+			rc = cpas_core->internal_ops.power_off(cpas_hw);
+			if (rc) {
+				pr_err("failed in power_off settings rc=%d\n",
+					rc);
+				/* Do not return error, passthrough */
+			}
+		}
+
 		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
 		if (rc) {
 			pr_err("disable_resorce failed, rc=%d\n", rc);
@@ -883,7 +931,7 @@
 
 	ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		&ahb_vote);
 	if (rc)
 		goto done;
@@ -1282,15 +1330,22 @@
 	cpas_hw_intf->hw_ops.write = NULL;
 	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
 
+	cpas_core->work_queue = alloc_workqueue("cam-cpas",
+		WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+	if (!cpas_core->work_queue) {
+		rc = -ENOMEM;
+		goto release_mem;
+	}
+
 	internal_ops = &cpas_core->internal_ops;
 	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
-	if (rc != 0)
-		goto release_mem;
+	if (rc)
+		goto release_workq;
 
 	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
 		internal_ops->handle_irq, cpas_hw);
 	if (rc)
-		goto release_mem;
+		goto release_workq;
 
 	soc_private = (struct cam_cpas_private_soc *)
 		cpas_hw->soc_info.soc_private;
@@ -1375,6 +1430,9 @@
 	cam_cpas_util_client_cleanup(cpas_hw);
 deinit_platform_res:
 	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
 release_mem:
 	mutex_destroy(&cpas_hw->hw_mutex);
 	kfree(cpas_core);
@@ -1406,6 +1464,8 @@
 	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
 	cam_cpas_util_client_cleanup(cpas_hw);
 	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
 	mutex_destroy(&cpas_hw->hw_mutex);
 	kfree(cpas_core);
 	kfree(cpas_hw);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index c181302..6d4fafe 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -17,6 +17,7 @@
 #include "cam_cpas_hw_intf.h"
 
 #define CPAS_MAX_CLIENTS 20
+#define CAM_CPAS_INFLIGHT_WORKS 5
 
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
@@ -45,7 +46,8 @@
  * @init_hw_version: Function pointer for hw init based on version
  * @handle_irq: Function poniter for irq handling
  * @setup_regbase: Function pointer for setup rebase indices
- * @power_on_settings: Function pointer for hw core specific power on settings
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
  *
  */
 struct cam_cpas_internal_ops {
@@ -56,7 +58,8 @@
 	irqreturn_t (*handle_irq)(int irq_num, void *data);
 	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
 		int32_t regbase_index[], int32_t num_reg_map);
-	int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+	int (*power_on)(struct cam_hw_info *cpas_hw);
+	int (*power_off)(struct cam_hw_info *cpas_hw);
 };
 
 /**
@@ -167,6 +170,7 @@
  * @ahb_bus_client: AHB Bus client info
  * @axi_ports_list_head: Head pointing to list of AXI ports
  * @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
  *
  */
 struct cam_cpas {
@@ -180,6 +184,7 @@
 	struct cam_cpas_bus_client ahb_bus_client;
 	struct list_head axi_ports_list_head;
 	struct cam_cpas_internal_ops internal_ops;
+	struct workqueue_struct *work_queue;
 };
 
 int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index d2c3e06..9ee5a43 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -29,6 +29,13 @@
 
 #define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
 
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
 /**
  * enum cam_cpas_hw_type - Enum for CPAS HW type
  */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index fdebdc7..b774625 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -51,21 +51,23 @@
 static struct cam_cpas_intf *g_cpas_intf;
 
 int cam_cpas_get_hw_info(uint32_t *camera_family,
-	struct cam_hw_version *camera_version)
+	struct cam_hw_version *camera_version,
+	struct cam_hw_version *cpas_version)
 {
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
 		pr_err("cpas intf not initialized\n");
 		return -ENODEV;
 	}
 
-	if (!camera_family || !camera_version) {
-		pr_err("invalid input %pK %pK\n", camera_family,
-			camera_version);
+	if (!camera_family || !camera_version || !cpas_version) {
+		pr_err("invalid input %pK %pK %pK\n", camera_family,
+			camera_version, cpas_version);
 		return -EINVAL;
 	}
 
 	*camera_family = g_cpas_intf->hw_caps.camera_family;
 	*camera_version = g_cpas_intf->hw_caps.camera_version;
+	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
 
 	return 0;
 }
@@ -344,7 +346,7 @@
 		}
 
 		rc = cam_cpas_get_hw_info(&query.camera_family,
-			&query.camera_version);
+			&query.camera_version, &query.cpas_version);
 		if (rc)
 			break;
 
@@ -428,6 +430,7 @@
 static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
 	unsigned int cmd, unsigned long arg)
 {
+	struct cam_control cmd_data;
 	int32_t rc;
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
@@ -436,9 +439,16 @@
 		return -ENODEV;
 	}
 
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
 	switch (cmd) {
 	case VIDIOC_CAM_CONTROL:
-		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
 		break;
 	default:
 		pr_err("Invalid command %d for CPAS!\n", cmd);
@@ -446,6 +456,15 @@
 		break;
 	}
 
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
 	return rc;
 }
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0a8e6bb..0c71ece 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,6 +22,26 @@
 #include "cam_cpas_hw.h"
 #include "cam_cpas_soc.h"
 
+static int cam_cpas_get_vote_level_from_string(const char *string,
+	enum cam_vote_level *vote_level)
+{
+	if (!vote_level || !string)
+		return -EINVAL;
+
+	if (strnstr("suspend", string, strlen(string)))
+		*vote_level = CAM_SUSPEND_VOTE;
+	else if (strnstr("svs", string, strlen(string)))
+		*vote_level = CAM_SVS_VOTE;
+	else if (strnstr("nominal", string, strlen(string)))
+		*vote_level = CAM_NOMINAL_VOTE;
+	else if (strnstr("turbo", string, strlen(string)))
+		*vote_level = CAM_TURBO_VOTE;
+	else
+		*vote_level = CAM_SVS_VOTE;
+
+	return 0;
+}
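+
+/*
+ * For example, DT entries vdd-corner-ahb-mapping = "suspend", "svs",
+ * "nominal", "turbo" map to CAM_SUSPEND_VOTE, CAM_SVS_VOTE,
+ * CAM_NOMINAL_VOTE and CAM_TURBO_VOTE respectively; any unrecognized
+ * string falls back to CAM_SVS_VOTE.
+ */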
+
 int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
 	struct cam_cpas_private_soc *soc_private)
 {
@@ -89,6 +109,42 @@
 	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
 		"client-bus-camnoc-based");
 
+	count = of_property_count_u32_elems(of_node, "vdd-corners");
+	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+		count)) {
+		const char *ahb_string;
+
+		for (i = 0; i < count; i++) {
+			rc = of_property_read_u32_index(of_node, "vdd-corners",
+				i, &soc_private->vdd_ahb[i].vdd_corner);
+			if (rc) {
+				pr_err("vdd-corners failed at index=%d\n", i);
+				return -ENODEV;
+			}
+
+			rc = of_property_read_string_index(of_node,
+				"vdd-corner-ahb-mapping", i, &ahb_string);
+			if (rc) {
+				pr_err("no ahb-mapping at index=%d\n", i);
+				return -ENODEV;
+			}
+
+			rc = cam_cpas_get_vote_level_from_string(ahb_string,
+				&soc_private->vdd_ahb[i].ahb_level);
+			if (rc) {
+				pr_err("invalid ahb-string at index=%d\n", i);
+				return -EINVAL;
+			}
+
+			CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+				soc_private->vdd_ahb[i].vdd_corner,
+				ahb_string, soc_private->vdd_ahb[i].ahb_level);
+		}
+
+		soc_private->num_vdd_ahb_mapping = count;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index fdd9386..d3dfbbd 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -16,6 +16,19 @@
 #include "cam_soc_util.h"
 
 #define CAM_CPAS_MAX_CLIENTS 20
+#define CAM_REGULATOR_LEVEL_MAX 16
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponds to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+	unsigned int vdd_corner;
+	enum cam_vote_level ahb_level;
+};
 
 /**
  * struct cam_cpas_private_soc : CPAS private DT info
@@ -27,6 +40,8 @@
  * @axi_camnoc_based: Whether AXi access is camnoc based
  * @client_axi_port_name: AXI Port name for each client
  * @axi_port_list_node : Node representing AXI Ports list
+ * @num_vdd_ahb_mapping : Number of vdd to AHB level mappings supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
  *
  */
 struct cam_cpas_private_soc {
@@ -37,6 +52,8 @@
 	bool axi_camnoc_based;
 	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
 	struct device_node *axi_port_list_node;
+	uint32_t num_vdd_ahb_mapping;
+	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
 };
 
 int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
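For illustration, a minimal sketch (the helper name and caller context are hypothetical, not part of this change) of how the vdd_ahb table parsed from "vdd-corners"/"vdd-corner-ahb-mapping" could be consumed to translate a regulator corner into an AHB vote level:

	static enum cam_vote_level cam_cpas_util_corner_to_ahb_level(
		struct cam_cpas_private_soc *soc_private, unsigned int corner)
	{
		uint32_t i;

		/* Walk the DT-provided mapping; fall back to SVS if no entry matches */
		for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
			if (soc_private->vdd_ahb[i].vdd_corner == corner)
				return soc_private->vdd_ahb[i].ahb_level;

		return CAM_SVS_VOTE;
	}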
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index fa8ab89..95e26c5 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -81,7 +81,8 @@
 	internal_ops->init_hw_version = NULL;
 	internal_ops->handle_irq = NULL;
 	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
-	internal_ops->power_on_settings = NULL;
+	internal_ops->power_on = NULL;
+	internal_ops->power_off = NULL;
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 415de47..b901410 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/timer.h>
+#include <linux/slab.h>
 
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_hw.h"
@@ -105,15 +106,64 @@
 static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
 	struct cam_hw_soc_info *soc_info)
 {
-	uint32_t reg_value;
+	uint32_t reg_value[4];
 	int i;
+	int size = camnoc_info->error_logger_size;
 	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
 
-	for (i = 0; i < camnoc_info->error_logger_size; i++) {
-		reg_value = cam_io_r_mb(
+	for (i = 0; (i + 3) < size; i = i + 4) {
+		reg_value[0] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i]);
-		pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		reg_value[2] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 2]);
+		reg_value[3] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 3]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1], reg_value[2], reg_value[3]);
+	}
+
+	if ((i + 2) < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		reg_value[2] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 2]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1], reg_value[2]);
+		i = i + 3;
+	}
+
+	if ((i + 1) < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1]);
+		i = i + 2;
+	}
+
+	if (i < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		pr_err("offset[0x%x] values [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0]);
 	}
 
 	return 0;
@@ -128,9 +178,10 @@
 	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
 		camnoc_info->irq_err[i].err_status.offset);
 
-	pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+	pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+		i, camnoc_info->irq_err[i].err_status.offset, reg_value);
 
-	return 0;
+	return reg_value;
 }
 
 static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
@@ -172,65 +223,130 @@
 	return 0;
 }
 
-irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+	enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
 {
-	uint32_t irq_status;
-	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
-	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
-	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
-	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int i;
+	struct cam_cpas_client *cpas_client;
+
+	CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+		cpas_core->num_clients, cpas_core->registered_clients,
+		cpas_core->streamon_clients);
+
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+			cpas_client = cpas_core->cpas_client[i];
+			if (cpas_client->data.cam_cpas_client_cb) {
+				CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+					i, irq_type, irq_data);
+				cpas_client->data.cam_cpas_client_cb(
+					cpas_client->data.client_handle,
+					cpas_client->data.userdata,
+					(enum cam_camnoc_irq_type)irq_type,
+					irq_data);
+			}
+		}
+	}
+}
+
+static void cam_cpastop_work(struct work_struct *work)
+{
+	struct cam_cpas_work_payload *payload;
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_hw_soc_info *soc_info;
 	int i;
 	enum cam_camnoc_hw_irq_type irq_type;
+	uint32_t irq_data;
 
-	irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
-		camnoc_info->irq_sbm->sbm_status.offset);
+	payload = container_of(work, struct cam_cpas_work_payload, work);
+	if (!payload) {
+		pr_err("NULL payload\n");
+		return;
+	}
 
-	pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+	cpas_hw = payload->hw;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_info = &cpas_hw->soc_info;
 
 	for (i = 0; i < camnoc_info->irq_err_size; i++) {
-		if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
 			(camnoc_info->irq_err[i].enable)) {
 			irq_type = camnoc_info->irq_err[i].irq_type;
 			pr_err("Error occurred, type=%d\n", irq_type);
+			irq_data = 0;
 
 			switch (irq_type) {
 			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
-				cam_cpastop_handle_errlogger(cpas_core,
-					soc_info);
+				irq_data = cam_cpastop_handle_errlogger(
+					cpas_core, soc_info);
 				break;
 			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
-				cam_cpastop_handle_ubwc_err(cpas_core,
-					soc_info, i);
+				irq_data = cam_cpastop_handle_ubwc_err(
+					cpas_core, soc_info, i);
 				break;
 			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
-				cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+				irq_data = cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw);
 				break;
 			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
 				CPAS_CDBG("TEST IRQ\n");
 				break;
 			default:
+				pr_err("Invalid IRQ type\n");
 				break;
 			}
 
-			irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+			cam_cpastop_notify_clients(cpas_core, irq_type,
+				irq_data);
+
+			payload->irq_status &=
+				~camnoc_info->irq_err[i].sbm_port;
 		}
 	}
 
-	if (irq_status)
-		pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+	if (payload->irq_status)
+		pr_err("IRQ not handled irq_status=0x%x\n",
+			payload->irq_status);
+
+	kfree(payload);
+}
+
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	struct cam_cpas_work_payload *payload;
+
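+	/*
+	 * Hard-IRQ context: allocate the payload atomically and defer the
+	 * register dumps and client notifications to cam_cpastop_work(),
+	 * which frees the payload.
+	 */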
+	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+	if (!payload)
+		return IRQ_HANDLED;
+
+	payload->irq_status = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+
+	payload->hw = cpas_hw;
+	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
 
 	if (TEST_IRQ_ENABLE)
 		cam_cpastop_disable_test_irq(cpas_hw);
 
 	cam_cpastop_reset_irq(cpas_hw);
 
+	queue_work(cpas_core->work_queue, &payload->work);
+
 	return IRQ_HANDLED;
 }
 
-static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
 {
 	int i;
 
@@ -256,6 +372,38 @@
 	return 0;
 }
 
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int rc = 0;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+		camnoc_info->errata_wa_list;
+
+	if (!errata_wa_list)
+		return 0;
+
+	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+		struct cam_cpas_hw_errata_wa *errata_wa =
+			&errata_wa_list->camnoc_flush_slave_pending_trans;
+
+		rc = cam_io_poll_value_wmask(
+			soc_info->reg_map[camnoc_index].mem_base +
+			errata_wa->data.reg_info.offset,
+			errata_wa->data.reg_info.value,
+			errata_wa->data.reg_info.mask,
+			CAM_CPAS_POLL_RETRY_CNT,
+			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+		if (rc) {
+			pr_err("camnoc flush slave pending trans failed\n");
+			/* Do not return error, passthrough */
+		}
+	}
+
+	return rc;
+}
+
 static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_hw_caps *hw_caps)
 {
@@ -295,7 +443,8 @@
 	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
 	internal_ops->handle_irq = cam_cpastop_handle_irq;
 	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
-	internal_ops->power_on_settings = cam_cpastop_static_settings;
+	internal_ops->power_on = cam_cpastop_poweron;
+	internal_ops->power_off = cam_cpastop_poweroff;
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index 99aae3f..d5bb363 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -147,6 +147,31 @@
 };
 
 /**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+	bool enable;
+	union {
+		struct cam_cpas_reg reg_info;
+	} data;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ *         camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+	struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+};
+
+/**
  * struct cam_camnoc_info : Overall CAMNOC settings info
  *
  * @specific: Pointer to CAMNOC specific settings
@@ -156,6 +181,7 @@
  * @irq_err_size: Array size of IRQ Error settings
  * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
  * @error_logger_size: Array size of IRQ Error logger
+ * @errata_wa_list: HW Errata workaround info
  *
  */
 struct cam_camnoc_info {
@@ -166,6 +192,23 @@
 	int irq_err_size;
 	uint32_t *error_logger;
 	int error_logger_size;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
 };
 
 #endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index 12c8e66..b30cd05 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -279,17 +279,16 @@
 			.value = 3,
 		},
 		.danger_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
-			.value = 0x0,
+			.value = 0xFFFFFF00,
 		},
 		.safe_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
-			.value = 0x0,
+			.value = 0x3,
 		},
 		.ubwc_ctl = {
 			.enable = true,
@@ -328,18 +327,16 @@
 			.value = 3,
 		},
 		.danger_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
-			.value = 0x0,
+			.value = 0xFFFFFF00,
 		},
 		.safe_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
-			.value = 0x0,
+			.value = 0x3,
 		},
 		.ubwc_ctl = {
 			.enable = true,
@@ -516,6 +513,18 @@
 	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
 };
 
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = true,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
 struct cam_camnoc_info cam170_cpas100_camnoc_info = {
 	.specific = &cam_cpas100_camnoc_specific[0],
 	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
@@ -527,6 +536,7 @@
 	.error_logger = &slave_error_logger[0],
 	.error_logger_size = sizeof(slave_error_logger) /
 		sizeof(slave_error_logger[0]),
+	.errata_wa_list = &cam170_cpas100_errata_wa_list,
 };
 
 #endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index f6b0729..27b8504 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -312,13 +312,15 @@
  * @camera_family  : Camera family type. One of
  *                   CAM_FAMILY_CAMERA_SS
  *                   CAM_FAMILY_CPAS_SS
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version   : Camera CPAS version
  *
  * @return 0 on success.
  *
  */
 int cam_cpas_get_hw_info(
 	uint32_t                 *camera_family,
-	struct cam_hw_version    *camera_version);
+	struct cam_hw_version    *camera_version,
+	struct cam_hw_version    *cpas_version);
 
 #endif /* _CAM_CPAS_API_H_ */
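For illustration, a minimal caller-side sketch of the updated cam_cpas_get_hw_info() signature (variable names are illustrative only):

	struct cam_hw_version camera_version = {0};
	struct cam_hw_version cpas_version = {0};
	uint32_t camera_family = 0;
	int rc;

	rc = cam_cpas_get_hw_info(&camera_family, &camera_version,
		&cpas_version);
	if (rc)
		pr_err("Failed to get CPAS HW info, rc=%d\n", rc);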
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 5a4e6e9..a9064fa 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -123,9 +123,6 @@
 	if (req_isp->num_fence_map_out != 0) {
 		CDBG("%s: move request %lld to active list\n", __func__,
 			req->request_id);
-		if (!list_empty(&ctx->active_req_list))
-			pr_err("%s: More than one entry in active list\n",
-				__func__);
 		list_add_tail(&req->list, &ctx->active_req_list);
 	} else {
 		/* no io config, so the request is completed. */
@@ -281,9 +278,14 @@
 	void *evt_data)
 {
 	int rc = 0;
+	struct cam_context        *ctx = ctx_isp->base;
+
 
 	ctx_isp->frame_id++;
-	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	if (list_empty(&ctx->active_req_list))
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CDBG("%s: Still need to wait for the buf done\n", __func__);
 	CDBG("%s: next substate %d\n", __func__,
 		ctx_isp->substate_activated);
 
@@ -568,10 +570,10 @@
 	if (rc) {
 		pr_err("%s: Can not apply the configuration\n", __func__);
 	} else {
-		spin_lock(&ctx->lock);
+		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
 		CDBG("%s: new state %d\n", __func__, next_state);
-		spin_unlock(&ctx->lock);
+		spin_unlock_bh(&ctx->lock);
 	}
 end:
 	return rc;
@@ -743,13 +745,13 @@
 	CDBG("%s: get free request object......\n", __func__);
 
 	/* get free request */
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	if (!list_empty(&ctx->free_req_list)) {
 		req = list_first_entry(&ctx->free_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
 	}
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 
 	if (!req) {
 		pr_err("%s: No more request obj free\n", __func__);
@@ -827,9 +829,9 @@
 	CDBG("%s: Packet request id 0x%llx\n", __func__,
 		packet->header.request_id);
 
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	list_add_tail(&req->list, &ctx->pending_req_list);
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 
 	CDBG("%s: Preprocessing Config %lld successful\n", __func__,
 		req->request_id);
@@ -837,9 +839,9 @@
 	return rc;
 
 free_req:
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	list_add_tail(&req->list, &ctx->free_req_list);
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 end:
 	return rc;
 }
@@ -1084,9 +1086,9 @@
 		(struct cam_isp_context *) ctx->ctx_priv;
 
 	/* Mask off all the incoming hardware events */
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 	CDBG("%s: next substate %d", __func__, ctx_isp->substate_activated);
 
 	/* stop hw first */
@@ -1206,7 +1208,7 @@
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *)ctx->ctx_priv;
 
-	spin_lock(&ctx->lock);
+	spin_lock_bh(&ctx->lock);
 	CDBG("%s: Enter: State %d, Substate %d, evt id %d\n",
 		__func__, ctx->state, ctx_isp->substate_activated, evt_id);
 	if (ctx_isp->substate_machine_irq[ctx_isp->substate_activated].
@@ -1219,7 +1221,7 @@
 	}
 	CDBG("%s: Exit: State %d Substate %d\n",
 		__func__, ctx->state, ctx_isp->substate_activated);
-	spin_unlock(&ctx->lock);
+	spin_unlock_bh(&ctx->lock);
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 4b2db07..2bc4b00 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -23,6 +23,7 @@
 #include "cam_isp_packet_parser.h"
 #include "cam_ife_hw_mgr.h"
 #include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
 
 #undef CDBG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -1919,6 +1920,13 @@
 	if (rc)
 		return rc;
 
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->mgr_common.cmd_iommu_hdl);
+	if (rc) {
+		pr_err("%s: Patch ISP packet failed.\n", __func__);
+		return rc;
+	}
+
 	prepare->num_hw_update_entries = 0;
 	prepare->num_in_map_entries = 0;
 	prepare->num_out_map_entries = 0;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index b608320..3c72279 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -315,15 +315,17 @@
 	struct cam_isp_resource_node       *res;
 	struct cam_ife_hw_mgr_res          *hw_mgr_res;
 	struct cam_isp_hw_get_buf_update    update_buf;
-	uint32_t kmd_buf_remain_size,  i, j, k, out_buf, in_buf,
-		res_id_out, res_id_in, num_plane, io_cfg_used_bytes, num_ent;
+	uint32_t                            kmd_buf_remain_size;
+	uint32_t                            i, j, num_out_buf, num_in_buf;
+	uint32_t                            res_id_out, res_id_in, plane_id;
+	uint32_t                            io_cfg_used_bytes, num_ent;
 	size_t size;
 
 	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
 			&prepare->packet->payload +
 			prepare->packet->io_configs_offset);
-	out_buf = 0;
-	in_buf  = 0;
+	num_out_buf = 0;
+	num_in_buf  = 0;
 	io_cfg_used_bytes = 0;
 
 	/* Max one hw entries required for each base */
@@ -357,17 +359,18 @@
 			CDBG("%s:%d configure output io with fill fence %d\n",
 				__func__, __LINE__, fill_fence);
 			if (fill_fence) {
-				if (out_buf < prepare->max_out_map_entries) {
-					prepare->out_map_entries[out_buf].
+				if (num_out_buf <
+					prepare->max_out_map_entries) {
+					prepare->out_map_entries[num_out_buf].
 						resource_handle =
 							io_cfg[i].resource_type;
-					prepare->out_map_entries[out_buf].
+					prepare->out_map_entries[num_out_buf].
 						sync_id = io_cfg[i].fence;
-					out_buf++;
+					num_out_buf++;
 				} else {
 					pr_err("%s:%d ln_out:%d max_ln:%d\n",
 						__func__, __LINE__,
-						out_buf,
+						num_out_buf,
 						prepare->max_out_map_entries);
 					return -EINVAL;
 				}
@@ -385,23 +388,22 @@
 			CDBG("%s:%d configure input io with fill fence %d\n",
 				__func__, __LINE__, fill_fence);
 			if (fill_fence) {
-				if (in_buf < prepare->max_in_map_entries) {
-					prepare->in_map_entries[in_buf].
+				if (num_in_buf < prepare->max_in_map_entries) {
+					prepare->in_map_entries[num_in_buf].
 						resource_handle =
 							io_cfg[i].resource_type;
-					prepare->in_map_entries[in_buf].
+					prepare->in_map_entries[num_in_buf].
 						sync_id =
 							io_cfg[i].fence;
-					in_buf++;
+					num_in_buf++;
 				} else {
 					pr_err("%s:%d ln_in:%d imax_ln:%d\n",
 						__func__, __LINE__,
-						in_buf,
+						num_in_buf,
 						prepare->max_in_map_entries);
 					return -EINVAL;
 				}
 			}
-			/*TO DO get the input FE address and add to list */
 			continue;
 		} else {
 			pr_err("%s:%d Invalid io config direction :%d\n",
@@ -427,27 +429,36 @@
 			}
 
 			memset(io_addr, 0, sizeof(io_addr));
-			num_plane = 0;
-			for (k = 0; k < CAM_PACKET_MAX_PLANES; k++) {
-				if (!io_cfg[i].mem_handle[k])
-					continue;
 
-				rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[k],
-					iommu_hdl, &io_addr[num_plane], &size);
+			for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
+						plane_id++) {
+				if (!io_cfg[i].mem_handle[plane_id])
+					break;
+
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane_id],
+					iommu_hdl, &io_addr[plane_id], &size);
 				if (rc) {
 					pr_err("%s:%d no io addr for plane%d\n",
-						__func__, __LINE__, k);
+						__func__, __LINE__, plane_id);
 					rc = -ENOMEM;
 					return rc;
 				}
+
+				if (io_addr[plane_id] >> 32) {
+					pr_err("Invalid mapped address\n");
+					rc = -EINVAL;
+					return rc;
+				}
+
 				/* need to update with offset */
-				io_addr[num_plane] += io_cfg->offsets[k];
+				io_addr[plane_id] +=
+						io_cfg[i].offsets[plane_id];
 				CDBG("%s: get io_addr for plane %d: 0x%llx\n",
-					__func__, num_plane,
-					io_addr[num_plane]);
-				num_plane++;
+					__func__, plane_id,
+					io_addr[plane_id]);
 			}
-			if (!num_plane) {
+			if (!plane_id) {
 				pr_err("%s:%d No valid planes for res%d\n",
 					__func__, __LINE__, res->res_id);
 				rc = -ENOMEM;
@@ -471,7 +482,8 @@
 					io_cfg_used_bytes/4;
 			update_buf.cdm.size = kmd_buf_remain_size;
 			update_buf.image_buf = io_addr;
-			update_buf.num_buf   = num_plane;
+			update_buf.num_buf   = plane_id;
+			update_buf.io_cfg    = &io_cfg[i];
 
 			CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
 				__LINE__, update_buf.cdm.cmd_buf_addr,
@@ -509,8 +521,8 @@
 	}
 
 	if (fill_fence) {
-		prepare->num_out_map_entries = out_buf;
-		prepare->num_in_map_entries  = in_buf;
+		prepare->num_out_map_entries = num_out_buf;
+		prepare->num_in_map_entries  = num_in_buf;
 	}
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 3ec9aa6..f09fdc7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -988,8 +988,8 @@
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
 
-	/* select rotate period as  5 frame */
-	val =  5 << 8;
+	/* static frame with split color bar */
+	val =  1 << 5;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
 	/* config pix pattern */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index ea34406..6c6f38b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -149,14 +149,16 @@
  * @Brief:         Get cdm commands for buffer updates.
  *
  * @ cdm:          Command buffer information
- * @ image_buf:    Contain the image buffer information
+ * @ image_buf:    Image buffer address array
  * @ num_buf:      Number of buffers in the image_buf array
+ * @ io_cfg:       IO buffer config information sent from UMD
  *
  */
 struct cam_isp_hw_get_buf_update {
 	struct cam_isp_hw_get_cdm_args  cdm;
 	uint64_t                       *image_buf;
 	uint32_t                        num_buf;
+	struct cam_buf_io_cfg          *io_cfg;
 };
 
 #endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 739a1e7..f6aab7f 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -449,14 +449,14 @@
 		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF)
 			isp_res->irq_handle = cam_irq_controller_subscribe_irq(
 				core_info->vfe_irq_controller,
-				CAM_IRQ_PRIORITY_2,
+				CAM_IRQ_PRIORITY_1,
 				camif_irq_reg_mask, &core_info->irq_payload,
 				cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
 				isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
 		else
 			isp_res->irq_handle = cam_irq_controller_subscribe_irq(
 				core_info->vfe_irq_controller,
-				CAM_IRQ_PRIORITY_2,
+				CAM_IRQ_PRIORITY_1,
 				rdi_irq_reg_mask, &core_info->irq_payload,
 				cam_vfe_irq_top_half, cam_ife_mgr_do_tasklet,
 				isp_res->tasklet_info, cam_tasklet_enqueue_cmd);
@@ -469,7 +469,7 @@
 			pr_err("Error! subscribe irq controller failed\n");
 	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
 		isp_res->irq_handle = cam_irq_controller_subscribe_irq(
-			core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_1,
+			core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_2,
 			bus_irq_reg_mask, &core_info->irq_payload,
 			core_info->vfe_bus->top_half_handler,
 			cam_ife_mgr_do_tasklet_buf_done,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 6e62dcf..5e629b6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -28,6 +28,16 @@
 
 #define FRAME_BASED_EN 0
 
+#define MAX_BUF_UPDATE_REG_NUM   20
+#define MAX_REG_VAL_PAIR_SIZE    \
+		(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
+		do {                                               \
+			buf_array[index++] = offset;               \
+			buf_array[index++] = val;                  \
+		} while (0)
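+/*
+ * Each CAM_VFE_ADD_REG_VAL_PAIR() call appends one (register offset, value)
+ * pair to buf_array and advances index by 2, so index/2 is the number of
+ * register writes later handed to cdm_write_regrandom().
+ */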
+
 static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
 	0x0000205C,
 	0x00002060,
@@ -64,6 +74,8 @@
 	void                                       *bus_irq_controller;
 	void                                       *vfe_irq_controller;
 	struct cam_vfe_bus_ver2_reg_offset_common  *common_reg;
+	uint32_t                                    io_buf_update[
+							MAX_REG_VAL_PAIR_SIZE];
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
@@ -73,6 +85,7 @@
 
 	uint32_t             irq_enabled;
 
+	uint32_t             init_cfg_done;
 	uint32_t             offset;
 	uint32_t             width;
 	uint32_t             height;
@@ -83,10 +96,21 @@
 	uint32_t             burst_len;
 	uint32_t             frame_based;
 
+	uint32_t             en_ubwc;
+	uint32_t             packer_cfg;
+	uint32_t             tile_cfg;
+	uint32_t             h_init;
+	uint32_t             v_init;
+	uint32_t             ubwc_meta_stride;
+	uint32_t             ubwc_mode_cfg;
+	uint32_t             ubwc_meta_offset;
+
 	uint32_t             irq_subsample_period;
 	uint32_t             irq_subsample_pattern;
 	uint32_t             framedrop_period;
 	uint32_t             framedrop_pattern;
+
+	uint32_t             en_cfg;
 };
 
 struct cam_vfe_bus_ver2_comp_grp_data {
@@ -598,15 +622,52 @@
 
 	rsrc_data->width = out_port_info->width;
 	rsrc_data->height = out_port_info->height;
-	if (plane == PLANE_C) {
-		switch (rsrc_data->format) {
-		case CAM_FORMAT_NV21:
-		case CAM_FORMAT_NV12:
-			rsrc_data->height /= 2;
+
+	if (rsrc_data->index < 3) {
+		rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
+		rsrc_data->height = 1;
+		rsrc_data->pack_fmt = 0x0;
+		rsrc_data->en_cfg = 0x3;
+	} else if (rsrc_data->index < 5) {
+		switch (plane) {
+		case PLANE_Y:
+			switch (rsrc_data->format) {
+			case CAM_FORMAT_UBWC_NV12:
+			case CAM_FORMAT_UBWC_NV12_4R:
+			case CAM_FORMAT_UBWC_TP10:
+				rsrc_data->en_ubwc = 1;
+				break;
+			default:
+				break;
+			}
+			break;
+		case PLANE_C:
+			switch (rsrc_data->format) {
+			case CAM_FORMAT_NV21:
+			case CAM_FORMAT_NV12:
+				rsrc_data->height /= 2;
+				break;
+			case CAM_FORMAT_UBWC_NV12:
+			case CAM_FORMAT_UBWC_NV12_4R:
+			case CAM_FORMAT_UBWC_TP10:
+				rsrc_data->height /= 2;
+				rsrc_data->en_ubwc = 1;
+				break;
+			default:
+				break;
+			}
 			break;
 		default:
-			break;
+			pr_err("Invalid plane type %d\n", plane);
+			return -EINVAL;
 		}
+		rsrc_data->pack_fmt = 0xE;
+		rsrc_data->en_cfg = 0x1;
+	} else {
+		rsrc_data->width = rsrc_data->width * 4;
+		rsrc_data->height = rsrc_data->height / 2;
+		rsrc_data->pack_fmt = 0x0;
+		rsrc_data->en_cfg = 0x1;
 	}
 
 	if (vfe_out_res_id >= CAM_ISP_IFE_OUT_RES_RDI_0 &&
@@ -638,7 +699,16 @@
 	rsrc_data->irq_subsample_pattern = 0;
 	rsrc_data->framedrop_period = 0;
 	rsrc_data->framedrop_pattern = 0;
-
+	rsrc_data->packer_cfg = 0;
+	rsrc_data->en_ubwc = 0;
+	rsrc_data->tile_cfg = 0;
+	rsrc_data->h_init = 0;
+	rsrc_data->v_init = 0;
+	rsrc_data->ubwc_meta_stride = 0;
+	rsrc_data->ubwc_mode_cfg = 0;
+	rsrc_data->ubwc_meta_offset = 0;
+	rsrc_data->init_cfg_done = 0;
+	rsrc_data->en_cfg = 0;
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
 	return 0;
@@ -651,52 +721,18 @@
 		wm_res->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t                                    width;
-	uint32_t                                    height;
-	uint32_t                                    pack_fmt;
-	uint32_t                                    stride;
-	uint32_t                                    en_cfg;
-
-	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
-		rsrc_data->width, rsrc_data->height);
-	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
-		rsrc_data->pack_fmt & PACKER_FMT_MAX);
-	CDBG("WM res %d stride = %d, burst len = %d\n",
-		rsrc_data->index, rsrc_data->width, 0xf);
 
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
 
-	if (rsrc_data->index < 3) {
-		width = rsrc_data->width * 5/4 * rsrc_data->height;
-		height = 1;
-		pack_fmt = 0x0;
-		stride = rsrc_data->width * 5/4 * rsrc_data->height;
-		en_cfg = 0x3;
-	} else if (rsrc_data->index < 5) {
-		width = rsrc_data->width;
-		height = rsrc_data->height;
-		pack_fmt = 0xE;
-		stride = rsrc_data->width;
-		en_cfg = 0x1;
-	} else {
-		width = rsrc_data->width * 4;
-		height = rsrc_data->height / 2;
-		pack_fmt = 0x0;
-		stride = rsrc_data->width * 4;
-		en_cfg = 0x1;
-	}
-
-	cam_io_w_mb(width,
+	cam_io_w_mb(rsrc_data->width,
 		common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
-	cam_io_w(height,
+	cam_io_w(rsrc_data->height,
 		common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
-	cam_io_w(pack_fmt,
+	cam_io_w(rsrc_data->pack_fmt,
 		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
-	cam_io_w(stride,
-		common_data->mem_base + rsrc_data->hw_regs->stride);
 
 	cam_io_w(0xFFFFFFFF, common_data->mem_base +
 		rsrc_data->hw_regs->irq_subsample_pattern);
@@ -708,34 +744,14 @@
 	cam_io_w(0x0,
 		common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
 
-	/* UBWC registers */
-	switch (rsrc_data->format) {
-	case CAM_FORMAT_UBWC_NV12:
-		/* Program UBWC registers */
-		break;
-	default:
-		break;
-	}
-
-	/* Subscribe IRQ */
-	if (rsrc_data->irq_enabled) {
-		/*
-		 * Currently all WM IRQ are subscribed in one place. Need to
-		 * make it dynamic later.
-		 */
-	}
-
-	/* Enable WM */
-	cam_io_w_mb(en_cfg, common_data->mem_base + rsrc_data->hw_regs->cfg);
-
 	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
-		width, height);
+		rsrc_data->width, rsrc_data->height);
 	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
-		pack_fmt & PACKER_FMT_MAX);
+		rsrc_data->pack_fmt & PACKER_FMT_MAX);
 	CDBG("WM res %d stride = %d, burst len = %d\n",
-		rsrc_data->index, stride, 0xf);
+		rsrc_data->index, rsrc_data->stride, 0xf);
 	CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
-		(uint32_t) rsrc_data->hw_regs->cfg, en_cfg);
+		(uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -1622,10 +1638,11 @@
 {
 	struct cam_vfe_bus_ver2_priv             *bus_priv;
 	struct cam_isp_hw_get_buf_update         *update_buf;
+	struct cam_buf_io_cfg                    *io_cfg;
 	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
-	uint32_t  reg_val_pair[8];
-	uint32_t i, size = 0;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, size = 0;
 
 	/*
 	 * Need the entire buf io config so we can get the stride info
@@ -1643,14 +1660,181 @@
 		return -EINVAL;
 	}
 
-	if (update_buf->num_buf < vfe_out_data->num_wm) {
+	if (update_buf->num_buf != vfe_out_data->num_wm) {
 		pr_err("Failed! Invalid number buffers:%d required:%d\n",
 			update_buf->num_buf, vfe_out_data->num_wm);
 		return -ENOMEM;
 	}
 
-	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(
-		vfe_out_data->num_wm);
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	io_cfg = update_buf->io_cfg;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+
+		/* For initial configuration program all bus registers */
+		if (wm_data->stride != io_cfg->planes[i].plane_stride ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->stride,
+				io_cfg->planes[i].plane_stride);
+			wm_data->stride = io_cfg->planes[i].plane_stride;
+		}
+		CDBG("image stride 0x%x\n", wm_data->stride);
+
+		if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_pattern,
+				io_cfg->framedrop_pattern);
+			wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
+		}
+		CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+
+		if (wm_data->framedrop_period != io_cfg->framedrop_period ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_period,
+				io_cfg->framedrop_period);
+			wm_data->framedrop_period = io_cfg->framedrop_period;
+		}
+		CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+
+		if (wm_data->irq_subsample_period != io_cfg->subsample_period
+			|| !wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_period,
+				io_cfg->subsample_period);
+			wm_data->irq_subsample_period =
+				io_cfg->subsample_period;
+		}
+		CDBG("irq subsample period 0x%x\n",
+			wm_data->irq_subsample_period);
+
+		if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
+			|| !wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_pattern,
+				io_cfg->subsample_pattern);
+			wm_data->irq_subsample_pattern =
+				io_cfg->subsample_pattern;
+		}
+		CDBG("irq subsample pattern 0x%x\n",
+			wm_data->irq_subsample_pattern);
+
+		if (wm_data->en_ubwc) {
+			if (!wm_data->hw_regs->ubwc_regs) {
+				pr_err("%s: No UBWC register to configure.\n",
+					__func__);
+				return -EINVAL;
+			}
+			if (wm_data->packer_cfg !=
+				io_cfg->planes[i].packer_config ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->packer_cfg,
+					io_cfg->planes[i].packer_config);
+				wm_data->packer_cfg =
+					io_cfg->planes[i].packer_config;
+			}
+			CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+
+			if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
+				|| !wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->tile_cfg,
+					io_cfg->planes[i].tile_config);
+				wm_data->tile_cfg =
+					io_cfg->planes[i].tile_config;
+			}
+			CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+
+			if (wm_data->h_init != io_cfg->planes[i].h_init ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->h_init,
+					io_cfg->planes[i].h_init);
+				wm_data->h_init = io_cfg->planes[i].h_init;
+			}
+			CDBG("h_init 0x%x\n", wm_data->h_init);
+
+			if (wm_data->v_init != io_cfg->planes[i].v_init ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->v_init,
+					io_cfg->planes[i].v_init);
+				wm_data->v_init = io_cfg->planes[i].v_init;
+			}
+			CDBG("v_init 0x%x\n", wm_data->v_init);
+
+			if (wm_data->ubwc_meta_stride !=
+				io_cfg->planes[i].meta_stride ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->
+					meta_stride,
+					io_cfg->planes[i].meta_stride);
+				wm_data->ubwc_meta_stride =
+					io_cfg->planes[i].meta_stride;
+			}
+			CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+
+			if (wm_data->ubwc_mode_cfg !=
+				io_cfg->planes[i].mode_config ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->mode_cfg,
+					io_cfg->planes[i].mode_config);
+				wm_data->ubwc_mode_cfg =
+					io_cfg->planes[i].mode_config;
+			}
+			CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+
+			if (wm_data->ubwc_meta_offset !=
+				io_cfg->planes[i].meta_offset ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->
+					meta_offset,
+					io_cfg->planes[i].meta_offset);
+				wm_data->ubwc_meta_offset =
+					io_cfg->planes[i].meta_offset;
+			}
+			CDBG("ubwc meta offset 0x%x\n",
+				wm_data->ubwc_meta_offset);
+
+			/* UBWC meta address */
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->ubwc_regs->meta_addr,
+				update_buf->image_buf[i]);
+			CDBG("ubwc meta addr 0x%llx\n",
+				update_buf->image_buf[i]);
+		}
+
+		/* WM Image address */
+		if (wm_data->en_ubwc)
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_addr,
+				(update_buf->image_buf[i] +
+				io_cfg->planes[i].meta_size));
+		else
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_addr,
+				update_buf->image_buf[i]);
+
+		CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+
+		/* enable the WM */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->cfg,
+			wm_data->en_cfg);
+
+		/* set initial configuration done */
+		if (!wm_data->init_cfg_done)
+			wm_data->init_cfg_done = 1;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
 
 	/* cdm util returns dwords, need to convert to bytes */
 	if ((size * 4) > update_buf->cdm.size) {
@@ -1659,18 +1843,9 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0 ; i < vfe_out_data->num_wm; i++) {
-		wm_data = vfe_out_data->wm_res[i]->res_priv;
-		reg_val_pair[2 * i] = wm_data->hw_regs->image_addr;
-		reg_val_pair[2 * i + 1] = update_buf->image_buf[i];
-		CDBG("offset 0x%x, value 0x%llx\n",
-			wm_data->hw_regs->image_addr,
-			(uint64_t) update_buf->image_buf[i]);
-	}
-
 	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
-		update_buf->cdm.cmd_buf_addr,
-		vfe_out_data->num_wm, reg_val_pair);
+		update_buf->cdm.cmd_buf_addr, j/2, reg_val_pair);
+
 	/* cdm util returns dwords, need to convert to bytes */
 	update_buf->cdm.used_bytes = size * 4;
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index 9c030ab..f47b1dc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -751,23 +751,17 @@
 	memset(tbl.bufq[idx].hdls, 0,
 		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
 
-	CDBG("Ion handle at idx = %d freeing = %pK, fd = %d\n",
-		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd);
+	CDBG("Ion handle at idx = %d freeing = %pK, fd = %d, imported %d\n",
+		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
+		tbl.bufq[idx].is_imported);
 
-	if (tbl.bufq[idx].i_hdl && !tbl.bufq[idx].is_imported) {
-		CDBG("Freeing up non-imported buffer at fd = %d, hdl = %pK",
-			tbl.bufq[idx].fd,
-			tbl.bufq[idx].i_hdl);
+	if (tbl.bufq[idx].i_hdl) {
 		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
 		tbl.bufq[idx].i_hdl = NULL;
-	} else {
-		CDBG("Not freeing up imported buffer at fd = %d",
-			tbl.bufq[idx].fd);
 	}
 
 	tbl.bufq[idx].fd = -1;
 	tbl.bufq[idx].is_imported = false;
-	tbl.bufq[idx].i_hdl = NULL;
 	tbl.bufq[idx].len = 0;
 	tbl.bufq[idx].num_hdl = 0;
 	mutex_unlock(&tbl.bufq[idx].q_lock);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index c837232..4888e5b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -531,7 +531,8 @@
 	rc = camera_io_dev_read(
 		&(s_ctrl->io_master_info),
 		slave_info->sensor_id_reg_addr,
-		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD);
+		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+		CAMERA_SENSOR_I2C_TYPE_WORD);
 
 	CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
 			__func__, __LINE__, chipid, slave_info->sensor_id);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index bdae1d1..6292a9f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -5,4 +5,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 1261c4b..06e8104 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -75,4 +75,64 @@
 	enum camera_sensor_i2c_type addr_type,
 	uint32_t delay_ms);
 
-#endif /* _CAM_SENSOR_I2C_H_ */
+
+/**
+ * cam_qup_i2c_read : QUP based I2C read
+ * @client    : QUP I2C client structure
+ * @addr      : I2C address
+ * @data      : I2C data
+ * @addr_type : I2C address type
+ * @data_type : I2C data type
+ *
+ * This API handles QUP I2C read
+ */
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * cam_qup_i2c_read_seq : QUP based I2C sequential read
+ * @client    : QUP I2C client structure
+ * @addr      : I2C address
+ * @data      : I2C data
+ * @addr_type : I2C address type
+ * @num_byte  : number of bytes to read
+ *
+ * This API handles QUP I2C sequential read
+ */
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte);
+
+/**
+ * cam_qup_i2c_poll : QUP based I2C poll operation
+ * @client    : QUP I2C client structure
+ * @addr      : I2C address
+ * @data      : I2C data
+ * @data_mask : I2C data mask
+ * @data_type : I2C data type
+ * @addr_type : I2C address type
+ * @delay_ms  : Delay in milliseconds
+ *
+ * This API implements QUP based I2C poll
+ */
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_write_table : QUP based I2C random write
+ * @client        : QUP I2C client structure
+ * @write_setting : I2C register settings
+ *
+ * This API handles QUP I2C random write
+ */
+
+int32_t cam_qup_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+#endif /* _CAM_SENSOR_I2C_H_ */
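For illustration, a minimal usage sketch of the new QUP read helper (the probed struct i2c_client pointer and the 0x0016 register address are assumptions, not part of this change):

	uint32_t chipid = 0;
	int rc;

	/* Read a 16-bit value from a 16-bit register address over QUP I2C */
	rc = cam_qup_i2c_read(client, 0x0016, &chipid,
		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
	if (rc < 0)
		pr_err("QUP register read failed, rc=%d\n", rc);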
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index f889abc..3e1b331 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -29,6 +29,10 @@
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_poll(io_master_info->cci_client,
 			addr, data, mask, data_type, addr_type, delay_ms);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_poll(io_master_info->client,
+			addr, data, data_mask, addr_type, data_type,
+			delay_ms);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
@@ -38,6 +42,7 @@
 
 int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	if (!io_master_info) {
@@ -47,7 +52,10 @@
 
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_read(io_master_info->cci_client,
-			addr, data, data_type, data_type);
+			addr, data, addr_type, data_type);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_read(io_master_info->client,
+			addr, data, addr_type, data_type);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
@@ -67,6 +75,9 @@
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_write_table(io_master_info,
 			write_setting);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_write_table(io_master_info,
+			write_setting);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index 757ac17..f721afd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -38,16 +38,33 @@
  * @io_master_info: I2C/SPI master information
  * @addr: I2C address
  * @data: I2C data
+ * @addr_type: I2C address type
  * @data_type: I2C data type
  *
  * This API abstracts read functionality based on master type
  */
 int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
 /**
  * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C address type
+ * @num_bytes: number of bytes
+ *
+ * This API abstracts sequential read functionality based on master type
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_bytes);
+
+
+/**
+ * @io_master_info: I2C/SPI master information
  *
  * This API initializes the I2C/SPI master based on master type
  */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
new file mode 100644
index 0000000..b25b1855
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_io.h"
+
+#define I2C_REG_DATA_MAX       (8*1024)
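+/* I2C_REG_MAX_BUF_SIZE: worst-case register write of 4 address + 4 data bytes */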
+#define I2C_REG_MAX_BUF_SIZE   8
+
+static int32_t cam_qup_i2c_rxdata(
+	struct i2c_client *dev_client, unsigned char *rxdata,
+	enum camera_sensor_i2c_type addr_type,
+	int data_length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->addr >> 1;
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = addr_type,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = data_length,
+			.buf   = rxdata,
+		},
+	};
+	rc = i2c_transfer(dev_client->adapter, msgs, 2);
+	if (rc < 0)
+		pr_err("%s:failed 0x%x\n", __func__, saddr);
+	return rc;
+}
+
+
+static int32_t cam_qup_i2c_txdata(
+	struct camera_io_master *dev_client, unsigned char *txdata,
+	int length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->client->addr >> 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+	if (rc < 0)
+		pr_err("%s: failed 0x%x\n", __func__, saddr);
+	return rc;
+}
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = -EINVAL;
+	unsigned char *buf = NULL;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("ERR: %s Failed with addr/data_type verification\n",
+			__func__);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + data_type, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> 8;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
+	if (rc < 0) {
+		pr_err("%s fail\n", __func__);
+		goto read_fail;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+read_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte)
+{
+	int32_t rc = -EFAULT;
+	unsigned char *buf = NULL;
+	int i;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("ERR: %s Failed with addr_type verification\n",
+			__func__);
+		return rc;
+	}
+
+	if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
+		pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
+			__func__, num_byte, I2C_REG_DATA_MAX);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> BITS_PER_BYTE;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
+	if (rc < 0) {
+		pr_err("%s fail\n", __func__);
+		goto read_seq_fail;
+	}
+
+	for (i = 0; i < num_byte; i++)
+		data[i] = buf[i];
+
+read_seq_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+static int32_t cam_qup_i2c_compare(struct i2c_client *client,
+	uint32_t addr, uint32_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_qup_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;
+
+	reg_data = reg_data & 0xFFFF;
+	if (data != (reg_data & ~data_mask))
+		return I2C_COMPARE_MISMATCH;
+
+	return I2C_COMPARE_MATCH;
+}
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms)
+{
+	int32_t rc = 0;
+	int i = 0;
+
+	if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
+		pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+			__func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return -EINVAL;
+
+	for (i = 0; i < delay_ms; i++) {
+		rc = cam_qup_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (rc == I2C_COMPARE_MATCH)
+			return rc;
+
+		usleep_range(1000, 1010);
+	}
+	/* If rc is MISMATCH, the read succeeded but the polled value never matched */
+	if (rc == I2C_COMPARE_MISMATCH)
+		pr_err("%s:%d poll failed rc=%d (non-fatal)\n",
+			__func__, __LINE__, rc);
+	if (rc < 0)
+		pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+
+	return rc;
+}
+
+static int32_t cam_qup_i2c_write(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_array *reg_setting,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = 0;
+	unsigned char buf[I2C_REG_MAX_BUF_SIZE];
+	uint8_t len = 0;
+
+	CDBG("%s reg addr = 0x%x data type: %d\n",
+			  __func__, reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len, buf[len]);
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		pr_err("%s: Invalid I2C addr type\n", __func__);
+		return -EINVAL;
+	}
+
+	CDBG("Data: 0x%x\n", reg_setting->reg_data);
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[len] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		len += 1;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[len] = reg_setting->reg_data >> 8;
+		buf[len+1] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		len += 2;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[len] = reg_setting->reg_data >> 16;
+		buf[len + 1] = reg_setting->reg_data >> 8;
+		buf[len + 2] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+		len += 3;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[len] = reg_setting->reg_data >> 24;
+		buf[len + 1] = reg_setting->reg_data >> 16;
+		buf[len + 2] = reg_setting->reg_data >> 8;
+		buf[len + 3] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+		CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+		len += 4;
+	} else {
+		pr_err("%s: Invalid Data Type\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		pr_err("%s fail\n", __func__);
+	return rc;
+}
+
+int32_t cam_qup_i2c_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = -EINVAL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		CDBG("%s addr 0x%x data 0x%x\n", __func__,
+			reg_setting->reg_addr, reg_setting->reg_data);
+
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 901632a..96f40e1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -82,7 +82,7 @@
 		sync_cb->sync_obj = sync_obj;
 		INIT_WORK(&sync_cb->cb_dispatch_work,
 			cam_sync_util_cb_dispatch);
-
+		list_add_tail(&sync_cb->list, &row->callback_list);
 		sync_cb->status = row->state;
 		queue_work(sync_dev->work_queue,
 			&sync_cb->cb_dispatch_work);
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
index 1e42f75..0ffea5b 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
@@ -44,6 +44,7 @@
 	struct hfi_mem msg_q;
 	struct hfi_mem dbg_q;
 	struct hfi_mem sec_heap;
+	struct hfi_mem shmem;
 	void __iomem *icp_base;
 };
 
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
index 4315865..15e0315 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -19,6 +19,8 @@
 #include <asm/errno.h>
 #include <linux/timer.h>
 #include <media/cam_icp.h>
+#include <linux/iopoll.h>
+
 #include "cam_io_util.h"
 #include "hfi_reg.h"
 #include "hfi_sys_defs.h"
@@ -336,7 +338,7 @@
 		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
 	} else {
 		cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
-			ICP_FLAG_CSR_WAKE_UP_EN,
+			ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI,
 			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
 	}
 
@@ -460,8 +462,10 @@
 	}
 
 	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
-	cam_io_w((uint32_t)0x7400000, icp_base + HFI_REG_SHARED_MEM_PTR);
-	cam_io_w((uint32_t)0x6400000, icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
 	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
 		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
 	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
@@ -472,25 +476,17 @@
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
 	pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
 
-	do {
-		msleep(500);
-		status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
-	} while (status != ICP_INIT_RESP_SUCCESS);
-
-	if (status == ICP_INIT_RESP_SUCCESS) {
-		g_hfi->hfi_state = FW_RESP_DONE;
-		rc = 0;
-	} else {
-		rc = -ENODEV;
-		pr_err("FW initialization failed");
+	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+		status, (status == ICP_INIT_RESP_SUCCESS), 15, 200);
+	if (rc) {
+		pr_err("timed out , status = %u\n", status);
 		goto regions_fail;
 	}
 
 	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
 	g_hfi->hfi_state = FW_START_SENT;
 
-	pr_debug("fw version : %u[%x]\n", fw_version, fw_version);
-	pr_debug("hfi init is successful\n");
+	HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
 	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 140542b..43491a9 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,6 +26,8 @@
 #include <linux/debugfs.h>
 #include <media/cam_defs.h>
 #include <media/cam_icp.h>
+#include <linux/debugfs.h>
+
 #include "cam_sync_api.h"
 #include "cam_packet_util.h"
 #include "cam_hw.h"
@@ -55,6 +57,23 @@
 
 static struct cam_icp_hw_mgr icp_hw_mgr;
 
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+	icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+	if (!icp_hw_mgr.dentry)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("a5_debug",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.a5_debug)) {
+		debugfs_remove_recursive(icp_hw_mgr.dentry);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
 {
 	struct cam_hw_intf *a5_dev_intf = NULL;
@@ -568,7 +587,12 @@
 	uint64_t kvaddr;
 	size_t len;
 
-	pr_err("Allocating FW for iommu handle: %x\n", icp_hw_mgr.iommu_hdl);
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_MEM_MGR_REGION_SHARED,
+		&icp_hw_mgr.hfi_mem.shmem);
+	if (rc)
+		return -ENOMEM;
+
 	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
 		&iova, &kvaddr, &len);
 	if (rc < 0) {
@@ -764,7 +788,7 @@
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("timeout/err in iconfig command: %d\n", rc);
+		pr_err("FW response timeout: %d\n", rc);
 	}
 
 	return rc;
@@ -870,6 +894,7 @@
 
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
+	debugfs_remove_recursive(icp_hw_mgr.dentry);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
@@ -886,6 +911,8 @@
 	struct cam_icp_a5_set_irq_cb irq_cb;
 	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
 	struct hfi_mem_info hfi_mem;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	if (!hw_mgr) {
@@ -1014,9 +1041,12 @@
 	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
 	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
 
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
 	rc = cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
-		false);
+		hw_mgr->a5_debug);
 	if (rc < 0) {
 		pr_err("hfi_init is failed\n");
 		goto set_irq_failed;
@@ -1033,7 +1063,13 @@
 		NULL, 0);
 
 	ICP_DBG("Wait for INIT DONE Message\n");
-	wait_for_completion(&hw_mgr->a5_complete);
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+		goto set_irq_failed;
+	}
 
 	ICP_DBG("Done Waiting for INIT DONE Message\n");
 
@@ -1041,6 +1077,10 @@
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_POWER_COLLAPSE,
 		NULL, 0);
+	if (rc) {
+		pr_err("icp power collapse failed\n");
+		goto set_irq_failed;
+	}
 
 	hw_mgr->fw_download = true;
 
@@ -1428,6 +1468,8 @@
 	int rc = 0;
 	struct hfi_cmd_work_data *task_data;
 	struct hfi_cmd_ipebps_async ioconfig_cmd;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 
 	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
 	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
@@ -1451,7 +1493,13 @@
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
 	ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
 
 	return rc;
 }
@@ -1462,6 +1510,8 @@
 {
 	struct hfi_cmd_create_handle create_handle;
 	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	create_handle.size = sizeof(struct hfi_cmd_create_handle);
@@ -1479,7 +1529,13 @@
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
 
 	return rc;
 }
@@ -1489,6 +1545,8 @@
 {
 	struct hfi_cmd_ping_pkt ping_pkt;
 	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
@@ -1505,7 +1563,14 @@
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
+
 
 	return rc;
 }
@@ -1929,6 +1994,9 @@
 	if (!icp_hw_mgr.msg_work_data)
 		goto msg_work_data_failed;
 
+	rc = cam_icp_hw_mgr_create_debugfs_entry();
+	if (rc)
+		goto msg_work_data_failed;
 
 	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
 		icp_hw_mgr.msg_work->task.pool[i].payload =
@@ -1940,7 +2008,6 @@
 
 	init_completion(&icp_hw_mgr.a5_complete);
 
-	pr_err("Exit\n");
 	return rc;
 
 msg_work_data_failed:
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e5ffa7a..32d796a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -23,6 +23,8 @@
 #include "hfi_session_defs.h"
 #include "cam_req_mgr_workq.h"
 #include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
 
 #define CAM_ICP_ROLE_PARENT     1
 #define CAM_ICP_ROLE_CHILD      2
@@ -56,6 +58,7 @@
 	struct cam_mem_mgr_memory_desc dbg_q;
 	struct cam_mem_mgr_memory_desc sec_heap;
 	struct cam_mem_mgr_memory_desc fw_buf;
+	struct cam_smmu_region_info shmem;
 };
 
 /**
@@ -176,6 +179,8 @@
 	struct hfi_cmd_work_data *cmd_work_data;
 	struct hfi_msg_work_data *msg_work_data;
 	uint32_t ctxt_cnt;
+	struct dentry *dentry;
+	bool a5_debug;
 };
 
 #endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 44a29aa..a850bc0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -1021,6 +1021,7 @@
 {
 	int i, size, ret = 0;
 	char name[32];
+	struct sched_param param = { .sched_priority = 5 };
 
 	size = sizeof(struct sde_rot_queue) * mgr->queue_count;
 	mgr->commitq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
@@ -1031,11 +1032,21 @@
 		snprintf(name, sizeof(name), "rot_commitq_%d_%d",
 				mgr->device->id, i);
 		SDEROT_DBG("work queue name=%s\n", name);
-		mgr->commitq[i].rot_work_queue =
-			alloc_ordered_workqueue("%s",
-				WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
-		if (!mgr->commitq[i].rot_work_queue) {
+		kthread_init_worker(&mgr->commitq[i].rot_kw);
+		mgr->commitq[i].rot_thread = kthread_run(kthread_worker_fn,
+				&mgr->commitq[i].rot_kw, name);
+		if (IS_ERR(mgr->commitq[i].rot_thread)) {
 			ret = -EPERM;
+			mgr->commitq[i].rot_thread = NULL;
+			break;
+		}
+
+		ret = sched_setscheduler(mgr->commitq[i].rot_thread,
+			SCHED_FIFO, &param);
+		if (ret) {
+			SDEROT_ERR(
+				"failed to set kthread priority for commitq %d\n",
+				ret);
 			break;
 		}
 
@@ -1052,10 +1063,21 @@
 		snprintf(name, sizeof(name), "rot_doneq_%d_%d",
 				mgr->device->id, i);
 		SDEROT_DBG("work queue name=%s\n", name);
-		mgr->doneq[i].rot_work_queue = alloc_ordered_workqueue("%s",
-				WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
-		if (!mgr->doneq[i].rot_work_queue) {
+		kthread_init_worker(&mgr->doneq[i].rot_kw);
+		mgr->doneq[i].rot_thread = kthread_run(kthread_worker_fn,
+				&mgr->doneq[i].rot_kw, name);
+		if (IS_ERR(mgr->doneq[i].rot_thread)) {
 			ret = -EPERM;
+			mgr->doneq[i].rot_thread = NULL;
+			break;
+		}
+
+		ret = sched_setscheduler(mgr->doneq[i].rot_thread,
+			SCHED_FIFO, &param);
+		if (ret) {
+			SDEROT_ERR(
+				"failed to set kthread priority for doneq %d\n",
+				ret);
 			break;
 		}
 
@@ -1071,18 +1093,20 @@
 
 	if (mgr->commitq) {
 		for (i = 0; i < mgr->queue_count; i++) {
-			if (mgr->commitq[i].rot_work_queue)
-				destroy_workqueue(
-					mgr->commitq[i].rot_work_queue);
+			if (mgr->commitq[i].rot_thread) {
+				kthread_flush_worker(&mgr->commitq[i].rot_kw);
+				kthread_stop(mgr->commitq[i].rot_thread);
+			}
 		}
 		devm_kfree(mgr->device, mgr->commitq);
 		mgr->commitq = NULL;
 	}
 	if (mgr->doneq) {
 		for (i = 0; i < mgr->queue_count; i++) {
-			if (mgr->doneq[i].rot_work_queue)
-				destroy_workqueue(
-					mgr->doneq[i].rot_work_queue);
+			if (mgr->doneq[i].rot_thread) {
+				kthread_flush_worker(&mgr->doneq[i].rot_kw);
+				kthread_stop(mgr->doneq[i].rot_thread);
+			}
 		}
 		devm_kfree(mgr->device, mgr->doneq);
 		mgr->doneq = NULL;
@@ -1203,7 +1227,7 @@
 
 		if (entry->item.ts)
 			entry->item.ts[SDE_ROTATOR_TS_QUEUE] = ktime_get();
-		queue_work(queue->rot_work_queue, &entry->commit_work);
+		kthread_queue_work(&queue->rot_kw, &entry->commit_work);
 	}
 }
 
@@ -1423,12 +1447,13 @@
  *
  * Note this asynchronous handler is protected by hal lock.
  */
-static void sde_rotator_commit_handler(struct work_struct *work)
+static void sde_rotator_commit_handler(struct kthread_work *work)
 {
 	struct sde_rot_entry *entry;
 	struct sde_rot_entry_container *request;
 	struct sde_rot_hw_resource *hw;
 	struct sde_rot_mgr *mgr;
+	struct sched_param param = { .sched_priority = 5 };
 	int ret;
 
 	entry = container_of(work, struct sde_rot_entry, commit_work);
@@ -1439,6 +1464,12 @@
 		return;
 	}
 
+	ret = sched_setscheduler(entry->fenceq->rot_thread, SCHED_FIFO, &param);
+	if (ret) {
+		SDEROT_WARN("Fail to set kthread priority for fenceq: %d\n",
+				ret);
+	}
+
 	mgr = entry->private->mgr;
 
 	SDEROT_EVTLOG(
@@ -1514,7 +1545,7 @@
 
 	SDEROT_EVTLOG(entry->item.session_id, 1);
 
-	queue_work(entry->doneq->rot_work_queue, &entry->done_work);
+	kthread_queue_work(&entry->doneq->rot_kw, &entry->done_work);
 	sde_rot_mgr_unlock(mgr);
 	return;
 error:
@@ -1526,8 +1557,8 @@
 	sde_rotator_release_entry(mgr, entry);
 	atomic_dec(&request->pending_count);
 	atomic_inc(&request->failed_count);
-	if (request->retireq && request->retire_work)
-		queue_work(request->retireq, request->retire_work);
+	if (request->retire_kw && request->retire_work)
+		kthread_queue_work(request->retire_kw, request->retire_work);
 	sde_rot_mgr_unlock(mgr);
 }
 
@@ -1541,7 +1572,7 @@
  *
  * Note this asynchronous handler is protected by hal lock.
  */
-static void sde_rotator_done_handler(struct work_struct *work)
+static void sde_rotator_done_handler(struct kthread_work *work)
 {
 	struct sde_rot_entry *entry;
 	struct sde_rot_entry_container *request;
@@ -1606,8 +1637,8 @@
 	ATRACE_INT("sde_rot_done", 1);
 	sde_rotator_release_entry(mgr, entry);
 	atomic_dec(&request->pending_count);
-	if (request->retireq && request->retire_work)
-		queue_work(request->retireq, request->retire_work);
+	if (request->retire_kw && request->retire_work)
+		kthread_queue_work(request->retire_kw, request->retire_work);
 	if (entry->item.ts)
 		entry->item.ts[SDE_ROTATOR_TS_RETIRE] = ktime_get();
 	sde_rot_mgr_unlock(mgr);
@@ -1966,8 +1997,10 @@
 
 		entry->request = req;
 
-		INIT_WORK(&entry->commit_work, sde_rotator_commit_handler);
-		INIT_WORK(&entry->done_work, sde_rotator_done_handler);
+		kthread_init_work(&entry->commit_work,
+				sde_rotator_commit_handler);
+		kthread_init_work(&entry->done_work,
+				sde_rotator_done_handler);
 		SDEROT_DBG(
 			"Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%x dst{%u,%u,%u,%u}f=%x session_id=%u\n",
 			item->wb_idx,
@@ -2016,8 +2049,8 @@
 		sde_rot_mgr_unlock(mgr);
 		for (i = req->count - 1; i >= 0; i--) {
 			entry = req->entries + i;
-			cancel_work_sync(&entry->commit_work);
-			cancel_work_sync(&entry->done_work);
+			kthread_cancel_work_sync(&entry->commit_work);
+			kthread_cancel_work_sync(&entry->done_work);
 		}
 		sde_rot_mgr_lock(mgr);
 		SDEROT_DBG("cancel work done\n");
@@ -2134,7 +2167,7 @@
 
 	sde_rot_mgr_unlock(mgr);
 	for (i = 0; i < req->count; i++)
-		flush_work(&req->entries[i].commit_work);
+		kthread_flush_work(&req->entries[i].commit_work);
 	sde_rot_mgr_lock(mgr);
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 7b8a066..731ff1e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -21,7 +21,7 @@
 #include <linux/types.h>
 #include <linux/cdev.h>
 #include <linux/pm_runtime.h>
-#include <linux/completion.h>
+#include <linux/kthread.h>
 
 #include "sde_rotator_base.h"
 #include "sde_rotator_util.h"
@@ -230,7 +230,8 @@
 };
 
 struct sde_rot_queue {
-	struct workqueue_struct *rot_work_queue;
+	struct kthread_worker rot_kw;
+	struct task_struct *rot_thread;
 	struct sde_rot_timeline *timeline;
 	struct sde_rot_hw_resource *hw;
 };
@@ -253,8 +254,8 @@
 	u32 count;
 	atomic_t pending_count;
 	atomic_t failed_count;
-	struct workqueue_struct *retireq;
-	struct work_struct *retire_work;
+	struct kthread_worker *retire_kw;
+	struct kthread_work *retire_work;
 	bool finished;
 	struct sde_rot_entry *entries;
 };
@@ -284,8 +285,8 @@
  */
 struct sde_rot_entry {
 	struct sde_rotation_item item;
-	struct work_struct commit_work;
-	struct work_struct done_work;
+	struct kthread_work commit_work;
+	struct kthread_work done_work;
 	struct sde_rot_queue *commitq;
 	struct sde_rot_queue *fenceq;
 	struct sde_rot_queue *doneq;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 2e91d54..f2778b0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -54,8 +54,8 @@
 #define SDE_ROTATOR_DEGREE_180		180
 #define SDE_ROTATOR_DEGREE_90		90
 
-static void sde_rotator_submit_handler(struct work_struct *work);
-static void sde_rotator_retire_handler(struct work_struct *work);
+static void sde_rotator_submit_handler(struct kthread_work *work);
+static void sde_rotator_retire_handler(struct kthread_work *work);
 #ifdef CONFIG_COMPAT
 static long sde_rotator_compat_ioctl32(struct file *file,
 	unsigned int cmd, unsigned long arg);
@@ -467,8 +467,8 @@
 			SDEDEV_DBG(rot_dev->dev, "cancel request s:%d\n",
 					ctx->session_id);
 			mutex_unlock(q->lock);
-			cancel_work_sync(&request->submit_work);
-			cancel_work_sync(&request->retire_work);
+			kthread_cancel_work_sync(&request->submit_work);
+			kthread_cancel_work_sync(&request->retire_work);
 			mutex_lock(q->lock);
 			spin_lock(&ctx->list_lock);
 			list_del_init(&request->list);
@@ -926,9 +926,9 @@
 	for (i = 0 ; i < ARRAY_SIZE(ctx->requests); i++) {
 		struct sde_rotator_request *request = &ctx->requests[i];
 
-		INIT_WORK(&request->submit_work,
+		kthread_init_work(&request->submit_work,
 				sde_rotator_submit_handler);
-		INIT_WORK(&request->retire_work,
+		kthread_init_work(&request->retire_work,
 				sde_rotator_retire_handler);
 		request->ctx = ctx;
 		INIT_LIST_HEAD(&request->list);
@@ -965,14 +965,16 @@
 
 	snprintf(name, sizeof(name), "rot_fenceq_%d_%d", rot_dev->dev->id,
 			ctx->session_id);
-	ctx->work_queue.rot_work_queue = alloc_ordered_workqueue("%s",
-			WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
-	if (!ctx->work_queue.rot_work_queue) {
-		SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate workqueue\n");
+	kthread_init_worker(&ctx->work_queue.rot_kw);
+	ctx->work_queue.rot_thread = kthread_run(kthread_worker_fn,
+			&ctx->work_queue.rot_kw, name);
+	if (IS_ERR(ctx->work_queue.rot_thread)) {
+		SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate kthread\n");
 		ret = -EPERM;
+		ctx->work_queue.rot_thread = NULL;
 		goto error_alloc_workqueue;
 	}
-	SDEDEV_DBG(ctx->rot_dev->dev, "work queue name=%s\n", name);
+	SDEDEV_DBG(ctx->rot_dev->dev, "kthread name=%s\n", name);
 
 	snprintf(name, sizeof(name), "%d_%d", rot_dev->dev->id,
 			ctx->session_id);
@@ -1022,7 +1024,8 @@
 error_open_session:
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	destroy_workqueue(ctx->work_queue.rot_work_queue);
+	kthread_flush_worker(&ctx->work_queue.rot_kw);
+	kthread_stop(ctx->work_queue.rot_thread);
 error_alloc_workqueue:
 	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
 error_create_sysfs:
@@ -1072,7 +1075,7 @@
 
 		SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n",
 				session_id);
-		cancel_work_sync(&request->submit_work);
+		kthread_cancel_work_sync(&request->submit_work);
 	}
 	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
 	sde_rot_mgr_lock(rot_dev->mgr);
@@ -1085,12 +1088,13 @@
 
 		SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n",
 				session_id);
-		cancel_work_sync(&request->retire_work);
+		kthread_cancel_work_sync(&request->retire_work);
 	}
 	mutex_lock(&rot_dev->lock);
 	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
 	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	destroy_workqueue(ctx->work_queue.rot_work_queue);
+	kthread_flush_worker(&ctx->work_queue.rot_kw);
+	kthread_stop(ctx->work_queue.rot_thread);
 	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
 	kobject_put(&ctx->kobj);
 	if (ctx->file) {
@@ -1609,7 +1613,7 @@
 		} else {
 			SDEROT_ERR("invalid stats timestamp\n");
 		}
-		req->retireq = ctx->work_queue.rot_work_queue;
+		req->retire_kw = &ctx->work_queue.rot_kw;
 		req->retire_work = &request->retire_work;
 
 		trace_rot_entry_fence(
@@ -2719,7 +2723,7 @@
  *
  * This function is scheduled in work queue context.
  */
-static void sde_rotator_retire_handler(struct work_struct *work)
+static void sde_rotator_retire_handler(struct kthread_work *work)
 {
 	struct vb2_v4l2_buffer *src_buf;
 	struct vb2_v4l2_buffer *dst_buf;
@@ -2909,7 +2913,7 @@
 		goto error_init_request;
 	}
 
-	req->retireq = ctx->work_queue.rot_work_queue;
+	req->retire_kw = &ctx->work_queue.rot_kw;
 	req->retire_work = &request->retire_work;
 
 	ret = sde_rotator_handle_request_common(
@@ -2938,7 +2942,7 @@
  *
  * This function is scheduled in work queue context.
  */
-static void sde_rotator_submit_handler(struct work_struct *work)
+static void sde_rotator_submit_handler(struct kthread_work *work)
 {
 	struct sde_rotator_ctx *ctx;
 	struct sde_rotator_device *rot_dev;
@@ -3203,7 +3207,7 @@
 			list_del_init(&request->list);
 			list_add_tail(&request->list, &ctx->pending_list);
 			spin_unlock(&ctx->list_lock);
-			queue_work(ctx->work_queue.rot_work_queue,
+			kthread_queue_work(&ctx->work_queue.rot_kw,
 					&request->submit_work);
 		}
 	} else if (request && !atomic_read(&request->req->pending_count)) {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index 100ce27..627ea86 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -23,6 +23,7 @@
 #include <linux/msm-bus.h>
 #include <linux/platform_device.h>
 #include <linux/soc/qcom/llcc-qcom.h>
+#include <linux/kthread.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-ctrls.h>
@@ -95,8 +96,8 @@
  */
 struct sde_rotator_request {
 	struct list_head list;
-	struct work_struct submit_work;
-	struct work_struct retire_work;
+	struct kthread_work submit_work;
+	struct kthread_work retire_work;
 	struct sde_rot_entry_container *req;
 	struct sde_rotator_ctx *ctx;
 	bool committed;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 17fa2cc..b582934 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -681,9 +681,13 @@
 /**
  * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
  * on provided session_id. Each rotator has a different session_id.
+ * @rot: Pointer to rotator hw
+ * @session_id: Identifier for rotator session
+ * @sequence_id: Identifier for rotation request within the session
+ * @q_id: Rotator queue identifier
  */
 static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
-		struct sde_hw_rotator *rot, u32 session_id,
+		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
 		enum sde_rot_queue_prio q_id)
 {
 	int i;
@@ -692,10 +696,12 @@
 	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
 		ctx = rot->rotCtx[q_id][i];
 
-		if (ctx && (ctx->session_id == session_id)) {
+		if (ctx && (ctx->session_id == session_id) &&
+				(ctx->sequence_id == sequence_id)) {
 			SDEROT_DBG(
-				"rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
-				q_id, i, ctx, ctx->session_id);
+				"rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
+				q_id, i, ctx, ctx->session_id,
+				ctx->sequence_id);
 			return ctx;
 		}
 	}
@@ -2114,6 +2120,7 @@
  * @rot: Pointer to rotator hw
  * @hw: Pointer to rotator resource
  * @session_id: Session identifier of this context
+ * @sequence_id: Sequence identifier of this request
  * @sbuf_mode: true if stream buffer is requested
  *
  * This function allocates a new rotator context for the given session id.
@@ -2122,6 +2129,7 @@
 		struct sde_hw_rotator *rot,
 		struct sde_rot_hw_resource *hw,
 		u32    session_id,
+		u32    sequence_id,
 		bool   sbuf_mode)
 {
 	struct sde_hw_rotator_context *ctx;
@@ -2136,6 +2144,7 @@
 	ctx->rot        = rot;
 	ctx->q_id       = hw->wb_id;
 	ctx->session_id = session_id;
+	ctx->sequence_id = sequence_id;
 	ctx->hwres      = hw;
 	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
 	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
@@ -2226,7 +2235,7 @@
 	item = &entry->item;
 
 	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
-			item->output.sbuf);
+			item->sequence_id, item->output.sbuf);
 	if (!ctx) {
 		SDEROT_ERR("Failed allocating rotator context!!\n");
 		return -EINVAL;
@@ -2486,7 +2495,8 @@
 	rot = resinfo->rot;
 
 	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+			entry->item.sequence_id, hw->wb_id);
 	if (!ctx) {
 		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
 				entry->item.session_id);
@@ -2523,7 +2533,8 @@
 	rot = resinfo->rot;
 
 	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+			entry->item.sequence_id, hw->wb_id);
 	if (!ctx) {
 		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
 				entry->item.session_id);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 22eaa3f..67f7f4b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -204,6 +204,7 @@
  * ram segment size allocation. Each rotator context can be any priority. A
  * incremental timestamp is used to identify and assigned to each context.
  * @list: list of pending context
+ * @sequence_id: unique sequence identifier for rotation request
  * @sbuf_mode: true if stream buffer is requested
  * @start_ctrl: start control register update value
  * @sys_cache_mode: sys cache mode register update value
@@ -216,6 +217,7 @@
 	struct sde_rot_hw_resource *hwres;
 	enum   sde_rot_queue_prio q_id;
 	u32    session_id;
+	u32    sequence_id;
 	u32    *regdma_base;
 	u32    *regdma_wrptr;
 	u32    timestamp;
@@ -402,7 +404,7 @@
 	spin_lock_irqsave(&rot->rotisr_lock, flags);
 	rot->rotCtx[ctx->q_id][idx] = ctx;
 	if (ctx->sbuf_mode)
-		list_add_tail(&rot->sbuf_ctx[ctx->q_id], &ctx->list);
+		list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
 	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
 	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 88250e1..8d54e20 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1369,13 +1369,13 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_CONFIG_VPE_OPERATIONS:
+	case HAL_PARAM_VPE_ROTATION:
 	{
-		struct hfi_operations_type *hfi;
-		struct hal_operations *prop =
-			(struct hal_operations *) pdata;
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
-		hfi = (struct hfi_operations_type *) &pkt->rg_property_data[1];
+		struct hfi_vpe_rotation_type *hfi;
+		struct hal_vpe_rotation *prop =
+			(struct hal_vpe_rotation *) pdata;
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
+		hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
 		switch (prop->rotate) {
 		case HAL_ROTATE_NONE:
 			hfi->rotation = HFI_ROTATE_NONE;
@@ -1411,7 +1411,7 @@
 			rc = -EINVAL;
 			break;
 		}
-		pkt->size += sizeof(u32) + sizeof(struct hfi_operations_type);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_vpe_rotation_type);
 		break;
 	}
 	case HAL_PARAM_VENC_INTRA_REFRESH:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 89e8356..f678f56 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -110,6 +110,8 @@
 	struct hfi_profile_level *profile_level;
 	struct hfi_bit_depth *pixel_depth;
 	struct hfi_pic_struct *pic_struct;
+	struct hfi_buffer_requirements *buf_req;
+	struct hfi_index_extradata_input_crop_payload *crop_info;
 	u32 entropy_mode = 0;
 	u8 *data_ptr;
 	int prop_id;
@@ -231,6 +233,41 @@
 				data_ptr +=
 					sizeof(u32);
 				break;
+			case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+				data_ptr = data_ptr + sizeof(u32);
+				buf_req =
+					(struct hfi_buffer_requirements *)
+						data_ptr;
+				event_notify.capture_buf_count =
+					buf_req->buffer_count_min;
+				dprintk(VIDC_DBG,
+					"Capture Count : 0x%x\n",
+						event_notify.capture_buf_count);
+				data_ptr +=
+					sizeof(struct hfi_buffer_requirements);
+				break;
+			case HFI_INDEX_EXTRADATA_INPUT_CROP:
+				data_ptr = data_ptr + sizeof(u32);
+				crop_info = (struct
+				hfi_index_extradata_input_crop_payload *)
+						data_ptr;
+				event_notify.crop_data.left = crop_info->left;
+				event_notify.crop_data.top = crop_info->top;
+				event_notify.crop_data.width = crop_info->width;
+				event_notify.crop_data.height =
+					crop_info->height;
+				dprintk(VIDC_DBG,
+					"CROP info : Left = %d Top = %d\n",
+						crop_info->left,
+						crop_info->top);
+				dprintk(VIDC_DBG,
+					"CROP info : Width = %d Height = %d\n",
+						crop_info->width,
+						crop_info->height);
+				data_ptr +=
+					sizeof(struct
+					hfi_index_extradata_input_crop_payload);
+				break;
 			default:
 				dprintk(VIDC_ERR,
 					"%s cmd: %#x not supported\n",
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 074ea4fa..b116622 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -74,6 +74,14 @@
 			goto mem_map_failed;
 		}
 
+		/* Check if the dmabuf size matches expected size */
+		if (buf->size < *buffer_size) {
+			rc = -EINVAL;
+			dprintk(VIDC_ERR,
+				"Size mismatch! Dmabuf size: %zu Expected Size: %lu",
+				buf->size, *buffer_size);
+			goto mem_buf_size_mismatch;
+		}
 		/* Prepare a dma buf for dma on the given device */
 		attach = dma_buf_attach(buf, cb->dev);
 		if (IS_ERR_OR_NULL(attach)) {
@@ -151,6 +159,7 @@
 	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
 mem_map_table_failed:
 	dma_buf_detach(buf, attach);
+mem_buf_size_mismatch:
 mem_buf_attach_failed:
 	dma_buf_put(buf);
 mem_map_failed:
@@ -201,12 +210,12 @@
 	}
 }
 
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
+static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
 		struct msm_smem *mem, enum hal_buffer buffer_type)
 {
 	struct ion_handle *hndl = NULL;
 	ion_phys_addr_t iova = 0;
-	unsigned long buffer_size = 0;
+	unsigned long buffer_size = size;
 	int rc = 0;
 	unsigned long align = SZ_4K;
 	unsigned long ion_flags = 0;
@@ -217,10 +226,11 @@
 	dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
 	if (IS_ERR_OR_NULL(hndl)) {
 		dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
-				client, fd, offset, hndl);
+				client, fd, size, hndl);
 		rc = -ENOMEM;
 		goto fail_import_fd;
 	}
+
 	mem->kvaddr = NULL;
 	rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
 	if (rc) {
@@ -441,7 +451,7 @@
 	ion_client_destroy(client->clnt);
 }
 
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
 		enum hal_buffer buffer_type)
 {
 	struct smem_client *client = clt;
@@ -459,7 +469,7 @@
 	}
 	switch (client->mem_type) {
 	case SMEM_ION:
-		rc = ion_user_to_kernel(clt, fd, offset, mem, buffer_type);
+		rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
 		break;
 	default:
 		dprintk(VIDC_ERR, "Mem type not supported\n");
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 7802d31..5c34f28 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -225,6 +225,14 @@
 	return 0;
 }
 
+static int msm_v4l2_g_crop(struct file *file, void *fh,
+			struct v4l2_crop *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_g_crop(vidc_inst, a);
+}
+
 static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
 				struct v4l2_frmsizeenum *fsize)
 {
@@ -265,6 +273,7 @@
 	.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
 	.vidioc_s_parm = msm_v4l2_s_parm,
 	.vidioc_g_parm = msm_v4l2_g_parm,
+	.vidioc_g_crop = msm_v4l2_g_crop,
 	.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
 };
 
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index aa5f18d..d44684e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1117,7 +1117,7 @@
 	struct hal_h264_entropy_control h264_entropy_control;
 	struct hal_intra_period intra_period;
 	struct hal_idr_period idr_period;
-	struct hal_operations operations;
+	struct hal_vpe_rotation vpe_rotation;
 	struct hal_intra_refresh intra_refresh;
 	struct hal_multi_slice_control multi_slice_control;
 	struct hal_h264_db_control h264_db_control;
@@ -1345,19 +1345,12 @@
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
 	{
-		if (!(inst->capability.pixelprocess_capabilities &
-			HAL_VIDEO_ENCODER_ROTATION_CAPABILITY)) {
-			dprintk(VIDC_ERR, "Rotation not supported: %#x\n",
-				ctrl->id);
-			rc = -ENOTSUPP;
-			break;
-		}
-		property_id = HAL_CONFIG_VPE_OPERATIONS;
-		operations.rotate = msm_comm_v4l2_to_hal(
+		property_id = HAL_PARAM_VPE_ROTATION;
+		vpe_rotation.rotate = msm_comm_v4l2_to_hal(
 				V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
 				ctrl->val);
-		operations.flip = HAL_FLIP_NONE;
-		pdata = &operations;
+		vpe_rotation.flip = HAL_FLIP_NONE;
+		pdata = &vpe_rotation;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 6253632..2e952a3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -265,6 +265,29 @@
 }
 EXPORT_SYMBOL(msm_vidc_s_ctrl);
 
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *crop)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !crop)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		dprintk(VIDC_ERR,
+			"Session = %pK : Encoder Crop is not implemented yet\n",
+				inst);
+		return -EPERM;
+	}
+
+	crop->c.left = inst->prop.crop_info.left;
+	crop->c.top = inst->prop.crop_info.top;
+	crop->c.width = inst->prop.crop_info.width;
+	crop->c.height = inst->prop.crop_info.height;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_g_crop);
+
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
 {
 	struct msm_vidc_inst *inst = instance;
@@ -534,7 +557,7 @@
 
 	handle = msm_comm_smem_user_to_kernel(inst,
 				p->reserved[0],
-				p->reserved[1],
+				p->length,
 				buffer_type);
 	if (!handle) {
 		dprintk(VIDC_ERR,
@@ -605,8 +628,10 @@
 		goto exit;
 	}
 
-	dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
-			binfo, b->m.planes[0].reserved[0], b->type);
+	dprintk(VIDC_DBG,
+		"[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
+		binfo, b->m.planes[0].reserved[0],
+		b->m.planes[0].length, b->type);
 
 	for (i = 0; i < b->length; ++i) {
 		rc = 0;
@@ -878,6 +903,7 @@
 	struct buffer_info *bi, *dummy;
 	int i, rc = 0;
 	int found_buf = 0;
+	struct vb2_buf_entry *temp, *next;
 
 	if (!inst)
 		return -EINVAL;
@@ -936,6 +962,16 @@
 	default:
 		break;
 	}
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == buffer_type) {
+			list_del(&temp->list);
+			kfree(temp);
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
 	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_release_buffer);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index a52fe05..05af186 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -371,7 +371,7 @@
 	struct msm_vidc_inst *temp;
 	struct msm_vidc_core *core;
 	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_frame;
+	unsigned long mbs_per_second;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
@@ -394,14 +394,21 @@
 
 	list_for_each_entry(temp, &core->instances, list) {
 
-		mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+		if (!temp ||
+				temp->state < MSM_VIDC_START_DONE ||
+				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+			continue;
+
+		mbs_per_second = msm_comm_get_inst_load(temp,
+				LOAD_CALC_NO_QUIRKS);
+
 		cycles = temp->clk_data.entry->vpp_cycles;
-		if (inst->session_type == MSM_VIDC_ENCODER)
+		if (temp->session_type == MSM_VIDC_ENCODER)
 			cycles = temp->flags & VIDC_LOW_POWER ?
-				inst->clk_data.entry->low_power_cycles :
+				temp->clk_data.entry->low_power_cycles :
 				cycles;
 
-		load = cycles * mbs_per_frame;
+		load = cycles * mbs_per_second;
 
 		ops_left = load ? (freq_left / load) : 0;
 		/* Convert remaining operating rate to Q16 format */
@@ -418,7 +425,7 @@
 				ctrl->name, ctrl->default_value, ctrl->val);
 			v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
 				ctrl->val + ops_left, ctrl->step,
-				ctrl->minimum);
+				ctrl->default_value);
 			dprintk(VIDC_DBG,
 				"%s: Updated Range = %lld --> %lld\n",
 				ctrl->name, ctrl->minimum, ctrl->maximum);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index b1a8e8b..873a338 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1166,12 +1166,12 @@
 	if (!rc) {
 		dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
 				SESSION_MSG_INDEX(cmd));
-		msm_comm_kill_session(inst);
 		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 		dprintk(VIDC_ERR,
 			"sess resp timeout can potentially crash the system\n");
 		msm_comm_print_debug_info(inst);
 		msm_vidc_handle_hw_error(inst->core);
+		msm_comm_kill_session(inst);
 		rc = -EIO;
 	} else {
 		rc = 0;
@@ -1554,6 +1554,14 @@
 	inst->entropy_mode = event_notify->entropy_mode;
 	inst->profile = event_notify->profile;
 	inst->level = event_notify->level;
+	inst->prop.crop_info.left =
+		event_notify->crop_data.left;
+	inst->prop.crop_info.top =
+		event_notify->crop_data.top;
+	inst->prop.crop_info.height =
+		event_notify->crop_data.height;
+	inst->prop.crop_info.width =
+		event_notify->crop_data.width;
 
 	ptr = (u32 *)seq_changed_event.u.data;
 	ptr[0] = event_notify->height;
@@ -1561,6 +1569,10 @@
 	ptr[2] = event_notify->bit_depth;
 	ptr[3] = event_notify->pic_struct;
 	ptr[4] = event_notify->colour_space;
+	ptr[5] = event_notify->crop_data.top;
+	ptr[6] = event_notify->crop_data.left;
+	ptr[7] = event_notify->crop_data.height;
+	ptr[8] = event_notify->crop_data.width;
 
 	dprintk(VIDC_DBG,
 		"Event payload: height = %d width = %d\n",
@@ -1571,6 +1583,13 @@
 		event_notify->bit_depth, event_notify->pic_struct,
 			event_notify->colour_space);
 
+	dprintk(VIDC_DBG,
+		"Event payload: CROP top = %d left = %d Height = %d Width = %d\n",
+			event_notify->crop_data.top,
+			event_notify->crop_data.left,
+			event_notify->crop_data.height,
+			event_notify->crop_data.width);
+
 	mutex_lock(&inst->lock);
 	inst->in_reconfig = true;
 	inst->reconfig_height = event_notify->height;
@@ -4245,14 +4264,13 @@
 			__func__, inst,
 			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
 		inst->state = MSM_VIDC_CORE_INVALID;
-		msm_comm_kill_session(inst);
 		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 		dprintk(VIDC_ERR,
 			"SESS_PROP timeout can potentially crash the system\n");
-		if (inst->core->resources.debug_timeout)
-			msm_comm_print_debug_info(inst);
+		msm_comm_print_debug_info(inst);
 
 		msm_vidc_handle_hw_error(inst->core);
+		msm_comm_kill_session(inst);
 		rc = -ETIMEDOUT;
 		goto exit;
 	} else {
@@ -5099,30 +5117,30 @@
 	if (input_height > output_height) {
 		if (input_height > x_min * output_height) {
 			dprintk(VIDC_ERR,
-				"Unsupported height downscale ratio %d vs %d\n",
-				input_height/output_height, x_min);
+				"Unsupported height min height %d vs %d\n",
+				input_height / x_min, output_height);
 			return -ENOTSUPP;
 		}
 	} else {
 		if (output_height > x_max * input_height) {
 			dprintk(VIDC_ERR,
-				"Unsupported height upscale ratio %d vs %d\n",
-				input_height/output_height, x_max);
+				"Unsupported height max height %d vs %d\n",
+				x_max * input_height, output_height);
 			return -ENOTSUPP;
 		}
 	}
 	if (input_width > output_width) {
 		if (input_width > y_min * output_width) {
 			dprintk(VIDC_ERR,
-				"Unsupported width downscale ratio %d vs %d\n",
-				input_width/output_width, y_min);
+				"Unsupported width min width %d vs %d\n",
+				input_width / y_min, output_width);
 			return -ENOTSUPP;
 		}
 	} else {
 		if (output_width > y_max * input_width) {
 			dprintk(VIDC_ERR,
-				"Unsupported width upscale ratio %d vs %d\n",
-				input_width/output_width, y_max);
+				"Unsupported width max width %d vs %d\n",
+				y_max * input_width, output_width);
 			return -ENOTSUPP;
 		}
 	}
@@ -5189,6 +5207,10 @@
 			capability->width.max, capability->height.max);
 			rc = -ENOTSUPP;
 		}
+
+		if (!rc && msm_vidc_check_scaling_supported(inst))
+			rc = -ENOTSUPP;
 	}
 	if (rc) {
 		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index 8ffbf50..c197776 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -12,6 +12,7 @@
  */
 
 #define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
 #include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
 
@@ -134,21 +135,36 @@
 
 static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
 		size_t count, loff_t *ppos) {
-	u32 ssr_trigger_val;
-	int rc;
+	unsigned long ssr_trigger_val = 0;
+	int rc = 0;
 	struct msm_vidc_core *core = filp->private_data;
+	size_t size = MAX_SSR_STRING_LEN;
+	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
 
 	if (!buf)
 		return -EINVAL;
 
-	rc = kstrtou32(buf, 0, &ssr_trigger_val);
-	if (rc < 0) {
+	if (!count)
+		goto exit;
+
+	if (count < size)
+		size = count;
+
+	if (copy_from_user(kbuf, buf, size)) {
+		dprintk(VIDC_WARN, "%s User memory fault\n", __func__);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+	if (rc) {
 		dprintk(VIDC_WARN, "returning error err %d\n", rc);
 		rc = -EINVAL;
 	} else {
 		msm_vidc_trigger_ssr(core, ssr_trigger_val);
 		rc = count;
 	}
+exit:
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index 8fd895d..f4c851a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -186,7 +186,7 @@
 {
 	bool enable_fatal;
 
-	enable_fatal = core->resources.debug_timeout;
+	enable_fatal = msm_vidc_debug_timeout;
 
 	/* Video driver can decide FATAL handling of HW errors
 	 * based on multiple factors. This condition check will
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 37bccbd..5edd3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -175,9 +175,17 @@
 	struct video_device vdev;
 };
 
+struct session_crop {
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
 struct session_prop {
 	u32 width[MAX_PORT_NUM];
 	u32 height[MAX_PORT_NUM];
+	struct session_crop crop_info;
 	u32 fps;
 	u32 bitrate;
 };
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index d259072..19ca561 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -969,7 +969,7 @@
 	res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
 			"qcom,debug-timeout");
 
-	res->debug_timeout |= msm_vidc_debug_timeout;
+	msm_vidc_debug_timeout |= res->debug_timeout;
 
 	of_property_read_u32(pdev->dev.of_node,
 			"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 8968764..6139e46 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1049,8 +1049,12 @@
 	}
 
 	dprintk(VIDC_DBG, "Suspending Venus\n");
-	rc = flush_delayed_work(&venus_hfi_pm_work);
+	flush_delayed_work(&venus_hfi_pm_work);
 
+	mutex_lock(&device->lock);
+	if (device->power_enabled)
+		rc = -EBUSY;
+	mutex_unlock(&device->lock);
 	return rc;
 }
 
@@ -4168,7 +4172,7 @@
 	struct venus_hfi_device *device = dev;
 	u32 smem_block_size = 0;
 	u8 *smem_table_ptr;
-	char version[VENUS_VERSION_LENGTH];
+	char version[VENUS_VERSION_LENGTH] = "";
 	const u32 smem_image_index_venus = 14 * 128;
 
 	if (!device || !fw_info) {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index a2f076b..86e4f42 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -154,7 +154,7 @@
 	HAL_PARAM_VENC_SESSION_QP_RANGE,
 	HAL_CONFIG_VENC_INTRA_PERIOD,
 	HAL_CONFIG_VENC_IDR_PERIOD,
-	HAL_CONFIG_VPE_OPERATIONS,
+	HAL_PARAM_VPE_ROTATION,
 	HAL_PARAM_VENC_INTRA_REFRESH,
 	HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
 	HAL_SYS_DEBUG_CONFIG,
@@ -634,7 +634,7 @@
 	HAL_UNUSED_FLIP = 0x10000000,
 };
 
-struct hal_operations {
+struct hal_vpe_rotation {
 	enum hal_rotate rotate;
 	enum hal_flip flip;
 };
@@ -1019,7 +1019,7 @@
 	struct hal_quantization_range quantization_range;
 	struct hal_intra_period intra_period;
 	struct hal_idr_period idr_period;
-	struct hal_operations operations;
+	struct hal_vpe_rotation vpe_rotation;
 	struct hal_intra_refresh intra_refresh;
 	struct hal_multi_slice_control multi_slice_control;
 	struct hal_debug_config debug_config;
@@ -1212,6 +1212,16 @@
 	} data;
 };
 
+struct hal_index_extradata_input_crop_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
 struct msm_vidc_cb_event {
 	u32 device_id;
 	void *session_id;
@@ -1227,6 +1237,8 @@
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	u32 capture_buf_count;
+	struct hal_index_extradata_input_crop_payload crop_data;
 };
 
 struct msm_vidc_cb_data_done {
@@ -1314,16 +1326,6 @@
 	int num_sessions;
 };
 
-struct hal_index_extradata_input_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 width;
-	u32 height;
-};
-
 struct hal_cmd_sys_get_property_packet {
 	u32 size;
 	u32 packet_type;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 2d4a573..616fc09 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -327,8 +327,6 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
 #define  HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE                \
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
-	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
 #define  HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
 #define  HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME			\
@@ -344,13 +342,15 @@
 #define HFI_PROPERTY_CONFIG_VENC_SESSION_QP			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
 
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define HFI_PROPERTY_PARAM_VPE_ROTATION				\
+	(HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x001)
 
 #define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
 #define  HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE		\
 	(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
-	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
 
 struct hfi_pic_struct {
 	u32 progressive_only;
@@ -472,7 +472,7 @@
 	u32 idr_period;
 };
 
-struct hfi_operations_type {
+struct hfi_vpe_rotation_type {
 	u32 rotation;
 	u32 flip;
 };
@@ -716,12 +716,7 @@
 
 #define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
 #define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
-#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x3)
-
-struct hfi_operations {
-	u32 rotate;
-	u32 flip;
-};
+#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x4)
 
 #define HFI_RESOURCE_SYSCACHE 0x00000002
 
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index fbaf05e..27249ee 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1926,6 +1926,18 @@
 	case WCD934X_ANA_MBHC_ELECT:
 	case WCD934X_ANA_MBHC_ZDET:
 	case WCD934X_ANA_MICB2:
+	case WCD934X_CODEC_RPM_CLK_MCLK_CFG:
+	case WCD934X_CLK_SYS_MCLK_PRG:
+	case WCD934X_CHIP_TIER_CTRL_EFUSE_CTL:
+	case WCD934X_ANA_BIAS:
+	case WCD934X_ANA_BUCK_CTL:
+	case WCD934X_ANA_RCO:
+	case WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL:
+	case WCD934X_CODEC_RPM_CLK_GATE:
+	case WCD934X_BIAS_VBG_FINE_ADJ:
+	case WCD934X_CODEC_CPR_SVS_CX_VDD:
+	case WCD934X_CODEC_CPR_SVS2_CX_VDD:
+	case WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
 		return true;
 	}
 
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0ac1cf7..e203ba6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -776,7 +776,7 @@
 
 config UID_SYS_STATS
 	bool "Per-UID statistics"
-	depends on PROFILING
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
 	help
 	  Per UID based cpu time statistics exported to /proc/uid_cputime
 	  Per UID based io statistics exported to /proc/uid_io
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 77080cc..afa2113 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -155,11 +155,8 @@
 
 	/* Do this outside the status_mutex to avoid a circular dependency with
 	 * the locking in cxl_mmap_fault() */
-	if (copy_from_user(&work, uwork,
-			   sizeof(struct cxl_ioctl_start_work))) {
-		rc = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&work, uwork, sizeof(work)))
+		return -EFAULT;
 
 	mutex_lock(&ctx->status_mutex);
 	if (ctx->status != OPENED) {
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index a217a74..224c710 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -1066,13 +1066,16 @@
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-	if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+	if (adapter->native->err_virq == 0 ||
+	    adapter->native->err_virq !=
+	    irq_find_mapping(NULL, adapter->native->err_hwirq))
 		return;
 
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->native->err_virq, adapter);
 	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
 	kfree(adapter->irq_name);
+	adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1102,13 +1105,15 @@
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+	if (afu->serr_virq == 0 ||
+	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
 		return;
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
 	kfree(afu->err_irq_name);
+	afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1131,12 +1136,15 @@
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-	if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+	if (afu->native->psl_virq == 0 ||
+	    afu->native->psl_virq !=
+	    irq_find_mapping(NULL, afu->native->psl_hwirq))
 		return;
 
 	cxl_unmap_irq(afu->native->psl_virq, afu);
 	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
 	kfree(afu->psl_irq_name);
+	afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index dbe676d..0c98ed4 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -678,8 +678,10 @@
 {
 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	u8 version = mei_me_cl_ver(cldev->me_cl);
 
-	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
+	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
+			 cldev->name, uuid, version);
 }
 static DEVICE_ATTR_RO(modalias);
 
diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
index b97a584..d20b518 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
@@ -17,7 +17,6 @@
 #include "q6audio_common.h"
 #include "audio_utils_aio.h"
 #include <sound/msm-audio-effects-q6-v2.h>
-#include <sound/msm-dts-eagle.h>
 
 #define MAX_CHANNELS_SUPPORTED		8
 #define WAIT_TIMEDOUT_DURATION_SECS	1
@@ -53,31 +52,11 @@
 		pr_err("%s: audio client null to init pp\n", __func__);
 		return;
 	}
-	switch (ac->topology) {
-	case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
-
-		ret = q6asm_set_softvolume_v2(ac, &softvol,
-					      SOFT_VOLUME_INSTANCE_1);
-		if (ret < 0)
-			pr_err("%s: Send SoftVolume1 Param failed ret=%d\n",
-				__func__, ret);
-		ret = q6asm_set_softvolume_v2(ac, &softvol,
-					      SOFT_VOLUME_INSTANCE_2);
-		if (ret < 0)
-			pr_err("%s: Send SoftVolume2 Param failed ret=%d\n",
-				 __func__, ret);
-
-		msm_dts_eagle_init_master_module(ac);
-
-		break;
-	default:
-		ret = q6asm_set_softvolume_v2(ac, &softvol,
-					      SOFT_VOLUME_INSTANCE_1);
-		if (ret < 0)
-			pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-				__func__, ret);
-		break;
-	}
+	ret = q6asm_set_softvolume_v2(ac, &softvol,
+				      SOFT_VOLUME_INSTANCE_1);
+	if (ret < 0)
+		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
+			__func__, ret);
 }
 
 static void audio_effects_deinit_pp(struct audio_client *ac)
@@ -86,13 +65,6 @@
 		pr_err("%s: audio client null to deinit pp\n", __func__);
 		return;
 	}
-	switch (ac->topology) {
-	case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
-		msm_dts_eagle_deinit_master_module(ac);
-		break;
-	default:
-		break;
-	}
 }
 
 static void audio_effects_event_handler(uint32_t opcode, uint32_t token,
@@ -149,6 +121,8 @@
 	case AUDIO_START: {
 		pr_debug("%s: AUDIO_START\n", __func__);
 
+		mutex_lock(&effects->lock);
+
 		rc = q6asm_open_read_write_v2(effects->ac,
 					FORMAT_LINEAR_PCM,
 					FORMAT_MULTI_CHANNEL_LINEAR_PCM,
@@ -160,6 +134,7 @@
 			pr_err("%s: Open failed for hw accelerated effects:rc=%d\n",
 				__func__, rc);
 			rc = -EINVAL;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 		effects->opened = 1;
@@ -176,6 +151,7 @@
 			pr_err("%s: Write buffer Allocation failed rc = %d\n",
 				__func__, rc);
 			rc = -ENOMEM;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 		atomic_set(&effects->in_count, effects->config.input.num_buf);
@@ -186,6 +162,7 @@
 			pr_err("%s: Read buffer Allocation failed rc = %d\n",
 				__func__, rc);
 			rc = -ENOMEM;
+			mutex_unlock(&effects->lock);
 			goto readbuf_fail;
 		}
 		atomic_set(&effects->out_count, effects->config.output.num_buf);
@@ -200,6 +177,7 @@
 		if (rc < 0) {
 			pr_err("%s: pcm read block config failed\n", __func__);
 			rc = -EINVAL;
+			mutex_unlock(&effects->lock);
 			goto cfg_fail;
 		}
 		pr_debug("%s: dec: sample_rate: %d, num_channels: %d, bit_width: %d\n",
@@ -214,6 +192,7 @@
 			pr_err("%s: pcm write format block config failed\n",
 				__func__);
 			rc = -EINVAL;
+			mutex_unlock(&effects->lock);
 			goto cfg_fail;
 		}
 
@@ -226,6 +205,7 @@
 			effects->started = 0;
 			pr_err("%s: ASM run state failed\n", __func__);
 		}
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_WRITE: {
@@ -287,8 +267,11 @@
 		uint32_t idx = 0;
 		uint32_t size = 0;
 
+		mutex_lock(&effects->lock);
+
 		if (!effects->started) {
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 
@@ -305,11 +288,13 @@
 		if (!rc) {
 			pr_err("%s: read wait_event_timeout\n", __func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 		if (!atomic_read(&effects->in_count)) {
 			pr_err("%s: pcm stopped in_count 0\n", __func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			goto ioctl_fail;
 		}
 
@@ -317,15 +302,18 @@
 		if (bufptr) {
 			if (!((void *)arg)) {
 				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
 				goto ioctl_fail;
 			}
 			if ((effects->config.buf_cfg.input_len > size) ||
 				copy_to_user((void *)arg, bufptr,
 					  effects->config.buf_cfg.input_len)) {
 				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
 				goto ioctl_fail;
 			}
 		}
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	default:
@@ -414,34 +402,6 @@
 			      &(effects->audio_effects.topo_switch_vol),
 			      (long *)&values[1], SOFT_VOLUME_INSTANCE_2);
 		break;
-	case DTS_EAGLE_MODULE_ENABLE:
-		pr_debug("%s: DTS_EAGLE_MODULE_ENABLE\n", __func__);
-		if (msm_audio_effects_is_effmodule_supp_in_top(
-			effects_module, effects->ac->topology)) {
-			/*
-			 * HPX->OFF: first disable HPX and then
-			 * enable SA+
-			 * HPX->ON: first disable SA+ and then
-			 * enable HPX
-			 */
-			bool hpx_state = (bool)values[1];
-
-			if (hpx_state)
-				msm_audio_effects_enable_extn(effects->ac,
-					&(effects->audio_effects),
-					false);
-			msm_dts_eagle_enable_asm(effects->ac,
-				hpx_state,
-				AUDPROC_MODULE_ID_DTS_HPX_PREMIX);
-			msm_dts_eagle_enable_asm(effects->ac,
-				hpx_state,
-				AUDPROC_MODULE_ID_DTS_HPX_POSTMIX);
-			if (!hpx_state)
-				msm_audio_effects_enable_extn(effects->ac,
-					&(effects->audio_effects),
-					true);
-		}
-		break;
 	default:
 		pr_err("%s: Invalid effects config module\n", __func__);
 		rc = -EINVAL;
@@ -459,6 +419,7 @@
 	switch (cmd) {
 	case AUDIO_SET_EFFECTS_CONFIG: {
 		pr_debug("%s: AUDIO_SET_EFFECTS_CONFIG\n", __func__);
+		mutex_lock(&effects->lock);
 		memset(&effects->config, 0, sizeof(effects->config));
 		if (copy_from_user(&effects->config, (void *)arg,
 				   sizeof(effects->config))) {
@@ -476,6 +437,7 @@
 			 effects->config.input.num_buf,
 			 effects->config.input.sample_rate,
 			 effects->config.input.num_channels);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_SET_BUF_LEN: {
@@ -497,6 +459,7 @@
 
 		buf_avail.input_num_avail = atomic_read(&effects->in_count);
 		buf_avail.output_num_avail = atomic_read(&effects->out_count);
+		mutex_lock(&effects->lock);
 		pr_debug("%s: write buf avail: %d, read buf avail: %d\n",
 			 __func__, buf_avail.output_num_avail,
 			 buf_avail.input_num_avail);
@@ -506,16 +469,20 @@
 				__func__);
 			rc = -EFAULT;
 		}
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_SET_PP_PARAMS: {
+		mutex_lock(&effects->lock);
 		if (copy_from_user(argvalues, (void *)arg,
 				   MAX_PP_PARAMS_SZ*sizeof(long))) {
 			pr_err("%s: copy from user for pp params failed\n",
 				__func__);
+			mutex_unlock(&effects->lock);
 			return -EFAULT;
 		}
 		rc = audio_effects_set_pp_param(effects, argvalues);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	default:
@@ -582,12 +549,14 @@
 		struct msm_hwacc_effects_config32 config32;
 		struct msm_hwacc_effects_config *config = &effects->config;
 
+		mutex_lock(&effects->lock);
 		memset(&effects->config, 0, sizeof(effects->config));
 		if (copy_from_user(&config32, (void *)arg,
 				   sizeof(config32))) {
 			pr_err("%s: copy to user for AUDIO_SET_EFFECTS_CONFIG failed\n",
 				__func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			break;
 		}
 		config->input.buf_size = config32.input.buf_size;
@@ -624,17 +593,20 @@
 			 effects->config.input.num_buf,
 			 effects->config.input.sample_rate,
 			 effects->config.input.num_channels);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_SET_BUF_LEN32: {
 		struct msm_hwacc_buf_cfg32 buf_cfg32;
 		struct msm_hwacc_effects_config *config = &effects->config;
 
+		mutex_lock(&effects->lock);
 		if (copy_from_user(&buf_cfg32, (void *)arg,
 				   sizeof(buf_cfg32))) {
 			pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n",
 				__func__);
 			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
 			break;
 		}
 		config->buf_cfg.input_len = buf_cfg32.input_len;
@@ -642,6 +614,7 @@
 		pr_debug("%s: write buf len: %d, read buf len: %d\n",
 			 __func__, effects->config.buf_cfg.output_len,
 			 effects->config.buf_cfg.input_len);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_GET_BUF_AVAIL32: {
@@ -649,6 +622,7 @@
 
 		memset(&buf_avail, 0, sizeof(buf_avail));
 
+		mutex_lock(&effects->lock);
 		buf_avail.input_num_avail = atomic_read(&effects->in_count);
 		buf_avail.output_num_avail = atomic_read(&effects->out_count);
 		pr_debug("%s: write buf avail: %d, read buf avail: %d\n",
@@ -660,22 +634,26 @@
 				__func__);
 			rc = -EFAULT;
 		}
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_EFFECTS_SET_PP_PARAMS32: {
 		long argvalues[MAX_PP_PARAMS_SZ] = {0};
 		int argvalues32[MAX_PP_PARAMS_SZ] = {0};
 
+		mutex_lock(&effects->lock);
 		if (copy_from_user(argvalues32, (void *)arg,
 				   MAX_PP_PARAMS_SZ*sizeof(int))) {
 			pr_err("%s: copy from user failed for pp params\n",
 				__func__);
+			mutex_unlock(&effects->lock);
 			return -EFAULT;
 		}
 		for (i = 0; i < MAX_PP_PARAMS_SZ; i++)
 			argvalues[i] = argvalues32[i];
 
 		rc = audio_effects_set_pp_param(effects, argvalues);
+		mutex_unlock(&effects->lock);
 		break;
 	}
 	case AUDIO_START32: {
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 4c9fa8f..80f6e57 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -858,6 +858,7 @@
 	long rc;
 	struct msm_audio_event32 usr_evt_32;
 	struct msm_audio_event usr_evt;
+	memset(&usr_evt, 0, sizeof(struct msm_audio_event));
 
 	if (copy_from_user(&usr_evt_32, arg,
 				sizeof(struct msm_audio_event32))) {
@@ -867,6 +868,11 @@
 	usr_evt.timeout_ms = usr_evt_32.timeout_ms;
 
 	rc = audio_aio_process_event_req_common(audio, &usr_evt);
+	if (rc < 0) {
+		pr_err("%s: audio process event failed, rc = %ld\n",
+			__func__, rc);
+		return rc;
+	}
 
 	usr_evt_32.event_type = usr_evt.event_type;
 	switch (usr_evt_32.event_type) {
diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
index c964dcb..3da46b6 100644
--- a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
+++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
@@ -179,7 +179,7 @@
 };
 
 /* The opened devices container */
-static int s_opened_devs[MAX_DEVS_NUMBER];
+static atomic_t s_opened_devs[MAX_DEVS_NUMBER];
 
 static struct wakeup_source usf_wakeup_source;
 
@@ -2348,14 +2348,11 @@
 	uint16_t ind = 0;
 
 	for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
-		if (minor == s_opened_devs[ind]) {
+		if (minor == atomic_cmpxchg(&s_opened_devs[ind], 0, minor)) {
 			pr_err("%s: device %d is already opened\n",
 			       __func__, minor);
 			return USF_UNDEF_DEV_ID;
-		}
-
-		if (s_opened_devs[ind] == 0) {
-			s_opened_devs[ind] = minor;
+		} else {
 			pr_debug("%s: device %d is added; ind=%d\n",
 				__func__, minor, ind);
 			return ind;
@@ -2410,7 +2407,7 @@
 	usf_disable(&usf->usf_tx);
 	usf_disable(&usf->usf_rx);
 
-	s_opened_devs[usf->dev_ind] = 0;
+	atomic_set(&s_opened_devs[usf->dev_ind], 0);
 
 	wakeup_source_trash(&usf_wakeup_source);
 	mutex_unlock(&usf->mutex);
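
The s_opened_devs[] change above replaces a separate free-slot check and assignment with a single atomic_cmpxchg(), so two callers racing to open the same device cannot both observe a slot as free. Below is a minimal userspace sketch of the same compare-and-exchange slot-claim pattern, assuming C11 <stdatomic.h>; the names and slot count are illustrative, not taken from the driver.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_DEVS_NUMBER 4

static atomic_int opened_devs[MAX_DEVS_NUMBER];

/* Return the claimed slot index, or -1 if 'minor' is already open
 * or no slot is free.
 */
static int claim_slot(int minor)
{
	for (int ind = 0; ind < MAX_DEVS_NUMBER; ++ind) {
		int expected = 0;

		/* Atomically: if the slot is free (0), take it with 'minor';
		 * the check and the reservation happen in one step.
		 */
		if (atomic_compare_exchange_strong(&opened_devs[ind],
						   &expected, minor))
			return ind;
		if (expected == minor)	/* already opened by this minor */
			return -1;
	}
	return -1;
}

int main(void)
{
	printf("minor 5 -> slot %d\n", claim_slot(5));
	printf("minor 5 again -> slot %d\n", claim_slot(5));
	return 0;
}
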
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 877c4d1..c1857c7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1261,7 +1261,7 @@
 				atomic_read(&data->ioctl_count) <= 1)) {
 			pr_err("Interrupted from abort\n");
 			ret = -ERESTARTSYS;
-			break;
+			return ret;
 		}
 	}
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index ccfd225..3fd621c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -133,14 +133,11 @@
 
 		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
 		/*
-		 * Don't requeue if issue_fn fails, just bug on.
-		 * We don't expect failure here and there is no recovery other
-		 * than fixing the actual issue if there is any.
+		 * Don't requeue if issue_fn fails.
+		 * Recovery will come via the completion softirq.
 		 * Also we end the request if there is a partition switch error,
 		 * so we should not requeue the request here.
 		 */
-		if (ret)
-			BUG_ON(1);
 	} /* loop */
 
 	return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8b1b0a0..3910d2d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -613,17 +613,39 @@
 		host->card->clk_scaling_lowest,
 		host->card->clk_scaling_highest);
 
+	/*
+	 * Create the frequency table. If the platform driver supplied a
+	 * frequency table, initialize it with those platform specific
+	 * frequencies; otherwise initialize it with the min and max
+	 * frequencies supported by the card.
+	 */
 	if (!clk_scaling->freq_table) {
-		pr_debug("%s: no frequency table defined -  setting default\n",
-			mmc_hostname(host));
+		if (clk_scaling->pltfm_freq_table_sz)
+			clk_scaling->freq_table_sz =
+				clk_scaling->pltfm_freq_table_sz;
+		else
+			clk_scaling->freq_table_sz = 2;
+
 		clk_scaling->freq_table = kzalloc(
-			2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
+			(clk_scaling->freq_table_sz *
+			sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
 		if (!clk_scaling->freq_table)
 			return -ENOMEM;
-		clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
-		clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
-		clk_scaling->freq_table_sz = 2;
-		goto out;
+
+		if (clk_scaling->pltfm_freq_table) {
+			memcpy(clk_scaling->freq_table,
+				clk_scaling->pltfm_freq_table,
+				(clk_scaling->pltfm_freq_table_sz *
+				sizeof(*(clk_scaling->pltfm_freq_table))));
+		} else {
+			pr_debug("%s: no frequency table defined -  setting default\n",
+				mmc_hostname(host));
+			clk_scaling->freq_table[0] =
+				host->card->clk_scaling_lowest;
+			clk_scaling->freq_table[1] =
+				host->card->clk_scaling_highest;
+			goto out;
+		}
 	}
 
 	if (host->card->clk_scaling_lowest >
@@ -827,10 +849,15 @@
 	if (!mmc_can_scale_clk(host))
 		return 0;
 
+	/*
+	 * If clock scaling has already been exited by the time resume is
+	 * called, e.g. during mmc shutdown, this is not an error and should
+	 * not fail the API that called this.
+	 */
 	if (!host->clk_scaling.devfreq) {
-		pr_err("%s: %s: no devfreq is assosiated with this device\n",
+		pr_warn("%s: %s: no devfreq is associated with this device\n",
 			mmc_hostname(host), __func__);
-		return -EPERM;
+		return 0;
 	}
 
 	atomic_set(&host->clk_scaling.devfreq_abort, 0);
@@ -840,7 +867,7 @@
 	devfreq_min_clk = host->clk_scaling.freq_table[0];
 
 	host->clk_scaling.curr_freq = devfreq_max_clk;
-	if (host->ios.clock < host->card->clk_scaling_highest)
+	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
 		host->clk_scaling.curr_freq = devfreq_min_clk;
 
 	host->clk_scaling.clk_scaling_in_progress = false;
@@ -902,6 +929,10 @@
 
 	host->clk_scaling.devfreq = NULL;
 	atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+	kfree(host->clk_scaling.freq_table);
+	host->clk_scaling.freq_table = NULL;
+
 	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
 
 	return 0;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 0d0d56f..0c8ff86 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -338,10 +338,15 @@
 {
 	struct mmc_host *host = data;
 
-	if (host && host->ops && host->ops->force_err_irq) {
-		mmc_host_clk_hold(host);
+	if (host && host->card && host->ops &&
+			host->ops->force_err_irq) {
+		/*
+		 * To access the force error irq reg, we need to make
+		 * sure the host is powered up and host clock is ticking.
+		 */
+		mmc_get_card(host->card);
 		host->ops->force_err_irq(host, val);
-		mmc_host_clk_release(host);
+		mmc_put_card(host->card);
 	}
 
 	return 0;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index eb730fd..c92ea77 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -325,6 +325,7 @@
 		mod_timer(&host->retune_timer,
 			  jiffies + host->retune_period * HZ);
 }
+EXPORT_SYMBOL(mmc_retune_enable);
 
 /*
  * Pause re-tuning for a small set of operations.  The pause begins after the
@@ -357,6 +358,7 @@
 	host->retune_now = 0;
 	host->need_retune = 0;
 }
+EXPORT_SYMBOL(mmc_retune_disable);
 
 void mmc_retune_timer_stop(struct mmc_host *host)
 {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a36bcbb..564b5c9 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2696,6 +2696,9 @@
 	if (mmc_card_hs400(card)) {
 		if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
 			err = host->ops->enhanced_strobe(host);
+		else if (host->ops->execute_tuning)
+			err = host->ops->execute_tuning(host,
+				MMC_SEND_TUNING_BLOCK_HS200);
 	} else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
 		err = host->ops->execute_tuning(host,
 			MMC_SEND_TUNING_BLOCK_HS200);
diff --git a/drivers/mmc/core/ring_buffer.c b/drivers/mmc/core/ring_buffer.c
new file mode 100644
index 0000000..83945e1
--- /dev/null
+++ b/drivers/mmc/core/ring_buffer.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mmc/ring_buffer.h>
+#include <linux/mmc/host.h>
+
+void mmc_stop_tracing(struct mmc_host *mmc)
+{
+	mmc->trace_buf.stop_tracing = true;
+}
+
+void mmc_trace_write(struct mmc_host *mmc,
+			const char *fmt, ...)
+{
+	unsigned int idx;
+	va_list args;
+	char *event;
+	unsigned long flags;
+	char str[MMC_TRACE_EVENT_SZ];
+
+	if (unlikely(!mmc->trace_buf.data) ||
+			unlikely(mmc->trace_buf.stop_tracing))
+		return;
+
+	/*
+	 * An increment and a power-of-two mask keep the index
+	 * within array bounds. The cast to unsigned is necessary
+	 * so that the increment and rollover wrap to 0 correctly.
+	 */
+	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+	mmc->trace_buf.wr_idx += 1;
+	idx = ((unsigned int)mmc->trace_buf.wr_idx) &
+			(MMC_TRACE_RBUF_NUM_EVENTS - 1);
+	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+
+	/* Catch some unlikely machine specific wrap-around bug */
+	if (unlikely(idx > (MMC_TRACE_RBUF_NUM_EVENTS - 1))) {
+		pr_err("%s: %s: Invalid idx:%d for mmc trace, tracing stopped !\n",
+			mmc_hostname(mmc), __func__, idx);
+		mmc_stop_tracing(mmc);
+		return;
+	}
+
+	event = &mmc->trace_buf.data[idx * MMC_TRACE_EVENT_SZ];
+	va_start(args, fmt);
+	snprintf(str, MMC_TRACE_EVENT_SZ, "<%d> %lld: %s: %s",
+		raw_smp_processor_id(),
+		ktime_to_ns(ktime_get()),
+		mmc_hostname(mmc), fmt);
+	memset(event, '\0', MMC_TRACE_EVENT_SZ);
+	vscnprintf(event, MMC_TRACE_EVENT_SZ, str, args);
+	va_end(args);
+}
+
+void mmc_trace_init(struct mmc_host *mmc)
+{
+	BUILD_BUG_ON_NOT_POWER_OF_2(MMC_TRACE_RBUF_NUM_EVENTS);
+
+	mmc->trace_buf.data = (char *)
+				__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+				MMC_TRACE_RBUF_SZ_ORDER);
+
+	if (!mmc->trace_buf.data) {
+		pr_err("%s: %s: Unable to allocate trace for mmc\n",
+			__func__, mmc_hostname(mmc));
+		return;
+	}
+
+	spin_lock_init(&mmc->trace_buf.trace_lock);
+	mmc->trace_buf.wr_idx = -1;
+}
+
+void mmc_trace_free(struct mmc_host *mmc)
+{
+	if (mmc->trace_buf.data)
+		free_pages((unsigned long)mmc->trace_buf.data,
+			MMC_TRACE_RBUF_SZ_ORDER);
+}
+
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s)
+{
+	unsigned int idx, cur_idx;
+	unsigned int N = MMC_TRACE_RBUF_NUM_EVENTS - 1;
+	char *event;
+	unsigned long flags;
+
+	if (!mmc->trace_buf.data)
+		return;
+
+	spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+	idx = ((unsigned int)mmc->trace_buf.wr_idx) & N;
+	cur_idx = (idx + 1) & N;
+
+	do {
+		event = &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+		if (s)
+			seq_printf(s, "%s", (char *)event);
+		else
+			pr_err("%s", (char *)event);
+		cur_idx = (cur_idx + 1) & N;
+		if (cur_idx == idx) {
+			event =
+			  &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+			if (s)
+				seq_printf(s, "latest_event: %s",
+					(char *)event);
+			else
+				pr_err("latest_event: %s", (char *)event);
+			break;
+		}
+	} while (1);
+	spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+}
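
mmc_trace_write() above keeps its write index monotonically increasing and masks it with MMC_TRACE_RBUF_NUM_EVENTS - 1, which only stays in bounds because the event count is a power of two (enforced by BUILD_BUG_ON_NOT_POWER_OF_2() in mmc_trace_init()) and the index is cast to unsigned before masking. A small standalone sketch of that indexing, with an illustrative buffer size:

#include <stdio.h>

#define NUM_EVENTS 8	/* must be a power of two for the mask to work */

int main(void)
{
	unsigned int wr_idx = (unsigned int)-1;	/* mirrors the wr_idx = -1 init */

	for (int i = 0; i < 12; i++) {
		unsigned int slot;

		wr_idx += 1;
		/* The mask keeps the slot in [0, NUM_EVENTS - 1]; because
		 * the index is unsigned, wrap-around at UINT_MAX is well
		 * defined and the slot sequence stays contiguous.
		 */
		slot = wr_idx & (NUM_EVENTS - 1);
		printf("event %2d -> slot %u\n", i, slot);
	}
	return 0;
}
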
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 77c5ca3..bfe1242 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -943,12 +943,17 @@
 		 * before setting doorbell, hence one is not needed here.
 		 */
 		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
-			/* complete the corresponding mrq */
-			pr_debug("%s: completing tag -> %lu\n",
-				 mmc_hostname(mmc), tag);
-			MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
-				__func__, tag);
+			mrq = get_req_by_tag(cq_host, tag);
+			if (!((mrq->cmd && mrq->cmd->error) ||
+					mrq->cmdq_req->resp_err ||
+					(mrq->data && mrq->data->error))) {
+				/* complete the corresponding mrq */
+				pr_debug("%s: completing tag -> %lu\n",
+					 mmc_hostname(mmc), tag);
+				MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
+					__func__, tag);
 				cmdq_finish_data(mmc, tag);
+			}
 		}
 	}
 
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 7262466..50dd6bd 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -157,7 +157,8 @@
 };
 
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
-	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
 	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
 	.ops = &sdhci_iproc_ops,
 };
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fe62b69..caf8dd1 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1838,13 +1838,13 @@
 	}
 
 	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
-			&msm_host->mmc->clk_scaling.freq_table,
-			&msm_host->mmc->clk_scaling.freq_table_sz, 0))
+			&msm_host->mmc->clk_scaling.pltfm_freq_table,
+			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
 		pr_debug("%s: no clock scaling frequencies were supplied\n",
 			dev_name(dev));
-	else if (!msm_host->mmc->clk_scaling.freq_table ||
-			!msm_host->mmc->clk_scaling.freq_table_sz)
-			dev_err(dev, "bad dts clock scaling frequencies\n");
+	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+			!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+		dev_err(dev, "bad dts clock scaling frequencies\n");
 
 	/*
 	 * Few hosts can support DDR52 mode at the same lower
@@ -1937,7 +1937,7 @@
 	sdhci_msm_pm_qos_parse(dev, pdata);
 
 	if (of_get_property(np, "qcom,core_3_0v_support", NULL))
-		pdata->core_3_0v_support = true;
+		msm_host->core_3_0v_support = true;
 
 	pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
 
@@ -2617,8 +2617,9 @@
 	 * completed before its next update to registers within hc_mem.
 	 */
 	mb();
-
-	if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
+	if ((io_level & REQ_IO_HIGH) &&
+			(msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
+			!msm_host->core_3_0v_support)
 		writel_relaxed((readl_relaxed(host->ioaddr +
 				msm_host_offset->CORE_VENDOR_SPEC) &
 				~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
@@ -2717,14 +2718,15 @@
 					msm_host->offset;
 	unsigned long flags;
 	bool done = false;
-	u32 io_sig_sts;
+	u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
 
 	spin_lock_irqsave(&host->lock, flags);
 	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
 			mmc_hostname(host->mmc), __func__, req_type,
 			msm_host->curr_pwr_state, msm_host->curr_io_level);
-	io_sig_sts = sdhci_msm_readl_relaxed(host,
-			msm_host_offset->CORE_GENERICS);
+	if (!msm_host->mci_removed)
+		io_sig_sts = sdhci_msm_readl_relaxed(host,
+				msm_host_offset->CORE_GENERICS);
 
 	/*
 	 * The IRQ for request type IO High/Low will be generated when -
@@ -4023,7 +4025,7 @@
 		msm_host->use_14lpp_dll = true;
 
 	/* Fake 3.0V support for SDIO devices which requires such voltage */
-	if (msm_host->pdata->core_3_0v_support) {
+	if (msm_host->core_3_0v_support) {
 		caps |= CORE_3_0V_SUPPORT;
 			writel_relaxed((readl_relaxed(host->ioaddr +
 			SDHCI_CAPABILITIES) | caps), host->ioaddr +
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 53b1953..a7577d9 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -150,7 +150,6 @@
 	u32 *sup_ice_clk_table;
 	unsigned char sup_ice_clk_cnt;
 	struct sdhci_msm_pm_qos_data pm_qos_data;
-	bool core_3_0v_support;
 	bool sdr104_wa;
 };
 
@@ -218,6 +217,7 @@
 	bool tuning_in_progress;
 	bool mci_removed;
 	const struct sdhci_msm_offset *offset;
+	bool core_3_0v_support;
 };
 
 extern char *saved_command_line;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 83be863..4bb4c18 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2418,7 +2418,13 @@
 
 	if (host->ops->platform_execute_tuning) {
 		spin_unlock_irqrestore(&host->lock, flags);
+		/*
+		 * Make sure re-tuning won't get triggered by CRC errors
+		 * that occur while executing tuning
+		 */
+		mmc_retune_disable(mmc);
 		err = host->ops->platform_execute_tuning(host, opcode);
+		mmc_retune_enable(mmc);
 		return err;
 	}
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ff..6dcc42d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2573,7 +2573,7 @@
 		return -1;
 
 	ad_info->aggregator_id = aggregator->aggregator_identifier;
-	ad_info->ports = aggregator->num_of_ports;
+	ad_info->ports = __agg_active_ports(aggregator);
 	ad_info->actor_key = aggregator->actor_oper_aggregator_key;
 	ad_info->partner_key = aggregator->partner_oper_aggregator_key;
 	ether_addr_copy(ad_info->partner_system,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0a9108c..0a5ee1d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1931,7 +1931,7 @@
 	}
 
 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 19dc9e2..f9c2feb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2226,10 +2226,14 @@
 		if (err)
 			goto irq_err;
 	}
+
+	mutex_lock(&uld_mutex);
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
 	adap->flags |= FULL_INIT_DONE;
+	mutex_unlock(&uld_mutex);
+
 	notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93aa293..9711ca4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5144,9 +5144,11 @@
 	struct be_adapter *adapter = netdev_priv(dev);
 	u8 l4_hdr = 0;
 
-	/* The code below restricts offload features for some tunneled packets.
+	/* The code below restricts offload features for some tunneled and
+	 * Q-in-Q packets.
 	 * Offload features for normal (non tunnel) packets are unchanged.
 	 */
+	features = vlan_features_check(skb, features);
 	if (!skb->encapsulation ||
 	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
 		return features;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index c044667..e31199f 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -710,6 +710,8 @@
 	if (ret)
 		return ret;
 
+	napi_enable(&priv->napi);
+
 	ethoc_init_ring(priv, dev->mem_start);
 	ethoc_reset(priv);
 
@@ -722,7 +724,6 @@
 	}
 
 	phy_start(dev->phydev);
-	napi_enable(&priv->napi);
 
 	if (netif_msg_ifup(priv)) {
 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3f51a44..cb45390 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -767,7 +767,7 @@
 	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 		       mlx5_command_str(msg_to_opcode(ent->in)),
 		       msg_to_opcode(ent->in));
-	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
 static void cmd_work_handler(struct work_struct *work)
@@ -797,6 +797,7 @@
 	}
 
 	cmd->ent_arr[ent->idx] = ent;
+	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
 	memset(lay, 0, sizeof(*lay));
@@ -818,6 +819,20 @@
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 
+	/* Skip sending command to fw if internal error */
+	if (pci_channel_offline(dev->pdev) ||
+	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		u8 status = 0;
+		u32 drv_synd;
+
+		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+		MLX5_SET(mbox_out, ent->out, status, status);
+		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+		return;
+	}
+
 	/* ring doorbell after the descriptor is valid */
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 	wmb();
@@ -828,7 +843,7 @@
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
 	}
 }
 
@@ -872,7 +887,7 @@
 		wait_for_completion(&ent->done);
 	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
 		ent->ret = -ETIMEDOUT;
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 	}
 
 	err = ent->ret;
@@ -1369,7 +1384,7 @@
 	}
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -1389,6 +1404,19 @@
 			struct semaphore *sem;
 
 			ent = cmd->ent_arr[i];
+
+			/* if we already completed the command, ignore it */
+			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+						&ent->state)) {
+				/* only real completion can free the cmd slot */
+				if (!forced) {
+					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+						      ent->idx);
+					free_ent(cmd, ent->idx);
+				}
+				continue;
+			}
+
 			if (ent->callback)
 				cancel_delayed_work(&ent->cb_timeout_work);
 			if (ent->page_queue)
@@ -1411,7 +1439,10 @@
 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
 			}
-			free_ent(cmd, ent->idx);
+
+			/* only real completion will free the entry slot */
+			if (!forced)
+				free_ent(cmd, ent->idx);
 
 			if (ent->callback) {
 				ds = ent->ts2 - ent->ts1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 126cfeb..3744e2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -751,7 +751,6 @@
 	ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
 	ptys2ethtool_supported_link(supported, eth_proto_cap);
 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
-	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
 }
 
 static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -761,7 +760,7 @@
 	unsigned long *advertising = link_ksettings->link_modes.advertising;
 
 	ptys2ethtool_adver_link(advertising, eth_proto_cap);
-	if (tx_pause)
+	if (rx_pause)
 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
 	if (tx_pause ^ rx_pause)
 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -806,6 +805,8 @@
 	struct mlx5e_priv *priv    = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+	u32 rx_pause = 0;
+	u32 tx_pause = 0;
 	u32 eth_proto_cap;
 	u32 eth_proto_admin;
 	u32 eth_proto_lp;
@@ -828,11 +829,13 @@
 	an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
 	an_status        = MLX5_GET(ptys_reg, out, an_status);
 
+	mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
 	get_supported(eth_proto_cap, link_ksettings);
-	get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+	get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
 	get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
 
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index aaca090..f86e9ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -234,7 +234,7 @@
 			break;
 
 		case MLX5_EVENT_TYPE_CMD:
-			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
 			break;
 
 		case MLX5_EVENT_TYPE_PORT_CHANGE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 5bcf934..2115c8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -90,7 +90,7 @@
 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 
 	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-	mlx5_cmd_comp_handler(dev, vector);
+	mlx5_cmd_comp_handler(dev, vector, true);
 	return;
 
 no_trig:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b2893fb..ef6bff8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1953,7 +1953,7 @@
 
 		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
 			0, 1,
-			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+			(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
 			0, 0);
 
 		tmp_len -= TSO_MAX_BUFF_SIZE;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c2dcf02..d6a541b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -240,34 +240,6 @@
 {
 	int err;
 
-	/* The Marvell PHY has an errata which requires
-	 * that certain registers get written in order
-	 * to restart autonegotiation */
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1d, 0x1f);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0x200c);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1d, 0x5);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0x100);
-	if (err < 0)
-		return err;
-
 	err = marvell_set_polarity(phydev, phydev->mdix);
 	if (err < 0)
 		return err;
@@ -301,6 +273,42 @@
 	return 0;
 }
 
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	/* This Marvell PHY has an erratum which requires
+	 * that certain registers get written in order
+	 * to restart autonegotiation
+	 */
+	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x1f);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x200c);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x5);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x100);
+	if (err < 0)
+		return err;
+
+	return marvell_config_aneg(phydev);
+}
+
 static int m88e1111_config_aneg(struct phy_device *phydev)
 {
 	int err;
@@ -1491,7 +1499,7 @@
 		.probe = marvell_probe,
 		.flags = PHY_HAS_INTERRUPT,
 		.config_init = &marvell_config_init,
-		.config_aneg = &marvell_config_aneg,
+		.config_aneg = &m88e1101_config_aneg,
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34d997c..2f260c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -897,6 +897,8 @@
 	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 51fc0c3..7ca9989 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1456,6 +1456,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= virtnet_busy_poll,
 #endif
+	.ndo_features_check	= passthru_features_check,
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 80ef486..ee02605 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -850,6 +850,7 @@
 
 static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -859,7 +860,7 @@
 {
 	struct net *net = dev_net(dev);
 
-	if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
+	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
 		skb = NULL;    /* kfree_skb(skb) handled by nf code */
 
 	return skb;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3c4c2cf..55c4408 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -59,6 +59,8 @@
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head  vxlan_list;
@@ -717,6 +719,22 @@
 	call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+	struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+	dst_cache_destroy(&rd->dst_cache);
+	kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+				  struct vxlan_rdst *rd)
+{
+	list_del_rcu(&rd->list);
+	vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+	call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
 			   union vxlan_addr *ip, __be16 *port, __be32 *vni,
 			   u32 *ifindex)
@@ -847,9 +865,7 @@
 	 * otherwise destroy the fdb entry
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
-		list_del_rcu(&rd->list);
-		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-		kfree_rcu(rd, rcu);
+		vxlan_fdb_dst_destroy(vxlan, f, rd);
 		goto out;
 	}
 
@@ -1026,6 +1042,8 @@
 	rcu_assign_pointer(vxlan->vn4_sock, NULL);
 	synchronize_net();
 
+	vxlan_vs_del_dev(vxlan);
+
 	if (__vxlan_sock_release_prep(sock4)) {
 		udp_tunnel_sock_release(sock4->sock);
 		kfree(sock4);
@@ -2286,6 +2304,15 @@
 	mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+	spin_lock(&vn->sock_lock);
+	hlist_del_init_rcu(&vxlan->hlist);
+	spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3056,12 +3083,6 @@
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-
-	spin_lock(&vn->sock_lock);
-	if (!hlist_unhashed(&vxlan->hlist))
-		hlist_del_rcu(&vxlan->hlist);
-	spin_unlock(&vn->sock_lock);
 
 	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 5610c4f..9afd6f2 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <net/netlink.h>
 #include "wil6210.h"
 #include "wmi.h"
 #include "ftm.h"
@@ -55,6 +56,62 @@
 
 #define QCA_NL80211_VENDOR_ID	0x001374
 
+#define WIL_MAX_RF_SECTORS (128)
+#define WIL_CID_ALL (0xff)
+
+enum qca_wlan_vendor_attr_rf_sector {
+	QCA_ATTR_MAC_ADDR = 6,
+	QCA_ATTR_PAD = 13,
+	QCA_ATTR_TSF = 29,
+	QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
+	QCA_ATTR_DMG_RF_MODULE_MASK = 32,
+	QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
+	QCA_ATTR_DMG_RF_SECTOR_MAX,
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_type {
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
+	QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+
+	/* keep last */
+	QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
+	QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
+};
+
+static const struct
+nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
+	[QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
+	[QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
+	[QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
+	[QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
+};
+
+static const struct
+nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
+};
+
 enum qca_nl80211_vendor_subcmds {
 	QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128,
 	QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129,
@@ -65,8 +122,25 @@
 	QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134,
 	QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135,
 	QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
 };
 
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len);
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len);
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len);
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len);
+
 /* vendor specific commands */
 static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
 	{
@@ -111,6 +185,36 @@
 			 WIPHY_VENDOR_CMD_NEED_RUNNING,
 		.doit = wil_aoa_abort_measurement
 	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_get_cfg
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_set_cfg
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd =
+			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_get_selected
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd =
+			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_set_selected
+	},
 };
 
 /* vendor specific events */
@@ -1837,3 +1941,451 @@
 		kfree(p2p_wdev);
 	}
 }
+
+static int wil_rf_sector_status_to_rc(u8 status)
+{
+	switch (status) {
+	case WMI_RF_SECTOR_STATUS_SUCCESS:
+		return 0;
+	case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
+		return -EINVAL;
+	case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
+		return -EAGAIN;
+	case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
+		return -EOPNOTSUPP;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u16 sector_index;
+	u8 sector_type;
+	u32 rf_modules_vec;
+	struct wmi_get_rf_sector_params_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_get_rf_sector_params_done_event evt;
+	} __packed reply;
+	struct sk_buff *msg;
+	struct nlattr *nl_cfgs, *nl_cfg;
+	u32 i;
+	struct wmi_rf_sector_info *si;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+	    !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	rf_modules_vec = nla_get_u32(
+		tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
+	if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
+		wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
+		return -EINVAL;
+	}
+
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+		      WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	if (reply.evt.status) {
+		wil_err(wil, "get rf sector cfg failed with status %d\n",
+			reply.evt.status);
+		return wil_rf_sector_status_to_rc(reply.evt.status);
+	}
+
+	msg = cfg80211_vendor_cmd_alloc_reply_skb(
+		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+	if (!msg)
+		return -ENOMEM;
+
+	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+			      le64_to_cpu(reply.evt.tsf),
+			      QCA_ATTR_PAD))
+		goto nla_put_failure;
+
+	nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+	if (!nl_cfgs)
+		goto nla_put_failure;
+	for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
+		if (!(rf_modules_vec & BIT(i)))
+			continue;
+		nl_cfg = nla_nest_start(msg, i);
+		if (!nl_cfg)
+			goto nla_put_failure;
+		si = &reply.evt.sectors_info[i];
+		if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+			       i) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+				le32_to_cpu(si->etype0)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+				le32_to_cpu(si->etype1)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+				le32_to_cpu(si->etype2)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+				le32_to_cpu(si->psh_hi)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+				le32_to_cpu(si->psh_lo)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+				le32_to_cpu(si->dtype_swch_off)))
+			goto nla_put_failure;
+		nla_nest_end(msg, nl_cfg);
+	}
+
+	nla_nest_end(msg, nl_cfgs);
+	rc = cfg80211_vendor_cmd_reply(msg);
+	return rc;
+nla_put_failure:
+	kfree_skb(msg);
+	return -ENOBUFS;
+}
+
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc, tmp;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
+	u16 sector_index, rf_module_index;
+	u8 sector_type;
+	u32 rf_modules_vec = 0;
+	struct wmi_set_rf_sector_params_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_set_rf_sector_params_done_event evt;
+	} __packed reply;
+	struct nlattr *nl_cfg;
+	struct wmi_rf_sector_info *si;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
+			    tmp) {
+		rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+				      nl_cfg, wil_rf_sector_cfg_policy);
+		if (rc) {
+			wil_err(wil, "invalid sector cfg\n");
+			return -EINVAL;
+		}
+
+		if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
+			wil_err(wil, "missing cfg params\n");
+			return -EINVAL;
+		}
+
+		rf_module_index = nla_get_u8(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
+		if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
+			wil_err(wil, "invalid RF module index %d\n",
+				rf_module_index);
+			return -EINVAL;
+		}
+		rf_modules_vec |= BIT(rf_module_index);
+		si = &cmd.sectors_info[rf_module_index];
+		si->etype0 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
+		si->etype1 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
+		si->etype2 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
+		si->psh_hi = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
+		si->psh_lo = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
+		si->dtype_swch_off = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
+	}
+
+	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+		      WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u8 sector_type, mac_addr[ETH_ALEN];
+	int cid = 0;
+	struct wmi_get_selected_rf_sector_index_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_get_selected_rf_sector_index_done_event evt;
+	} __packed reply;
+	struct sk_buff *msg;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	if (tb[QCA_ATTR_MAC_ADDR]) {
+		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+		cid = wil_find_cid(wil, mac_addr);
+		if (cid < 0) {
+			wil_err(wil, "invalid MAC address %pM\n", mac_addr);
+			return -ENOENT;
+		}
+	} else {
+		if (test_bit(wil_status_fwconnected, wil->status)) {
+			wil_err(wil, "must specify MAC address when connected\n");
+			return -EINVAL;
+		}
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cid = (u8)cid;
+	cmd.sector_type = sector_type;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
+		      &cmd, sizeof(cmd),
+		      WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	if (reply.evt.status) {
+		wil_err(wil, "get rf selected sector cfg failed with status %d\n",
+			reply.evt.status);
+		return wil_rf_sector_status_to_rc(reply.evt.status);
+	}
+
+	msg = cfg80211_vendor_cmd_alloc_reply_skb(
+		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+	if (!msg)
+		return -ENOMEM;
+
+	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+			      le64_to_cpu(reply.evt.tsf),
+			      QCA_ATTR_PAD) ||
+	    nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
+			le16_to_cpu(reply.evt.sector_idx)))
+		goto nla_put_failure;
+
+	rc = cfg80211_vendor_cmd_reply(msg);
+	return rc;
+nla_put_failure:
+	kfree_skb(msg);
+	return -ENOBUFS;
+}
+
+static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
+					  u16 sector_index,
+					  u8 sector_type, u8 cid)
+{
+	struct wmi_set_selected_rf_sector_index_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_set_selected_rf_sector_index_done_event evt;
+	} __packed reply;
+	int rc;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	cmd.cid = (u8)cid;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
+		      &cmd, sizeof(cmd),
+		      WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u16 sector_index;
+	u8 sector_type, mac_addr[ETH_ALEN], i;
+	int cid = 0;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS &&
+	    sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	if (tb[QCA_ATTR_MAC_ADDR]) {
+		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+		if (!is_broadcast_ether_addr(mac_addr)) {
+			cid = wil_find_cid(wil, mac_addr);
+			if (cid < 0) {
+				wil_err(wil, "invalid MAC address %pM\n",
+					mac_addr);
+				return -ENOENT;
+			}
+		} else {
+			if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+				wil_err(wil, "broadcast MAC valid only with unlocking\n");
+				return -EINVAL;
+			}
+			cid = -1;
+		}
+	} else {
+		if (test_bit(wil_status_fwconnected, wil->status)) {
+			wil_err(wil, "must specify MAC address when connected\n");
+			return -EINVAL;
+		}
+		/* otherwise, use cid=0 for an unassociated station */
+	}
+
+	if (cid >= 0) {
+		rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
+						    sector_type, cid);
+	} else {
+		/* unlock all cids */
+		rc = wil_rf_sector_wmi_set_selected(
+			wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
+			WIL_CID_ALL);
+		if (rc == -EINVAL) {
+			for (i = 0; i < WIL6210_MAX_CID; i++) {
+				rc = wil_rf_sector_wmi_set_selected(
+					wil, WMI_INVALID_RF_SECTOR_INDEX,
+					sector_type, i);
+				/* the FW will silently ignore and return
+				 * success for unused cid, so abort the loop
+				 * on any other error
+				 */
+				if (rc) {
+					wil_err(wil, "unlock cid %d failed with status %d\n",
+						i, rc);
+					break;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
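
Note: wil_rf_sector_get_selected above follows the usual cfg80211 vendor-command reply flow: allocate a reply skb, put netlink attributes, then either send the reply or free the skb on failure. A minimal sketch of that flow is shown below; the attribute id MY_VENDOR_ATTR_VALUE is a placeholder, not part of this driver.

#include <net/cfg80211.h>
#include <net/netlink.h>

/* hypothetical attribute id, for illustration only */
#define MY_VENDOR_ATTR_VALUE 1

static int my_vendor_cmd_reply_u32(struct wiphy *wiphy, u32 value)
{
	struct sk_buff *msg;

	/* reserve room for one u32 attribute plus netlink overhead */
	msg = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 16);
	if (!msg)
		return -ENOMEM;

	if (nla_put_u32(msg, MY_VENDOR_ATTR_VALUE, value)) {
		/* attribute did not fit; drop the reply skb */
		kfree_skb(msg);
		return -ENOBUFS;
	}

	/* consumes msg on success as well as on failure */
	return cfg80211_vendor_cmd_reply(msg);
}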
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index bbdd232..f8d2c20 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -54,7 +54,7 @@
 	}
 
 	off = a - wil->csr;
-	if (size >= WIL6210_MEM_SIZE - off) {
+	if (size >= wil->bar_size - off) {
 		wil_err(wil, "Requested block does not fit into memory: "
 			"off = 0x%08x size = 0x%08x\n", off, size);
 		return NULL;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 03246a9..cf3fadc 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -198,16 +198,18 @@
 		.ramdump = wil_platform_rop_ramdump,
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
+	u32 bar_size = pci_resource_len(pdev, 0);
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
-		 " device found [%04x:%04x] (rev %x)\n",
-		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+		 " device found [%04x:%04x] (rev %x) bar size 0x%x\n",
+		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
+		 bar_size);
 
-	if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
-		dev_err(&pdev->dev, "Not " WIL_NAME "? "
-			"BAR0 size is %lu while expecting %lu\n",
-			(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+	if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
+	    (bar_size > WIL6210_MAX_MEM_SIZE)) {
+		dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
+			bar_size);
 		return -ENODEV;
 	}
 
@@ -220,6 +222,7 @@
 
 	wil->pdev = pdev;
 	pci_set_drvdata(pdev, wil);
+	wil->bar_size = bar_size;
 	/* rollback to if_free */
 
 	wil->platform_handle =
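
Note: the pcie_bus.c hunk above replaces the exact-size check on BAR0 with a min/max range check and caches the probed length in wil->bar_size. A rough sketch of that probe-time pattern follows; the bounds and names here are illustrative, not the driver's actual values.

#include <linux/pci.h>

#define MY_MIN_BAR0_SIZE (2 * 1024 * 1024UL)
#define MY_MAX_BAR0_SIZE (4 * 1024 * 1024UL)

static int my_probe_bar0(struct pci_dev *pdev, u32 *bar_size_out)
{
	u32 bar_size = pci_resource_len(pdev, 0);

	/* accept any BAR0 length within the supported range */
	if (bar_size < MY_MIN_BAR0_SIZE || bar_size > MY_MAX_BAR0_SIZE) {
		dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n", bar_size);
		return -ENODEV;
	}

	*bar_size_out = bar_size;
	return 0;
}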
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 015dc3c..ce1f384 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -80,6 +80,8 @@
 	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 	wil_unmask_irq(wil);
 
+	wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
 	/* Send WMI resume request to the device */
 	rc = wmi_resume(wil);
 	if (rc) {
@@ -96,7 +98,9 @@
 		}
 	}
 
-	wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+	/* Wake all queues */
+	if (test_bit(wil_status_fwconnected, wil->status))
+		wil_update_net_queues_bh(wil, NULL, false);
 
 out:
 	if (rc)
@@ -113,6 +117,7 @@
 
 	/* Prevent handling of new tx and wmi commands */
 	set_bit(wil_status_suspending, wil->status);
+	wil_update_net_queues_bh(wil, NULL, true);
 
 	if (!wil_is_tx_idle(wil)) {
 		wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
@@ -175,42 +180,43 @@
 	/* Disable device reset on PERST */
 	wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
-	/* Save the current bus request to return to the same in resume */
-	wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
-	wil6210_bus_request(wil, 0);
-
 	if (wil->platform_ops.suspend) {
 		rc = wil->platform_ops.suspend(wil->platform_handle, true);
 		if (rc) {
 			wil_err(wil, "platform device failed to suspend (%d)\n",
 				rc);
 			wil->suspend_stats.failed_suspends++;
-			clear_bit(wil_status_suspending, wil->status);
-			rc = wil_resume_keep_radio_on(wil);
-			/* if resume succeeded, reject the suspend */
-			if (!rc)
-				rc = -EBUSY;
-			goto out;
+			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+			wil_unmask_irq(wil);
+			goto resume_after_fail;
 		}
 	}
 
+	/* Save the current bus request to return to the same in resume */
+	wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+	wil6210_bus_request(wil, 0);
+
 	set_bit(wil_status_suspended, wil->status);
 	clear_bit(wil_status_suspending, wil->status);
 
 	return rc;
 
 resume_after_fail:
+	set_bit(wil_status_resuming, wil->status);
 	clear_bit(wil_status_suspending, wil->status);
 	rc = wmi_resume(wil);
 	/* if resume succeeded, reject the suspend */
-	if (!rc)
+	if (!rc) {
 		rc = -EBUSY;
-
-out:
+		if (test_bit(wil_status_fwconnected, wil->status))
+			wil_update_net_queues_bh(wil, NULL, false);
+	}
 	return rc;
 
 reject_suspend:
 	clear_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_fwconnected, wil->status))
+		wil_update_net_queues_bh(wil, NULL, false);
 	return -EBUSY;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 439fe30..8f1e79b4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1930,6 +1930,11 @@
 		return;
 	}
 
+	/* Do not wake the queues in suspend flow */
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status))
+		return;
+
 	/* check wake */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
 		struct vring *cur_vring = &wil->vring_tx[i];
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 38f61e3..eca5685 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -59,7 +59,8 @@
 	return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
 }
 
-#define WIL6210_MEM_SIZE (2*1024*1024UL)
+#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
+#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
 
 #define WIL_TX_Q_LEN_DEFAULT		(4000)
 #define WIL_RX_RING_SIZE_ORDER_DEFAULT	(10)
@@ -617,6 +618,7 @@
 
 struct wil6210_priv {
 	struct pci_dev *pdev;
+	u32 bar_size;
 	struct wireless_dev *wdev;
 	void __iomem *csr;
 	DECLARE_BITMAP(status, wil_status_last);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ba2b207..ae0952f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -160,7 +160,7 @@
 		return NULL;
 
 	off = HOSTADDR(ptr);
-	if (off > WIL6210_MEM_SIZE - 4)
+	if (off > wil->bar_size - 4)
 		return NULL;
 
 	return wil->csr + off;
@@ -180,7 +180,7 @@
 		return NULL;
 
 	off = HOSTADDR(ptr);
-	if (off > WIL6210_MEM_SIZE - 4)
+	if (off > wil->bar_size - 4)
 		return NULL;
 
 	return wil->csr + off;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5f2feee..fbeca06 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1725,7 +1725,6 @@
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_attr_group);
 		del_gendisk(ns->disk);
-		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
 	}
 
@@ -2048,8 +2047,16 @@
 			continue;
 		revalidate_disk(ns->disk);
 		blk_set_queue_dying(ns->queue);
-		blk_mq_abort_requeue_list(ns->queue);
-		blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+		/*
+		 * Forcibly start all queues to avoid having stuck requests.
+		 * Note that we must ensure the queues are not stopped
+		 * when the final removal happens.
+		 */
+		blk_mq_start_hw_queues(ns->queue);
+
+		/* drain requests on the requeue list */
+		blk_mq_kick_requeue_list(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
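
Note: the nvme core change above stops aborting the requeue list and instead restarts the hardware queues and kicks the requeue list, so outstanding requests get dispatched and completed with an error once the queue is marked dying. A hedged sketch of that per-queue sequence, assuming an already set up request_queue:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Fail out pending I/O on a dying queue instead of leaving it stuck. */
static void my_kill_queue(struct request_queue *q)
{
	blk_set_queue_dying(q);

	/*
	 * Force the hardware queues to run so queued requests are
	 * dispatched and completed with an error by the dying queue.
	 */
	blk_mq_start_hw_queues(q);

	/* push anything parked on the requeue list back for dispatch */
	blk_mq_kick_requeue_list(q);
}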
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add..3222f3e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1011,6 +1011,19 @@
 		nvme_rdma_wr_error(cq, wc, "SEND");
 }
 
+static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+{
+	int sig_limit;
+
+	/*
+	 * We signal completion every queue depth/2 and also handle the
+	 * degenerated case of a  device with queue_depth=1, where we
+	 * degenerate case of a device with queue_depth=1, where we
+	 */
+	sig_limit = max(queue->queue_size / 2, 1);
+	return (++queue->sig_count % sig_limit) == 0;
+}
+
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
 		struct ib_send_wr *first, bool flush)
@@ -1038,9 +1051,6 @@
 	 * Would have been way to obvious to handle this in hardware or
 	 * at least the RDMA stack..
 	 *
-	 * This messy and racy code sniplet is copy and pasted from the iSER
-	 * initiator, and the magic '32' comes from there as well.
-	 *
 	 * Always signal the flushes. The magic request used for the flush
 	 * sequencer is not allocated in our driver's tagset and it's
 	 * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1048,7 +1058,7 @@
 	 * embeded in request's payload, is not freed when __ib_process_cq()
 	 * calls wr_cqe->done().
 	 */
-	if ((++queue->sig_count % 32) == 0 || flush)
+	if (nvme_rdma_queue_sig_limit(queue) || flush)
 		wr.send_flags |= IB_SEND_SIGNALED;
 
 	if (first)
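
Note: the sig_count change above replaces the hard-coded "signal every 32 sends" heuristic with one derived from the queue depth. The arithmetic is easy to check in isolation; the standalone sketch below (plain C, with an assumed queue_size and counter) mirrors the logic:

#include <stdio.h>

/* Signal one completion out of every max(queue_size / 2, 1) posted sends. */
static int should_signal(int queue_size, unsigned int *sig_count)
{
	int sig_limit = queue_size / 2;

	if (sig_limit < 1)
		sig_limit = 1;	/* queue_depth == 1: signal every send */

	return (++(*sig_count) % sig_limit) == 0;
}

int main(void)
{
	unsigned int sig_count = 0;
	int i, signalled = 0;

	for (i = 0; i < 128; i++)
		signalled += should_signal(32, &sig_count);

	/* with queue_size 32, every 16th send is signalled: 128/16 == 8 */
	printf("signalled %d of 128 sends\n", signalled);
	return 0;
}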
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index a0fa943..771a1f9 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -4721,16 +4721,18 @@
 	}
 }
 
-void msm_pcie_destroy_irq(unsigned int irq)
+void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev)
 {
 	int pos;
-	struct pci_dev *pdev = irq_get_chip_data(irq);
 	struct msi_desc *entry = irq_get_msi_desc(irq);
 	struct msi_desc *firstentry;
 	struct msm_pcie_dev_t *dev;
 	u32 nvec;
 	int firstirq;
 
+	if (!pdev)
+		pdev = irq_get_chip_data(irq);
+
 	if (!pdev) {
 		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
 		return;
@@ -4789,7 +4791,7 @@
 void arch_teardown_msi_irq(unsigned int irq)
 {
 	PCIE_GEN_DBG("irq %d deallocated\n", irq);
-	msm_pcie_destroy_irq(irq);
+	msm_pcie_destroy_irq(irq, NULL);
 }
 
 void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -4809,7 +4811,7 @@
 			continue;
 		nvec = 1 << entry->msi_attrib.multiple;
 		for (i = 0; i < nvec; i++)
-			arch_teardown_msi_irq(entry->irq + i);
+			msm_pcie_destroy_irq(entry->irq + i, dev);
 	}
 }
 
@@ -5004,7 +5006,6 @@
 			firstirq = irq;
 
 		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
-		irq_set_chip_data(irq, pdev);
 	}
 
 	/* write msi vector and data */
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index 4851aac..f731aac 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -260,9 +260,9 @@
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0x81),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6C),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6E),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
@@ -286,7 +286,7 @@
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0x81),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
 };
diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c
index 009e27bf..fedd5f0 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpi.c
@@ -118,12 +118,12 @@
 	0x00005010,
 	0x00005020,
 	0x00005030,
-	0x00005040,
-	0x00005050,
 	0x00006000,
 	0x00006010,
 	0x00007000,
 	0x00007010,
+	0x00005040,
+	0x00005050,
 	0x00008000,
 	0x00008010,
 	0x00008020,
@@ -406,13 +406,21 @@
 static int lpi_notifier_service_cb(struct notifier_block *this,
 				   unsigned long opcode, void *ptr)
 {
+	static bool initial_boot = true;
+
 	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
 
 	switch (opcode) {
 	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		if (initial_boot) {
+			initial_boot = false;
+			break;
+		}
 		lpi_dev_up = false;
 		break;
 	case AUDIO_NOTIFIER_SERVICE_UP:
+		if (initial_boot)
+			initial_boot = false;
 		lpi_dev_up = true;
 		break;
 	default:
@@ -455,6 +463,7 @@
 		"pull up"
 	};
 
+	pctldev = pctldev ? : to_gpio_state(chip)->ctrl;
 	pindesc = pctldev->desc->pins[offset];
 	pad = pctldev->desc->pins[offset].drv_data;
 	ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_DIR_CTL);
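
Note: the lpi_notifier_service_cb change above swallows the first SERVICE_DOWN delivered during boot so the driver does not mark the device down before it has ever come up. A minimal sketch of that guard pattern; the opcodes below are placeholders standing in for the audio notifier events:

#include <linux/notifier.h>

#define MY_SERVICE_DOWN	0
#define MY_SERVICE_UP	1

static bool my_dev_up;

static int my_service_cb(struct notifier_block *nb,
			 unsigned long opcode, void *ptr)
{
	static bool initial_boot = true;

	switch (opcode) {
	case MY_SERVICE_DOWN:
		/* ignore the spurious DOWN delivered at first boot */
		if (initial_boot) {
			initial_boot = false;
			break;
		}
		my_dev_up = false;
		break;
	case MY_SERVICE_UP:
		if (initial_boot)
			initial_boot = false;
		my_dev_up = true;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}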
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 293371b..c5aaac5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -125,7 +125,6 @@
 	IPA_USB_CONNECTED,
 	IPA_USB_STOPPED,
 	IPA_USB_SUSPEND_REQUESTED,
-	IPA_USB_SUSPEND_IN_PROGRESS,
 	IPA_USB_SUSPENDED,
 	IPA_USB_SUSPENDED_NO_RWAKEUP,
 	IPA_USB_RESUME_IN_PROGRESS
@@ -146,13 +145,6 @@
 #define IPA3_USB_IS_TTYPE_DPL(__ttype) \
 	((__ttype) == IPA_USB_TRANSPORT_DPL)
 
-struct finish_suspend_work_context {
-	struct work_struct work;
-	enum ipa3_usb_transport_type ttype;
-	u32 dl_clnt_hdl;
-	u32 ul_clnt_hdl;
-};
-
 struct ipa3_usb_teth_prot_conn_params {
 	u32 usb_to_ipa_clnt_hdl;
 	u32 ipa_to_usb_clnt_hdl;
@@ -168,7 +160,6 @@
 	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
 	void *user_data;
 	enum ipa3_usb_state state;
-	struct finish_suspend_work_context finish_suspend_work;
 	struct ipa_usb_xdci_chan_params ch_params;
 	struct ipa3_usb_teth_prot_conn_params teth_conn_params;
 };
@@ -221,16 +212,10 @@
 
 static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
 static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
 static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
 	ipa3_usb_wq_notify_remote_wakeup);
 static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
 	ipa3_usb_wq_dpl_notify_remote_wakeup);
-static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
-	ipa3_usb_wq_notify_suspend_completed);
-static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
-	ipa3_usb_wq_dpl_notify_suspend_completed);
 
 struct ipa3_usb_context *ipa3_usb_ctx;
 
@@ -273,8 +258,6 @@
 		return "IPA_USB_STOPPED";
 	case IPA_USB_SUSPEND_REQUESTED:
 		return "IPA_USB_SUSPEND_REQUESTED";
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		return "IPA_USB_SUSPEND_IN_PROGRESS";
 	case IPA_USB_SUSPENDED:
 		return "IPA_USB_SUSPENDED";
 	case IPA_USB_SUSPENDED_NO_RWAKEUP:
@@ -330,17 +313,11 @@
 			 * In case of failure during suspend request
 			 * handling, state is reverted to connected.
 			 */
-			(err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
-			/*
-			 * In case of failure during suspend completing
-			 * handling, state is reverted to connected.
-			 */
-			(err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+			(err_permit && state == IPA_USB_SUSPEND_REQUESTED))
 			state_legal = true;
 		break;
 	case IPA_USB_STOPPED:
-		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
-			state == IPA_USB_CONNECTED ||
+		if (state == IPA_USB_CONNECTED ||
 			state == IPA_USB_SUSPENDED ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			state_legal = true;
@@ -349,19 +326,8 @@
 		if (state == IPA_USB_CONNECTED)
 			state_legal = true;
 		break;
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		if (state == IPA_USB_SUSPEND_REQUESTED ||
-			/*
-			 * In case of failure during resume, state is reverted
-			 * to original, which could be suspend_in_progress.
-			 * Allow it.
-			 */
-			(err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
-			state_legal = true;
-		break;
 	case IPA_USB_SUSPENDED:
 		if (state == IPA_USB_SUSPEND_REQUESTED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			/*
 			 * In case of failure during resume, state is reverted
 			 * to original, which could be suspended. Allow it
@@ -374,8 +340,7 @@
 			state_legal = true;
 		break;
 	case IPA_USB_RESUME_IN_PROGRESS:
-		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
-			state == IPA_USB_SUSPENDED)
+		if (state == IPA_USB_SUSPENDED)
 			state_legal = true;
 		break;
 	default:
@@ -452,7 +417,6 @@
 		break;
 	case IPA_USB_OP_DISCONNECT:
 		if  (state == IPA_USB_CONNECTED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			state == IPA_USB_SUSPENDED ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			is_legal = true;
@@ -483,7 +447,6 @@
 		break;
 	case IPA_USB_OP_RESUME:
 		if (state == IPA_USB_SUSPENDED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			is_legal = true;
 		break;
@@ -582,71 +545,6 @@
 	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
 }
 
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
-{
-	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
-{
-	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
-{
-	struct finish_suspend_work_context *finish_suspend_work_ctx;
-	unsigned long flags;
-	int result = -EFAULT;
-	struct ipa3_usb_transport_type_ctx *tctx;
-
-	mutex_lock(&ipa3_usb_ctx->general_mutex);
-	IPA_USB_DBG_LOW("entry\n");
-	finish_suspend_work_ctx = container_of(work,
-		struct finish_suspend_work_context, work);
-	tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
-
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-		mutex_unlock(&ipa3_usb_ctx->general_mutex);
-		return;
-	}
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
-	/* Stop DL/DPL channel */
-	result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
-	if (result) {
-		IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
-			result);
-		ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
-			finish_suspend_work_ctx->dl_clnt_hdl,
-			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
-		/* Change state back to CONNECTED */
-		if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
-			finish_suspend_work_ctx->ttype))
-			IPA_USB_ERR("failed to change state to connected\n");
-		queue_work(ipa3_usb_ctx->wq,
-			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
-			&ipa3_usb_dpl_notify_remote_wakeup_work :
-			&ipa3_usb_notify_remote_wakeup_work);
-		mutex_unlock(&ipa3_usb_ctx->general_mutex);
-		return;
-	}
-
-	/* Change ipa_usb state to SUSPENDED */
-	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
-		finish_suspend_work_ctx->ttype))
-		IPA_USB_ERR("failed to change state to suspended\n");
-
-	queue_work(ipa3_usb_ctx->wq,
-		IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
-		&ipa3_usb_dpl_notify_suspend_completed_work :
-		&ipa3_usb_notify_suspend_completed_work);
-
-	IPA_USB_DBG_LOW("exit\n");
-	mutex_unlock(&ipa3_usb_ctx->general_mutex);
-}
-
 static int ipa3_usb_cons_request_resource_cb_do(
 	enum ipa3_usb_transport_type ttype,
 	struct work_struct *remote_wakeup_work)
@@ -674,17 +572,6 @@
 		else
 			result = -EINPROGRESS;
 		break;
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		/*
-		 * This case happens due to suspend interrupt.
-		 * CONS is granted
-		 */
-		if (!rm_ctx->cons_requested) {
-			rm_ctx->cons_requested = true;
-			queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
-		}
-		result = 0;
-		break;
 	case IPA_USB_SUSPENDED:
 		if (!rm_ctx->cons_requested) {
 			rm_ctx->cons_requested = true;
@@ -727,15 +614,10 @@
 			ipa3_usb_state_to_string(
 			ipa3_usb_ctx->ttype_ctx[ttype].state));
 	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
-	case IPA_USB_SUSPEND_IN_PROGRESS:
+	case IPA_USB_SUSPENDED:
 		/* Proceed with the suspend if no DL/DPL data */
 		if (rm_ctx->cons_requested)
 			rm_ctx->cons_requested_released = true;
-		else {
-			queue_work(ipa3_usb_ctx->wq,
-				&ipa3_usb_ctx->ttype_ctx[ttype].
-				finish_suspend_work.work);
-		}
 		break;
 	case IPA_USB_SUSPEND_REQUESTED:
 		if (rm_ctx->cons_requested)
@@ -2311,8 +2193,7 @@
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
 	orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
-		if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
-			orig_state != IPA_USB_SUSPENDED) {
+		if (orig_state != IPA_USB_SUSPENDED) {
 			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
 				flags);
 			/* Stop UL channel */
@@ -2340,8 +2221,7 @@
 	if (result)
 		goto bad_params;
 
-	if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
-		orig_state != IPA_USB_SUSPENDED) {
+	if (orig_state != IPA_USB_SUSPENDED) {
 		result = ipa3_usb_release_prod(ttype);
 		if (result) {
 			IPA_USB_ERR("failed to release PROD.\n");
@@ -2547,7 +2427,6 @@
 {
 	int result = 0;
 	unsigned long flags;
-	enum ipa3_usb_cons_state curr_cons_state;
 	enum ipa3_usb_transport_type ttype;
 
 	mutex_lock(&ipa3_usb_ctx->general_mutex);
@@ -2602,49 +2481,20 @@
 		goto release_prod_fail;
 	}
 
+	/* Check if DL/DPL data pending */
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+	if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
+		IPA_USB_CONS_GRANTED &&
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+
+		IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
+		queue_work(ipa3_usb_ctx->wq,
+			IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			&ipa3_usb_dpl_notify_remote_wakeup_work :
+			&ipa3_usb_notify_remote_wakeup_work);
+	}
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-	if (curr_cons_state == IPA_USB_CONS_GRANTED) {
-		/* Change state to SUSPEND_IN_PROGRESS */
-		if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
-			false, ttype))
-			IPA_USB_ERR("fail set state to suspend_in_progress\n");
 
-		/* Check if DL/DPL data pending */
-		spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-		if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
-			IPA_USB_DBG(
-				"DL/DPL data pending, invoke remote wakeup\n");
-			queue_work(ipa3_usb_ctx->wq,
-				IPA3_USB_IS_TTYPE_DPL(ttype) ?
-				&ipa3_usb_dpl_notify_remote_wakeup_work :
-				&ipa3_usb_notify_remote_wakeup_work);
-		}
-		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
-			ttype;
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
-			dl_clnt_hdl;
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
-			ul_clnt_hdl;
-		INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
-			finish_suspend_work.work,
-			ipa3_usb_wq_finish_suspend_work);
-
-		result = -EINPROGRESS;
-		IPA_USB_DBG("exit with suspend_in_progress\n");
-		goto bad_params;
-	}
-
-	/* Stop DL channel */
-	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
-	if (result) {
-		IPAERR("Error stopping DL/DPL channel: %d\n", result);
-		result = -EFAULT;
-		goto release_prod_fail;
-	}
 	/* Change state to SUSPENDED */
 	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
 		IPA_USB_ERR("failed to change state to suspended\n");
@@ -2803,13 +2653,11 @@
 		}
 	}
 
-	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		/* Start DL/DPL channel */
-		result = ipa3_start_gsi_channel(dl_clnt_hdl);
-		if (result) {
-			IPA_USB_ERR("failed to start DL/DPL channel.\n");
-			goto start_dl_fail;
-		}
+	/* Start DL/DPL channel */
+	result = ipa3_start_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to start DL/DPL channel.\n");
+		goto start_dl_fail;
 	}
 
 	/* Change state to CONNECTED */
@@ -2824,12 +2672,10 @@
 	return 0;
 
 state_change_connected_fail:
-	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		result = ipa3_stop_gsi_channel(dl_clnt_hdl);
-		if (result)
-			IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
-				result);
-	}
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result)
+		IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+			result);
 start_dl_fail:
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
 		result = ipa3_stop_gsi_channel(ul_clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index d657a06..bb6f8ec 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -600,12 +600,12 @@
 	mem_size = (ipa_ctx->hdr_proc_ctx_tbl_lcl) ?
 		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
-	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
-		IPAERR("hdr proc ctx table overflow\n");
-		goto bad_len;
-	}
-
 	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+			IPAERR("hdr proc ctx table overflow\n");
+			goto bad_len;
+		}
+
 		offset = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_offset_cache,
 					   GFP_KERNEL);
 		if (!offset) {
@@ -711,30 +711,30 @@
 	mem_size = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
 		IPA_MEM_PART(apps_hdr_size_ddr);
 
-	/*
-	 * if header does not fit to table, place it in DDR
-	 * This is valid for IPA 2.5 and on,
-	 * with the exception of IPA2.6L.
-	 */
-	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
-		if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) {
-			IPAERR("not enough room for header\n");
-			goto bad_hdr_len;
-		} else {
-			entry->is_hdr_proc_ctx = true;
-			entry->phys_base = dma_map_single(ipa_ctx->pdev,
-				entry->hdr,
-				entry->hdr_len,
-				DMA_TO_DEVICE);
-			if (dma_mapping_error(ipa_ctx->pdev,
-				entry->phys_base)) {
-				IPAERR("dma_map_single failure for entry\n");
-				goto fail_dma_mapping;
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		/*
+		 * if header does not fit to table, place it in DDR
+		 * This is valid for IPA 2.5 and on,
+		 * with the exception of IPA2.6L.
+		 */
+		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+			if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) {
+				IPAERR("not enough room for header\n");
+				goto bad_hdr_len;
+			} else {
+				entry->is_hdr_proc_ctx = true;
+				entry->phys_base = dma_map_single(ipa_ctx->pdev,
+					entry->hdr,
+					entry->hdr_len,
+					DMA_TO_DEVICE);
+				if (dma_mapping_error(ipa_ctx->pdev,
+					entry->phys_base)) {
+					IPAERR("dma_map_single failed\n");
+					goto fail_dma_mapping;
+				}
 			}
-		}
-	} else {
-		entry->is_hdr_proc_ctx = false;
-		if (list_empty(&htbl->head_free_offset_list[bin])) {
+		} else {
+			entry->is_hdr_proc_ctx = false;
 			offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
 						   GFP_KERNEL);
 			if (!offset) {
@@ -751,14 +751,15 @@
 			htbl->end += ipa_hdr_bin_sz[bin];
 			list_add(&offset->link,
 					&htbl->head_offset_list[bin]);
-		} else {
-			/* get the first free slot */
-			offset =
-			list_first_entry(&htbl->head_free_offset_list[bin],
-					struct ipa_hdr_offset_entry, link);
-			list_move(&offset->link, &htbl->head_offset_list[bin]);
+			entry->offset_entry = offset;
 		}
-
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		/* get the first free slot */
+		offset =
+		list_first_entry(&htbl->head_free_offset_list[bin],
+				struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
 		entry->offset_entry = offset;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 9a3c146..a7ecf1c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -666,6 +666,12 @@
 			retries++;
 			if (retries == IPA_BAM_STOP_MAX_RETRY) {
 				IPAERR("Failed after %d tries\n", retries);
+				mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+				/*
+				 * Max retry reached,
+				 * assert to check why cmd send failed.
+				 */
+				ipa_assert();
 			} else {
 				/* sleep for short period to flush IPA */
 				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index bec4264..4ef7e1f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -940,6 +940,11 @@
  */
 bool ipa2_get_client_uplink(int pipe_idx)
 {
+	if (pipe_idx < 0 || pipe_idx >= IPA_MAX_NUM_PIPES) {
+		IPAERR("invalid pipe idx %d\n", pipe_idx);
+		return false;
+	}
+
 	return ipa_ctx->ipacm_client[pipe_idx].uplink;
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 31e530e..85bfe95 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -232,6 +232,9 @@
 	ipa3_transport_release_resource);
 static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
 
+static void ipa3_post_init_wq(struct work_struct *work);
+static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+
 static struct ipa3_plat_drv_res ipa3_res = {0, };
 struct msm_bus_scale_pdata *ipa3_bus_scale_table;
 
@@ -495,63 +498,6 @@
 	return 0;
 }
 
-/**
-* ipa3_flow_control() - Enable/Disable flow control on a particular client.
-* Return codes:
-* None
-*/
-void ipa3_flow_control(enum ipa_client_type ipa_client,
-		bool enable, uint32_t qmap_id)
-{
-	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
-	int ep_idx;
-	struct ipa3_ep_context *ep;
-
-	/* Check if tethered flow control is needed or not.*/
-	if (!ipa3_ctx->tethered_flow_control) {
-		IPADBG("Apps flow control is not needed\n");
-		return;
-	}
-
-	/* Check if ep is valid. */
-	ep_idx = ipa3_get_ep_mapping(ipa_client);
-	if (ep_idx == -1) {
-		IPADBG("Invalid IPA client\n");
-		return;
-	}
-
-	ep = &ipa3_ctx->ep[ep_idx];
-	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
-		IPADBG("EP not valid/Not applicable for client.\n");
-		return;
-	}
-
-	spin_lock(&ipa3_ctx->disconnect_lock);
-	/* Check if the QMAP_ID matches. */
-	if (ep->cfg.meta.qmap_id != qmap_id) {
-		IPADBG("Flow control ind not for same flow: %u %u\n",
-			ep->cfg.meta.qmap_id, qmap_id);
-		spin_unlock(&ipa3_ctx->disconnect_lock);
-		return;
-	}
-	if (!ep->disconnect_in_progress) {
-		if (enable) {
-			IPADBG("Enabling Flow\n");
-			ep_ctrl.ipa_ep_delay = false;
-			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
-		} else {
-			IPADBG("Disabling Flow\n");
-			ep_ctrl.ipa_ep_delay = true;
-			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
-		}
-		ep_ctrl.ipa_ep_suspend = false;
-		ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
-	} else {
-		IPADBG("EP disconnect is in progress\n");
-	}
-	spin_unlock(&ipa3_ctx->disconnect_lock);
-}
-
 static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
 {
 	if (!buff) {
@@ -610,6 +556,7 @@
 	struct ipa_ioc_nat_alloc_mem nat_mem;
 	struct ipa_ioc_v4_nat_init nat_init;
 	struct ipa_ioc_v4_nat_del nat_del;
+	struct ipa_ioc_nat_pdn_entry mdfy_pdn;
 	struct ipa_ioc_rm_dependency rm_depend;
 	size_t sz;
 	int pre_entry;
@@ -708,6 +655,18 @@
 		}
 		break;
 
+	case IPA_IOC_NAT_MODIFY_PDN:
+		if (copy_from_user((u8 *)&mdfy_pdn, (const void __user *)arg,
+			sizeof(struct ipa_ioc_nat_pdn_entry))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa4_nat_mdfy_pdn(&mdfy_pdn)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
 	case IPA_IOC_ADD_HDR:
 		if (copy_from_user(header, (u8 *)arg,
 					sizeof(struct ipa_ioc_add_hdr))) {
@@ -1863,9 +1822,11 @@
 				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 				ep_idx, &ep_holb);
 
-			ipahal_write_reg_n_fields(
-				IPA_ENDP_INIT_CTRL_n,
-				ep_idx, &ep_suspend);
+			/* from IPA 4.0 onwards, pipe suspend is not supported */
+			if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+				ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_CTRL_n,
+					ep_idx, &ep_suspend);
 		}
 	}
 }
@@ -3979,6 +3940,15 @@
 	struct ipa3_flt_tbl *flt_tbl;
 	int i;
 
+	if (ipa3_ctx == NULL) {
+		IPADBG("IPA driver hasn't been initialized\n");
+		return -ENXIO;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_ctx->ipa_initialization_complete)
+		return 0;
+
 	/*
 	 * indication whether working in MHI config or non MHI config is given
 	 * in ipa3_write which is launched before ipa3_post_init. i.e. from
@@ -4113,41 +4083,15 @@
 fail_setup_apps_pipes:
 	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
 fail_register_device:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
-	ipa_rm_exit();
-	cdev_del(&ipa3_ctx->cdev);
-	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
-	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
-	ipa3_free_dma_task_for_gsi();
 	ipa3_destroy_flt_tbl_idrs();
-	idr_destroy(&ipa3_ctx->ipa_idr);
-	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
-	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
-	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_cache);
-	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
-	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
-	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
-	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
-	iounmap(ipa3_ctx->mmio);
-	ipa3_disable_clks();
-	if (ipa3_clk)
-		clk_put(ipa3_clk);
-	ipa3_clk = NULL;
-	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
-	if (ipa3_bus_scale_table) {
-		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
-		ipa3_bus_scale_table = NULL;
-	}
-	kfree(ipa3_ctx->ctrl);
-	kfree(ipa3_ctx);
-	ipa3_ctx = NULL;
 	return result;
 }
 
+static void ipa3_post_init_wq(struct work_struct *work)
+{
+	ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+}
+
 static int ipa3_trigger_fw_loading_mdms(void)
 {
 	int result;
@@ -4249,9 +4193,10 @@
 	if (result) {
 		IPAERR("FW loading process has failed\n");
 			return result;
-	} else
-		ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
-
+	} else {
+			queue_work(ipa3_ctx->transport_power_mgmt_wq,
+				&ipa3_post_init_work);
+	}
 	return count;
 }
 
@@ -4722,20 +4667,6 @@
 		goto fail_device_create;
 	}
 
-	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
-	ipa3_ctx->cdev.owner = THIS_MODULE;
-	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
-
-	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
-	if (result) {
-		IPAERR(":cdev_add err=%d\n", -result);
-		result = -ENODEV;
-		goto fail_cdev_add;
-	}
-	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
-			MAJOR(ipa3_ctx->dev_num),
-			MINOR(ipa3_ctx->dev_num));
-
 	if (ipa3_create_nat_device()) {
 		IPAERR("unable to create nat device\n");
 		result = -ENODEV;
@@ -4793,16 +4724,28 @@
 		}
 	}
 
+	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+	ipa3_ctx->cdev.owner = THIS_MODULE;
+	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
+			MAJOR(ipa3_ctx->dev_num),
+			MINOR(ipa3_ctx->dev_num));
 	return 0;
 
+fail_cdev_add:
 fail_ipa_init_interrupts:
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
 fail_create_apps_resource:
 	ipa_rm_exit();
 fail_ipa_rm_init:
 fail_nat_dev_add:
-	cdev_del(&ipa3_ctx->cdev);
-fail_cdev_add:
 	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
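
Note: the ipa.c change above defers ipa3_post_init() to a work item queued on an existing workqueue instead of calling it directly from the firmware-load write handler. The generic shape of that pattern, DECLARE_WORK plus queue_work, is sketched below with illustrative names:

#include <linux/workqueue.h>

static void my_post_init_wq(struct work_struct *work);
static DECLARE_WORK(my_post_init_work, my_post_init_wq);

static void my_post_init_wq(struct work_struct *work)
{
	/* heavy one-time initialisation runs in process context here */
}

static void my_trigger_post_init(struct workqueue_struct *wq)
{
	/* queue_work() returns false if the work item was already pending */
	queue_work(wq, &my_post_init_work);
}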
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 0b8115f..564397a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -69,13 +69,15 @@
 	}
 
 	/* Enable the pipe */
-	if (IPA_CLIENT_IS_CONS(ep->client) &&
-	    (ep->keep_ipa_awake ||
-	     ipa3_ctx->resume_on_connect[ep->client] ||
-	     !ipa3_should_pipe_be_suspended(ep->client))) {
-		memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
-		ep_cfg_ctrl.ipa_ep_suspend = false;
-		res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		if (IPA_CLIENT_IS_CONS(ep->client) &&
+		    (ep->keep_ipa_awake ||
+		    ipa3_ctx->resume_on_connect[ep->client] ||
+		    !ipa3_should_pipe_be_suspended(ep->client))) {
+			memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = false;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		}
 	}
 
 	return res;
@@ -97,33 +99,41 @@
 		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
 	}
 
-	/* Suspend the pipe */
-	if (IPA_CLIENT_IS_CONS(ep->client)) {
-		/*
-		 * for RG10 workaround uC needs to be loaded before pipe can
-		 * be suspended in this case.
-		 */
-		if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
-			IPADBG("uC is not loaded yet, waiting...\n");
-			res = wait_for_completion_timeout(
-				&ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
-			if (res == 0)
-				IPADBG("timeout waiting for uC to load\n");
+	/*
+	 * for IPA 4.0 and above, the aggregation frame is closed together
+	 * with the channel STOP
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the pipe */
+		if (IPA_CLIENT_IS_CONS(ep->client)) {
+			/*
+			 * for RG10 workaround uC needs to be loaded before
+			 * pipe can be suspended in this case.
+			 */
+			if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+				IPADBG("uC is not loaded yet, waiting...\n");
+				res = wait_for_completion_timeout(
+					&ipa3_ctx->uc_loaded_completion_obj,
+					60 * HZ);
+				if (res == 0)
+					IPADBG("timeout waiting for uC load\n");
+			}
+
+			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		}
 
-		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-		ep_cfg_ctrl.ipa_ep_suspend = true;
-		res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
-	}
-
-	udelay(IPA_PKT_FLUSH_TO_US);
-	ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
-	if (ep_aggr.aggr_en) {
-		res = ipa3_tag_aggr_force_close(clnt_hdl);
-		if (res) {
-			IPAERR("tag process timeout, client:%d err:%d\n",
-				   clnt_hdl, res);
-			BUG();
+		udelay(IPA_PKT_FLUSH_TO_US);
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+			&ep_aggr);
+		if (ep_aggr.aggr_en) {
+			res = ipa3_tag_aggr_force_close(clnt_hdl);
+			if (res) {
+				IPAERR("tag process timeout client:%d err:%d\n",
+					clnt_hdl, res);
+				ipa_assert();
+			}
 		}
 	}
 
@@ -1257,10 +1267,12 @@
 		goto disable_clk_and_exit;
 	}
 
-	/* Suspend the DL/DPL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = true;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
 
 	/*
 	 * Check if DL/DPL channel is empty again, data could enter the channel
@@ -1275,6 +1287,14 @@
 		goto unsuspend_dl_and_exit;
 	}
 
+	/* Stop DL channel */
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping DL/DPL channel: %d\n", result);
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
 	/* STOP UL channel */
 	if (!is_dpl) {
 		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
@@ -1283,7 +1303,7 @@
 		if (result) {
 			IPAERR("Error stopping UL channel: result = %d\n",
 				result);
-			goto unsuspend_dl_and_exit;
+			goto start_dl_and_exit;
 		}
 	}
 
@@ -1292,11 +1312,15 @@
 	IPADBG("exit\n");
 	return 0;
 
+start_dl_and_exit:
+	gsi_start_channel(dl_ep->gsi_chan_hdl);
 unsuspend_dl_and_exit:
-	/* Unsuspend the DL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = false;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
 disable_clk_and_exit:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
 	return result;
@@ -1340,7 +1364,8 @@
 
 int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
 {
-	struct ipa3_ep_context *ul_ep, *dl_ep;
+	struct ipa3_ep_context *ul_ep = NULL;
+	struct ipa3_ep_context *dl_ep = NULL;
 	enum gsi_status gsi_res;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 
@@ -1360,10 +1385,17 @@
 		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
 
-	/* Unsuspend the DL/DPL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = false;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/* Start DL channel */
+	gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS)
+		IPAERR("Error starting DL channel: %d\n", gsi_res);
 
 	/* Start UL channel */
 	if (!is_dpl) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 1ee8ec8..1634b1c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -906,6 +906,10 @@
 			pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
 				entry->rule.hashable, entry->rule_id,
 				entry->rule.max_prio, entry->prio);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				pr_err("pdn index %d, set metadata %d ",
+					entry->rule.pdn_idx,
+					entry->rule.set_metadata);
 			if (eq)
 				ipa3_attrib_dump_eq(
 					&entry->rule.eq_attrib);
@@ -968,6 +972,10 @@
 				bitmap, rules[rl].rule.retain_hdr);
 			pr_err("rule_id:%u prio:%u ",
 				rules[rl].id, rules[rl].priority);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				pr_err("pdn: %u, set_metadata: %u ",
+					rules[rl].rule.pdn_idx,
+					rules[rl].rule.set_metadata);
 			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
 		}
 
@@ -992,6 +1000,10 @@
 				bitmap, rules[rl].rule.retain_hdr);
 			pr_err("rule_id:%u  prio:%u ",
 				rules[rl].id, rules[rl].priority);
+			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+				pr_err("pdn: %u, set_metadata: %u ",
+					rules[rl].rule.pdn_idx,
+					rules[rl].rule.set_metadata);
 			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
 		}
 		pr_err("\n");
@@ -1502,6 +1514,7 @@
 	u32 value, i, j, rule_id;
 	u16 enable, tbl_entry, flag;
 	u32 no_entrys = 0;
+	struct ipa_pdn_entry *pdn_table = ipa3_ctx->nat_mem.pdn_mem.base;
 
 	mutex_lock(&ipa3_ctx->nat_mem.lock);
 	value = ipa3_ctx->nat_mem.public_ip_addr;
@@ -1512,6 +1525,15 @@
 				((value & 0x0000FF00) >> 8),
 				((value & 0x000000FF)));
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		for (i = 0; i < IPA_MAX_PDN_NUM; i++) {
+			pr_err(
+				"PDN %d: ip 0x%X, src_metadata 0x%X, dst_metadata 0x%X\n",
+				i, pdn_table[i].public_ip,
+				pdn_table[i].src_metadata,
+				pdn_table[i].dst_metadata);
+		}
+
 	pr_err("Table Size:%d\n",
 				ipa3_ctx->nat_mem.size_base_tables);
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 04d807f..915f2b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -267,7 +267,6 @@
 	int i = 0;
 	int j;
 	int result;
-	int fail_dma_wrap = 0;
 	u32 mem_flag = GFP_ATOMIC;
 	const struct ipa_gsi_ep_config *gsi_ep_cfg;
 
@@ -298,7 +297,6 @@
 	spin_lock_bh(&sys->spinlock);
 
 	for (i = 0; i < num_desc; i++) {
-		fail_dma_wrap = 0;
 		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
 					   mem_flag);
 		if (!tx_pkt) {
@@ -319,7 +317,7 @@
 			if (ipa_populate_tag_field(&desc[i], tx_pkt,
 				&tag_pyld_ret)) {
 				IPAERR("Failed to populate tag field\n");
-				goto failure;
+				goto failure_dma_map;
 			}
 		}
 
@@ -335,11 +333,6 @@
 					tx_pkt->mem.base,
 					tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-				if (!tx_pkt->mem.phys_base) {
-					IPAERR("failed to do dma map.\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
 			} else {
 					tx_pkt->mem.phys_base =
 						desc[i].dma_address;
@@ -355,17 +348,17 @@
 					desc[i].frag,
 					0, tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-				if (!tx_pkt->mem.phys_base) {
-					IPAERR("dma map failed\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
 			} else {
 				tx_pkt->mem.phys_base =
 					desc[i].dma_address;
 				tx_pkt->no_unmap_dma = true;
 			}
 		}
+		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+			IPAERR("failed to do dma map.\n");
+			goto failure_dma_map;
+		}
+
 		tx_pkt->sys = sys;
 		tx_pkt->callback = desc[i].callback;
 		tx_pkt->user1 = desc[i].user1;
@@ -426,28 +419,31 @@
 
 	return 0;
 
+failure_dma_map:
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
 failure:
 	ipahal_destroy_imm_cmd(tag_pyld_ret);
 	tx_pkt = tx_pkt_first;
 	for (j = 0; j < i; j++) {
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
-			dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
-		} else {
-			dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
+
+		if (!tx_pkt->no_unmap_dma) {
+			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size, DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
 		}
 		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
 		tx_pkt = next_pkt;
 	}
-	if (j < num_desc)
-		/* last desc failed */
-		if (fail_dma_wrap)
-			kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
 
 	kfree(gsi_xfer_elem_array);
 
@@ -1444,8 +1440,7 @@
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 						     sys->rx_buff_sz,
 						     DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
 			       __func__, (void *)rx_pkt->data.dma_addr,
 			       ptr, sys);
@@ -1605,8 +1600,7 @@
 		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			IPAERR("dma_map_single failure %p for %p\n",
 			       (void *)rx_pkt->data.dma_addr, ptr);
 			goto fail_dma_mapping;
@@ -1676,8 +1670,7 @@
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 						     sys->rx_buff_sz,
 						     DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			IPAERR("dma_map_single failure %p for %p\n",
 			       (void *)rx_pkt->data.dma_addr, ptr);
 			goto fail_dma_mapping;
@@ -1764,8 +1757,8 @@
 			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
 				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
 				IPAERR("dma_map_single failure %p for %p\n",
 					(void *)rx_pkt->data.dma_addr, ptr);
 				goto fail_dma_mapping;
@@ -1780,8 +1773,8 @@
 			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
 				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
 				IPAERR("dma_map_single failure %p for %p\n",
 					(void *)rx_pkt->data.dma_addr, ptr);
 				goto fail_dma_mapping;
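
Note: the ipa_dp.c hunks above replace the open-coded "dma_addr == 0 || dma_addr == ~0" tests with dma_mapping_error(), which is the only check the DMA API guarantees to be correct for a mapping failure. A small sketch of the map/check/unmap pattern; the device and length parameters are assumptions for illustration:

#include <linux/dma-mapping.h>

static int my_map_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *dma_out)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* never compare the handle against 0 or ~0 directly */
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*dma_out = dma;
	return 0;
}

static void my_unmap_buffer(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}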
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index d0ed782..bfcaa2b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -755,6 +755,23 @@
 		}
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		if (rule->pdn_idx) {
+			if (rule->action == IPA_PASS_TO_EXCEPTION ||
+				rule->action == IPA_PASS_TO_ROUTING) {
+				IPAERR(
+					"PDN index should be 0 when action is not pass to NAT\n");
+				goto error;
+			} else {
+				if (rule->pdn_idx >= IPA_MAX_PDN_NUM) {
+					IPAERR("PDN index %d is too large\n",
+						rule->pdn_idx);
+					goto error;
+				}
+			}
+		}
+	}
+
 	if (rule->rule_id) {
 		if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
 			IPAERR("invalid rule_id provided 0x%x\n"
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 410b96a..6e51472 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -373,12 +373,12 @@
 	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
 		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
 		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
-	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
-		IPAERR("hdr proc ctx table overflow\n");
-		goto bad_len;
-	}
-
 	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+			IPAERR("hdr proc ctx table overflow\n");
+			goto bad_len;
+		}
+
 		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
 					   GFP_KERNEL);
 		if (!offset) {
@@ -483,16 +483,21 @@
 	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
 		IPA_MEM_PART(apps_hdr_size_ddr);
 
-	/* if header does not fit to table, place it in DDR */
-	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
-		entry->is_hdr_proc_ctx = true;
-		entry->phys_base = dma_map_single(ipa3_ctx->pdev,
-			entry->hdr,
-			entry->hdr_len,
-			DMA_TO_DEVICE);
-	} else {
-		entry->is_hdr_proc_ctx = false;
-		if (list_empty(&htbl->head_free_offset_list[bin])) {
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		/* if header does not fit to table, place it in DDR */
+		if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+			entry->is_hdr_proc_ctx = true;
+			entry->phys_base = dma_map_single(ipa3_ctx->pdev,
+				entry->hdr,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				entry->phys_base)) {
+				IPAERR("dma_map_single failure for entry\n");
+				goto fail_dma_mapping;
+			}
+		} else {
+			entry->is_hdr_proc_ctx = false;
 			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
 						   GFP_KERNEL);
 			if (!offset) {
@@ -509,14 +514,14 @@
 			htbl->end += ipa_hdr_bin_sz[bin];
 			list_add(&offset->link,
 					&htbl->head_offset_list[bin]);
-		} else {
-			/* get the first free slot */
-			offset =
-			list_first_entry(&htbl->head_free_offset_list[bin],
-					struct ipa_hdr_offset_entry, link);
-			list_move(&offset->link, &htbl->head_offset_list[bin]);
+			entry->offset_entry = offset;
 		}
-
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		/* get the first free slot */
+		offset = list_first_entry(&htbl->head_free_offset_list[bin],
+			struct ipa_hdr_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
 		entry->offset_entry = offset;
 	}
 
@@ -565,6 +570,9 @@
 	list_del(&entry->link);
 	dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
 			entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+	entry->is_hdr_proc_ctx = false;
+
 bad_hdr_len:
 	entry->cookie = 0;
 	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
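
Note: both the ipa_v2 and ipa_v3 ipa_hdr.c changes reorder the allocation logic so that a previously freed slot is always reused first, and the table-overflow / fall-back-to-DDR decision is only taken when the free list is empty. That decision can be summarised in a small standalone helper; the names and the DDR fall-back flag below are illustrative:

#include <stdbool.h>
#include <stddef.h>

enum hdr_slot_kind { HDR_SLOT_REUSE, HDR_SLOT_GROW, HDR_SLOT_DDR, HDR_SLOT_FAIL };

/*
 * Decide where a new header of bin size 'bin_sz' goes:
 * 1. reuse a previously freed slot if one exists,
 * 2. otherwise grow the table if there is room left,
 * 3. otherwise place the header in DDR (when that fall-back is allowed).
 */
static enum hdr_slot_kind pick_hdr_slot(bool free_slot_available,
					size_t tbl_end, size_t bin_sz,
					size_t mem_size, bool ddr_allowed)
{
	if (free_slot_available)
		return HDR_SLOT_REUSE;
	if (tbl_end + bin_sz <= mem_size)
		return HDR_SLOT_GROW;
	return ddr_allowed ? HDR_SLOT_DDR : HDR_SLOT_FAIL;
}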
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 86442b1..c6d5c6e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -739,6 +739,19 @@
 };
 
 /**
+* struct ipa_pdn_entry - IPA PDN config table entry
+* @public_ip: the PDN's public ip
+* @src_metadata: the PDN's metadata to be replaced for source NAT
+* @dst_metadata: the PDN's metadata to be replaced for destination NAT
+* @resrvd: reserved field
+*/
+struct ipa_pdn_entry {
+	u32 public_ip;
+	u32 src_metadata;
+	u32 dst_metadata;
+	u32 resrvd;
+};
+/**
  * struct ipa3_nat_mem - IPA NAT memory description
  * @class: pointer to the struct class
  * @dev: the dev_t of the device
@@ -759,6 +772,7 @@
  * @size_base_tables: base table size
  * @size_expansion_tables: expansion table size
  * @public_ip_addr: ip address of nat table
+ * @pdn_mem: pdn config table SW cache memory structure
  */
 struct ipa3_nat_mem {
 	struct class *class;
@@ -784,6 +798,7 @@
 	void *tmp_vaddr;
 	dma_addr_t tmp_dma_handle;
 	bool is_tmp_mem;
+	struct ipa_mem_buffer pdn_mem;
 };
 
 /**
@@ -1580,6 +1595,8 @@
 
 int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
 
+int ipa4_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
+
 /*
  * Messaging
  */
@@ -1970,8 +1987,6 @@
 int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
 void ipa3_set_resorce_groups_min_max_limits(void);
 void ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
-			uint32_t qmap_id);
 int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
 	enum ipa_ip_type ip_type,
 	bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index e1177ca..6acc4d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -34,7 +34,6 @@
 #define NAT_TABLE_ENTRY_SIZE_BYTE 32
 #define NAT_INTEX_TABLE_ENTRY_SIZE_BYTE 4
 
-
 static int ipa3_nat_vma_fault_remap(
 	 struct vm_area_struct *vma, struct vm_fault *vmf)
 {
@@ -247,7 +246,7 @@
 int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
 {
 	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
-	int gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
 	int result;
 
 	IPADBG("passed memory size %zu\n", mem->size);
@@ -295,11 +294,44 @@
 		IPADBG("using shared(local) memory\n");
 		nat_ctx->is_sys_mem = false;
 	}
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipa_pdn_entry *pdn_entries;
+		struct ipa_mem_buffer *pdn_mem = &ipa3_ctx->nat_mem.pdn_mem;
+
+		pdn_mem->size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
+		if (IPA_MEM_PART(pdn_config_size) < pdn_mem->size) {
+			IPAERR(
+				"number of PDN entries exceeds SRAM available space\n");
+			result = -ENOMEM;
+			goto fail_alloc_pdn;
+		}
+
+		pdn_mem->base = dma_alloc_coherent(ipa3_ctx->pdev,
+			pdn_mem->size,
+			&pdn_mem->phys_base,
+			gfp_flags);
+		if (!pdn_mem->base) {
+			IPAERR("fail to allocate PDN memory\n");
+			result = -ENOMEM;
+			goto fail_alloc_pdn;
+		}
+		pdn_entries = pdn_mem->base;
+		memset(pdn_entries, 0, pdn_mem->size);
+		IPADBG("IPA NAT dev allocated PDN memory successfully\n");
+	}
 
 	nat_ctx->is_dev_init = true;
 	IPADBG("IPA NAT dev init successfully\n");
-	result = 0;
+	mutex_unlock(&nat_ctx->lock);
 
+	return 0;
+
+fail_alloc_pdn:
+	if (nat_ctx->vaddr) {
+		dma_free_coherent(ipa3_ctx->pdev, mem->size, nat_ctx->vaddr,
+			nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+	}
 bail:
 	mutex_unlock(&nat_ctx->lock);
 
@@ -320,11 +352,13 @@
 #define TBL_ENTRY_SIZE 32
 #define INDX_TBL_ENTRY_SIZE 4
 
-	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
-	struct ipa3_desc desc[2];
+	struct ipa3_desc desc[3];
 	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
-	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
-	int result;
+	int num_cmd = 0;
+	int i = 0;
+	struct ipahal_imm_cmd_pyld *cmd_pyld[3];
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+	int result = 0;
 	u32 offset = 0;
 	size_t tmp;
 
@@ -412,21 +446,22 @@
 
 	memset(&desc, 0, sizeof(desc));
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	nop_cmd_pyld =
+	cmd_pyld[num_cmd] =
 		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
-	if (!nop_cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("failed to construct NOP imm cmd\n");
 		result = -ENOMEM;
 		goto bail;
 	}
 
-	desc[0].opcode = nop_cmd_pyld->opcode;
-	desc[0].type = IPA_IMM_CMD_DESC;
-	desc[0].callback = NULL;
-	desc[0].user1 = NULL;
-	desc[0].user2 = 0;
-	desc[0].pyld = nop_cmd_pyld->data;
-	desc[0].len = nop_cmd_pyld->len;
+	desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].callback = NULL;
+	desc[num_cmd].user1 = NULL;
+	desc[num_cmd].user2 = 0;
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	num_cmd++;
 
 	if (ipa3_ctx->nat_mem.vaddr) {
 		IPADBG("using system memory for nat table\n");
@@ -453,7 +488,7 @@
 			IPAERR("index_expn_offset: 0x%x\n",
 				init->index_expn_offset);
 			result = -EPERM;
-			goto free_nop;
+			goto destroy_imm_cmd;
 		}
 		cmd.ipv4_rules_addr =
 			ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
@@ -495,25 +530,75 @@
 	IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
 	cmd.size_expansion_tables = init->expn_table_entries;
 	IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
-	cmd.public_ip_addr = init->ip_addr;
-	IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
-	cmd_pyld = ipahal_construct_imm_cmd(
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		/*
+		 * the public ip field is repurposed to store the PDN config
+		 * table base address on IPAv4
+		 */
+		cmd.public_ip_addr = IPA_MEM_PART(pdn_config_ofst);
+		IPADBG("pdn config base:0x%x\n", cmd.public_ip_addr);
+	} else {
+		cmd.public_ip_addr = init->ip_addr;
+		IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
+	}
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
-	if (!cmd_pyld) {
+	if (!cmd_pyld[num_cmd]) {
 		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
 		result = -EPERM;
-		goto free_nop;
+		goto destroy_imm_cmd;
 	}
 
-	desc[1].opcode = cmd_pyld->opcode;
-	desc[1].type = IPA_IMM_CMD_DESC;
-	desc[1].callback = NULL;
-	desc[1].user1 = NULL;
-	desc[1].user2 = 0;
-	desc[1].pyld = cmd_pyld->data;
-	desc[1].len = cmd_pyld->len;
+	desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	desc[num_cmd].callback = NULL;
+	desc[num_cmd].user1 = NULL;
+	desc[num_cmd].user2 = 0;
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	num_cmd++;
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipa_pdn_entry *pdn_entries;
+
+		/* store ip in pdn entries cache array */
+		pdn_entries = ipa3_ctx->nat_mem.pdn_mem.base;
+		pdn_entries[0].public_ip = init->ip_addr;
+		pdn_entries[0].dst_metadata = 0;
+		pdn_entries[0].src_metadata = 0;
+		pdn_entries[0].resrvd = 0;
+
+		IPADBG("Public ip address:0x%x\n", init->ip_addr);
+
+		/* Copy the PDN config table to SRAM */
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
+		mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
+		mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(pdn_config_ofst);
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR(
+			"fail to construct dma_shared_mem cmd for pdn table\n");
+			result = -ENOMEM;
+			goto destroy_imm_cmd;
+		}
+		desc[num_cmd].opcode = cmd_pyld[num_cmd]->opcode;
+		desc[num_cmd].type = IPA_IMM_CMD_DESC;
+		desc[num_cmd].callback = NULL;
+		desc[num_cmd].user1 = NULL;
+		desc[num_cmd].user2 = 0;
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		num_cmd++;
+		IPADBG("added PDN table copy cmd\n");
+	}
+
 	IPADBG("posting v4 init command\n");
-	if (ipa3_send_cmd(2, desc)) {
+	if (ipa3_send_cmd(num_cmd, desc)) {
 		IPAERR("Fail to send immediate command\n");
 		result = -EPERM;
 		goto destroy_imm_cmd;
@@ -550,16 +635,97 @@
 	ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
 
 	IPADBG("return\n");
-	result = 0;
 destroy_imm_cmd:
-	ipahal_destroy_imm_cmd(cmd_pyld);
-free_nop:
-	ipahal_destroy_imm_cmd(nop_cmd_pyld);
+	for (i = 0; i < num_cmd; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
 bail:
 	return result;
 }
 
 /**
+ * ipa4_nat_mdfy_pdn() - Modify a PDN entry in PDN config table in IPA SRAM
+ * @mdfy_pdn:	[in] PDN info to be written to SRAM
+ *
+ * Called by NAT client driver to modify an entry in the PDN config table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa4_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn)
+{
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+	struct ipa3_desc desc;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int result = 0;
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	struct ipa_pdn_entry *pdn_entries = nat_ctx->pdn_mem.base;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		IPAERR("IPA HW does not support multi PDN\n");
+		return -EPERM;
+	}
+	if (!nat_ctx->is_dev_init) {
+		IPAERR("attempt to modify a PDN entry before dev init\n");
+		return -EPERM;
+	}
+
+	if (mdfy_pdn->pdn_index > (IPA_MAX_PDN_NUM - 1)) {
+		IPAERR("pdn index out of range %d\n", mdfy_pdn->pdn_index);
+		return -EPERM;
+	}
+
+	mutex_lock(&nat_ctx->lock);
+
+	/* store ip in pdn entries cache array */
+	pdn_entries[mdfy_pdn->pdn_index].public_ip =
+		mdfy_pdn->public_ip;
+	pdn_entries[mdfy_pdn->pdn_index].dst_metadata =
+		mdfy_pdn->dst_metadata;
+	pdn_entries[mdfy_pdn->pdn_index].src_metadata =
+		mdfy_pdn->src_metadata;
+
+	IPADBG("Modify PDN in index %d: ", mdfy_pdn->pdn_index);
+	IPADBG("Public ip address:0x%x, ", mdfy_pdn->public_ip);
+	IPADBG("dst metadata:0x%x, ", mdfy_pdn->dst_metadata);
+	IPADBG("src metadata:0x%x\n", mdfy_pdn->src_metadata);
+
+	memset(&desc, 0, sizeof(desc));
+
+	/* Copy the PDN config table to SRAM */
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
+	mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base;
+	mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(pdn_config_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR(
+			"fail to construct dma_shared_mem cmd for pdn table\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	desc.opcode = cmd_pyld->opcode;
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.callback = NULL;
+	desc.user1 = NULL;
+	desc.user2 = 0;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+
+	IPADBG("sending PDN table copy cmd");
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+/**
  * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
  * @dma:	[in] initialization command attributes
  *
@@ -573,6 +739,7 @@
 
 	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
 	struct ipahal_imm_cmd_nat_dma cmd;
+	enum ipahal_imm_cmd_name cmd_name = IPA_IMM_CMD_NAT_DMA;
 	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
 	struct ipa3_desc *desc = NULL;
 	u16 size = 0, cnt = 0;
@@ -675,13 +842,16 @@
 	desc[0].pyld = nop_cmd_pyld->data;
 	desc[0].len = nop_cmd_pyld->len;
 
+	/* NAT_DMA was renamed to TABLE_DMA starting from IPAv4 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		cmd_name = IPA_IMM_CMD_TABLE_DMA;
+
 	for (cnt = 0; cnt < dma->entries; cnt++) {
 		cmd.table_index = dma->dma[cnt].table_index;
 		cmd.base_addr = dma->dma[cnt].base_addr;
 		cmd.offset = dma->dma[cnt].offset;
 		cmd.data = dma->dma[cnt].data;
-		cmd_pyld = ipahal_construct_imm_cmd(
-			IPA_IMM_CMD_NAT_DMA, &cmd, false);
+		cmd_pyld = ipahal_construct_imm_cmd(cmd_name, &cmd, false);
 		if (!cmd_pyld) {
 			IPAERR("Fail to construct nat_dma imm cmd\n");
 			continue;
@@ -718,6 +888,10 @@
  */
 void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
 {
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
+	struct ipa3_desc desc;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
 	IPADBG("\n");
 	mutex_lock(&nat_ctx->lock);
 
@@ -729,6 +903,47 @@
 		nat_ctx->size = 0;
 		nat_ctx->vaddr = NULL;
 	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+		struct ipa_pdn_entry *pdn_entries =
+			nat_ctx->pdn_mem.base;
+
+		/* zero the PDN table and copy the PDN config table to SRAM */
+		IPADBG("zeroing the PDN config table\n");
+		memset(pdn_entries, 0, sizeof(struct ipa_pdn_entry) *
+			IPA_MAX_PDN_NUM);
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
+		mem_cmd.system_addr = nat_ctx->pdn_mem.phys_base;
+		mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(pdn_config_ofst);
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld) {
+			IPAERR(
+				"fail to construct dma_shared_mem cmd for pdn table\n");
+			goto lbl_free_pdn;
+		}
+		memset(&desc, 0, sizeof(desc));
+		desc.opcode = cmd_pyld->opcode;
+		desc.pyld = cmd_pyld->data;
+		desc.len = cmd_pyld->len;
+		desc.type = IPA_IMM_CMD_DESC;
+
+		IPADBG("sending PDN table copy cmd\n");
+		if (ipa3_send_cmd(1, &desc))
+			IPAERR("Fail to send immediate command\n");
+
+		ipahal_destroy_imm_cmd(cmd_pyld);
+lbl_free_pdn:
+		IPADBG("freeing the PDN memory\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			nat_ctx->pdn_mem.size,
+			nat_ctx->pdn_mem.base,
+			nat_ctx->pdn_mem.phys_base);
+	}
 	nat_ctx->is_mapped = false;
 	nat_ctx->is_sys_mem = false;
 	nat_ctx->is_dev_init = false;
@@ -762,7 +977,8 @@
 		base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
 	}
 
-	if (del->public_ip_addr == 0) {
+	if ((ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) &&
+		(del->public_ip_addr == 0)) {
 		IPADBG("Bad Parameter\n");
 		result = -EPERM;
 		goto bail;
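The PDN handling added to ipa_nat.c above repeats one pattern in three places (table init, ipa4_nat_mdfy_pdn() and ipa3_nat_free_mem_and_device()): the SW cache held in nat_mem.pdn_mem is pushed into the SRAM PDN partition with a DMA_SHARED_MEM immediate command. A minimal sketch of that recurring step, using only structures and calls that appear in the patch; the wrapper function itself is hypothetical and not part of the change:

static int ipa4_sync_pdn_config_to_sram(void)
{
	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = { 0 };
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipa3_desc desc;
	int result = 0;

	/* describe a write of the SW cache into the SRAM PDN partition */
	mem_cmd.is_read = false;
	mem_cmd.skip_pipeline_clear = false;
	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	mem_cmd.size = sizeof(struct ipa_pdn_entry) * IPA_MAX_PDN_NUM;
	mem_cmd.system_addr = ipa3_ctx->nat_mem.pdn_mem.phys_base;
	mem_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
		IPA_MEM_PART(pdn_config_ofst);

	cmd_pyld = ipahal_construct_imm_cmd(
		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
	if (!cmd_pyld)
		return -ENOMEM;

	/* wrap the payload in a descriptor and post it to the IPA HW */
	memset(&desc, 0, sizeof(desc));
	desc.opcode = cmd_pyld->opcode;
	desc.type = IPA_IMM_CMD_DESC;
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;

	if (ipa3_send_cmd(1, &desc))
		result = -EPERM;

	ipahal_destroy_imm_cmd(cmd_pyld);
	return result;
}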
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 799246b..60dc04f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1600,13 +1600,15 @@
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 	if (IPA_CLIENT_IS_CONS(ep->client)) {
-		ep_cfg_ctrl.ipa_ep_suspend = true;
-		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
-		if (result)
-			IPAERR("client (ep: %d) failed to suspend result=%d\n",
-					clnt_hdl, result);
-		else
-			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			if (result)
+				IPAERR("(ep: %d) failed to suspend result=%d\n",
+						clnt_hdl, result);
+			else
+				IPADBG("(ep: %d) suspended\n", clnt_hdl);
+		}
 	} else {
 		ep_cfg_ctrl.ipa_ep_delay = true;
 		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index ab26893..dbe6cd6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1631,6 +1631,16 @@
 
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
+	/*
+	 * Starting with IPA 4.0 a pipe can no longer be suspended. Instead,
+	 * the corresponding GSI channel should be stopped. Usually the client
+	 * driver takes care of stopping the channel. For client drivers that
+	 * do not stop the channel, IPA RM will do so based on
+	 * ipa3_should_pipe_channel_be_stopped().
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		return false;
+
 	if (ep->keep_ipa_awake)
 		return false;
 
@@ -1651,6 +1661,41 @@
 }
 
 /**
+ * ipa3_should_pipe_channel_be_stopped() - returns true when the client's
+ * channel should be stopped during a power save scenario. False otherwise.
+ * Most clients already stop the GSI channel on suspend and are not included
+ * in the list below.
+ *
+ * @client: [IN] IPA client
+ */
+static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+		return false;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS)
+		return true;
+
+	return false;
+}
+
+/**
  * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
  * resource and decrement active clients counter, which may result in clock
  * gating of IPA clocks.
@@ -1695,6 +1740,19 @@
 				pipe_suspended = true;
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* Stop GSI channel */
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed stop gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
 	}
 	/* Sleep ~1 msec */
 	if (pipe_suspended)
@@ -1761,6 +1819,12 @@
 				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			res = -EPERM;
+			goto bail;
+		}
 	}
 
 	if (res == 0) {
@@ -1824,6 +1888,19 @@
 				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				res = gsi_start_channel(
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
 	}
 
 	return res;
@@ -2185,6 +2262,11 @@
  */
 bool ipa3_get_client_uplink(int pipe_idx)
 {
+	if (pipe_idx < 0 || pipe_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("invalid pipe idx %d\n", pipe_idx);
+		return false;
+	}
+
 	return ipa3_ctx->ipacm_client[pipe_idx].uplink;
 }
 
@@ -2714,6 +2796,12 @@
 		return -EINVAL;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) {
+		IPAERR("pipe suspend is not supported\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
 	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
 		clnt_hdl,
 		ep_ctrl->ipa_ep_suspend,
@@ -4674,6 +4762,7 @@
 	struct ipa_ep_cfg_ctrl cfg;
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
+	int res;
 
 	memset(&cfg, 0, sizeof(cfg));
 	cfg.ipa_ep_suspend = suspend;
@@ -4688,7 +4777,23 @@
 	if (ep->valid) {
 		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
 			ipa_ep_idx);
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			if (suspend) {
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop LAN channel\n");
+					ipa_assert();
+				}
+			} else {
+				res = gsi_start_channel(ep->gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start LAN channel\n");
+					ipa_assert();
+				}
+			}
+		} else {
+			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		}
 		if (suspend)
 			ipa3_gsi_poll_after_suspend(ep);
 		else if (!atomic_read(&ep->sys->curr_polling_state))
@@ -4706,7 +4811,23 @@
 	if (ep->valid) {
 		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
 			ipa_ep_idx);
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			if (suspend) {
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop WAN channel\n");
+					ipa_assert();
+				}
+			} else {
+				res = gsi_start_channel(ep->gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start WAN channel\n");
+					ipa_assert();
+				}
+			}
+		} else {
+			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		}
 		if (suspend)
 			ipa3_gsi_poll_after_suspend(ep);
 		else if (!atomic_read(&ep->sys->curr_polling_state))
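Starting with IPA 4.0 the patch stops the underlying GSI channel instead of programming the pipe suspend bit, so the resource suspend/resume paths above (e.g. ipa3_suspend_resource_sync()) carry two mutually exclusive checks. A condensed sketch of that decision, assuming the pre-existing check is named ipa3_should_pipe_be_suspended(); the wrapper function below is illustrative only:

static void ipa3_power_save_client(enum ipa_client_type client, int ipa_ep_idx)
{
	struct ipa_ep_cfg_ctrl suspend;

	if (ipa3_should_pipe_be_suspended(client)) {
		/* pre-IPAv4: program the pipe suspend bit */
		memset(&suspend, 0, sizeof(suspend));
		suspend.ipa_ep_suspend = true;
		ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
	} else if (ipa3_should_pipe_channel_be_stopped(client)) {
		/* IPAv4 and later: stop the underlying GSI channel instead */
		if (ipa3_stop_gsi_channel(ipa_ep_idx))
			IPAERR("failed to stop gsi channel %d\n", ipa_ep_idx);
	}
}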
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index d35b8a7..6f46ebf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -32,6 +32,7 @@
 	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
 	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
 	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+	__stringify(IPA_IMM_CMD_TABLE_DMA),
 };
 
 static const char *ipahal_pkt_status_exception_to_str
@@ -371,6 +372,31 @@
 	return pyld;
 }
 
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_table_dma_ipav4(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_table_dma_ipav4 *data;
+	struct ipahal_imm_cmd_table_dma *nat_params =
+		(struct ipahal_imm_cmd_table_dma *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->opcode = ipahal_imm_cmd_get_opcode(cmd);
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_table_dma_ipav4 *)pyld->data;
+
+	data->table_index = nat_params->table_index;
+	data->base_addr = nat_params->base_addr;
+	data->offset = nat_params->offset;
+	data->data = nat_params->data;
+
+	return pyld;
+}
+
 static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
 	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
 {
@@ -640,6 +666,13 @@
 	[IPA_HW_v4_0][IPA_IMM_CMD_REGISTER_WRITE] = {
 		ipa_imm_cmd_construct_register_write_v_4_0,
 		12},
+	/* NAT_DMA was renamed to TABLE_DMA for IPAv4 */
+	[IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = {
+		NULL,
+		-1 },
+	[IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = {
+		ipa_imm_cmd_construct_table_dma_ipav4,
+		14},
 	[IPA_HW_v4_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
 		ipa_imm_cmd_construct_dma_shared_mem_v_4_0,
 		19},
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index e71a48b..f8bdc2c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -36,6 +36,7 @@
 	IPA_IMM_CMD_DMA_SHARED_MEM,
 	IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
 	IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+	IPA_IMM_CMD_TABLE_DMA,
 	IPA_IMM_CMD_MAX,
 };
 
@@ -204,6 +205,23 @@
 };
 
 /*
+ * struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload
+ * Perform DMA operation on NAT and IPV6 connection tracking related mem
+ * addresses. Copy data into different locations within IPV6CT and NAT
+ * associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_table_dma {
+	u8 table_index;
+	u8 base_addr;
+	u32 offset;
+	u16 data;
+};
+
+/*
  * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
  * Configuration for specific IP pkt. Shall be called prior to an IP pkt
  *  data. Pkt will not go through IP pkt processing.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 053a581..4f20e0f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -162,6 +162,8 @@
 		struct ipahal_rt_rule_entry *rule);
 static int ipa_flt_parse_hw_rule(u8 *addr,
 		struct ipahal_flt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
+		struct ipahal_flt_rule_entry *rule);
 
 #define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
 	(ARRAY_SIZE(__eq_array) <= (__eq_index))
@@ -349,6 +351,93 @@
 	return 0;
 }
 
+static int ipa_flt_gen_hw_rule_ipav4(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa4_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+
+	ipa_assert_on(params->rule->pdn_idx & ~0xF);
+	rule_hdr->u.hdr.pdn_idx = params->rule->pdn_idx;
+	rule_hdr->u.hdr.set_metadata = params->rule->set_metadata;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa4_0_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d, pdn=%d, set_metadata=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id,
+		rule_hdr->u.hdr.pdn_idx,
+		rule_hdr->u.hdr.set_metadata);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
 /*
  * This array contains the FLT/RT info for IPAv3 and later.
  * All the information on IPAv3 are statically defined below.
@@ -401,6 +490,50 @@
 			[IPA_IS_FRAG]			= 15,
 		},
 	},
+
+	/* IPAv4 */
+	[IPA_HW_v4_0] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule_ipav4,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule_ipav4,
+		{
+			[IPA_TOS_EQ] = 0,
+			[IPA_PROTOCOL_EQ] = 1,
+			[IPA_TC_EQ] = 2,
+			[IPA_OFFSET_MEQ128_0] = 3,
+			[IPA_OFFSET_MEQ128_1] = 4,
+			[IPA_OFFSET_MEQ32_0] = 5,
+			[IPA_OFFSET_MEQ32_1] = 6,
+			[IPA_IHL_OFFSET_MEQ32_0] = 7,
+			[IPA_IHL_OFFSET_MEQ32_1] = 8,
+			[IPA_METADATA_COMPARE] = 9,
+			[IPA_IHL_OFFSET_RANGE16_0] = 10,
+			[IPA_IHL_OFFSET_RANGE16_1] = 11,
+			[IPA_IHL_OFFSET_EQ_32] = 12,
+			[IPA_IHL_OFFSET_EQ_16] = 13,
+			[IPA_FL_EQ] = 14,
+			[IPA_IS_FRAG] = 15,
+		},
+	},
 };
 
 static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
@@ -2276,6 +2409,55 @@
 		atrb, &rule->rule_size);
 }
 
+static int ipa_flt_parse_hw_rule_ipav4(u8 *addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa4_0_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa4_0_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+	rule->rule.pdn_idx = rule_hdr->u.hdr.pdn_idx;
+	rule->rule.set_metadata = rule_hdr->u.hdr.set_metadata;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
 /*
  * ipahal_fltrt_init() - Build the FLT/RT information table
  *  See ipahal_fltrt_objs[] comments
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
index 0c0637d..645383a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -137,6 +137,43 @@
 	} u;
 };
 
+/**
+ * struct ipa4_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: set to add back to the packet the header removed as part
+ *  of header removal. This is done by the header insertion block.
+ * @pdn_idx: for the go-to-src-NAT action, the PDN index to be passed to
+ *  the NAT block
+ * @set_metadata: enable metadata replacement in the NAT block
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa4_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule : 16;
+			u64 action : 5;
+			u64 rt_tbl_idx : 5;
+			u64 retain_hdr : 1;
+			u64 pdn_idx : 4;
+			u64 set_metadata : 1;
+			u64 priority : 10;
+			u64 rsvd2 : 6;
+			u64 rule_id : 10;
+			u64 rsvd3 : 6;
+		} hdr;
+	} u;
+};
+
 int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
 void ipahal_fltrt_destroy(void);
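The new IPAv4 rule header keeps the whole rule header in a single 64-bit word: 16 + 5 + 5 + 1 + 4 + 1 + 10 + 6 + 10 + 6 = 64 bits, which is why the IPAv4 entry in the FLT/RT info table above can keep reusing the IPA3_0_* width and alignment constants. A compile-time guard for that invariant, as a sketch only (not part of the patch) that could sit in ipahal_fltrt_init():

	/* the IPAv4 filter rule header must still pack into one 64-bit word */
	BUILD_BUG_ON(sizeof(struct ipa4_0_flt_rule_hw_hdr) != sizeof(u64));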
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 804c554..c023082 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -143,7 +143,8 @@
  * @size_expansion_tables: Num of entries in NAT expantion tbl and expantion
  *  idx tbl (each)
  * @rsvd2: reserved
- * @public_ip_addr: public IP address
+ * @public_ip_addr: public IP address. for IPAv4 this is the PDN config table
+ *  offset in SMEM
  */
 struct ipa_imm_cmd_hw_ip_v4_nat_init {
 	u64 ipv4_rules_addr:64;
@@ -250,6 +251,30 @@
 };
 
 /*
+ * struct ipa_imm_cmd_hw_table_dma_ipav4 - TABLE_DMA command payload
+ *  in H/W format
+ * Perform DMA operation on NAT and ipv6 connection tracking related mem
+ * addresses. Copy data into different locations within NAT associated tbls
+ * (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_table_dma_ipav4 {
+	u64 table_index : 3;
+	u64 rsvd1 : 1;
+	u64 base_addr : 3;
+	u64 rsvd2 : 1;
+	u64 offset : 32;
+	u64 data : 16;
+	u64 rsvd3 : 8;
+};
+
+/*
  * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
  *  in H/W format.
  * Inits hdr table within sys mem with the hdrs and their length.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1a119b9..3019e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -688,6 +688,19 @@
 		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
 }
 
+static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	WARN_ON(ep_ctrl->ipa_ep_suspend);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
 static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1444,6 +1457,9 @@
 		ipareg_parse_hps_queue_weights, 0x000005a4, 0},
 
 	/* IPAv4.0 */
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+		0x00000800, 0x70 },
 	[IPA_HW_v4_0][IPA_TX_CFG] = {
 		ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
 		0x000001FC, 0},
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index b198348..f408f23 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1507,25 +1507,13 @@
 		break;
 	/*  Flow enable  */
 	case RMNET_IOCTL_FLOW_ENABLE:
-		IPAWANDBG("Received flow enable\n");
-		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
-			sizeof(struct rmnet_ioctl_data_s))) {
-			rc = -EFAULT;
-			break;
-		}
-		ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
-			ioctl_data.u.tcm_handle);
+		IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n");
+		rc = -EFAULT;
 		break;
 	/*  Flow disable  */
 	case RMNET_IOCTL_FLOW_DISABLE:
-		IPAWANDBG("Received flow disable\n");
-		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
-			sizeof(struct rmnet_ioctl_data_s))) {
-			rc = -EFAULT;
-			break;
-		}
-		ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
-			ioctl_data.u.tcm_handle);
+		IPAWANERR("RMNET_IOCTL_FLOW_DISABLE not supported\n");
+		rc = -EFAULT;
 		break;
 	/*  Set flow handle  */
 	case RMNET_IOCTL_FLOW_SET_HNDL:
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 7d8d002..e76ff14 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -40,9 +40,6 @@
 #define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
 
 #define WIGIG_ENABLE_DELAY	50
-#define PM_OPT_SUSPEND (MSM_PCIE_CONFIG_NO_CFG_RESTORE | \
-			MSM_PCIE_CONFIG_LINKDOWN)
-#define PM_OPT_RESUME MSM_PCIE_CONFIG_NO_CFG_RESTORE
 
 #define WIGIG_SUBSYS_NAME	"WIGIG"
 #define WIGIG_RAMDUMP_SIZE    0x200000 /* maximum ramdump size */
@@ -127,6 +124,8 @@
 	bool use_cpu_boost;
 	bool is_cpu_boosted;
 	struct cpumask boost_cpu;
+
+	bool keep_radio_on_during_sleep;
 };
 
 static LIST_HEAD(dev_list);
@@ -523,30 +522,8 @@
 	return rc;
 }
 
-static int ops_suspend(void *handle, bool keep_device_power)
+static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
 {
-	int rc;
-	struct msm11ad_ctx *ctx = handle;
-	struct pci_dev *pcidev;
-
-	pr_info("%s(%p)\n", __func__, handle);
-	if (!ctx) {
-		pr_err("No context\n");
-		return -ENODEV;
-	}
-	pcidev = ctx->pcidev;
-	rc = pci_save_state(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
-		return rc;
-	}
-	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
-				 pcidev, NULL, PM_OPT_SUSPEND);
-	if (rc) {
-		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
-			rc);
-		return rc;
-	}
 	if (ctx->gpio_en >= 0)
 		gpio_direction_output(ctx->gpio_en, 0);
 
@@ -557,20 +534,12 @@
 
 	msm_11ad_disable_vregs(ctx);
 
-	return rc;
+	return 0;
 }
 
-static int ops_resume(void *handle, bool device_powered_on)
+static int msm_11ad_turn_device_power_on(struct msm11ad_ctx *ctx)
 {
 	int rc;
-	struct msm11ad_ctx *ctx = handle;
-	struct pci_dev *pcidev;
-
-	pr_info("%s(%p)\n", __func__, handle);
-	if (!ctx) {
-		pr_err("No context\n");
-		return -ENODEV;
-	}
 
 	rc = msm_11ad_enable_vregs(ctx);
 	if (rc) {
@@ -588,25 +557,124 @@
 	if (ctx->sleep_clk_en >= 0)
 		gpio_direction_output(ctx->sleep_clk_en, 1);
 
-	pcidev = ctx->pcidev;
 	if (ctx->gpio_en >= 0) {
 		gpio_direction_output(ctx->gpio_en, 1);
 		msleep(WIGIG_ENABLE_DELAY);
 	}
 
+	return 0;
+
+err_disable_vregs:
+	msm_11ad_disable_vregs(ctx);
+	return rc;
+}
+
+static int msm_11ad_suspend_power_off(void *handle)
+{
+	int rc;
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+
+	pr_debug("%s\n", __func__);
+
+	if (!ctx) {
+		pr_err("%s: No context\n", __func__);
+		return -ENODEV;
+	}
+
+	pcidev = ctx->pcidev;
+
+	msm_pcie_shadow_control(ctx->pcidev, 0);
+
+	rc = pci_save_state(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+		goto out;
+	}
+	ctx->pristine_state = pci_store_saved_state(pcidev);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+			rc);
+		goto out;
+	}
+
+	rc = msm_11ad_turn_device_power_off(ctx);
+
+out:
+	return rc;
+}
+
+static int ops_suspend(void *handle, bool keep_device_power)
+{
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+	int rc;
+
+	pr_debug("11ad suspend: %s\n", __func__);
+	if (!ctx) {
+		pr_err("11ad suspend: No context\n");
+		return -ENODEV;
+	}
+
+	if (!keep_device_power)
+		return msm_11ad_suspend_power_off(handle);
+
+	pcidev = ctx->pcidev;
+
+	msm_pcie_shadow_control(pcidev, 0);
+
+	dev_dbg(ctx->dev, "disable device and save config\n");
+	pci_disable_device(pcidev);
+	pci_save_state(pcidev);
+	ctx->pristine_state = pci_store_saved_state(pcidev);
+	dev_dbg(ctx->dev, "moving to D3\n");
+	pci_set_power_state(pcidev, PCI_D3hot);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc)
+		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+			rc);
+
+	return rc;
+}
+
+static int msm_11ad_resume_power_on(void *handle)
+{
+	int rc;
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+
+	pr_debug("%s\n", __func__);
+
+	if (!ctx) {
+		pr_err("%s: No context\n", __func__);
+		return -ENODEV;
+	}
+	pcidev = ctx->pcidev;
+
+	rc = msm_11ad_turn_device_power_on(ctx);
+	if (rc)
+		return rc;
+
 	rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
-				 pcidev, NULL, PM_OPT_RESUME);
+				 pcidev, NULL, 0);
 	if (rc) {
 		dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
 			rc);
 		goto err_disable_power;
 	}
-	rc = msm_pcie_recover_config(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "msm_pcie_recover_config failed :%d\n",
-			rc);
-		goto err_suspend_rc;
-	}
+
+	pci_set_power_state(pcidev, PCI_D0);
+
+	if (ctx->pristine_state)
+		pci_load_saved_state(ctx->pcidev, ctx->pristine_state);
+	pci_restore_state(ctx->pcidev);
+
+	msm_pcie_shadow_control(ctx->pcidev, 1);
 
 	/* Disable L1, in case it is enabled */
 	if (ctx->l1_enabled_in_enum) {
@@ -622,18 +690,54 @@
 
 err_suspend_rc:
 	msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
-			    pcidev, NULL, PM_OPT_SUSPEND);
+			    pcidev, NULL, 0);
 err_disable_power:
-	if (ctx->gpio_en >= 0)
-		gpio_direction_output(ctx->gpio_en, 0);
+	msm_11ad_turn_device_power_off(ctx);
+	return rc;
+}
 
-	if (ctx->sleep_clk_en >= 0)
-		gpio_direction_output(ctx->sleep_clk_en, 0);
+static int ops_resume(void *handle, bool device_powered_on)
+{
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+	int rc;
 
-	msm_11ad_disable_clocks(ctx);
-err_disable_vregs:
-	msm_11ad_disable_vregs(ctx);
+	pr_debug("11ad resume: %s\n", __func__);
+	if (!ctx) {
+		pr_err("11ad resume: No context\n");
+		return -ENODEV;
+	}
 
+	pcidev = ctx->pcidev;
+
+	if (!device_powered_on)
+		return msm_11ad_resume_power_on(handle);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
+			rc);
+		return rc;
+	}
+	pci_set_power_state(pcidev, PCI_D0);
+
+	dev_dbg(ctx->dev, "restore state and enable device\n");
+	pci_load_saved_state(pcidev, ctx->pristine_state);
+	pci_restore_state(pcidev);
+
+	rc = pci_enable_device(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_enable_device failed (%d)\n", rc);
+		goto out;
+	}
+
+	msm_pcie_shadow_control(pcidev, 1);
+
+	dev_dbg(ctx->dev, "pci set master\n");
+	pci_set_master(pcidev);
+
+out:
 	return rc;
 }
 
@@ -643,9 +747,6 @@
 	int rc;
 	int force_pt_coherent = 1;
 	int smmu_bypass = !ctx->smmu_s1_en;
-	dma_addr_t iova_base = 0;
-	dma_addr_t iova_end =  ctx->smmu_base + ctx->smmu_size - 1;
-	struct iommu_domain_geometry geometry;
 
 	if (!ctx->use_smmu)
 		return 0;
@@ -703,17 +804,6 @@
 					rc);
 				goto release_mapping;
 			}
-			memset(&geometry, 0, sizeof(geometry));
-			geometry.aperture_start = iova_base;
-			geometry.aperture_end = iova_end;
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-						   DOMAIN_ATTR_GEOMETRY,
-						   &geometry);
-			if (rc) {
-				dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
 		}
 	}
 
@@ -992,6 +1082,8 @@
 		return -EINVAL;
 	}
 	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
+	ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
+		"qcom,keep-radio-on-during-sleep");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
 
 	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
@@ -1104,13 +1196,6 @@
 		}
 	}
 
-	rc = pci_save_state(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
-		goto out_rc;
-	}
-	ctx->pristine_state = pci_store_saved_state(pcidev);
-
 	if (ctx->sleep_clk_en >= 0) {
 		rc = gpio_request(ctx->sleep_clk_en, "msm_11ad");
 		if (rc < 0) {
@@ -1146,7 +1231,7 @@
 	device_disable_async_suspend(&pcidev->dev);
 
 	list_add_tail(&ctx->list, &dev_list);
-	ops_suspend(ctx, false);
+	msm_11ad_suspend_power_off(ctx);
 
 	return 0;
 out_rc:
@@ -1236,6 +1321,17 @@
 		dev_warn(ctx->dev, "failed to set CPU boost affinity\n");
 }
 
+static void msm_11ad_clear_boost_affinity(struct msm11ad_ctx *ctx)
+{
+	int rc;
+
+	irq_modify_status(ctx->pcidev->irq, IRQ_NO_BALANCING, 0);
+	rc = irq_set_affinity_hint(ctx->pcidev->irq, NULL);
+	if (rc)
+		dev_warn(ctx->dev,
+			 "Failed to clear affinity, rc=%d\n", rc);
+}
+
 /* hooks for the wil6210 driver */
 static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
 {
@@ -1287,8 +1383,7 @@
 					dev_err(ctx->dev,
 						"Failed disable boost rc=%d\n",
 						rc);
-				irq_modify_status(ctx->pcidev->irq,
-						  IRQ_NO_BALANCING, 0);
+				msm_11ad_clear_boost_affinity(ctx);
 				dev_dbg(ctx->dev, "CPU boost disabled\n");
 			}
 			ctx->is_cpu_boosted = needs_boost;
@@ -1316,7 +1411,7 @@
 	memset(&ctx->rops, 0, sizeof(ctx->rops));
 	ctx->wil_handle = NULL;
 
-	ops_suspend(ctx, false);
+	msm_11ad_suspend_power_off(ctx);
 }
 
 static int msm_11ad_notify_crash(struct msm11ad_ctx *ctx)
@@ -1374,6 +1469,16 @@
 	return rc;
 }
 
+static bool ops_keep_radio_on_during_sleep(void *handle)
+{
+	struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+	pr_debug("%s: keep radio on during sleep is %s\n", __func__,
+		 ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
+
+	return ctx->keep_radio_on_during_sleep;
+}
+
 void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
 			const struct wil_platform_rops *rops, void *wil_handle)
 {
@@ -1413,6 +1518,7 @@
 	ops->resume = ops_resume;
 	ops->uninit = ops_uninit;
 	ops->notify = ops_notify;
+	ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
 
 	return ctx;
 }
@@ -1429,19 +1535,9 @@
 		return -EINVAL;
 	}
 
-	if (ctx->pristine_state) {
-		/* in old kernels, pci_load_saved_state() is not exported;
-		 * so use pci_load_and_free_saved_state()
-		 * and re-allocate ctx->saved_state again
-		 */
-		pci_load_and_free_saved_state(ctx->pcidev,
-					      &ctx->pristine_state);
-		ctx->pristine_state = pci_store_saved_state(ctx->pcidev);
-	}
-
 	ctx->subsys_handle = subsystem_get(ctx->subsysdesc.name);
 
-	return ops_resume(ctx, false);
+	return msm_11ad_resume_power_on(ctx);
 }
 EXPORT_SYMBOL(msm_11ad_modinit);
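With this change wil6210 can keep the radio powered across platform suspend: the new qcom,keep-radio-on-during-sleep DT flag is reported through the keep_radio_on_during_sleep ops hook, and ops_suspend()/ops_resume() branch on whether device power is kept. A condensed sketch of the suspend side only, restating the code above (no new API is introduced here):

static int ops_suspend(void *handle, bool keep_device_power)
{
	struct msm11ad_ctx *ctx = handle;
	struct pci_dev *pcidev = ctx->pcidev;

	if (!keep_device_power)
		/* radio off: save PCI state, suspend the link, cut power */
		return msm_11ad_suspend_power_off(ctx);

	/* radio stays on: keep supplies, put the device in D3hot only */
	pci_disable_device(pcidev);
	pci_save_state(pcidev);
	ctx->pristine_state = pci_store_saved_state(pcidev);
	pci_set_power_state(pcidev, PCI_D3hot);

	return msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
				   pcidev, NULL, 0);
}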
 
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index cd76ca2..8c43c4e 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -310,6 +310,7 @@
 	POWER_SUPPLY_ATTR(ctm_current_max),
 	POWER_SUPPLY_ATTR(hw_current_max),
 	POWER_SUPPLY_ATTR(real_type),
+	POWER_SUPPLY_ATTR(pr_swap),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 4ecf9a5..8641a45 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -39,6 +39,8 @@
 #define PL_VOTER			"PL_VOTER"
 #define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -53,7 +55,8 @@
 	struct votable		*pl_awake_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
 	struct votable		*usb_icl_votable;
-	struct work_struct	status_change_work;
+	struct votable		*pl_enable_votable_indirect;
+	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct delayed_work	pl_taper_work;
 	struct power_supply	*main_psy;
@@ -491,6 +494,7 @@
 }
 
 #define ICL_STEP_UA	25000
+#define PL_DELAY_MS     3000
 static int usb_icl_vote_callback(struct votable *votable, void *data,
 			int icl_ua, const char *client)
 {
@@ -512,6 +516,21 @@
 	 */
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
 
+	/*
+	 * if (ICL <= 1400mA)
+	 *	disable parallel charger using USBIN_I_VOTER
+	 * else
+	 *	do not re-enable here; rely on status_change_work
+	 *	(triggered on AICL completion or scheduled from here)
+	 *	to vote on USBIN_I_VOTER based on the settled current.
+	 */
+	if (icl_ua <= 1400000)
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		schedule_delayed_work(&chip->status_change_work,
+						msecs_to_jiffies(PL_DELAY_MS));
+
 	/* rerun AICL */
 	/* get the settled current */
 	rc = power_supply_get_property(chip->main_psy,
@@ -532,8 +551,6 @@
 		power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CURRENT_MAX,
 				&pval);
-		/* wait for ICL change */
-		msleep(100);
 	}
 
 	/* set the effective ICL */
@@ -541,9 +558,6 @@
 	power_supply_set_property(chip->main_psy,
 			POWER_SUPPLY_PROP_CURRENT_MAX,
 			&pval);
-	if (rerun_aicl)
-		/* wait for ICL change */
-		msleep(100);
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
@@ -643,6 +657,16 @@
 	return 0;
 }
 
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+			void *data, int pl_enable, const char *client)
+{
+	struct pl_data *chip = data;
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+	return 0;
+}
+
 static int pl_awake_vote_callback(struct votable *votable,
 			void *data, int awake, const char *client)
 {
@@ -775,6 +799,42 @@
 	union power_supply_propval pval = {0, };
 	int new_total_settled_ua;
 	int rc;
+	int main_settled_ua;
+	int main_limited;
+	int total_current_ua;
+
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+	/*
+	 * call aicl split only when USBIN_USBIN and enabled
+	 * and if aicl changed
+	 */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_settled_ua = pval.intval;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_limited = pval.intval;
+
+	if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+			|| (main_settled_ua == 0)
+			|| ((total_current_ua >= 0) &&
+				(total_current_ua <= 1400000)))
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
 
 	if (get_effective_result(chip->pl_disable_votable))
 		return;
@@ -783,17 +843,10 @@
 			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
 		/*
 		 * call aicl split only when USBIN_USBIN and enabled
-		 * and if aicl changed
+		 * and if settled current has changed by more than 300mA
 		 */
-		rc = power_supply_get_property(chip->main_psy,
-				       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
-				       &pval);
-		if (rc < 0) {
-			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
-			return;
-		}
 
-		new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+		new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
 		pl_dbg(chip, PR_PARALLEL,
 			"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
 			chip->total_settled_ua, pval.intval,
@@ -840,7 +893,7 @@
 static void status_change_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work,
-			struct pl_data, status_change_work);
+			struct pl_data, status_change_work.work);
 
 	if (!chip->main_psy && is_main_available(chip)) {
 		/*
@@ -878,7 +931,7 @@
 	if ((strcmp(psy->desc->name, "parallel") == 0)
 	    || (strcmp(psy->desc->name, "battery") == 0)
 	    || (strcmp(psy->desc->name, "main") == 0))
-		schedule_work(&chip->status_change_work);
+		schedule_delayed_work(&chip->status_change_work, 0);
 
 	return NOTIFY_OK;
 }
@@ -899,7 +952,7 @@
 
 static int pl_determine_initial_status(struct pl_data *chip)
 {
-	status_change_work(&chip->status_change_work);
+	status_change_work(&chip->status_change_work.work);
 	return 0;
 }
 
@@ -968,7 +1021,18 @@
 		goto destroy_votable;
 	}
 
-	INIT_WORK(&chip->status_change_work, status_change_work);
+	chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+					VOTE_SET_ANY,
+					pl_enable_indirect_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_enable_votable_indirect)) {
+		rc = PTR_ERR(chip->pl_enable_votable_indirect);
+		return rc;
+	}
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
 	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
 	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
 
@@ -1001,6 +1065,7 @@
 unreg_notifier:
 	power_supply_unreg_notifier(&chip->nb);
 destroy_votable:
+	destroy_votable(chip->pl_enable_votable_indirect);
 	destroy_votable(chip->pl_awake_votable);
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
@@ -1020,11 +1085,12 @@
 	if (chip == NULL)
 		return;
 
-	cancel_work_sync(&chip->status_change_work);
+	cancel_delayed_work_sync(&chip->status_change_work);
 	cancel_delayed_work_sync(&chip->pl_taper_work);
 	cancel_work_sync(&chip->pl_disable_forever_work);
 
 	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_enable_votable_indirect);
 	destroy_votable(chip->pl_awake_votable);
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
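The parallel-charger gating added in battery.c is two-level: clients vote on the new PL_ENABLE_INDIRECT votable (VOTE_SET_ANY), and pl_enable_indirect_vote_callback() translates the aggregate into a PL_INDIRECT_VOTER vote on the existing pl_disable_votable. A short usage sketch with the voter names from this patch (the calls below restate the flow; they are not additional driver code):

	/* default set at init: parallel charging stays disabled until
	 * some client enables it indirectly
	 */
	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);

	/* ICL settles above 1.4 A: allow parallel charging */
	vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
	/* -> pl_enable_indirect_vote_callback() runs with pl_enable == 1
	 *    and votes: vote(pl_disable_votable, PL_INDIRECT_VOTER, false, 0)
	 */

	/* ICL at or below 1.4 A: gate parallel charging again */
	vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);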
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e3ecf49..73d54c6 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -491,7 +491,7 @@
 	int i, mask = 0xff;
 	int64_t temp;
 
-	temp = DIV_ROUND_CLOSEST(val * sp[id].numrtr, sp[id].denmtr);
+	temp = (int64_t)div_s64((s64)val * sp[id].numrtr, sp[id].denmtr);
 	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
 	for (i = 0; i < sp[id].len; i++) {
 		buf[i] = temp & mask;
@@ -1320,9 +1320,16 @@
 		return rc;
 	}
 
-	cc_soc_delta_pct = DIV_ROUND_CLOSEST(
-				abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
-				CC_SOC_30BIT);
+	cc_soc_delta_pct =
+		div64_s64((int64_t)(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+			CC_SOC_30BIT);
+
+	/* If the delta is < 50%, then skip processing full data */
+	if (cc_soc_delta_pct < 50) {
+		pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+		return -ERANGE;
+	}
+
 	delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
 				100);
 	chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
@@ -1392,7 +1399,6 @@
 	return rc;
 }
 
-#define FULL_SOC_RAW	255
 static void fg_cap_learning_update(struct fg_chip *chip)
 {
 	int rc, batt_soc, batt_soc_msb;
@@ -3937,7 +3943,7 @@
 }
 
 #define DEFAULT_CUTOFF_VOLT_MV		3200
-#define DEFAULT_EMPTY_VOLT_MV		2800
+#define DEFAULT_EMPTY_VOLT_MV		2850
 #define DEFAULT_RECHARGE_VOLT_MV	4250
 #define DEFAULT_CHG_TERM_CURR_MA	100
 #define DEFAULT_CHG_TERM_BASE_CURR_MA	75
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 2266a2a..becce31 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -266,8 +266,9 @@
 	debug_mask, __debug_mask, int, 0600
 );
 
-#define MICRO_1P5A	1500000
-#define MICRO_P1A	100000
+#define MICRO_1P5A		1500000
+#define MICRO_P1A		100000
+#define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 static int smb2_parse_dt(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -304,9 +305,6 @@
 	chip->dt.no_battery = of_property_read_bool(node,
 						"qcom,batteryless-platform");
 
-	chg->external_vconn = of_property_read_bool(node,
-						"qcom,external-vconn");
-
 	rc = of_property_read_u32(node,
 				"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
 	if (rc < 0)
@@ -400,6 +398,12 @@
 
 	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
 					"qcom,suspend-input-on-debug-batt");
+
+	rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
+					&chg->otg_delay_ms);
+	if (rc < 0)
+		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+
 	return 0;
 }
 
@@ -428,6 +432,7 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -450,8 +455,7 @@
 		if (!val->intval)
 			break;
 
-		rc = smblib_get_prop_typec_mode(chg, val);
-		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
 			chg->micro_usb_mode) &&
 			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			val->intval = 0;
@@ -488,7 +492,7 @@
 		else if (chip->bad_part)
 			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
 		else
-			rc = smblib_get_prop_typec_mode(chg, val);
+			val->intval = chg->typec_mode;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
 		if (chg->micro_usb_mode)
@@ -532,6 +536,9 @@
 	case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
 		rc = smblib_get_charge_current(chg, &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_get_prop_pr_swap_in_progress(chg, val);
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -590,6 +597,9 @@
 		rc = vote(chg->usb_icl_votable, CTM_VOTER,
 						val->intval >= 0, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_set_prop_pr_swap_in_progress(chg, val);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -667,8 +677,7 @@
 		if (!val->intval)
 			break;
 
-		rc = smblib_get_prop_typec_mode(chg, val);
-		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
 			chg->micro_usb_mode) &&
 			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			val->intval = 1;
@@ -1043,7 +1052,8 @@
 		rc = smblib_get_prop_batt_voltage_now(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
-		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+		val->intval = get_client_vote(chg->fv_votable,
+				BATT_PROFILE_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
 		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
@@ -1061,7 +1071,7 @@
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		val->intval = get_client_vote(chg->fcc_votable,
-					      DEFAULT_VOTER);
+					      BATT_PROFILE_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		rc = smblib_get_prop_batt_temp(chg, val);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index b1070e8..6ead522 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -637,6 +637,7 @@
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 
 	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 
@@ -839,7 +840,6 @@
 {
 	int rc = 0;
 	bool override;
-	union power_supply_propval pval;
 
 	/* suspend and return if 25mA or less is requested */
 	if (icl_ua < USBIN_25MA)
@@ -849,14 +849,8 @@
 	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
-		goto enable_icl_changed_interrupt;
-	}
-
 	/* configure current */
-	if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+	if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
 		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
 		rc = set_sdp_current(chg, icl_ua);
 		if (rc < 0) {
@@ -864,6 +858,7 @@
 			goto enable_icl_changed_interrupt;
 		}
 	} else {
+		set_sdp_current(chg, 100000);
 		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
@@ -877,7 +872,7 @@
 	if (icl_ua == INT_MAX) {
 		/* remove override if no voters - hw defaults is desired */
 		override = false;
-	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+	} else if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
 		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			/* For std cable with type = SDP never override */
 			override = false;
@@ -917,15 +912,8 @@
 	int rc = 0;
 	u8 load_cfg;
 	bool override;
-	union power_supply_propval pval;
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
-		return rc;
-	}
-
-	if ((pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
 		|| chg->micro_usb_mode)
 		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
 		rc = get_sdp_current(chg, icl_ua);
@@ -1046,16 +1034,6 @@
 	return 0;
 }
 
-static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
-			void *data, int chg_enable, const char *client)
-{
-	struct smb_charger *chg = data;
-
-	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
-
-	return 0;
-}
-
 static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
 			void *data,
 			int hvdcp_enable, const char *client)
@@ -1212,36 +1190,13 @@
 static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	u8 otg_stat, val;
-	int rc = 0, i;
-
-	if (!chg->external_vconn) {
-		/*
-		 * Hardware based OTG soft start should complete within 1ms, so
-		 * wait for 2ms in the worst case.
-		 */
-		for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
-			usleep_range(1000, 1100);
-			rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
-			if (rc < 0) {
-				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
-									rc);
-				return rc;
-			}
-
-			if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
-				break;
-		}
-
-		if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
-			smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
-			return -EAGAIN;
-		}
-	}
+	int rc = 0;
+	u8 val;
 
 	/*
-	 * VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
-	 * for Vconn, and it should be set with reverse polarity of CC_OUT.
+	 * When enabling VCONN using the command register, the CC pin must be
+	 * selected. VCONN should be supplied to the inactive CC pin, hence the
+	 * opposite of the CC_ORIENTATION_BIT is used.
 	 */
 	smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
 	val = chg->typec_status[3] &
@@ -1262,7 +1217,7 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc = 0;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	if (chg->vconn_en)
 		goto unlock;
 
@@ -1271,7 +1226,7 @@
 		chg->vconn_en = true;
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return rc;
 }
 
@@ -1294,7 +1249,7 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc = 0;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	if (!chg->vconn_en)
 		goto unlock;
 
@@ -1303,7 +1258,7 @@
 		chg->vconn_en = false;
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return rc;
 }
 
@@ -1312,9 +1267,9 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int ret;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	ret = chg->vconn_en;
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return ret;
 }
 
@@ -1417,13 +1372,6 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc;
 
-	if (!chg->external_vconn && chg->vconn_en) {
-		smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
-		rc = _smblib_vconn_regulator_disable(rdev);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
-	}
-
 	if (chg->wa_flags & OTG_WA) {
 		/* set OTG current limit to minimum value */
 		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
@@ -1651,6 +1599,7 @@
 {
 	union power_supply_propval pval;
 	int rc;
+	int effective_fv_uv;
 	u8 stat;
 
 	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
@@ -1669,10 +1618,11 @@
 			 * If Vbatt is within 40mV above Vfloat, then don't
 			 * treat it as overvoltage.
 			 */
-			if (pval.intval >=
-				get_effective_result(chg->fv_votable) + 40000) {
+			effective_fv_uv = get_effective_result(chg->fv_votable);
+			if (pval.intval >= effective_fv_uv + 40000) {
 				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-				smblib_err(chg, "battery over-voltage\n");
+				smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+						pval.intval, effective_fv_uv);
 				goto done;
 			}
 		}
@@ -1933,38 +1883,18 @@
 		return rc;
 
 	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
-	switch (chg->smb_version) {
-	case PMI8998_SUBTYPE:
-		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
-							&settled_icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
-			return rc;
-		}
-
-		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
-				max(settled_icl_ua - chg->param.usb_icl.step_u,
-				chg->param.usb_icl.step_u));
-		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
-		break;
-	case PM660_SUBTYPE:
-		/*
-		 * Use restart_AICL instead of trigger_AICL as it runs the
-		 * complete AICL instead of starting from the last settled
-		 * value.
-		 */
-		rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
-					RESTART_AICL_BIT, RESTART_AICL_BIT);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
-									rc);
-		break;
-	default:
-		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
-				chg->smb_version);
-		return -EINVAL;
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+			&settled_icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
 	}
 
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+			max(settled_icl_ua - chg->param.usb_icl.step_u,
+				chg->param.usb_icl.step_u));
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+
 	return 0;
 }
 
@@ -1999,6 +1929,7 @@
 int smblib_dp_dm(struct smb_charger *chg, int val)
 {
 	int target_icl_ua, rc = 0;
+	union power_supply_propval pval;
 
 	switch (val) {
 	case POWER_SUPPLY_DP_DM_DP_PULSE:
@@ -2016,10 +1947,35 @@
 				rc, chg->pulse_cnt);
 		break;
 	case POWER_SUPPLY_DP_DM_ICL_DOWN:
-		chg->usb_icl_delta_ua -= 100000;
 		target_icl_ua = get_effective_result(chg->usb_icl_votable);
+		if (target_icl_ua < 0) {
+			/* no client vote, get the ICL from charger */
+			rc = power_supply_get_property(chg->usb_psy,
+					POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+					&pval);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't get max current rc=%d\n",
+					rc);
+				return rc;
+			}
+			target_icl_ua = pval.intval;
+		}
+
+		/*
+		 * Check if any other voter voted on USB_ICL; in case of a
+		 * voter other than SW_QC3_VOTER, reset and restart the
+		 * reduction again.
+		 */
+		if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+							SW_QC3_VOTER))
+			chg->usb_icl_delta_ua = 0;
+
+		chg->usb_icl_delta_ua += 100000;
 		vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
-				target_icl_ua + chg->usb_icl_delta_ua);
+						target_icl_ua - 100000);
+		smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+				target_icl_ua, chg->usb_icl_delta_ua);
 		break;
 	case POWER_SUPPLY_DP_DM_ICL_UP:
 	default:
@@ -2255,8 +2211,6 @@
 static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
 {
 	switch (chg->typec_status[0]) {
-	case 0:
-		return POWER_SUPPLY_TYPEC_NONE;
 	case UFP_TYPEC_RDSTD_BIT:
 		return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
 	case UFP_TYPEC_RD1P5_BIT:
@@ -2267,7 +2221,7 @@
 		break;
 	}
 
-	return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+	return POWER_SUPPLY_TYPEC_NONE;
 }
 
 static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
@@ -2281,8 +2235,6 @@
 		return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
 	case DFP_RD_OPEN_BIT:
 		return POWER_SUPPLY_TYPEC_SINK;
-	case DFP_RA_OPEN_BIT:
-		return POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY;
 	default:
 		break;
 	}
@@ -2290,20 +2242,12 @@
 	return POWER_SUPPLY_TYPEC_NONE;
 }
 
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
-			       union power_supply_propval *val)
+static int smblib_get_prop_typec_mode(struct smb_charger *chg)
 {
-	if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
-		val->intval = POWER_SUPPLY_TYPEC_NONE;
-		return 0;
-	}
-
 	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
-		val->intval = smblib_get_prop_dfp_mode(chg);
+		return smblib_get_prop_dfp_mode(chg);
 	else
-		val->intval = smblib_get_prop_ufp_mode(chg);
-
-	return 0;
+		return smblib_get_prop_ufp_mode(chg);
 }
 
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -2591,24 +2535,12 @@
 			      const union power_supply_propval *val)
 {
 	int rc;
-	bool orientation, cc_debounced, sink_attached, hvdcp;
+	bool orientation, sink_attached, hvdcp;
 	u8 stat;
 
 	if (!get_effective_result(chg->pd_allowed_votable))
 		return -EINVAL;
 
-	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
-		return rc;
-	}
-
-	cc_debounced = (bool)
-		(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
-	sink_attached = (bool)
-		(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
-	hvdcp = stat & QC_CHARGER_BIT;
-
 	chg->pd_active = val->intval;
 	if (chg->pd_active) {
 		vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
@@ -2660,6 +2592,14 @@
 		if (rc < 0)
 			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
 	} else {
+		rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read APSD status rc=%d\n",
+									rc);
+			return rc;
+		}
+
+		hvdcp = stat & QC_CHARGER_BIT;
 		vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
 		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
@@ -2679,8 +2619,8 @@
 		 * and data could be interrupted. Non-legacy DCP could also draw
 		 * more, but it may impact compliance.
 		 */
-		if (!chg->typec_legacy_valid && cc_debounced &&
-							!sink_attached && hvdcp)
+		sink_attached = chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT;
+		if (!chg->typec_legacy_valid && !sink_attached && hvdcp)
 			schedule_work(&chg->legacy_detection_work);
 	}
 
@@ -2802,6 +2742,7 @@
 		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
 		return rc;
 	}
+
 	ccout = (stat & CC_ATTACHED_BIT) ?
 					(!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
 	ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
@@ -3638,6 +3579,7 @@
 	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 	vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
 	vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 
 	/* reset hvdcp voters */
 	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
@@ -3668,6 +3610,11 @@
 	chg->pd_hard_reset = 0;
 	chg->typec_legacy_valid = false;
 
+	/* reset back to 120mS tCC debounce */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
+
 	/* enable APSD CC trigger for next insertion */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
@@ -3708,12 +3655,29 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
 
+	mutex_lock(&chg->vconn_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+
+	/* clear exit sink based on cc */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+						EXIT_SNK_BASED_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
+				rc);
+
 	typec_sink_removal(chg);
 	smblib_update_usb_type(chg);
 }
 
-static void smblib_handle_typec_insertion(struct smb_charger *chg,
-							bool sink_attached)
+static void smblib_handle_typec_insertion(struct smb_charger *chg)
 {
 	int rc;
 
@@ -3725,65 +3689,37 @@
 		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
 									rc);
 
-	if (sink_attached)
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
 		typec_sink_insertion(chg);
 	else
 		typec_sink_removal(chg);
 }
 
-static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
-						bool rising, bool sink_attached)
+static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
 {
-	int rc;
-	union power_supply_propval pval = {0, };
+	if (chg->pr_swap_in_progress)
+		return;
 
-	if (rising) {
-		if (!chg->typec_present) {
-			chg->typec_present = true;
-			smblib_dbg(chg, PR_MISC,  "TypeC insertion\n");
-			smblib_handle_typec_insertion(chg, sink_attached);
-		}
-	} else {
-		if (chg->typec_present) {
-			chg->typec_present = false;
-			smblib_dbg(chg, PR_MISC,  "TypeC removal\n");
-			smblib_handle_typec_removal(chg);
-		}
+	chg->typec_mode = smblib_get_prop_typec_mode(chg);
+	if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = true;
+		smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
+			smblib_typec_mode_name[chg->typec_mode]);
+		smblib_handle_typec_insertion(chg);
+	} else if (chg->typec_present &&
+				chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = false;
+		smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+		smblib_handle_typec_removal(chg);
 	}
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
-		   rising ? "rising" : "falling",
-		   smblib_typec_mode_name[pval.intval]);
-}
-
-irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
-		return IRQ_HANDLED;
-	}
-	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
-		stat, !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
-
-	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST,
-			!!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
-	power_supply_changed(chg->usb_psy);
-
-	return IRQ_HANDLED;
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+				smblib_typec_mode_name[chg->typec_mode]);
 }
 
 static void smblib_usb_typec_change(struct smb_charger *chg)
 {
 	int rc;
-	bool debounce_done, sink_attached;
 
 	rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
 							chg->typec_status, 5);
@@ -3792,12 +3728,7 @@
 		return;
 	}
 
-	debounce_done =
-		(bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
-	sink_attached =
-		(bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
-
-	smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached);
+	smblib_handle_typec_cc_state_change(chg);
 
 	if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
 		smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
@@ -3814,7 +3745,11 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	if (chg->micro_usb_mode) {
-		smblib_handle_usb_typec_change_for_uusb(chg);
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
+		vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+		smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+		schedule_delayed_work(&chg->uusb_otg_work,
+				msecs_to_jiffies(chg->otg_delay_ms));
 		return IRQ_HANDLED;
 	}
 
@@ -3896,9 +3831,63 @@
 	return IRQ_HANDLED;
 }
 
+/**************
+ * Additional USB PSY getters/setters
+ * that call interrupt functions
+ ***************/
+
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->pr_swap_in_progress;
+	return 0;
+}
+
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	chg->pr_swap_in_progress = val->intval;
+	/*
+	 * call the cc changed irq to handle real removals while
+	 * PR_SWAP was in progress
+	 */
+	smblib_usb_typec_change(chg);
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT,
+			val->intval ? TCC_DEBOUNCE_20MS_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+	return 0;
+}
+
 /***************
  * Work Queues *
  ***************/
+static void smblib_uusb_otg_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						uusb_otg_work.work);
+	int rc;
+	u8 stat;
+	bool otg;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+		goto out;
+	}
+
+	otg = !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT));
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, otg);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+			stat, otg);
+	power_supply_changed(chg->usb_psy);
+
+out:
+	vote(chg->awake_votable, OTG_DELAY_VOTER, false, 0);
+}
+
 
 static void smblib_hvdcp_detect_work(struct work_struct *work)
 {
@@ -4035,19 +4024,6 @@
 					QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
-
-	if (!chg->external_vconn && chg->vconn_en) {
-		chg->vconn_attempts = 0;
-		if (success) {
-			rc = _smblib_vconn_regulator_enable(
-							chg->vconn_vreg->rdev);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
-									rc);
-		} else {
-			chg->vconn_en = false;
-		}
-	}
 }
 
 #define MAX_OC_FALLING_TRIES 10
@@ -4136,7 +4112,7 @@
 	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
 		return;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
@@ -4185,7 +4161,7 @@
 	}
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 }
 
 static void smblib_otg_ss_done_work(struct work_struct *work)
@@ -4220,8 +4196,6 @@
 	}
 
 	power_supply_changed(chg->usb_main_psy);
-	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
-				settled_ua >= USB_WEAK_INPUT_UA, 0);
 
 	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
 }
@@ -4275,14 +4249,14 @@
 	chg->typec_legacy_valid = true;
 	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
 	legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
-	rp_high = smblib_get_prop_ufp_mode(chg) ==
-						POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	rp_high = chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
 	if (!legacy || !rp_high)
 		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
 								false, 0);
 
 unlock:
 	chg->typec_en_dis_active = 0;
+	smblib_usb_typec_change(chg);
 	mutex_unlock(&chg->lock);
 }
 
@@ -4317,7 +4291,16 @@
 		smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
 		return rc;
 	}
-	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+	chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+	if (chg->pl_enable_votable_indirect == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg,
+			"Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
@@ -4367,14 +4350,6 @@
 		return rc;
 	}
 
-	chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
-					VOTE_SET_ANY,
-					smblib_pl_enable_indirect_vote_callback,
-					chg);
-	if (IS_ERR(chg->pl_enable_votable_indirect)) {
-		rc = PTR_ERR(chg->pl_enable_votable_indirect);
-		return rc;
-	}
 
 	chg->hvdcp_disable_votable_indirect = create_votable(
 				"HVDCP_DISABLE_INDIRECT",
@@ -4450,8 +4425,6 @@
 		destroy_votable(chg->awake_votable);
 	if (chg->chg_disable_votable)
 		destroy_votable(chg->chg_disable_votable);
-	if (chg->pl_enable_votable_indirect)
-		destroy_votable(chg->pl_enable_votable_indirect);
 	if (chg->apsd_disable_votable)
 		destroy_votable(chg->apsd_disable_votable);
 	if (chg->hvdcp_hw_inov_dis_votable)
@@ -4481,6 +4454,7 @@
 	mutex_init(&chg->lock);
 	mutex_init(&chg->write_lock);
 	mutex_init(&chg->otg_oc_lock);
+	mutex_init(&chg->vconn_oc_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
@@ -4492,6 +4466,7 @@
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
+	INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
 	chg->fake_capacity = -EINVAL;
 	chg->fake_input_current_limited = -EINVAL;
 
@@ -4546,6 +4521,7 @@
 		cancel_delayed_work_sync(&chg->icl_change_work);
 		cancel_delayed_work_sync(&chg->pl_enable_work);
 		cancel_work_sync(&chg->legacy_detection_work);
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
 		qcom_batt_deinit();
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 42b357e..f39f2c9 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -36,9 +36,7 @@
 #define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
 #define USB_PSY_VOTER			"USB_PSY_VOTER"
 #define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
-#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define PL_QNOVO_VOTER			"PL_QNOVO_VOTER"
-#define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define USBIN_V_VOTER			"USBIN_V_VOTER"
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
 #define TYPEC_SRC_VOTER			"TYPEC_SRC_VOTER"
@@ -64,6 +62,8 @@
 #define CC2_WA_VOTER			"CC2_WA_VOTER"
 #define QNOVO_VOTER			"QNOVO_VOTER"
 #define BATT_PROFILE_VOTER		"BATT_PROFILE_VOTER"
+#define OTG_DELAY_VOTER			"OTG_DELAY_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -227,15 +227,16 @@
 	struct smb_iio		iio;
 	int			*debug_mask;
 	enum smb_mode		mode;
-	bool			external_vconn;
 	struct smb_chg_freq	chg_freq;
 	int			smb_version;
+	int			otg_delay_ms;
 
 	/* locks */
 	struct mutex		lock;
 	struct mutex		write_lock;
 	struct mutex		ps_change_lock;
 	struct mutex		otg_oc_lock;
+	struct mutex		vconn_oc_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -290,6 +291,7 @@
 	struct delayed_work	icl_change_work;
 	struct delayed_work	pl_enable_work;
 	struct work_struct	legacy_detection_work;
+	struct delayed_work	uusb_otg_work;
 
 	/* cached status */
 	int			voltage_min_uv;
@@ -319,6 +321,8 @@
 	u8			typec_status[5];
 	bool			typec_legacy_valid;
 	int			fake_input_current_limited;
+	bool			pr_swap_in_progress;
+	int			typec_mode;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -452,8 +456,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
-				union power_supply_propval *val);
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_pd_allowed(struct smb_charger *chg,
@@ -506,6 +508,10 @@
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 167666a..d8671ab 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -486,11 +486,11 @@
 #define UFP_TYPEC_OPEN_OPEN_BIT			BIT(0)
 
 #define TYPE_C_STATUS_2_REG			(USBIN_BASE + 0x0C)
-#define DFP_TYPEC_MASK				0x8F
 #define DFP_RA_OPEN_BIT				BIT(7)
 #define TIMER_STAGE_BIT				BIT(6)
 #define EXIT_UFP_MODE_BIT			BIT(5)
 #define EXIT_DFP_MODE_BIT			BIT(4)
+#define DFP_TYPEC_MASK				GENMASK(3, 0)
 #define DFP_RD_OPEN_BIT				BIT(3)
 #define DFP_RD_RA_VCONN_BIT			BIT(2)
 #define DFP_RD_RD_BIT				BIT(1)
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index b92a482..a464a81 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1416,6 +1416,7 @@
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
 };
 
 static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
@@ -1702,6 +1703,9 @@
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
 		val->intval = chip->parallel_mode;
 		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		val->intval = chip->parallel_charger_suspended;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 83374bb..ca0a2c6 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -248,7 +248,7 @@
 		val->intval = chg->usb_psy_desc.type;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
-		rc = smblib_get_prop_typec_mode(chg, val);
+		val->intval = chg->typec_mode;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
 		rc = smblib_get_prop_typec_power_role(chg, val);
@@ -941,13 +941,6 @@
 		return rc;
 	}
 
-	rc = smblib_write(chg, THERMREG_SRC_CFG_REG,
-						THERMREG_SKIN_ADC_SRC_EN_BIT);
-	if (rc < 0) {
-		pr_err("Couldn't enable connector thermreg source rc=%d\n", rc);
-		return rc;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c4..f3756ca 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -718,6 +718,7 @@
 };
 
 struct qeth_discipline {
+	const struct device_type *devtype;
 	void (*start_poll)(struct ccw_device *, int, unsigned long);
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
@@ -893,6 +894,9 @@
 extern struct qeth_discipline qeth_l3_discipline;
 extern const struct attribute_group *qeth_generic_attr_groups[];
 extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
 extern struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 20cf296..e8c4830 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5462,10 +5462,12 @@
 	card->discipline = NULL;
 }
 
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
 	.name = "qeth_generic",
 	.groups = qeth_generic_attr_groups,
 };
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
 static const struct device_type qeth_osn_devtype = {
 	.name = "qeth_osn",
 	.groups = qeth_osn_attr_groups,
@@ -5591,23 +5593,22 @@
 		goto err_card;
 	}
 
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		gdev->dev.type = &qeth_osn_devtype;
-	else
-		gdev->dev.type = &qeth_generic_devtype;
-
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
 			goto err_card;
+
+		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+					? card->discipline->devtype
+					: &qeth_osn_devtype;
 		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err_disc;
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSX:
+		break;
 	default:
+		gdev->dev.type = &qeth_generic_devtype;
 		break;
 	}
 
@@ -5663,8 +5664,10 @@
 		if (rc)
 			goto err;
 		rc = card->discipline->setup(card->gdev);
-		if (rc)
+		if (rc) {
+			qeth_core_free_discipline(card);
 			goto err;
+		}
 	}
 	rc = card->discipline->set_online(gdev);
 err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2..db6a285 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@
 
 	if (card->options.layer2 == newdis)
 		goto out;
-	else {
-		card->info.mac_bits  = 0;
-		if (card->discipline) {
-			card->discipline->remove(card->gdev);
-			qeth_core_free_discipline(card);
-		}
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		/* fixed layer, can't switch */
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	card->info.mac_bits = 0;
+	if (card->discipline) {
+		card->discipline->remove(card->gdev);
+		qeth_core_free_discipline(card);
 	}
 
 	rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@
 		goto out;
 
 	rc = card->discipline->setup(card->gdev);
+	if (rc)
+		qeth_core_free_discipline(card);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -703,10 +709,11 @@
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
 
 static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_state.attr,
@@ -726,9 +733,10 @@
 	&dev_attr_switch_attrs.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
 	.attrs = qeth_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
 
 const struct attribute_group *qeth_generic_attr_groups[] = {
 	&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3..0d59f9a 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
 
 #include "qeth_core.h"
 
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
 int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bb27058..5d010aa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1021,11 +1021,21 @@
 	return 0;
 }
 
+static const struct device_type qeth_l2_devtype = {
+	.name = "qeth_layer2",
+	.groups = qeth_l2_attr_groups,
+};
+
 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l2_create_device_attributes(&gdev->dev);
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l2_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
 	INIT_LIST_HEAD(&card->vid_list);
 	hash_init(card->mac_htable);
 	card->options.layer2 = 1;
@@ -1037,7 +1047,8 @@
 {
 	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
 
-	qeth_l2_remove_device_attributes(&cgdev->dev);
+	if (cgdev->dev.type == &qeth_generic_devtype)
+		qeth_l2_remove_device_attributes(&cgdev->dev);
 	qeth_set_allowed_threads(card, 0, 1);
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
@@ -1095,7 +1106,6 @@
 	case QETH_CARD_TYPE_OSN:
 		card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
 					 ether_setup);
-		card->dev->flags |= IFF_NOARP;
 		break;
 	default:
 		card->dev = alloc_etherdev(0);
@@ -1108,9 +1118,12 @@
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	card->dev->mtu = card->info.initial_mtu;
 	card->dev->netdev_ops = &qeth_l2_netdev_ops;
-	card->dev->ethtool_ops =
-		(card->info.type != QETH_CARD_TYPE_OSN) ?
-		&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		card->dev->ethtool_ops = &qeth_l2_osn_ops;
+		card->dev->flags |= IFF_NOARP;
+	} else {
+		card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+	}
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
 		card->dev->hw_features = NETIF_F_SG;
@@ -1434,6 +1447,7 @@
 }
 
 struct qeth_discipline qeth_l2_discipline = {
+	.devtype = &qeth_l2_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 692db49..a48ed9e 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -272,3 +272,11 @@
 	} else
 		qeth_bridgeport_an_set(card, 0);
 }
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	/* l2 specific, see l2_{create,remove}_device_attributes(): */
+	&qeth_l2_bridgeport_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 272d9e7..171be5e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3157,8 +3157,13 @@
 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l3_create_device_attributes(&gdev->dev);
+	rc = qeth_l3_create_device_attributes(&gdev->dev);
+	if (rc)
+		return rc;
+	hash_init(card->ip_htable);
+	hash_init(card->ip_mc_htable);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
 	return 0;
@@ -3450,6 +3455,7 @@
 }
 
 struct qeth_discipline qeth_l3_discipline = {
+	.devtype = &qeth_generic_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 904422f..0414843 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,8 @@
 		cmd = list_first_entry_or_null(&vscsi->free_cmd,
 					       struct ibmvscsis_cmd, list);
 		if (cmd) {
+			if (cmd->abort_cmd)
+				cmd->abort_cmd = NULL;
 			cmd->flags &= ~(DELAY_SEND);
 			list_del(&cmd->list);
 			cmd->iue = iue;
@@ -1773,6 +1775,7 @@
 				if (cmd->abort_cmd) {
 					retry = true;
 					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+					cmd->abort_cmd = NULL;
 				}
 
 				/*
@@ -1787,6 +1790,25 @@
 					list_del(&cmd->list);
 					ibmvscsis_free_cmd_resources(vscsi,
 								     cmd);
+					/*
+					 * With a successfully aborted op
+					 * through LIO we want to increment
+					 * the vscsi credit so that when we don't
+					 * send a rsp to the original scsi abort
+					 * op (h_send_crq), but the tm rsp to
+					 * the abort is sent, the credit is
+					 * correctly sent with the abort tm rsp.
+					 * We would need 1 for the abort tm rsp
+					 * and 1 credit for the aborted scsi op.
+					 * Thus we need to increment here.
+					 * Also we want to increment the credit
+					 * here because we want to make sure
+					 * cmd is actually released first,
+					 * otherwise the client will think it
+					 * can send a new cmd, and we could
+					 * find ourselves short of cmd elements.
+					 */
+					vscsi->credit += 1;
 				} else {
 					iue = cmd->iue;
 
@@ -2961,10 +2983,7 @@
 
 	rsp->opcode = SRP_RSP;
 
-	if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-		rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-	else
-		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
 	rsp->tag = cmd->rsp.tag;
 	rsp->flags = 0;
 
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8a7941b..289374c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4634,6 +4634,7 @@
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	u32 response_code = 0;
 	unsigned long flags;
+	unsigned int sector_sz;
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4692,6 +4693,20 @@
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+	/* In case of bogus fw or device, we could end up having
+	 * unaligned partial completion. We can force alignment here,
+	 * then scsi-ml does not need to handle this misbehavior.
+	 */
+	sector_sz = scmd->device->sector_size;
+	if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+		     xfer_cnt % sector_sz)) {
+		sdev_printk(KERN_INFO, scmd->device,
+		    "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+			    xfer_cnt, sector_sz);
+		xfer_cnt = round_down(xfer_cnt, sector_sz);
+	}
+
 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 643014f..4a6e086 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -721,6 +721,8 @@
 		return -EIO;
 	}
 
+	memset(&elreq, 0, sizeof(elreq));
+
 	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
 		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
 		DMA_TO_DEVICE);
@@ -786,10 +788,9 @@
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
-	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
-	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
-	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
-		elreq.options == EXTERNAL_LOOPBACK) {
+	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+	     req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+	    elreq.options == EXTERNAL_LOOPBACK) {
 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
 		ql_dbg(ql_dbg_user, vha, 0x701e,
 		    "BSG request type: %s.\n", type);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45af34d..658e4d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1131,7 +1131,7 @@
 
 	/* Mailbox registers. */
 	mbx_reg = &reg->mailbox0;
-	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
 	/* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@
 
 	/* Mailbox registers. */
 	mbx_reg = &reg->mailbox0;
-	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
 		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
 	/* Transfer sequence registers. */
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 23698c9..a1b01d6 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4783,9 +4783,9 @@
 
 	memset(mcp->mb, 0 , sizeof(mcp->mb));
 	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
-	mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64bit address */
+	/* BIT_6 specifies 64bit address */
+	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
 	if (IS_CNA_CAPABLE(ha)) {
-		mcp->mb[1] |= BIT_15;
 		mcp->mb[2] = vha->fcoe_fcf_idx;
 	}
 	mcp->mb[16] = LSW(mreq->rcv_dma);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f9b52a4..94630d4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2420,10 +2420,10 @@
 
 	if (mem_only) {
 		if (pci_enable_device_mem(pdev))
-			goto probe_out;
+			return ret;
 	} else {
 		if (pci_enable_device(pdev))
-			goto probe_out;
+			return ret;
 	}
 
 	/* This may fail but that's ok */
@@ -2433,7 +2433,7 @@
 	if (!ha) {
 		ql_log_pci(ql_log_fatal, pdev, 0x0009,
 		    "Unable to allocate memory for ha.\n");
-		goto probe_out;
+		goto disable_device;
 	}
 	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
 	    "Memory allocated for ha=%p.\n", ha);
@@ -3039,7 +3039,7 @@
 	kfree(ha);
 	ha = NULL;
 
-probe_out:
+disable_device:
 	pci_disable_device(pdev);
 	return ret;
 }
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9..9c2c7fe6 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -371,7 +371,7 @@
 		goto done;
 	}
 
-	if (end <= start || start == 0 || end == 0) {
+	if (end < start || start == 0 || end == 0) {
 		ql_dbg(ql_dbg_misc, vha, 0xd023,
 		    "%s: unusable range (start=%x end=%x)\n", __func__,
 		    ent->t262.end_addr, ent->t262.start_addr);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c2ac982..967bb0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2792,10 +2792,10 @@
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
-		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
-		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-	} else
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+		rw_max = q->limits.io_opt =
+			sdkp->opt_xfer_blocks * sdp->sector_size;
+	else
 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
 				      (sector_t)BLK_DEF_MAX_SECTORS);
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d9863..4446ed2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,11 +151,6 @@
 	return blocks << (ilog2(sdev->sector_size) - 9);
 }
 
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
-	return blocks * sdev->sector_size;
-}
-
 /*
  * Look up the DIX operation based on whether the command is read or
  * write and whether dix and dif are enabled.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 1b283b2..db4e7bb 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,6 +83,8 @@
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
+	select EXTCON
+	select EXTCON_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index de6ecbd..7c5a1bc 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -327,6 +327,20 @@
 	return ret;
 }
 
+static int ufshcd_parse_extcon_info(struct ufs_hba *hba)
+{
+	struct extcon_dev *extcon;
+
+	extcon = extcon_get_edev_by_phandle(hba->dev, 0);
+	if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV)
+		return PTR_ERR(extcon);
+
+	if (!IS_ERR(extcon))
+		hba->extcon = extcon;
+
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
@@ -449,6 +463,9 @@
 	ufshcd_parse_pm_levels(hba);
 	ufshcd_parse_gear_limits(hba);
 	ufshcd_parse_cmd_timeout(hba);
+	err = ufshcd_parse_extcon_info(hba);
+	if (err)
+		goto dealloc_host;
 
 	if (!dev->dma_mask)
 		dev->dma_mask = &dev->coherent_dma_mask;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6418c11..59222ea 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -381,6 +381,8 @@
 				 bool is_gating_context);
 static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 					      bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
@@ -532,7 +534,7 @@
 		*val = ' ';
 }
 
-#define UFSHCD_MAX_CMD_LOGGING	100
+#define UFSHCD_MAX_CMD_LOGGING	200
 
 #ifdef CONFIG_TRACEPOINTS
 static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -608,7 +610,7 @@
 	ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
 }
 
-static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 {
 	int i;
 	int pos;
@@ -657,7 +659,7 @@
 {
 }
 
-static void ufshcd_cmd_log_print(struct ufs_hba *hba)
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
 {
 }
 #endif
@@ -2055,6 +2057,22 @@
 	return;
 }
 
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+					    unsigned long delay_ms)
+{
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold_all(hba);
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->lock);
+	/* wait for all the outstanding requests to finish */
+	ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+	ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+	up_write(&hba->lock);
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
+	pm_runtime_put_sync(hba->dev);
+}
+
 static void ufshcd_hibern8_exit_work(struct work_struct *work)
 {
 	int ret;
@@ -2106,19 +2124,32 @@
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	unsigned long flags, value;
+	bool change = true;
 
 	if (kstrtoul(buf, 0, &value))
 		return -EINVAL;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.delay_ms == value)
+		change = false;
+
+	if (value >= hba->clk_gating.delay_ms_pwr_save ||
+	    value >= hba->clk_gating.delay_ms_perf) {
+		dev_err(hba->dev, "hibern8_on_idle_delay (%lu) cannot be greater than or equal to clkgate_delay_ms_pwr_save (%lu) or clkgate_delay_ms_perf (%lu)\n",
+			value, hba->clk_gating.delay_ms_pwr_save,
+			hba->clk_gating.delay_ms_perf);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return -EINVAL;
+	}
+
 	hba->hibern8_on_idle.delay_ms = value;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* Update auto hibern8 timer value if supported */
-	if (ufshcd_is_auto_hibern8_supported(hba) &&
+	if (change && ufshcd_is_auto_hibern8_supported(hba) &&
 	    hba->hibern8_on_idle.is_enabled)
-		ufshcd_set_auto_hibern8_timer(hba,
-					      hba->hibern8_on_idle.delay_ms);
+		__ufshcd_set_auto_hibern8_timer(hba,
+						hba->hibern8_on_idle.delay_ms);
 
 	return count;
 }
@@ -2148,7 +2179,7 @@
 
 	/* Update auto hibern8 timer value if supported */
 	if (ufshcd_is_auto_hibern8_supported(hba)) {
-		ufshcd_set_auto_hibern8_timer(hba,
+		__ufshcd_set_auto_hibern8_timer(hba,
 			value ? hba->hibern8_on_idle.delay_ms : value);
 		goto update;
 	}
@@ -3311,8 +3342,10 @@
 	/*
 	 * May get invoked from shutdown and IOCTL contexts.
 	 * In shutdown context, it comes in with lock acquired.
+	 * In error recovery context, it may come with lock acquired.
 	 */
-	if (!ufshcd_is_shutdown_ongoing(hba))
+
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 		down_read(&hba->lock);
 
 	/*
@@ -3346,7 +3379,7 @@
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
-	if (!ufshcd_is_shutdown_ongoing(hba))
+	if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
 		up_read(&hba->lock);
 	return err;
 }
@@ -4303,6 +4336,7 @@
 		ufshcd_print_host_state(hba);
 		ufshcd_print_pwr_info(hba);
 		ufshcd_print_host_regs(hba);
+		ufshcd_print_cmd_log(hba);
 	}
 
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
@@ -6150,7 +6184,7 @@
 			ufshcd_print_host_state(hba);
 			ufshcd_print_pwr_info(hba);
 			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
-			ufshcd_cmd_log_print(hba);
+			ufshcd_print_cmd_log(hba);
 			spin_lock_irqsave(hba->host->host_lock, flags);
 		}
 	}
@@ -6662,7 +6696,7 @@
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
-	ufshcd_cmd_log_print(hba);
+	ufshcd_print_cmd_log(hba);
 	lrbp = &hba->lrb[tag];
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -6934,6 +6968,23 @@
 	return err;
 }
 
+static int ufshcd_detect_device(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	err = ufshcd_vops_full_reset(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: full reset returned %d\n",
+			 __func__, err);
+
+	err = ufshcd_reset_device(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+			 __func__, err);
+
+	return ufshcd_host_reset_and_restore(hba);
+}
+
 /**
  * ufshcd_reset_and_restore - reset and re-initialize host/device
  * @hba: per-adapter instance
@@ -6950,26 +7001,10 @@
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
-		err = ufshcd_vops_full_reset(hba);
-		if (err)
-			dev_warn(hba->dev, "%s: full reset returned %d\n",
-				 __func__, err);
-
-		err = ufshcd_reset_device(hba);
-		if (err)
-			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
-				 __func__, err);
-
-		err = ufshcd_host_reset_and_restore(hba);
+		err = ufshcd_detect_device(hba);
 	} while (err && --retries);
 
 	/*
-	 * There is no point proceeding even after failing
-	 * to recover after multiple retries.
-	 */
-	if (err)
-		BUG();
-	/*
 	 * After reset the door-bell might be cleared, complete
 	 * outstanding requests in s/w here.
 	 */
@@ -7672,10 +7707,8 @@
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
 		pm_runtime_put_sync(hba->dev);
-		ufshcd_hba_exit(hba);
-	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7683,6 +7716,70 @@
 	return ret;
 }
 
+static void ufshcd_card_detect_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+
+	hba = container_of(work, struct ufs_hba, card_detect_work);
+	if (hba->card_detect_event &&
+	    (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
+		dev_dbg(hba->dev, "%s: card detect notification received\n",
+			 __func__);
+		pm_runtime_get_sync(hba->dev);
+		ufshcd_detect_device(hba);
+		pm_runtime_put_sync(hba->dev);
+	} else {
+		dev_dbg(hba->dev, "%s: card removed notification received\n",
+			 __func__);
+		/* TODO: remove the scsi device instances */
+	}
+}
+
+static int ufshcd_card_detect_notifier(struct notifier_block *nb,
+				       unsigned long event, void *ptr)
+{
+	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
+
+	hba->card_detect_event = event;
+	schedule_work(&hba->card_detect_work);
+
+	return NOTIFY_DONE;
+}
+
+static int ufshcd_extcon_register(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!hba->extcon)
+		return 0;
+
+	hba->card_detect_nb.notifier_call = ufshcd_card_detect_notifier;
+	ret = extcon_register_notifier(hba->extcon,
+				       EXTCON_MECHANICAL,
+				       &hba->card_detect_nb);
+	if (ret)
+		dev_err(hba->dev, "%s: extcon_register_notifier() failed, ret %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int ufshcd_extcon_unregister(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!hba->extcon)
+		return 0;
+
+	ret = extcon_unregister_notifier(hba->extcon, EXTCON_MECHANICAL,
+					 &hba->card_detect_nb);
+	if (ret)
+		dev_err(hba->dev, "%s: extcon_unregister_notifier() failed, ret %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
 /**
  * ufshcd_async_scan - asynchronous execution for probing hba
  * @data: data pointer to pass to this function
@@ -7699,6 +7796,8 @@
 	ufshcd_hold_all(hba);
 	ufshcd_probe_hba(hba);
 	ufshcd_release_all(hba);
+
+	ufshcd_extcon_register(hba);
 }
 
 /**
@@ -8408,20 +8507,9 @@
 
 	err = ufshcd_vops_init(hba);
 	if (err)
-		goto out;
-
-	err = ufshcd_vops_setup_regulators(hba, true);
-	if (err)
-		goto out_exit;
-
-	goto out;
-
-out_exit:
-	ufshcd_vops_exit(hba);
-out:
-	if (err)
 		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
 			__func__, ufshcd_get_var_name(hba), err);
+out:
 	return err;
 }
 
@@ -8430,8 +8518,6 @@
 	if (!hba->var || !hba->var->vops)
 		return;
 
-	ufshcd_vops_setup_regulators(hba, false);
-
 	ufshcd_vops_exit(hba);
 }
 
@@ -8490,6 +8576,7 @@
 static void ufshcd_hba_exit(struct ufs_hba *hba)
 {
 	if (hba->is_powered) {
+		ufshcd_extcon_unregister(hba);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
 		if (ufshcd_is_clkscaling_supported(hba)) {
@@ -8794,10 +8881,8 @@
 		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
-	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
-		ret = -EINVAL;
-		goto enable_gating;
-	}
+	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba))
+		goto set_vreg_lpm;
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
 		if (ufshcd_can_autobkops_during_suspend(hba)) {
@@ -8833,6 +8918,7 @@
 	    ufshcd_is_hibern8_on_idle_allowed(hba))
 		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 
+set_vreg_lpm:
 	ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
@@ -8935,6 +9021,9 @@
 	if (ret)
 		goto disable_vreg;
 
+	if (ufshcd_is_link_off(hba))
+		goto skip_dev_ops;
+
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
 		if (!ret) {
@@ -8982,6 +9071,7 @@
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 
+skip_dev_ops:
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release_all(hba);
 	goto out;
@@ -10043,6 +10133,7 @@
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+	INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
 
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c61a753..a485885 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -57,6 +57,7 @@
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/extcon.h>
 #include "unipro.h"
 
 #include <asm/irq.h>
@@ -724,6 +725,10 @@
  * @ufs_stats: ufshcd statistics to be used via debugfs
  * @debugfs_files: debugfs files associated with the ufs stats
  * @ufshcd_dbg_print: Bitmask for enabling debug prints
+ * @extcon: pointer to external connector device
+ * @card_detect_nb: card detector notifier registered with @extcon
+ * @card_detect_work: work to execute the card detect function
+ * @card_detect_event: card detect event, 0 = removed, 1 = inserted
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -896,6 +901,11 @@
 	/* Bitmask for enabling debug prints */
 	u32 ufshcd_dbg_print;
 
+	struct extcon_dev *extcon;
+	struct notifier_block card_detect_nb;
+	struct work_struct card_detect_work;
+	unsigned long card_detect_event;
+
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
 
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ec85506..8317c09 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -43,6 +43,16 @@
 	  data required to configure LLCC so that clients can start using the
 	  LLCC slices.
 
+config QCOM_SDM670_LLCC
+	tristate "Qualcomm Technologies, Inc. SDM670 LLCC driver"
+	depends on QCOM_LLCC
+	help
+	  This provides the last level cache controller driver for SDM670.
+	  This driver provides data required to configure LLCC, so that clients
+	  can start using the LLCC slices.
+	  Say yes here to enable llcc driver for SDM670.
+
+
 config QCOM_LLCC_AMON
 	tristate "Qualcomm Technologies, Inc. LLCC Activity Monitor(AMON) driver"
 	depends on QCOM_LLCC
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 4c59ca6..2577ac6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
 obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_SDM670_LLCC) += llcc-sdm670.o
 obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
 obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 42f146d..585836a 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -1143,8 +1143,16 @@
 
 	mutex_lock(&drvdata->mutex);
 
-	if (kstrtoul(buf, 16, &loop_cnt))
+	if (kstrtoul(buf, 16, &loop_cnt)) {
 		ret = -EINVAL;
+		goto err;
+	}
+
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
 
 	entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
@@ -1154,6 +1162,7 @@
 
 	entry->loop_cnt = min_t(uint32_t, loop_cnt, MAX_LOOP_CNT);
 	entry->index = drvdata->nr_config[drvdata->curr_list]++;
+	entry->desc_type = DCC_LOOP_TYPE;
 	INIT_LIST_HEAD(&entry->list);
 	list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
 
@@ -1221,12 +1230,13 @@
 
 	nval = sscanf(buf, "%x %x %d", &addr, &write_val, &apb_bus);
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
-		dev_err(dev, "Select link list to program using curr_list\n");
-		return -EINVAL;
+	if (nval <= 1 || nval > 3) {
+		ret = -EINVAL;
+		goto err;
 	}
 
-	if (nval <= 1 || nval > 3) {
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
 		ret = -EINVAL;
 		goto err;
 	}
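
For reference, the two dcc_v2 hunks above converge on the usual sysfs store shape: parse the user input first, then validate driver state, with every failure path funnelling through a single unlock label. A minimal sketch of that shape follows (illustrative only, not part of the patch; the attribute name and the dcc_drvdata lookup are placeholders for the driver's real context):

static ssize_t loop_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	/* hypothetical lookup; the real driver keeps drvdata elsewhere */
	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
	unsigned long loop_cnt;
	int ret = size;

	mutex_lock(&drvdata->mutex);

	/* Parse the user input before touching driver state */
	if (kstrtoul(buf, 16, &loop_cnt)) {
		ret = -EINVAL;
		goto err;
	}

	/* Then check that a link list has been selected */
	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
		dev_err(dev, "Select link list to program using curr_list\n");
		ret = -EINVAL;
		goto err;
	}

	/* ... allocate and queue the config entry here ... */
err:
	mutex_unlock(&drvdata->mutex);
	return ret;
}
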
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 3c4759c..0ce1bda 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -702,7 +702,8 @@
 		err = true;
 	} else if (intent->data == NULL) {
 		if (einfo->intentless) {
-			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+			intent->data = kmalloc(cmd.frag_size,
+						__GFP_ATOMIC | __GFP_HIGH);
 			if (!intent->data) {
 				err = true;
 				GLINK_ERR(
@@ -868,7 +869,7 @@
 
 	rcu_id = srcu_read_lock(&einfo->use_ref);
 
-	if (unlikely(!einfo->rx_fifo)) {
+	if (unlikely(!einfo->rx_fifo) && atomic_ctx) {
 		if (!get_rx_fifo(einfo)) {
 			srcu_read_unlock(&einfo->use_ref, rcu_id);
 			return;
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e3b5826..b5bb719 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -191,8 +191,8 @@
 	ICNSS_FW_TEST_MODE,
 	ICNSS_PM_SUSPEND,
 	ICNSS_PM_SUSPEND_NOIRQ,
-	ICNSS_SSR_ENABLED,
-	ICNSS_PDR_ENABLED,
+	ICNSS_SSR_REGISTERED,
+	ICNSS_PDR_REGISTERED,
 	ICNSS_PD_RESTART,
 	ICNSS_MSA0_ASSIGNED,
 	ICNSS_WLFW_EXISTS,
@@ -2347,7 +2347,7 @@
 	if (code != SUBSYS_BEFORE_SHUTDOWN)
 		return NOTIFY_OK;
 
-	if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
+	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state))
 		return NOTIFY_OK;
 
 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
@@ -2388,14 +2388,14 @@
 		icnss_pr_err("Modem register notifier failed: %d\n", ret);
 	}
 
-	set_bit(ICNSS_SSR_ENABLED, &priv->state);
+	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
 
 	return ret;
 }
 
 static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
 {
-	if (!test_and_clear_bit(ICNSS_SSR_ENABLED, &priv->state))
+	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
 		return 0;
 
 	subsys_notif_unregister_notifier(priv->modem_notify_handler,
@@ -2409,7 +2409,7 @@
 {
 	int i;
 
-	if (!test_and_clear_bit(ICNSS_PDR_ENABLED, &priv->state))
+	if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
 		return 0;
 
 	for (i = 0; i < priv->total_domains; i++)
@@ -2533,9 +2533,10 @@
 	priv->service_notifier = notifier;
 	priv->total_domains = pd->total_domains;
 
-	set_bit(ICNSS_PDR_ENABLED, &priv->state);
+	set_bit(ICNSS_PDR_REGISTERED, &priv->state);
 
-	icnss_pr_dbg("PD restart enabled, state: 0x%lx\n", priv->state);
+	icnss_pr_dbg("PD service notifier registered, state: 0x%lx\n",
+		     priv->state);
 
 	return NOTIFY_OK;
 
@@ -3190,7 +3191,7 @@
 		goto out;
 	}
 
-	if (!test_bit(ICNSS_PDR_ENABLED, &priv->state)) {
+	if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
 		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
 			     priv->state);
 		ret = -EOPNOTSUPP;
@@ -3644,11 +3645,11 @@
 		case ICNSS_PM_SUSPEND_NOIRQ:
 			seq_puts(s, "PM SUSPEND NOIRQ");
 			continue;
-		case ICNSS_SSR_ENABLED:
-			seq_puts(s, "SSR ENABLED");
+		case ICNSS_SSR_REGISTERED:
+			seq_puts(s, "SSR REGISTERED");
 			continue;
-		case ICNSS_PDR_ENABLED:
-			seq_puts(s, "PDR ENABLED");
+		case ICNSS_PDR_REGISTERED:
+			seq_puts(s, "PDR REGISTERED");
 			continue;
 		case ICNSS_PD_RESTART:
 			seq_puts(s, "PD RESTART");
@@ -4262,6 +4263,11 @@
 
 	icnss_debugfs_create(priv);
 
+	ret = device_init_wakeup(&priv->pdev->dev, true);
+	if (ret)
+		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+			     ret);
+
 	penv = priv;
 
 	icnss_pr_info("Platform driver probed successfully\n");
@@ -4282,6 +4288,8 @@
 {
 	icnss_pr_info("Removing driver: state: 0x%lx\n", penv->state);
 
+	device_init_wakeup(&penv->pdev->dev, false);
+
 	icnss_debugfs_destroy(penv);
 
 	icnss_modem_ssr_unregister_notifier(penv);
diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c
index a7a0ffa..6974146 100644
--- a/drivers/soc/qcom/icnss_utils.c
+++ b/drivers/soc/qcom/icnss_utils.c
@@ -12,11 +12,13 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <soc/qcom/icnss.h>
 
 #define ICNSS_MAX_CH_NUM 45
 
 static DEFINE_MUTEX(unsafe_channel_list_lock);
 static DEFINE_SPINLOCK(dfs_nol_info_lock);
+static int driver_load_cnt;
 
 static struct icnss_unsafe_channel_list {
 	u16 unsafe_ch_count;
@@ -124,3 +126,15 @@
 	return len;
 }
 EXPORT_SYMBOL(icnss_wlan_get_dfs_nol);
+
+void icnss_increment_driver_load_cnt(void)
+{
+	++driver_load_cnt;
+}
+EXPORT_SYMBOL(icnss_increment_driver_load_cnt);
+
+int icnss_get_driver_load_cnt(void)
+{
+	return driver_load_cnt;
+}
+EXPORT_SYMBOL(icnss_get_driver_load_cnt);
diff --git a/drivers/soc/qcom/llcc-sdm670.c b/drivers/soc/qcom/llcc-sdm670.c
new file mode 100644
index 0000000..68ad755
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm670.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * An SCT entry contains the following parameters:
+ * name: Name of the client's use case for which the llcc slice is used
+ * uid: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Determines if the slice has a fixed capacity
+ * bonus_ways: Bonus ways to be used by any slice, a bonus way is used only if
+ *             it's not a reserved way.
+ * res_ways: Reserved ways for the cache slice, the reserved ways cannot be used
+ *           by any client other than the one they are assigned to.
+ * cache_mode: Each slice operates as a cache; this controls whether the
+ *             slice runs in normal or TCM mode
+ * probe_target_ways: Determines which ways to probe for an access hit. When
+ *                    configured to 1, only bonus and reserved ways are probed;
+ *                    when configured to 0, all ways in the LLCC are probed.
+ * dis_cap_alloc: Disable capacity based allocation for a client
+ * retain_on_pc: If this bit is set and the client has maintained an active
+ *               vote, then the ways assigned to this client are not flushed
+ *               on power collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
+ */
+#define SCT_ENTRY(n, uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+	{					\
+		.name = n,			\
+		.usecase_id = uid,		\
+		.slice_id = sid,		\
+		.max_cap = mc,			\
+		.priority = p,			\
+		.fixed_size = fs,		\
+		.bonus_ways = bway,		\
+		.res_ways = rway,		\
+		.cache_mode = cmod,		\
+		.probe_target_ways = ptw,	\
+		.dis_cap_alloc = dca,		\
+		.retain_on_pc = rp,		\
+		.activate_on_init = a,		\
+	}
+
+static struct llcc_slice_config sdm670_data[] =  {
+	SCT_ENTRY("cpuss", 1, 1, 512, 1, 1, 0xF,  0x0, 0, 0, 0, 1, 1),
+	SCT_ENTRY("vidsc0", 2, 2, 64,  2, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("vidsc1", 3, 3, 64,  2, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("rotator", 4, 4, 384, 2, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("modem",  8, 8, 512, 1, 0, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("gpuhtw", 11, 11, 128, 1, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("gpu",    12, 12, 384, 1, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+};
+
+static int sdm670_qcom_llcc_probe(struct platform_device *pdev)
+{
+	return qcom_llcc_probe(pdev, sdm670_data,
+				 ARRAY_SIZE(sdm670_data));
+}
+
+static const struct of_device_id sdm670_qcom_llcc_of_match[] = {
+	{ .compatible = "qcom,sdm670-llcc", },
+	{ },
+};
+
+static struct platform_driver sdm670_qcom_llcc_driver = {
+	.driver = {
+		.name = "sdm670-llcc",
+		.owner = THIS_MODULE,
+		.of_match_table = sdm670_qcom_llcc_of_match,
+	},
+	.probe = sdm670_qcom_llcc_probe,
+	.remove = qcom_llcc_remove,
+};
+
+static int __init sdm670_init_qcom_llcc_init(void)
+{
+	return platform_driver_register(&sdm670_qcom_llcc_driver);
+}
+module_init(sdm670_init_qcom_llcc_init);
+
+static void __exit sdm670_exit_qcom_llcc_exit(void)
+{
+	platform_driver_unregister(&sdm670_qcom_llcc_driver);
+}
+module_exit(sdm670_exit_qcom_llcc_exit);
+
+MODULE_DESCRIPTION("QTI sdm670 LLCC driver");
+MODULE_LICENSE("GPL v2");
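
As an illustration (not part of the patch), the first row of sdm670_data expands through the SCT_ENTRY macro above into the following designated initializer, which is a convenient way to cross-check a row against the field descriptions in the comment block:

/* SCT_ENTRY("cpuss", 1, 1, 512, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 1) expands to: */
{
	.name              = "cpuss",
	.usecase_id        = 1,
	.slice_id          = 1,
	.max_cap           = 512,	/* KB */
	.priority          = 1,
	.fixed_size        = 1,
	.bonus_ways        = 0xF,
	.res_ways          = 0x0,
	.cache_mode        = 0,		/* normal (non-TCM) */
	.probe_target_ways = 0,		/* probe all ways */
	.dis_cap_alloc     = 0,
	.retain_on_pc      = 1,
	.activate_on_init  = 1,
},
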
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index 9d0adbb..e38c53e 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -706,8 +706,6 @@
 	struct msm_bus_node_device_type *cur_rsc = NULL;
 	int ret = 0;
 
-	rt_mutex_lock(&msm_bus_adhoc_lock);
-
 	bus_dev = to_msm_bus_node(dev);
 	if (bus_dev->node_info->is_bcm_dev ||
 		bus_dev->node_info->is_fab_dev ||
@@ -730,7 +728,6 @@
 	}
 
 exit_bcm_remove_handoff_req:
-	rt_mutex_unlock(&msm_bus_adhoc_lock);
 	return ret;
 }
 
@@ -857,14 +854,18 @@
 	INIT_LIST_HEAD(&commit_list);
 }
 
-void commit_late_init_data(void)
+int commit_late_init_data(void)
 {
+	int rc;
 	rt_mutex_lock(&msm_bus_adhoc_lock);
+	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						bcm_remove_handoff_req);
 
 	msm_bus_commit_data(&late_init_clist);
 	INIT_LIST_HEAD(&late_init_clist);
 
 	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return rc;
 }
 
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 458cf0d..144b1a1 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -1689,15 +1689,9 @@
 
 int __init msm_bus_device_late_init(void)
 {
-	int rc;
-
 	MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
 	init_time = false;
-	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
-						bcm_remove_handoff_req);
-
-	commit_late_init_data();
-	return rc;
+	return commit_late_init_data();
 }
 subsys_initcall(msm_bus_device_init_driver);
 late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index f7f17c3..cd5281a 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -205,7 +205,7 @@
 				int throttle_en, uint64_t lim_bw);
 int msm_bus_commit_data(struct list_head *clist);
 int bcm_remove_handoff_req(struct device *dev, void *data);
-void commit_late_init_data(void);
+int commit_late_init_data(void);
 int msm_bus_query_gen(struct list_head *qlist,
 				struct msm_bus_tcs_usecase *tcs_usecase);
 void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 1f28712..cfa4ca9 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -38,6 +38,8 @@
 
 #include <linux/uaccess.h>
 #include <asm/setup.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_pil_event.h>
 
 #include "peripheral-loader.h"
 
@@ -834,6 +836,7 @@
 		goto release_fw;
 	}
 
+	trace_pil_event("before_init_image", desc);
 	if (desc->ops->init_image)
 		ret = desc->ops->init_image(desc, fw->data, fw->size);
 	if (ret) {
@@ -841,6 +844,7 @@
 		goto err_boot;
 	}
 
+	trace_pil_event("before_mem_setup", desc);
 	if (desc->ops->mem_setup)
 		ret = desc->ops->mem_setup(desc, priv->region_start,
 				priv->region_end - priv->region_start);
@@ -856,6 +860,7 @@
 		 * Also for secure boot devices, modem memory has to be released
 		 * after MBA is booted
 		 */
+		trace_pil_event("before_assign_mem", desc);
 		if (desc->modem_ssr) {
 			ret = pil_assign_mem_to_linux(desc, priv->region_start,
 				(priv->region_end - priv->region_start));
@@ -874,6 +879,7 @@
 		hyp_assign = true;
 	}
 
+	trace_pil_event("before_load_seg", desc);
 	list_for_each_entry(seg, &desc->priv->segs, list) {
 		ret = pil_load_seg(desc, seg);
 		if (ret)
@@ -881,6 +887,7 @@
 	}
 
 	if (desc->subsys_vmid > 0) {
+		trace_pil_event("before_reclaim_mem", desc);
 		ret =  pil_reclaim_mem(desc, priv->region_start,
 				(priv->region_end - priv->region_start),
 				desc->subsys_vmid);
@@ -892,11 +899,13 @@
 		hyp_assign = false;
 	}
 
+	trace_pil_event("before_auth_reset", desc);
 	ret = desc->ops->auth_and_reset(desc);
 	if (ret) {
 		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
 		goto err_auth_and_reset;
 	}
+	trace_pil_event("reset_done", desc);
 	pil_info(desc, "Brought out of reset\n");
 	desc->modem_ssr = false;
 err_auth_and_reset:
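
A short aside (illustrative, not part of the patch; the trace header contents are assumed rather than shown): the tracepoint hunks above follow the standard kernel convention in which exactly one compilation unit defines CREATE_TRACE_POINTS before including the trace header, so the tracepoint bodies are emitted once, while every other user includes the header plainly and just calls the trace functions.

/* peripheral-loader.c: generates the tracepoint definitions exactly once */
#define CREATE_TRACE_POINTS
#include <trace/events/trace_msm_pil_event.h>

/* pil-msa.c, pil-q6v5.c, subsystem_restart.c: only include the header */
#include <trace/events/trace_msm_pil_event.h>

/* hypothetical call site mirroring the phase markers added above */
static void sketch_mark_phase(struct pil_desc *desc)
{
	trace_pil_event("before_init_image", desc);
}
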
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index b71ce6b..4a586ac 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <soc/qcom/scm.h>
 #include <soc/qcom/secure_buffer.h>
+#include <trace/events/trace_msm_pil_event.h>
 
 #include "peripheral-loader.h"
 #include "pil-q6v5.h"
@@ -513,6 +514,7 @@
 	u32 debug_val;
 	int ret;
 
+	trace_pil_func(__func__);
 	if (drv->mba_dp_phys)
 		start_addr = drv->mba_dp_phys;
 
@@ -612,6 +614,7 @@
 	const u8 *data;
 	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
+	trace_pil_func(__func__);
 	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
 	ret = request_firmware(&fw, fw_name_p, pil->dev);
 	if (ret) {
@@ -734,6 +737,7 @@
 	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	unsigned long attrs = 0;
 
+	trace_pil_func(__func__);
 	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
 	attrs |= DMA_ATTR_SKIP_ZEROING;
 	attrs |= DMA_ATTR_STRONGLY_ORDERED;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index df0c609c..7984dfe 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -37,7 +37,7 @@
 
 #define MAX_VDD_MSS_UV		1150000
 #define PROXY_TIMEOUT_MS	10000
-#define MAX_SSR_REASON_LEN	81U
+#define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
 
 #define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
@@ -276,6 +276,10 @@
 	if (!res) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "No restart register defined\n");
+			return -ENOMEM;
+		}
 		q6->restart_reg_sec = true;
 	}
 
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index b41a173..49dd0be 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/regulator/consumer.h>
+#include <trace/events/trace_msm_pil_event.h>
 
 #include "peripheral-loader.h"
 #include "pil-q6v5.h"
@@ -425,6 +426,7 @@
 	u32 val;
 	int i;
 
+	trace_pil_func(__func__);
 	/* Override the ACC value if required */
 	if (drv->override_acc)
 		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c
index 5d860a3..e45f61e 100644
--- a/drivers/soc/qcom/qdsp6v2/apr.c
+++ b/drivers/soc/qcom/qdsp6v2/apr.c
@@ -515,19 +515,19 @@
 			mutex_unlock(&svc->m_lock);
 			return NULL;
 		}
-		if (!svc->port_cnt && !svc->svc_cnt)
+		if (!svc->svc_cnt)
 			clnt->svc_cnt++;
 		svc->port_cnt++;
 		svc->port_fn[temp_port] = svc_fn;
 		svc->port_priv[temp_port] = priv;
+		svc->svc_cnt++;
 	} else {
 		if (!svc->fn) {
-			if (!svc->port_cnt && !svc->svc_cnt)
+			if (!svc->svc_cnt)
 				clnt->svc_cnt++;
 			svc->fn = svc_fn;
-			if (svc->port_cnt)
-				svc->svc_cnt++;
 			svc->priv = priv;
+			svc->svc_cnt++;
 		}
 	}
 
@@ -747,28 +747,28 @@
 		return -EINVAL;
 
 	mutex_lock(&svc->m_lock);
+	if (!svc->svc_cnt) {
+		pr_err("%s: svc already deregistered. svc = %pK\n",
+			__func__, svc);
+		mutex_unlock(&svc->m_lock);
+		return -EINVAL;
+	}
+
 	dest_id = svc->dest_id;
 	client_id = svc->client_id;
 	clnt = &client[dest_id][client_id];
 
-	if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
+	if (svc->svc_cnt > 0) {
 		if (svc->port_cnt)
 			svc->port_cnt--;
-		else if (svc->svc_cnt)
-			svc->svc_cnt--;
-		if (!svc->port_cnt && !svc->svc_cnt) {
+		svc->svc_cnt--;
+		if (!svc->svc_cnt) {
 			client[dest_id][client_id].svc_cnt--;
-			svc->need_reset = 0x0;
-		}
-	} else if (client[dest_id][client_id].svc_cnt > 0) {
-		client[dest_id][client_id].svc_cnt--;
-		if (!client[dest_id][client_id].svc_cnt) {
-			svc->need_reset = 0x0;
 			pr_debug("%s: service is reset %pK\n", __func__, svc);
 		}
 	}
 
-	if (!svc->port_cnt && !svc->svc_cnt) {
+	if (!svc->svc_cnt) {
 		svc->priv = NULL;
 		svc->id = 0;
 		svc->fn = NULL;
@@ -887,8 +887,10 @@
 		 * recovery notifications during initial boot
 		 * up since everything is expected to be down.
 		 */
-		if (is_initial_boot)
+		if (is_initial_boot) {
+			is_initial_boot = false;
 			break;
+		}
 		if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
 			apr_modem_down(opcode);
 		else
@@ -908,7 +910,12 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block service_nb = {
+static struct notifier_block adsp_service_nb = {
+	.notifier_call  = apr_notifier_service_cb,
+	.priority = 0,
+};
+
+static struct notifier_block modem_service_nb = {
 	.notifier_call  = apr_notifier_service_cb,
 	.priority = 0,
 };
@@ -938,9 +945,9 @@
 
 	is_initial_boot = true;
 	subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
-			      &service_nb);
+			      &adsp_service_nb);
 	subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
-			      &service_nb);
+			      &modem_service_nb);
 
 	return 0;
 }
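
One note on the notifier split above (illustrative, not part of the patch): a struct notifier_block carries its own chain linkage, so a single instance cannot be registered on two notifier chains at once; giving the ADSP and modem domains separate blocks avoids corrupting either chain while both still share the same callback.

static struct notifier_block adsp_nb  = { .notifier_call = apr_notifier_service_cb };
static struct notifier_block modem_nb = { .notifier_call = apr_notifier_service_cb };

static void sketch_register(void)
{
	/* each chain gets its own notifier_block instance */
	subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN, &adsp_nb);
	subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN, &modem_nb);
}
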
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 40aac6a..92a97fae 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -97,8 +97,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&apr_ch->w_lock, flags);
-	rc = glink_tx(apr_ch->handle, pkt_priv, data, len,
-			GLINK_TX_REQ_INTENT | GLINK_TX_ATOMIC);
+	rc = glink_tx(apr_ch->handle, pkt_priv, data, len, GLINK_TX_ATOMIC);
 	spin_unlock_irqrestore(&apr_ch->w_lock, flags);
 
 	if (rc)
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 414c123..2320fea 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -626,9 +626,11 @@
 	 * If pdr registration failed, register clients on next service
 	 * Do in late init to ensure that SSR subsystem is initialized
 	 */
+	mutex_lock(&notifier_mutex);
 	if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
 		audio_notifer_reg_all_clients();
 
+	mutex_unlock(&notifier_mutex);
 	return 0;
 }
 late_initcall(audio_notifier_late_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 5ca0fe5..306510f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -34,17 +34,21 @@
 #define RPMH_MAX_REQ_IN_BATCH		10
 
 #define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name)	\
-	struct rpmh_msg name = {		\
-		.msg = { 0 },			\
-		.msg.state = s,			\
-		.msg.is_complete = true,	\
-		.msg.payload = name.cmd,	\
-		.msg.num_payload = 0,		\
-		.cmd = { { 0 } },		\
-		.waitq = q,			\
-		.wait_count = c,		\
-		.rc = rc,			\
-		.bit = -1,			\
+	struct rpmh_msg name = {			\
+		.msg = {				\
+			.state = s,			\
+			.payload = name.cmd,		\
+			.num_payload = 0,		\
+			.is_read = false,		\
+			.is_control = false,		\
+			.is_complete = true,		\
+			.invalidate = false,		\
+		},					\
+		.cmd = { { 0 } },			\
+		.completion = q,			\
+		.wait_count = c,			\
+		.rc = rc,				\
+		.bit = -1,				\
 	}
 
 struct rpmh_req {
@@ -57,7 +61,7 @@
 struct rpmh_msg {
 	struct tcs_mbox_msg msg;
 	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
-	wait_queue_head_t *waitq;
+	struct completion *completion;
 	atomic_t *wait_count;
 	struct rpmh_client *rc;
 	int bit;
@@ -106,21 +110,31 @@
 	return msg;
 }
 
+static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+	unsigned long flags;
+
+	/* If the message was allocated from the pool, mark it available again */
+	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+		spin_lock_irqsave(&rpm->lock, flags);
+		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+		spin_unlock_irqrestore(&rpm->lock, flags);
+	}
+}
+
 static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
 {
 	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
 
 	atomic_dec(rpm_msg->wait_count);
-	wake_up(rpm_msg->waitq);
 }
 
 static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
 {
 	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
-	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
 	atomic_t *wc = rpm_msg->wait_count;
-	wait_queue_head_t *waitq = rpm_msg->waitq;
-	unsigned long flags;
+	struct completion *compl = rpm_msg->completion;
 
 	rpm_msg->err = r;
 
@@ -144,18 +158,12 @@
 	 * into an issue that the stack allocated parent object may be
 	 * invalid before we can check the ->bit value.
 	 */
-
-	/* If we allocated the pool, set it as available */
-	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
-		spin_lock_irqsave(&rpm->lock, flags);
-		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
-		spin_unlock_irqrestore(&rpm->lock, flags);
-	}
+	free_msg_to_pool(rpm_msg);
 
 	/* Signal the blocking thread we are done */
 	if (wc && atomic_dec_and_test(wc))
-		if (waitq)
-			wake_up(waitq);
+		if (compl)
+			complete(compl);
 }
 
 static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
@@ -312,9 +320,9 @@
 int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
 			u32 addr, u32 data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(1);
-	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
 	int ret;
 
 	if (IS_ERR_OR_NULL(rc))
@@ -333,7 +341,7 @@
 	if (ret < 0)
 		return ret;
 
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	return rpm_msg.err;
 }
@@ -408,9 +416,9 @@
 int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
 			struct tcs_cmd *cmd, int n)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(1);
-	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
 	int ret;
 
 	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
@@ -428,7 +436,7 @@
 	if (ret)
 		return ret;
 
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	return rpm_msg.err;
 }
@@ -454,7 +462,7 @@
 			struct tcs_cmd *cmd, int *n)
 {
 	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
 	int count = 0;
 	int ret, i, j, k;
@@ -507,9 +515,8 @@
 	for (i = 0; i < count; i++) {
 		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
 		if (IS_ERR_OR_NULL(rpm_msg[i])) {
-			/* Clean up our call by spoofing tx_done */
 			for (j = 0 ; j < i; j++)
-				rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
+				free_msg_to_pool(rpm_msg[j]);
 			return PTR_ERR(rpm_msg[i]);
 		}
 		cmd += n[i];
@@ -520,7 +527,7 @@
 		might_sleep();
 		atomic_set(&wait_count, count);
 		for (i = 0; i < count; i++) {
-			rpm_msg[i]->waitq = &waitq;
+			rpm_msg[i]->completion = &compl;
 			rpm_msg[i]->wait_count = &wait_count;
 			/* Bypass caching and write to mailbox directly */
 			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
@@ -530,15 +537,17 @@
 				break;
 			}
 		}
-		wait_event(waitq, atomic_read(&wait_count) == (count - i));
+		/* For those unsent requests, spoof tx_done */
+		for (j = i; j < count; j++)
+			rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
+		wait_for_completion(&compl);
 	} else {
 		/* Send Sleep requests to the controller, expect no response */
 		for (i = 0; i < count; i++) {
-			rpm_msg[i]->waitq = NULL;
+			rpm_msg[i]->completion = NULL;
 			ret = mbox_send_controller_data(rc->chan,
 						&rpm_msg[i]->msg);
-			/* Clean up our call by spoofing tx_done */
-			rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
+			free_msg_to_pool(rpm_msg[i]);
 		}
 		return 0;
 	}
@@ -660,10 +669,10 @@
 int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
 {
 	int ret;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
 	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
-				&waitq, &wait_count, rpm_msg);
+				&compl, &wait_count, rpm_msg);
 
 	if (IS_ERR_OR_NULL(rc) || !resp)
 		return -EINVAL;
@@ -684,7 +693,7 @@
 		return ret;
 
 	/* Wait until the response is received from RPMH */
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	/* Read the data back from the tcs_mbox_msg structrure */
 	*resp = rpm_msg.cmd[0].data;
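
For context (illustrative only, not part of the patch), the rpmh changes above swap an open-coded wait-queue/counter pair for the kernel's completion primitive; with the rpmh-specific details stripped out, the resulting pattern looks like this:

#include <linux/completion.h>

struct demo_msg {
	struct completion *completion;
	int err;
};

/* Callback context (e.g. mailbox tx_done): signal the waiter, if any */
static void demo_tx_done(struct demo_msg *msg, int err)
{
	msg->err = err;
	if (msg->completion)
		complete(msg->completion);
}

/* Submitter context: block until the callback fires */
static int demo_send_sync(struct demo_msg *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);

	msg->completion = &done;
	/* ... queue msg to the controller here ... */
	wait_for_completion(&done);
	return msg->err;
}
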
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index f381f16..1c7c4a1 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -53,28 +53,25 @@
 /* Uncomment the line below to test spcom against modem rather than SP */
 /* #define SPCOM_TEST_HLOS_WITH_MODEM 1 */
 
-/* Uncomment the line below to enable debug messages */
-/* #define DEBUG 1 */
-
 #define pr_fmt(fmt)	"spcom [%s]: " fmt, __func__
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
+#include <linux/kernel.h>	/* min() */
+#include <linux/module.h>	/* MODULE_LICENSE */
+#include <linux/device.h>	/* class_create() */
+#include <linux/slab.h>	/* kzalloc() */
+#include <linux/fs.h>		/* file_operations */
+#include <linux/cdev.h>	/* cdev_add() */
+#include <linux/errno.h>	/* EINVAL, ETIMEDOUT */
+#include <linux/printk.h>	/* pr_err() */
+#include <linux/bitops.h>	/* BIT(x) */
+#include <linux/completion.h>	/* wait_for_completion_timeout() */
+#include <linux/poll.h>	/* POLLOUT */
+#include <linux/dma-mapping.h>	/* dma_alloc_coherent() */
 #include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/of.h>		/* of_property_count_strings() */
 #include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/msm_ion.h>
+#include <linux/delay.h>	/* msleep() */
+#include <linux/msm_ion.h>	/* msm_ion_client_create() */
 
 #include <soc/qcom/glink.h>
 #include <soc/qcom/smem.h>
@@ -82,7 +79,7 @@
 
 #include <uapi/linux/spcom.h>
 
-#include "glink_private.h"
+#include "glink_private.h" /* glink_ssr() */
 
 /* "SPCM" string */
 #define SPCOM_MAGIC_ID	((uint32_t)(0x5350434D))
@@ -220,9 +217,9 @@
 	bool tx_abort;
 
 	/* rx data info */
-	int rx_buf_size;	/* allocated rx buffer size */
+	size_t rx_buf_size;	/* allocated rx buffer size */
 	bool rx_buf_ready;
-	int actual_rx_size;	/* actual data size received */
+	size_t actual_rx_size;	/* actual data size received */
 	const void *glink_rx_buf;
 
 	/* ION lock/unlock support */
@@ -276,6 +273,7 @@
 				  const void *pkt_priv);
 static struct spcom_channel *spcom_find_channel_by_name(const char *name);
 static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd);
+static void spcom_rx_abort_pending_server(void);
 
 /**
  * spcom_is_ready() - driver is initialized and ready.
@@ -301,6 +299,10 @@
  */
 static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
 {
+	/* Channel must be open before it gets connected */
+	if (!spcom_is_channel_open(ch))
+		return false;
+
 	return (ch->glink_state == GLINK_CONNECTED);
 }
 
@@ -316,6 +318,10 @@
 {
 	int i;
 	int ret;
+	static bool is_predefined_created;
+
+	if (is_predefined_created)
+		return 0;
 
 	for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
 		const char *name = spcom_dev->predefined_ch_name[i];
@@ -330,6 +336,8 @@
 		}
 	}
 
+	is_predefined_created = true;
+
 	return 0;
 }
 
@@ -352,6 +360,16 @@
 	struct spcom_channel *ch = NULL;
 	const char *ch_name = "sp_kernel";
 
+	if (!cb_info) {
+		pr_err("invalid NULL cb_info param\n");
+		return;
+	}
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return;
+	}
+
 	spcom_dev->link_state = cb_info->link_state;
 
 	pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
@@ -375,6 +393,9 @@
 			pr_err("failed to find channel [%s].\n", ch_name);
 		else
 			spcom_unlock_ion_buf(ch, SPCOM_ION_FD_UNLOCK_ALL);
+
+		pr_debug("Rx-Abort pending servers.\n");
+		spcom_rx_abort_pending_server();
 		break;
 	default:
 		pr_err("unknown link_state [%d].\n", cb_info->link_state);
@@ -396,13 +417,17 @@
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
 	if (!ch) {
-		pr_err("invalid ch parameter.\n");
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
 		return;
 	}
 
-	pr_debug("ch [%s] rx size [%d].\n", ch->name, (int) size);
+	pr_debug("ch [%s] rx size [%zu]\n", ch->name, size);
 
-	ch->actual_rx_size = (int) size;
+	ch->actual_rx_size = size;
 	ch->glink_rx_buf = (void *) buf;
 
 	complete_all(&ch->rx_done);
@@ -421,7 +446,11 @@
 	int *tx_buf = (int *) buf;
 
 	if (!ch) {
-		pr_err("invalid ch parameter.\n");
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
 		return;
 	}
 
@@ -446,11 +475,15 @@
 	int ret;
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	switch (event) {
 	case GLINK_CONNECTED:
 		pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
-		complete_all(&ch->connect);
-
+		ch->glink_state = event;
 		/*
 		 * if spcom_notify_state() is called within glink_open()
 		 * then ch->glink_handle is not updated yet.
@@ -466,10 +499,11 @@
 		if (ret) {
 			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
 		} else {
-			pr_debug("rx buf is ready, size [%d].\n",
+			pr_debug("rx buf is ready, size [%zu].\n",
 				 ch->rx_buf_size);
 			ch->rx_buf_ready = true;
 		}
+		complete_all(&ch->connect);
 		break;
 	case GLINK_LOCAL_DISCONNECTED:
 		/*
@@ -477,6 +511,7 @@
 		 * only after *both* sides closed the channel.
 		 */
 		pr_debug("GLINK_LOCAL_DISCONNECTED, ch [%s].\n", ch->name);
+		ch->glink_state = event;
 		complete_all(&ch->disconnect);
 		break;
 	case GLINK_REMOTE_DISCONNECTED:
@@ -487,6 +522,8 @@
 		 */
 		pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
 
+		ch->glink_state = event;
+
 		/*
 		 * Abort any blocking read() operation.
 		 * The glink notification might be after REMOTE_DISCONNECT.
@@ -504,8 +541,6 @@
 		       (int) event, ch->name);
 		return;
 	}
-
-	ch->glink_state = event;
 }
 
 /**
@@ -539,9 +574,14 @@
 {
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	pr_debug("ch [%s] pending rx aborted.\n", ch->name);
 
-	if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
+	if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
 		ch->rx_abort = true;
 		complete_all(&ch->rx_done);
 	}
@@ -559,6 +599,11 @@
 {
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	pr_debug("ch [%s] pending tx aborted.\n", ch->name);
 
 	if (spcom_is_channel_connected(ch) && (!ch->tx_abort)) {
@@ -672,12 +717,11 @@
 
 	/* only one client/server may use the channel */
 	if (ch->ref_count) {
-		pr_err("channel [%s] already in use.\n", name);
-		goto exit_err;
+		pr_err("channel [%s] is BUSY, already in use by pid [%d].\n",
+			name, ch->pid);
+		mutex_unlock(&ch->lock);
+		return -EBUSY;
 	}
-	ch->ref_count++;
-	ch->pid = current_pid();
-	ch->txn_id = INITIAL_TXN_ID;
 
 	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
 		 name, ch->pid, ch->ref_count);
@@ -702,7 +746,12 @@
 	} else {
 		pr_debug("glink_open [%s] ok.\n", name);
 	}
+
+	/* init channel context after successful open */
 	ch->glink_handle = handle;
+	ch->ref_count++;
+	ch->pid = current_pid();
+	ch->txn_id = INITIAL_TXN_ID;
 
 	pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
 		 name, timeout_msec);
@@ -776,6 +825,8 @@
  * @size: buffer size
  *
  * ACK is expected within a very short time (few msec).
+ *
+ * Return: 0 on successful operation, negative value otherwise.
  */
 static int spcom_tx(struct spcom_channel *ch,
 		    void *buf,
@@ -840,13 +891,15 @@
  * @size: buffer size
  *
  * ACK is expected within a very short time (few msec).
+ *
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_rx(struct spcom_channel *ch,
 		     void *buf,
 		     uint32_t size,
 		     uint32_t timeout_msec)
 {
-	int ret;
+	int ret = -1;
 	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
 	long timeleft = 1;
 
@@ -854,7 +907,7 @@
 
 	/* check for already pending data */
 	if (ch->actual_rx_size) {
-		pr_debug("already pending data size [%d].\n",
+		pr_debug("already pending data size [%zu]\n",
 			 ch->actual_rx_size);
 		goto copy_buf;
 	}
@@ -871,23 +924,24 @@
 
 	if (timeleft == 0) {
 		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
-		goto exit_err;
+		mutex_unlock(&ch->lock);
+		return -ETIMEDOUT;
 	} else if (ch->rx_abort) {
-		pr_err("rx aborted.\n");
-		goto exit_err;
+		mutex_unlock(&ch->lock);
+		return -ERESTART; /* probably SSR */
 	} else if (ch->actual_rx_size) {
-		pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
+		pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
 	} else {
 		pr_err("actual_rx_size is zero.\n");
 		goto exit_err;
 	}
 
+copy_buf:
 	if (!ch->glink_rx_buf) {
 		pr_err("invalid glink_rx_buf.\n");
 		goto exit_err;
 	}
 
-copy_buf:
 	/* Copy from glink buffer to spcom buffer */
 	size = min_t(int, ch->actual_rx_size, size);
 	memcpy(buf, ch->glink_rx_buf, size);
@@ -905,7 +959,7 @@
 		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
 		goto exit_err;
 	} else {
-		pr_debug("queue rx_buf, size [%d].\n", ch->rx_buf_size);
+		pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
 	}
 
 	mutex_unlock(&ch->lock);
@@ -925,6 +979,8 @@
  * Server needs the size of the next request to allocate a request buffer.
  * Initially used intent-request, however this complicated the remote side,
  * so both sides are not using glink_tx() with INTENT_REQ anymore.
+ *
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_get_next_request_size(struct spcom_channel *ch)
 {
@@ -936,15 +992,22 @@
 
 	/* check if already got it via callback */
 	if (ch->actual_rx_size) {
-		pr_debug("next-req-size already ready ch [%s] size [%d].\n",
+		pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
 			 ch->name, ch->actual_rx_size);
 		goto exit_ready;
 	}
 
 	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
 	wait_for_completion(&ch->rx_done);
+
+	/* Check Rx Abort on SP reset */
+	if (ch->rx_abort) {
+		pr_err("rx aborted.\n");
+		goto exit_error;
+	}
+
 	if (ch->actual_rx_size <= 0) {
-		pr_err("invalid rx size [%d] ch [%s].\n",
+		pr_err("invalid rx size [%zu] ch [%s]\n",
 		       ch->actual_rx_size, ch->name);
 		goto exit_error;
 	}
@@ -968,6 +1031,27 @@
 
 }
 
+/**
+ * spcom_rx_abort_pending_server() - abort pending server rx on SSR.
+ *
+ * Server that is waiting for request, but has no client connected,
+ * will not get RX-ABORT or REMOTE-DISCONNECT notification,
+ * that should cancel the server pending rx operation.
+ */
+static void spcom_rx_abort_pending_server(void)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (ch->is_server) {
+			pr_debug("rx-abort server on ch [%s].\n", ch->name);
+			spcom_notify_rx_abort(NULL, ch, NULL);
+		}
+	}
+}
+
 /*======================================================================*/
 /*		General API for kernel drivers				*/
 /*======================================================================*/
@@ -979,6 +1063,9 @@
  */
 bool spcom_is_sp_subsystem_link_up(void)
 {
+	if (spcom_dev == NULL)
+		return false;
+
 	return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 }
 EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
@@ -1001,6 +1088,11 @@
 	struct spcom_channel *ch;
 	struct spcom_client *client;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+			return NULL;
+	}
+
 	if (!info) {
 		pr_err("Invalid parameter.\n");
 			return NULL;
@@ -1042,17 +1134,26 @@
 {
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!client) {
-		pr_err("Invalid parameter.\n");
+		pr_err("Invalid client parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = client->ch;
-
-	kfree(client);
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	spcom_close(ch);
 
+	kfree(client);
+
 	return 0;
 }
 EXPORT_SYMBOL(spcom_unregister_client);
@@ -1069,6 +1170,8 @@
  * @timeout_msec: timeout waiting for response.
  *
  * The timeout depends on the specific request handling time at the remote side.
+ *
+ * Return: number of rx bytes on success, negative value on failure.
  */
 int spcom_client_send_message_sync(struct spcom_client	*client,
 				    void	*req_ptr,
@@ -1080,12 +1183,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!client || !req_ptr || !resp_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1120,13 +1232,25 @@
 bool spcom_client_is_server_connected(struct spcom_client *client)
 {
 	bool connected;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return false;
+	}
 
 	if (!client) {
 		pr_err("Invalid parameter.\n");
+		return false;
+	}
+
+	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
 		return -EINVAL;
 	}
 
-	connected = spcom_is_channel_connected(client->ch);
+	connected = spcom_is_channel_connected(ch);
 
 	return connected;
 }
@@ -1150,6 +1274,11 @@
 	struct spcom_channel *ch;
 	struct spcom_server *server;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return NULL;
+	}
+
 	if (!info) {
 		pr_err("Invalid parameter.\n");
 		return NULL;
@@ -1188,17 +1317,26 @@
 {
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server) {
-		pr_err("Invalid parameter.\n");
+		pr_err("Invalid server parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
-
-	kfree(server);
+	if (!ch) {
+		pr_err("Invalid channel parameter.\n");
+		return -EINVAL;
+	}
 
 	spcom_close(ch);
 
+	kfree(server);
+
 	return 0;
 }
 EXPORT_SYMBOL(spcom_unregister_service);
@@ -1208,7 +1346,7 @@
  *
  * @server: server handle
  *
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 int spcom_server_get_next_request_size(struct spcom_server *server)
 {
@@ -1221,6 +1359,10 @@
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1243,7 +1385,7 @@
  * @req_ptr: request buffer pointer
  * @req_size: max request size
  *
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 int spcom_server_wait_for_request(struct spcom_server	*server,
 				  void			*req_ptr,
@@ -1252,12 +1394,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server || !req_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1285,12 +1436,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server || !resp_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1322,6 +1482,7 @@
 	int ret = 0;
 	struct spcom_user_create_channel_command *cmd = cmd_buf;
 	const char *ch_name;
+	const size_t maxlen = sizeof(cmd->ch_name);
 
 	if (cmd_size != sizeof(*cmd)) {
 		pr_err("cmd_size [%d] , expected [%d].\n",
@@ -1330,6 +1491,10 @@
 	}
 
 	ch_name = cmd->ch_name;
+	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+		pr_err("channel name is not NULL terminated\n");
+		return -EINVAL;
+	}
 
 	pr_debug("ch_name [%s].\n", ch_name);
 
@@ -1468,7 +1633,7 @@
 
 	/* Get ION handle from fd */
 	handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
-	if (handle == NULL) {
+	if (IS_ERR_OR_NULL(handle)) {
 		pr_err("fail to get ion handle.\n");
 		return -EINVAL;
 	}
@@ -1629,18 +1794,23 @@
 
 	/* Get ION handle from fd - this increments the ref count */
 	ion_handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
-	if (ion_handle == NULL) {
+	if (IS_ERR_OR_NULL(ion_handle)) {
 		pr_err("fail to get ion handle.\n");
 		return -EINVAL;
 	}
+
 	pr_debug("ion handle ok.\n");
 
+	/* ION buf lock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
 	/* Check if this ION buffer is already locked */
 	for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
 		if (ch->ion_handle_table[i] == ion_handle) {
-			pr_debug("fd [%d] ion buf is already locked.\n", fd);
+			pr_err("fd [%d] ion buf is already locked.\n", fd);
 			/* decrement back the ref count */
 			ion_free(spcom_dev->ion_client, ion_handle);
+			mutex_unlock(&ch->lock);
 			return -EINVAL;
 		}
 	}
@@ -1650,11 +1820,19 @@
 		if (ch->ion_handle_table[i] == NULL) {
 			ch->ion_handle_table[i] = ion_handle;
 			ch->ion_fd_table[i] = fd;
-			pr_debug("locked ion buf#[%d], fd [%d].\n", i, fd);
+			pr_debug("ch [%s] locked ion buf #%d, fd [%d].\n",
+				ch->name, i, fd);
+			mutex_unlock(&ch->lock);
 			return 0;
 		}
 	}
 
+	pr_err("no free entry to store ion handle of fd [%d].\n", fd);
+	/* decrement back the ref count */
+	ion_free(spcom_dev->ion_client, ion_handle);
+
+	mutex_unlock(&ch->lock);
+
 	return -EFAULT;
 }
 
@@ -1684,20 +1862,24 @@
 		/* unlock all ION buf */
 		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
 			if (ch->ion_handle_table[i] != NULL) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
 				ion_free(ion_client, ch->ion_handle_table[i]);
 				ch->ion_handle_table[i] = NULL;
 				ch->ion_fd_table[i] = -1;
-				pr_debug("unlocked ion buf#[%d].\n", i);
 			}
 		}
 	} else {
 		/* unlock specific ION buf */
 		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+			if (ch->ion_handle_table[i] == NULL)
+				continue;
 			if (ch->ion_fd_table[i] == fd) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
 				ion_free(ion_client, ch->ion_handle_table[i]);
 				ch->ion_handle_table[i] = NULL;
 				ch->ion_fd_table[i] = -1;
-				pr_debug("unlocked ion buf#[%d].\n", i);
 				found = true;
 				break;
 			}
@@ -1731,8 +1913,13 @@
 		return -EINVAL;
 	}
 
+	/* ION buf unlock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
 	ret = spcom_unlock_ion_buf(ch, fd);
 
+	mutex_unlock(&ch->lock);
+
 	return ret;
 }
 
@@ -1766,9 +1953,9 @@
 	int swap_id;
 	char cmd_name[5] = {0}; /* debug only */
 
-	/* opcode field is the minimum length of cmd */
-	if (buf_size < sizeof(cmd->cmd_id)) {
-		pr_err("Invalid argument user buffer size %d.\n", buf_size);
+	/* Minimal command should have command-id and argument */
+	if (buf_size < sizeof(struct spcom_user_command)) {
+		pr_err("Command buffer size [%d] too small\n", buf_size);
 		return -EINVAL;
 	}
 
@@ -1813,7 +2000,7 @@
  * @buf:	command buffer.
  * @size:	command buffer size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_get_req_size(struct spcom_channel *ch,
 				      void *buf,
@@ -1841,7 +2028,7 @@
  * @buf:	command buffer.
  * @size:	command buffer size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_read_req_resp(struct spcom_channel *ch,
 				       void *buf,
@@ -1861,7 +2048,7 @@
 
 	/* Check param validity */
 	if (size > SPCOM_MAX_RESPONSE_SIZE) {
-		pr_err("ch [%s] inavlid size [%d].\n",
+		pr_err("ch [%s] invalid size [%d].\n",
 			ch->name, size);
 		return -EINVAL;
 	}
@@ -1884,7 +2071,8 @@
 	ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
 	if (ret < 0) {
 		pr_err("rx error %d.\n", ret);
-		goto exit_err;
+		kfree(rx_buf);
+		return ret;
 	} else {
 		size = ret; /* actual_rx_size */
 	}
@@ -1924,7 +2112,7 @@
  * A special size SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
  * response/request tells the kernel that user space only need the size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_read(struct spcom_channel *ch,
 			      void *buf,
@@ -1932,8 +2120,8 @@
 {
 	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
 		pr_debug("get next request size, ch [%s].\n", ch->name);
-		size = spcom_handle_get_req_size(ch, buf, size);
 		ch->is_server = true;
+		size = spcom_handle_get_req_size(ch, buf, size);
 	} else {
 		pr_debug("get request/response, ch [%s].\n", ch->name);
 		size = spcom_handle_read_req_resp(ch, buf, size);
@@ -1988,6 +2176,10 @@
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
 
+	/* silent error message until spss link is up */
+	/* fail silently (no error print) until the spss link is up */
+		return -ENODEV;
+
 	pr_debug("Open file [%s].\n", name);
 
 	if (strcmp(name, DEVICE_NAME) == 0) {
@@ -2006,8 +2198,6 @@
 		return -ENODEV;
 	}
 
-	filp->private_data = ch;
-
 	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
 	if (ret == -ETIMEDOUT) {
 		pr_err("Connection timeout channel [%s].\n", name);
@@ -2016,6 +2206,8 @@
 		return ret;
 	}
 
+	filp->private_data = ch;
+
 	pr_debug("finished.\n");
 
 	return 0;
@@ -2036,7 +2228,6 @@
 {
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
-	bool connected = false;
 
 	pr_debug("Close file [%s].\n", name);
 
@@ -2058,19 +2249,18 @@
 	}
 
 	/* channel might be already closed or disconnected */
-	if (spcom_is_channel_open(ch) && spcom_is_channel_connected(ch))
-		connected = true;
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch [%s] already closed.\n", name);
+		return 0;
+	}
 
 	reinit_completion(&ch->disconnect);
 
 	spcom_close(ch);
 
-	if (connected) {
-		pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n",
-			 name);
-		wait_for_completion(&ch->disconnect);
-		pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
-	}
+	pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n", name);
+	wait_for_completion(&ch->disconnect);
+	pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
 
 	return 0;
 }
@@ -2102,8 +2292,8 @@
 
 	ch = filp->private_data;
 	if (!ch) {
-		pr_debug("invalid ch pointer.\n");
-		/* Allow some special commands via /dev/spcom and /dev/sp_ssr */
+		pr_err("invalid ch pointer, command not allowed.\n");
+		return -EINVAL;
 	} else {
 		/* Check if remote side connect */
 		if (!spcom_is_channel_connected(ch)) {
@@ -2147,7 +2337,7 @@
 }
 
 /**
- * spcom_device_read() - handle channel file write() from user space.
+ * spcom_device_read() - handle channel file read() from user space.
  *
  * @filp: file pointer
  *
@@ -2173,12 +2363,28 @@
 
 	ch = filp->private_data;
 
+	if (ch == NULL) {
+		pr_err("invalid ch pointer, file [%s].\n", name);
+		return -EINVAL;
+	}
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch is not open, file [%s].\n", name);
+		return -EINVAL;
+	}
+
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
-	actual_size = spcom_handle_read(ch, buf, size);
-	if ((actual_size <= 0) || (actual_size > size)) {
+	ret = spcom_handle_read(ch, buf, size);
+	if (ret < 0) {
+		pr_err("read error [%d].\n", ret);
+		kfree(buf);
+		return ret;
+	}
+	actual_size = ret;
+	if ((actual_size == 0) || (actual_size > size)) {
 		pr_err("invalid actual_size [%d].\n", actual_size);
 		kfree(buf);
 		return -EFAULT;
@@ -2254,6 +2460,10 @@
 		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 		break;
 	case SPCOM_POLL_CH_CONNECT:
+		if (ch == NULL) {
+			pr_err("invalid ch pointer, file [%s].\n", name);
+			return -EINVAL;
+		}
 		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
 		if (wait) {
 			reinit_completion(&ch->connect);
@@ -2329,7 +2539,7 @@
 	devt = spcom_dev->device_no + spcom_dev->channel_count;
 	priv = ch;
 	dev = device_create(cls, parent, devt, priv, name);
-	if (!dev) {
+	if (IS_ERR(dev)) {
 		pr_err("device_create failed.\n");
 		kfree(cdev);
 		return -ENODEV;
@@ -2382,7 +2592,7 @@
 				  spcom_dev->device_no, priv,
 				  DEVICE_NAME);
 
-	if (!spcom_dev->class_dev) {
+	if (IS_ERR(spcom_dev->class_dev)) {
 		pr_err("class_device_create failed %d\n", ret);
 		ret = -ENOMEM;
 		goto exit_destroy_class;
@@ -2435,6 +2645,11 @@
 
 	pr_debug("num of predefined channels [%d].\n", num_ch);
 
+	if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+		pr_err("too many predefined channels [%d].\n", num_ch);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < num_ch; i++) {
 		ret = of_property_read_string_index(np, propname, i, &name);
 		if (ret) {
@@ -2500,21 +2715,23 @@
 	pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
 		link_info.transport, link_info.edge);
 	notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
-	if (!notif_handle) {
+	if (IS_ERR(notif_handle)) {
 		pr_err("glink_register_link_state_cb(), err [%d]\n", ret);
 		goto fail_reg_chardev;
 	}
 
 	spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
-	if (spcom_dev->ion_client == NULL) {
+	if (IS_ERR(spcom_dev->ion_client)) {
 		pr_err("fail to create ion client.\n");
-		goto fail_reg_chardev;
+		goto fail_ion_client;
 	}
 
 	pr_info("Driver Initialization ok.\n");
 
 	return 0;
 
+fail_ion_client:
+	glink_unregister_link_state_cb(notif_handle);
 fail_reg_chardev:
 	pr_err("Failed to init driver.\n");
 	spcom_unregister_chrdev();
@@ -2552,7 +2769,7 @@
 	if (ret)
 		pr_err("spcom_driver register failed %d\n", ret);
 
-	return 0;
+	return ret;
 }
 module_init(spcom_init);
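
An illustrative sketch (not part of the patch; the struct and sizes are placeholders) of the NUL-termination check that the create-channel command handler above now performs on its fixed-size, user-supplied name field:

#include <linux/string.h>
#include <linux/errno.h>

struct demo_cmd {
	char ch_name[32];	/* size is illustrative only */
};

static int demo_validate_name(const struct demo_cmd *cmd)
{
	const size_t maxlen = sizeof(cmd->ch_name);

	/* strnlen() == maxlen means there is no NUL within the buffer */
	if (strnlen(cmd->ch_name, maxlen) == maxlen)
		return -EINVAL;

	return 0;
}
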
 
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 6ff39de..f8f6829 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -36,7 +36,7 @@
 
 #define XO_FREQ			19200000
 #define PROXY_TIMEOUT_MS	10000
-#define MAX_SSR_REASON_LEN	81U
+#define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
 #define CRASH_STOP_ACK_TO_MS	200
 
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 21f3580..55cb604 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -35,6 +35,7 @@
 #include <soc/qcom/subsystem_restart.h>
 #include <soc/qcom/subsystem_notif.h>
 #include <soc/qcom/sysmon.h>
+#include <trace/events/trace_msm_pil_event.h>
 
 #include <asm/current.h>
 
@@ -534,8 +535,10 @@
 		notif_data.no_auth = dev->desc->no_auth;
 		notif_data.pdev = pdev;
 
+		trace_pil_notif("before_send_notif", notif, dev->desc->fw_name);
 		subsys_notif_queue_notification(dev->notify, notif,
 								&notif_data);
+		trace_pil_notif("after_send_notif", notif, dev->desc->fw_name);
 	}
 }
 
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 034ddd3..c8bb13d 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -21,6 +21,7 @@
 #include <linux/list.h>
 #include <linux/cdev.h>
 #include <linux/platform_device.h>
+#include <linux/vmalloc.h>
 #include <soc/qcom/glink.h>
 #include "sound/wcd-dsp-glink.h"
 
@@ -29,6 +30,10 @@
 #define WDSP_MAX_READ_SIZE (4 * 1024)
 #define WDSP_MAX_NO_OF_INTENTS (20)
 #define WDSP_MAX_NO_OF_CHANNELS (10)
+#define WDSP_WRITE_PKT_SIZE (sizeof(struct wdsp_write_pkt))
+#define WDSP_REG_PKT_SIZE (sizeof(struct wdsp_reg_pkt))
+#define WDSP_CMD_PKT_SIZE (sizeof(struct wdsp_cmd_pkt))
+#define WDSP_CH_CFG_SIZE (sizeof(struct wdsp_glink_ch_cfg))
 
 #define MINOR_NUMBER_COUNT 1
 #define WDSP_EDGE "wdsp"
@@ -183,7 +188,7 @@
 		return;
 	}
 	/* Free tx pkt */
-	kfree(pkt_priv);
+	vfree(pkt_priv);
 }
 
 /*
@@ -201,7 +206,7 @@
 		return;
 	}
 	/* Free tx pkt */
-	kfree(pkt_priv);
+	vfree(pkt_priv);
 }
 
 /*
@@ -519,9 +524,10 @@
  * and register with glink
  * wpriv:     Wdsp_glink private structure.
  * pkt:       Glink registration packet contains glink channel information.
+ * pkt_size:  Size of the pkt.
  */
 static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
-				   struct wdsp_reg_pkt *pkt)
+				   struct wdsp_reg_pkt *pkt, size_t pkt_size)
 {
 	int ret = 0, i, j;
 	struct glink_link_info link_info;
@@ -530,14 +536,23 @@
 	u8 no_of_channels;
 	u8 *payload;
 	u32 ch_size, ch_cfg_size;
+	size_t size = WDSP_WRITE_PKT_SIZE + WDSP_REG_PKT_SIZE;
 
+	mutex_lock(&wpriv->glink_mutex);
+	if (wpriv->ch) {
+		dev_err(wpriv->dev, "%s: glink ch memory is already allocated\n",
+			 __func__);
+		ret = -EINVAL;
+		goto done;
+	}
 	payload = (u8 *)pkt->payload;
 	no_of_channels = pkt->no_of_channels;
 
 	if (no_of_channels > WDSP_MAX_NO_OF_CHANNELS) {
-		dev_info(wpriv->dev, "%s: no_of_channels = %d are limited to %d\n",
-			 __func__, no_of_channels, WDSP_MAX_NO_OF_CHANNELS);
-		no_of_channels = WDSP_MAX_NO_OF_CHANNELS;
+		dev_err(wpriv->dev, "%s: no_of_channels: %d but max allowed are %d\n",
+			__func__, no_of_channels, WDSP_MAX_NO_OF_CHANNELS);
+		ret = -EINVAL;
+		goto done;
 	}
 	ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *),
 		     GFP_KERNEL);
@@ -551,20 +566,34 @@
 	for (i = 0; i < no_of_channels; i++) {
 		ch_cfg = (struct wdsp_glink_ch_cfg *)payload;
 
+		size += WDSP_CH_CFG_SIZE;
+		if (size > pkt_size) {
+			dev_err(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n",
+				__func__, size, pkt_size);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
 		if (ch_cfg->no_of_intents > WDSP_MAX_NO_OF_INTENTS) {
 			dev_err(wpriv->dev, "%s: Invalid no_of_intents = %d\n",
 				__func__, ch_cfg->no_of_intents);
 			ret = -EINVAL;
 			goto err_ch_mem;
 		}
+		size += (sizeof(u32) * ch_cfg->no_of_intents);
+		if (size > pkt_size) {
+			dev_err(wpriv->dev, "%s: Invalid size = %zd, pkt_size = %zd\n",
+				__func__, size, pkt_size);
+			ret = -EINVAL;
+			goto err_ch_mem;
+		}
 
 		ch_cfg_size = sizeof(struct wdsp_glink_ch_cfg) +
 					(sizeof(u32) * ch_cfg->no_of_intents);
 		ch_size = sizeof(struct wdsp_glink_ch) +
 					(sizeof(u32) * ch_cfg->no_of_intents);
 
-		dev_dbg(wpriv->dev, "%s: channels = %d, ch_cfg_size %d",
-			 __func__, no_of_channels, ch_cfg_size);
+		dev_dbg(wpriv->dev, "%s: channels: %d ch_cfg_size: %d, size: %zd, pkt_size: %zd",
+			 __func__, no_of_channels, ch_cfg_size, size, pkt_size);
 
 		ch[i] = kzalloc(ch_size, GFP_KERNEL);
 		if (!ch[i]) {
@@ -611,6 +640,7 @@
 	wpriv->no_of_channels = 0;
 
 done:
+	mutex_unlock(&wpriv->glink_mutex);
 	return ret;
 }
 
@@ -650,7 +680,7 @@
 			 * there won't be any tx_done notification to
 			 * free the buffer.
 			 */
-			kfree(tx_buf);
+			vfree(tx_buf);
 		}
 	} else {
 		mutex_unlock(&tx_buf->ch->mutex);
@@ -660,7 +690,7 @@
 		 * Free tx_buf here as there won't be any tx_done
 		 * notification in this case also.
 		 */
-		kfree(tx_buf);
+		vfree(tx_buf);
 	}
 }
 
@@ -753,6 +783,7 @@
 	struct wdsp_cmd_pkt *cpkt;
 	struct wdsp_glink_tx_buf *tx_buf;
 	struct wdsp_glink_priv *wpriv;
+	size_t pkt_max_size;
 
 	wpriv = (struct wdsp_glink_priv *)file->private_data;
 	if (!wpriv) {
@@ -761,7 +792,7 @@
 		goto done;
 	}
 
-	if ((count < sizeof(struct wdsp_write_pkt)) ||
+	if ((count < WDSP_WRITE_PKT_SIZE) ||
 	    (count > WDSP_MAX_WRITE_SIZE)) {
 		dev_err(wpriv->dev, "%s: Invalid count = %zd\n",
 			__func__, count);
@@ -771,8 +802,8 @@
 
 	dev_dbg(wpriv->dev, "%s: count = %zd\n", __func__, count);
 
-	tx_buf_size = WDSP_MAX_WRITE_SIZE + sizeof(struct wdsp_glink_tx_buf);
-	tx_buf = kzalloc(tx_buf_size, GFP_KERNEL);
+	tx_buf_size = count + sizeof(struct wdsp_glink_tx_buf);
+	tx_buf = vzalloc(tx_buf_size);
 	if (!tx_buf) {
 		ret = -ENOMEM;
 		goto done;
@@ -789,19 +820,20 @@
 	wpkt = (struct wdsp_write_pkt *)tx_buf->buf;
 	switch (wpkt->pkt_type) {
 	case WDSP_REG_PKT:
-		if (count <= (sizeof(struct wdsp_write_pkt) +
-			      sizeof(struct wdsp_reg_pkt))) {
+		if (count < (WDSP_WRITE_PKT_SIZE + WDSP_REG_PKT_SIZE +
+			     WDSP_CH_CFG_SIZE)) {
 			dev_err(wpriv->dev, "%s: Invalid reg pkt size = %zd\n",
 				__func__, count);
 			ret = -EINVAL;
 			goto free_buf;
 		}
 		ret = wdsp_glink_ch_info_init(wpriv,
-					(struct wdsp_reg_pkt *)wpkt->payload);
+					(struct wdsp_reg_pkt *)wpkt->payload,
+					count);
 		if (ret < 0)
 			dev_err(wpriv->dev, "%s: glink register failed, ret = %d\n",
 				__func__, ret);
-		kfree(tx_buf);
+		vfree(tx_buf);
 		break;
 	case WDSP_READY_PKT:
 		ret = wait_event_timeout(wpriv->link_state_wait,
@@ -815,11 +847,10 @@
 			goto free_buf;
 		}
 		ret = 0;
-		kfree(tx_buf);
+		vfree(tx_buf);
 		break;
 	case WDSP_CMD_PKT:
-		if (count <= (sizeof(struct wdsp_write_pkt) +
-			      sizeof(struct wdsp_cmd_pkt))) {
+		if (count <= (WDSP_WRITE_PKT_SIZE + WDSP_CMD_PKT_SIZE)) {
 			dev_err(wpriv->dev, "%s: Invalid cmd pkt size = %zd\n",
 				__func__, count);
 			ret = -EINVAL;
@@ -835,10 +866,18 @@
 			goto free_buf;
 		}
 		mutex_unlock(&wpriv->glink_mutex);
-
 		cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
-		dev_dbg(wpriv->dev, "%s: requested ch_name: %s\n", __func__,
-			 cpkt->ch_name);
+		pkt_max_size =  sizeof(struct wdsp_write_pkt) +
+					sizeof(struct wdsp_cmd_pkt) +
+					cpkt->payload_size;
+		if (count < pkt_max_size) {
+			dev_err(wpriv->dev, "%s: Invalid cmd pkt count = %zd, pkt_size = %zd\n",
+				__func__, count, pkt_max_size);
+			ret = -EINVAL;
+			goto free_buf;
+		}
+		dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
+			__func__, cpkt->ch_name, pkt_max_size);
 		for (i = 0; i < wpriv->no_of_channels; i++) {
 			if (wpriv->ch && wpriv->ch[i] &&
 				(!strcmp(cpkt->ch_name,
@@ -873,13 +912,13 @@
 	default:
 		dev_err(wpriv->dev, "%s: Invalid packet type\n", __func__);
 		ret = -EINVAL;
-		kfree(tx_buf);
+		vfree(tx_buf);
 		break;
 	}
 	goto done;
 
 free_buf:
-	kfree(tx_buf);
+	vfree(tx_buf);
 
 done:
 	return ret;
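
The wdsp_glink_ch_info_init() change above walks a variable-length registration packet and re-checks the accumulated size against the byte count copied from user space before each channel config is parsed. Below is a minimal user-space sketch of the same bounds-checking idea; the record layout and the intent limit are made up for illustration and are not the wcd-dsp-glink structures.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical wire format: each record is a 1-byte header followed by
 * no_of_intents 32-bit values. */
struct rec_hdr {
	uint8_t no_of_intents;
};

/* Return 0 if every record fits inside buf[0..len), -1 otherwise. */
static int validate_records(const uint8_t *buf, size_t len, unsigned int n_recs)
{
	size_t off = 0;
	unsigned int i;

	for (i = 0; i < n_recs; i++) {
		struct rec_hdr hdr;

		if (off + sizeof(hdr) > len)
			return -1;			/* header would overrun */
		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);

		if (hdr.no_of_intents > 20)
			return -1;			/* per-record sanity limit */
		if (off + (size_t)hdr.no_of_intents * sizeof(uint32_t) > len)
			return -1;			/* payload would overrun */
		off += (size_t)hdr.no_of_intents * sizeof(uint32_t);
	}
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 1 };		/* one record claiming one intent */

	printf("%d\n", validate_records(buf, sizeof(buf), 1));	/* 0: fits   */
	printf("%d\n", validate_records(buf, 4, 1));		/* -1: short */
	return 0;
}
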
diff --git a/drivers/soundwire/soundwire.c b/drivers/soundwire/soundwire.c
index 68655a5..f0c7aa9 100644
--- a/drivers/soundwire/soundwire.c
+++ b/drivers/soundwire/soundwire.c
@@ -68,6 +68,27 @@
 }
 
 /**
+ * swr_remove_device - remove a soundwire device
+ * @swr_dev: soundwire device to remove
+ *
+ * Remove a soundwire device. Walk the list of devices
+ * held by the device's master and remove swr_dev from
+ * that list.
+ */
+void swr_remove_device(struct swr_device *swr_dev)
+{
+	struct swr_device *swr_dev_loop, *safe;
+
+	list_for_each_entry_safe(swr_dev_loop, safe,
+				 &swr_dev->master->devices,
+				 dev_list) {
+		if (swr_dev == swr_dev_loop)
+			list_del(&swr_dev_loop->dev_list);
+	}
+}
+EXPORT_SYMBOL(swr_remove_device);
+
+/**
  * swr_new_device - instantiate a new soundwire device
  * @master: Controller to which device is connected
  * @info: Describes the soundwire device
@@ -128,47 +149,6 @@
 EXPORT_SYMBOL(swr_new_device);
 
 /**
- * swr_startup_devices - perform additional initialization for child devices
- *
- * @swr_dev: pointer to soundwire slave device
- *
- * Performs any additional initialization needed for a soundwire slave device.
- * This is a optional functionality defined by slave devices.
- * Removes the slave node from the list, in case there is any failure.
- */
-int swr_startup_devices(struct swr_device *swr_dev)
-{
-	struct swr_driver *swr_drv;
-	struct device *dev;
-	int ret = 0;
-
-	if (!swr_dev)
-		return -EINVAL;
-
-	dev = &swr_dev->dev;
-	if (!dev)
-		return -EINVAL;
-
-	swr_drv = to_swr_driver(dev->driver);
-	if (!swr_drv)
-		return -EINVAL;
-
-	if (swr_drv->startup) {
-		ret = swr_drv->startup(swr_dev);
-		if (ret)
-			goto out;
-
-		dev_dbg(&swr_dev->dev,
-			"%s: startup complete for device %lx\n",
-			__func__, swr_dev->addr);
-	}
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL(swr_startup_devices);
-
-/**
  * of_register_swr_devices - register child devices on to the soundwire bus
  * @master: pointer to soundwire master device
  *
@@ -202,14 +182,15 @@
 		}
 		info.addr = addr;
 		info.of_node = of_node_get(node);
+		master->num_dev++;
 		swr = swr_new_device(master, &info);
 		if (!swr) {
 			dev_err(&master->dev, "of_swr: Register failed %s\n",
 				node->full_name);
 			of_node_put(node);
+			master->num_dev--;
 			continue;
 		}
-		master->num_dev++;
 	}
 	return 0;
 }
@@ -605,7 +586,7 @@
 	dev = &swr_dev->dev;
 	sdrv = to_swr_driver(dev->driver);
 	if (!sdrv)
-		return -EINVAL;
+		return 0;
 
 	if (sdrv->device_up)
 		return sdrv->device_up(to_swr_device(dev));
@@ -633,7 +614,7 @@
 	dev = &swr_dev->dev;
 	sdrv = to_swr_driver(dev->driver);
 	if (!sdrv)
-		return -EINVAL;
+		return 0;
 
 	if (sdrv->device_down)
 		return sdrv->device_down(to_swr_device(dev));
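
swr_remove_device() relies on the _safe list iterator so an entry can be unlinked while the list is being walked. The sketch below shows the underlying pattern with a hand-rolled doubly linked list: the next pointer is cached before the current node is unlinked, which is exactly why list_for_each_entry_safe() takes a spare cursor. The node layout is illustrative, not the soundwire structures.

#include <stdio.h>
#include <stdlib.h>

/* Minimal doubly linked list node, standing in for swr_device::dev_list. */
struct node {
	int addr;
	struct node *prev, *next;
};

static void unlink_node(struct node *n)
{
	if (n->prev)
		n->prev->next = n->next;
	if (n->next)
		n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

int main(void)
{
	struct node a = { 1, NULL, NULL }, b = { 2, NULL, NULL }, c = { 3, NULL, NULL };
	struct node *head = &a, *cur, *tmp;

	a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;

	/* "Safe" iteration: remember the successor before unlinking cur. */
	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;
		if (cur->addr == 2) {
			unlink_node(cur);
			if (cur == head)
				head = tmp;
		}
	}

	for (cur = head; cur; cur = cur->next)
		printf("%d\n", cur->addr);	/* prints 1 then 3 */
	return 0;
}
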
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index ce2a367..e338d58 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -223,6 +223,12 @@
 static struct dentry *debugfs_reg_dump;
 static unsigned int read_data;
 
+
+static bool swrm_is_msm_variant(int val)
+{
+	return (val == SWRM_VERSION_1_3);
+}
+
 static int swrm_debug_open(struct inode *inode, struct file *file)
 {
 	file->private_data = inode->i_private;
@@ -513,8 +519,17 @@
 			__func__, val, ret);
 		goto err;
 	}
-	if (cmd_id == 0xF)
-		wait_for_completion_timeout(&swrm->broadcast, (2 * HZ/10));
+	if (cmd_id == 0xF) {
+		/*
+		 * Sleep for 10 ms on the MSM soundwire variant to allow the
+		 * broadcast command to complete.
+		 */
+		if (swrm_is_msm_variant(swrm->version))
+			usleep_range(10000, 10100);
+		else
+			wait_for_completion_timeout(&swrm->broadcast,
+						    (2 * HZ/10));
+	}
 err:
 	return ret;
 }
@@ -1355,7 +1370,6 @@
 {
 	struct swr_mstr_ctrl *swrm;
 	struct swr_ctrl_platform_data *pdata;
-	struct swr_device *swr_dev, *safe;
 	int ret;
 
 	/* Allocate soundwire master driver structure */
@@ -1454,9 +1468,6 @@
 		goto err_mstr_fail;
 	}
 
-	if (pdev->dev.of_node)
-		of_register_swr_devices(&swrm->master);
-
 	/* Add devices registered with board-info as the
 	 * controller will be up now
 	 */
@@ -1471,16 +1482,13 @@
 		mutex_unlock(&swrm->mlock);
 		goto err_mstr_fail;
 	}
+	swrm->version = swrm->read(swrm->handle, SWRM_COMP_HW_VERSION);
 
-	/* Enumerate slave devices */
-	list_for_each_entry_safe(swr_dev, safe, &swrm->master.devices,
-				 dev_list) {
-		ret = swr_startup_devices(swr_dev);
-		if (ret)
-			list_del(&swr_dev->dev_list);
-	}
 	mutex_unlock(&swrm->mlock);
 
+	if (pdev->dev.of_node)
+		of_register_swr_devices(&swrm->master);
+
 	dbgswrm = swrm;
 	debugfs_swrm_dent = debugfs_create_dir(dev_name(&pdev->dev), 0);
 	if (!IS_ERR(debugfs_swrm_dent)) {
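
The broadcast-command change keys off the controller version read from SWRM_COMP_HW_VERSION: the MSM variant takes a fixed 10 ms sleep, while every other version waits on the completion. A trivial stand-alone sketch of that decision follows; the printed strings merely name the kernel calls the driver would make.

#include <stdio.h>
#include <stdbool.h>

#define SWRM_VERSION_1_3 0x01040000u

/* Mirrors swrm_is_msm_variant(): only the 1.3 controller takes the
 * fixed-sleep path for broadcast commands. */
static bool swrm_is_msm_variant(unsigned int val)
{
	return val == SWRM_VERSION_1_3;
}

int main(void)
{
	unsigned int versions[] = { 0x01010000u, 0x01040000u };
	int i;

	for (i = 0; i < 2; i++)
		printf("0x%08x -> %s\n", versions[i],
		       swrm_is_msm_variant(versions[i]) ?
		       "usleep_range(10000, 10100)" :
		       "wait_for_completion_timeout(&broadcast, 2*HZ/10)");
	return 0;
}
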
diff --git a/drivers/soundwire/swr-wcd-ctrl.h b/drivers/soundwire/swr-wcd-ctrl.h
index 8992318..b7a3eda 100644
--- a/drivers/soundwire/swr-wcd-ctrl.h
+++ b/drivers/soundwire/swr-wcd-ctrl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,10 @@
 
 #define SWR_MSTR_PORT_LEN	8 /* Number of master ports */
 
+#define SWRM_VERSION_1_0 0x01010000
+#define SWRM_VERSION_1_2 0x01030000
+#define SWRM_VERSION_1_3 0x01040000
+
 enum {
 	SWR_MSTR_PAUSE,
 	SWR_MSTR_RESUME,
@@ -88,6 +92,7 @@
 	int (*reg_irq)(void *handle, irqreturn_t(*irq_handler)(int irq,
 			void *data), void *swr_handle, int type);
 	int irq;
+	int version;
 	int num_enum_slaves;
 	int slave_status;
 	struct swr_mstr_port *mstr_port;
diff --git a/drivers/soundwire/swrm_registers.h b/drivers/soundwire/swrm_registers.h
index c6923f3..50c3ecf 100644
--- a/drivers/soundwire/swrm_registers.h
+++ b/drivers/soundwire/swrm_registers.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 
 #define SWRM_BASE_ADDRESS				0x00
 
+#define SWRM_COMP_HW_VERSION                     SWRM_BASE_ADDRESS
 #define SWRM_COMP_CFG_ADDR			(SWRM_BASE_ADDRESS+0x00000004)
 #define SWRM_COMP_CFG_RMSK				0x3
 #define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_BMSK		0x2
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index be6e985..a6d1cc8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -387,18 +387,10 @@
 	struct lov_mds_md *lmmk = NULL;
 	int rc, lmmk_size, lmm_size;
 	int lum_size;
-	mm_segment_t seg;
 
 	if (!lsm)
 		return -ENODATA;
 
-	/*
-	 * "Switch to kernel segment" to allow copying from kernel space by
-	 * copy_{to,from}_user().
-	 */
-	seg = get_fs();
-	set_fs(KERNEL_DS);
-
 	/* we only need the header part from user space to get lmm_magic and
 	 * lmm_stripe_count, (the header part is common to v1 and v3)
 	 */
@@ -478,6 +470,5 @@
 out_free:
 	kfree(lmmk);
 out:
-	set_fs(seg);
 	return rc;
 }
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 40e50f2..01ea228 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3798,6 +3798,8 @@
 {
 	int ret = 0;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
@@ -3823,12 +3825,14 @@
 			goto transport_err;
 
 		ret = iscsit_handle_response_queue(conn);
-		if (ret == 1)
+		if (ret == 1) {
 			goto get_immediate;
-		else if (ret == -ECONNRESET)
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
 			goto out;
-		else if (ret < 0)
+		} else if (ret < 0) {
 			goto transport_err;
+		}
 	}
 
 transport_err:
@@ -3838,8 +3842,13 @@
 	 * responsible for cleaning up the early connection failure.
 	 */
 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-		iscsit_take_action_for_connection_exit(conn);
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }
 
@@ -4012,6 +4021,7 @@
 {
 	int rc;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
 
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -4024,7 +4034,7 @@
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
 	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
+		goto out;
 
 	if (!conn->conn_transport->iscsit_get_rx_pdu)
 		return 0;
@@ -4033,7 +4043,15 @@
 
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
-	iscsit_take_action_for_connection_exit(conn);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b54e72c..efc453e 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@
 	}
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+	*conn_freed = false;
+
 	spin_lock_bh(&conn->state_lock);
 	if (atomic_read(&conn->connection_exit)) {
 		spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
 		iscsit_close_connection(conn);
+		*conn_freed = true;
 		return;
 	}
 
@@ -955,4 +958,5 @@
 	spin_unlock_bh(&conn->state_lock);
 
 	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f94..fbc1d84 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -9,6 +9,6 @@
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96c55bc..6128e8e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1460,5 +1460,9 @@
 			break;
 	}
 
+	while (!kthread_should_stop()) {
+		msleep(100);
+	}
+
 	return 0;
 }
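
The iSCSI thread functions above now park in an msleep() loop until kthread_should_stop() so the task is not reaped while kthread_stop() may still be called on it. Below is a minimal kernel-module sketch of that pattern; the names are illustrative, not the iSCSI code.

/* A worker thread that finishes its work but must not return before
 * its creator calls kthread_stop(). */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	pr_info("worker: doing one-shot work\n");

	/* Work finished, but the creator still owns the task until it
	 * calls kthread_stop(); wait here instead of returning early. */
	while (!kthread_should_stop())
		msleep(100);

	return 0;
}

static int __init demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	return PTR_ERR_OR_ZERO(worker);
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);	/* wakes the msleep loop and reaps the task */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
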
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index cae4dea..077344c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1182,15 +1182,28 @@
 	if (cmd->unknown_data_length) {
 		cmd->data_length = size;
 	} else if (size != cmd->data_length) {
-		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cmd->t_task_cdb[0]);
 
-		if (cmd->data_direction == DMA_TO_DEVICE &&
-		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-			pr_err("Rejecting underflow/overflow WRITE data\n");
-			return TCM_INVALID_CDB_FIELD;
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes.  Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
 		}
 		/*
 		 * Reject READ_* or WRITE_* with overflow/underflow for
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 432adbc..1ab5b0c 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/thermal.h>
 #include "tsens.h"
+#include "qcom/qti_virtual_sensor.h"
 
 LIST_HEAD(tsens_device_list);
 
@@ -172,6 +173,9 @@
 		return -ENODEV;
 	}
 
+	/* Register virtual thermal sensors. */
+	qti_virtual_sensor_register(&tmdev->pdev->dev);
+
 	return 0;
 }
 
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 984241f9..65d8fd7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -33,6 +33,9 @@
 #include <linux/thermal.h>
 #include <linux/list.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_virtual.h>
+
 #include "thermal_core.h"
 
 /***   Private data structures to represent thermal device tree data ***/
@@ -107,11 +110,82 @@
 	struct __sensor_param *senps;
 };
 
+/**
+ * struct virtual_sensor - internal representation of a virtual thermal zone
+ * @num_sensors: number of sensors this virtual sensor references to
+ *		  estimate temperature
+ * @tz: array of thermal zones of the sensors this virtual sensor uses
+ *	 to estimate temperature
+ * @virt_tz: virtual thermal zone pointer
+ * @logic: aggregation logic used to estimate the temperature
+ * @last_reading: last estimated temperature
+ * @coefficients: array of coefficients used for the weighted aggregation
+ *		       logic
+ * @avg_offset: offset value used for the weighted aggregation logic
+ * @avg_denominator: denominator value used for the weighted aggregation
+ *			logic
+ */
+struct virtual_sensor {
+	int                        num_sensors;
+	struct thermal_zone_device *tz[THERMAL_MAX_VIRT_SENSORS];
+	struct thermal_zone_device *virt_tz;
+	enum aggregation_logic     logic;
+	int                        last_reading;
+	int                        coefficients[THERMAL_MAX_VIRT_SENSORS];
+	int                        avg_offset;
+	int                        avg_denominator;
+};
+
 static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
 		unsigned int trip_type_mask, int *low, int *high);
 
 /***   DT thermal zone device callbacks   ***/
 
+static int virt_sensor_read_temp(void *data, int *val)
+{
+	struct virtual_sensor *sens = data;
+	int idx, temp = 0, ret = 0;
+
+	for (idx = 0; idx < sens->num_sensors; idx++) {
+		int sens_temp = 0;
+
+		ret = thermal_zone_get_temp(sens->tz[idx], &sens_temp);
+		if (ret) {
+			pr_err("virt zone: sensor[%s] read error:%d\n",
+				sens->tz[idx]->type, ret);
+			return ret;
+		}
+		switch (sens->logic) {
+		case VIRT_WEIGHTED_AVG:
+			temp += sens_temp * sens->coefficients[idx];
+			if (idx == (sens->num_sensors - 1))
+				temp = (temp + sens->avg_offset)
+					/ sens->avg_denominator;
+			break;
+		case VIRT_MAXIMUM:
+			if (idx == 0)
+				temp = INT_MIN;
+			if (sens_temp > temp)
+				temp = sens_temp;
+			break;
+		case VIRT_MINIMUM:
+			if (idx == 0)
+				temp = INT_MAX;
+			if (sens_temp < temp)
+				temp = sens_temp;
+			break;
+		default:
+			break;
+		}
+		trace_virtual_temperature(sens->virt_tz, sens->tz[idx],
+					sens_temp, temp);
+	}
+
+	sens->last_reading = *val = temp;
+
+	return 0;
+}
+
 static int of_thermal_get_temp(struct thermal_zone_device *tz,
 			       int *temp)
 {
@@ -516,6 +590,10 @@
 	.unbind = of_thermal_unbind,
 };
 
+static struct thermal_zone_of_device_ops of_virt_ops = {
+	.get_temp = virt_sensor_read_temp,
+};
+
 /***   sensor API   ***/
 
 static struct thermal_zone_device *
@@ -727,6 +805,135 @@
 }
 
 /**
+ * devm_thermal_of_virtual_sensor_register - Register a virtual sensor.
+ *	Three types of virtual sensors are supported.
+ *	1. Weighted aggregation type:
+ *		Virtual sensor of this type calculates the weighted aggregation
+ *		of sensor temperatures using the below formula,
+ *		temp = ((sensor_1_temp * coeff_1 + ... + sensor_n_temp * coeff_n)
+ *			+ avg_offset) / avg_denominator
+ *		So the sensor driver has to specify n+2 coefficients.
+ *	2. Maximum type:
+ *		Virtual sensors of this type will report the maximum of all
+ *		sensor temperatures.
+ *	3. Minimum type:
+ *		Virtual sensors of this type will report the minimum of all
+ *		sensor temperatures.
+ *
+ * @input arguments:
+ * @dev: Virtual sensor driver device pointer.
+ * @sensor_data: Virtual sensor data supported for the device.
+ *
+ * @return: Returns a virtual thermal zone pointer. Returns error if thermal
+ * zone is not created. Returns -EAGAIN if the sensor that is required for
+ * this virtual sensor temperature estimation is not registered yet. The
+ * sensor driver can try again later.
+ */
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data)
+{
+	int sens_idx = 0;
+	struct virtual_sensor *sens;
+	struct __thermal_zone *tz;
+	struct thermal_zone_device **ptr;
+	struct thermal_zone_device *tzd;
+	struct __sensor_param *sens_param = NULL;
+	enum thermal_device_mode mode;
+
+	if (!dev || !sensor_data)
+		return ERR_PTR(-EINVAL);
+
+	tzd = thermal_zone_get_zone_by_name(
+				sensor_data->virt_zone_name);
+	if (IS_ERR(tzd)) {
+		dev_err(dev, "sens:%s not available err: %ld\n",
+				sensor_data->virt_zone_name,
+				PTR_ERR(tzd));
+		return tzd;
+	}
+
+	mutex_lock(&tzd->lock);
+	/*
+	 * Check if the virtual zone is registered and enabled.
+	 * If so return the registered thermal zone.
+	 */
+	tzd->ops->get_mode(tzd, &mode);
+	mutex_unlock(&tzd->lock);
+	if (mode == THERMAL_DEVICE_ENABLED)
+		return tzd;
+
+	sens = devm_kzalloc(dev, sizeof(*sens), GFP_KERNEL);
+	if (!sens)
+		return ERR_PTR(-ENOMEM);
+
+	sens->virt_tz = tzd;
+	sens->logic = sensor_data->logic;
+	sens->num_sensors = sensor_data->num_sensors;
+	if (sens->logic == VIRT_WEIGHTED_AVG) {
+		int coeff_ct = sensor_data->coefficient_ct;
+
+		/*
+		 * For weighted aggregation, the sensor driver has to specify
+		 * n+2 coefficients.
+		 */
+		if (coeff_ct != sens->num_sensors) {
+			dev_err(dev, "sens:%s Invalid coefficient\n",
+					sensor_data->virt_zone_name);
+			return ERR_PTR(-EINVAL);
+		}
+		memcpy(sens->coefficients, sensor_data->coefficients,
+			       coeff_ct * sizeof(*sens->coefficients));
+		sens->avg_offset = sensor_data->avg_offset;
+		sens->avg_denominator = sensor_data->avg_denominator;
+	}
+
+	for (sens_idx = 0; sens_idx < sens->num_sensors; sens_idx++) {
+		sens->tz[sens_idx] = thermal_zone_get_zone_by_name(
+					sensor_data->sensor_names[sens_idx]);
+		if (IS_ERR(sens->tz[sens_idx])) {
+			dev_err(dev, "sens:%s sensor[%s] fetch err:%ld\n",
+				     sensor_data->virt_zone_name,
+				     sensor_data->sensor_names[sens_idx],
+				     PTR_ERR(sens->tz[sens_idx]));
+			break;
+		}
+	}
+	if (sens->num_sensors != sens_idx)
+		return ERR_PTR(-EAGAIN);
+
+	sens_param = kzalloc(sizeof(*sens_param), GFP_KERNEL);
+	if (!sens_param)
+		return ERR_PTR(-ENOMEM);
+	sens_param->sensor_data = sens;
+	sens_param->ops = &of_virt_ops;
+	INIT_LIST_HEAD(&sens_param->first_tz);
+	sens_param->trip_high = INT_MAX;
+	sens_param->trip_low = INT_MIN;
+	mutex_init(&sens_param->lock);
+
+	mutex_lock(&tzd->lock);
+	tz = tzd->devdata;
+	tz->senps = sens_param;
+	tzd->ops->get_temp = of_thermal_get_temp;
+	list_add_tail(&tz->list, &sens_param->first_tz);
+	mutex_unlock(&tzd->lock);
+
+	ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	*ptr = tzd;
+	devres_add(dev, ptr);
+
+	tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
+	return tzd;
+}
+EXPORT_SYMBOL(devm_thermal_of_virtual_sensor_register);
+
+/**
  * devm_thermal_zone_of_sensor_register - Resource managed version of
  *				thermal_zone_of_sensor_register()
  * @dev: a valid struct device pointer of a sensor device. Must contain
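
virt_sensor_read_temp() supports three aggregation modes; the weighted-average mode applies the per-sensor coefficients and then the offset/denominator pair once all sensors have been read. A stand-alone sketch of that arithmetic, with made-up sample temperatures and coefficients:

#include <stdio.h>
#include <limits.h>

/* Assumed sample values, mirroring virt_sensor_read_temp(): temperatures
 * in millidegrees C, one coefficient per sensor, plus an offset and
 * denominator for the weighted-average mode. */
static const int temps[] = { 40000, 42000, 41000 };
static const int coeff[] = { 1, 2, 1 };
static const int avg_offset = 0;
static const int avg_denominator = 4;	/* sum of the coefficients here */

int main(void)
{
	int i, n = 3, wavg = 0, vmax = INT_MIN, vmin = INT_MAX;

	for (i = 0; i < n; i++) {
		wavg += temps[i] * coeff[i];
		if (temps[i] > vmax)
			vmax = temps[i];
		if (temps[i] < vmin)
			vmin = temps[i];
	}
	wavg = (wavg + avg_offset) / avg_denominator;

	printf("weighted avg: %d\n", wavg);	/* (40000 + 84000 + 41000) / 4 = 41250 */
	printf("maximum:      %d\n", vmax);	/* 42000 */
	printf("minimum:      %d\n", vmin);	/* 41000 */
	return 0;
}
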
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index 819c6d5..e1f22a3 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/iio/consumer.h>
@@ -21,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/thermal.h>
+#include "thermal_core.h"
 
 #define QPNP_TM_REG_TYPE		0x04
 #define QPNP_TM_REG_SUBTYPE		0x05
@@ -29,13 +31,17 @@
 #define QPNP_TM_REG_ALARM_CTRL		0x46
 
 #define QPNP_TM_TYPE			0x09
-#define QPNP_TM_SUBTYPE			0x08
+#define QPNP_TM_SUBTYPE_GEN1		0x08
+#define QPNP_TM_SUBTYPE_GEN2		0x09
 
-#define STATUS_STAGE_MASK		0x03
+#define STATUS_GEN1_STAGE_MASK		GENMASK(1, 0)
+#define STATUS_GEN2_STATE_MASK		GENMASK(6, 4)
+#define STATUS_GEN2_STATE_SHIFT		4
 
-#define SHUTDOWN_CTRL1_THRESHOLD_MASK	0x03
+#define SHUTDOWN_CTRL1_OVERRIDE_MASK	GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK	GENMASK(1, 0)
 
-#define ALARM_CTRL_FORCE_ENABLE		0x80
+#define ALARM_CTRL_FORCE_ENABLE		BIT(7)
 
 /*
  * Trip point values based on threshold control
@@ -58,6 +64,7 @@
 struct qpnp_tm_chip {
 	struct regmap			*map;
 	struct thermal_zone_device	*tz_dev;
+	unsigned int			subtype;
 	long				temp;
 	unsigned int			thresh;
 	unsigned int			stage;
@@ -66,6 +73,9 @@
 	struct iio_channel		*adc;
 };
 
+/* This array maps from GEN2 alarm state to GEN1 alarm stage */
+static const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
+
 static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
 {
 	unsigned int val;
@@ -84,13 +94,14 @@
 	return regmap_write(chip->map, chip->base + addr, data);
 }
 
-/*
- * This function updates the internal temp value based on the
- * current thermal stage and threshold as well as the previous stage
+/**
+ * qpnp_tm_get_temp_stage() - return over-temperature stage
+ * @chip:		Pointer to the qpnp_tm chip
+ *
+ * Return: stage (GEN1) or state (GEN2) on success, or errno on failure.
  */
-static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
 {
-	unsigned int stage;
 	int ret;
 	u8 reg = 0;
 
@@ -98,16 +109,44 @@
 	if (ret < 0)
 		return ret;
 
-	stage = reg & STATUS_STAGE_MASK;
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
+		ret = reg & STATUS_GEN1_STAGE_MASK;
+	else
+		ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT;
 
-	if (stage > chip->stage) {
+	return ret;
+}
+
+/*
+ * This function updates the internal temp value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+	unsigned int stage, stage_new, stage_old;
+	int ret;
+
+	ret = qpnp_tm_get_temp_stage(chip);
+	if (ret < 0)
+		return ret;
+	stage = ret;
+
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
+		stage_new = stage;
+		stage_old = chip->stage;
+	} else {
+		stage_new = alarm_state_map[stage];
+		stage_old = alarm_state_map[chip->stage];
+	}
+
+	if (stage_new > stage_old) {
 		/* increasing stage, use lower bound */
-		chip->temp = (stage - 1) * TEMP_STAGE_STEP +
+		chip->temp = (stage_new - 1) * TEMP_STAGE_STEP +
 			     chip->thresh * TEMP_THRESH_STEP +
 			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
-	} else if (stage < chip->stage) {
+	} else if (stage_new < stage_old) {
 		/* decreasing stage, use upper bound */
-		chip->temp = stage * TEMP_STAGE_STEP +
+		chip->temp = stage_new * TEMP_STAGE_STEP +
 			     chip->thresh * TEMP_THRESH_STEP -
 			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
 	}
@@ -150,7 +189,7 @@
 {
 	struct qpnp_tm_chip *chip = data;
 
-	thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
+	of_thermal_handle_trip(chip->tz_dev);
 
 	return IRQ_HANDLED;
 }
@@ -162,28 +201,37 @@
  */
 static int qpnp_tm_init(struct qpnp_tm_chip *chip)
 {
+	unsigned int stage;
 	int ret;
-	u8 reg;
+	u8 reg = 0;
 
-	chip->thresh = THRESH_MIN;
-	chip->temp = DEFAULT_TEMP;
-
-	ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+	ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
 	if (ret < 0)
 		return ret;
 
-	chip->stage = reg & STATUS_STAGE_MASK;
+	chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	chip->temp = DEFAULT_TEMP;
 
-	if (chip->stage)
+	ret = qpnp_tm_get_temp_stage(chip);
+	if (ret < 0)
+		return ret;
+	chip->stage = ret;
+
+	stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
+		? chip->stage : alarm_state_map[chip->stage];
+
+	if (stage)
 		chip->temp = chip->thresh * TEMP_THRESH_STEP +
-			     (chip->stage - 1) * TEMP_STAGE_STEP +
+			     (stage - 1) * TEMP_STAGE_STEP +
 			     TEMP_THRESH_MIN;
 
 	/*
 	 * Set threshold and disable software override of stage 2 and 3
 	 * shutdowns.
 	 */
-	reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	chip->thresh = THRESH_MIN;
+	reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
+	reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
 	ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
 	if (ret < 0)
 		return ret;
@@ -242,13 +290,16 @@
 		goto fail;
 	}
 
-	if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
+	if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
+				     && subtype != QPNP_TM_SUBTYPE_GEN2)) {
 		dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
 			type, subtype);
 		ret = -ENODEV;
 		goto fail;
 	}
 
+	chip->subtype = subtype;
+
 	ret = qpnp_tm_init(chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "init failed\n");
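
For GEN2 parts the alarm state (0-7) is first collapsed to a GEN1-style stage through alarm_state_map[], and the stage together with the SHUTDOWN_CTRL1 threshold field then gives the estimated temperature, as in qpnp_tm_init(). A stand-alone sketch of that arithmetic follows; the threshold constants are the values used by the upstream driver and should be treated as illustrative here.

#include <stdio.h>

/* Assumed threshold constants, in millidegrees C. */
#define TEMP_THRESH_MIN		105000
#define TEMP_THRESH_STEP	5000
#define TEMP_STAGE_STEP		20000

/* GEN2 STATUS reports a 3-bit state; GEN1 reports a 2-bit stage. */
static const unsigned int alarm_state_map[8] = { 0, 1, 1, 2, 2, 3, 3, 3 };

int main(void)
{
	unsigned int thresh = 0;	/* SHUTDOWN_CTRL1 threshold field */
	unsigned int gen2_state;

	for (gen2_state = 0; gen2_state < 8; gen2_state++) {
		unsigned int stage = alarm_state_map[gen2_state];
		long temp = 0;

		if (stage)	/* lower bound of the stage, as in qpnp_tm_init() */
			temp = thresh * TEMP_THRESH_STEP +
			       (stage - 1) * TEMP_STAGE_STEP +
			       TEMP_THRESH_MIN;

		printf("state %u -> stage %u -> %ld mC\n",
		       gen2_state, stage, temp);
	}
	return 0;
}
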
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 473d15a..f6e1b86 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -30,3 +30,13 @@
 	  each cluster can be used to perform quick thermal mitigations by
 	  tracking temperatures of the CPUs and taking thermal action in the
 	  hardware without s/w intervention.
+
+config QTI_VIRTUAL_SENSOR
+	bool "QTI Virtual Sensor driver"
+	depends on THERMAL_OF
+	help
+	  This driver has the information about the virtual sensors used by
+	  QTI chipsets and registers the virtual sensors to a thermal zone.
+	  The virtual sensor information includes the underlying thermal
+	  sensors to query for temperature and the aggregation logic to
+	  determine the virtual sensor temperature.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index d1a53b0..8859380 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -2,3 +2,4 @@
 qcom_tsens-y			+= tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
 obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
 obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
+obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
new file mode 100644
index 0000000..3064c74
--- /dev/null
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include "qti_virtual_sensor.h"
+
+static const struct virtual_sensor_data qti_virtual_sensors[] = {
+	{
+		.virt_zone_name = "gpu-virt-max-step",
+		.num_sensors = 2,
+		.sensor_names = {"gpu0-usr",
+				"gpu1-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+	{
+		.virt_zone_name = "silver-virt-max-usr",
+		.num_sensors = 4,
+		.sensor_names = {"cpu0-silver-usr",
+				"cpu1-silver-usr",
+				"cpu2-silver-usr",
+				"cpu3-silver-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+	{
+		.virt_zone_name = "gold-virt-max-usr",
+		.num_sensors = 4,
+		.sensor_names = {"cpu0-gold-usr",
+				"cpu1-gold-usr",
+				"cpu2-gold-usr",
+				"cpu3-gold-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+};
+
+int qti_virtual_sensor_register(struct device *dev)
+{
+	int sens_ct = 0;
+	static int idx;
+	struct thermal_zone_device *tz;
+
+	sens_ct = ARRAY_SIZE(qti_virtual_sensors);
+	for (; idx < sens_ct; idx++) {
+		tz = devm_thermal_of_virtual_sensor_register(dev,
+				&qti_virtual_sensors[idx]);
+		if (IS_ERR(tz))
+			dev_dbg(dev, "sensor:%d register error:%ld\n",
+					idx, PTR_ERR(tz));
+		else
+			dev_dbg(dev, "sensor:%d registered\n", idx);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qti_virtual_sensor_register);
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.h b/drivers/thermal/qcom/qti_virtual_sensor.h
new file mode 100644
index 0000000..371b794
--- /dev/null
+++ b/drivers/thermal/qcom/qti_virtual_sensor.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QTI_VIRT_SENS_H__
+#define __QTI_VIRT_SENS_H__
+
+#ifdef CONFIG_QTI_VIRTUAL_SENSOR
+
+int qti_virtual_sensor_register(struct device *dev);
+
+#else
+
+static inline int qti_virtual_sensor_register(struct device *dev)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_QTI_VIRTUAL_SENSOR */
+
+#endif /* __QTI_VIRT_SENS_H__ */
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index ecfc4ef..6b05b7b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -76,6 +76,14 @@
 		return next_target;
 	}
 
+	/*
+	 * If there is no new throttle request and if the thermal zone
+	 * wasn't requesting any previous mitigation, then skip the
+	 * evaluation.
+	 */
+	if (instance->target == THERMAL_NO_TARGET && !throttle)
+		return next_target;
+
 	switch (trend) {
 	case THERMAL_TREND_RAISING:
 		if (throttle) {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index b137c4e..4c1ccee 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1424,9 +1424,26 @@
 	if (ret)
 		return ret;
 
-	/* lower default 0, upper default max_state */
-	lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
-	upper = upper == THERMAL_NO_LIMIT ? max_state : upper;
+	/*
+	 * If upper or lower is encoded with a limit macro for the mitigation
+	 * state, decode it into either the default state or an offset from
+	 * max_state.
+	 */
+	if (upper > (THERMAL_MAX_LIMIT - max_state)) {
+		/* upper default max_state */
+		if (upper == THERMAL_NO_LIMIT)
+			upper = max_state;
+		else
+			upper = max_state - (THERMAL_MAX_LIMIT - upper);
+	}
+
+	if (lower > (THERMAL_MAX_LIMIT - max_state)) {
+		/* lower default 0 */
+		if (lower == THERMAL_NO_LIMIT)
+			lower = 0;
+		else
+			lower =  max_state - (THERMAL_MAX_LIMIT - lower);
+	}
 
 	if (lower > upper || upper > max_state)
 		return -EINVAL;
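
The thermal_core change lets upper/lower carry either an absolute cooling state, THERMAL_NO_LIMIT, or a value just below THERMAL_MAX_LIMIT that encodes an offset from max_state. A stand-alone sketch of the decoding, with the two macros defined locally under the assumption that THERMAL_MAX_LIMIT is one less than THERMAL_NO_LIMIT:

#include <stdio.h>

/* Assumed encodings, treated as illustrative. */
#define THERMAL_NO_LIMIT	(~0u)
#define THERMAL_MAX_LIMIT	(THERMAL_NO_LIMIT - 1)

static unsigned int decode_upper(unsigned int upper, unsigned int max_state)
{
	if (upper > THERMAL_MAX_LIMIT - max_state) {
		if (upper == THERMAL_NO_LIMIT)
			return max_state;		/* default: deepest state */
		return max_state - (THERMAL_MAX_LIMIT - upper);	/* offset */
	}
	return upper;		/* plain absolute cooling state */
}

int main(void)
{
	unsigned int max_state = 10;

	printf("%u\n", decode_upper(THERMAL_NO_LIMIT, max_state));	 /* 10 */
	printf("%u\n", decode_upper(THERMAL_MAX_LIMIT - 2, max_state)); /* 8  */
	printf("%u\n", decode_upper(3, max_state));			 /* 3  */
	return 0;
}
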
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 080d5a5..f24d303 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1320,7 +1320,7 @@
 	/*
 	 * Check if the device is a Fintek F81216A
 	 */
-	if (port->type == PORT_16550A)
+	if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
 		fintek_8250_probe(up);
 
 	if (up->capabilities != old_capabilities) {
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index d386346..91d2ddd 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1381,9 +1381,9 @@
 static void __exit ifx_spi_exit(void)
 {
 	/* unregister */
+	spi_unregister_driver(&ifx_spi_driver);
 	tty_unregister_driver(tty_drv);
 	put_tty_driver(tty_drv);
-	spi_unregister_driver(&ifx_spi_driver);
 	unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
 }
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 2e12c3f..94ba2c3e 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -327,6 +327,11 @@
 	mb();
 }
 
+static unsigned int msm_geni_cons_get_mctrl(struct uart_port *uport)
+{
+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
 static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
 {
 	u32 geni_ios = 0;
@@ -610,6 +615,8 @@
 {
 	struct uart_port *uport;
 	struct msm_geni_serial_port *port;
+	int locked = 1;
+	unsigned long flags;
 
 	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
 
@@ -618,9 +625,15 @@
 		return;
 
 	uport = &port->uport;
-	spin_lock(&uport->lock);
-	__msm_geni_serial_console_write(uport, s, count);
-	spin_unlock(&uport->lock);
+	if (oops_in_progress)
+		locked = spin_trylock_irqsave(&uport->lock, flags);
+	else
+		spin_lock_irqsave(&uport->lock, flags);
+
+	if (locked) {
+		__msm_geni_serial_console_write(uport, s, count);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
 }
 
 static int handle_rx_console(struct uart_port *uport,
@@ -1014,13 +1027,20 @@
 static void msm_geni_serial_shutdown(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	unsigned long flags;
 
+	/* Stop the console before stopping the current tx */
+	if (uart_console(uport))
+		console_stop(uport->cons);
+
+	spin_lock_irqsave(&uport->lock, flags);
 	msm_geni_serial_stop_tx(uport);
 	msm_geni_serial_stop_rx(uport);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
 	disable_irq(uport->irq);
 	free_irq(uport->irq, msm_port);
 	if (uart_console(uport)) {
-		console_stop(uport->cons);
 		se_geni_resources_off(&msm_port->serial_rsc);
 	} else {
 		if (msm_port->wakeup_irq > 0) {
@@ -1572,6 +1592,7 @@
 	.shutdown = msm_geni_serial_shutdown,
 	.type = msm_geni_serial_get_type,
 	.set_mctrl = msm_geni_cons_set_mctrl,
+	.get_mctrl = msm_geni_cons_get_mctrl,
 #ifdef CONFIG_CONSOLE_POLL
 	.poll_get_char	= msm_geni_serial_get_char,
 	.poll_put_char	= msm_geni_serial_poll_put_char,
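
The console-write fix only try-locks the port lock when oops_in_progress is set, since the lock may already be held by the CPU that crashed; otherwise the message is dropped rather than deadlocking. The same shape in a small pthread program, with a mutex and a global flag standing in for the spinlock and the kernel's oops_in_progress:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static bool oops_in_progress;	/* stands in for the kernel global */

static void console_write(const char *s)
{
	int locked = 1;

	if (oops_in_progress)
		locked = (pthread_mutex_trylock(&port_lock) == 0);
	else
		pthread_mutex_lock(&port_lock);

	if (locked) {
		fputs(s, stdout);
		pthread_mutex_unlock(&port_lock);
	}
}

int main(void)
{
	console_write("normal path\n");
	oops_in_progress = true;
	console_write("panic path (lock is free here, so it still prints)\n");
	return 0;
}
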
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4b26252..ee84f89 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1976,12 +1976,14 @@
 
 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
 
-	ret = sci_request_irq(s);
-	if (unlikely(ret < 0))
-		return ret;
-
 	sci_request_dma(port);
 
+	ret = sci_request_irq(s);
+	if (unlikely(ret < 0)) {
+		sci_free_dma(port);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -2012,8 +2014,8 @@
 	}
 #endif
 
-	sci_free_dma(port);
 	sci_free_irq(s);
+	sci_free_dma(port);
 }
 
 static int sci_sck_calc(struct sci_port *s, unsigned int bps,
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 6d23eed..1c31e8a 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -294,7 +294,8 @@
 {
 	struct ci_hdrc *ci = s->private;
 
-	seq_printf(s, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		seq_printf(s, "%s\n", ci_role(ci)->name);
 
 	return 0;
 }
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index c9e80ad..6a15b72 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1987,6 +1987,7 @@
 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
 	struct ci_role_driver *rdrv;
+	int ret;
 
 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
 		return -ENXIO;
@@ -1999,7 +2000,10 @@
 	rdrv->stop	= udc_id_switch_for_host;
 	rdrv->irq	= udc_irq;
 	rdrv->name	= "gadget";
-	ci->roles[CI_ROLE_GADGET] = rdrv;
 
-	return udc_start(ci);
+	ret = udc_start(ci);
+	if (!ret)
+		ci->roles[CI_ROLE_GADGET] = rdrv;
+
+	return ret;
 }
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 38614fa..dcb41a9 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -69,6 +69,11 @@
 module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
 
+/* override for USB speed */
+static int override_usb_speed;
+module_param(override_usb_speed, int, 0644);
+MODULE_PARM_DESC(override_usb_speed, "override for USB speed");
+
 /* XHCI registers */
 #define USB3_HCSPARAMS1		(0x4)
 #define USB3_PORTSC		(0x420)
@@ -143,6 +148,29 @@
 	ORIENTATION_CC2,
 };
 
+enum msm_usb_irq {
+	HS_PHY_IRQ,
+	PWR_EVNT_IRQ,
+	DP_HS_PHY_IRQ,
+	DM_HS_PHY_IRQ,
+	SS_PHY_IRQ,
+	USB_MAX_IRQ
+};
+
+struct usb_irq {
+	char *name;
+	int irq;
+	bool enable;
+};
+
+static const struct usb_irq usb_irq_info[USB_MAX_IRQ] = {
+	{"hs_phy_irq", 0},
+	{"pwr_event_irq", 0},
+	{"dp_hs_phy_irq", 0},
+	{"dm_hs_phy_irq", 0},
+	{"ss_phy_irq", 0},
+};
+
 /* Input bits to state machine (mdwc->inputs) */
 
 #define ID			0
@@ -184,8 +212,7 @@
 	int			vbus_retry_count;
 	bool			resume_pending;
 	atomic_t                pm_suspended;
-	int			hs_phy_irq;
-	int			ss_phy_irq;
+	struct usb_irq		wakeup_irq[USB_MAX_IRQ];
 	struct work_struct	resume_work;
 	struct work_struct	restart_usb_work;
 	bool			in_restart;
@@ -205,6 +232,7 @@
 	bool			vbus_active;
 	bool			suspend;
 	bool			disable_host_mode_pm;
+	bool			use_pdc_interrupts;
 	enum dwc3_id_state	id_state;
 	unsigned long		lpm_flags;
 #define MDWC3_SS_PHY_SUSPEND		BIT(0)
@@ -225,7 +253,6 @@
 
 	struct notifier_block	host_nb;
 
-	int			pwr_event_irq;
 	atomic_t                in_p3;
 	unsigned int		lpm_to_suspend_delay;
 	bool			init;
@@ -255,6 +282,15 @@
 static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
 static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
 static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event);
+
+static inline bool is_valid_usb_speed(struct dwc3 *dwc, int speed)
+{
+
+	return (((speed == USB_SPEED_FULL) || (speed == USB_SPEED_HIGH) ||
+		(speed == USB_SPEED_SUPER) || (speed == USB_SPEED_SUPER_PLUS))
+		&& (speed <= dwc->maximum_speed));
+}
+
 /**
  *
  * Read register with debug info.
@@ -264,7 +300,7 @@
  *
  * @return u32
  */
-static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
+static inline u32 dwc3_msm_read_reg(void __iomem *base, u32 offset)
 {
 	u32 val = ioread32(base + offset);
 	return val;
@@ -279,11 +315,11 @@
  *
  * @return u32
  */
-static inline u32 dwc3_msm_read_reg_field(void *base,
+static inline u32 dwc3_msm_read_reg_field(void __iomem *base,
 					  u32 offset,
 					  const u32 mask)
 {
-	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 shift = __ffs(mask);
 	u32 val = ioread32(base + offset);
 
 	val &= mask;		/* clear other bits */
@@ -300,7 +336,7 @@
  * @val - value to write.
  *
  */
-static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
+static inline void dwc3_msm_write_reg(void __iomem *base, u32 offset, u32 val)
 {
 	iowrite32(val, base + offset);
 }
@@ -314,7 +350,7 @@
  * @val - value to write.
  *
  */
-static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
+static inline void dwc3_msm_write_reg_field(void __iomem *base, u32 offset,
 					    const u32 mask, u32 val)
 {
 	u32 shift = find_first_bit((void *)&mask, 32);
@@ -334,7 +370,7 @@
  * @val - value to write.
  *
  */
-static inline void dwc3_msm_write_readback(void *base, u32 offset,
+static inline void dwc3_msm_write_readback(void __iomem *base, u32 offset,
 					    const u32 mask, u32 val)
 {
 	u32 write_val, tmp = ioread32(base + offset);
@@ -1580,7 +1616,7 @@
 	int ret = 0;
 
 	if (assert) {
-		disable_irq(mdwc->pwr_event_irq);
+		disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
 		/* Using asynchronous block reset to the hardware */
 		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
 		clk_disable_unprepare(mdwc->utmi_clk);
@@ -1600,7 +1636,7 @@
 		clk_prepare_enable(mdwc->core_clk);
 		clk_prepare_enable(mdwc->sleep_clk);
 		clk_prepare_enable(mdwc->utmi_clk);
-		enable_irq(mdwc->pwr_event_irq);
+		enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
 	}
 
 	return ret;
@@ -1995,12 +2031,93 @@
 static void msm_dwc3_perf_vote_update(struct dwc3_msm *mdwc,
 						bool perf_mode);
 
+static void configure_usb_wakeup_interrupt(struct dwc3_msm *mdwc,
+	struct usb_irq *uirq, unsigned int polarity, bool enable)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	if (uirq && enable && !uirq->enable) {
+		dbg_event(0xFF, "PDC_IRQ_EN", uirq->irq);
+		dbg_event(0xFF, "PDC_IRQ_POL", polarity);
+		/* clear any pending interrupt */
+		irq_set_irqchip_state(uirq->irq, IRQCHIP_STATE_PENDING, 0);
+		irq_set_irq_type(uirq->irq, polarity);
+		enable_irq_wake(uirq->irq);
+		enable_irq(uirq->irq);
+		uirq->enable = true;
+	}
+
+	if (uirq && !enable && uirq->enable) {
+		dbg_event(0xFF, "PDC_IRQ_DIS", uirq->irq);
+		disable_irq_wake(uirq->irq);
+		disable_irq_nosync(uirq->irq);
+		uirq->enable = false;
+	}
+}
+
+static void enable_usb_pdc_interrupt(struct dwc3_msm *mdwc, bool enable)
+{
+	if (!enable)
+		goto disable_usb_irq;
+
+	if (mdwc->hs_phy->flags & PHY_LS_MODE) {
+		configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DM_HS_PHY_IRQ],
+			IRQ_TYPE_EDGE_FALLING, enable);
+	} else if (mdwc->hs_phy->flags & PHY_HSFS_MODE) {
+		configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DP_HS_PHY_IRQ],
+			IRQ_TYPE_EDGE_FALLING, enable);
+	} else {
+		configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DP_HS_PHY_IRQ],
+			IRQ_TYPE_EDGE_RISING, true);
+		configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DM_HS_PHY_IRQ],
+			IRQ_TYPE_EDGE_RISING, true);
+	}
+
+	configure_usb_wakeup_interrupt(mdwc,
+		&mdwc->wakeup_irq[SS_PHY_IRQ],
+		IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH, enable);
+	return;
+
+disable_usb_irq:
+	configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DP_HS_PHY_IRQ], 0, enable);
+	configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[DM_HS_PHY_IRQ], 0, enable);
+	configure_usb_wakeup_interrupt(mdwc,
+			&mdwc->wakeup_irq[SS_PHY_IRQ], 0, enable);
+}
+
+static void configure_nonpdc_usb_interrupt(struct dwc3_msm *mdwc,
+		struct usb_irq *uirq, bool enable)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	if (uirq && enable && !uirq->enable) {
+		dbg_event(0xFF, "IRQ_EN", uirq->irq);
+		enable_irq_wake(uirq->irq);
+		enable_irq(uirq->irq);
+		uirq->enable = true;
+	}
+
+	if (uirq && !enable && uirq->enable) {
+		dbg_event(0xFF, "IRQ_DIS", uirq->irq);
+		disable_irq_wake(uirq->irq);
+		disable_irq_nosync(uirq->irq);
+		uirq->enable = false;
+	}
+}
+
 static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
 {
 	int ret;
 	bool can_suspend_ssphy;
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
 	struct dwc3_event_buffer *evt;
+	struct usb_irq *uirq;
 
 	if (atomic_read(&dwc->in_lpm)) {
 		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
@@ -2066,7 +2183,7 @@
 		dbg_event(0xFF, "pend evt", 0);
 
 	/* disable power event irq, hs and ss phy irq is used as wake up src */
-	disable_irq(mdwc->pwr_event_irq);
+	disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
 
 	dwc3_set_phy_speed_flags(mdwc);
 	/* Suspend HS PHY */
@@ -2152,11 +2269,13 @@
 	 * case of host bus suspend and device bus suspend.
 	 */
 	if (mdwc->vbus_active || mdwc->in_host_mode) {
-		enable_irq_wake(mdwc->hs_phy_irq);
-		enable_irq(mdwc->hs_phy_irq);
-		if (mdwc->ss_phy_irq) {
-			enable_irq_wake(mdwc->ss_phy_irq);
-			enable_irq(mdwc->ss_phy_irq);
+		if (mdwc->use_pdc_interrupts) {
+			enable_usb_pdc_interrupt(mdwc, true);
+		} else {
+			uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
+			configure_nonpdc_usb_interrupt(mdwc, uirq, true);
+			uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
+			configure_nonpdc_usb_interrupt(mdwc, uirq, true);
 		}
 		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
 	}
@@ -2170,6 +2289,7 @@
 	int ret;
 	long core_clk_rate;
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct usb_irq *uirq;
 
 	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
 
@@ -2287,7 +2407,7 @@
 	atomic_set(&dwc->in_lpm, 0);
 
 	/* enable power evt irq for IN P3 detection */
-	enable_irq(mdwc->pwr_event_irq);
+	enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
 
 	/* Disable HSPHY auto suspend */
 	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
@@ -2297,11 +2417,13 @@
 
 	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
 	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
-		disable_irq_wake(mdwc->hs_phy_irq);
-		disable_irq_nosync(mdwc->hs_phy_irq);
-		if (mdwc->ss_phy_irq) {
-			disable_irq_wake(mdwc->ss_phy_irq);
-			disable_irq_nosync(mdwc->ss_phy_irq);
+		if (mdwc->use_pdc_interrupts) {
+			enable_usb_pdc_interrupt(mdwc, false);
+		} else {
+			uirq = &mdwc->wakeup_irq[HS_PHY_IRQ];
+			configure_nonpdc_usb_interrupt(mdwc, uirq, false);
+			uirq = &mdwc->wakeup_irq[SS_PHY_IRQ];
+			configure_nonpdc_usb_interrupt(mdwc, uirq, false);
 		}
 		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
 	}
@@ -2395,6 +2517,12 @@
 		if (dwc->maximum_speed > dwc->max_hw_supp_speed)
 			dwc->maximum_speed = dwc->max_hw_supp_speed;
 
+		if (override_usb_speed &&
+				is_valid_usb_speed(dwc, override_usb_speed)) {
+			dwc->maximum_speed = override_usb_speed;
+			dbg_event(0xFF, "override_speed", override_usb_speed);
+		}
+
 		dbg_event(0xFF, "speed", dwc->maximum_speed);
 
 		ret = extcon_get_property(edev, extcon_id,
@@ -2925,9 +3053,10 @@
 	struct resource *res;
 	void __iomem *tcsr;
 	bool host_mode;
-	int ret = 0;
+	int ret = 0, i;
 	int ext_hub_reset_gpio;
 	u32 val;
+	unsigned long irq_type;
 
 	mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
 	if (!mdwc)
@@ -2977,63 +3106,41 @@
 		mdwc->lpm_to_suspend_delay = 0;
 	}
 
-	/*
-	 * DWC3 has separate IRQ line for OTG events (ID/BSV) and for
-	 * DP and DM linestate transitions during low power mode.
-	 */
-	mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
-	if (mdwc->hs_phy_irq < 0) {
-		dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
-		ret = -EINVAL;
-		goto err;
-	} else {
-		irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
-		ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
-					msm_dwc3_pwr_irq,
-					msm_dwc3_pwr_irq_thread,
-					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
-					| IRQF_ONESHOT, "hs_phy_irq", mdwc);
-		if (ret) {
-			dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
-					ret);
-			goto err;
-		}
-	}
+	memcpy(mdwc->wakeup_irq, usb_irq_info, sizeof(usb_irq_info));
+	for (i = 0; i < USB_MAX_IRQ; i++) {
+		irq_type = IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME |
+						IRQF_ONESHOT;
+		mdwc->wakeup_irq[i].irq = platform_get_irq_byname(pdev,
+					mdwc->wakeup_irq[i].name);
+		if (mdwc->wakeup_irq[i].irq < 0) {
+			/* pwr_event_irq is the only mandatory IRQ */
+			if (!strcmp(mdwc->wakeup_irq[i].name,
+						"pwr_event_irq")) {
+				dev_err(&pdev->dev, "get_irq for %s failed\n\n",
+						mdwc->wakeup_irq[i].name);
+				ret = -EINVAL;
+				goto err;
+			}
+			mdwc->wakeup_irq[i].irq = 0;
+		} else {
+			irq_set_status_flags(mdwc->wakeup_irq[i].irq,
+						IRQ_NOAUTOEN);
+			/* ss_phy_irq is a level-triggered interrupt */
+			if (!strcmp(mdwc->wakeup_irq[i].name, "ss_phy_irq"))
+				irq_type = IRQF_TRIGGER_HIGH | IRQF_ONESHOT |
+					IRQ_TYPE_LEVEL_HIGH | IRQF_EARLY_RESUME;
 
-	mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
-	if (mdwc->ss_phy_irq < 0) {
-		dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
-	} else {
-		irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
-		ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
+			ret = devm_request_threaded_irq(&pdev->dev,
+					mdwc->wakeup_irq[i].irq,
 					msm_dwc3_pwr_irq,
 					msm_dwc3_pwr_irq_thread,
-					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
-					| IRQF_ONESHOT, "ss_phy_irq", mdwc);
-		if (ret) {
-			dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
-					ret);
-			goto err;
-		}
-	}
-
-	mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
-	if (mdwc->pwr_event_irq < 0) {
-		dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
-		ret = -EINVAL;
-		goto err;
-	} else {
-		/* will be enabled in dwc3_msm_resume() */
-		irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
-		ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
-					msm_dwc3_pwr_irq,
-					msm_dwc3_pwr_irq_thread,
-					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
-					"msm_dwc3", mdwc);
-		if (ret) {
-			dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
-					ret);
-			goto err;
+					irq_type,
+					mdwc->wakeup_irq[i].name, mdwc);
+			if (ret) {
+				dev_err(&pdev->dev, "irq req %s failed: %d\n\n",
+						mdwc->wakeup_irq[i].name, ret);
+				goto err;
+			}
 		}
 	}
 
@@ -3114,7 +3221,7 @@
 		 * by interrupt
 		 */
 		if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
-			if (!mdwc->pwr_event_irq) {
+			if (!mdwc->wakeup_irq[PWR_EVNT_IRQ].irq) {
 				dev_err(&pdev->dev,
 					"need pwr_event_irq exiting L1\n");
 				ret = -EINVAL;
@@ -3146,7 +3253,8 @@
 
 	mdwc->disable_host_mode_pm = of_property_read_bool(node,
 				"qcom,disable-host-mode-pm");
-
+	mdwc->use_pdc_interrupts = of_property_read_bool(node,
+				"qcom,use-pdc-interrupts");
 	dwc3_set_notifier(&dwc3_msm_notify_event);
 
 	ret = dwc3_msm_init_iommu(mdwc);
@@ -3348,10 +3456,15 @@
 	if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
 		regulator_disable(mdwc->vbus_reg);
 
-	disable_irq(mdwc->hs_phy_irq);
-	if (mdwc->ss_phy_irq)
-		disable_irq(mdwc->ss_phy_irq);
-	disable_irq(mdwc->pwr_event_irq);
+	if (mdwc->wakeup_irq[HS_PHY_IRQ].irq)
+		disable_irq(mdwc->wakeup_irq[HS_PHY_IRQ].irq);
+	if (mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq)
+		disable_irq(mdwc->wakeup_irq[DP_HS_PHY_IRQ].irq);
+	if (mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq)
+		disable_irq(mdwc->wakeup_irq[DM_HS_PHY_IRQ].irq);
+	if (mdwc->wakeup_irq[SS_PHY_IRQ].irq)
+		disable_irq(mdwc->wakeup_irq[SS_PHY_IRQ].irq);
+	disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
 
 	clk_disable_unprepare(mdwc->utmi_clk);
 	clk_set_rate(mdwc->core_clk, 19200000);
@@ -3519,13 +3632,16 @@
 		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
 
 		mdwc->hs_phy->flags |= PHY_HOST_MODE;
-		if (dwc->maximum_speed == USB_SPEED_SUPER)
+		if (dwc->maximum_speed == USB_SPEED_SUPER) {
 			mdwc->ss_phy->flags |= PHY_HOST_MODE;
+			usb_phy_notify_connect(mdwc->ss_phy,
+						USB_SPEED_SUPER);
+		}
 
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
 		pm_runtime_get_sync(mdwc->dev);
 		dbg_event(0xFF, "StrtHost gync",
 			atomic_read(&mdwc->dev->power.usage_count));
-		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
 		if (!IS_ERR(mdwc->vbus_reg))
 			ret = regulator_enable(mdwc->vbus_reg);
 		if (ret) {
@@ -3614,8 +3730,13 @@
 		dbg_event(0xFF, "StopHost gsync",
 			atomic_read(&mdwc->dev->power.usage_count));
 		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+			usb_phy_notify_disconnect(mdwc->ss_phy,
+					USB_SPEED_SUPER);
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+		}
+
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
-		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
 		platform_device_del(dwc->xhci);
 		usb_unregister_notify(&mdwc->host_nb);
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index bb32978..b062d58 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -815,7 +815,13 @@
 		dwc->delayed_status = true;
 
 out:
-	if (ret < 0) {
+	/*
+	 * Don't try to halt ep0 if ret is -ESHUTDOWN.
+	 * A return of -ESHUTDOWN means the setup-packet response was
+	 * prepared but queueing it on ep0 failed, most likely because
+	 * ep0 has already been disabled.
+	 */
+	if (ret < 0 && ret != -ESHUTDOWN) {
 		dbg_event(0x0, "ERRSTAL", ret);
 		dwc3_ep0_stall_and_restart(dwc);
 	}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 026ff6c..df0427c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1443,6 +1443,11 @@
 	struct dwc3				*dwc = dep->dwc;
 	int					ret;
 
+	if (!dep->endpoint.desc) {
+		dev_dbg(dwc->dev, "(%s)'s desc is NULL.\n", dep->name);
+		return -EINVAL;
+	}
+
 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
 		return -EINVAL;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 12e94d5..308a49c 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1046,7 +1046,7 @@
 	log_event_dbg("%s: cpkt size:%d", __func__, cpkt->len);
 	if (qti_packet_debug)
 		print_hex_dump(KERN_DEBUG, "READ:", DUMP_PREFIX_OFFSET, 16, 1,
-			buf, min_t(int, 30, cpkt->len), false);
+			cpkt->buf, min_t(int, 30, cpkt->len), false);
 
 	ret = copy_to_user(buf, cpkt->buf, cpkt->len);
 	if (ret) {
@@ -1119,7 +1119,7 @@
 	c_port->copied_from_modem++;
 	if (qti_packet_debug)
 		print_hex_dump(KERN_DEBUG, "WRITE:", DUMP_PREFIX_OFFSET, 16, 1,
-			buf, min_t(int, 30, count), false);
+			cpkt->buf, min_t(int, 30, count), false);
 
 	spin_lock_irqsave(&c_port->lock, flags);
 	list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
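Both f_gsi.c hunks fix the debug hex dumps: the read path was passing `buf`, the userspace destination pointer handed to copy_to_user(), and the write path its userspace source, so print_hex_dump() showed the wrong (and possibly unmapped) memory instead of the kernel control-packet payload in cpkt->buf. The safe shape is simply the following (sketch, not a verbatim quote of the driver):

	/* Only dump kernel memory; a __user pointer has to be copied in or
	 * out before its contents can be inspected from the kernel. */
	if (qti_packet_debug)
		print_hex_dump(KERN_DEBUG, "READ:", DUMP_PREFIX_OFFSET, 16, 1,
			       cpkt->buf, min_t(int, 30, cpkt->len), false);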
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index cca261e..0b758236 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -395,7 +395,11 @@
 /* Caller must hold fsg->lock */
 static void wakeup_thread(struct fsg_common *common)
 {
-	smp_wmb();	/* ensure the write of bh->state is complete */
+	/*
+	 * Ensure the reading of thread_wakeup_needed
+	 * and the writing of bh->state are completed
+	 */
+	smp_mb();
 	/* Tell the main thread that something has happened */
 	common->thread_wakeup_needed = 1;
 	if (common->thread_task)
@@ -645,7 +649,12 @@
 	}
 	__set_current_state(TASK_RUNNING);
 	common->thread_wakeup_needed = 0;
-	smp_rmb();	/* ensure the latest bh->state is visible */
+
+	/*
+	 * Ensure the writing of thread_wakeup_needed
+	 * and the reading of bh->state are completed
+	 */
+	smp_mb();
 	return rc;
 }
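Replacing smp_wmb()/smp_rmb() with smp_mb() in wakeup_thread()/sleep_thread() is not cosmetic: each side performs a store followed by a load of the other side's variable, and ordering a store against a later load requires a full barrier, which neither smp_wmb() nor smp_rmb() provides. A simplified picture of the pairing (field names shortened):

	/* CPU0: wakeup_thread()              CPU1: sleep_thread()          */
	/*   bh->state = BUF_STATE_FULL;        thread_wakeup_needed = 0;   */
	/*   smp_mb();                          smp_mb();                   */
	/*   thread_wakeup_needed = 1;          rc = bh->state;             */
	/* Without full barriers each CPU can miss the other's update and   */
	/* the thread goes back to sleep on a buffer that is already full.  */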
 
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 17f6f60..40a7acf 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -474,6 +474,7 @@
 {
 	struct f_qdss *qdss;
 	int status;
+	unsigned long flags;
 
 	qdss = container_of(work, struct f_qdss, disconnect_w);
 	pr_debug("usb_qdss_disconnect_work\n");
@@ -496,6 +497,14 @@
 		status = set_qdss_data_connection(qdss, 0);
 		if (status)
 			pr_err("qdss_disconnect error");
+
+		spin_lock_irqsave(&qdss->lock, flags);
+		if (qdss->endless_req) {
+			usb_ep_free_request(qdss->port.data,
+					qdss->endless_req);
+			qdss->endless_req = NULL;
+		}
+		spin_unlock_irqrestore(&qdss->lock, flags);
 	}
 
 	/*
@@ -528,6 +537,8 @@
 {
 	struct f_qdss *qdss;
 	int status;
+	struct usb_request *req = NULL;
+	unsigned long flags;
 
 	qdss = container_of(work, struct f_qdss, connect_w);
 
@@ -548,8 +559,13 @@
 	if (qdss->ch.notify)
 		qdss->ch.notify(qdss->ch.priv, USB_QDSS_CONNECT,
 						NULL, &qdss->ch);
+	spin_lock_irqsave(&qdss->lock, flags);
+	req = qdss->endless_req;
+	spin_unlock_irqrestore(&qdss->lock, flags);
+	if (!req)
+		return;
 
-	status = usb_ep_queue(qdss->port.data, qdss->endless_req, GFP_ATOMIC);
+	status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);
 	if (status)
 		pr_err("%s: usb_ep_queue error (%d)\n", __func__, status);
 }
@@ -849,9 +865,11 @@
 		return;
 	}
 
-	usb_ep_dequeue(qdss->port.data, qdss->endless_req);
-	usb_ep_free_request(qdss->port.data, qdss->endless_req);
-	qdss->endless_req = NULL;
+	if (qdss->endless_req) {
+		usb_ep_dequeue(qdss->port.data, qdss->endless_req);
+		usb_ep_free_request(qdss->port.data, qdss->endless_req);
+		qdss->endless_req = NULL;
+	}
 	gadget = qdss->gadget;
 	ch->app_conn = 0;
 	spin_unlock_irqrestore(&qdss_lock, flags);
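The f_qdss.c changes make qdss->endless_req safe against a disconnect racing with the connect worker or a channel close: the disconnect path frees the request and clears the pointer under qdss->lock, the close path frees it only if it is still non-NULL, and the connect worker snapshots the pointer under the lock before queueing it. Condensed, the snapshot idiom is (sketch):

	spin_lock_irqsave(&qdss->lock, flags);
	req = qdss->endless_req;	/* may already have been freed + NULLed */
	spin_unlock_irqrestore(&qdss->lock, flags);
	if (!req)
		return;			/* nothing left to queue */
	status = usb_ep_queue(qdss->port.data, req, GFP_ATOMIC);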
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 47b2817..32aa45e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -385,10 +385,6 @@
 	int i;
 
 	ret = 0;
-	virt_dev = xhci->devs[slot_id];
-	if (!virt_dev)
-		return -ENODEV;
-
 	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
 	if (!cmd) {
 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
@@ -396,6 +392,13 @@
 	}
 
 	spin_lock_irqsave(&xhci->lock, flags);
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cmd);
+		return -ENODEV;
+	}
+
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
 			struct xhci_command *command;
@@ -412,6 +415,7 @@
 					i, suspend);
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
+				xhci_free_command(xhci, command);
 				goto err_cmd_queue;
 			}
 		}
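The xhci-hub.c hunk fixes two problems in the port-suspend helper: xhci->devs[slot_id] is now looked up under xhci->lock, since the slot can be disabled between an unlocked check and its use, and both the outer command and the per-endpoint stop command are freed on the failure paths, closing memory leaks. The resulting shape is the usual allocate-outside, validate-under-lock pattern (sketch):

	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cmd);	/* don't leak on the bail-out */
		return -ENODEV;
	}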
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index a50e327..5643613 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -29,6 +29,15 @@
 #include <linux/usb/usbpd.h>
 #include "usbpd.h"
 
+/* To start USB stack for USB3.1 compliance testing */
+static bool usb_compliance_mode;
+module_param(usb_compliance_mode, bool, 0644);
+MODULE_PARM_DESC(usb_compliance_mode, "Start USB stack for USB3.1 compliance testing");
+
+static bool disable_usb_pd;
+module_param(disable_usb_pd, bool, 0644);
+MODULE_PARM_DESC(disable_usb_pd, "Disable USB PD for USB3.1 compliance testing");
+
 enum usbpd_state {
 	PE_UNKNOWN,
 	PE_ERROR_RECOVERY,
@@ -187,6 +196,8 @@
 
 #define PD_MAX_MSG_ID		7
 
+#define PD_MAX_DATA_OBJ		7
+
 #define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
 	(((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
 	 ((pr) << 8) | ((id) << 9) | ((cnt) << 12))
@@ -308,7 +319,7 @@
 	struct list_head	rx_q;
 	spinlock_t		rx_lock;
 
-	u32			received_pdos[7];
+	u32			received_pdos[PD_MAX_DATA_OBJ];
 	u16			src_cap_id;
 	u8			selected_pdo;
 	u8			requested_pdo;
@@ -490,13 +501,12 @@
 	ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
 	/* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
 
-	/* MessageID incremented regardless of Tx error */
-	pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
-
 	if (ret < 0)
 		return ret;
 	else if (ret != num_data * sizeof(u32))
 		return -EIO;
+
+	pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
 	return 0;
 }
 
@@ -556,6 +566,7 @@
 
 static int pd_eval_src_caps(struct usbpd *pd)
 {
+	int obj_cnt;
 	union power_supply_propval val;
 	u32 first_pdo = pd->received_pdos[0];
 
@@ -572,6 +583,13 @@
 	power_supply_set_property(pd->usb_psy,
 			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
 
+	for (obj_cnt = 1; obj_cnt < PD_MAX_DATA_OBJ; obj_cnt++) {
+		if ((PD_SRC_PDO_TYPE(pd->received_pdos[obj_cnt]) ==
+					PD_SRC_PDO_TYPE_AUGMENTED) &&
+				!PD_APDO_PPS(pd->received_pdos[obj_cnt]))
+			pd->spec_rev = USBPD_REV_30;
+	}
+
 	/* Select the first PDO (vSafe5V) immediately. */
 	pd_select_pdo(pd, 1, 0, 0);
 
@@ -580,6 +598,8 @@
 
 static void pd_send_hard_reset(struct usbpd *pd)
 {
+	union power_supply_propval val = {0};
+
 	usbpd_dbg(&pd->dev, "send hard reset");
 
 	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
@@ -587,6 +607,7 @@
 	pd->hard_reset_count++;
 	pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
 	pd->in_pr_swap = false;
+	power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
 }
 
 static void kick_sm(struct usbpd *pd, int ms)
@@ -602,6 +623,8 @@
 
 static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
 {
+	union power_supply_propval val = {1};
+
 	if (type != HARD_RESET_SIG) {
 		usbpd_err(&pd->dev, "invalid signal (%d) received\n", type);
 		return;
@@ -612,6 +635,9 @@
 	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
 	set_power_role(pd, pd->current_pr);
 	pd->hard_reset_recvd = true;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
 	kick_sm(pd, 0);
 }
 
@@ -662,12 +688,6 @@
 		return;
 	}
 
-	/* if spec rev differs (i.e. is older), update PHY */
-	if (PD_MSG_HDR_REV(header) < pd->spec_rev) {
-		pd->spec_rev = PD_MSG_HDR_REV(header);
-		pd_phy_update_spec_rev(pd->spec_rev);
-	}
-
 	rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
 	if (!rx_msg)
 		return;
@@ -710,7 +730,6 @@
 		.shutdown_cb		= phy_shutdown,
 		.frame_filter_val	= FRAME_FILTER_EN_SOP |
 					  FRAME_FILTER_EN_HARD_RESET,
-		.spec_rev		= USBPD_REV_20,
 	};
 	union power_supply_propval val = {0};
 	unsigned long flags;
@@ -732,6 +751,15 @@
 		break;
 
 	/* Source states */
+	case PE_SRC_DISABLED:
+		/* are we still connected? */
+		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+			pd->current_pr = PR_NONE;
+			kick_sm(pd, 0);
+		}
+
+		break;
+
 	case PE_SRC_STARTUP:
 		if (pd->current_dr == DR_NONE) {
 			pd->current_dr = DR_DFP;
@@ -748,8 +776,6 @@
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
 
-		/* support only PD 2.0 as a source */
-		pd->spec_rev = USBPD_REV_20;
 		pd_reset_protocol(pd);
 
 		if (!pd->in_pr_swap) {
@@ -760,7 +786,6 @@
 
 			phy_params.data_role = pd->current_dr;
 			phy_params.power_role = pd->current_pr;
-			phy_params.spec_rev = pd->spec_rev;
 
 			ret = pd_phy_open(&phy_params);
 			if (ret) {
@@ -772,14 +797,15 @@
 			}
 
 			pd->pd_phy_opened = true;
-		} else {
-			pd_phy_update_spec_rev(pd->spec_rev);
 		}
 
 		pd->current_state = PE_SRC_SEND_CAPABILITIES;
 		if (pd->in_pr_swap) {
 			kick_sm(pd, SWAP_SOURCE_START_TIME);
 			pd->in_pr_swap = false;
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PR_SWAP, &val);
 			break;
 		}
 
@@ -862,6 +888,10 @@
 
 	case PE_SRC_HARD_RESET:
 	case PE_SNK_HARD_RESET:
+		/* are we still connected? */
+		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE)
+			pd->current_pr = PR_NONE;
+
 		/* hard reset may sleep; handle it in the workqueue */
 		kick_sm(pd, 0);
 		break;
@@ -888,7 +918,8 @@
 			pd->current_dr = DR_UFP;
 
 			if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
-				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP)
+				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+				usb_compliance_mode)
 				start_usb_peripheral(pd);
 		}
 
@@ -902,14 +933,9 @@
 			break;
 		}
 
-		if (!val.intval)
+		if (!val.intval || disable_usb_pd)
 			break;
 
-		/*
-		 * support up to PD 3.0 as a sink; if source is 2.0,
-		 * phy_msg_received() will handle the downgrade.
-		 */
-		pd->spec_rev = USBPD_REV_30;
 		pd_reset_protocol(pd);
 
 		if (!pd->in_pr_swap) {
@@ -920,7 +946,6 @@
 
 			phy_params.data_role = pd->current_dr;
 			phy_params.power_role = pd->current_pr;
-			phy_params.spec_rev = pd->spec_rev;
 
 			ret = pd_phy_open(&phy_params);
 			if (ret) {
@@ -932,8 +957,6 @@
 			}
 
 			pd->pd_phy_opened = true;
-		} else {
-			pd_phy_update_spec_rev(pd->spec_rev);
 		}
 
 		pd->current_voltage = pd->requested_voltage = 5000000;
@@ -1548,6 +1571,11 @@
 		if (pd->current_state == PE_UNKNOWN)
 			goto sm_done;
 
+		if (pd->vconn_enabled) {
+			regulator_disable(pd->vconn);
+			pd->vconn_enabled = false;
+		}
+
 		usbpd_info(&pd->dev, "USB Type-C disconnect\n");
 
 		if (pd->pd_phy_opened) {
@@ -1567,7 +1595,6 @@
 		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
 		rx_msg_cleanup(pd);
 
-		val.intval = 0;
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
 
@@ -1583,11 +1610,6 @@
 			pd->vbus_enabled = false;
 		}
 
-		if (pd->vconn_enabled) {
-			regulator_disable(pd->vconn);
-			pd->vconn_enabled = false;
-		}
-
 		reset_vdm_state(pd);
 		if (pd->current_dr == DR_UFP)
 			stop_usb_peripheral(pd);
@@ -1602,6 +1624,10 @@
 			usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
 				(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
 
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
+
 		/* set due to dual_role class "mode" change */
 		if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
 			val.intval = pd->forced_pr;
@@ -1625,11 +1651,22 @@
 	if (pd->hard_reset_recvd) {
 		pd->hard_reset_recvd = false;
 
-		val.intval = 1;
+		if (pd->requested_current) {
+			val.intval = pd->requested_current = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+		}
+
+		pd->requested_voltage = 5000000;
+		val.intval = pd->requested_voltage;
 		power_supply_set_property(pd->usb_psy,
-				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+				POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
 
 		pd->in_pr_swap = false;
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
+
 		pd->in_explicit_contract = false;
 		pd->selected_pdo = pd->requested_pdo = 0;
 		pd->rdo = 0;
@@ -1733,14 +1770,8 @@
 
 	case PE_SRC_READY:
 		if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
-			ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
-					default_src_caps,
-					ARRAY_SIZE(default_src_caps), SOP_MSG);
-			if (ret) {
-				usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
-				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
-				break;
-			}
+			pd->current_state = PE_SRC_SEND_CAPABILITIES;
+			kick_sm(pd, 0);
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
 					pd->sink_caps, pd->num_sink_caps,
@@ -1896,6 +1927,9 @@
 
 	case PE_SNK_WAIT_FOR_CAPABILITIES:
 		pd->in_pr_swap = false;
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 
 		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
 			val.intval = 0;
@@ -1915,15 +1949,6 @@
 					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		} else if (pd->hard_reset_count < 3) {
 			usbpd_set_state(pd, PE_SNK_HARD_RESET);
-		} else if (pd->pd_connected) {
-			usbpd_info(&pd->dev, "Sink hard reset count exceeded, forcing reconnect\n");
-
-			val.intval = 0;
-			power_supply_set_property(pd->usb_psy,
-					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
-					&val);
-
-			usbpd_set_state(pd, PE_ERROR_RECOVERY);
 		} else {
 			usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
 
@@ -2071,6 +2096,9 @@
 			}
 
 			pd->in_pr_swap = true;
+			val.intval = 1;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PR_SWAP, &val);
 			usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 			break;
 		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
@@ -2214,6 +2242,9 @@
 
 	case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
 		pd->in_pr_swap = true;
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 		pd->in_explicit_contract = false;
 
 		if (pd->vbus_enabled) {
@@ -2254,6 +2285,9 @@
 		}
 
 		pd->in_pr_swap = true;
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 		break;
 
@@ -2313,6 +2347,14 @@
 sm_done:
 	kfree(rx_msg);
 
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	ret = list_empty(&pd->rx_q);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	/* requeue if there are any new/pending RX messages */
+	if (!ret)
+		kick_sm(pd, 0);
+
 	if (!pd->sm_queued)
 		pm_relax(&pd->dev);
 }
@@ -3187,7 +3229,7 @@
 	if (ret)
 		goto free_pd;
 
-	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE);
+	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
 	if (!pd->wq) {
 		ret = -ENOMEM;
 		goto del_pd;
@@ -3309,6 +3351,8 @@
 		pd->dual_role->drv_data = pd;
 	}
 
+	/* default support as PD 2.0 source or sink */
+	pd->spec_rev = USBPD_REV_20;
 	pd->current_pr = PR_NONE;
 	pd->current_dr = DR_NONE;
 	list_add_tail(&pd->instance, &_usbpd);
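Summarising the policy_engine.c changes: two module parameters gate USB 3.1 compliance testing (usb_compliance_mode starts the peripheral stack even when the charger is not detected as SDP/CDP, disable_usb_pd skips PD negotiation entirely); spec-revision handling moves out of the PD PHY, so the engine defaults to PD 2.0 and only switches to 3.0 behaviour when an augmented PDO shows up in the source capabilities; tx_msgid is advanced only after a successful transmit; and every PR-swap and hard-reset transition is mirrored to the charger through POWER_SUPPLY_PROP_PR_SWAP and related properties. Because both parameters use mode 0644 they can also be toggled at runtime; a minimal sketch of how such a flag is declared and consumed, mirroring the hunks above:

	static bool usb_compliance_mode;
	module_param(usb_compliance_mode, bool, 0644);
	/* 0644 also exposes a writable file under
	 * /sys/module/<module>/parameters/usb_compliance_mode */

	/* later, when deciding whether to bring up the peripheral stack: */
	if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
	    pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
	    usb_compliance_mode)
		start_usb_peripheral(pd);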
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 4caee72..1f5306f 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -108,6 +108,7 @@
 	int tx_status;
 	u8 frame_filter_val;
 	bool in_test_data_mode;
+	bool rx_busy;
 
 	enum data_role data_role;
 	enum power_role power_role;
@@ -334,15 +335,6 @@
 }
 EXPORT_SYMBOL(pd_phy_update_roles);
 
-int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
-	struct usb_pdphy *pdphy = __pdphy;
-
-	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
-			MSG_CONFIG_SPEC_REV_MASK, rev);
-}
-EXPORT_SYMBOL(pd_phy_update_spec_rev);
-
 int pd_phy_open(struct pd_phy_params *params)
 {
 	int ret;
@@ -377,7 +369,9 @@
 	if (ret)
 		return ret;
 
-	ret = pd_phy_update_spec_rev(params->spec_rev);
+	/* PD 2.0 PHY */
+	ret = pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+			MSG_CONFIG_SPEC_REV_MASK, USBPD_REV_20);
 	if (ret)
 		return ret;
 
@@ -492,6 +486,12 @@
 		return -EINVAL;
 	}
 
+	ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
+	if (ret || val || pdphy->rx_busy) {
+		dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
+		return -EBUSY;
+	}
+
 	pdphy->tx_status = -EINPROGRESS;
 
 	/* write 2 byte SOP message header */
@@ -664,6 +664,15 @@
 			BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
 }
 
+static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->rx_busy = true;
+
+	return IRQ_WAKE_THREAD;
+}
+
 static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
 {
 	u8 size, rx_status, frame_type;
@@ -720,6 +729,7 @@
 		false);
 	pdphy->rx_bytes += size + 1;
 done:
+	pdphy->rx_busy = false;
 	return IRQ_HANDLED;
 }
 
@@ -805,7 +815,7 @@
 		return ret;
 
 	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
-		&pdphy->msg_rx_irq, "msg-rx", NULL,
+		&pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
 		pdphy_msg_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
 	if (ret < 0)
 		return ret;
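The qpnp-pdphy.c change adds a hard-IRQ handler in front of the existing threaded one: the hard handler only sets pdphy->rx_busy and returns IRQ_WAKE_THREAD, and the transmit path now returns -EBUSY while rx_busy is set or the RX_ACKNOWLEDGE register reports a pending message, so a transmit cannot trample an in-flight receive. The generic shape of that split (the driver wraps it in its own pdphy_request_irq() helper) is:

	static irqreturn_t msg_rx_hardirq(int irq, void *data)
	{
		struct usb_pdphy *pdphy = data;

		pdphy->rx_busy = true;		/* visible to the TX path */
		return IRQ_WAKE_THREAD;		/* defer the heavy lifting */
	}

	ret = request_threaded_irq(irq, msg_rx_hardirq, msg_rx_thread,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "msg-rx", pdphy);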
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
index b2663ad..1087017 100644
--- a/drivers/usb/pd/usbpd.h
+++ b/drivers/usb/pd/usbpd.h
@@ -68,7 +68,6 @@
 	enum data_role	data_role;
 	enum power_role power_role;
 	u8		frame_filter_val;
-	u8		spec_rev;
 };
 
 #if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
@@ -77,7 +76,6 @@
 int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
 	enum pd_msg_type type, unsigned int timeout_ms);
 int pd_phy_update_roles(enum data_role dr, enum power_role pr);
-int pd_phy_update_spec_rev(enum pd_spec_rev rev);
 void pd_phy_close(void);
 #else
 static inline int pd_phy_open(struct pd_phy_params *params)
@@ -101,11 +99,6 @@
 	return -ENODEV;
 }
 
-static inline int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
-	return -ENODEV;
-}
-
 static inline void pd_phy_close(void)
 {
 }
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 8bdd9fd..59f5379 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -201,14 +201,16 @@
 
 	if (enable) {
 		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+		val = readb_relaxed(phy->base + autonomous_mode_offset);
+		val |= ARCVR_DTCT_EN;
 		if (phy->phy.flags & DEVICE_IN_SS_MODE) {
-			val =
-			readb_relaxed(phy->base + autonomous_mode_offset);
-			val |= ARCVR_DTCT_EN;
 			val |= ALFPS_DTCT_EN;
 			val &= ~ARCVR_DTCT_EVENT_SEL;
-			writeb_relaxed(val, phy->base + autonomous_mode_offset);
+		} else {
+			val &= ~ALFPS_DTCT_EN;
+			val |= ARCVR_DTCT_EVENT_SEL;
 		}
+		writeb_relaxed(val, phy->base + autonomous_mode_offset);
 		msm_ssusb_qmp_clamp_enable(phy, true);
 	} else {
 		msm_ssusb_qmp_clamp_enable(phy, false);
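In the QMP PHY wakeup configuration, receiver detect (ARCVR_DTCT_EN) is now armed in both link states, while LFPS detection is enabled only when the device suspended in SuperSpeed and the detect-event select bit is flipped for the non-SS case. The hunk is a straightforward read-modify-write of the autonomous-mode register (condensed):

	val = readb_relaxed(phy->base + autonomous_mode_offset);
	val |= ARCVR_DTCT_EN;			/* always arm receiver detect */
	if (phy->phy.flags & DEVICE_IN_SS_MODE) {
		val |= ALFPS_DTCT_EN;		/* LFPS wakeup only in SS     */
		val &= ~ARCVR_DTCT_EVENT_SEL;
	} else {
		val &= ~ALFPS_DTCT_EN;
		val |= ARCVR_DTCT_EVENT_SEL;
	}
	writeb_relaxed(val, phy->base + autonomous_mode_offset);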
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 702040f..0e60614 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -335,8 +335,8 @@
 				st->global_error = 1;
 		}
 	}
-	st->va += PAGE_SIZE * nr;
-	st->index += nr;
+	st->va += XEN_PAGE_SIZE * nr;
+	st->index += nr / XEN_PFN_PER_PAGE;
 
 	return 0;
 }
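The privcmd change matters on arm64 kernels built with 64 KiB pages: Xen always operates on 4 KiB frames, so after a batch of `nr` frames the virtual address must advance by XEN_PAGE_SIZE per frame while the page-array index advances only once per full kernel page. Assuming the usual definition XEN_PFN_PER_PAGE == PAGE_SIZE / XEN_PAGE_SIZE (16 for a 64 KiB kernel page):

	st->va    += XEN_PAGE_SIZE * nr;	/* bytes actually mapped   */
	st->index += nr / XEN_PFN_PER_PAGE;	/* kernel pages consumed   */
	/* On 4 KiB-page kernels XEN_PFN_PER_PAGE is 1, so this reduces to
	 * the old "PAGE_SIZE * nr" / "index += nr" arithmetic. */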
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 2924bddb..07e46b7 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -713,7 +713,7 @@
 		bdev->bd_contains = NULL;
 		bdev->bd_super = NULL;
 		bdev->bd_inode = inode;
-		bdev->bd_block_size = (1 << inode->i_blkbits);
+		bdev->bd_block_size = i_blocksize(inode);
 		bdev->bd_part_count = 0;
 		bdev->bd_invalidated = 0;
 		inode->i_mode = S_IFBLK;
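The long run of filesystem hunks that follows replaces open-coded `1 << inode->i_blkbits` with the i_blocksize() helper. The helper is equivalent but self-documenting and keeps the result unsigned; its definition in <linux/fs.h> is essentially (paraphrased):

	static inline unsigned int i_blocksize(const struct inode *inode)
	{
		return 1U << inode->i_blkbits;	/* fs block size in bytes */
	}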
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5909ae8..e46e7fb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3984,6 +3984,7 @@
 				    info->space_info_kobj, "%s",
 				    alloc_name(found->flags));
 	if (ret) {
+		percpu_counter_destroy(&found->total_bytes_pinned);
 		kfree(found);
 		return ret;
 	}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3a14c87..3286a6e 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2842,7 +2842,7 @@
 		if (!ret)
 			ret = btrfs_prealloc_file_range(inode, mode,
 					range->start,
-					range->len, 1 << inode->i_blkbits,
+					range->len, i_blocksize(inode),
 					offset + len, &alloc_hint);
 		else
 			btrfs_free_reserved_data_space(inode, range->start,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index be4da91..bddbae7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7435,8 +7435,8 @@
 	int found = false;
 	void **pagep = NULL;
 	struct page *page = NULL;
-	int start_idx;
-	int end_idx;
+	unsigned long start_idx;
+	unsigned long end_idx;
 
 	start_idx = start >> PAGE_SHIFT;
 
diff --git a/fs/buffer.c b/fs/buffer.c
index b205a62..5d8f496 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2353,7 +2353,7 @@
 			    loff_t pos, loff_t *bytes)
 {
 	struct inode *inode = mapping->host;
-	unsigned blocksize = 1 << inode->i_blkbits;
+	unsigned int blocksize = i_blocksize(inode);
 	struct page *page;
 	void *fsdata;
 	pgoff_t index, curidx;
@@ -2433,8 +2433,8 @@
 			get_block_t *get_block, loff_t *bytes)
 {
 	struct inode *inode = mapping->host;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	unsigned zerofrom;
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int zerofrom;
 	int err;
 
 	err = cont_expand_zero(file, mapping, pos, bytes);
@@ -2796,7 +2796,7 @@
 	struct buffer_head map_bh;
 	int err;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	length = offset & (blocksize - 1);
 
 	/* Block boundary? Nothing to do */
@@ -2874,7 +2874,7 @@
 	struct buffer_head *bh;
 	int err;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	length = offset & (blocksize - 1);
 
 	/* Block boundary? Nothing to do */
@@ -2986,7 +2986,7 @@
 	struct inode *inode = mapping->host;
 	tmp.b_state = 0;
 	tmp.b_blocknr = 0;
-	tmp.b_size = 1 << inode->i_blkbits;
+	tmp.b_size = i_blocksize(inode);
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 18dc18f..900ffaf 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -745,7 +745,7 @@
 	struct pagevec pvec;
 	int done = 0;
 	int rc = 0;
-	unsigned wsize = 1 << inode->i_blkbits;
+	unsigned int wsize = i_blocksize(inode);
 	struct ceph_osd_request *req = NULL;
 	int do_sync = 0;
 	loff_t snap_size, i_size;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index fb9aa16..c60756e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -587,7 +587,7 @@
 /*
  * Call into the fs to map some more disk blocks.  We record the current number
  * of available blocks at sdio->blocks_available.  These are in units of the
- * fs blocksize, (1 << inode->i_blkbits).
+ * fs blocksize, i_blocksize(inode).
  *
  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
  * it uses the passed inode-relative block number as the file offset, as usual.
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 9fbf92c..a3e0b3b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3413,13 +3413,13 @@
 	struct ext4_sb_info *sbi;
 	struct ext4_extent_header *eh;
 	struct ext4_map_blocks split_map;
-	struct ext4_extent zero_ex;
+	struct ext4_extent zero_ex1, zero_ex2;
 	struct ext4_extent *ex, *abut_ex;
 	ext4_lblk_t ee_block, eof_block;
 	unsigned int ee_len, depth, map_len = map->m_len;
 	int allocated = 0, max_zeroout = 0;
 	int err = 0;
-	int split_flag = 0;
+	int split_flag = EXT4_EXT_DATA_VALID2;
 
 	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@
 	ex = path[depth].p_ext;
 	ee_block = le32_to_cpu(ex->ee_block);
 	ee_len = ext4_ext_get_actual_len(ex);
-	zero_ex.ee_len = 0;
+	zero_ex1.ee_len = 0;
+	zero_ex2.ee_len = 0;
 
 	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3576,62 +3577,52 @@
 	if (ext4_encrypted_inode(inode))
 		max_zeroout = 0;
 
-	/* If extent is less than s_max_zeroout_kb, zeroout directly */
-	if (max_zeroout && (ee_len <= max_zeroout)) {
-		err = ext4_ext_zeroout(inode, ex);
-		if (err)
-			goto out;
-		zero_ex.ee_block = ex->ee_block;
-		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
-		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-
-		err = ext4_ext_get_access(handle, inode, path + depth);
-		if (err)
-			goto out;
-		ext4_ext_mark_initialized(ex);
-		ext4_ext_try_to_merge(handle, inode, path, ex);
-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-		goto out;
-	}
-
 	/*
-	 * four cases:
+	 * five cases:
 	 * 1. split the extent into three extents.
-	 * 2. split the extent into two extents, zeroout the first half.
-	 * 3. split the extent into two extents, zeroout the second half.
+	 * 2. split the extent into two extents, zeroout the head of the first
+	 *    extent.
+	 * 3. split the extent into two extents, zeroout the tail of the second
+	 *    extent.
 	 * 4. split the extent into two extents with out zeroout.
+	 * 5. no splitting needed, just possibly zeroout the head and / or the
+	 *    tail of the extent.
 	 */
 	split_map.m_lblk = map->m_lblk;
 	split_map.m_len = map->m_len;
 
-	if (max_zeroout && (allocated > map->m_len)) {
+	if (max_zeroout && (allocated > split_map.m_len)) {
 		if (allocated <= max_zeroout) {
-			/* case 3 */
-			zero_ex.ee_block =
-					 cpu_to_le32(map->m_lblk);
-			zero_ex.ee_len = cpu_to_le16(allocated);
-			ext4_ext_store_pblock(&zero_ex,
-				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
-			err = ext4_ext_zeroout(inode, &zero_ex);
+			/* case 3 or 5 */
+			zero_ex1.ee_block =
+				 cpu_to_le32(split_map.m_lblk +
+					     split_map.m_len);
+			zero_ex1.ee_len =
+				cpu_to_le16(allocated - split_map.m_len);
+			ext4_ext_store_pblock(&zero_ex1,
+				ext4_ext_pblock(ex) + split_map.m_lblk +
+				split_map.m_len - ee_block);
+			err = ext4_ext_zeroout(inode, &zero_ex1);
 			if (err)
 				goto out;
-			split_map.m_lblk = map->m_lblk;
 			split_map.m_len = allocated;
-		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
-			/* case 2 */
-			if (map->m_lblk != ee_block) {
-				zero_ex.ee_block = ex->ee_block;
-				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+		}
+		if (split_map.m_lblk - ee_block + split_map.m_len <
+								max_zeroout) {
+			/* case 2 or 5 */
+			if (split_map.m_lblk != ee_block) {
+				zero_ex2.ee_block = ex->ee_block;
+				zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
 							ee_block);
-				ext4_ext_store_pblock(&zero_ex,
+				ext4_ext_store_pblock(&zero_ex2,
 						      ext4_ext_pblock(ex));
-				err = ext4_ext_zeroout(inode, &zero_ex);
+				err = ext4_ext_zeroout(inode, &zero_ex2);
 				if (err)
 					goto out;
 			}
 
+			split_map.m_len += split_map.m_lblk - ee_block;
 			split_map.m_lblk = ee_block;
-			split_map.m_len = map->m_lblk - ee_block + map->m_len;
 			allocated = map->m_len;
 		}
 	}
@@ -3642,8 +3633,11 @@
 		err = 0;
 out:
 	/* If we have gotten a failure, don't zero out status tree */
-	if (!err)
-		err = ext4_zeroout_es(inode, &zero_ex);
+	if (!err) {
+		err = ext4_zeroout_es(inode, &zero_ex1);
+		if (!err)
+			err = ext4_zeroout_es(inode, &zero_ex2);
+	}
 	return err ? err : allocated;
 }
 
@@ -4893,6 +4887,8 @@
 
 	/* Zero out partial block at the edges of the range */
 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 	if (file->f_flags & O_SYNC)
 		ext4_handle_sync(handle);
@@ -5579,6 +5575,7 @@
 		ext4_handle_sync(handle);
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
@@ -5752,6 +5749,8 @@
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 2a822d3..9e77c08 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -432,47 +432,27 @@
 		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  (pgoff_t)num);
-		if (nr_pages == 0) {
-			if (whence == SEEK_DATA)
-				break;
-
-			BUG_ON(whence != SEEK_HOLE);
-			/*
-			 * If this is the first time to go into the loop and
-			 * offset is not beyond the end offset, it will be a
-			 * hole at this offset
-			 */
-			if (lastoff == startoff || lastoff < endoff)
-				found = 1;
+		if (nr_pages == 0)
 			break;
-		}
-
-		/*
-		 * If this is the first time to go into the loop and
-		 * offset is smaller than the first page offset, it will be a
-		 * hole at this offset.
-		 */
-		if (lastoff == startoff && whence == SEEK_HOLE &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = 1;
-			break;
-		}
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			struct buffer_head *bh, *head;
 
 			/*
-			 * If the current offset is not beyond the end of given
-			 * range, it will be a hole.
+			 * If current offset is smaller than the page offset,
+			 * there is a hole at this offset.
 			 */
-			if (lastoff < endoff && whence == SEEK_HOLE &&
-			    page->index > end) {
+			if (whence == SEEK_HOLE && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
 				found = 1;
 				*offset = lastoff;
 				goto out;
 			}
 
+			if (page->index > end)
+				goto out;
+
 			lock_page(page);
 
 			if (unlikely(page->mapping != inode->i_mapping)) {
@@ -512,20 +492,18 @@
 			unlock_page(page);
 		}
 
-		/*
-		 * The no. of pages is less than our desired, that would be a
-		 * hole in there.
-		 */
-		if (nr_pages < num && whence == SEEK_HOLE) {
-			found = 1;
-			*offset = lastoff;
+		/* The no. of pages is less than our desired, we are done. */
+		if (nr_pages < num)
 			break;
-		}
 
 		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	if (whence == SEEK_HOLE && lastoff < endoff) {
+		found = 1;
+		*offset = lastoff;
+	}
 out:
 	pagevec_release(&pvec);
 	return found;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 42723b2..c08d499 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2218,7 +2218,7 @@
 {
 	struct inode *inode = mpd->inode;
 	int err;
-	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
 							>> inode->i_blkbits;
 
 	do {
@@ -3478,14 +3478,14 @@
 		 * writes need zeroing either because they can race with page
 		 * faults or because they use partial blocks.
 		 */
-		if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
+		if (round_down(offset, i_blocksize(inode)) >= inode->i_size &&
 		    ext4_aligned_io(inode, offset, count))
 			get_block_func = ext4_dio_get_block;
 		else
 			get_block_func = ext4_dax_get_block;
 		dio_flags = DIO_LOCKING;
 	} else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
-		   round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
+		   round_down(offset, i_blocksize(inode)) >= inode->i_size) {
 		get_block_func = ext4_dio_get_block;
 		dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
 	} else if (is_sync_kiocb(iocb)) {
@@ -4099,6 +4099,8 @@
 
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
 	ext4_journal_stop(handle);
 out_dio:
@@ -5101,7 +5103,7 @@
 	 * do. We do the check mainly to optimize the common PAGE_SIZE ==
 	 * blocksize case
 	 */
-	if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
+	if (offset > PAGE_SIZE - i_blocksize(inode))
 		return;
 	while (1) {
 		page = find_lock_page(inode->i_mapping,
@@ -5496,8 +5498,9 @@
 	/* No extended attributes present */
 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-			new_extra_isize);
+		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+		       EXT4_I(inode)->i_extra_isize, 0,
+		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
 		return 0;
 	}
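The ext4_expand_extra_isize fix is easiest to see with numbers. The old memset started at EXT4_GOOD_OLD_INODE_SIZE and cleared new_extra_isize bytes, wiping whatever already lived in the current extra-isize area (the i_extra_isize field itself included); only the newly grown tail is actually uninitialised. Worked example with illustrative sizes:

	/* current i_extra_isize = 32, new_extra_isize = 64:
	 * only bytes [GOOD_OLD + 32, GOOD_OLD + 64) are newly claimed space */
	memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + 32, 0, 64 - 32);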
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f9eee77..df8168f 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -838,7 +838,7 @@
 	inode = page->mapping->host;
 	sb = inode->i_sb;
 	ngroups = ext4_get_groups_count(sb);
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	blocks_per_page = PAGE_SIZE / blocksize;
 
 	groups_per_page = blocks_per_page >> 1;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 6fc14de..578f8c3 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -187,7 +187,7 @@
 	if (PageUptodate(page))
 		return 0;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a7943f86..74a2b44 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1805,6 +1805,8 @@
 
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 {
+	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
+	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
 	int type = CURSEG_HOT_DATA;
 	int err;
 
@@ -1831,6 +1833,11 @@
 			return err;
 	}
 
+	/* sanity check for summary blocks */
+	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
+			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7e0c002..b81998e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1424,6 +1424,8 @@
 	unsigned int total, fsmeta;
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+	unsigned int main_segs, blocks_per_seg;
+	int i;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1435,6 +1437,22 @@
 	if (unlikely(fsmeta >= total))
 		return 1;
 
+	main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+	blocks_per_seg = sbi->blocks_per_seg;
+
+	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+		    le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+			return 1;
+		}
+	}
+	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+		    le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+			return 1;
+		}
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
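Both f2fs hunks harden mount against a crafted or corrupted image: restore_curseg_summaries() now rejects checkpoints whose NAT or SIT journal claims more entries than a journal block can hold, and sanity_check_ckpt() verifies that every current-segment number lies inside the main area and every block offset inside a segment. The bound being enforced for each current segment is simply (sketch):

	if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
	    le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
		return 1;	/* reject the checkpoint as inconsistent */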
diff --git a/fs/iomap.c b/fs/iomap.c
index 814ae8f..798c291 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -419,8 +419,8 @@
 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
 		struct iomap_ops *ops)
 {
-	unsigned blocksize = (1 << inode->i_blkbits);
-	unsigned off = pos & (blocksize - 1);
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int off = pos & (blocksize - 1);
 
 	/* Block boundary? Nothing to do */
 	if (!off)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 85671f7..14be95b 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -758,7 +758,7 @@
 				sb->s_blocksize - offset : toread;
 
 		tmp_bh.b_state = 0;
-		tmp_bh.b_size = 1 << inode->i_blkbits;
+		tmp_bh.b_size = i_blocksize(inode);
 		err = jfs_get_block(inode, blk, &tmp_bh, 0);
 		if (err)
 			return err;
@@ -798,7 +798,7 @@
 				sb->s_blocksize - offset : towrite;
 
 		tmp_bh.b_state = 0;
-		tmp_bh.b_size = 1 << inode->i_blkbits;
+		tmp_bh.b_size = i_blocksize(inode);
 		err = jfs_get_block(inode, blk, &tmp_bh, 1);
 		if (err)
 			goto out;
diff --git a/fs/mpage.c b/fs/mpage.c
index 802b481..1193d43 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -151,7 +151,7 @@
 			SetPageUptodate(page);    
 			return;
 		}
-		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+		create_empty_buffers(page, i_blocksize(inode), 0);
 	}
 	head = page_buffers(page);
 	page_bh = head;
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 0780ff8..3e396db 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -23,7 +23,7 @@
 {
 	struct nfsd4_layout_seg *seg = &args->lg_seg;
 	struct super_block *sb = inode->i_sb;
-	u32 block_size = (1 << inode->i_blkbits);
+	u32 block_size = i_blocksize(inode);
 	struct pnfs_block_extent *bex;
 	struct iomap iomap;
 	u32 device_generation = 0;
@@ -180,7 +180,7 @@
 	int nr_iomaps;
 
 	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
-			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+			lcp->lc_up_len, &iomaps, i_blocksize(inode));
 	if (nr_iomaps < 0)
 		return nfserrno(nr_iomaps);
 
@@ -372,7 +372,7 @@
 	int nr_iomaps;
 
 	nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
-			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+			lcp->lc_up_len, &iomaps, i_blocksize(inode));
 	if (nr_iomaps < 0)
 		return nfserrno(nr_iomaps);
 
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 650226f..022d958 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1783,6 +1783,12 @@
 			opdesc->op_get_currentstateid(cstate, &op->u);
 		op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+		/* Only from SEQUENCE */
+		if (cstate->status == nfserr_replay_cache) {
+			dprintk("%s NFS4.1 replay from cache\n", __func__);
+			status = op->status;
+			goto out;
+		}
 		if (!op->status) {
 			if (opdesc->op_set_currentstateid)
 				opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1793,14 +1799,7 @@
 			if (need_wrongsec_check(rqstp))
 				op->status = check_nfsd_access(current_fh->fh_export, rqstp);
 		}
-
 encode_op:
-		/* Only from SEQUENCE */
-		if (cstate->status == nfserr_replay_cache) {
-			dprintk("%s NFS4.1 replay from cache\n", __func__);
-			status = op->status;
-			goto out;
-		}
 		if (op->status == nfserr_replay_me) {
 			op->replay = &cstate->replay_owner->so_replay;
 			nfsd4_encode_replay(&resp->xdr, op);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2ee80e1..4e7a56a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2793,9 +2793,16 @@
 	}
 #endif /* CONFIG_NFSD_PNFS */
 	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
-		status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
-						  NFSD_SUPPATTR_EXCLCREAT_WORD1,
-						  NFSD_SUPPATTR_EXCLCREAT_WORD2);
+		u32 supp[3];
+
+		supp[0] = nfsd_suppattrs0(minorversion);
+		supp[1] = nfsd_suppattrs1(minorversion);
+		supp[2] = nfsd_suppattrs2(minorversion);
+		supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+		supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+		supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+		status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
 		if (status)
 			goto out;
 	}
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index d5c23da..c21e0b4 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -50,7 +50,7 @@
 		brelse(bh);
 		BUG();
 	}
-	memset(bh->b_data, 0, 1 << inode->i_blkbits);
+	memset(bh->b_data, 0, i_blocksize(inode));
 	bh->b_bdev = inode->i_sb->s_bdev;
 	bh->b_blocknr = blocknr;
 	set_buffer_mapped(bh);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index c7f4fef..7ffe71a 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -51,7 +51,7 @@
 {
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
 
-	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+	inode_add_bytes(inode, i_blocksize(inode) * n);
 	if (root)
 		atomic64_add(n, &root->blocks_count);
 }
@@ -60,7 +60,7 @@
 {
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
 
-	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+	inode_sub_bytes(inode, i_blocksize(inode) * n);
 	if (root)
 		atomic64_sub(n, &root->blocks_count);
 }
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index d56d3a5..98835ed 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -57,7 +57,7 @@
 	set_buffer_mapped(bh);
 
 	kaddr = kmap_atomic(bh->b_page);
-	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
+	memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
 	if (init_block)
 		init_block(inode, bh, kaddr);
 	flush_dcache_page(bh->b_page);
@@ -501,7 +501,7 @@
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 
 	mi->mi_entry_size = entry_size;
-	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
+	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
 	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
 }
 
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index bedcae2..7d18d62 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -723,7 +723,7 @@
 
 		lock_page(page);
 		if (!page_has_buffers(page))
-			create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+			create_empty_buffers(page, i_blocksize(inode), 0);
 		unlock_page(page);
 
 		bh = head = page_buffers(page);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c5c5b97..f2961b1 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@
 	int ret = 0;
 	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
 	unsigned int block_end, block_start;
-	unsigned int bsize = 1 << inode->i_blkbits;
+	unsigned int bsize = i_blocksize(inode);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, bsize, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 000c234..0db6f83 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -808,7 +808,7 @@
 	/* We know that zero_from is block aligned */
 	for (block_start = zero_from; block_start < zero_to;
 	     block_start = block_end) {
-		block_end = block_start + (1 << inode->i_blkbits);
+		block_end = block_start + i_blocksize(inode);
 
 		/*
 		 * block_start is block-aligned.  Bump it by one to force
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 06af81f..9b96b99 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -306,7 +306,7 @@
 		break;
 	case S_IFDIR:
 		inode->i_size = PAGE_SIZE;
-		orangefs_inode->blksize = (1 << inode->i_blkbits);
+		orangefs_inode->blksize = i_blocksize(inode);
 		spin_lock(&inode->i_lock);
 		inode_set_bytes(inode, inode->i_size);
 		spin_unlock(&inode->i_lock);
@@ -316,7 +316,7 @@
 		if (new) {
 			inode->i_size = (loff_t)strlen(new_op->
 			    downcall.resp.getattr.link_target);
-			orangefs_inode->blksize = (1 << inode->i_blkbits);
+			orangefs_inode->blksize = i_blocksize(inode);
 			ret = strscpy(orangefs_inode->link_target,
 			    new_op->downcall.resp.getattr.link_target,
 			    ORANGEFS_NAME_MAX);
diff --git a/fs/pnode.c b/fs/pnode.c
index b5f97c6..e4e428d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -504,9 +504,14 @@
 	if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
 		return first_slave(cur);
 	do {
-		if (cur->mnt_slave.next != &cur->mnt_master->mnt_slave_list)
-			return next_slave(cur);
-		cur = cur->mnt_master;
+		struct mount *master = cur->mnt_master;
+
+		if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+			struct mount *next = next_slave(cur);
+
+			return (next == root) ? NULL : next;
+		}
+		cur = master;
 	} while (cur != root);
 	return NULL;
 }
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 2f8c5c9..b396eb0 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -189,7 +189,7 @@
 	int ret = 0;
 
 	th.t_trans_id = 0;
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 
 	if (logit) {
 		reiserfs_write_lock(s);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 58b2ded..bd4c727 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -524,7 +524,7 @@
 	 * referenced in convert_tail_for_hole() that may be called from
 	 * reiserfs_get_block()
 	 */
-	bh_result->b_size = (1 << inode->i_blkbits);
+	bh_result->b_size = i_blocksize(inode);
 
 	ret = reiserfs_get_block(inode, iblock, bh_result,
 				 create | GET_BLOCK_NO_DANGLE);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 17761c5..843fcd2 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -199,7 +199,8 @@
 
 	ret_dentry = d_splice_alias(inode, dentry);
 	dentry = ret_dentry ?: dentry;
-	update_derived_permission_lock(dentry);
+	if (!IS_ERR(dentry))
+		update_derived_permission_lock(dentry);
 out:
 	return ret_dentry;
 }
diff --git a/fs/stat.c b/fs/stat.c
index bc045c7..068fdbc 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -31,7 +31,7 @@
 	stat->atime = inode->i_atime;
 	stat->mtime = inode->i_mtime;
 	stat->ctime = inode->i_ctime;
-	stat->blksize = (1 << inode->i_blkbits);
+	stat->blksize = i_blocksize(inode);
 	stat->blocks = inode->i_blocks;
 }
 
@@ -454,6 +454,7 @@
 		inode->i_bytes -= 512;
 	}
 }
+EXPORT_SYMBOL(__inode_add_bytes);
 
 void inode_add_bytes(struct inode *inode, loff_t bytes)
 {
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index aad4640..129b18a 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1214,7 +1214,7 @@
 {
 	int err;
 	struct udf_inode_info *iinfo;
-	int bsize = 1 << inode->i_blkbits;
+	int bsize = i_blocksize(inode);
 
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 	      S_ISLNK(inode->i_mode)))
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 67e085d..a81b970 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -81,7 +81,8 @@
 			ufs_error (sb, "ufs_free_fragments",
 				   "bit already cleared for fragment %u", i);
 	}
-	
+
+	inode_sub_bytes(inode, count << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 	uspi->cs_total.cs_nffree += count;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -183,6 +184,7 @@
 			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
 		}
 		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 			ufs_clusteracct (sb, ucpi, blkno, 1);
 
@@ -494,6 +496,20 @@
 	return 0;
 }		
 
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+	unsigned size = frags * i_blocksize(inode);
+	spin_lock(&inode->i_lock);
+	__inode_add_bytes(inode, size);
+	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+		__inode_sub_bytes(inode, size);
+		spin_unlock(&inode->i_lock);
+		return false;
+	}
+	spin_unlock(&inode->i_lock);
+	return true;
+}
+
 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 			     unsigned oldcount, unsigned newcount)
 {
@@ -530,6 +546,9 @@
 	for (i = oldcount; i < newcount; i++)
 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			return 0;
+
+	if (!try_add_frags(inode, count))
+		return 0;
 	/*
 	 * Block can be extended
 	 */
@@ -647,6 +666,7 @@
 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 		i = uspi->s_fpb - count;
 
+		inode_sub_bytes(inode, i << uspi->s_fshift);
 		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
 		uspi->cs_total.cs_nffree += i;
 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@
 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
 	if (result == INVBLOCK)
 		return 0;
+	if (!try_add_frags(inode, count))
+		return 0;
 	for (i = 0; i < count; i++)
 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
 	
@@ -716,6 +738,8 @@
 		return INVBLOCK;
 	ucpi->c_rotor = result;
 gotit:
+	if (!try_add_frags(inode, uspi->s_fpb))
+		return 0;
 	blkno = ufs_fragstoblks(result);
 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
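try_add_frags() is the reason __inode_add_bytes() gains an EXPORT_SYMBOL in the fs/stat.c hunk above: UFS pre-accounts every allocation under inode->i_lock and rolls it back if i_blocks would no longer fit in 32 bits. Since i_blocks counts 512-byte units, the cast test stops the count from wrapping past 2^32 blocks (about 2 TiB of accounted data) before any bitmap is modified:

	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);	/* would overflow: undo */
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;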
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 190d64b..a2760a2 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@
 
 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
-				new_size, err, locked_page);
+				new_size - (lastfrag & uspi->s_fpbmask), err,
+				locked_page);
 	return tmp != 0;
 }
 
@@ -284,7 +285,7 @@
 			goal += uspi->s_fpb;
 	}
 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
-				goal, uspi->s_fpb, err, locked_page);
+				goal, nfrags, err, locked_page);
 
 	if (!tmp) {
 		*err = -ENOSPC;
@@ -402,7 +403,9 @@
 
 	if (!create) {
 		phys64 = ufs_frag_map(inode, offsets, depth);
-		goto out;
+		if (phys64)
+			map_bh(bh_result, sb, phys64 + frag);
+		return 0;
 	}
 
         /* This code entered only while writing ....? */
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f04ab23..351162f 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -746,6 +746,23 @@
 	return;
 }
 
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int bits = uspi->s_apbshift;
+	u64 res;
+
+	if (bits > 21)
+		res = ~0ULL;
+	else
+		res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+			(1LL << (3*bits));
+
+	if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+		return MAX_LFS_FILESIZE;
+	return res << uspi->s_bshift;
+}
+
 static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ufs_sb_info * sbi;
@@ -812,9 +829,8 @@
 	uspi->s_dirblksize = UFS_SECTOR_SIZE;
 	super_block_offset=UFS_SBLOCK;
 
-	/* Keep 2Gig file limit. Some UFS variants need to override 
-	   this but as I don't know which I'll let those in the know loosen
-	   the rules */
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
 	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
 	case UFS_MOUNT_UFSTYPE_44BSD:
 		UFSD("ufstype=44bsd\n");
@@ -1212,6 +1228,7 @@
 			    "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
 		uspi->s_maxsymlinklen = maxsymlen;
 	}
+	sb->s_maxbytes = ufs_max_bytes(sb);
 	sb->s_max_links = UFS_LINK_MAX;
 
 	inode = ufs_iget(sb, UFS_ROOTINO);
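ufs_max_bytes() replaces the old hard-coded 2 GiB assumption with the limit implied by the block-pointer geometry: UFS_NDADDR direct blocks plus one, two and three levels of indirection, each level multiplying by 2^s_apbshift addresses per block, capped at MAX_LFS_FILESIZE. Worked example assuming 4 KiB blocks and 4-byte block addresses (so bits = 10):

	/* 12 direct + 2^10 + 2^20 + 2^30 blocks ~= 2^30 blocks; at 4 KiB per
	 * block that is roughly 4 TiB of addressable file data. */
	res = UFS_NDADDR + (1LL << 10) + (1LL << 20) + (1LL << 30);
	limit = min_t(u64, res << 12, MAX_LFS_FILESIZE);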
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index b7fbf53..398019f 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -473,15 +473,19 @@
 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
 	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
 {
+	u8 mask;
 	switch (uspi->s_fpb) {
 	case 8:
 	    	return (*ubh_get_addr (ubh, begin + block) == 0xff);
 	case 4:
-		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+		mask = 0x0f << ((block & 0x01) << 2);
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
 	case 2:
-		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+		mask = 0x03 << ((block & 0x03) << 1);
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
 	case 1:
-		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+		mask = 0x01 << (block & 0x07);
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
 	}
 	return 0;	
 }
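The _ubh_isblockset_() fix is subtle: when several blocks share one bitmap byte, the old equality test only succeeded if the queried block's bits were the only bits set in that byte; masking first makes the answer independent of the neighbouring blocks. Worked example with s_fpb == 4, i.e. two blocks per byte:

	u8 byte = 0xff;				/* both blocks in this byte set */
	u8 mask = 0x0f << ((1 & 0x01) << 2);	/* block 1 -> high nibble, 0xf0 */

	/* old test:  byte == mask          -> 0xff == 0xf0 -> false (wrong) */
	/* new test: (byte & mask) == mask  -> 0xf0 == 0xf0 -> true  (right) */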
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 5a508b0..2a8cbd1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2208,8 +2208,10 @@
 		}
 		temp = xfs_bmap_worst_indlen(bma->ip, temp);
 		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
+		diff = (int)(temp + temp2 -
+			     (startblockval(PREV.br_startblock) -
+			      (bma->cur ?
+			       bma->cur->bc_private.b.allocated : 0)));
 		if (diff > 0) {
 			error = xfs_mod_fdblocks(bma->ip->i_mount,
 						 -((int64_t)diff), false);
@@ -2266,7 +2268,6 @@
 		temp = da_new;
 		if (bma->cur)
 			temp += bma->cur->bc_private.b.allocated;
-		ASSERT(temp <= da_old);
 		if (temp < da_old)
 			xfs_mod_fdblocks(bma->ip->i_mount,
 					(int64_t)(da_old - temp), false);
@@ -3964,7 +3965,7 @@
 {
 	struct xfs_trans	*tp = ap->tp;
 	struct xfs_mount	*mp = tp->t_mountp;
-	xfs_agblock_t		bno;
+	xfs_fsblock_t		bno;
 	struct xfs_alloc_arg	args;
 	int			error;
 
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2849d3f..91c6891 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4376,7 +4376,7 @@
 			xfs_btree_readahead_ptr(cur, ptr, 1);
 
 			/* save for the next iteration of the loop */
-			lptr = *ptr;
+			xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
 		}
 
 		/* for each buffer in the level */
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index ef9f6ea..699a51b 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -126,6 +126,7 @@
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff..e84af09 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,112 @@
 }
 #endif	/* DEBUG */
 
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+	struct xfs_inode		*ip)
+{
+	struct xfs_mount		*mp = ip->i_mount;
+	struct xfs_dir2_sf_hdr		*sfp;
+	struct xfs_dir2_sf_entry	*sfep;
+	struct xfs_dir2_sf_entry	*next_sfep;
+	char				*endp;
+	const struct xfs_dir_ops	*dops;
+	struct xfs_ifork		*ifp;
+	xfs_ino_t			ino;
+	int				i;
+	int				i8count;
+	int				offset;
+	int				size;
+	int				error;
+	__uint8_t			filetype;
+
+	ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+	/*
+	 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+	 * so we can only trust the mountpoint to have the right pointer.
+	 */
+	dops = xfs_dir_get_ops(mp, NULL);
+
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+	size = ifp->if_bytes;
+
+	/*
+	 * Give up if the directory is way too short.
+	 */
+	if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+	    size < xfs_dir2_sf_hdr_size(sfp->i8count))
+		return -EFSCORRUPTED;
+
+	endp = (char *)sfp + size;
+
+	/* Check .. entry */
+	ino = dops->sf_get_parent_ino(sfp);
+	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+	error = xfs_dir_ino_validate(mp, ino);
+	if (error)
+		return error;
+	offset = dops->data_first_offset;
+
+	/* Check all reported entries */
+	sfep = xfs_dir2_sf_firstentry(sfp);
+	for (i = 0; i < sfp->count; i++) {
+		/*
+		 * struct xfs_dir2_sf_entry has a variable length.
+		 * Check the fixed-offset parts of the structure are
+		 * within the data buffer.
+		 */
+		if (((char *)sfep + sizeof(*sfep)) >= endp)
+			return -EFSCORRUPTED;
+
+		/* Don't allow names with known bad length. */
+		if (sfep->namelen == 0)
+			return -EFSCORRUPTED;
+
+		/*
+		 * Check that the variable-length part of the structure is
+		 * within the data buffer.  The next entry starts after the
+		 * name component, so nextentry is an acceptable test.
+		 */
+		next_sfep = dops->sf_nextentry(sfp, sfep);
+		if (endp < (char *)next_sfep)
+			return -EFSCORRUPTED;
+
+		/* Check that the offsets always increase. */
+		if (xfs_dir2_sf_get_offset(sfep) < offset)
+			return -EFSCORRUPTED;
+
+		/* Check the inode number. */
+		ino = dops->sf_get_ino(sfp, sfep);
+		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+		error = xfs_dir_ino_validate(mp, ino);
+		if (error)
+			return error;
+
+		/* Check the file type. */
+		filetype = dops->sf_get_ftype(sfep);
+		if (filetype >= XFS_DIR3_FT_MAX)
+			return -EFSCORRUPTED;
+
+		offset = xfs_dir2_sf_get_offset(sfep) +
+				dops->data_entsize(sfep->namelen);
+
+		sfep = next_sfep;
+	}
+	if (i8count != sfp->i8count)
+		return -EFSCORRUPTED;
+	if ((void *)sfep != (void *)endp)
+		return -EFSCORRUPTED;
+
+	/* Make sure this whole thing ought to be in local format. */
+	if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	    (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+		return -EFSCORRUPTED;
+
+	return 0;
+}
+
 /*
  * Create a new (shortform) directory.
  */
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e07..8a37efe 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
 #include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -210,6 +212,16 @@
 	if (error)
 		return error;
 
+	/* Check inline dir contents. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    dip->di_format == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_dir2_sf_verify(ip);
+		if (error) {
+			xfs_idestroy_fork(ip, XFS_DATA_FORK);
+			return error;
+		}
+	}
+
 	if (xfs_is_reflink_inode(ip)) {
 		ASSERT(ip->i_cowfp == NULL);
 		xfs_ifork_init_cow(ip);
@@ -320,7 +332,6 @@
 	int		whichfork,
 	int		size)
 {
-
 	/*
 	 * If the size is unreasonable, then something
 	 * is wrong and we just bail out rather than crash in
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index b177ef3..82a38d8 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1629,13 +1629,28 @@
 	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
 		return -EOPNOTSUPP;
 
-	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	INIT_LIST_HEAD(&debris);
+
+	/*
+	 * In this first part, we use an empty transaction to gather up
+	 * all the leftover CoW extents so that we can subsequently
+	 * delete them.  The empty transaction is used to avoid
+	 * a buffer lock deadlock if there happens to be a loop in the
+	 * refcountbt because we're allowed to re-grab a buffer that is
+	 * already attached to our transaction.  When we're done
+	 * recording the CoW debris we cancel the (empty) transaction
+	 * and everything goes away cleanly.
+	 */
+	error = xfs_trans_alloc_empty(mp, &tp);
 	if (error)
 		return error;
-	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+	if (error)
+		goto out_trans;
+	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
 	/* Find all the leftover CoW staging extents. */
-	INIT_LIST_HEAD(&debris);
 	memset(&low, 0, sizeof(low));
 	memset(&high, 0, sizeof(high));
 	low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@
 	if (error)
 		goto out_cursor;
 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-	xfs_buf_relse(agbp);
+	xfs_trans_brelse(tp, agbp);
+	xfs_trans_cancel(tp);
 
 	/* Now iterate the list to free the leftovers */
-	list_for_each_entry(rr, &debris, rr_list) {
+	list_for_each_entry_safe(rr, n, &debris, rr_list) {
 		/* Set up transaction. */
 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 		if (error)
@@ -1676,8 +1692,16 @@
 		error = xfs_trans_commit(tp);
 		if (error)
 			goto out_free;
+
+		list_del(&rr->rr_list);
+		kmem_free(rr);
 	}
 
+	return error;
+out_defer:
+	xfs_defer_cancel(&dfops);
+out_trans:
+	xfs_trans_cancel(tp);
 out_free:
 	/* Free the leftover list */
 	list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@
 
 out_cursor:
 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-	xfs_buf_relse(agbp);
-	goto out_free;
-
-out_defer:
-	xfs_defer_cancel(&dfops);
-	xfs_trans_cancel(tp);
-	goto out_free;
+	xfs_trans_brelse(tp, agbp);
+	goto out_trans;
 }
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 7917f6e..d787c67 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -21,8 +21,20 @@
 /*
  * Components of space reservations.
  */
+
+/* Worst case number of rmaps that can be held in a block. */
 #define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)    \
 		(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
+
+/* Adding one rmap could split every level up to the top of the tree. */
+#define XFS_RMAPADD_SPACE_RES(mp) ((mp)->m_rmap_maxlevels)
+
+/* Blocks we might need to add "b" rmaps to a tree. */
+#define XFS_NRMAPADD_SPACE_RES(mp, b)\
+	(((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
+	  XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
+	  XFS_RMAPADD_SPACE_RES(mp))
+
 #define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)    \
 		(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
 #define	XFS_EXTENTADD_SPACE_RES(mp,w)	(XFS_BM_MAXLEVELS(mp,w) - 1)
@@ -30,13 +42,12 @@
 	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
 	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
 	  XFS_EXTENTADD_SPACE_RES(mp,w))
+
+/* Blocks we might need to add "b" mappings & rmappings to a file. */
 #define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
-	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
-	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
-	  XFS_EXTENTADD_SPACE_RES(mp,w) + \
-	 ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
-	  XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
-	  (mp)->m_rmap_maxlevels)
+	(XFS_NEXTENTADD_SPACE_RES((mp), (b), (w)) + \
+	 XFS_NRMAPADD_SPACE_RES((mp), (b)))
+
 #define	XFS_DAENTER_1B(mp,w)	\
 	((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
 #define	XFS_DAENTER_DBS(mp,w)	\
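(Worked example, not part of the patch, using hypothetical geometry values: if m_rmap_mxr[0] = 84, m_rmap_mnr[0] = 42 and m_rmap_maxlevels = 5, then XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) = 84 - 42 = 42, so XFS_NRMAPADD_SPACE_RES(mp, 100) reserves ceil(100 / 42) * 5 = 3 * 5 = 15 blocks; XFS_SWAP_RMAP_SPACE_RES(mp, 100, w) then simply adds the analogous bmbt term from XFS_NEXTENTADD_SPACE_RES(mp, 100, w) on top of that.)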
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0457abe..5789814 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -108,19 +108,19 @@
 	unsigned int		bsize;
 
 	ASSERT(bvec->bv_offset < PAGE_SIZE);
-	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
+	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
 	ASSERT(end < PAGE_SIZE);
-	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
+	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
 
 	bh = head = page_buffers(bvec->bv_page);
 
 	bsize = bh->b_size;
 	do {
+		if (off > end)
+			break;
 		next = bh->b_this_page;
 		if (off < bvec->bv_offset)
 			goto next_bh;
-		if (off > end)
-			break;
 		bh->b_end_io(bh, !error);
 next_bh:
 		off += bsize;
@@ -349,7 +349,7 @@
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			count = 1 << inode->i_blkbits;
+	ssize_t			count = i_blocksize(inode);
 	xfs_fileoff_t		offset_fsb, end_fsb;
 	int			error = 0;
 	int			bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -759,7 +759,7 @@
 			break;
 		}
 next_buffer:
-		offset += 1 << inode->i_blkbits;
+		offset += i_blocksize(inode);
 
 	} while ((bh = bh->b_this_page) != head);
 
@@ -847,7 +847,7 @@
 	LIST_HEAD(submit_list);
 	struct xfs_ioend	*ioend, *next;
 	struct buffer_head	*bh, *head;
-	ssize_t			len = 1 << inode->i_blkbits;
+	ssize_t			len = i_blocksize(inode);
 	int			error = 0;
 	int			count = 0;
 	int			uptodate = 1;
@@ -1250,7 +1250,7 @@
 	    offset + mapping_size >= i_size_read(inode)) {
 		/* limit mapping to block that spans EOF */
 		mapping_size = roundup_64(i_size_read(inode) - offset,
-					  1 << inode->i_blkbits);
+					  i_blocksize(inode));
 	}
 	if (mapping_size > LONG_MAX)
 		mapping_size = LONG_MAX;
@@ -1286,7 +1286,7 @@
 		return -EIO;
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
-	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+	ASSERT(bh_result->b_size >= i_blocksize(inode));
 	size = bh_result->b_size;
 
 	if (!create && offset >= i_size_read(inode))
@@ -1634,7 +1634,7 @@
 			if (offset < end_offset)
 				set_buffer_dirty(bh);
 			bh = bh->b_this_page;
-			offset += 1 << inode->i_blkbits;
+			offset += i_blocksize(inode);
 		} while (bh != head);
 	}
 	/*
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9bf57c7..c4b90e7 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -34,6 +34,8 @@
 #include "xfs_bmap.h"
 #include "xfs_icache.h"
 #include "xfs_trace.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
 
 
 kmem_zone_t	*xfs_bui_zone;
@@ -446,7 +448,8 @@
 		return -EIO;
 	}
 
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
 	if (error)
 		return error;
 	budp = xfs_trans_get_bud(tp, buip);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5328ecd..87b495e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -588,9 +588,13 @@
 		}
 		break;
 	default:
+		/* Local format data forks report no extents. */
+		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+			bmv->bmv_entries = 0;
+			return 0;
+		}
 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
 			return -EINVAL;
 
 		if (xfs_get_extsz_hint(ip) ||
@@ -718,7 +722,7 @@
 			 * extents.
 			 */
 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
-			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+			    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
                         if (map[i].br_startblock == HOLESTARTBLOCK &&
@@ -911,9 +915,9 @@
 }
 
 /*
- * This is called by xfs_inactive to free any blocks beyond eof
- * when the link count isn't zero and by xfs_dm_punch_hole() when
- * punching a hole to EOF.
+ * This is called to free any blocks beyond eof. The caller must hold
+ * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
+ * reference to the inode.
  */
 int
 xfs_free_eofblocks(
@@ -928,8 +932,6 @@
 	struct xfs_bmbt_irec	imap;
 	struct xfs_mount	*mp = ip->i_mount;
 
-	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
 	/*
 	 * Figure out if there are any blocks beyond the end
 	 * of the file.  If not, then there is nothing to do.
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d7a67d7..1626927 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -96,12 +96,16 @@
 xfs_buf_ioacct_inc(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+	if (bp->b_flags & XBF_NO_IOACCT)
 		return;
 
 	ASSERT(bp->b_flags & XBF_ASYNC);
-	bp->b_flags |= _XBF_IN_FLIGHT;
-	percpu_counter_inc(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_inc(&bp->b_target->bt_io_count);
+	}
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -109,14 +113,24 @@
  * freed and unaccount from the buftarg.
  */
 static inline void
+__xfs_buf_ioacct_dec(
+	struct xfs_buf	*bp)
+{
+	ASSERT(spin_is_locked(&bp->b_lock));
+
+	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_dec(&bp->b_target->bt_io_count);
+	}
+}
+
+static inline void
 xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	if (!(bp->b_flags & _XBF_IN_FLIGHT))
-		return;
-
-	bp->b_flags &= ~_XBF_IN_FLIGHT;
-	percpu_counter_dec(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -148,9 +162,9 @@
 	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 	 * status now to preserve accounting consistency.
 	 */
-	xfs_buf_ioacct_dec(bp);
-
 	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -953,12 +967,12 @@
 		 * ensures the decrement occurs only once per-buf.
 		 */
 		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-			xfs_buf_ioacct_dec(bp);
+			__xfs_buf_ioacct_dec(bp);
 		goto out_unlock;
 	}
 
 	/* the last reference has been dropped ... */
-	xfs_buf_ioacct_dec(bp);
+	__xfs_buf_ioacct_dec(bp);
 	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU take a new reference to the
@@ -1052,6 +1066,8 @@
 xfs_buf_unlock(
 	struct xfs_buf		*bp)
 {
+	ASSERT(xfs_buf_islocked(bp));
+
 	XB_CLEAR_OWNER(bp);
 	up(&bp->b_sema);
 
@@ -1790,6 +1806,28 @@
 }
 
 /*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+	struct list_head	*list)
+{
+	struct xfs_buf		*bp;
+
+	while (!list_empty(list)) {
+		bp = list_first_entry(list, struct xfs_buf, b_list);
+
+		xfs_buf_lock(bp);
+		bp->b_flags &= ~_XBF_DELWRI_Q;
+		list_del_init(&bp->b_list);
+		xfs_buf_relse(bp);
+	}
+}
+
+/*
  * Add a buffer to the delayed write list.
  *
  * This queues a buffer for writeout if it hasn't already been.  Note that
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 1c2e52b..ad514a8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -63,7 +63,6 @@
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT	 (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -83,14 +82,14 @@
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }, \
-	{ _XBF_IN_FLIGHT,	"IN_FLIGHT" }
+	{ _XBF_COMPOUND,	"COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
@@ -330,6 +329,7 @@
 extern void xfs_buf_stale(struct xfs_buf *bp);
 
 /* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
 extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
 extern int xfs_buf_delwri_submit(struct list_head *);
 extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 2981698..eba6316 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@
 	struct xfs_da_geometry	*geo = args->geo;
 
 	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-	/*
-	 * Give up if the directory is way too short.
-	 */
-	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
-		return -EIO;
-	}
-
 	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
 	ASSERT(dp->i_df.if_u1.if_data != NULL);
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-	if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
-		return -EFSCORRUPTED;
-
 	/*
 	 * If the block number in the offset is out of range, we're done.
 	 */
@@ -405,6 +394,7 @@
 
 	/*
 	 * Do we need more readahead?
+	 * Each loop tries to process one full dir block; last may be partial.
 	 */
 	blk_start_plug(&plug);
 	for (mip->ra_index = mip->ra_offset = i = 0;
@@ -415,7 +405,8 @@
 		 * Read-ahead a contiguous directory block.
 		 */
 		if (i > mip->ra_current &&
-		    map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+		    (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+		    geo->fsbcount) {
 			xfs_dir3_data_readahead(dp,
 				map[mip->ra_index].br_startoff + mip->ra_offset,
 				XFS_FSB_TO_DADDR(dp->i_mount,
@@ -436,14 +427,19 @@
 		}
 
 		/*
-		 * Advance offset through the mapping table.
+		 * Advance offset through the mapping table, processing a full
+		 * dir block even if it is fragmented into several extents.
+		 * But stop if we have consumed all valid mappings, even if
+		 * it's not yet a full directory block.
 		 */
-		for (j = 0; j < geo->fsbcount; j += length ) {
+		for (j = 0;
+		     j < geo->fsbcount && mip->ra_index < mip->map_valid;
+		     j += length ) {
 			/*
 			 * The rest of this extent but not more than a dir
 			 * block.
 			 */
-			length = min_t(int, geo->fsbcount,
+			length = min_t(int, geo->fsbcount - j,
 					map[mip->ra_index].br_blockcount -
 							mip->ra_offset);
 			mip->ra_offset += length;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1209ad2..df206cf 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -823,7 +823,7 @@
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+		unsigned int blksize_mask = i_blocksize(inode) - 1;
 
 		if (offset & blksize_mask || len & blksize_mask) {
 			error = -EINVAL;
@@ -845,7 +845,7 @@
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
-		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+		unsigned int blksize_mask = i_blocksize(inode) - 1;
 
 		new_size = i_size_read(inode) + len;
 		if (offset & blksize_mask || len & blksize_mask) {
@@ -1130,13 +1130,13 @@
 
 	index = startoff >> PAGE_SHIFT;
 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-	end = endoff >> PAGE_SHIFT;
+	end = (endoff - 1) >> PAGE_SHIFT;
 	do {
 		int		want;
 		unsigned	nr_pages;
 		unsigned int	i;
 
-		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  want);
 		/*
@@ -1163,17 +1163,6 @@
 			break;
 		}
 
-		/*
-		 * At lease we found one page.  If this is the first time we
-		 * step into the loop, and if the first page index offset is
-		 * greater than the given search offset, a hole was found.
-		 */
-		if (type == HOLE_OFF && lastoff == startoff &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = true;
-			break;
-		}
-
 		for (i = 0; i < nr_pages; i++) {
 			struct page	*page = pvec.pages[i];
 			loff_t		b_offset;
@@ -1185,18 +1174,18 @@
 			 * file mapping. However, page->index will not change
 			 * because we have a reference on the page.
 			 *
-			 * Searching done if the page index is out of range.
-			 * If the current offset is not reaches the end of
-			 * the specified search range, there should be a hole
-			 * between them.
+			 * If current page offset is beyond where we've ended,
+			 * we've found a hole.
 			 */
-			if (page->index > end) {
-				if (type == HOLE_OFF && lastoff < endoff) {
-					*offset = lastoff;
-					found = true;
-				}
+			if (type == HOLE_OFF && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
+				found = true;
+				*offset = lastoff;
 				goto out;
 			}
+			/* Searching done if the page index is out of range. */
+			if (page->index > end)
+				goto out;
 
 			lock_page(page);
 			/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3fb1f3f..74304b6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -264,6 +264,22 @@
 	xfs_perag_clear_reclaim_tag(pag);
 }
 
+static void
+xfs_inew_wait(
+	struct xfs_inode	*ip)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+	do {
+		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		if (!xfs_iflags_test(ip, XFS_INEW))
+			break;
+		schedule();
+	} while (true);
+	finish_wait(wq, &wait.wait);
+}
+
 /*
  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
  * part of the structure. This is made more complex by the fact we store
@@ -368,14 +384,17 @@
 
 		error = xfs_reinit_inode(mp, inode);
 		if (error) {
+			bool wake;
 			/*
 			 * Re-initializing the inode failed, and we are in deep
 			 * trouble.  Try to re-add it to the reclaim list.
 			 */
 			rcu_read_lock();
 			spin_lock(&ip->i_flags_lock);
-
+			wake = !!__xfs_iflags_test(ip, XFS_INEW);
 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+			if (wake)
+				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 			trace_xfs_iget_reclaim_fail(ip);
 			goto out_error;
@@ -625,9 +644,11 @@
 
 STATIC int
 xfs_inode_ag_walk_grab(
-	struct xfs_inode	*ip)
+	struct xfs_inode	*ip,
+	int			flags)
 {
 	struct inode		*inode = VFS_I(ip);
+	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);
 
 	ASSERT(rcu_read_lock_held());
 
@@ -645,7 +666,8 @@
 		goto out_unlock_noent;
 
 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
 		goto out_unlock_noent;
 	spin_unlock(&ip->i_flags_lock);
 
@@ -673,7 +695,8 @@
 					   void *args),
 	int			flags,
 	void			*args,
-	int			tag)
+	int			tag,
+	int			iter_flags)
 {
 	uint32_t		first_index;
 	int			last_error = 0;
@@ -715,7 +738,7 @@
 		for (i = 0; i < nr_found; i++) {
 			struct xfs_inode *ip = batch[i];
 
-			if (done || xfs_inode_ag_walk_grab(ip))
+			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
 				batch[i] = NULL;
 
 			/*
@@ -743,6 +766,9 @@
 		for (i = 0; i < nr_found; i++) {
 			if (!batch[i])
 				continue;
+			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+			    xfs_iflags_test(batch[i], XFS_INEW))
+				xfs_inew_wait(batch[i]);
 			error = execute(batch[i], flags, args);
 			IRELE(batch[i]);
 			if (error == -EAGAIN) {
@@ -822,12 +848,13 @@
 }
 
 int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
 	struct xfs_mount	*mp,
 	int			(*execute)(struct xfs_inode *ip, int flags,
 					   void *args),
 	int			flags,
-	void			*args)
+	void			*args,
+	int			iter_flags)
 {
 	struct xfs_perag	*pag;
 	int			error = 0;
@@ -837,7 +864,8 @@
 	ag = 0;
 	while ((pag = xfs_perag_get(mp, ag))) {
 		ag = pag->pag_agno + 1;
-		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+					  iter_flags);
 		xfs_perag_put(pag);
 		if (error) {
 			last_error = error;
@@ -849,6 +877,17 @@
 }
 
 int
+xfs_inode_ag_iterator(
+	struct xfs_mount	*mp,
+	int			(*execute)(struct xfs_inode *ip, int flags,
+					   void *args),
+	int			flags,
+	void			*args)
+{
+	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
 xfs_inode_ag_iterator_tag(
 	struct xfs_mount	*mp,
 	int			(*execute)(struct xfs_inode *ip, int flags,
@@ -865,7 +904,8 @@
 	ag = 0;
 	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
 		ag = pag->pag_agno + 1;
-		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+					  0);
 		xfs_perag_put(pag);
 		if (error) {
 			last_error = error;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 8a7c849..9183f77 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@
 #define XFS_IGET_UNTRUSTED	0x2
 #define XFS_IGET_DONTCACHE	0x4
 
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT	0x1	/* wait on new inodes */
+
 int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
 	     uint flags, uint lock_flags, xfs_inode_t **ipp);
 
@@ -79,6 +84,9 @@
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
 	int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+	int (*execute)(struct xfs_inode *ip, int flags, void *args),
+	int flags, void *args, int iter_flags);
 int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
 	int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e50636c..7a0b4ee 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -1914,12 +1915,13 @@
 		 * force is true because we are evicting an inode from the
 		 * cache. Post-eof blocks must be freed, lest we end up with
 		 * broken free space accounting.
+		 *
+		 * Note: don't bother with iolock here since lockdep complains
+		 * about acquiring it in reclaim context. We have the only
+		 * reference to the inode at this point anyway.
 		 */
-		if (xfs_can_free_eofblocks(ip, true)) {
-			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		if (xfs_can_free_eofblocks(ip, true))
 			xfs_free_eofblocks(ip);
-			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-		}
 
 		return;
 	}
@@ -3562,6 +3564,12 @@
 	if (ip->i_d.di_version < 3)
 		ip->i_d.di_flushiter++;
 
+	/* Check the inline directory data. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+	    xfs_dir2_sf_verify(ip))
+		goto corrupt_out;
+
 	/*
 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
 	 * copy out the core of the inode, because if the inode is dirty at all
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 71e8a81..c038f6e 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -217,7 +217,8 @@
 #define XFS_IRECLAIM		(1 << 0) /* started reclaiming this inode */
 #define XFS_ISTALE		(1 << 1) /* inode has been staled */
 #define XFS_IRECLAIMABLE	(1 << 2) /* inode can be reclaimed */
-#define XFS_INEW		(1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT		3	 /* inode has just been allocated */
+#define XFS_INEW		(1 << __XFS_INEW_BIT)
 #define XFS_ITRUNCATED		(1 << 5) /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE	(1 << 6) /* dirty release already seen */
 #define __XFS_IFLOCK_BIT	7	 /* inode is being flushed right now */
@@ -467,6 +468,7 @@
 	xfs_iflags_clear(ip, XFS_INEW);
 	barrier();
 	unlock_new_inode(VFS_I(ip));
+	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 }
 
 static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a391975..73cfc71 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1542,10 +1542,11 @@
 	unsigned int		cmd,
 	void			__user *arg)
 {
-	struct getbmapx		bmx;
+	struct getbmapx		bmx = { 0 };
 	int			error;
 
-	if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+	/* struct getbmap is a strict subset of struct getbmapx. */
+	if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
 		return -EFAULT;
 
 	if (bmx.bmv_count < 2)
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 3605624..65740d1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1151,10 +1151,10 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	lockmode = xfs_ilock_data_map_shared(ip);
+	lockmode = xfs_ilock_attr_map_shared(ip);
 
 	/* if there are no attribute fork or extents, return ENOENT */
-	if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
 		error = -ENOENT;
 		goto out_unlock;
 	}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 66e8817..d8a77db 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -585,7 +585,7 @@
 		return error;
 
 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
-	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
 	do {
 		struct xfs_inobt_rec_incore	r;
 		int				stat;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 4017aa9..b57ab34 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1293,7 +1293,7 @@
 xfs_log_work_queue(
 	struct xfs_mount        *mp)
 {
-	queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
+	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
 				msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1bf878b..5415f90 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -183,6 +183,7 @@
 	struct workqueue_struct	*m_reclaim_workqueue;
 	struct workqueue_struct	*m_log_workqueue;
 	struct workqueue_struct *m_eofblocks_workqueue;
+	struct workqueue_struct	*m_sync_workqueue;
 
 	/*
 	 * Generation of the filesysyem layout.  This is incremented by each
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b669b12..8b9a9f1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1384,12 +1384,7 @@
 	mp->m_qflags |= flags;
 
  error_return:
-	while (!list_empty(&buffer_list)) {
-		struct xfs_buf *bp =
-			list_first_entry(&buffer_list, struct xfs_buf, b_list);
-		list_del_init(&bp->b_list);
-		xfs_buf_relse(bp);
-	}
+	xfs_buf_delwri_cancel(&buffer_list);
 
 	if (error) {
 		xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 475a388..9cb5c38 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -759,5 +759,6 @@
 	uint		 flags)
 {
 	ASSERT(mp->m_quotainfo);
-	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+				    XFS_AGITER_INEW_WAIT);
 }
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 2252f16..29a75ec 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -736,8 +736,22 @@
 	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
 	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
 
-	/* Start a rolling transaction to switch the mappings */
-	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+	/*
+	 * Start a rolling transaction to switch the mappings.  We're
+	 * unlikely ever to have to remap 16T worth of single-block
+	 * extents, so just cap the worst case extent count to 2^32-1.
+	 * Stick a warning in just in case, and avoid 64-bit division.
+	 */
+	BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
+	if (end_fsb - offset_fsb > UINT_MAX) {
+		error = -EFSCORRUPTED;
+		xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
+		ASSERT(0);
+		goto out;
+	}
+	resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
+			(unsigned int)(end_fsb - offset_fsb),
+			XFS_DATA_FORK);
 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
 			resblks, 0, 0, &tp);
 	if (error)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index dbbd3f1..882fb85 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -872,8 +872,15 @@
 	if (!mp->m_eofblocks_workqueue)
 		goto out_destroy_log;
 
+	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
+					       mp->m_fsname);
+	if (!mp->m_sync_workqueue)
+		goto out_destroy_eofb;
+
 	return 0;
 
+out_destroy_eofb:
+	destroy_workqueue(mp->m_eofblocks_workqueue);
 out_destroy_log:
 	destroy_workqueue(mp->m_log_workqueue);
 out_destroy_reclaim:
@@ -894,6 +901,7 @@
 xfs_destroy_mount_workqueues(
 	struct xfs_mount	*mp)
 {
+	destroy_workqueue(mp->m_sync_workqueue);
 	destroy_workqueue(mp->m_eofblocks_workqueue);
 	destroy_workqueue(mp->m_log_workqueue);
 	destroy_workqueue(mp->m_reclaim_workqueue);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 70f42ea..a280e12 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -263,6 +263,28 @@
 }
 
 /*
+ * Create an empty transaction with no reservation.  This is a defensive
+ * mechanism for routines that query metadata without actually modifying
+ * them -- if the metadata being queried is somehow cross-linked (think a
+ * btree block pointer that points higher in the tree), we risk deadlock.
+ * However, blocks grabbed as part of a transaction can be re-grabbed.
+ * The verifiers will notice the corrupt block and the operation will fail
+ * back to userspace without deadlocking.
+ *
+ * Note the zero-length reservation; this transaction MUST be cancelled
+ * without any dirty data.
+ */
+int
+xfs_trans_alloc_empty(
+	struct xfs_mount		*mp,
+	struct xfs_trans		**tpp)
+{
+	struct xfs_trans_res		resv = {0};
+
+	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
+}
+
+/*
  * Record the indicated change to the given field for application
  * to the file system's superblock when the transaction commits.
  * For now, just store the change in the transaction structure.
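(Illustrative sketch, not part of the patch; the caller name is hypothetical. A read-only query path might use the new helper roughly like this: allocate the empty transaction, read buffers through it so re-grabs cannot deadlock, then cancel it without dirtying anything.)

static int
xfs_example_query(			/* hypothetical caller */
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	/* Zero reservation; this transaction must never commit dirty. */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	/*
	 * Read metadata buffers with xfs_trans_read_buf() and friends;
	 * a buffer already attached to @tp may be re-grabbed safely.
	 */

	/* Nothing was dirtied, so cancelling is the correct teardown. */
	xfs_trans_cancel(tp);
	return 0;
}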
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 61b7fbd..98024cb 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -159,6 +159,8 @@
 int		xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
 			uint blocks, uint rtextents, uint flags,
 			struct xfs_trans **tpp);
+int		xfs_trans_alloc_empty(struct xfs_mount *mp,
+			struct xfs_trans **tpp);
 void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
 struct xfs_buf	*xfs_trans_get_buf_map(struct xfs_trans *tp,
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index c1350ce..75ddcfa 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -34,4 +34,12 @@
 #define PCLK_SRC_MUX_1_CLK	15
 #define PCLK_SRC_1_CLK		16
 #define PCLK_MUX_1_CLK		17
+
+/* DP PLL clocks */
+#define	DP_VCO_CLK	0
+#define	DP_LINK_CLK_DIVSEL_TEN	1
+#define	DP_VCO_DIVIDED_TWO_CLK_SRC	2
+#define	DP_VCO_DIVIDED_FOUR_CLK_SRC	3
+#define	DP_VCO_DIVIDED_SIX_CLK_SRC	4
+#define	DP_VCO_DIVIDED_CLK_SRC_MUX	5
 #endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index 0d9d9f6..e16b69a 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -31,73 +31,76 @@
 #define CAM_CC_CSI1PHYTIMER_CLK_SRC				14
 #define CAM_CC_CSI2PHYTIMER_CLK					15
 #define CAM_CC_CSI2PHYTIMER_CLK_SRC				16
-#define CAM_CC_CSIPHY0_CLK					17
-#define CAM_CC_CSIPHY1_CLK					18
-#define CAM_CC_CSIPHY2_CLK					19
-#define CAM_CC_FAST_AHB_CLK_SRC					20
-#define CAM_CC_FD_CORE_CLK					21
-#define CAM_CC_FD_CORE_CLK_SRC					22
-#define CAM_CC_FD_CORE_UAR_CLK					23
-#define CAM_CC_ICP_APB_CLK					24
-#define CAM_CC_ICP_ATB_CLK					25
-#define CAM_CC_ICP_CLK						26
-#define CAM_CC_ICP_CLK_SRC					27
-#define CAM_CC_ICP_CTI_CLK					28
-#define CAM_CC_ICP_TS_CLK					29
-#define CAM_CC_IFE_0_AXI_CLK					30
-#define CAM_CC_IFE_0_CLK					31
-#define CAM_CC_IFE_0_CLK_SRC					32
-#define CAM_CC_IFE_0_CPHY_RX_CLK				33
-#define CAM_CC_IFE_0_CSID_CLK					34
-#define CAM_CC_IFE_0_CSID_CLK_SRC				35
-#define CAM_CC_IFE_0_DSP_CLK					36
-#define CAM_CC_IFE_1_AXI_CLK					37
-#define CAM_CC_IFE_1_CLK					38
-#define CAM_CC_IFE_1_CLK_SRC					39
-#define CAM_CC_IFE_1_CPHY_RX_CLK				40
-#define CAM_CC_IFE_1_CSID_CLK					41
-#define CAM_CC_IFE_1_CSID_CLK_SRC				42
-#define CAM_CC_IFE_1_DSP_CLK					43
-#define CAM_CC_IFE_LITE_CLK					44
-#define CAM_CC_IFE_LITE_CLK_SRC					45
-#define CAM_CC_IFE_LITE_CPHY_RX_CLK				46
-#define CAM_CC_IFE_LITE_CSID_CLK				47
-#define CAM_CC_IFE_LITE_CSID_CLK_SRC				48
-#define CAM_CC_IPE_0_AHB_CLK					49
-#define CAM_CC_IPE_0_AREG_CLK					50
-#define CAM_CC_IPE_0_AXI_CLK					51
-#define CAM_CC_IPE_0_CLK					52
-#define CAM_CC_IPE_0_CLK_SRC					53
-#define CAM_CC_IPE_1_AHB_CLK					54
-#define CAM_CC_IPE_1_AREG_CLK					55
-#define CAM_CC_IPE_1_AXI_CLK					56
-#define CAM_CC_IPE_1_CLK					57
-#define CAM_CC_IPE_1_CLK_SRC					58
-#define CAM_CC_JPEG_CLK						59
-#define CAM_CC_JPEG_CLK_SRC					60
-#define CAM_CC_LRME_CLK						61
-#define CAM_CC_LRME_CLK_SRC					62
-#define CAM_CC_MCLK0_CLK					63
-#define CAM_CC_MCLK0_CLK_SRC					64
-#define CAM_CC_MCLK1_CLK					65
-#define CAM_CC_MCLK1_CLK_SRC					66
-#define CAM_CC_MCLK2_CLK					67
-#define CAM_CC_MCLK2_CLK_SRC					68
-#define CAM_CC_MCLK3_CLK					69
-#define CAM_CC_MCLK3_CLK_SRC					70
-#define CAM_CC_PLL0						71
-#define CAM_CC_PLL0_OUT_EVEN					72
-#define CAM_CC_PLL1						73
-#define CAM_CC_PLL1_OUT_EVEN					74
-#define CAM_CC_PLL2						75
-#define CAM_CC_PLL2_OUT_EVEN					76
-#define CAM_CC_PLL2_OUT_ODD					77
-#define CAM_CC_PLL3						78
-#define CAM_CC_PLL3_OUT_EVEN					79
-#define CAM_CC_PLL_TEST_CLK					80
-#define CAM_CC_SLOW_AHB_CLK_SRC					81
-#define CAM_CC_SOC_AHB_CLK					82
-#define CAM_CC_SYS_TMR_CLK					83
+#define CAM_CC_CSI3PHYTIMER_CLK					17
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC				18
+#define CAM_CC_CSIPHY0_CLK					19
+#define CAM_CC_CSIPHY1_CLK					20
+#define CAM_CC_CSIPHY2_CLK					21
+#define CAM_CC_CSIPHY3_CLK					22
+#define CAM_CC_FAST_AHB_CLK_SRC					23
+#define CAM_CC_FD_CORE_CLK					24
+#define CAM_CC_FD_CORE_CLK_SRC					25
+#define CAM_CC_FD_CORE_UAR_CLK					26
+#define CAM_CC_ICP_APB_CLK					27
+#define CAM_CC_ICP_ATB_CLK					28
+#define CAM_CC_ICP_CLK						29
+#define CAM_CC_ICP_CLK_SRC					30
+#define CAM_CC_ICP_CTI_CLK					31
+#define CAM_CC_ICP_TS_CLK					32
+#define CAM_CC_IFE_0_AXI_CLK					33
+#define CAM_CC_IFE_0_CLK					34
+#define CAM_CC_IFE_0_CLK_SRC					35
+#define CAM_CC_IFE_0_CPHY_RX_CLK				36
+#define CAM_CC_IFE_0_CSID_CLK					37
+#define CAM_CC_IFE_0_CSID_CLK_SRC				38
+#define CAM_CC_IFE_0_DSP_CLK					39
+#define CAM_CC_IFE_1_AXI_CLK					40
+#define CAM_CC_IFE_1_CLK					41
+#define CAM_CC_IFE_1_CLK_SRC					42
+#define CAM_CC_IFE_1_CPHY_RX_CLK				43
+#define CAM_CC_IFE_1_CSID_CLK					44
+#define CAM_CC_IFE_1_CSID_CLK_SRC				45
+#define CAM_CC_IFE_1_DSP_CLK					46
+#define CAM_CC_IFE_LITE_CLK					47
+#define CAM_CC_IFE_LITE_CLK_SRC					48
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK				49
+#define CAM_CC_IFE_LITE_CSID_CLK				50
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC				51
+#define CAM_CC_IPE_0_AHB_CLK					52
+#define CAM_CC_IPE_0_AREG_CLK					53
+#define CAM_CC_IPE_0_AXI_CLK					54
+#define CAM_CC_IPE_0_CLK					55
+#define CAM_CC_IPE_0_CLK_SRC					56
+#define CAM_CC_IPE_1_AHB_CLK					57
+#define CAM_CC_IPE_1_AREG_CLK					58
+#define CAM_CC_IPE_1_AXI_CLK					59
+#define CAM_CC_IPE_1_CLK					60
+#define CAM_CC_IPE_1_CLK_SRC					61
+#define CAM_CC_JPEG_CLK						62
+#define CAM_CC_JPEG_CLK_SRC					63
+#define CAM_CC_LRME_CLK						64
+#define CAM_CC_LRME_CLK_SRC					65
+#define CAM_CC_MCLK0_CLK					66
+#define CAM_CC_MCLK0_CLK_SRC					67
+#define CAM_CC_MCLK1_CLK					68
+#define CAM_CC_MCLK1_CLK_SRC					69
+#define CAM_CC_MCLK2_CLK					70
+#define CAM_CC_MCLK2_CLK_SRC					71
+#define CAM_CC_MCLK3_CLK					72
+#define CAM_CC_MCLK3_CLK_SRC					73
+#define CAM_CC_PLL0						74
+#define CAM_CC_PLL0_OUT_EVEN					75
+#define CAM_CC_PLL1						76
+#define CAM_CC_PLL1_OUT_EVEN					77
+#define CAM_CC_PLL2						78
+#define CAM_CC_PLL2_OUT_EVEN					79
+#define CAM_CC_PLL2_OUT_ODD					80
+#define CAM_CC_PLL3						81
+#define CAM_CC_PLL3_OUT_EVEN					82
+#define CAM_CC_PLL_TEST_CLK					83
+#define CAM_CC_SLOW_AHB_CLK_SRC					84
+#define CAM_CC_SOC_AHB_CLK					85
+#define CAM_CC_SYS_TMR_CLK					86
 
 #define TITAN_CAM_CC_BPS_BCR					0
 #define TITAN_CAM_CC_CAMNOC_BCR					1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 115b62f..f6f4bc3 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -196,6 +196,12 @@
 #define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK				178
 #define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK				179
 #define GCC_GPU_IREF_CLK					180
+#define GCC_SDCC1_AHB_CLK					181
+#define GCC_SDCC1_APPS_CLK					182
+#define GCC_SDCC1_ICE_CORE_CLK					183
+#define GCC_SDCC1_APPS_CLK_SRC					184
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				185
+
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
@@ -225,6 +231,7 @@
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR				24
 #define GCC_PCIE_0_PHY_BCR					25
 #define GCC_PCIE_1_PHY_BCR					26
+#define GCC_SDCC1_BCR						27
 
 /* Dummy clocks for rate measurement */
 #define MEASURE_ONLY_SNOC_CLK					0
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index be2210c..9d52d2e 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -43,6 +43,7 @@
 #define	MSM_BUS_FAB_MC_VIRT 6151
 #define	MSM_BUS_FAB_MEM_NOC 6152
 #define	MSM_BUS_FAB_IPA_VIRT 6153
+#define	MSM_BUS_FAB_CAMNOC_VIRT 6154
 
 #define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
 #define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -236,7 +237,7 @@
 #define	MSM_BUS_MASTER_MNOC_SF_MEM_NOC 133
 #define	MSM_BUS_MASTER_SNOC_GC_MEM_NOC 134
 #define	MSM_BUS_MASTER_SNOC_SF_MEM_NOC 135
-#define	MSM_BUS_MASTER_CAMNOC_HF 136
+#define	MSM_BUS_MASTER_CAMNOC_HF0 136
 #define	MSM_BUS_MASTER_CAMNOC_SF 137
 #define	MSM_BUS_MASTER_VIDEO_PROC 138
 #define	MSM_BUS_MASTER_GNOC_SNOC 139
@@ -245,7 +246,11 @@
 #define	MSM_BUS_MASTER_MEM_NOC_SNOC 142
 #define	MSM_BUS_MASTER_IPA_CORE 143
 #define	MSM_BUS_MASTER_ALC 144
-#define	MSM_BUS_MASTER_MASTER_LAST 145
+#define	MSM_BUS_MASTER_CAMNOC_HF1 145
+#define	MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP 146
+#define	MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
+#define	MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
+#define	MSM_BUS_MASTER_MASTER_LAST 149
 
 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -584,7 +589,8 @@
 #define	MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
 #define	MSM_BUS_SLAVE_MEM_NOC_SNOC 776
 #define	MSM_BUS_SLAVE_IPA 777
-#define	MSM_BUS_SLAVE_LAST 778
+#define	MSM_BUS_SLAVE_CAMNOC_UNCOMP 778
+#define	MSM_BUS_SLAVE_LAST 779
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index b5e6b00..edd2a15 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -12,6 +12,7 @@
 
 /* On cooling devices upper and lower limits */
 #define THERMAL_NO_LIMIT		(~0)
+#define THERMAL_MAX_LIMIT		(THERMAL_NO_LIMIT - 1)
 
 #endif
 
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index ec80d0c..ace92fc 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -1,7 +1,6 @@
 #ifndef _BPF_CGROUP_H
 #define _BPF_CGROUP_H
 
-#include <linux/bpf.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/bpf.h>
 
@@ -22,20 +21,19 @@
 	 */
 	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
 	struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+	bool disallow_override[MAX_BPF_ATTACH_TYPE];
 };
 
 void cgroup_bpf_put(struct cgroup *cgrp);
 void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
 
-void __cgroup_bpf_update(struct cgroup *cgrp,
-			 struct cgroup *parent,
-			 struct bpf_prog *prog,
-			 enum bpf_attach_type type);
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool overridable);
 
 /* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
-void cgroup_bpf_update(struct cgroup *cgrp,
-		       struct bpf_prog *prog,
-		       enum bpf_attach_type type);
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable);
 
 int __cgroup_bpf_run_filter(struct sock *sk,
 			    struct sk_buff *skb,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index b008a33..c5a8afd 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -47,6 +47,7 @@
 	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
 	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
 	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
+	CSS_DYING	= (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 0353461..3b242a3 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -344,6 +344,26 @@
 }
 
 /**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
  * css_put - put a css reference
  * @css: target css
  *
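(Hedged sketch of the intended use; the function below is hypothetical. A controller that must stay synchronous with cgroup removal can refuse new work once the css has been scheduled for offlining.)

static int example_attach(struct cgroup_subsys_state *css)	/* hypothetical */
{
	/* Removal already scheduled; behave as if the cgroup were gone. */
	if (css_is_dying(css))
		return -ENODEV;

	/* ... proceed with normal setup ... */
	return 0;
}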
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 0e1e050..cf86f52 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -440,6 +440,13 @@
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
 
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+
 /*
  * Geometry functions.
  */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bed7a84..026aa0a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -705,6 +705,11 @@
 	void			*i_private; /* fs or device private pointer */
 };
 
+static inline unsigned int i_blocksize(const struct inode *node)
+{
+	return (1 << node->i_blkbits);
+}
+
 static inline int inode_unhashed(struct inode *inode)
 {
 	return hlist_unhashed(&inode->i_hash);
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3319d97..8feecd5 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -630,14 +630,16 @@
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
 						    netdev_features_t features)
 {
-	if (skb_vlan_tagged_multi(skb))
-		features = netdev_intersect_features(features,
-						     NETIF_F_SG |
-						     NETIF_F_HIGHDMA |
-						     NETIF_F_FRAGLIST |
-						     NETIF_F_HW_CSUM |
-						     NETIF_F_HW_VLAN_CTAG_TX |
-						     NETIF_F_HW_VLAN_STAG_TX);
+	if (skb_vlan_tagged_multi(skb)) {
+		/* In the case of multi-tagged packets, use a direct mask
+		 * instead of using netdev_intersect_features(), to make
+		 * sure that only devices supporting NETIF_F_HW_CSUM will
+		 * have checksum offloading support.
+		 */
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_TX;
+	}
 
 	return features;
 }
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72f0721..bbc65ef 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -450,6 +450,12 @@
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << IRQ_POLL_SOFTIRQ)     | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -485,6 +491,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f7033fa..d6ebc01 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d..e1a903a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -640,7 +640,12 @@
 
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
+enum {
+	MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
 struct mlx5_cmd_work_ent {
+	unsigned long		state;
 	struct mlx5_cmd_msg    *in;
 	struct mlx5_cmd_msg    *out;
 	void		       *uout;
@@ -838,7 +843,7 @@
 #endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0e6a54c..df841cf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -347,7 +347,9 @@
 	atomic_t	devfreq_abort;
 	bool		skip_clk_scale_freq_update;
 	int		freq_table_sz;
+	int		pltfm_freq_table_sz;
 	u32		*freq_table;
+	u32		*pltfm_freq_table;
 	unsigned long	total_busy_time_us;
 	unsigned long	target_freq;
 	unsigned long	curr_freq;
@@ -853,6 +855,8 @@
 	return card->host->ios.enhanced_strobe;
 }
 
+void mmc_retune_enable(struct mmc_host *host);
+void mmc_retune_disable(struct mmc_host *host);
 void mmc_retune_timer_stop(struct mmc_host *host);
 
 static inline void mmc_retune_needed(struct mmc_host *host)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7e273e2..6744eb4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -672,6 +672,7 @@
 	 * is the first PFN that needs to be initialised.
 	 */
 	unsigned long first_deferred_pfn;
+	unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 72f9211..4381570 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -248,6 +248,7 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e0e5393..d53a231 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -53,7 +53,8 @@
 			  unsigned long addr, unsigned long data);
 extern void ptrace_notify(int exit_code);
 extern void __ptrace_link(struct task_struct *child,
-			  struct task_struct *new_parent);
+			  struct task_struct *new_parent,
+			  const struct cred *ptracer_cred);
 extern void __ptrace_unlink(struct task_struct *child);
 extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ	0x01
@@ -205,7 +206,7 @@
 
 	if (unlikely(ptrace) && current->ptrace) {
 		child->ptrace = current->ptrace;
-		__ptrace_link(child, current->parent);
+		__ptrace_link(child, current->parent, current->ptracer_cred);
 
 		if (child->ptrace & PT_SEIZED)
 			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -214,6 +215,8 @@
 
 		set_tsk_thread_flag(child, TIF_SIGPENDING);
 	}
+	else
+		child->ptracer_cred = NULL;
 }
 
 /**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index decb943..9e7ab05 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,7 +178,9 @@
 #endif
 
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+				     unsigned int *max_nr,
+				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
 
 extern void calc_global_load(unsigned long ticks);
@@ -1153,8 +1155,9 @@
 extern int sched_domain_level_max;
 
 struct capacity_state {
-	unsigned long cap;	/* compute capacity */
-	unsigned long power;	/* power consumption at this compute capacity */
+	unsigned long cap;	/* capacity - calculated by energy driver */
+	unsigned long frequency;/* frequency */
+	unsigned long power;	/* power consumption at this frequency */
 };
 
 struct idle_state {
@@ -2635,7 +2638,6 @@
 #define MAX_NUM_CGROUP_COLOC_ID	20
 
 #ifdef CONFIG_SCHED_HMP
-extern void free_task_load_ptrs(struct task_struct *p);
 extern int sched_set_window(u64 window_start, unsigned int window_size);
 extern unsigned long sched_get_busy(int cpu);
 extern void sched_get_cpus_busy(struct sched_load *busy,
@@ -2659,8 +2661,6 @@
 extern unsigned int sched_get_group_id(struct task_struct *p);
 
 #else /* CONFIG_SCHED_HMP */
-static inline void free_task_load_ptrs(struct task_struct *p) { }
-
 static inline int sched_set_window(u64 window_start, unsigned int window_size)
 {
 	return -EINVAL;
@@ -2698,6 +2698,7 @@
 extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin,
 					  u32 fmax);
 extern int sched_set_boost(int enable);
+extern void free_task_load_ptrs(struct task_struct *p);
 #else
 static inline int
 register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -2710,6 +2711,7 @@
 {
 	return -EINVAL;
 }
+static inline void free_task_load_ptrs(struct task_struct *p) { }
 #endif /* CONFIG_SCHED_WALT */
 
 #ifndef CONFIG_SCHED_WALT
diff --git a/include/linux/soundwire/soundwire.h b/include/linux/soundwire/soundwire.h
index 752a001..a60d78c 100644
--- a/include/linux/soundwire/soundwire.h
+++ b/include/linux/soundwire/soundwire.h
@@ -196,7 +196,6 @@
  * @shutdown: standard shutdown callback used during power down/halt
  * @suspend: standard suspend callback used during system suspend
  * @resume: standard resume callback used during system resume
- * @startup: additional init operation for slave devices
  * @driver: soundwire device drivers should initialize name and
  * owner field of this structure
  * @id_table: list of soundwire devices supported by this driver
@@ -210,7 +209,6 @@
 	int	(*device_up)(struct swr_device *swr);
 	int	(*device_down)(struct swr_device *swr);
 	int	(*reset_device)(struct swr_device *swr);
-	int	(*startup)(struct swr_device *swr);
 	struct device_driver		driver;
 	const struct swr_device_id	*id_table;
 };
@@ -309,4 +307,6 @@
 extern int swr_slvdev_datapath_control(struct swr_device *swr_dev, u8 dev_num,
 				       bool enable);
 extern int swr_remove_from_group(struct swr_device *dev, u8 dev_num);
+
+extern void swr_remove_device(struct swr_device *swr_dev);
 #endif /* _LINUX_SOUNDWIRE_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8491bdc..6e49b86 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -40,9 +40,15 @@
 /* No upper/lower limit requirement */
 #define THERMAL_NO_LIMIT	((u32)~0)
 
+/* upper limit requirement */
+#define THERMAL_MAX_LIMIT	(THERMAL_NO_LIMIT - 1)
+
 /* Default weight of a bound cooling device */
 #define THERMAL_WEIGHT_DEFAULT 0
 
+/* Max sensors that can be used for a single virtual thermalzone */
+#define THERMAL_MAX_VIRT_SENSORS 5
+
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
 
@@ -405,6 +411,39 @@
 	enum thermal_trip_type type;
 };
 
+/* Different aggregation logic supported for virtual sensors */
+enum aggregation_logic {
+	VIRT_WEIGHTED_AVG,
+	VIRT_MAXIMUM,
+	VIRT_MINIMUM,
+	VIRT_AGGREGATION_NR,
+};
+
+/*
+ * struct virtual_sensor_data - Data structure used to provide
+ *			      information about the virtual zone.
+ * @virt_zone_name - Virtual thermal zone name
+ * @num_sensors - Number of sensors this virtual zone uses to compute
+ *		  temperature
+ * @sensor_names - Array of sensor names
+ * @logic - Temperature aggregation logic to be used
+ * @coefficients - Coefficients to be used for weighted average logic
+ * @coefficient_ct - number of coefficients provided as input
+ * @avg_offset - offset value to be used for the weighted aggregation logic
+ * @avg_denominator - denominator value to be used for the weighted aggregation
+ *			logic
+ */
+struct virtual_sensor_data {
+	int                    num_sensors;
+	char                   virt_zone_name[THERMAL_NAME_LENGTH];
+	char                   *sensor_names[THERMAL_MAX_VIRT_SENSORS];
+	enum aggregation_logic logic;
+	int                    coefficients[THERMAL_MAX_VIRT_SENSORS];
+	int                    coefficient_ct;
+	int                    avg_offset;
+	int                    avg_denominator;
+};
+
 /* Function declarations */
 #ifdef CONFIG_THERMAL_OF
 struct thermal_zone_device *
@@ -417,6 +456,9 @@
 		const struct thermal_zone_of_device_ops *ops);
 void devm_thermal_zone_of_sensor_unregister(struct device *dev,
 					    struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data);
 #else
 static inline struct thermal_zone_device *
 thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -444,6 +486,14 @@
 {
 }
 
+static inline
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 #endif
 
 #if IS_ENABLED(CONFIG_THERMAL)
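(Hedged sketch of how a platform driver might fill the new structure and register a virtual zone; all names and values below are hypothetical.)

static struct virtual_sensor_data example_vs = {
	.virt_zone_name	 = "soc-virt-therm",	/* hypothetical zone name */
	.num_sensors	 = 2,
	.sensor_names	 = { "cpu0-therm", "cpu1-therm" },
	.logic		 = VIRT_WEIGHTED_AVG,
	.coefficients	 = { 60, 40 },		/* weights for the two sensors */
	.coefficient_ct	 = 2,
	.avg_offset	 = 0,
	.avg_denominator = 100,
};

static int example_probe(struct platform_device *pdev)	/* hypothetical */
{
	struct thermal_zone_device *tz;

	tz = devm_thermal_of_virtual_sensor_register(&pdev->dev, &example_vs);
	return PTR_ERR_OR_ZERO(tz);
}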
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 0000000..f2322f3
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 3.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/docs/devclass_docs/USB_Audio_v3.0.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+#define UAC3_MIXER_UNIT_V3	0x05
+#define UAC3_FEATURE_UNIT_V3	0x07
+#define UAC3_CLOCK_SOURCE	0x0b
+
+#define BADD_MAXPSIZE_SYNC_MONO_16	0x0060
+#define BADD_MAXPSIZE_SYNC_MONO_24	0x0090
+#define BADD_MAXPSIZE_SYNC_STEREO_16	0x00c0
+#define BADD_MAXPSIZE_SYNC_STEREO_24	0x0120
+
+#define BADD_MAXPSIZE_ASYNC_MONO_16	0x0062
+#define BADD_MAXPSIZE_ASYNC_MONO_24	0x0093
+#define BADD_MAXPSIZE_ASYNC_STEREO_16	0x00c4
+#define BADD_MAXPSIZE_ASYNC_STEREO_24	0x0126
+
+#define BIT_RES_16_BIT		0x10
+#define BIT_RES_24_BIT		0x18
+
+#define SUBSLOTSIZE_16_BIT	0x02
+#define SUBSLOTSIZE_24_BIT	0x03
+
+#define BADD_SAMPLING_RATE	48000
+
+#define NUM_CHANNELS_MONO	1
+#define NUM_CHANNELS_STEREO	2
+#define BADD_CH_CONFIG_MONO	0
+#define BADD_CH_CONFIG_STEREO	3
+#define CLUSTER_ID_MONO		0x0001
+#define CLUSTER_ID_STEREO	0x0002
+
+#define FULL_ADC_PROFILE	0x01
+
+/* BADD Profile IDs */
+#define PROF_GENERIC_IO		0x20
+#define PROF_HEADPHONE		0x21
+#define PROF_SPEAKER		0x22
+#define PROF_MICROPHONE		0x23
+#define PROF_HEADSET		0x24
+#define PROF_HEADSET_ADAPTER	0x25
+#define PROF_SPEAKERPHONE	0x26
+
+/* BADD Entity IDs */
+#define BADD_OUT_TERM_ID_BAOF	0x03
+#define BADD_OUT_TERM_ID_BAIF	0x06
+#define BADD_IN_TERM_ID_BAOF	0x01
+#define BADD_IN_TERM_ID_BAIF	0x04
+#define BADD_FU_ID_BAOF		0x02
+#define BADD_FU_ID_BAIF		0x05
+#define BADD_CLOCK_SOURCE	0x09
+#define BADD_FU_ID_BAIOF	0x07
+#define BADD_MU_ID_BAIOF	0x08
+
+#define UAC_BIDIR_TERMINAL_HEADSET	0x0402
+#define UAC_BIDIR_TERMINAL_SPEAKERPHONE	0x0403
+
+#define NUM_BADD_DESCS		7
+
+struct uac3_input_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wClusterDescrID;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_INPUT_TERMINAL_SIZE	0x14
+
+extern struct uac3_input_terminal_descriptor badd_baif_in_term_desc;
+extern struct uac3_input_terminal_descriptor badd_baof_in_term_desc;
+
+struct uac3_output_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bSourceID;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_OUTPUT_TERMINAL_SIZE	0x13
+
+extern struct uac3_output_terminal_descriptor badd_baif_out_term_desc;
+extern struct uac3_output_terminal_descriptor badd_baof_out_term_desc;
+
+extern __u8 monoControls[];
+extern __u8 stereoControls[];
+extern __u8 badd_mu_src_ids[];
+
+struct uac3_mixer_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bNrInPins;
+	__u8 *baSourceID;
+	__u16 wClusterDescrID;
+	__u8 bmMixerControls;
+	__u32 bmControls;
+	__u16 wMixerDescrStr;
+} __packed;
+
+#define UAC3_DT_MIXER_UNIT_SIZE		0x10
+
+extern struct uac3_mixer_unit_descriptor badd_baiof_mu_desc;
+
+struct uac3_feature_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bSourceID;
+	__u8 *bmaControls;
+	__u16 wFeatureDescrStr;
+} __packed;
+
+extern struct uac3_feature_unit_descriptor badd_baif_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baof_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baiof_fu_desc;
+
+struct uac3_clock_source_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bClockID;
+	__u8 bmAttributes;
+	__u32 bmControls;
+	__u8 bReferenceTerminal;
+	__u16 wClockSourceStr;
+} __packed;
+
+#define UAC3_DT_CLOCK_SRC_SIZE		0x0c
+
+extern struct uac3_clock_source_descriptor badd_clock_desc;
+
+extern void *badd_desc_list[];
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
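
As a quick illustration of how the constants above relate (not from the patch; the helper name is invented), a driver could pick the synchronous-endpoint wMaxPacketSize from the channel count and bit resolution:

	/* Hedged sketch: map (channels, bit resolution) to a BADD max packet size. */
	static u16 badd_sync_maxpsize(u8 num_channels, u8 bit_res)
	{
		if (num_channels == NUM_CHANNELS_MONO)
			return (bit_res == BIT_RES_16_BIT) ?
				BADD_MAXPSIZE_SYNC_MONO_16 : BADD_MAXPSIZE_SYNC_MONO_24;

		return (bit_res == BIT_RES_16_BIT) ?
			BADD_MAXPSIZE_SYNC_STEREO_16 : BADD_MAXPSIZE_SYNC_STEREO_24;
	}
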
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 0583431..8053c8a 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -121,5 +121,6 @@
 int msm_vidc_unsubscribe_event(void *instance,
 		const struct v4l2_event_subscription *sub);
 int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *a);
 int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
 #endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 6835d22..ddcff17 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -107,10 +107,16 @@
 	};
 };
 
+struct dst_metrics {
+	u32		metrics[RTAX_MAX];
+	atomic_t	refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY		0x1UL
+#define DST_METRICS_REFCOUNTED		0x2UL
 #define DST_METRICS_FLAGS		0x3UL
 #define __DST_METRICS_PTR(Y)	\
 	((u32 *)((Y) & ~DST_METRICS_FLAGS))
diff --git a/include/net/ip.h b/include/net/ip.h
index b043c7d..9816365 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -272,6 +272,8 @@
 
 __be32 inet_current_timestamp(void);
 
+extern int sysctl_reserved_port_bind;
+
 /* From inetpeer.c */
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f390c3b..aa75828 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -114,11 +114,11 @@
 	__be32			fib_prefsrc;
 	u32			fib_tb_id;
 	u32			fib_priority;
-	u32			*fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+	struct dst_metrics	*fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
 	int			fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			fib_weight;
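
The switch from a bare u32 array to the refcounted struct dst_metrics lets several dst entries share one fib_info's metrics safely. A hedged sketch of the intended lifetime (helper names are illustrative, not kernel APIs; "fi" stands for a struct fib_info owning the metrics):

	static void share_fib_metrics(struct dst_entry *dst, struct dst_metrics *m)
	{
		dst_init_metrics(dst, m->metrics, true);
		if (m != (struct dst_metrics *)&dst_default_metrics) {
			/* dst now holds a reference; teardown must drop it */
			atomic_inc(&m->refcnt);
			dst->_metrics |= DST_METRICS_REFCOUNTED;
		}
	}

	static void put_dst_metrics(struct dst_entry *dst)
	{
		if (dst->_metrics & DST_METRICS_REFCOUNTED) {
			struct dst_metrics *m =
				(struct dst_metrics *)__DST_METRICS_PTR(dst->_metrics);

			if (atomic_dec_and_test(&m->refcnt))
				kfree(m);
		}
	}
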
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 7f15f95..91afb4a 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -1001,6 +1001,7 @@
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 7ef984a..549cb84 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -144,5 +144,6 @@
 extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
 extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
 extern int icnss_trigger_recovery(struct device *dev);
-
+extern int icnss_get_driver_load_cnt(void);
+extern void icnss_increment_driver_load_cnt(void);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 7a09cb1..14f6445 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -15,6 +15,7 @@
 #define _APR_AUDIO_V2_H_
 
 #include <linux/qdsp6v2/apr.h>
+#include <linux/msm_audio.h>
 
 /* size of header needed for passing data out of band */
 #define APR_CMD_OB_HDR_SZ  12
@@ -43,6 +44,8 @@
 
 #define ADM_MATRIX_ID_COMPRESSED_AUDIO_RX   2
 
+#define ADM_MATRIX_ID_COMPRESSED_AUDIO_TX   3
+
 #define ADM_MATRIX_ID_LISTEN_TX             4
 /* Enumeration for an audio Tx matrix ID.*/
 #define ADM_MATRIX_ID_AUDIOX              1
@@ -444,6 +447,26 @@
 	 */
 } __packed;
 
+#define ASM_STREAM_CMD_REGISTER_PP_EVENTS 0x00013213
+#define ASM_STREAM_PP_EVENT 0x00013214
+#define ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE 0x13333
+#define ASM_IEC_61937_MEDIA_FMT_EVENT 0x13334
+
+#define DSP_STREAM_CMD "ADSP Stream Cmd"
+#define DSP_STREAM_CALLBACK "ADSP Stream Callback Event"
+#define DSP_STREAM_CALLBACK_QUEUE_SIZE 1024
+
+struct dsp_stream_callback_list {
+	struct list_head list;
+	struct msm_adsp_event_data event;
+};
+
+struct dsp_stream_callback_prtd {
+	uint16_t event_count;
+	struct list_head event_queue;
+	spinlock_t prtd_spin_lock;
+};
+
 /* set customized mixing on matrix mixer */
 #define ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5                        0x00010344
 struct adm_cmd_set_pspd_mtmx_strtr_params_v5 {
@@ -3993,6 +4016,32 @@
 
 } __packed;
 
+
+/* Command to send sample rate & channels for IEC61937 (compressed) or IEC60958
+ * (pcm) streams. Both audio standards use the same format and are used for
+ * HDMI or SPDIF.
+ */
+#define ASM_DATA_CMD_IEC_60958_MEDIA_FMT        0x0001321E
+
+struct asm_iec_compressed_fmt_blk_t {
+	struct apr_hdr hdr;
+
+	/*
+	 * Nominal sampling rate of the incoming bitstream.
+	 * Supported values: 8000, 11025, 16000, 22050, 24000, 32000,
+	 *                   44100, 48000, 88200, 96000, 176400, 192000,
+	 *                   352800, 384000
+	 */
+	uint32_t sampling_rate;
+
+	/*
+	 * Number of channels of the incoming bitstream.
+	 * Supported values: 1,2,3,4,5,6,7,8
+	 */
+	uint32_t num_channels;
+
+} __packed;
+
 struct asm_multi_channel_pcm_fmt_blk_v2 {
 	struct apr_hdr hdr;
 	struct asm_data_cmd_media_fmt_update_v2 fmt_blk;
@@ -5054,6 +5103,12 @@
 #define ASM_MEDIA_FMT_VORBIS                 0x00010C15
 #define ASM_MEDIA_FMT_APE                    0x00012F32
 #define ASM_MEDIA_FMT_DSD                    0x00012F3E
+#define ASM_MEDIA_FMT_TRUEHD                 0x00013215
+/* 0x0 is used for format ID since ADSP dynamically determines the
+ * format encapsulated in the IEC61937 (compressed) or IEC60958
+ * (pcm) packets.
+ */
+#define ASM_MEDIA_FMT_IEC                    0x00000000
 
 /* Media format ID for adaptive transform acoustic coding. This
  * ID is used by the #ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED command
@@ -6315,6 +6370,62 @@
 
 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
 
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2     0x00013218
+
+struct asm_stream_cmd_set_encdec_param_v2 {
+	u16                  service_id;
+	/* 0 - ASM_ENCODER_SVC; 1 - ASM_DECODER_SVC */
+
+	u16                  reserved;
+
+	u32                  param_id;
+	/* ID of the parameter. */
+
+	u32                  param_size;
+	/*
+	 * Data size of this parameter, in bytes. The size is a multiple
+	 * of 4 bytes.
+	 */
+} __packed;
+
+#define ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS  0x00013219
+
+#define ASM_STREAM_CMD_ENCDEC_EVENTS           0x0001321A
+
+#define AVS_PARAM_ID_RTIC_SHARED_MEMORY_ADDR   0x00013237
+
+struct avs_rtic_shared_mem_addr {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param_v2  encdec;
+	u32                 shm_buf_addr_lsw;
+	/* Lower 32 bit of the RTIC shared memory */
+
+	u32                 shm_buf_addr_msw;
+	/* Upper 32 bit of the RTIC shared memory */
+
+	u32                 buf_size;
+	/* Size of buffer */
+
+	u16                 shm_buf_mem_pool_id;
+	/* ADSP_MEMORY_MAP_SHMEM8_4K_POOL */
+
+	u16                 shm_buf_num_regions;
+	/* number of regions to map */
+
+	u32                 shm_buf_flag;
+	/* buffer property flag */
+
+	struct avs_shared_map_region_payload map_region;
+	/* memory map region*/
+} __packed;
+
+#define AVS_PARAM_ID_RTIC_EVENT_ACK           0x00013238
+
+struct avs_param_rtic_event_ack {
+	struct apr_hdr hdr;
+	struct asm_stream_cmd_set_encdec_param_v2  encdec;
+} __packed;
+
 #define ASM_PARAM_ID_ENCDEC_BITRATE     0x00010C13
 
 struct asm_bitrate_param {
@@ -10325,10 +10436,33 @@
 	u32                  flags;
 } __packed;
 
+
+/* Parameter used by #ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC to
+ * realize smoother adjustment of audio session clock for a specified session.
+ * The desired audio session clock adjustment(in micro seconds) is specified
+ * using the command #ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2.
+ * Delaying/Advancing the session clock would be implemented by inserting
+ * interpolated/dropping audio samples in the playback path respectively.
+ * Also, this parameter has to be configured before the Audio Session is put
+ * to RUN state to avoid cold start latency/glitches in the playback.
+ */
+
+#define ASM_SESSION_MTMX_PARAM_ADJUST_SESSION_TIME_CTL         0x00013217
+
+struct asm_session_mtmx_param_adjust_session_time_ctl_t {
+	/* Specifies whether the module is enabled or not
+	 * @values
+	 * 0 -- disabled
+	 * 1 -- enabled
+	 */
+	u32                 enable;
+};
+
 union asm_session_mtmx_strtr_param_config {
 	struct asm_session_mtmx_strtr_param_window_v2_t window_param;
 	struct asm_session_mtmx_strtr_param_render_mode_t render_param;
 	struct asm_session_mtmx_strtr_param_clk_rec_t clk_rec_param;
+	struct asm_session_mtmx_param_adjust_session_time_ctl_t adj_time_param;
 } __packed;
 
 struct asm_mtmx_strtr_params {
@@ -10457,6 +10591,7 @@
 	COMPRESSED_PASSTHROUGH_DSD,
 	LISTEN,
 	COMPRESSED_PASSTHROUGH_GEN,
+	COMPRESSED_PASSTHROUGH_IEC61937
 };
 
 #define AUDPROC_MODULE_ID_COMPRESSED_MUTE                0x00010770
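
For reference, a hedged sketch (not the in-tree q6asm code) of how the new IEC 60958/61937 media-format block could be populated for a 48 kHz stereo passthrough stream; the APR header handling is reduced to setting the opcode:

	static void fill_iec_fmt_blk(struct asm_iec_compressed_fmt_blk_t *fmt)
	{
		memset(fmt, 0, sizeof(*fmt));
		/* remaining hdr fields would come from the usual q6asm APR helpers */
		fmt->hdr.opcode = ASM_DATA_CMD_IEC_60958_MEDIA_FMT;
		fmt->sampling_rate = 48000;	/* one of the supported nominal rates */
		fmt->num_channels = 2;
	}
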
diff --git a/include/sound/msm-dts-eagle.h b/include/sound/msm-dts-eagle.h
deleted file mode 100644
index 2ef0113..0000000
--- a/include/sound/msm-dts-eagle.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_DTS_EAGLE_H__
-#define __MSM_DTS_EAGLE_H__
-
-#include <linux/compat.h>
-#include <sound/soc.h>
-#include <sound/devdep_params.h>
-#include <sound/q6asm-v2.h>
-
-#ifdef CONFIG_COMPAT
-enum {
-	DTS_EAGLE_IOCTL_GET_CACHE_SIZE32 = _IOR(0xF2, 0, __s32),
-	DTS_EAGLE_IOCTL_SET_CACHE_SIZE32 = _IOW(0xF2, 1, __s32),
-	DTS_EAGLE_IOCTL_GET_PARAM32 = _IOR(0xF2, 2, compat_uptr_t),
-	DTS_EAGLE_IOCTL_SET_PARAM32 = _IOW(0xF2, 3, compat_uptr_t),
-	DTS_EAGLE_IOCTL_SET_CACHE_BLOCK32 =
-				_IOW(0xF2, 4, compat_uptr_t),
-	DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE32 =
-				_IOW(0xF2, 5, compat_uptr_t),
-	DTS_EAGLE_IOCTL_GET_LICENSE32 =
-				_IOR(0xF2, 6, compat_uptr_t),
-	DTS_EAGLE_IOCTL_SET_LICENSE32 =
-				 _IOW(0xF2, 7, compat_uptr_t),
-	DTS_EAGLE_IOCTL_SEND_LICENSE32 = _IOW(0xF2, 8, __s32),
-	DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS32 = _IOW(0xF2, 9,
-						     compat_uptr_t),
-};
-#endif
-
-#ifdef CONFIG_DTS_EAGLE
-void msm_dts_ion_memmap(struct param_outband *po_);
-int msm_dts_eagle_enable_asm(struct audio_client *ac, u32 enable, int module);
-int msm_dts_eagle_enable_adm(int port_id, int copp_idx, u32 enable);
-void msm_dts_eagle_add_controls(struct snd_soc_platform *platform);
-int msm_dts_eagle_set_stream_gain(struct audio_client *ac,
-				  int lgain, int rgain);
-int msm_dts_eagle_handle_asm(struct dts_eagle_param_desc *depd, char *buf,
-			     bool for_pre, bool get, struct audio_client *ac,
-			     struct param_outband *po);
-int msm_dts_eagle_handle_adm(struct dts_eagle_param_desc *depd, char *buf,
-			     bool for_pre, bool get);
-int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg);
-int msm_dts_eagle_is_hpx_on(void);
-int msm_dts_eagle_init_pre(struct audio_client *ac);
-int msm_dts_eagle_deinit_pre(struct audio_client *ac);
-int msm_dts_eagle_init_post(int port_id, int copp_id);
-int msm_dts_eagle_deinit_post(int port_id, int topology);
-int msm_dts_eagle_init_master_module(struct audio_client *ac);
-int msm_dts_eagle_deinit_master_module(struct audio_client *ac);
-int msm_dts_eagle_pcm_new(struct snd_soc_pcm_runtime *runtime);
-void msm_dts_eagle_pcm_free(struct snd_pcm *pcm);
-int msm_dts_eagle_compat_ioctl(unsigned int cmd, unsigned long arg);
-#else
-static inline void msm_dts_ion_memmap(struct param_outband *po_)
-{
-	pr_debug("%s\n", __func__);
-}
-static inline int msm_dts_eagle_enable_asm(struct audio_client *ac,
-					   u32 enable, int module)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_enable_adm(int port_id, int copp_idx,
-					   u32 enable)
-{
-	return 0;
-}
-static inline void msm_dts_eagle_add_controls(struct snd_soc_platform *platform)
-{
-}
-static inline int msm_dts_eagle_set_stream_gain(struct audio_client *ac,
-						int lgain, int rgain)
-{
-	pr_debug("%s\n", __func__);
-	return 0;
-}
-static inline int msm_dts_eagle_handle_asm(struct dts_eagle_param_desc *depd,
-					   char *buf, bool for_pre, bool get,
-					   struct audio_client *ac,
-					   struct param_outband *po)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_handle_adm(struct dts_eagle_param_desc *depd,
-					   char *buf, bool for_pre, bool get)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
-{
-	return -EPERM;
-}
-static inline int msm_dts_eagle_is_hpx_on(void)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_init_pre(struct audio_client *ac)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_deinit_pre(struct audio_client *ac)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_init_post(int port_id, int coppid)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_deinit_post(int port_id, int topology)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_init_master_module(struct audio_client *ac)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_deinit_master_module(struct audio_client *ac)
-{
-	return 0;
-}
-static inline int msm_dts_eagle_pcm_new(struct snd_soc_pcm_runtime *runtime)
-{
-	pr_debug("%s\n", __func__);
-	return 0;
-}
-static inline void msm_dts_eagle_pcm_free(struct snd_pcm *pcm)
-{
-	pr_debug("%s\n", __func__);
-}
-static inline int msm_dts_eagle_compat_ioctl(unsigned int cmd,
-					unsigned long arg)
-{
-	return 0;
-}
-#endif
-
-#endif
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 42d048f..e689e93 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -17,6 +17,7 @@
 #define ADM_PATH_LIVE_REC 0x2
 #define ADM_PATH_NONLIVE_REC 0x3
 #define ADM_PATH_COMPRESSED_RX 0x5
+#define ADM_PATH_COMPRESSED_TX 0x6
 #include <linux/qdsp6v2/rtac.h>
 #include <sound/q6afe-v2.h>
 #include <sound/q6audio-v2.h>
@@ -34,7 +35,6 @@
 	ADM_AUDVOL_CAL,
 	ADM_RTAC_INFO_CAL,
 	ADM_RTAC_APR_CAL,
-	ADM_DTS_EAGLE,
 	ADM_SRS_TRUMEDIA,
 	ADM_RTAC_AUDVOL_CAL,
 	ADM_MAX_CAL_TYPES
@@ -65,6 +65,20 @@
 	unsigned int session_id;
 };
 
+struct default_chmixer_param_id_coeff {
+	uint32_t index;
+	uint16_t num_output_channels;
+	uint16_t num_input_channels;
+};
+
+struct msm_pcm_channel_mixer {
+	int output_channel;
+	int input_channels[ADM_MAX_CHANNELS];
+	bool enable;
+	int rule;
+	int channel_weight[ADM_MAX_CHANNELS][ADM_MAX_CHANNELS];
+};
+
 int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
 		      void *srs_params);
 
@@ -164,4 +178,10 @@
 			struct sound_focus_param *soundFocusData);
 int adm_get_source_tracking(int port_id, int copp_idx,
 			    struct source_tracking_param *sourceTrackingData);
+int adm_swap_speaker_channels(int port_id, int copp_idx, int sample_rate,
+				bool spk_swap);
+int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
+			int session_type,
+			struct msm_pcm_channel_mixer *ch_mixer,
+			int channel_index);
 #endif /* __Q6_ADM_V2_H__ */
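
A hedged sketch of how the new channel-mixer description might be filled in for a 6-in/2-out downmix; the weight units, rule value, input_channels semantics and call arguments are assumptions, not values taken from this patch:

	static struct msm_pcm_channel_mixer downmix = {
		.enable         = true,
		.rule           = 0,			/* assumed default rule */
		.output_channel = 2,
		.input_channels = { 6 },		/* assumed: channels of input 0 */
		.channel_weight = {
			{ 100, 0, 0, 0, 50, 0 },	/* out L (assumed percent weights) */
			{ 0, 100, 0, 0, 0, 50 },	/* out R */
		},
	};

	/* applied per COPP, e.g. (placeholder arguments):
	 * adm_programable_channel_mixer(port_id, copp_idx, session_id,
	 *				 session_type, &downmix, 0);
	 */
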
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index 6bc93f5..00b46a5 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -55,6 +55,8 @@
 #define FORMAT_DSD          0x001d
 #define FORMAT_APTX         0x001e
 #define FORMAT_GEN_COMPR    0x001f
+#define FORMAT_TRUEHD       0x0020
+#define FORMAT_IEC61937     0x0021
 
 #define ENCDEC_SBCBITRATE   0x0001
 #define ENCDEC_IMMEDIATE_DECODE 0x0002
@@ -211,6 +213,7 @@
 	int                    session;
 	app_cb		       cb;
 	atomic_t	       cmd_state;
+	atomic_t	       cmd_state_pp;
 	/* Relative or absolute TS */
 	atomic_t	       time_flag;
 	atomic_t	       nowait_cmd_cnt;
@@ -316,6 +319,10 @@
 int q6asm_open_loopback_v2(struct audio_client *ac,
 			   uint16_t bits_per_sample);
 
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+			   uint16_t bits_per_sample, uint32_t source_format,
+			   uint32_t sink_format);
+
 int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
 				uint32_t lsw_ts, uint32_t flags);
 int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
@@ -507,6 +514,10 @@
 			bool use_default_chmap, char *channel_map,
 			uint16_t bits_per_sample);
 
+int q6asm_media_format_block_iec(
+			struct audio_client *ac,
+			uint32_t rate, uint32_t channels);
+
 int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac,
 					     uint32_t rate, uint32_t channels,
 					     bool use_default_chmap,
@@ -617,6 +628,14 @@
 int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
 				    uint32_t params_length);
 
+int q6asm_send_stream_cmd(struct audio_client *ac,
+			  struct msm_adsp_event_data *data);
+
+int q6asm_send_ion_fd(struct audio_client *ac, int fd);
+
+int q6asm_send_rtic_event_ack(struct audio_client *ac,
+			      void *param, uint32_t params_length);
+
 /* Client can set the IO mode to either AIO/SIO mode */
 int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
 
@@ -649,6 +668,10 @@
 int q6asm_send_mtmx_strtr_clk_rec_mode(struct audio_client *ac,
 		uint32_t clk_rec_mode);
 
+/* Enable adjust session clock in DSP */
+int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable);
+
 /* Retrieve the current DSP path delay */
 int q6asm_get_path_delay(struct audio_client *ac);
 
@@ -656,4 +679,8 @@
 uint8_t q6asm_get_buf_index_from_token(uint32_t token);
 uint8_t q6asm_get_stream_id_from_token(uint32_t token);
 
+/* Adjust session clock in DSP */
+int q6asm_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_time_lsw,
+		uint32_t adjust_time_msw);
 #endif /* __Q6_ASM_H__ */
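
A hedged sketch of how the two new session-clock helpers are intended to be used together; error handling is trimmed, the 5 ms value is an arbitrary example, and "ac" is an already-opened audio client:

	static int example_adjust_session_clock(struct audio_client *ac)
	{
		u64 adjust_us = 5000;	/* +5 ms, in microseconds (assumed) */
		int rc;

		rc = q6asm_send_mtmx_strtr_enable_adjust_session_clock(ac, true);
		if (rc)
			return rc;

		return q6asm_adjust_session_clock(ac, lower_32_bits(adjust_us),
						  upper_32_bits(adjust_us));
	}
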
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4a9c625..1ea6e0d 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -862,7 +862,9 @@
 		__entry->target_cpu		= target_cpu;
 		__entry->ediff			= ediff;
 		__entry->need_idle		= need_idle;
-		__entry->latency		= sched_ktime_clock() - p->ravg.mark_start;
+		__entry->latency		= p->ravg.mark_start ?
+						  sched_ktime_clock() -
+						  p->ravg.mark_start : 0;
 	),
 
 	TP_printk("comm=%s pid=%d task_cpu=%d task_util=%lu nominated_cpu=%d target_cpu=%d energy_diff=%d need_idle=%d latency=%llu",
@@ -1834,24 +1836,30 @@
 
 TRACE_EVENT(sched_get_nr_running_avg,
 
-	TP_PROTO(int avg, int big_avg, int iowait_avg),
+	TP_PROTO(int avg, int big_avg, int iowait_avg,
+		 unsigned int max_nr, unsigned int big_max_nr),
 
-	TP_ARGS(avg, big_avg, iowait_avg),
+	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
 
 	TP_STRUCT__entry(
 		__field( int,	avg			)
 		__field( int,	big_avg			)
 		__field( int,	iowait_avg		)
+		__field( unsigned int,	max_nr		)
+		__field( unsigned int,	big_max_nr	)
 	),
 
 	TP_fast_assign(
 		__entry->avg		= avg;
 		__entry->big_avg	= big_avg;
 		__entry->iowait_avg	= iowait_avg;
+		__entry->max_nr		= max_nr;
+		__entry->big_max_nr	= big_max_nr;
 	),
 
-	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
-		__entry->avg, __entry->big_avg, __entry->iowait_avg)
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg,
+		__entry->max_nr, __entry->big_max_nr)
 );
 
 TRACE_EVENT(core_ctl_eval_need,
diff --git a/include/trace/events/thermal_virtual.h b/include/trace/events/thermal_virtual.h
new file mode 100644
index 0000000..4c9ce51
--- /dev/null
+++ b/include/trace/events/thermal_virtual.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_virtual
+
+#if !defined(_TRACE_VIRTUAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VIRTUAL_H
+
+#include <linux/thermal.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(virtual_temperature,
+
+	TP_PROTO(struct thermal_zone_device *virt_tz,
+		struct thermal_zone_device *tz, int sens_temp,
+		int est_temp),
+
+	TP_ARGS(virt_tz, tz, sens_temp, est_temp),
+
+	TP_STRUCT__entry(
+		__string(virt_zone, virt_tz->type)
+		__string(therm_zone, tz->type)
+		__field(int, sens_temp)
+		__field(int, est_temp)
+	),
+
+	TP_fast_assign(
+		__assign_str(virt_zone, virt_tz->type);
+		__assign_str(therm_zone, tz->type);
+		__entry->sens_temp = sens_temp;
+		__entry->est_temp = est_temp;
+	),
+
+	TP_printk("virt_zone=%s zone=%s temp=%d virtual zone estimated temp=%d",
+		__get_str(virt_zone), __get_str(therm_zone),
+		__entry->sens_temp,
+		__entry->est_temp)
+);
+
+#endif /* _TRACE_VIRTUAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
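
The event follows the usual TRACE_EVENT conventions, so the aggregation code would emit it as trace_virtual_temperature(); a hedged sketch with illustrative variable and function names:

	/* In exactly one .c file, before including the header: */
	#define CREATE_TRACE_POINTS
	#include <trace/events/thermal_virtual.h>

	static void report_virtual_temp(struct thermal_zone_device *virt_tz,
					struct thermal_zone_device *sens_tz,
					int sens_temp, int est_temp)
	{
		trace_virtual_temperature(virt_tz, sens_tz, sens_temp, est_temp);
	}
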
diff --git a/include/trace/events/trace_msm_pil_event.h b/include/trace/events/trace_msm_pil_event.h
new file mode 100644
index 0000000..4795dc5
--- /dev/null
+++ b/include/trace/events/trace_msm_pil_event.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_pil_event
+
+#if !defined(_TRACE_MSM_PIL_EVENT_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_PIL_EVENT_H_
+
+#include <linux/tracepoint.h>
+#include <../drivers/soc/qcom/peripheral-loader.h>
+
+TRACE_EVENT(pil_event,
+
+	TP_PROTO(const char *event_name, struct pil_desc *desc),
+
+	TP_ARGS(event_name, desc),
+
+	TP_STRUCT__entry(
+		__string(event_name, event_name)
+		__string(fw_name, desc->fw_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(event_name, event_name);
+		__assign_str(fw_name, desc->fw_name);
+	),
+
+	TP_printk("event_name=%s fw_name=%s",
+		__get_str(event_name),
+		__get_str(fw_name))
+);
+
+TRACE_EVENT(pil_notif,
+
+	TP_PROTO(const char *event_name, unsigned long code,
+	const char *fw_name),
+
+	TP_ARGS(event_name, code, fw_name),
+
+	TP_STRUCT__entry(
+		__string(event_name, event_name)
+		__field(unsigned long, code)
+		__string(fw_name, fw_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(event_name, event_name);
+		__entry->code = code;
+		__assign_str(fw_name, fw_name);
+	),
+
+	TP_printk("event_name=%s code=%lu fw=%s",
+		__get_str(event_name),
+		__entry->code,
+		__get_str(fw_name))
+);
+
+TRACE_EVENT(pil_func,
+
+	TP_PROTO(const char *func_name),
+
+	TP_ARGS(func_name),
+
+	TP_STRUCT__entry(
+		__string(func_name, func_name)
+	),
+
+	TP_fast_assign(
+		__assign_str(func_name, func_name);
+	),
+
+	TP_printk("func_name=%s",
+		__get_str(func_name))
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_pil_event
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 67d632f..2d078c2 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -92,4 +92,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 14eaf2d..b2d5be9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -109,6 +109,12 @@
 
 #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
 
+/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
+ * to the given target_fd cgroup, descendant cgroups will be able to
+ * override the effective bpf program that was inherited from this cgroup.
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -157,6 +163,7 @@
 		__u32		target_fd;	/* container object to attach to */
 		__u32		attach_bpf_fd;	/* eBPF program to attach */
 		__u32		attach_type;
+		__u32		attach_flags;
 	};
 } __attribute__((aligned(8)));
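
From userspace, the new attach_flags field is passed through the bpf() syscall; a hedged sketch, where prog_fd and cgroup_fd are assumed to be valid descriptors for an already-loaded BPF_PROG_TYPE_CGROUP_SKB program and a cgroup directory:

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int attach_overridable(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = cgroup_fd;
		attr.attach_bpf_fd = prog_fd;
		attr.attach_type = BPF_CGROUP_INET_EGRESS;
		attr.attach_flags = BPF_F_ALLOW_OVERRIDE;	/* descendants may override */

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}
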
 
diff --git a/include/uapi/linux/msm_audio.h b/include/uapi/linux/msm_audio.h
index bde27d1..3213d00 100644
--- a/include/uapi/linux/msm_audio.h
+++ b/include/uapi/linux/msm_audio.h
@@ -461,4 +461,15 @@
 	__s32 topology;
 };
 
+#define ADSP_STREAM_PP_EVENT				0
+#define ADSP_STREAM_ENCDEC_EVENT			1
+#define ADSP_STREAM_IEC_61937_FMT_UPDATE_EVENT		2
+#define ADSP_STREAM_EVENT_MAX				3
+
+struct msm_adsp_event_data {
+	__u32 event_type;
+	__u32 payload_len;
+	__u8 payload[0];
+};
+
 #endif
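
A hedged kernel-side sketch of how a caller might build one of these variable-length events before handing it to the ASM layer; the helper and payload are illustrative, and <linux/slab.h> plus <linux/string.h> are assumed:

	static struct msm_adsp_event_data *alloc_pp_event(const void *payload, u32 len)
	{
		struct msm_adsp_event_data *ev;

		ev = kzalloc(sizeof(*ev) + len, GFP_KERNEL);
		if (!ev)
			return NULL;
		ev->event_type = ADSP_STREAM_PP_EVENT;
		ev->payload_len = len;
		memcpy(ev->payload, payload, len);
		return ev;
	}
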
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 57c2ca4..9773480 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -68,7 +68,9 @@
 #define IPA_IOCTL_ADD_RT_RULE_AFTER 43
 #define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
 #define IPA_IOCTL_GET_HW_VERSION 45
-#define IPA_IOCTL_MAX 46
+#define IPA_IOCTL_ADD_RT_RULE_EXT 46
+#define IPA_IOCTL_NAT_MODIFY_PDN 47
+#define IPA_IOCTL_MAX 48
 
 /**
  * max size of the header to be inserted
@@ -127,6 +129,11 @@
 #define IPA_FLT_MAC_ETHER_TYPE		(1ul << 21)
 
 /**
+ * maximum number of NAT PDNs in the PDN config table
+ */
+#define IPA_MAX_PDN_NUM 5
+
+/**
  * enum ipa_client_type - names for the various IPA "clients"
  * these are from the perspective of the clients, for e.g.
  * HSIC1_PROD means HSIC client is the producer and IPA is the
@@ -718,6 +725,11 @@
  *  consecutive packets
  * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
  *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch; should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if the action is "pass to source/destination NAT", a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for a NAT hit.
  */
 struct ipa_flt_rule {
 	uint8_t retain_hdr;
@@ -731,6 +743,8 @@
 	uint8_t max_prio;
 	uint8_t hashable;
 	uint16_t rule_id;
+	uint8_t set_metadata;
+	uint8_t pdn_idx;
 };
 
 /**
@@ -1407,6 +1421,20 @@
 };
 
 /**
+* struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+* @pdn_index: index of the entry in the PDN config table to be changed
+* @public_ip: PDN's public ip
+* @src_metadata: PDN's source NAT metadata for metadata replacement
+* @dst_metadata: PDN's destination NAT metadata for metadata replacement
+*/
+struct ipa_ioc_nat_pdn_entry {
+	uint8_t pdn_index;
+	uint32_t public_ip;
+	uint32_t src_metadata;
+	uint32_t dst_metadata;
+};
+
+/**
  * struct ipa_msg_meta - Format of the message meta-data.
  * @msg_type: the type of the message
  * @rsvd: reserved bits for future use.
@@ -1628,6 +1656,9 @@
 #define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
 				IPA_IOCTL_GET_NAT_OFFSET, \
 				uint32_t *)
+#define IPA_IOC_NAT_MODIFY_PDN _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				struct ipa_ioc_nat_pdn_entry *)
 #define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
 			IPA_IOCTL_SET_FLT, \
 			uint32_t)
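
From userspace, the new PDN-modification ioctl would be issued against the IPA character device; a hedged sketch, where the "/dev/ipa" node path and the PDN index are assumptions:

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/msm_ipa.h>

	static int set_pdn_public_ip(uint32_t public_ip)
	{
		struct ipa_ioc_nat_pdn_entry entry = {
			.pdn_index    = 1,		/* assumed PDN slot */
			.public_ip    = public_ip,
			.src_metadata = 0,
			.dst_metadata = 0,
		};
		int fd = open("/dev/ipa", O_RDWR);
		int rc;

		if (fd < 0)
			return -1;
		rc = ioctl(fd, IPA_IOC_NAT_MODIFY_PDN, &entry);
		close(fd);
		return rc;
	}
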
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be..c6f5b09 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -26,6 +26,7 @@
 /* bInterfaceProtocol values to denote the version of the standard used */
 #define UAC_VERSION_1			0x00
 #define UAC_VERSION_2			0x20
+#define UAC_VERSION_3			0x30
 
 /* A.2 Audio Interface Subclass Codes */
 #define USB_SUBCLASS_AUDIOCONTROL	0x01
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
index 300bd87..c5cbac8 100644
--- a/include/uapi/media/cam_cpas.h
+++ b/include/uapi/media/cam_cpas.h
@@ -11,13 +11,15 @@
  *
  * @camera_family     : Camera family type
  * @reserved          : Reserved field for alignment
- * @camera_version    : Camera version
+ * @camera_version    : Camera platform version
+ * @cpas_version      : Camera CPAS version within camera platform
  *
  */
 struct cam_cpas_query_cap {
 	uint32_t                 camera_family;
 	uint32_t                 reserved;
 	struct cam_hw_version    camera_version;
+	struct cam_hw_version    cpas_version;
 };
 
 #endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 3048105..866ec3d 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -149,6 +149,8 @@
  * @SNDRV_COMPRESS_CLK_REC_MODE: clock recovery mode ( none or auto)
  * @SNDRV_COMPRESS_RENDER_WINDOW: render window
  * @SNDRV_COMPRESS_START_DELAY: start delay
+ * @SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK: enable dsp drift correction
+ * @SNDRV_COMPRESS_ADJUST_SESSION_CLOCK: set drift correction value
  */
 enum sndrv_compress_encoder {
 	SNDRV_COMPRESS_ENCODER_PADDING = 1,
@@ -160,6 +162,8 @@
 	SNDRV_COMPRESS_CLK_REC_MODE = 7,
 	SNDRV_COMPRESS_RENDER_WINDOW = 8,
 	SNDRV_COMPRESS_START_DELAY = 9,
+	SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10,
+	SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11,
 };
 
 #define SNDRV_COMPRESS_PATH_DELAY SNDRV_COMPRESS_PATH_DELAY
@@ -167,6 +171,9 @@
 #define SNDRV_COMPRESS_CLK_REC_MODE SNDRV_COMPRESS_CLK_REC_MODE
 #define SNDRV_COMPRESS_RENDER_WINDOW SNDRV_COMPRESS_RENDER_WINDOW
 #define SNDRV_COMPRESS_START_DELAY SNDRV_COMPRESS_START_DELAY
+#define SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK \
+			SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK
+#define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK
 
 /**
  * struct snd_compr_metadata - compressed stream metadata
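
From userspace, these keys would be driven through the existing SNDRV_COMPRESS_SET_METADATA ioctl; a hedged sketch, where the split of the microsecond value across value[0]/value[1] (LSW/MSW) is an assumption about how the driver consumes it:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sound/compress_offload.h>

	static int compress_adjust_clock(int fd, uint64_t adjust_us)
	{
		struct snd_compr_metadata md = {0};

		md.key = SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK;
		md.value[0] = 1;				/* enable drift correction */
		if (ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &md) < 0)
			return -1;

		md.key = SNDRV_COMPRESS_ADJUST_SESSION_CLOCK;
		md.value[0] = (uint32_t)adjust_us;		/* assumed LSW */
		md.value[1] = (uint32_t)(adjust_us >> 32);	/* assumed MSW */
		return ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &md);
	}
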
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 09593e7..8c84053 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -105,7 +105,8 @@
 #define SND_AUDIOCODEC_APE                   ((__u32) 0x00000021)
 #define SND_AUDIOCODEC_DSD                   ((__u32) 0x00000022)
 #define SND_AUDIOCODEC_APTX                  ((__u32) 0x00000023)
-#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_APTX
+#define SND_AUDIOCODEC_TRUEHD                ((__u32) 0x00000024)
+#define SND_AUDIOCODEC_MAX                   SND_AUDIOCODEC_TRUEHD
 
 /*
  * Profile and modes are listed with bit masks. This allows for a
diff --git a/init/Kconfig b/init/Kconfig
index d8a5868..954de19 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1270,6 +1270,10 @@
 
 	  Say N.
 
+config SOCK_CGROUP_DATA
+	bool
+	default n
+
 endif # CGROUPS
 
 config SCHED_HMP
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
index a557c5e..7760705 100644
--- a/init/do_mounts_dm.c
+++ b/init/do_mounts_dm.c
@@ -5,13 +5,17 @@
  *
  * This file is released under the GPL.
  */
+#include <linux/async.h>
+#include <linux/ctype.h>
 #include <linux/device-mapper.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/delay.h>
 
 #include "do_mounts.h"
-#include "../drivers/md/dm.h"
 
+#define DM_MAX_DEVICES 256
+#define DM_MAX_TARGETS 256
 #define DM_MAX_NAME 32
 #define DM_MAX_UUID 129
 #define DM_NO_UUID "none"
@@ -19,14 +23,47 @@
 #define DM_MSG_PREFIX "init"
 
 /* Separators used for parsing the dm= argument. */
-#define DM_FIELD_SEP ' '
-#define DM_LINE_SEP ','
+#define DM_FIELD_SEP " "
+#define DM_LINE_SEP ","
+#define DM_ANY_SEP DM_FIELD_SEP DM_LINE_SEP
 
 /*
  * When the device-mapper and any targets are compiled into the kernel
- * (not a module), one target may be created and used as the root device at
- * boot time with the parameters given with the boot line dm=...
- * The code for that is here.
+ * (not a module), one or more device-mappers may be created and used
+ * as the root device at boot time with the parameters given with the
+ * boot line dm=...
+ *
+ * Multiple device-mappers can be stacked by specifying the number of
+ * devices. A device can have multiple targets if the number of
+ * targets is specified.
+ *
+ * TODO(taysom:defect 32847)
+ * In the future, the <num> field will be mandatory.
+ *
+ * <device>        ::= [<num>] <device-mapper>+
+ * <device-mapper> ::= <head> "," <target>+
+ * <head>          ::= <name> <uuid> <mode> [<num>]
+ * <target>        ::= <start> <length> <type> <options> ","
+ * <mode>          ::= "ro" | "rw"
+ * <uuid>          ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "none"
+ * <type>          ::= "verity" | "bootcache" | ...
+ *
+ * Example:
+ * 2 vboot none ro 1,
+ *     0 1768000 bootcache
+ *       device=aa55b119-2a47-8c45-946a-5ac57765011f+1
+ *       signature=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       cache_start=1768000 max_blocks=100000 size_limit=23 max_trace=20000,
+ *   vroot none ro 1,
+ *     0 1740800 verity payload=254:0 hashtree=254:0 hashstart=1740800 alg=sha1
+ *       root_hexdigest=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       salt=5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe
+ *
+ * Notes:
+ *  1. uuid is a label for the device and we set it to "none".
+ *  2. The <num> field will be optional initially and assumed to be 1.
+ *     Once all the scripts that set these fields have been updated, it will
+ *     be made mandatory.
  */
 
 struct dm_setup_target {
@@ -38,381 +75,388 @@
 	struct dm_setup_target *next;
 };
 
-static struct {
+struct dm_device {
 	int minor;
 	int ro;
 	char name[DM_MAX_NAME];
 	char uuid[DM_MAX_UUID];
-	char *targets;
+	unsigned long num_targets;
 	struct dm_setup_target *target;
 	int target_count;
+	struct dm_device *next;
+};
+
+struct dm_option {
+	char *start;
+	char *next;
+	size_t len;
+	char delim;
+};
+
+static struct {
+	unsigned long num_devices;
+	char *str;
 } dm_setup_args __initdata;
 
 static __initdata int dm_early_setup;
 
-static size_t __init get_dm_option(char *str, char **next, char sep)
+static int __init get_dm_option(struct dm_option *opt, const char *accept)
 {
-	size_t len = 0;
-	char *endp = NULL;
+	char *str = opt->next;
+	char *endp;
 
 	if (!str)
 		return 0;
 
-	endp = strchr(str, sep);
+	str = skip_spaces(str);
+	opt->start = str;
+	endp = strpbrk(str, accept);
 	if (!endp) {  /* act like strchrnul */
-		len = strlen(str);
-		endp = str + len;
+		opt->len = strlen(str);
+		endp = str + opt->len;
 	} else {
-		len = endp - str;
+		opt->len = endp - str;
 	}
-
-	if (endp == str)
-		return 0;
-
-	if (!next)
-		return len;
-
+	opt->delim = *endp;
 	if (*endp == 0) {
 		/* Don't advance past the nul. */
-		*next = endp;
+		opt->next = endp;
 	} else {
-		*next = endp + 1;
+		opt->next = endp + 1;
 	}
-	return len;
+	return opt->len != 0;
 }
 
-static int __init dm_setup_args_init(void)
+static int __init dm_setup_cleanup(struct dm_device *devices)
 {
-	dm_setup_args.minor = 0;
-	dm_setup_args.ro = 0;
-	dm_setup_args.target = NULL;
-	dm_setup_args.target_count = 0;
+	struct dm_device *dev = devices;
+
+	while (dev) {
+		struct dm_device *old_dev = dev;
+		struct dm_setup_target *target = dev->target;
+		while (target) {
+			struct dm_setup_target *old_target = target;
+			kfree(target->type);
+			kfree(target->params);
+			target = target->next;
+			kfree(old_target);
+			dev->target_count--;
+		}
+		BUG_ON(dev->target_count);
+		dev = dev->next;
+		kfree(old_dev);
+	}
 	return 0;
 }
 
-static int __init dm_setup_cleanup(void)
+static char * __init dm_parse_device(struct dm_device *dev, char *str)
 {
-	struct dm_setup_target *target = dm_setup_args.target;
-	struct dm_setup_target *old_target = NULL;
-	while (target) {
-		kfree(target->type);
-		kfree(target->params);
-		old_target = target;
-		target = target->next;
-		kfree(old_target);
-		dm_setup_args.target_count--;
-	}
-	BUG_ON(dm_setup_args.target_count);
-	return 0;
-}
-
-static char * __init dm_setup_parse_device_args(char *str)
-{
-	char *next = NULL;
-	size_t len = 0;
+	struct dm_option opt;
+	size_t len;
 
 	/* Grab the logical name of the device to be exported to udev */
-	len = get_dm_option(str, &next, DM_FIELD_SEP);
-	if (!len) {
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
 		DMERR("failed to parse device name");
 		goto parse_fail;
 	}
-	len = min(len + 1, sizeof(dm_setup_args.name));
-	strlcpy(dm_setup_args.name, str, len);  /* includes nul */
-	str = skip_spaces(next);
+	len = min(opt.len + 1, sizeof(dev->name));
+	strlcpy(dev->name, opt.start, len);  /* includes nul */
 
 	/* Grab the UUID value or "none" */
-	len = get_dm_option(str, &next, DM_FIELD_SEP);
-	if (!len) {
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
 		DMERR("failed to parse device uuid");
 		goto parse_fail;
 	}
-	len = min(len + 1, sizeof(dm_setup_args.uuid));
-	strlcpy(dm_setup_args.uuid, str, len);
-	str = skip_spaces(next);
+	len = min(opt.len + 1, sizeof(dev->uuid));
+	strlcpy(dev->uuid, opt.start, len);
 
 	/* Determine if the table/device will be read only or read-write */
-	if (!strncmp("ro,", str, 3)) {
-		dm_setup_args.ro = 1;
-	} else if (!strncmp("rw,", str, 3)) {
-		dm_setup_args.ro = 0;
+	get_dm_option(&opt, DM_ANY_SEP);
+	if (!strncmp("ro", opt.start, opt.len)) {
+		dev->ro = 1;
+	} else if (!strncmp("rw", opt.start, opt.len)) {
+		dev->ro = 0;
 	} else {
 		DMERR("failed to parse table mode");
 		goto parse_fail;
 	}
-	str = skip_spaces(str + 3);
 
-	return str;
+	/* Optional number field */
+	/* XXX: The <num> field will be mandatory in the next round */
+	if (opt.delim == DM_FIELD_SEP[0]) {
+		if (!get_dm_option(&opt, DM_LINE_SEP))
+			return NULL;
+		dev->num_targets = simple_strtoul(opt.start, NULL, 10);
+	} else {
+		dev->num_targets = 1;
+	}
+	if (dev->num_targets > DM_MAX_TARGETS) {
+		DMERR("too many targets %lu > %d",
+			dev->num_targets, DM_MAX_TARGETS);
+	}
+	return opt.next;
 
 parse_fail:
 	return NULL;
 }
 
-static void __init dm_substitute_devices(char *str, size_t str_len)
+static char * __init dm_parse_targets(struct dm_device *dev, char *str)
 {
-	char *candidate = str;
-	char *candidate_end = str;
-	char old_char;
-	size_t len = 0;
-	dev_t dev;
-
-	if (str_len < 3)
-		return;
-
-	while (str && *str) {
-		candidate = strchr(str, '/');
-		if (!candidate)
-			break;
-
-		/* Avoid embedded slashes */
-		if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
-			str = strchr(candidate, DM_FIELD_SEP);
-			continue;
-		}
-
-		len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
-		str = skip_spaces(candidate_end);
-		if (len < 3 || len > 37)  /* name_to_dev_t max; maj:mix min */
-			continue;
-
-		/* Temporarily terminate with a nul */
-		if (*candidate_end)
-			candidate_end--;
-		old_char = *candidate_end;
-		*candidate_end = '\0';
-
-		DMDEBUG("converting candidate device '%s' to dev_t", candidate);
-		/* Use the boot-time specific device naming */
-		dev = name_to_dev_t(candidate);
-		*candidate_end = old_char;
-
-		DMDEBUG(" -> %u", dev);
-		/* No suitable replacement found */
-		if (!dev)
-			continue;
-
-		/* Rewrite the /dev/path as a major:minor */
-		len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
-		if (!len) {
-			DMERR("error substituting device major/minor.");
-			break;
-		}
-		candidate += len;
-		/* Pad out with spaces (fixing our nul) */
-		while (candidate < candidate_end)
-			*(candidate++) = DM_FIELD_SEP;
-	}
-}
-
-static int __init dm_setup_parse_targets(char *str)
-{
-	char *next = NULL;
-	size_t len = 0;
-	struct dm_setup_target **target = NULL;
+	struct dm_option opt;
+	struct dm_setup_target **target = &dev->target;
+	unsigned long num_targets = dev->num_targets;
+	unsigned long i;
 
 	/* Targets are defined as per the table format but with a
 	 * comma as a newline separator. */
-	target = &dm_setup_args.target;
-	while (str && *str) {
+	opt.next = str;
+	for (i = 0; i < num_targets; i++) {
 		*target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
 		if (!*target) {
-			DMERR("failed to allocate memory for target %d",
-			      dm_setup_args.target_count);
+			DMERR("failed to allocate memory for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		dm_setup_args.target_count++;
+		dev->target_count++;
 
-		(*target)->begin = simple_strtoull(str, &next, 10);
-		if (!next || *next != DM_FIELD_SEP) {
-			DMERR("failed to parse starting sector for target %d",
-			      dm_setup_args.target_count - 1);
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse starting sector"
+				" for target %s<%ld>", dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next + 1);
+		(*target)->begin = simple_strtoull(opt.start, NULL, 10);
 
-		(*target)->length = simple_strtoull(str, &next, 10);
-		if (!next || *next != DM_FIELD_SEP) {
-			DMERR("failed to parse length for target %d",
-			      dm_setup_args.target_count - 1);
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse length for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next + 1);
+		(*target)->length = simple_strtoull(opt.start, NULL, 10);
 
-		len = get_dm_option(str, &next, DM_FIELD_SEP);
-		if (!len ||
-		    !((*target)->type = kstrndup(str, len, GFP_KERNEL))) {
-			DMERR("failed to parse type for target %d",
-			      dm_setup_args.target_count - 1);
+		if (get_dm_option(&opt, DM_FIELD_SEP))
+			(*target)->type = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->type)) {
+			DMERR("failed to parse type for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next);
-
-		len = get_dm_option(str, &next, DM_LINE_SEP);
-		if (!len ||
-		    !((*target)->params = kstrndup(str, len, GFP_KERNEL))) {
-			DMERR("failed to parse params for target %d",
-			      dm_setup_args.target_count - 1);
+		if (get_dm_option(&opt, DM_LINE_SEP))
+			(*target)->params = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->params)) {
+			DMERR("failed to parse params for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next);
-
-		/* Before moving on, walk through the copied target and
-		 * attempt to replace all /dev/xxx with the major:minor number.
-		 * It may not be possible to resolve them traditionally at
-		 * boot-time. */
-		dm_substitute_devices((*target)->params, len);
-
 		target = &((*target)->next);
 	}
-	DMDEBUG("parsed %d targets", dm_setup_args.target_count);
+	DMDEBUG("parsed %d targets", dev->target_count);
 
-	return 0;
+	return opt.next;
 
 parse_fail:
-	return 1;
+	return NULL;
+}
+
+static struct dm_device * __init dm_parse_args(void)
+{
+	struct dm_device *devices = NULL;
+	struct dm_device **tail = &devices;
+	struct dm_device *dev;
+	char *str = dm_setup_args.str;
+	unsigned long num_devices = dm_setup_args.num_devices;
+	unsigned long i;
+
+	if (!str)
+		return NULL;
+	for (i = 0; i < num_devices; i++) {
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev) {
+			DMERR("failed to allocate memory for dev");
+			goto error;
+		}
+		*tail = dev;
+		tail = &dev->next;
+		/*
+		 * devices are given minor numbers 0 - n-1
+		 * in the order they are found in the arg
+		 * string.
+		 */
+		dev->minor = i;
+		str = dm_parse_device(dev, str);
+		if (!str)	/* NULL indicates error in parsing, bail */
+			goto error;
+
+		str = dm_parse_targets(dev, str);
+		if (!str)
+			goto error;
+	}
+	return devices;
+error:
+	dm_setup_cleanup(devices);
+	return NULL;
 }
 
 /*
  * Parse the command-line parameters given our kernel, but do not
  * actually try to invoke the DM device now; that is handled by
- * dm_setup_drive after the low-level disk drivers have initialised.
- * dm format is as follows:
- *  dm="name uuid fmode,[table line 1],[table line 2],..."
- * May be used with root=/dev/dm-0 as it always uses the first dm minor.
+ * dm_setup_drives after the low-level disk drivers have initialised.
+ * dm format is described at the top of the file.
+ *
+ * Because dm minor numbers are assigned in ascending order starting with 0,
+ * you can assume the first device is /dev/dm-0, the next device is /dev/dm-1,
+ * and so forth.
  */
-
 static int __init dm_setup(char *str)
 {
-	dm_setup_args_init();
+	struct dm_option opt;
+	unsigned long num_devices;
 
-	str = dm_setup_parse_device_args(str);
 	if (!str) {
 		DMDEBUG("str is NULL");
 		goto parse_fail;
 	}
-
-	/* Target parsing is delayed until we have dynamic memory */
-	dm_setup_args.targets = str;
-
-	printk(KERN_INFO "dm: will configure '%s' on dm-%d\n",
-	       dm_setup_args.name, dm_setup_args.minor);
-
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP))
+		goto parse_fail;
+	if (isdigit(opt.start[0])) {	/* XXX: Optional number field */
+		num_devices = simple_strtoul(opt.start, NULL, 10);
+		str = opt.next;
+	} else {
+		num_devices = 1;
+		/* Don't advance str */
+	}
+	if (num_devices > DM_MAX_DEVICES) {
+		DMDEBUG("too many devices %lu > %d",
+			num_devices, DM_MAX_DEVICES);
+	}
+	dm_setup_args.str = str;
+	dm_setup_args.num_devices = num_devices;
+	DMINFO("will configure %lu devices", num_devices);
 	dm_early_setup = 1;
 	return 1;
 
 parse_fail:
-	printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n");
+	DMWARN("Invalid arguments supplied to dm=.");
 	return 0;
 }
 
-
-static void __init dm_setup_drive(void)
+static void __init dm_setup_drives(void)
 {
 	struct mapped_device *md = NULL;
 	struct dm_table *table = NULL;
 	struct dm_setup_target *target;
-	char *uuid = dm_setup_args.uuid;
+	struct dm_device *dev;
+	char *uuid = NULL;
 	fmode_t fmode = FMODE_READ;
+	struct dm_device *devices;
 
-	/* Finish parsing the targets. */
-	if (dm_setup_parse_targets(dm_setup_args.targets))
-		goto parse_fail;
+	devices = dm_parse_args();
 
-	if (dm_create(dm_setup_args.minor, &md)) {
-		DMDEBUG("failed to create the device");
-		goto dm_create_fail;
-	}
-	DMDEBUG("created device '%s'", dm_device_name(md));
-
-	/* In addition to flagging the table below, the disk must be
-	 * set explicitly ro/rw. */
-	set_disk_ro(dm_disk(md), dm_setup_args.ro);
-
-	if (!dm_setup_args.ro)
-		fmode |= FMODE_WRITE;
-	if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
-		DMDEBUG("failed to create the table");
-		goto dm_table_create_fail;
-	}
-
-	dm_lock_md_type(md);
-	target = dm_setup_args.target;
-	while (target) {
-		DMINFO("adding target '%llu %llu %s %s'",
-		       (unsigned long long) target->begin,
-		       (unsigned long long) target->length, target->type,
-		       target->params);
-		if (dm_table_add_target(table, target->type, target->begin,
-					target->length, target->params)) {
-			DMDEBUG("failed to add the target to the table");
-			goto add_target_fail;
+	for (dev = devices; dev; dev = dev->next) {
+		if (dm_create(dev->minor, &md)) {
+			DMDEBUG("failed to create the device");
+			goto dm_create_fail;
 		}
-		target = target->next;
-	}
+		DMDEBUG("created device '%s'", dm_device_name(md));
 
-	if (dm_table_complete(table)) {
-		DMDEBUG("failed to complete the table");
-		goto table_complete_fail;
-	}
+		/*
+		 * In addition to flagging the table below, the disk must be
+		 * set explicitly ro/rw.
+		 */
+		set_disk_ro(dm_disk(md), dev->ro);
 
-	if (dm_get_md_type(md) == DM_TYPE_NONE) {
+		if (!dev->ro)
+			fmode |= FMODE_WRITE;
+		if (dm_table_create(&table, fmode, dev->target_count, md)) {
+			DMDEBUG("failed to create the table");
+			goto dm_table_create_fail;
+		}
+
+		dm_lock_md_type(md);
+
+		for (target = dev->target; target; target = target->next) {
+			DMINFO("adding target '%llu %llu %s %s'",
+			       (unsigned long long) target->begin,
+			       (unsigned long long) target->length,
+			       target->type, target->params);
+			if (dm_table_add_target(table, target->type,
+						target->begin,
+						target->length,
+						target->params)) {
+				DMDEBUG("failed to add the target"
+					" to the table");
+				goto add_target_fail;
+			}
+		}
+		if (dm_table_complete(table)) {
+			DMDEBUG("failed to complete the table");
+			goto table_complete_fail;
+		}
+
+		/* Suspend the device so that we can bind it to the table. */
+		if (dm_suspend(md, 0)) {
+			DMDEBUG("failed to suspend the device pre-bind");
+			goto suspend_fail;
+		}
+
+		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(table));
+
+		/* Setup md->queue to reflect md's type. */
 		if (dm_setup_md_queue(md, table)) {
 			DMWARN("unable to set up device queue for new table.");
 			goto setup_md_queue_fail;
 		}
-	} else if (dm_get_md_type(md) != dm_table_get_type(table)) {
-		DMWARN("can't change device type after initial table load.");
-		goto setup_md_queue_fail;
-        }
 
-	/* Suspend the device so that we can bind it to the table. */
-	if (dm_suspend(md, 0)) {
-		DMDEBUG("failed to suspend the device pre-bind");
-		goto suspend_fail;
+		/*
+		 * Bind the table to the device. This is the only way
+		 * to associate md->map with the table and set the disk
+		 * capacity directly.
+		 */
+		if (dm_swap_table(md, table)) {  /* should return NULL. */
+			DMDEBUG("failed to bind the device to the table");
+			goto table_bind_fail;
+		}
+
+		/* Finally, resume and the device should be ready. */
+		if (dm_resume(md)) {
+			DMDEBUG("failed to resume the device");
+			goto resume_fail;
+		}
+
+		/* Export the dm device via the ioctl interface */
+		if (!strcmp(DM_NO_UUID, dev->uuid))
+			uuid = NULL;
+		if (dm_ioctl_export(md, dev->name, uuid)) {
+			DMDEBUG("failed to export device with given"
+				" name and uuid");
+			goto export_fail;
+		}
+
+		dm_unlock_md_type(md);
+
+		DMINFO("dm-%d is ready", dev->minor);
 	}
-
-	/* Bind the table to the device. This is the only way to associate
-	 * md->map with the table and set the disk capacity directly. */
-	if (dm_swap_table(md, table)) {  /* should return NULL. */
-		DMDEBUG("failed to bind the device to the table");
-		goto table_bind_fail;
-	}
-
-	/* Finally, resume and the device should be ready. */
-	if (dm_resume(md)) {
-		DMDEBUG("failed to resume the device");
-		goto resume_fail;
-	}
-
-	/* Export the dm device via the ioctl interface */
-	if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
-		uuid = NULL;
-	if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
-		DMDEBUG("failed to export device with given name and uuid");
-		goto export_fail;
-	}
-	printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);
-
-	dm_unlock_md_type(md);
-	dm_setup_cleanup();
+	dm_setup_cleanup(devices);
 	return;
 
 export_fail:
 resume_fail:
 table_bind_fail:
-suspend_fail:
 setup_md_queue_fail:
+suspend_fail:
 table_complete_fail:
 add_target_fail:
 	dm_unlock_md_type(md);
 dm_table_create_fail:
 	dm_put(md);
 dm_create_fail:
-	dm_setup_cleanup();
-parse_fail:
-	printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
-	       dm_setup_args.minor, dm_setup_args.name);
+	DMWARN("starting dm-%d (%s) failed",
+	       dev->minor, dev->name);
+	dm_setup_cleanup(devices);
 }
 
 __setup("dm=", dm_setup);
@@ -421,6 +465,6 @@
 {
 	if (!dm_early_setup)
 		return;
-	printk(KERN_INFO "dm: attempting early device configuration.\n");
-	dm_setup_drive();
+	DMINFO("attempting early device configuration.");
+	dm_setup_drives();
 }
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index d05c292..a44a7e4 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -52,6 +52,7 @@
 		e = rcu_dereference_protected(parent->bpf.effective[type],
 					      lockdep_is_held(&cgroup_mutex));
 		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+		cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
 	}
 }
 
@@ -66,8 +67,8 @@
  * Each cgroup has a set of two pointers for bpf programs; one for eBPF
  * programs it owns, and which is effective for execution.
  *
- * If @prog is %NULL, this function attaches a new program to the cgroup and
- * releases the one that is currently attached, if any. @prog is then made
+ * If @prog is not %NULL, this function attaches a new program to the cgroup
+ * and releases the one that is currently attached, if any. @prog is then made
  * the effective program of type @type in that cgroup.
  *
  * If @prog is %NULL, the currently attached program of type @type is released,
@@ -82,30 +83,63 @@
  *
  * Must be called with cgroup_mutex held.
  */
-void __cgroup_bpf_update(struct cgroup *cgrp,
-			 struct cgroup *parent,
-			 struct bpf_prog *prog,
-			 enum bpf_attach_type type)
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool new_overridable)
 {
-	struct bpf_prog *old_prog, *effective;
+	struct bpf_prog *old_prog, *effective = NULL;
 	struct cgroup_subsys_state *pos;
+	bool overridable = true;
 
-	old_prog = xchg(cgrp->bpf.prog + type, prog);
+	if (parent) {
+		overridable = !parent->bpf.disallow_override[type];
+		effective = rcu_dereference_protected(parent->bpf.effective[type],
+						      lockdep_is_held(&cgroup_mutex));
+	}
 
-	effective = (!prog && parent) ?
-		rcu_dereference_protected(parent->bpf.effective[type],
-					  lockdep_is_held(&cgroup_mutex)) :
-		prog;
+	if (prog && effective && !overridable)
+		/* if parent has non-overridable prog attached, disallow
+		 * attaching new programs to a descendant cgroup
+		 */
+		return -EPERM;
+
+	if (prog && effective && overridable != new_overridable)
+		/* if parent has overridable prog attached, only
+		 * allow overridable programs in descendant cgroups
+		 */
+		return -EPERM;
+
+	old_prog = cgrp->bpf.prog[type];
+
+	if (prog) {
+		overridable = new_overridable;
+		effective = prog;
+		if (old_prog &&
+		    cgrp->bpf.disallow_override[type] == new_overridable)
+			/* disallow attaching non-overridable on top
+			 * of existing overridable in this cgroup
+			 * and vice versa
+			 */
+			return -EPERM;
+	}
+
+	if (!prog && !old_prog)
+		/* report error when trying to detach and nothing is attached */
+		return -ENOENT;
+
+	cgrp->bpf.prog[type] = prog;
 
 	css_for_each_descendant_pre(pos, &cgrp->self) {
 		struct cgroup *desc = container_of(pos, struct cgroup, self);
 
 		/* skip the subtree if the descendant has its own program */
-		if (desc->bpf.prog[type] && desc != cgrp)
+		if (desc->bpf.prog[type] && desc != cgrp) {
 			pos = css_rightmost_descendant(pos);
-		else
+		} else {
 			rcu_assign_pointer(desc->bpf.effective[type],
 					   effective);
+			desc->bpf.disallow_override[type] = !overridable;
+		}
 	}
 
 	if (prog)
@@ -115,6 +149,7 @@
 		bpf_prog_put(old_prog);
 		static_branch_dec(&cgroup_bpf_enabled_key);
 	}
+	return 0;
 }
 
 /**
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e13157f..5e668da 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -747,7 +747,9 @@
 	    attr->kern_version != LINUX_VERSION_CODE)
 		return -EINVAL;
 
-	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
+	    type != BPF_PROG_TYPE_CGROUP_SKB &&
+	    !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	/* plain bpf_prog allocation */
@@ -826,12 +828,13 @@
 
 #ifdef CONFIG_CGROUP_BPF
 
-#define BPF_PROG_ATTACH_LAST_FIELD attach_type
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int bpf_prog_attach(const union bpf_attr *attr)
 {
 	struct bpf_prog *prog;
 	struct cgroup *cgrp;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -839,6 +842,9 @@
 	if (CHECK_ATTR(BPF_PROG_ATTACH))
 		return -EINVAL;
 
+	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+		return -EINVAL;
+
 	switch (attr->attach_type) {
 	case BPF_CGROUP_INET_INGRESS:
 	case BPF_CGROUP_INET_EGRESS:
@@ -853,7 +859,10 @@
 			return PTR_ERR(cgrp);
 		}
 
-		cgroup_bpf_update(cgrp, prog, attr->attach_type);
+		ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+					attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+		if (ret)
+			bpf_prog_put(prog);
 		cgroup_put(cgrp);
 		break;
 
@@ -861,7 +870,7 @@
 		return -EINVAL;
 	}
 
-	return 0;
+	return ret;
 }
 
 #define BPF_PROG_DETACH_LAST_FIELD attach_type
@@ -869,6 +878,7 @@
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
 	struct cgroup *cgrp;
+	int ret;
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -883,7 +893,7 @@
 		if (IS_ERR(cgrp))
 			return PTR_ERR(cgrp);
 
-		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
+		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
 		cgroup_put(cgrp);
 		break;
 
@@ -891,7 +901,7 @@
 		return -EINVAL;
 	}
 
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
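
For reference, the new attach_flags field is driven from user space through the BPF_PROG_ATTACH command. The sketch below is not part of the patch: the helper name is invented, and cgroup_fd/prog_fd are assumed to come from an open() of the cgroup directory and a prior BPF_PROG_LOAD.

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Attach a loaded BPF_PROG_TYPE_CGROUP_SKB program to a cgroup. */
	static int attach_cgroup_prog(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = cgroup_fd;		/* cgroup directory fd */
		attr.attach_bpf_fd = prog_fd;		/* fd from BPF_PROG_LOAD */
		attr.attach_type = BPF_CGROUP_INET_INGRESS;
		attr.attach_flags = BPF_F_ALLOW_OVERRIDE; /* descendants may override */

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}

Without BPF_F_ALLOW_OVERRIDE, __cgroup_bpf_update() returns -EPERM for any later attach in a descendant cgroup, which is the behaviour the disallow_override flag above propagates down the hierarchy.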
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f4..fe158bd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2164,6 +2164,7 @@
 	case BPF_PROG_TYPE_SOCKET_FILTER:
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
+	case BPF_PROG_TYPE_CGROUP_SKB:
 		return true;
 	default:
 		return false;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0fab276..c0644f4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5433,6 +5433,11 @@
 {
 	lockdep_assert_held(&cgroup_mutex);
 
+	if (css->flags & CSS_DYING)
+		return;
+
+	css->flags |= CSS_DYING;
+
 	/*
 	 * This must happen before css is disassociated with its cgroup.
 	 * See seq_css() for details.
@@ -6520,15 +6525,16 @@
 subsys_initcall(cgroup_namespaces_init);
 
 #ifdef CONFIG_CGROUP_BPF
-void cgroup_bpf_update(struct cgroup *cgrp,
-		       struct bpf_prog *prog,
-		       enum bpf_attach_type type)
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
+	int ret;
 
 	mutex_lock(&cgroup_mutex);
-	__cgroup_bpf_update(cgrp, parent, prog, type);
+	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
 	mutex_unlock(&cgroup_mutex);
+	return ret;
 }
 #endif /* CONFIG_CGROUP_BPF */
 
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index bc615c6..fb6017e 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -3,6 +3,8 @@
 # CONFIG_DEVMEM is not set
 # CONFIG_FHANDLE is not set
 # CONFIG_INET_LRO is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
 # CONFIG_OABI_COMPAT is not set
 # CONFIG_SYSVIPC is not set
 # CONFIG_USELIB is not set
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 21a8764..8ac83e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1812,13 +1812,13 @@
 	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
 	mutex_unlock(&cpuhp_state_mutex);
 	if (ret)
-		return ret;
+		goto out;
 
 	if (st->state < target)
 		ret = do_cpu_up(dev->id, target);
 	else
 		ret = do_cpu_down(dev->id, target);
-
+out:
 	unlock_device_hotplug();
 	return ret ? ret : count;
 }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4b7b6cb..a99cd8d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -175,9 +175,9 @@
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-	return test_bit(CS_ONLINE, &cs->flags);
+	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 41f376d..d877aba 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -373,6 +373,7 @@
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 static DEFINE_PER_CPU(bool, is_idle);
+static DEFINE_PER_CPU(bool, is_hotplugging);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -3495,6 +3496,9 @@
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 	struct pmu *pmu = event->pmu;
 
+	if (__this_cpu_read(is_hotplugging))
+		return;
+
 	/*
 	 * If this is a task context, we need to check whether it is
 	 * the current task context of this cpu.  If not it has been
@@ -3619,7 +3623,8 @@
 			return 0;
 		if (cpu_isolated(event_cpu) ||
 			(event->attr.exclude_idle &&
-				per_cpu(is_idle, event_cpu)))
+				per_cpu(is_idle, event_cpu)) ||
+				per_cpu(is_hotplugging, event_cpu))
 			active_event_skip_read = true;
 	}
 
@@ -3649,7 +3654,8 @@
 		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE ||
-			active_event_skip_read) {
+			(active_event_skip_read &&
+			!per_cpu(is_hotplugging, event_cpu))) {
 		struct perf_event_context *ctx = event->ctx;
 		unsigned long flags;
 
@@ -7080,6 +7086,21 @@
 	perf_output_end(&handle);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+	/*
+	 * Due to interrupt latency (AKA "skid"), we may enter the
+	 * kernel before taking an overflow, even if the PMU is only
+	 * counting user events.
+	 * To avoid leaking information to userspace, we must always
+	 * reject kernel samples when exclude_kernel is set.
+	 */
+	if (event->attr.exclude_kernel && !user_mode(regs))
+		return false;
+
+	return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7127,6 +7148,12 @@
 	}
 
 	/*
+	 * For security, drop the skid kernel samples if necessary.
+	 */
+	if (!sample_is_allowed(event, regs))
+		return ret;
+
+	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
 	 */
@@ -10711,6 +10738,8 @@
 		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
 
 		INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
+		per_cpu(is_hotplugging, cpu) = false;
+		per_cpu(is_idle, cpu) = false;
 	}
 }
 
@@ -10734,19 +10763,10 @@
 static void
 check_hotplug_start_event(struct perf_event *event)
 {
-	if (event->attr.type == PERF_TYPE_SOFTWARE) {
-		switch (event->attr.config) {
-		case PERF_COUNT_SW_CPU_CLOCK:
-			cpu_clock_event_start(event, 0);
-			break;
-		case PERF_COUNT_SW_TASK_CLOCK:
-			break;
-		default:
-			if (event->pmu->start)
-				event->pmu->start(event, 0);
-			break;
-		}
-	}
+	if (event->pmu->events_across_hotplug &&
+	    event->attr.type == PERF_TYPE_SOFTWARE &&
+	    event->pmu->start)
+		event->pmu->start(event, 0);
 }
 
 static int perf_event_start_swevents(unsigned int cpu)
@@ -10767,6 +10787,7 @@
 		mutex_unlock(&ctx->mutex);
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
+	per_cpu(is_hotplugging, cpu) = false;
 	return 0;
 }
 
@@ -10783,22 +10804,13 @@
 			   struct perf_cpu_context *cpuctx,
 			   struct perf_event_context *ctx)
 {
-	if (!event->pmu->events_across_hotplug) {
+	if (event->pmu->events_across_hotplug &&
+	    event->attr.type == PERF_TYPE_SOFTWARE &&
+	    event->pmu->stop)
+		event->pmu->stop(event, PERF_EF_UPDATE);
+	else if (!event->pmu->events_across_hotplug)
 		__perf_remove_from_context(event, cpuctx,
 			ctx, (void *)DETACH_GROUP);
-	} else if (event->attr.type == PERF_TYPE_SOFTWARE) {
-		switch (event->attr.config) {
-		case PERF_COUNT_SW_CPU_CLOCK:
-			cpu_clock_event_stop(event, 0);
-			break;
-		case PERF_COUNT_SW_TASK_CLOCK:
-			break;
-		default:
-			if (event->pmu->stop)
-				event->pmu->stop(event, 0);
-			break;
-		}
-	}
 }
 
 static void __perf_event_exit_context(void *__info)
@@ -10837,6 +10849,7 @@
 
 int perf_event_exit_cpu(unsigned int cpu)
 {
+	per_cpu(is_hotplugging, cpu) = true;
 	perf_event_exit_cpu_context(cpu);
 	return 0;
 }
@@ -10880,6 +10893,24 @@
 	.notifier_call = event_idle_notif,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int perf_cpu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
+				"PERF/CORE/CPUHP_AP_PERF_ONLINE",
+				perf_event_start_swevents,
+				perf_event_exit_cpu);
+	if (ret)
+		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
+		       ret);
+
+	return ret;
+}
+#else
+static int perf_cpu_hp_init(void) { return 0; }
+#endif
 
 void __init perf_event_init(void)
 {
@@ -10896,6 +10927,8 @@
 	perf_event_init_cpu(smp_processor_id());
 	idle_notifier_register(&perf_event_idle_nb);
 	register_reboot_notifier(&perf_reboot_notifier);
+	ret = perf_cpu_hp_init();
+	WARN(ret, "core perf_cpu_hp_init() failed with: %d", ret);
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
@@ -10949,22 +10982,6 @@
 }
 device_initcall(perf_event_sysfs_init);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int perf_cpu_hp_init(void)
-{
-	int ret;
-
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ONLINE,
-				"PERF/CORE/AP_PERF_ONLINE",
-				perf_event_start_swevents,
-				perf_event_exit_cpu);
-	if (ret)
-		pr_err("CPU hotplug notifier for perf core could not be registered: %d\n",
-		       ret);
-	return ret;
-}
-subsys_initcall(perf_cpu_hp_init);
-#endif
 
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a5caece..f39a7be9 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -57,19 +57,25 @@
 }
 
 
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+		   const struct cred *ptracer_cred)
+{
+	BUG_ON(!list_empty(&child->ptrace_entry));
+	list_add(&child->ptrace_entry, &new_parent->ptraced);
+	child->parent = new_parent;
+	child->ptracer_cred = get_cred(ptracer_cred);
+}
+
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
  *
  * Must be called with the tasklist lock write-held.
  */
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 {
-	BUG_ON(!list_empty(&child->ptrace_entry));
-	list_add(&child->ptrace_entry, &new_parent->ptraced);
-	child->parent = new_parent;
 	rcu_read_lock();
-	child->ptracer_cred = get_cred(__task_cred(new_parent));
+	__ptrace_link(child, new_parent, __task_cred(new_parent));
 	rcu_read_unlock();
 }
 
@@ -383,7 +389,7 @@
 		flags |= PT_SEIZED;
 	task->ptrace = flags;
 
-	__ptrace_link(task, current);
+	ptrace_link(task, current);
 
 	/* SEIZE doesn't trap tracee on attach */
 	if (!seize)
@@ -456,7 +462,7 @@
 		 */
 		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 			current->ptrace = PT_PTRACED;
-			__ptrace_link(current, current->real_parent);
+			ptrace_link(current, current->real_parent);
 		}
 	}
 	write_unlock_irq(&tasklist_lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b6fb796..30a1b34 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3355,6 +3355,7 @@
 	curr->sched_class->task_tick(rq, curr, 0);
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
+	sched_freq_tick(cpu);
 	cpufreq_update_util(rq, 0);
 
 	early_notif = early_detection_notify(rq, wallclock);
@@ -3380,8 +3381,6 @@
 
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
-
-	sched_freq_tick(cpu);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -8053,20 +8052,6 @@
 	int ret;
 
 	set_cpu_active(cpu, false);
-	/*
-	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-	 * users of this state to go away such that all new such users will
-	 * observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
 
 	if (!sched_smp_initialized)
 		return 0;
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index e594804..b140e55 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -39,6 +39,7 @@
 	cpumask_t cpu_mask;
 	unsigned int need_cpus;
 	unsigned int task_thres;
+	unsigned int max_nr;
 	s64 need_ts;
 	struct list_head lru;
 	bool pending;
@@ -458,47 +459,25 @@
 
 /* ==================== runqueue based core count =================== */
 
-#define NR_RUNNING_TOLERANCE 5
-
 static void update_running_avg(void)
 {
 	int avg, iowait_avg, big_avg;
+	int max_nr, big_max_nr;
 	struct cluster_data *cluster;
 	unsigned int index = 0;
 
-	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
-
-	/*
-	 * Round up to the next integer if the average nr running tasks
-	 * is within NR_RUNNING_TOLERANCE/100 of the next integer.
-	 * If normal rounding up is used, it will allow a transient task
-	 * to trigger online event. By the time core is onlined, the task
-	 * has finished.
-	 * Rounding to closest suffers same problem because scheduler
-	 * might only provide running stats per jiffy, and a transient
-	 * task could skew the number for one jiffy. If core control
-	 * samples every 2 jiffies, it will observe 0.5 additional running
-	 * average which rounds up to 1 task.
-	 */
-	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
-	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
+	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+				 &max_nr, &big_max_nr);
 
 	for_each_cluster(cluster, index) {
 		if (!cluster->inited)
 			continue;
-		/*
-		 * Big cluster only need to take care of big tasks, but if
-		 * there are not enough big cores, big tasks need to be run
-		 * on little as well. Thus for little's runqueue stat, it
-		 * has to use overall runqueue average, or derive what big
-		 * tasks would have to be run on little. The latter approach
-		 * is not easy to get given core control reacts much slower
-		 * than scheduler, and can't predict scheduler's behavior.
-		 */
 		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
 	}
 }
 
+#define MAX_NR_THRESHOLD	4
 /* adjust needed CPUs based on current runqueue information */
 static unsigned int apply_task_need(const struct cluster_data *cluster,
 				    unsigned int new_need)
@@ -509,7 +488,15 @@
 
 	/* only unisolate more cores if there are tasks to run */
 	if (cluster->nrrun > new_need)
-		return new_need + 1;
+		new_need = new_need + 1;
+
+	/*
+	 * We don't want tasks to be overcrowded in a cluster.
+	 * If any CPU has more than MAX_NR_THRESHOLD in the last
+	 * window, bring another CPU to help out.
+	 */
+	if (cluster->max_nr > MAX_NR_THRESHOLD)
+		new_need = new_need + 1;
 
 	return new_need;
 }
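
To make the new heuristic concrete, the added checks amount to the stand-alone sketch below (the function and parameter names are invented; nrrun and max_nr mirror cluster->nrrun and cluster->max_nr as filled in by update_running_avg()):

	#define MAX_NR_THRESHOLD	4

	/* Simplified model of the need adjustment added above. */
	static unsigned int sketch_task_need(unsigned int nrrun,
					     unsigned int max_nr,
					     unsigned int new_need)
	{
		if (nrrun > new_need)		/* tasks still waiting: one more CPU */
			new_need++;
		if (max_nr > MAX_NR_THRESHOLD)	/* a CPU was overcrowded last window */
			new_need++;
		return new_need;
	}

For example, sketch_task_need(3, 6, 2) returns 4: one extra CPU because three runnable tasks exceed the two CPUs already needed, and one more because some CPU saw six runnable tasks in the last window.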
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 11e9705..ba5e3e2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -27,6 +27,8 @@
  *  of the License.
  */
 
+#include "sched.h"
+
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -51,6 +53,27 @@
 }
 
 /**
+ * drop_nopreempt_cpus - remove a cpu from the mask if it is likely
+ *			 non-preemptible
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+
+		if (task_may_not_preempt(task, cpu))
+			cpumask_clear_cpu(cpu, lowest_mask);
+
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+
+/**
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
@@ -70,9 +93,11 @@
 {
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+retry:
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
 		int skip = 0;
@@ -108,7 +133,8 @@
 
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
-
+			if (drop_nopreempts)
+				drop_nopreempt_cpus(lowest_mask);
 			/*
 			 * We have to ensure that we have at least one bit
 			 * still set in the array, since the map could have
@@ -123,7 +149,14 @@
 
 		return 1;
 	}
-
+	/*
+	 * If no CPU was found after dropping non-preemptible CPUs, retry so
+	 * we can find the lowest-priority target and avoid priority inversion.
+	 */
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
 	return 0;
 }
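
The drop-then-retry flow in cpupri_find() can be read as a two-pass search. Below is an illustrative user-space sketch only; cpu_is_softirq_busy() is a stand-in for task_may_not_preempt() applied to the task currently running on that CPU:

	#include <stdbool.h>

	extern bool cpu_is_softirq_busy(int cpu);	/* stand-in predicate */

	/* Prefer CPUs that can preempt quickly; fall back to any candidate. */
	static int sketch_pick_cpu(const int *candidates, int n)
	{
		for (int pass = 0; pass < 2; pass++) {
			for (int i = 0; i < n; i++) {
				if (pass == 0 && cpu_is_softirq_busy(candidates[i]))
					continue;
				return candidates[i];
			}
		}
		return -1;	/* nothing suitable at all */
	}

The second pass mirrors the goto retry above: rather than fail outright, the search falls back to the lowest-priority CPU even if it is momentarily non-preemptible.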
 
diff --git a/kernel/sched/energy.c b/kernel/sched/energy.c
index 05dd2cb..c32defa 100644
--- a/kernel/sched/energy.c
+++ b/kernel/sched/energy.c
@@ -18,14 +18,15 @@
  */
 #define pr_fmt(fmt) "sched-energy: " fmt
 
-#define DEBUG
-
 #include <linux/gfp.h>
 #include <linux/of.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/sched_energy.h>
 #include <linux/stddef.h>
+#include <linux/cpu.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
 
 struct sched_group_energy *sge_array[NR_CPUS][NR_SD_LEVELS];
 
@@ -84,14 +85,21 @@
 
 			sge = kcalloc(1, sizeof(struct sched_group_energy),
 				      GFP_NOWAIT);
+			if (!sge)
+				goto out;
 
 			nstates = (prop->length / sizeof(u32)) / 2;
 			cap_states = kcalloc(nstates,
 					     sizeof(struct capacity_state),
 					     GFP_NOWAIT);
+			if (!cap_states) {
+				kfree(sge);
+				goto out;
+			}
 
 			for (i = 0, val = prop->value; i < nstates; i++) {
-				cap_states[i].cap = be32_to_cpup(val++);
+				cap_states[i].cap = SCHED_CAPACITY_SCALE;
+				cap_states[i].frequency = be32_to_cpup(val++);
 				cap_states[i].power = be32_to_cpup(val++);
 			}
 
@@ -101,6 +109,8 @@
 			prop = of_find_property(cp, "idle-cost-data", NULL);
 			if (!prop || !prop->value) {
 				pr_warn("No idle-cost data, skipping sched_energy init\n");
+				kfree(sge);
+				kfree(cap_states);
 				goto out;
 			}
 
@@ -108,6 +118,11 @@
 			idle_states = kcalloc(nstates,
 					      sizeof(struct idle_state),
 					      GFP_NOWAIT);
+			if (!idle_states) {
+				kfree(sge);
+				kfree(cap_states);
+				goto out;
+			}
 
 			for (i = 0, val = prop->value; i < nstates; i++)
 				idle_states[i].power = be32_to_cpup(val++);
@@ -125,3 +140,166 @@
 out:
 	free_resources();
 }
+
+static int sched_energy_probe(struct platform_device *pdev)
+{
+	unsigned long max_freq = 0;
+	int max_efficiency = INT_MIN;
+	int cpu;
+	unsigned long *max_frequencies = NULL;
+	int ret;
+
+	if (!sched_is_energy_aware())
+		return 0;
+
+	max_frequencies = kmalloc_array(nr_cpu_ids, sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!max_frequencies) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/*
+	 * Find system max possible frequency and max frequencies for each
+	 * CPUs.
+	 */
+	for_each_possible_cpu(cpu) {
+		struct device *cpu_dev;
+		struct dev_pm_opp *opp;
+		int efficiency = arch_get_cpu_efficiency(cpu);
+
+		max_efficiency = max(efficiency, max_efficiency);
+
+		cpu_dev = get_cpu_device(cpu);
+		if (IS_ERR_OR_NULL(cpu_dev)) {
+			if (!cpu_dev)
+				ret = -EINVAL;
+			else
+				ret = PTR_ERR(cpu_dev);
+			goto exit;
+		}
+
+		max_frequencies[cpu] = ULONG_MAX;
+
+		rcu_read_lock();
+		opp = dev_pm_opp_find_freq_floor(cpu_dev,
+						 &max_frequencies[cpu]);
+		if (IS_ERR_OR_NULL(opp)) {
+			if (!opp || PTR_ERR(opp) == -ENODEV)
+				ret = -EPROBE_DEFER;
+			else
+				ret = PTR_ERR(opp);
+			goto exit_rcu_unlock;
+		}
+		rcu_read_unlock();
+
+		/* Convert HZ to KHZ */
+		max_frequencies[cpu] /= 1000;
+		max_freq = max(max_freq, max_frequencies[cpu]);
+	}
+
+	/* update capacity in energy model */
+	for_each_possible_cpu(cpu) {
+		unsigned long cpu_max_cap;
+		struct sched_group_energy *sge_l0, *sge;
+		int efficiency = arch_get_cpu_efficiency(cpu);
+
+		cpu_max_cap = DIV_ROUND_UP(SCHED_CAPACITY_SCALE *
+					   max_frequencies[cpu], max_freq);
+		cpu_max_cap = DIV_ROUND_UP(cpu_max_cap * efficiency,
+					   max_efficiency);
+
+		/*
+		 * All the cap_states have same frequency table so use
+		 * SD_LEVEL0's.
+		 */
+		sge_l0 = sge_array[cpu][SD_LEVEL0];
+		if (sge_l0 && sge_l0->nr_cap_states > 0) {
+			int i;
+			int ncapstates = sge_l0->nr_cap_states;
+
+			for (i = 0; i < ncapstates; i++) {
+				int sd_level;
+				unsigned long freq, cap;
+
+				/*
+				 * Energy model can contain more frequency
+				 * steps than actual for multiple speedbin
+				 * support. Ceil the max capacity with actual
+				 * one.
+				 */
+				freq = min(sge_l0->cap_states[i].frequency,
+					   max_frequencies[cpu]);
+				cap = DIV_ROUND_UP(cpu_max_cap * freq,
+						   max_frequencies[cpu]);
+
+				for_each_possible_sd_level(sd_level) {
+					sge = sge_array[cpu][sd_level];
+					if (!sge)
+						break;
+					sge->cap_states[i].cap = cap;
+				}
+
+				dev_dbg(&pdev->dev,
+					"cpu=%d freq=%ld cap=%ld power_d0=%ld\n",
+					cpu, freq, sge_l0->cap_states[i].cap,
+					sge_l0->cap_states[i].power);
+			}
+
+			dev_info(&pdev->dev,
+				"cpu=%d eff=%d [freq=%ld cap=%ld power_d0=%ld] -> [freq=%ld cap=%ld power_d0=%ld]\n",
+				cpu, efficiency,
+				sge_l0->cap_states[0].frequency,
+				sge_l0->cap_states[0].cap,
+				sge_l0->cap_states[0].power,
+				sge_l0->cap_states[ncapstates - 1].frequency,
+				sge_l0->cap_states[ncapstates - 1].cap,
+				sge_l0->cap_states[ncapstates - 1].power
+				);
+		}
+
+
+		dev_dbg(&pdev->dev,
+			"cpu=%d efficiency=%d max_frequency=%ld max_efficiency=%d cpu_max_capacity=%ld\n",
+			cpu, efficiency, max_frequencies[cpu], max_efficiency,
+			cpu_max_cap);
+
+		arch_update_cpu_capacity(cpu);
+	}
+
+	kfree(max_frequencies);
+
+	dev_info(&pdev->dev, "Sched-energy-costs capacity updated\n");
+	return 0;
+
+exit_rcu_unlock:
+	rcu_read_unlock();
+
+exit:
+	if (ret != -EPROBE_DEFER)
+		dev_err(&pdev->dev, "error=%d\n", ret);
+
+	kfree(max_frequencies);
+	return ret;
+}
+
+static const struct of_device_id of_sched_energy_dt[] = {
+	{
+		.compatible = "sched-energy",
+	},
+	{ }
+};
+
+static struct platform_driver energy_driver = {
+	.driver = {
+		.name = "sched-energy",
+		.of_match_table = of_sched_energy_dt,
+	},
+	.probe = sched_energy_probe,
+};
+
+static int __init sched_energy_init(void)
+{
+	return platform_driver_register(&energy_driver);
+}
+subsys_initcall(sched_energy_init);
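
The capacity update in sched_energy_probe() is two rounded-up ratios applied in sequence; the numbers below are invented purely to show the arithmetic (a little CPU at 1.8 GHz against a 2.8 GHz system maximum, with efficiencies 1024 vs 1638):

	#include <stdio.h>

	#define SCHED_CAPACITY_SCALE	1024UL
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long cpu_max_freq = 1800000;	/* kHz, this CPU */
		unsigned long sys_max_freq = 2800000;	/* kHz, fastest CPU */
		unsigned long efficiency = 1024;	/* this CPU */
		unsigned long max_efficiency = 1638;	/* most efficient CPU */

		/* max capacity of this CPU: frequency- then efficiency-scaled */
		unsigned long cpu_max_cap =
			DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpu_max_freq, sys_max_freq);
		cpu_max_cap = DIV_ROUND_UP(cpu_max_cap * efficiency, max_efficiency);

		/* capacity of one OPP, e.g. 1.2 GHz, relative to this CPU's max */
		unsigned long opp_freq = 1200000;
		unsigned long opp_cap =
			DIV_ROUND_UP(cpu_max_cap * opp_freq, cpu_max_freq);

		printf("cpu_max_cap=%lu opp_cap=%lu\n", cpu_max_cap, opp_cap);
		return 0;
	}

With these sample values cpu_max_cap works out to 412 and the 1.2 GHz OPP to 275, which is the kind of per-cap_states value the probe writes back into every sched-domain level's energy table.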
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ccd3a7..e4b706d 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5406,28 +5406,12 @@
 	return p->se.avg.util_avg;
 }
 
-#define SCHED_ENABLE_WAKER_WAKEE	0
-
-static unsigned int sched_small_wakee_task_util = 102; /* ~10% of max cap */
-static unsigned int sched_big_waker_task_util = 256;  /* 25% of max cap */
-
-static inline bool
-wake_on_waker_sibling(struct task_struct *p)
-{
-	return SCHED_ENABLE_WAKER_WAKEE &&
-	       task_util(current) > sched_big_waker_task_util &&
-	       task_util(p) < sched_small_wakee_task_util;
-}
-
-#define sysctl_sched_prefer_sync_wakee_to_waker 0
-
 static inline bool
 bias_to_waker_cpu(struct task_struct *p, int cpu)
 {
-	return sysctl_sched_prefer_sync_wakee_to_waker &&
-	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
-	       cpu_active(cpu) && !cpu_isolated(cpu);
+	return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
+	       cpu_active(cpu) && !cpu_isolated(cpu) &&
+	       task_fits_max(p, cpu);
 }
 
 static int calc_util_delta(struct energy_env *eenv, int cpu)
@@ -5553,6 +5537,9 @@
 	for_each_cpu(i, sched_group_cpus(sg))
 		state = min(state, idle_get_state_idx(cpu_rq(i)));
 
+	if (unlikely(state == INT_MAX))
+		return -EINVAL;
+
 	/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
 	state++;
 
@@ -5638,6 +5625,12 @@
 				}
 
 				idle_idx = group_idle_state(sg);
+				if (unlikely(idle_idx < 0))
+					return idle_idx;
+
+				if (idle_idx > sg->sge->nr_idle_states - 1)
+					idle_idx = sg->sge->nr_idle_states - 1;
+
 				group_util = group_norm_util(eenv, sg);
 				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
 
@@ -6729,10 +6722,8 @@
 	unsigned int target_cpu_util = UINT_MAX;
 	long target_cpu_new_util_cum = LONG_MAX;
 	struct cpumask *rtg_target = NULL;
-	bool wake_on_sibling = false;
 	int isolated_candidate = -1;
 	bool need_idle;
-	bool skip_ediff = false;
 	enum sched_boost_policy placement_boost = task_sched_boost(p) ?
 				sched_boost_policy() : SCHED_BOOST_NONE;
 
@@ -6745,10 +6736,17 @@
 	sg_target = sg;
 
 	sync = sync && sysctl_sched_sync_hint_enable;
+
 	curr_util = boosted_task_util(cpu_rq(cpu)->curr);
 
 	need_idle = wake_to_idle(p);
 
+	if (sync && bias_to_waker_cpu(p, cpu)) {
+		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
+					task_util(p), cpu, cpu, 0, need_idle);
+		return cpu;
+	}
+
 	if (sysctl_sched_is_big_little) {
 		struct related_thread_group *grp;
 
@@ -6756,17 +6754,8 @@
 		grp = task_related_thread_group(p);
 		rcu_read_unlock();
 
-		if (grp && grp->preferred_cluster) {
+		if (grp && grp->preferred_cluster)
 			rtg_target = &grp->preferred_cluster->cpus;
-		} else if (sync && wake_on_waker_sibling(p)) {
-			if (bias_to_waker_cpu(p, cpu)) {
-				trace_sched_task_util_bias_to_waker(p,
-						task_cpu(p), task_util(p), cpu,
-						cpu, 0, need_idle);
-				return cpu;
-			}
-			wake_on_sibling = true;
-		}
 
 		task_util_boosted = boosted_task_util(p);
 
@@ -6817,21 +6806,6 @@
 							     rtg_target))
 						break;
 					continue;
-				} else if (wake_on_sibling) {
-					/* Skip non-sibling CPUs */
-					if (!cpumask_test_cpu(cpu,
-							sched_group_cpus(sg)))
-						continue;
-				} else if (sync && curr_util >=
-						   task_util_boosted) {
-					if (cpumask_test_cpu(cpu,
-							sched_group_cpus(sg))) {
-						if (!cpumask_test_cpu(task_cpu(p),
-								      sched_group_cpus(sg)))
-							skip_ediff = true;
-						break;
-					}
-					continue;
 				}
 
 				target_max_cap = capacity_of(max_cap_cpu);
@@ -6900,8 +6874,6 @@
 				       idle_get_state_idx(cpu_rq(i));
 
 			if (!need_idle &&
-			    (!wake_on_sibling ||
-			     (wake_on_sibling && i != cpu)) &&
 			    add_capacity_margin(new_util_cum) <
 			    capacity_curr_of(i)) {
 				if (sysctl_sched_cstate_aware) {
@@ -6935,9 +6907,7 @@
 					target_cpu = i;
 					break;
 				}
-			} else if (!need_idle &&
-				   (!wake_on_sibling ||
-				    (wake_on_sibling && i != cpu))) {
+			} else if (!need_idle) {
 				/*
 				 * At least one CPU other than target_cpu is
 				 * going to raise CPU's OPP higher than current
@@ -7008,13 +6978,6 @@
 		}
 	}
 
-	if (wake_on_sibling && target_cpu != -1) {
-		trace_sched_task_util_bias_to_waker(p, task_cpu(p),
-						task_util(p), target_cpu,
-						target_cpu, 0, need_idle);
-		return target_cpu;
-	}
-
 	if (target_cpu != task_cpu(p) && !cpu_isolated(task_cpu(p))) {
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
@@ -7050,8 +7013,7 @@
 			return target_cpu;
 		}
 
-		if (!skip_ediff)
-			ediff = energy_diff(&eenv);
+		ediff = energy_diff(&eenv);
 
 		if (!sysctl_sched_cstate_aware) {
 			if (ediff >= 0) {
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 6c28298..24b60d7 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -783,20 +783,6 @@
 		return 0;
 }
 
-void free_task_load_ptrs(struct task_struct *p)
-{
-	kfree(p->ravg.curr_window_cpu);
-	kfree(p->ravg.prev_window_cpu);
-
-	/*
-	 * update_task_ravg() can be called for exiting tasks. While the
-	 * function itself ensures correct behavior, the corresponding
-	 * trace event requires that these pointers be NULL.
-	 */
-	p->ravg.curr_window_cpu = NULL;
-	p->ravg.prev_window_cpu = NULL;
-}
-
 /* Return task demand in percentage scale */
 unsigned int pct_task_load(struct task_struct *p)
 {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2703e0d..ec90319 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -6,6 +6,7 @@
 #include "sched.h"
 #include "walt.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
@@ -1489,11 +1490,30 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
 
 #ifdef CONFIG_SCHED_HMP
 	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1509,7 +1529,17 @@
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1530,18 +1560,22 @@
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (energy_aware() ||
-	    (curr && unlikely(rt_task(curr)) &&
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (energy_aware() || may_not_preempt ||
+	     (unlikely(rt_task(curr)) &&
 	     (tsk_nr_cpus_allowed(curr) < 2 ||
 	      curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
-		 * Don't bother moving it if the destination CPU is
-		 * not running a lower priority task.
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		   (may_not_preempt ||
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
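
task_may_not_preempt() above is, at its core, a bitmask test; the sketch below restates just that part in isolation. LONG_SOFTIRQ_MASK is defined elsewhere in the patch series, so the value and softirq numbers used here are assumptions for illustration:

	#include <stdbool.h>
	#include <stdint.h>

	#define NET_TX_SOFTIRQ	2
	#define NET_RX_SOFTIRQ	3
	#define BLOCK_SOFTIRQ	4
	/* assumed: the "long-running" softirqs the patch cares about */
	#define LONG_SOFTIRQ_MASK \
		((1U << NET_TX_SOFTIRQ) | (1U << NET_RX_SOFTIRQ) | (1U << BLOCK_SOFTIRQ))

	/*
	 * A CPU is a poor target for an RT wakeup when a long softirq is
	 * either being handled right now or already pending there.
	 */
	static bool cpu_in_long_softirq(uint32_t active, uint32_t pending)
	{
		return ((active | pending) & LONG_SOFTIRQ_MASK) != 0;
	}

The real helper additionally requires that the task currently on the CPU is either ksoftirqd or is running with softirq processing accounted in its preempt count, so a CPU is not penalised merely because unrelated softirqs are pending underneath some other task.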
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5220511..d4a0612 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1700,6 +1700,13 @@
 }
 #endif
 
+#ifndef arch_update_cpu_capacity
+static __always_inline
+void arch_update_cpu_capacity(int cpu)
+{
+}
+#endif
+
 #ifdef CONFIG_SMP
 static inline unsigned long capacity_of(int cpu)
 {
@@ -2089,6 +2096,11 @@
 		__release(rq2->lock);
 }
 
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+
 #else /* CONFIG_SMP */
 
 /*
@@ -2451,6 +2463,11 @@
 	return max_possible_capacity != min_max_possible_capacity;
 }
 
+static inline bool is_max_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
@@ -2676,6 +2693,15 @@
 extern void clear_ed_task(struct task_struct *p, struct rq *rq);
 extern bool early_detection_notify(struct rq *rq, u64 wallclock);
 
+#ifdef CONFIG_SCHED_HMP
+extern unsigned int power_cost(int cpu, u64 demand);
+#else
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return cpu_max_possible_capacity(cpu);
+}
+#endif
+
 #else	/* CONFIG_SCHED_WALT */
 
 struct hmp_sched_stats;
@@ -2719,6 +2745,8 @@
 	return 0;
 }
 
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
 static inline void
 inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
 
@@ -2828,6 +2856,11 @@
 	return 0;
 }
 
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
 #endif	/* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_SCHED_HMP
@@ -2842,7 +2875,6 @@
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
 extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
 					struct task_struct *p, s64 delta);
-extern unsigned int power_cost(int cpu, u64 demand);
 extern unsigned int cpu_temp(int cpu);
 extern void pre_big_task_count_change(const struct cpumask *cpus);
 extern void post_big_task_count_change(const struct cpumask *cpus);
@@ -2899,11 +2931,6 @@
 static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
 				      struct task_struct *p, s64 delta) { }
 
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
 static inline unsigned int cpu_temp(int cpu)
 {
 	return 0;
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index f820094..7f86c0b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -27,11 +27,13 @@
 static DEFINE_PER_CPU(u64, last_time);
 static DEFINE_PER_CPU(u64, nr_big_prod_sum);
 static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
 
 static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
 /**
  * sched_get_nr_running_avg
  * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
@@ -41,7 +43,8 @@
  * Obtains the average nr_running value since the last poll.
  * This function may not be called concurrently with itself
  */
-void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+			      unsigned int *max_nr, unsigned int *big_max_nr)
 {
 	int cpu;
 	u64 curr_time = sched_clock();
@@ -51,6 +54,8 @@
 	*avg = 0;
 	*iowait_avg = 0;
 	*big_avg = 0;
+	*max_nr = 0;
+	*big_max_nr = 0;
 
 	if (!diff)
 		return;
@@ -79,17 +84,35 @@
 		per_cpu(nr_big_prod_sum, cpu) = 0;
 		per_cpu(iowait_prod_sum, cpu) = 0;
 
+		if (*max_nr < per_cpu(nr_max, cpu))
+			*max_nr = per_cpu(nr_max, cpu);
+
+		if (is_max_capacity_cpu(cpu)) {
+			if (*big_max_nr < per_cpu(nr_max, cpu))
+				*big_max_nr = per_cpu(nr_max, cpu);
+		}
+
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 	}
 
 	diff = curr_time - last_get_time;
 	last_get_time = curr_time;
 
-	*avg = (int)div64_u64(tmp_avg * 100, diff);
-	*big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
-	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+	/*
+	 * Any task running on the BIG cluster and BIG tasks running on the
+	 * little cluster contribute to big_avg. Small or medium tasks can also
+	 * run on the BIG cluster when co-location and scheduler boost features
+	 * are activated. We don't want these tasks to downmigrate to the little
+	 * cluster when BIG CPUs are available but isolated. Round up the
+	 * average values so that core_ctl aggressively unisolates BIG CPUs.
+	 */
+	*avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+	*big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+	*iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
 
-	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+				       *max_nr, *big_max_nr);
 
 	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
 	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
@@ -122,6 +145,9 @@
 
 	BUG_ON((s64)per_cpu(nr, cpu) < 0);
 
+	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
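
The move from a percent-scaled average to DIV64_U64_ROUNDUP is easiest to see with numbers; the sample values below are invented:

	#include <stdio.h>
	#include <stdint.h>

	#define DIV64_U64_ROUNDUP(X, Y)	(((X) + (Y) - 1) / (Y))

	int main(void)
	{
		/* ~2.05 CPUs' worth of runnable time over a 10 ms window */
		uint64_t nr_prod_sum = 20500000;	/* ns weighted by nr_running */
		uint64_t diff        = 10000000;	/* ns since the last poll */

		printf("truncated : %llu\n",
		       (unsigned long long)(nr_prod_sum / diff));		/* 2 */
		printf("rounded up: %llu\n",
		       (unsigned long long)DIV64_U64_ROUNDUP(nr_prod_sum, diff));	/* 3 */
		return 0;
	}

Rounding up means even a fractional average keeps an extra CPU in the request, which is what the comment above argues for: core_ctl should err on the side of unisolating BIG CPUs rather than letting boosted tasks pile up on the little cluster.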
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index b89abbd..e4bd0f4 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -152,6 +152,8 @@
  * IMPORTANT: Initialize both copies to same value!!
  */
 
+static __read_mostly bool sched_predl;
+
 __read_mostly unsigned int sched_ravg_hist_size = 5;
 __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
 
@@ -231,6 +233,16 @@
 
 early_param("sched_ravg_window", set_sched_ravg_window);
 
+static int __init set_sched_predl(char *str)
+{
+	unsigned int predl;
+
+	get_option(&str, &predl);
+	sched_predl = !!predl;
+	return 0;
+}
+early_param("sched_predl", set_sched_predl);
+
 void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 {
 	inc_nr_big_task(&rq->hmp_stats, p);
@@ -402,7 +414,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+	if (!is_max_capacity_cpu(cpu))
 		return rq->hmp_stats.nr_big_tasks;
 
 	return rq->nr_running;
@@ -923,7 +935,7 @@
 	if (!sync_cpu_available) {
 		rq->window_start = 1;
 		sync_cpu_available = 1;
-		atomic_set(&walt_irq_work_lastq_ws, rq->window_start);
+		atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
 
@@ -1096,6 +1108,9 @@
 {
 	u32 new, old;
 
+	if (!sched_predl)
+		return;
+
 	if (is_idle_task(p) || exiting_task(p))
 		return;
 
@@ -1618,6 +1633,9 @@
 	int bidx;
 	u32 pred_demand;
 
+	if (!sched_predl)
+		return 0;
+
 	bidx = busy_to_bucket(runtime);
 	pred_demand = get_pred_busy(rq, p, bidx, runtime);
 	bucket_increase(p->ravg.busy_buckets, bidx);
@@ -1916,7 +1934,7 @@
 	if (old_window_start == rq->window_start)
 		return;
 
-	result = atomic_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
+	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
 				   rq->window_start);
 	if (result == old_window_start)
 		irq_work_queue(&rq->irq_work);
@@ -2007,6 +2025,20 @@
 	p->misfit = false;
 }
 
+void free_task_load_ptrs(struct task_struct *p)
+{
+	kfree(p->ravg.curr_window_cpu);
+	kfree(p->ravg.prev_window_cpu);
+
+	/*
+	 * update_task_ravg() can be called for exiting tasks. While the
+	 * function itself ensures correct behavior, the corresponding
+	 * trace event requires that these pointers be NULL.
+	 */
+	p->ravg.curr_window_cpu = NULL;
+	p->ravg.prev_window_cpu = NULL;
+}
+
 void reset_task_stats(struct task_struct *p)
 {
 	u32 sum = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 744fa61..bde8e33 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+/*
+ * active_softirqs -- per-CPU mask of softirqs that are currently being
+ * handled, with the expectation that approximate answers are acceptable
+ * and therefore no synchronization is required.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -264,6 +271,7 @@
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -293,6 +301,7 @@
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	rcu_bh_qs();
 	local_irq_disable();
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9792763..2aef653 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,7 +49,6 @@
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
 #include <linux/freezer.h>
-#include <linux/delay.h>
 
 #include <asm/uaccess.h>
 
@@ -1579,41 +1578,22 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
-				 struct hrtimer_cpu_base *new_base,
-				 unsigned int i, bool wait,
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base,
 				 bool remove_pinned)
 {
 	struct hrtimer *timer;
 	struct timerqueue_node *node;
 	struct timerqueue_head pinned;
 	int is_pinned;
-	struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
-	struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
+	bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
 
 	timerqueue_init_head(&pinned);
 
-	while ((node = timerqueue_getnext(&old_c_base->active))) {
+	while ((node = timerqueue_getnext(&old_base->active))) {
 		timer = container_of(node, struct hrtimer, node);
-		if (wait) {
-			/* Ensure timers are done running before continuing */
-			while (hrtimer_callback_running(timer)) {
-				raw_spin_unlock(&old_base->lock);
-				raw_spin_unlock(&new_base->lock);
-				cpu_relax();
-				/*
-				 * cpu_relax may just be a barrier. Grant the
-				 * run_hrtimer_list code some time to obtain
-				 * the spinlock.
-				 */
-				udelay(1);
-				raw_spin_lock(&new_base->lock);
-				raw_spin_lock_nested(&old_base->lock,
-							SINGLE_DEPTH_NESTING);
-			}
-		} else {
+		if (is_hotplug)
 			BUG_ON(hrtimer_callback_running(timer));
-		}
 		debug_deactivate(timer);
 
 		/*
@@ -1621,7 +1601,7 @@
 		 * timer could be seen as !active and just vanish away
 		 * under us on another CPU
 		 */
-		__remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
+		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
 
 		is_pinned = timer->state & HRTIMER_STATE_PINNED;
 		if (!remove_pinned && is_pinned) {
@@ -1629,7 +1609,7 @@
 			continue;
 		}
 
-		timer->base = new_c_base;
+		timer->base = new_base;
 		/*
 		 * Enqueue the timers on the new cpu. This does not
 		 * reprogram the event device in case the timer
@@ -1638,7 +1618,7 @@
 		 * sort out already expired timers and reprogram the
 		 * event device.
 		 */
-		enqueue_hrtimer(timer, new_c_base);
+		enqueue_hrtimer(timer, new_base);
 	}
 
 	/* Re-queue pinned timers for non-hotplug usecase */
@@ -1646,12 +1626,11 @@
 		timer = container_of(node, struct hrtimer, node);
 
 		timerqueue_del(&pinned, &timer->node);
-		enqueue_hrtimer(timer, old_c_base);
+		enqueue_hrtimer(timer, old_base);
 	}
 }
 
-static void
-__migrate_hrtimers(unsigned int scpu, bool wait, bool remove_pinned)
+static void __migrate_hrtimers(unsigned int scpu, bool remove_pinned)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
 	unsigned long flags;
@@ -1668,8 +1647,8 @@
 	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		migrate_hrtimer_list(old_base, new_base, i, wait,
-								remove_pinned);
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i], remove_pinned);
 	}
 
 	raw_spin_unlock(&old_base->lock);
@@ -1685,13 +1664,13 @@
 	BUG_ON(cpu_online(scpu));
 	tick_cancel_sched_timer(scpu);
 
-	__migrate_hrtimers(scpu, false, true);
+	__migrate_hrtimers(scpu, true);
 	return 0;
 }
 
 void hrtimer_quiesce_cpu(void *cpup)
 {
-	__migrate_hrtimers(*(int *)cpup, true, false);
+	__migrate_hrtimers(*(int *)cpup, false);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
index 34c48b1..9d9f0bf 100644
--- a/kernel/trace/msm_rtb.c
+++ b/kernel/trace/msm_rtb.c
@@ -27,6 +27,7 @@
 #include <linux/io.h>
 #include <asm-generic/sizes.h>
 #include <linux/msm_rtb.h>
+#include <asm/timex.h>
 
 #define SENTINEL_BYTE_1 0xFF
 #define SENTINEL_BYTE_2 0xAA
@@ -41,8 +42,9 @@
  * 4) 4 bytes index
  * 4) 8 bytes extra data from the caller
  * 5) 8 bytes of timestamp
+ * 6) 8 bytes of cyclecount
  *
- * Total = 32 bytes.
+ * Total = 40 bytes.
  */
 struct msm_rtb_layout {
 	unsigned char sentinel[3];
@@ -51,6 +53,7 @@
 	uint64_t caller;
 	uint64_t data;
 	uint64_t timestamp;
+	uint64_t cycle_count;
 } __attribute__ ((__packed__));
 
 
@@ -132,6 +135,11 @@
 	start->timestamp = sched_clock();
 }
 
+static void msm_rtb_write_cyclecount(struct msm_rtb_layout *start)
+{
+	start->cycle_count = get_cycles();
+}
+
 static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller,
 				 uint64_t data, int idx)
 {
@@ -145,6 +153,7 @@
 	msm_rtb_write_idx(idx, start);
 	msm_rtb_write_data(data, start);
 	msm_rtb_write_timestamp(start);
+	msm_rtb_write_cyclecount(start);
 	mb();
 
 }
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 0ecef3e..5e6db6b 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -58,7 +58,9 @@
 	usermem = (char __user *)user_addr;
 	bad_usermem = (char *)user_addr;
 
-	/* Legitimate usage: none of these should fail. */
+	/*
+	 * Legitimate usage: none of these copies should fail.
+	 */
 	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
 		    "legitimate copy_from_user failed");
 	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
@@ -68,19 +70,33 @@
 	ret |= test(put_user(value, (unsigned long __user *)usermem),
 		    "legitimate put_user failed");
 
-	/* Invalid usage: none of these should succeed. */
+	/*
+	 * Invalid usage: none of these copies should succeed.
+	 */
+
+	/* Reject kernel-to-kernel copies through copy_from_user(). */
 	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
 				    PAGE_SIZE),
 		    "illegal all-kernel copy_from_user passed");
+
+#if 0
+	/*
+	 * When running with SMAP/PAN/etc, this will Oops the kernel
+	 * due to the zeroing of userspace memory on failure. This needs
+	 * to be tested in LKDTM instead, since this test module does not
+	 * expect to explode.
+	 */
 	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
 				    PAGE_SIZE),
 		    "illegal reversed copy_from_user passed");
+#endif
 	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
 				  PAGE_SIZE),
 		    "illegal all-kernel copy_to_user passed");
 	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
 				  PAGE_SIZE),
 		    "illegal reversed copy_to_user passed");
+
 	ret |= test(!get_user(value, (unsigned long __user *)kmem),
 		    "illegal get_user passed");
 	ret |= test(!put_user(value, (unsigned long __user *)kmem),
diff --git a/mm/ksm.c b/mm/ksm.c
index 56e92dc..5f1855b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1020,8 +1020,7 @@
 		goto out;
 
 	if (PageTransCompound(page)) {
-		err = split_huge_page(page);
-		if (err)
+		if (split_huge_page(page))
 			goto out_unlock;
 	}
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 49b7c1e..f1eabcc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1725,6 +1725,29 @@
 	}
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+	struct memblock_region *rgn;
+	unsigned long size = 0;
+	int idx;
+
+	for_each_memblock_type((&memblock.reserved), rgn) {
+		phys_addr_t start, end;
+
+		if (rgn->base + rgn->size < start_addr)
+			continue;
+		if (rgn->base > end_addr)
+			continue;
+
+		start = rgn->base;
+		end = start + rgn->size;
+		size += end - start;
+	}
+
+	return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
 	pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 19e796d..4bd4480 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1587,12 +1587,8 @@
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
-		/*
-		 * We know that soft_offline_huge_page() tries to migrate
-		 * only one hugepage pointed to by hpage, so we need not
-		 * run through the pagelist here.
-		 */
-		putback_active_hugepage(hpage);
+		if (!list_empty(&pagelist))
+			putback_movable_pages(&pagelist);
 		if (ret > 0)
 			ret = -EIO;
 	} else {
diff --git a/mm/mlock.c b/mm/mlock.c
index 4feee1d..9cdd063 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -285,7 +285,7 @@
 {
 	int i;
 	int nr = pagevec_count(pvec);
-	int delta_munlocked;
+	int delta_munlocked = -nr;
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
@@ -305,6 +305,8 @@
 				continue;
 			else
 				__munlock_isolation_failed(page);
+		} else {
+			delta_munlocked++;
 		}
 
 		/*
@@ -316,7 +318,6 @@
 		pagevec_add(&pvec_putback, pvec->pages[i]);
 		pvec->pages[i] = NULL;
 	}
-	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
 	spin_unlock_irq(zone_lru_lock(zone));
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ca9565..27ddaae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,6 +286,26 @@
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node but also take into account
+	 * that two large system hashes can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate for all the memblock reservations (e.g. crash kernel)
+	 * from the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -308,20 +328,11 @@
 				unsigned long pfn, unsigned long zone_end,
 				unsigned long *nr_initialised)
 {
-	unsigned long max_initialise;
-
 	/* Always populate low zones for address-contrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-	/*
-	 * Initialise at least 2G of a node but also take into account that
-	 * two large system hashes that can take up 1GB for 0.25TB/node.
-	 */
-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
-		(pgdat->node_spanned_pages >> 8));
-
 	(*nr_initialised)++;
-	if ((*nr_initialised > max_initialise) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -5940,7 +5951,6 @@
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
@@ -5962,6 +5972,7 @@
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }
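
For a feel of the sizes involved, the same arithmetic can be run stand-alone; the node size, page size, and reservation below are invented for illustration:

	#include <stdio.h>

	#define PAGE_SHIFT	12

	int main(void)
	{
		unsigned long node_spanned_pages = 64UL << (30 - PAGE_SHIFT);	/* 64 GB node */
		unsigned long reserved_lowmem    = 512UL << (20 - PAGE_SHIFT);	/* 512 MB reserved */

		/* at least 2 GB, or 1/256th of the node, whichever is larger */
		unsigned long max_initialise = 2UL << (30 - PAGE_SHIFT);
		if ((node_spanned_pages >> 8) > max_initialise)
			max_initialise = node_spanned_pages >> 8;

		/* compensate for memblock reservations such as the crash kernel */
		max_initialise += reserved_lowmem;
		if (max_initialise > node_spanned_pages)
			max_initialise = node_spanned_pages;

		printf("static_init_size = %lu pages (%lu MB)\n",
		       max_initialise, max_initialise >> (20 - PAGE_SHIFT));
		return 0;
	}

Here static_init_size comes out to 655360 pages (2560 MB): the 2 GB floor plus the reserved low memory, so the deferred path still leaves enough initialised memory to boot.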
 
diff --git a/mm/slub.c b/mm/slub.c
index 30be24b..7341005 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@
 	return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 			length, 1);
 	metadata_access_disable();
 }
@@ -636,14 +637,15 @@
 	       p, p - addr, get_freepointer(s, p));
 
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+			      s->red_left_pad);
 	else if (p > addr + 16)
-		print_section("Bytes b4 ", p - 16, 16);
+		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->object_size,
-				PAGE_SIZE));
+	print_section(KERN_ERR, "Object ", p,
+		      min_t(unsigned long, s->object_size, PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->object_size,
+		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 			s->inuse - s->object_size);
 
 	if (s->offset)
@@ -658,7 +660,8 @@
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, size_from_object(s) - off);
+		print_section(KERN_ERR, "Padding ", p + off,
+			      size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -832,7 +835,7 @@
 		end--;
 
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-	print_section("Padding ", end - remainder, remainder);
+	print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
@@ -985,7 +988,7 @@
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object,
+			print_section(KERN_INFO, "Object ", (void *)object,
 					s->object_size);
 
 		dump_stack();
@@ -5466,6 +5469,7 @@
 		char mbuf[64];
 		char *buf;
 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+		ssize_t len;
 
 		if (!attr || !attr->store || !attr->show)
 			continue;
@@ -5490,8 +5494,9 @@
 			buf = buffer;
 		}
 
-		attr->show(root_cache, buf);
-		attr->store(s, buf, strlen(buf));
+		len = attr->show(root_cache, buf);
+		if (len > 0)
+			attr->store(s, buf, len);
 	}
 
 	if (buffer)
diff --git a/mm/truncate.c b/mm/truncate.c
index 8d8c62d..9c809e7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -753,7 +753,7 @@
  */
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 {
-	int bsize = 1 << inode->i_blkbits;
+	int bsize = i_blocksize(inode);
 	loff_t rounded_from;
 	struct page *page;
 	pgoff_t index;
diff --git a/net/Kconfig b/net/Kconfig
index d5ff4f7..0b8c255 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -265,10 +265,6 @@
 config HWBM
        bool
 
-config SOCK_CGROUP_DATA
-	bool
-	default n
-
 config CGROUP_NET_PRIO
 	bool "Network priority cgroup"
 	depends on CGROUPS
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0474106..7625ec8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -776,6 +776,13 @@
 			return -EPROTONOSUPPORT;
 		}
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		if (defpvid >= VLAN_VID_MASK)
+			return -EINVAL;
+	}
 #endif
 
 	return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d8ad73b3..16b5aa9 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -185,6 +185,8 @@
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
+		if (br->dev->flags & IFF_UP)
+			mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index da058b8..15826fd 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
-		if (br->stp_enabled != BR_USER_STP)
+		if (br->stp_enabled == BR_KERNEL_STP)
 			mod_timer(&br->hello_timer,
 				  round_jiffies(jiffies + br->hello_time));
 	}
diff --git a/net/core/dst.c b/net/core/dst.c
index b5cbbe0..656b70d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
 	/* This initializer is needed to force linker to place this variable
 	 * into const section. Otherwise it might end into bss section.
 	 * We really want to avoid false sharing on this variable, and catch
 	 * any writes on it.
 	 */
-	[RTAX_MAX] = 0xdeadbeef,
+	.refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
-	dst_init_metrics(dst, dst_default_metrics, true);
+	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
 	dst->from = NULL;
@@ -315,25 +315,30 @@
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
 	if (p) {
-		u32 *old_p = __DST_METRICS_PTR(old);
+		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
 		unsigned long prev, new;
 
-		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+		atomic_set(&p->refcnt, 1);
+		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
 		new = (unsigned long) p;
 		prev = cmpxchg(&dst->_metrics, old, new);
 
 		if (prev != old) {
 			kfree(p);
-			p = __DST_METRICS_PTR(prev);
+			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
 			if (prev & DST_METRICS_READ_ONLY)
 				p = NULL;
+		} else if (prev & DST_METRICS_REFCOUNTED) {
+			if (atomic_dec_and_test(&old_p->refcnt))
+				kfree(old_p);
 		}
 	}
-	return p;
+	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+	return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -342,7 +347,7 @@
 {
 	unsigned long prev, new;
 
-	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
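
The dst.c changes above (together with the matching fib_semantics.c and route.c hunks further down) replace the bare u32 metrics array with a reference-counted container, so a FIB entry and the routes cloned from it can share one metrics block and free it exactly once. A minimal user-space sketch of that lifetime rule follows; the names and plain counters are illustrative stand-ins for the kernel's atomic_t/cmpxchg handling, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RTAX_MAX 16

struct dst_metrics_sketch {
	unsigned int metrics[RTAX_MAX];	/* must stay at offset 0 */
	int refcnt;			/* owners currently sharing the block */
};

/* Read-only defaults shared by every route with no explicit metrics. */
static const struct dst_metrics_sketch default_metrics = { .refcnt = 1 };

/* Copy-on-write: clone the source block before the caller writes to it. */
static struct dst_metrics_sketch *metrics_cow(const struct dst_metrics_sketch *old)
{
	struct dst_metrics_sketch *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	memcpy(p->metrics, old->metrics, sizeof(p->metrics));
	p->refcnt = 1;
	return p;
}

/* Drop one reference; free only when the last owner is gone. */
static void metrics_put(struct dst_metrics_sketch *p)
{
	if ((const void *)p == (const void *)&default_metrics)
		return;
	if (--p->refcnt == 0)
		free(p);
}

int main(void)
{
	struct dst_metrics_sketch *m = metrics_cow(&default_metrics);

	if (!m)
		return 1;
	m->metrics[1] = 1500;	/* e.g. a per-route MTU override */
	m->refcnt++;		/* a second route now shares the block */
	metrics_put(m);		/* first owner gone, block survives */
	metrics_put(m);		/* last owner gone, block freed once */
	printf("shared metrics block released exactly once\n");
	return 0;
}
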
diff --git a/net/core/filter.c b/net/core/filter.c
index 2cb4f0f..5e42e0e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -95,8 +95,8 @@
 
 		skb->sk = sk;
 		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
 		skb->sk = save_sk;
+		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
 	}
 	rcu_read_unlock();
 
@@ -2208,6 +2208,7 @@
 	    func == bpf_skb_change_proto ||
 	    func == bpf_skb_change_tail ||
 	    func == bpf_skb_pull_data ||
+	    func == bpf_clone_redirect ||
 	    func == bpf_l3_csum_replace ||
 	    func == bpf_l4_csum_replace)
 		return true;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b490af6..1d91607 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1617,13 +1617,13 @@
 					       cb->nlh->nlmsg_seq, 0,
 					       flags,
 					       ext_filter_mask);
-			/* If we ran out of room on the first message,
-			 * we're in trouble
-			 */
-			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-			if (err < 0)
-				goto out;
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
@@ -1631,10 +1631,12 @@
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -3413,8 +3415,12 @@
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3425,16 +3431,22 @@
 							      seq, dev,
 							      filter_mask,
 							      NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
 	}
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 	cb->args[0] = idx;
 
-	return skb->len;
+	return err;
 }
 
 static inline size_t bridge_nlmsg_size(void)
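
The rtnetlink.c rework above, like the fib_frontend.c and fib_trie.c hunks later on, moves the dump callbacks to a single convention: a fill failure with records already queued just ends this message (return skb->len so userspace asks for more), while a failure on an empty message propagates the real error instead of masquerading as an empty dump. A stand-alone model of that control flow; the toy skb and fill_one() are illustrative, not kernel helpers.

#include <stdio.h>
#include <string.h>

#define NOBJS 8

/* Toy stand-in for a netlink skb: a fixed byte budget and a fill level. */
struct skb_sketch {
	char buf[48];
	int len;
};

/* Append one record; -1 (think -EMSGSIZE) once it no longer fits. */
static int fill_one(struct skb_sketch *skb, int idx)
{
	char rec[16];
	int n = snprintf(rec, sizeof(rec), "route-%d\n", idx);

	if (skb->len + n > (int)sizeof(skb->buf))
		return -1;
	memcpy(skb->buf + skb->len, rec, n);
	skb->len += n;
	return 0;
}

/* Partial message: report the bytes queued.  Nothing queued: report the error. */
static int dump_sketch(struct skb_sketch *skb)
{
	int idx, err;

	for (idx = 0; idx < NOBJS; idx++) {
		err = fill_one(skb, idx);
		if (err < 0) {
			if (skb->len)
				goto out;
			goto out_err;
		}
	}
out:
	err = skb->len;
out_err:
	return err;
}

int main(void)
{
	struct skb_sketch skb = { .len = 0 };

	printf("dump_sketch() = %d (positive: partial dump, resume later)\n",
	       dump_sketch(&skb));
	return 0;
}
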
diff --git a/net/core/sock.c b/net/core/sock.c
index 19562f7..f07eaea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -138,10 +138,7 @@
 
 #include <trace/events/sock.h>
 
-#ifdef CONFIG_INET
 #include <net/tcp.h>
-#endif
-
 #include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
@@ -1034,6 +1031,7 @@
 
 	union {
 		int val;
+		u64 val64;
 		struct linger ling;
 		struct timeval tm;
 	} v;
@@ -1264,6 +1262,13 @@
 		v.val = sk->sk_incoming_cpu;
 		break;
 
+
+	case SO_COOKIE:
+		lv = sizeof(u64);
+		if (len < lv)
+			return -EINVAL;
+		v.val64 = sock_gen_cookie(sk);
+		break;
 	default:
 		/* We implement the SO_SNDLOWAT etc to not be settable
 		 * (1003.1g 7).
@@ -1687,28 +1692,24 @@
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
  * But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
  */
 void skb_orphan_partial(struct sk_buff *skb)
 {
-	/* If this skb is a TCP pure ACK or already went here,
-	 * we have nothing to do. 2 is already a very small truesize.
-	 */
-	if (skb->truesize <= 2)
+	if (skb_is_tcp_pure_ack(skb))
 		return;
 
-	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
-	 * so we do not completely orphan skb, but transfert all
-	 * accounted bytes but one, to avoid unexpected reorders.
-	 */
 	if (skb->destructor == sock_wfree
 #ifdef CONFIG_INET
 	    || skb->destructor == tcp_wfree
 #endif
 		) {
-		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
-		skb->truesize = 1;
+		struct sock *sk = skb->sk;
+
+		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			skb->destructor = sock_efree;
+		}
 	} else {
 		skb_orphan(skb);
 	}
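
The SO_COOKIE branch added to sock_getsockopt() above exposes the kernel's 64-bit per-socket cookie to userspace. A hedged usage example; SO_COOKIE is 57 in the asm-generic socket ABI and is defined locally in case the libc headers predate it (a few architectures number socket options differently).

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_COOKIE
#define SO_COOKIE 57		/* asm-generic value; assumption, see above */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	uint64_t cookie = 0;
	socklen_t len = sizeof(cookie);

	if (fd < 0 || getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len) < 0) {
		perror("SO_COOKIE");
		return 1;
	}
	printf("socket cookie: %llu\n", (unsigned long long)cookie);
	return 0;
}
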
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 237d62c..2ac9d2a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@
 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
+		newnp->ipv6_mc_list = NULL;
+		newnp->ipv6_ac_list = NULL;
+		newnp->ipv6_fl_list = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 
@@ -490,6 +493,9 @@
 	/* Clone RX bits */
 	newnp->rxopt.all = np->rxopt.all;
 
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 	newnp->pktoptions = NULL;
 	newnp->opt	  = NULL;
 	newnp->mcast_oif  = inet6_iif(skb);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 39e9acf..ceddf42 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -136,6 +136,8 @@
 }
 #endif
 
+int sysctl_reserved_port_bind __read_mostly = 1;
+
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
  */
@@ -1034,7 +1036,7 @@
 		.type =       SOCK_DGRAM,
 		.protocol =   IPPROTO_ICMP,
 		.prot =       &ping_prot,
-		.ops =        &inet_dgram_ops,
+		.ops =        &inet_sockraw_ops,
 		.flags =      INET_PROTOSW_REUSE,
        },
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 98fd2f7..37f4578 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -759,7 +759,7 @@
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
 	struct hlist_head *head;
-	int dumped = 0;
+	int dumped = 0, err;
 
 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -779,20 +779,27 @@
 			if (dumped)
 				memset(&cb->args[2], 0, sizeof(cb->args) -
 						 2 * sizeof(cb->args[0]));
-			if (fib_table_dump(tb, skb, cb) < 0)
-				goto out;
+			err = fib_table_dump(tb, skb, cb);
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 			dumped = 1;
 next:
 			e++;
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 
 	cb->args[1] = e;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 /* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 6a40680..7563831 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@
 static void free_fib_info_rcu(struct rcu_head *head)
 {
 	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+	struct dst_metrics *m;
 
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
 	} endfor_nexthops(fi);
 
-	if (fi->fib_metrics != (u32 *) dst_default_metrics)
-		kfree(fi->fib_metrics);
+	m = fi->fib_metrics;
+	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+		kfree(m);
 	kfree(fi);
 }
 
@@ -982,11 +984,11 @@
 			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
-		fi->fib_metrics[type - 1] = val;
+		fi->fib_metrics->metrics[type - 1] = val;
 	}
 
 	if (ecn_ca)
-		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+		fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
 	return 0;
 }
@@ -1044,11 +1046,12 @@
 		goto failure;
 	fib_info_cnt++;
 	if (cfg->fc_mx) {
-		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
+		atomic_set(&fi->fib_metrics->refcnt, 1);
 	} else
-		fi->fib_metrics = (u32 *) dst_default_metrics;
+		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
 	fi->fib_net = net;
 	fi->fib_protocol = cfg->fc_protocol;
@@ -1252,7 +1255,7 @@
 	if (fi->fib_priority &&
 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
 		goto nla_put_failure;
-	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 		goto nla_put_failure;
 
 	if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e3665bf..ef40bb6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1932,6 +1932,8 @@
 
 	/* rcu_read_lock is hold by caller */
 	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+		int err;
+
 		if (i < s_i) {
 			i++;
 			continue;
@@ -1942,17 +1944,14 @@
 			continue;
 		}
 
-		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
-				  cb->nlh->nlmsg_seq,
-				  RTM_NEWROUTE,
-				  tb->tb_id,
-				  fa->fa_type,
-				  xkey,
-				  KEYLENGTH - fa->fa_slen,
-				  fa->fa_tos,
-				  fa->fa_info, NLM_F_MULTI) < 0) {
+		err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+				    tb->tb_id, fa->fa_type,
+				    xkey, KEYLENGTH - fa->fa_slen,
+				    fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+		if (err < 0) {
 			cb->args[4] = i;
-			return -1;
+			return err;
 		}
 		i++;
 	}
@@ -1974,10 +1973,13 @@
 	t_key key = cb->args[3];
 
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+		int err;
+
+		err = fn_trie_dump_leaf(l, tb, skb, cb);
+		if (err < 0) {
 			cb->args[3] = key;
 			cb->args[2] = count;
-			return -1;
+			return err;
 		}
 
 		++count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d5d3ead..c094ac9 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -111,6 +111,13 @@
 		head = &hinfo->bhash[inet_bhashfn(net, port,
 						  hinfo->bhash_size)];
 		spin_lock_bh(&head->lock);
+
+		if (inet_is_local_reserved_port(net, snum) &&
+		    !sysctl_reserved_port_bind) {
+			ret = 1;
+			goto fail_unlock;
+		}
+
 		inet_bind_bucket_for_each(tb, &head->chain)
 			if (net_eq(ib_net(tb), net) && tb->port == port)
 				goto tb_found;
@@ -665,6 +672,8 @@
 		/* listeners have SOCK_RCU_FREE, not the children */
 		sock_reset_flag(newsk, SOCK_RCU_FREE);
 
+		inet_sk(newsk)->mc_list = NULL;
+
 		newsk->sk_mark = inet_rsk(req)->ir_mark;
 		atomic64_set(&newsk->sk_cookie,
 			     atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 70c40ba2..18c6e79 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1370,8 +1370,12 @@
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
 	struct rtable *rt = (struct rtable *) dst;
 
+	if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+		kfree(p);
+
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1423,7 +1427,11 @@
 			rt->rt_gateway = nh->nh_gw;
 			rt->rt_uses_gateway = 1;
 		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+		if (fi->fib_metrics != &dst_default_metrics) {
+			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+			atomic_inc(&fi->fib_metrics->refcnt);
+		}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index cf7cfa4..08605a4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -789,6 +789,13 @@
 		.proc_handler	= proc_do_large_bitmap,
 	},
 	{
+		.procname       = "reserved_port_bind",
+		.data           = &sysctl_reserved_port_bind,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec
+	},
+	{
 		.procname	= "ip_no_pmtu_disc",
 		.data		= &init_net.ipv4.sysctl_ip_no_pmtu_disc,
 		.maxlen		= sizeof(int),
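
The new net.ipv4.reserved_port_bind knob registered here pairs with the checks added in inet_connection_sock.c above and udp.c below: when the knob is 0, an explicit bind() to a port listed in ip_local_reserved_ports is refused; the default of 1 keeps the old behaviour. A stand-alone sketch of the policy, with a hard-coded port standing in for the kernel's reserved-ports bitmap.

#include <stdbool.h>
#include <stdio.h>

static int sysctl_reserved_port_bind = 1;	/* default: old behaviour */

/* Stand-in for inet_is_local_reserved_port(); pretend 8080 is reserved. */
static bool is_local_reserved_port(unsigned short port)
{
	return port == 8080;
}

/* Mirrors the bind-time check: nonzero means the bind is refused. */
static int reserved_bind_refused(unsigned short port)
{
	return is_local_reserved_port(port) && !sysctl_reserved_port_bind;
}

int main(void)
{
	sysctl_reserved_port_bind = 0;	/* echo 0 > .../reserved_port_bind */
	printf("bind(8080): %s\n",
	       reserved_bind_refused(8080) ? "refused" : "allowed");
	printf("bind(9090): %s\n",
	       reserved_bind_refused(9090) ? "refused" : "allowed");
	return 0;
}
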
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb142ca..86fbf0f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1078,9 +1078,12 @@
 				int *copied, size_t size)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct sockaddr *uaddr = msg->msg_name;
 	int err, flags;
 
-	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+	     uaddr->sa_family == AF_UNSPEC))
 		return -EOPNOTSUPP;
 	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */
@@ -1093,7 +1096,7 @@
 	tp->fastopen_req->size = size;
 
 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+	err = __inet_stream_connect(sk->sk_socket, uaddr,
 				    msg->msg_namelen, flags);
 	*copied = tp->fastopen_req->copied;
 	tcp_free_fastopen_req(tp);
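
The tcp_sendmsg_fastopen() hunk above refuses a Fast Open request whose destination address family is AF_UNSPEC before it can reach the connect path. A hedged user-space illustration of the now-rejected call; EOPNOTSUPP matches the return value in the hunk, and MSG_FASTOPEN is defined locally in case older headers lack it.

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	if (fd < 0)
		return 1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_UNSPEC;	/* the case the check now rejects */
	addr.sin_port = htons(80);

	if (sendto(fd, "x", 1, MSG_FASTOPEN,
		   (struct sockaddr *)&addr, sizeof(addr)) < 0)
		printf("sendto: %s (EOPNOTSUPP expected)\n", strerror(errno));
	return 0;
}
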
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index baea5df..0cdbea9 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -179,6 +179,7 @@
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 
+	tcp_sk(sk)->prior_ssthresh = 0;
 	if (icsk->icsk_ca_ops->init)
 		icsk->icsk_ca_ops->init(sk);
 	if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e074816..a03f1e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1178,13 +1178,14 @@
 		 */
 		if (pkt_len > mss) {
 			unsigned int new_len = (pkt_len / mss) * mss;
-			if (!in_sack && new_len < pkt_len) {
+			if (!in_sack && new_len < pkt_len)
 				new_len += mss;
-				if (new_len >= skb->len)
-					return 0;
-			}
 			pkt_len = new_len;
 		}
+
+		if (pkt_len >= skb->len && !in_sack)
+			return 0;
+
 		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
 		if (err < 0)
 			return err;
@@ -3233,7 +3234,7 @@
 			int delta;
 
 			/* Non-retransmitted hole got filled? That's reordering */
-			if (reord < prior_fackets)
+			if (reord < prior_fackets && reord <= tp->fackets_out)
 				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
 			delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eca1433..e8ab585 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2228,6 +2228,7 @@
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
+	__u8 seq_state = sk->sk_state;
 	int rx_queue;
 	int state;
 
@@ -2247,6 +2248,9 @@
 		timer_expires = jiffies;
 	}
 
+	if (inet->transparent)
+		seq_state |= 0x80;
+
 	state = sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
@@ -2258,7 +2262,7 @@
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, state,
+		i, src, srcp, dest, destp, seq_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5093bb8..fe24424 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -290,6 +290,11 @@
 	} else {
 		hslot = udp_hashslot(udptable, net, snum);
 		spin_lock_bh(&hslot->lock);
+
+		if (inet_is_local_reserved_port(net, snum) &&
+		    !sysctl_reserved_port_bind)
+			goto fail_unlock;
+
 		if (hslot->count > 10) {
 			int exist;
 			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
@@ -2377,14 +2382,21 @@
 		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
+	struct udp_sock *up = udp_sk(sp);
 	__be32 dest = inet->inet_daddr;
 	__be32 src  = inet->inet_rcv_saddr;
 	__u16 destp	  = ntohs(inet->inet_dport);
 	__u16 srcp	  = ntohs(inet->inet_sport);
+	__u8 state = sp->sk_state;
+
+	if (up->encap_rcv)
+		state |= 0xF0;
+	else if (inet->transparent)
+		state |= 0x80;
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
-		bucket, src, srcp, dest, destp, sp->sk_state,
+		bucket, src, srcp, dest, destp, state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
 		0, 0L, 0,
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de..8d772fe 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@
 	struct ipv6hdr *ip6_hdr;
 	struct ipv6_opt_hdr *hop;
 	unsigned char buf[CALIPSO_MAX_BUFFER];
-	int len_delta, new_end, pad;
+	int len_delta, new_end, pad, payload;
 	unsigned int start, end;
 
 	ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@
 	if (ret_val < 0)
 		return ret_val;
 
+	ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
 	if (len_delta) {
 		if (len_delta > 0)
 			skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@
 			sizeof(*ip6_hdr) + start);
 		skb_reset_network_header(skb);
 		ip6_hdr = ipv6_hdr(skb);
+		payload = ntohs(ip6_hdr->payload_len);
+		ip6_hdr->payload_len = htons(payload + len_delta);
 	}
 
 	hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index a381772..9217390 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -1008,9 +1008,14 @@
 			     __u16 srcp, __u16 destp, int bucket)
 {
 	const struct in6_addr *dest, *src;
+	__u8 state = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
+
+	if (inet_sk(sp) && inet_sk(sp)->transparent)
+		state |= 0x80;
+
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
@@ -1019,7 +1024,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
+		   state,
 		   sk_wmem_alloc_get(sp),
 		   sk_rmem_alloc_get(sp),
 		   0, 0L, 0,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 33b04ec..424fbe1 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@
 	const struct net_offload *ops;
 	int proto;
 	struct frag_hdr *fptr;
-	unsigned int unfrag_ip6hlen;
 	unsigned int payload_len;
 	u8 *prevhdr;
 	int offset = 0;
@@ -116,8 +115,12 @@
 		skb->network_header = (u8 *)ipv6h - skb->head;
 
 		if (udpfrag) {
-			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+			int err = ip6_find_1stfragopt(skb, &prevhdr);
+			if (err < 0) {
+				kfree_skb_list(segs);
+				return ERR_PTR(err);
+			}
+			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 			fptr->frag_off = htons(offset);
 			if (skb->next)
 				fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a215802..d472a5f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -595,7 +595,10 @@
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
 
-	hlen = ip6_find_1stfragopt(skb, &prevhdr);
+	err = ip6_find_1stfragopt(skb, &prevhdr);
+	if (err < 0)
+		goto fail;
+	hlen = err;
 	nexthdr = *prevhdr;
 
 	mtu = ip6_skb_dst_mtu(skb);
@@ -1453,6 +1456,11 @@
 			 */
 			alloclen += sizeof(struct frag_hdr);
 
+			copy = datalen - transhdrlen - fraggap;
+			if (copy < 0) {
+				err = -EINVAL;
+				goto error;
+			}
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
 						alloclen + hh_len,
@@ -1502,13 +1510,9 @@
 				data += fraggap;
 				pskb_trim_unique(skb_prev, maxfraglen);
 			}
-			copy = datalen - transhdrlen - fraggap;
-
-			if (copy < 0) {
-				err = -EINVAL;
-				kfree_skb(skb);
-				goto error;
-			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+			if (copy > 0 &&
+			    getfrag(from, data + transhdrlen, offset,
+				    copy, fraggap, skb) < 0) {
 				err = -EFAULT;
 				kfree_skb(skb);
 				goto error;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd42523..e9065b8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
-	struct ipv6_opt_hdr *exthdr =
-				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 	unsigned int packet_len = skb_tail_pointer(skb) -
 		skb_network_header(skb);
 	int found_rhdr = 0;
 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 
-	while (offset + 1 <= packet_len) {
+	while (offset <= packet_len) {
+		struct ipv6_opt_hdr *exthdr;
 
 		switch (**nexthdr) {
 
@@ -107,13 +106,16 @@
 			return offset;
 		}
 
-		offset += ipv6_optlen(exthdr);
-		*nexthdr = &exthdr->nexthdr;
+		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+			return -EINVAL;
+
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
+		offset += ipv6_optlen(exthdr);
+		*nexthdr = &exthdr->nexthdr;
 	}
 
-	return offset;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(ip6_find_1stfragopt);
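
The rewritten ip6_find_1stfragopt() above only trusts an extension header's length field once the whole header is known to fit inside the packet, and returns -EINVAL instead of an offset past the end of the buffer; the ip6_output.c and ip6_offload.c hunks earlier, and the udp_offload.c and xfrm6 hunks below, teach the callers to check for that error. A simplified stand-alone model of the hardened walk (two extension-header types, toy packet layout, not the kernel structures).

#include <errno.h>
#include <stdio.h>

struct opt_hdr_sketch {
	unsigned char nexthdr;
	unsigned char hdrlen;	/* length in 8-byte units, minus one */
};

static int find_1stfragopt_sketch(const unsigned char *pkt, unsigned int pkt_len)
{
	unsigned int offset = 40;	/* fixed IPv6 header */
	unsigned char nexthdr = pkt[6];	/* first next-header field */

	while (offset <= pkt_len) {
		const struct opt_hdr_sketch *exthdr;

		if (nexthdr != 0 && nexthdr != 60)	/* not hop-by-hop/dest opts */
			return offset;	/* fragmentable part starts here */

		/* Refuse to read a length field that lies beyond the packet. */
		if (offset + sizeof(*exthdr) > pkt_len)
			return -EINVAL;

		exthdr = (const struct opt_hdr_sketch *)(pkt + offset);
		offset += (exthdr->hdrlen + 1u) * 8;
		nexthdr = exthdr->nexthdr;
	}
	return -EINVAL;		/* ran off the end: malformed packet */
}

int main(void)
{
	unsigned char pkt[48] = { 0 };

	pkt[6] = 0;	/* IPv6 header says: hop-by-hop options next */
	pkt[40] = 6;	/* hop-by-hop says: TCP next */
	pkt[41] = 0;	/* hop-by-hop header is 8 bytes long */
	printf("fragmentable part at offset %d\n",
	       find_1stfragopt_sketch(pkt, sizeof(pkt)));
	return 0;
}
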
 
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index e1f8b34..2a965d4 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -199,7 +199,7 @@
 	.type =      SOCK_DGRAM,
 	.protocol =  IPPROTO_ICMPV6,
 	.prot =      &pingv6_prot,
-	.ops =       &inet6_dgram_ops,
+	.ops =       &inet6_sockraw_ops,
 	.flags =     INET_PROTOSW_REUSE,
 };
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 83c7d2b..ff701ce 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1331,7 +1331,7 @@
 #endif	/* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
 	.family		   = PF_INET6,
 	.owner		   = THIS_MODULE,
 	.release	   = inet6_release,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1c3bc0a..f8a6036 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1048,6 +1048,7 @@
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+		newnp->ipv6_mc_list = NULL;
 		newnp->ipv6_ac_list = NULL;
 		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
@@ -1117,6 +1118,7 @@
 	   First: no IPv4 options.
 	 */
 	newinet->inet_opt = NULL;
+	newnp->ipv6_mc_list = NULL;
 	newnp->ipv6_ac_list = NULL;
 	newnp->ipv6_fl_list = NULL;
 
@@ -1739,6 +1741,7 @@
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	int rx_queue;
 	int state;
+	__u8 state_seq = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1770,6 +1773,9 @@
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
+	if (inet->transparent)
+		state_seq |= 0x80;
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1778,7 +1784,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   state,
+		   state_seq,
 		   tp->write_seq - tp->snd_una,
 		   rx_queue,
 		   timer_active,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c4..a2267f8 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@
 	u8 frag_hdr_sz = sizeof(struct frag_hdr);
 	__wsum csum;
 	int tnl_hlen;
+	int err;
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@
 		/* Find the unfragmentable header and shift it left by frag_hdr_sz
 		 * bytes to insert fragment header.
 		 */
-		unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+		err = ip6_find_1stfragopt(skb, &prevhdr);
+		if (err < 0)
+			return ERR_PTR(err);
+		unfrag_ip6hlen = err;
 		nexthdr = *prevhdr;
 		*prevhdr = NEXTHDR_FRAGMENT;
 		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e01590..07d3657 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 4e34410..1d3bbe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -28,6 +28,8 @@
 	iph = ipv6_hdr(skb);
 
 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+	if (hdr_len < 0)
+		return hdr_len;
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 36493a7..93820e0 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -118,17 +118,17 @@
 		else if (d > 0)
 			p = &parent->rb_right;
 		else {
-			if (nft_set_elem_active(&rbe->ext, genmask)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    !nft_rbtree_interval_end(new))
-					p = &parent->rb_left;
-				else if (!nft_rbtree_interval_end(rbe) &&
-					 nft_rbtree_interval_end(new))
-					p = &parent->rb_right;
-				else {
-					*ext = &rbe->ext;
-					return -EEXIST;
-				}
+			if (nft_rbtree_interval_end(rbe) &&
+			    !nft_rbtree_interval_end(new)) {
+				p = &parent->rb_left;
+			} else if (!nft_rbtree_interval_end(rbe) &&
+				   nft_rbtree_interval_end(new)) {
+				p = &parent->rb_right;
+			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
+				*ext = &rbe->ext;
+				return -EEXIST;
+			} else {
+				p = &parent->rb_left;
 			}
 		}
 	}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cb76ff3..6a563e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2652,13 +2652,6 @@
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
 	}
 
-	sockc.tsflags = po->sk.sk_tsflags;
-	if (msg->msg_controllen) {
-		err = sock_cmsg_send(&po->sk, msg, &sockc);
-		if (unlikely(err))
-			goto out;
-	}
-
 	err = -ENXIO;
 	if (unlikely(dev == NULL))
 		goto out;
@@ -2666,6 +2659,13 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_put;
 
+	sockc.tsflags = po->sk.sk_tsflags;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(&po->sk, msg, &sockc);
+		if (unlikely(err))
+			goto out_put;
+	}
+
 	if (po->sk.sk_socket->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	size_max = po->tx_ring.frame_size
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a01a56e..6c79915 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@
 			     struct sctp_association **app,
 			     struct sctp_transport **tpp)
 {
+	struct sctp_init_chunk *chunkhdr, _chunkhdr;
 	union sctp_addr saddr;
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
-	struct sctp_init_chunk *chunkhdr;
 	__u32 vtag = ntohl(sctphdr->vtag);
-	int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
 	*app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@
 	 * discard the packet.
 	 */
 	if (vtag == 0) {
-		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-			  + sizeof(__be32) ||
+		/* chunk header + first 4 octets of init header */
+		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+					      sizeof(struct sctphdr),
+					      sizeof(struct sctp_chunkhdr) +
+					      sizeof(__be32), &_chunkhdr);
+		if (!chunkhdr ||
 		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
 			goto out;
-		}
+
 	} else if (vtag != asoc->c.peer_vtag) {
 		goto out;
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6a2532d..0c09060 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
-	union sctp_addr *baddr = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
 	struct in6_addr *final_p, final;
 	__u8 matchlen = 0;
-	__u8 bmatchlen;
 	sctp_scope_t scope;
 
 	memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-		if (!laddr->valid)
+		struct dst_entry *bdst;
+		__u8 bmatchlen;
+
+		if (!laddr->valid ||
+		    laddr->state != SCTP_ADDR_SRC ||
+		    laddr->a.sa.sa_family != AF_INET6 ||
+		    scope > sctp_scope(&laddr->a))
 			continue;
-		if ((laddr->state == SCTP_ADDR_SRC) &&
-		    (laddr->a.sa.sa_family == AF_INET6) &&
-		    (scope <= sctp_scope(&laddr->a))) {
-			bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
-			if (!baddr || (matchlen < bmatchlen)) {
-				baddr = &laddr->a;
-				matchlen = bmatchlen;
-			}
-		}
-	}
-	if (baddr) {
-		fl6->saddr = baddr->v6.sin6_addr;
-		fl6->fl6_sport = baddr->v6.sin6_port;
+
+		fl6->saddr = laddr->a.v6.sin6_addr;
+		fl6->fl6_sport = laddr->a.v6.sin6_port;
 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
-		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+		if (!IS_ERR(bdst) &&
+		    ipv6_chk_addr(dev_net(bdst->dev),
+				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+			if (!IS_ERR_OR_NULL(dst))
+				dst_release(dst);
+			dst = bdst;
+			break;
+		}
+
+		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+		if (matchlen > bmatchlen)
+			continue;
+
+		if (!IS_ERR_OR_NULL(dst))
+			dst_release(dst);
+		dst = bdst;
+		matchlen = bmatchlen;
 	}
 	rcu_read_unlock();
 
@@ -666,6 +678,9 @@
 	newnp = inet6_sk(newsk);
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 
 	rcu_read_lock();
 	opt = rcu_dereference(np->opt);
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9ce707b..9cbc786 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -104,12 +104,14 @@
 	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
-int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
+		    unsigned int flags)
 {
 	union bpf_attr attr = {
 		.target_fd = target_fd,
 		.attach_bpf_fd = prog_fd,
 		.attach_type = type,
+		.attach_flags  = flags,
 	};
 
 	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index d0a799a..b06cf5a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -15,7 +15,8 @@
 		  const struct bpf_insn *insns, int insn_len,
 		  const char *license, int kern_version);
 
-int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
+		    unsigned int flags);
 int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
 
 int bpf_obj_pin(int fd, const char *pathname);
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
index 63ef208..9de4896 100644
--- a/samples/bpf/test_cgrp2_attach.c
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -124,7 +124,7 @@
 	ret = bpf_prog_detach(cg_fd, type);
 	printf("bpf_prog_detach() returned '%s' (%d)\n", strerror(errno), errno);
 
-	ret = bpf_prog_attach(prog_fd, cg_fd, type);
+	ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
 	if (ret < 0) {
 		printf("Failed to attach prog to cgroup: '%s'\n",
 		       strerror(errno));
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 17a0610..56c458d 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -480,12 +480,9 @@
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	unsigned int padlen;
-	char pad[16];
 	int ret;
 
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-	padlen = encrypted_datalen - epayload->decrypted_datalen;
 
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -493,11 +490,10 @@
 		goto out;
 	dump_decrypted_data(epayload);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 2);
 	sg_set_buf(&sg_in[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_in[1], pad, padlen);
+	sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
 
 	sg_init_table(sg_out, 1);
 	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -584,9 +580,14 @@
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
 	u8 iv[AES_BLOCK_SIZE];
-	char pad[16];
+	u8 *pad;
 	int ret;
 
+	/* Throwaway buffer to hold the unused zero padding at the end */
+	pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+	if (!pad)
+		return -ENOMEM;
+
 	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
 	req = init_skcipher_req(derived_key, derived_keylen);
 	ret = PTR_ERR(req);
@@ -594,13 +595,12 @@
 		goto out;
 	dump_encrypted_data(epayload, encrypted_datalen);
 
-	memset(pad, 0, sizeof pad);
 	sg_init_table(sg_in, 1);
 	sg_init_table(sg_out, 2);
 	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
 	sg_set_buf(&sg_out[0], epayload->decrypted_data,
 		   epayload->decrypted_datalen);
-	sg_set_buf(&sg_out[1], pad, sizeof pad);
+	sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
 
 	memcpy(iv, epayload->iv, sizeof(iv));
 	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +612,7 @@
 		goto out;
 	dump_decrypted_data(epayload);
 out:
+	kfree(pad);
 	return ret;
 }
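
The encrypted-keys hunks above keep the ciphertext length padded up to the AES block size but stop feeding stack arrays into the scatterlists: the encrypt side points the padding entry at ZERO_PAGE(0) and the decrypt side at a kmalloc()'d throwaway buffer, since with CONFIG_VMAP_STACK a stack buffer must not be used for scatterlist I/O. A small sketch of the padding arithmetic, with roundup() spelled out.

#include <stdio.h>
#include <stdlib.h>

#define AES_BLOCK_SIZE 16

/* Same arithmetic as the kernel's roundup(): next multiple of blk >= len. */
static unsigned int roundup_blk(unsigned int len, unsigned int blk)
{
	return ((len + blk - 1) / blk) * blk;
}

int main(void)
{
	unsigned int decrypted_datalen = 37;	/* example payload size */
	unsigned int encrypted_datalen = roundup_blk(decrypted_datalen,
						     AES_BLOCK_SIZE);
	/* Heap buffer for the tail of the last block, mirroring the patch's
	 * move away from an on-stack pad[] array. */
	unsigned char *pad = calloc(1, AES_BLOCK_SIZE);

	if (!pad)
		return 1;
	printf("%u plaintext bytes -> %u ciphertext bytes (%u pad bytes)\n",
	       decrypted_datalen, encrypted_datalen,
	       encrypted_datalen - decrypted_datalen);
	free(pad);
	return 0;
}
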
 
diff --git a/security/keys/key.c b/security/keys/key.c
index 346fbf2..2f4ce35 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -962,12 +962,11 @@
 	/* the key must be writable */
 	ret = key_permission(key_ref, KEY_NEED_WRITE);
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	/* attempt to update it if supported */
-	ret = -EOPNOTSUPP;
 	if (!key->type->update)
-		goto error;
+		return -EOPNOTSUPP;
 
 	memset(&prep, 0, sizeof(prep));
 	prep.data = payload;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index dbbfd77..ada12c3 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -97,7 +97,7 @@
 	/* pull the payload in if one was supplied */
 	payload = NULL;
 
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
 		if (!payload) {
@@ -327,7 +327,7 @@
 
 	/* pull the payload in if one was supplied */
 	payload = NULL;
-	if (_payload) {
+	if (plen) {
 		ret = -ENOMEM;
 		payload = kmalloc(plen, GFP_KERNEL);
 		if (!payload)
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 1a8186b..7622551 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1626,6 +1626,7 @@
 	if (err < 0)
 		goto __err;
 
+	tu->qhead = tu->qtail = tu->qused = 0;
 	kfree(tu->queue);
 	tu->queue = NULL;
 	kfree(tu->tqueue);
@@ -1967,6 +1968,7 @@
 
 	tu = file->private_data;
 	unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+	mutex_lock(&tu->ioctl_lock);
 	spin_lock_irq(&tu->qlock);
 	while ((long)count - result >= unit) {
 		while (!tu->qused) {
@@ -1982,7 +1984,9 @@
 			add_wait_queue(&tu->qchange_sleep, &wait);
 
 			spin_unlock_irq(&tu->qlock);
+			mutex_unlock(&tu->ioctl_lock);
 			schedule();
+			mutex_lock(&tu->ioctl_lock);
 			spin_lock_irq(&tu->qlock);
 
 			remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -2002,7 +2006,6 @@
 		tu->qused--;
 		spin_unlock_irq(&tu->qlock);
 
-		mutex_lock(&tu->ioctl_lock);
 		if (tu->tread) {
 			if (copy_to_user(buffer, &tu->tqueue[qhead],
 					 sizeof(struct snd_timer_tread)))
@@ -2012,7 +2015,6 @@
 					 sizeof(struct snd_timer_read)))
 				err = -EFAULT;
 		}
-		mutex_unlock(&tu->ioctl_lock);
 
 		spin_lock_irq(&tu->qlock);
 		if (err < 0)
@@ -2022,6 +2024,7 @@
 	}
  _error:
 	spin_unlock_irq(&tu->qlock);
+	mutex_unlock(&tu->ioctl_lock);
 	return result > 0 ? result : err;
 }
 
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 37b70f8..0abab79 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1537,6 +1537,8 @@
 		      "Dell Inspiron 1501", STAC_9200_DELL_M26),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
 		      "unknown Dell", STAC_9200_DELL_M26),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+		      "Dell Latitude D430", STAC_9200_DELL_M22),
 	/* Panasonic */
 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
 	/* Gateway machines needs EAPD to be set on resume */
diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c
index 39986d6..f12f4ca 100644
--- a/sound/soc/codecs/audio-ext-clk-up.c
+++ b/sound/soc/codecs/audio-ext-clk-up.c
@@ -34,6 +34,7 @@
 	struct pinctrl *pinctrl;
 	struct pinctrl_state *sleep;
 	struct pinctrl_state *active;
+	char __iomem *base;
 };
 
 struct audio_ext_ap_clk {
@@ -192,8 +193,10 @@
 		pr_err("%s afe_set_digital_codec_core_clock failed\n",
 			__func__);
 		return ret;
-		}
+	}
 
+	if (pnctrl_info->base)
+		iowrite32(1, pnctrl_info->base);
 	return 0;
 }
 
@@ -219,6 +222,8 @@
 	if (ret < 0)
 		pr_err("%s: afe_set_digital_codec_core_clock failed, ret = %d\n",
 			__func__, ret);
+	if (pnctrl_info->base)
+		iowrite32(0, pnctrl_info->base);
 }
 
 static int audio_ext_lpass_mclk2_prepare(struct clk_hw *hw)
@@ -381,9 +386,11 @@
 static int audio_get_pinctrl(struct platform_device *pdev,
 			     enum audio_clk_mux mux)
 {
+	struct device *dev = &pdev->dev;
 	struct pinctrl_info *pnctrl_info;
 	struct pinctrl *pinctrl;
 	int ret;
+	u32 reg;
 
 	switch (mux) {
 	case AP_CLK2:
@@ -396,21 +403,20 @@
 		pnctrl_info = &audio_lpass_mclk2.pnctrl_info;
 		break;
 	default:
-		dev_err(&pdev->dev, "%s Not a valid MUX ID: %d\n",
+		dev_err(dev, "%s Not a valid MUX ID: %d\n",
 			__func__, mux);
 		return -EINVAL;
 	}
-	pnctrl_info = &audio_ap_clk2.pnctrl_info;
 
 	if (pnctrl_info->pinctrl) {
-		dev_dbg(&pdev->dev, "%s: already requested before\n",
+		dev_dbg(dev, "%s: already requested before\n",
 			__func__);
 		return -EINVAL;
 	}
 
-	pinctrl = devm_pinctrl_get(&pdev->dev);
+	pinctrl = devm_pinctrl_get(dev);
 	if (IS_ERR_OR_NULL(pinctrl)) {
-		dev_dbg(&pdev->dev, "%s: Unable to get pinctrl handle\n",
+		dev_dbg(dev, "%s: Unable to get pinctrl handle\n",
 			__func__);
 		return -EINVAL;
 	}
@@ -418,13 +424,13 @@
 	/* get all state handles from Device Tree */
 	pnctrl_info->sleep = pinctrl_lookup_state(pinctrl, "sleep");
 	if (IS_ERR(pnctrl_info->sleep)) {
-		dev_err(&pdev->dev, "%s: could not get sleep pinstate\n",
+		dev_err(dev, "%s: could not get sleep pinstate\n",
 			__func__);
 		goto err;
 	}
 	pnctrl_info->active = pinctrl_lookup_state(pinctrl, "active");
 	if (IS_ERR(pnctrl_info->active)) {
-		dev_err(&pdev->dev, "%s: could not get active pinstate\n",
+		dev_err(dev, "%s: could not get active pinstate\n",
 			__func__);
 		goto err;
 	}
@@ -432,10 +438,22 @@
 	ret = pinctrl_select_state(pnctrl_info->pinctrl,
 				   pnctrl_info->sleep);
 	if (ret) {
-		dev_err(&pdev->dev, "%s: Disable TLMM pins failed with %d\n",
+		dev_err(dev, "%s: Disable TLMM pins failed with %d\n",
 			__func__, ret);
 		goto err;
 	}
+
+	ret = of_property_read_u32(dev->of_node, "qcom,mclk-clk-reg", &reg);
+	if (ret < 0) {
+		dev_dbg(dev, "%s: miss mclk reg\n", __func__);
+	} else {
+		pnctrl_info->base = ioremap(reg, sizeof(u32));
+		if (pnctrl_info->base == NULL) {
+			dev_err(dev, "%s ioremap failed\n", __func__);
+			goto err;
+		}
+	}
+
 	return 0;
 
 err:
diff --git a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
index 4cbdb72..1b51805 100644
--- a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
+++ b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
@@ -220,3 +220,100 @@
 	[MSM_SDW_TOP_I2S_RESET] = 1,
 	[MSM_SDW_TOP_BLOCKS_RESET] = 1,
 };
+
+const u8 msm_sdw_reg_writeable[MSM_SDW_MAX_REGISTER] = {
+	[MSM_SDW_PAGE_REGISTER] = 1,
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX9_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX10_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX11_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CTL] = 1,
+	[MSM_SDW_TX12_SPKR_PROT_PATH_CFG0] = 1,
+	[MSM_SDW_COMPANDER7_CTL0] = 1,
+	[MSM_SDW_COMPANDER7_CTL1] = 1,
+	[MSM_SDW_COMPANDER7_CTL2] = 1,
+	[MSM_SDW_COMPANDER7_CTL3] = 1,
+	[MSM_SDW_COMPANDER7_CTL4] = 1,
+	[MSM_SDW_COMPANDER7_CTL5] = 1,
+	[MSM_SDW_COMPANDER7_CTL7] = 1,
+	[MSM_SDW_COMPANDER8_CTL0] = 1,
+	[MSM_SDW_COMPANDER8_CTL1] = 1,
+	[MSM_SDW_COMPANDER8_CTL2] = 1,
+	[MSM_SDW_COMPANDER8_CTL3] = 1,
+	[MSM_SDW_COMPANDER8_CTL4] = 1,
+	[MSM_SDW_COMPANDER8_CTL5] = 1,
+	[MSM_SDW_COMPANDER8_CTL7] = 1,
+	[MSM_SDW_RX7_RX_PATH_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG0] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG1] = 1,
+	[MSM_SDW_RX7_RX_PATH_CFG2] = 1,
+	[MSM_SDW_RX7_RX_VOL_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_CFG] = 1,
+	[MSM_SDW_RX7_RX_VOL_MIX_CTL] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC0] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC1] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC2] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC3] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC5] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC6] = 1,
+	[MSM_SDW_RX7_RX_PATH_SEC7] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC0] = 1,
+	[MSM_SDW_RX7_RX_PATH_MIX_SEC1] = 1,
+	[MSM_SDW_RX8_RX_PATH_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG0] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG1] = 1,
+	[MSM_SDW_RX8_RX_PATH_CFG2] = 1,
+	[MSM_SDW_RX8_RX_VOL_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_CFG] = 1,
+	[MSM_SDW_RX8_RX_VOL_MIX_CTL] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC0] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC1] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC2] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC3] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC5] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC6] = 1,
+	[MSM_SDW_RX8_RX_PATH_SEC7] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC0] = 1,
+	[MSM_SDW_RX8_RX_PATH_MIX_SEC1] = 1,
+	[MSM_SDW_BOOST0_BOOST_PATH_CTL] = 1,
+	[MSM_SDW_BOOST0_BOOST_CTL] = 1,
+	[MSM_SDW_BOOST0_BOOST_CFG1] = 1,
+	[MSM_SDW_BOOST0_BOOST_CFG2] = 1,
+	[MSM_SDW_BOOST1_BOOST_PATH_CTL] = 1,
+	[MSM_SDW_BOOST1_BOOST_CTL] = 1,
+	[MSM_SDW_BOOST1_BOOST_CFG1] = 1,
+	[MSM_SDW_BOOST1_BOOST_CFG2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_DATA_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_WR_ADDR_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_0] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_1] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_2] = 1,
+	[MSM_SDW_AHB_BRIDGE_RD_ADDR_3] = 1,
+	[MSM_SDW_AHB_BRIDGE_ACCESS_CFG] = 1,
+	[MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL] = 1,
+	[MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL] = 1,
+	[MSM_SDW_CLK_RST_CTRL_SWR_CONTROL] = 1,
+	[MSM_SDW_TOP_TOP_CFG0] = 1,
+	[MSM_SDW_TOP_TOP_CFG1] = 1,
+	[MSM_SDW_TOP_RX_I2S_CTL] = 1,
+	[MSM_SDW_TOP_TX_I2S_CTL] = 1,
+	[MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 1,
+	[MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 1,
+	[MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 1,
+	[MSM_SDW_TOP_RX8_PATH_INPUT1_MUX] = 1,
+	[MSM_SDW_TOP_FREQ_MCLK] = 1,
+	[MSM_SDW_TOP_DEBUG_BUS_SEL] = 1,
+	[MSM_SDW_TOP_DEBUG_EN] = 1,
+	[MSM_SDW_TOP_I2S_RESET] = 1,
+	[MSM_SDW_TOP_BLOCKS_RESET] = 1,
+};
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw.h b/sound/soc/codecs/msm_sdw/msm_sdw.h
index 3691e84..376ebc6 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw.h
+++ b/sound/soc/codecs/msm_sdw/msm_sdw.h
@@ -21,6 +21,7 @@
 extern const struct regmap_config msm_sdw_regmap_config;
 extern const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER];
 extern const u8 msm_sdw_reg_readable[MSM_SDW_MAX_REGISTER];
+extern const u8 msm_sdw_reg_writeable[MSM_SDW_MAX_REGISTER];
 
 enum {
 	MSM_SDW_RX4 = 0,
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 502aa4f..62fdb94 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -854,8 +854,8 @@
 	if (!msm_sdw->comp_enabled[comp])
 		return 0;
 
-	comp_ctl0_reg = MSM_SDW_COMPANDER7_CTL0 + (comp * 8);
-	rx_path_cfg0_reg = MSM_SDW_RX7_RX_PATH_CFG0 + (comp * 20);
+	comp_ctl0_reg = MSM_SDW_COMPANDER7_CTL0 + (comp * 0x20);
+	rx_path_cfg0_reg = MSM_SDW_RX7_RX_PATH_CFG0 + (comp * 0x1E0);
 
 	if (SND_SOC_DAPM_EVENT_ON(event)) {
 		/* Enable Compander Clock */
@@ -1044,7 +1044,7 @@
 	 * Add sleep as SWR slave access read takes time.
 	 * Allow for RD_DONE to complete for previous register if any.
 	 */
-	usleep_range(50, 55);
+	usleep_range(100, 105);
 
 	/* read_lock */
 	mutex_lock(&msm_sdw->sdw_read_lock);
@@ -1079,6 +1079,11 @@
 	sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0;
 	sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0;
 
+	/*
+	 * Add sleep as SWR slave write takes time.
+	 * Allow for any previous pending write to complete.
+	 */
+	usleep_range(50, 55);
 	for (i = 0; i < len; i += 2) {
 		/* First Write the Data to register */
 		ret = regmap_bulk_write(msm_sdw->regmap,
@@ -1656,18 +1661,26 @@
 						    service_nb);
 	bool adsp_ready = false;
 	unsigned long timeout;
+	static bool initial_boot = true;
 
 	pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
 
 	mutex_lock(&msm_sdw->codec_mutex);
 	switch (opcode) {
 	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		if (initial_boot) {
+			initial_boot = false;
+			break;
+		}
+		msm_sdw->int_mclk1_enabled = false;
 		msm_sdw->dev_up = false;
 		for (i = 0; i < msm_sdw->nr; i++)
 			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
 					SWR_DEVICE_DOWN, NULL);
 		break;
 	case AUDIO_NOTIFIER_SERVICE_UP:
+		if (initial_boot)
+			initial_boot = false;
 		if (!q6core_is_adsp_ready()) {
 			dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
 			timeout = jiffies +
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
index 78858f0..2266338 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_regmap.c
@@ -115,6 +115,11 @@
 	return msm_sdw_reg_readable[reg];
 }
 
+static bool msm_sdw_is_writeable_register(struct device *dev, unsigned int reg)
+{
+	return msm_sdw_reg_writeable[reg];
+}
+
 static bool msm_sdw_is_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
@@ -150,6 +155,7 @@
 	.reg_defaults = msm_sdw_defaults,
 	.num_reg_defaults = ARRAY_SIZE(msm_sdw_defaults),
 	.max_register = MSM_SDW_MAX_REGISTER,
+	.writeable_reg = msm_sdw_is_writeable_register,
 	.volatile_reg = msm_sdw_is_volatile_register,
 	.readable_reg = msm_sdw_is_readable_register,
 };
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 5f8e3fd..a8fcd34 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -37,9 +37,10 @@
 #define DRV_NAME "pmic_analog_codec"
 #define SDM660_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
 			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
-			SNDRV_PCM_RATE_48000)
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
+			SNDRV_PCM_RATE_192000)
 #define SDM660_CDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-		SNDRV_PCM_FMTBIT_S24_LE)
+		SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_3LE)
 #define MSM_DIG_CDC_STRING_LEN 80
 #define MSM_ANLG_CDC_VERSION_ENTRY_SIZE 32
 
@@ -1399,8 +1400,26 @@
 	}
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMU:
-		if (atomic_inc_return(&supply->ref) == 1)
+		if (atomic_inc_return(&supply->ref) == 1) {
+			ret = regulator_set_voltage(supply->supply,
+						    supply->min_uv,
+						    supply->max_uv);
+			if (ret) {
+				dev_err(codec->dev,
+					"Setting regulator voltage(en) failed for micbias with err = %d\n",
+					ret);
+				goto out;
+			}
+			ret = regulator_set_load(supply->supply,
+						 supply->optimum_ua);
+			if (ret < 0) {
+				dev_err(codec->dev,
+					"Setting regulator optimum mode(en) failed for micbias with err = %d\n",
+					ret);
+				goto out;
+			}
 			ret = regulator_enable(supply->supply);
+		}
 		if (ret)
 			dev_err(codec->dev, "%s: Failed to enable %s\n",
 				__func__,
@@ -1412,12 +1431,27 @@
 				 __func__, on_demand_supply_name[w->shift]);
 			goto out;
 		}
-		if (atomic_dec_return(&supply->ref) == 0)
+		if (atomic_dec_return(&supply->ref) == 0) {
 			ret = regulator_disable(supply->supply);
 			if (ret)
 				dev_err(codec->dev, "%s: Failed to disable %s\n",
 					__func__,
 					on_demand_supply_name[w->shift]);
+			ret = regulator_set_voltage(supply->supply,
+						    0,
+						    supply->max_uv);
+			if (ret) {
+				dev_err(codec->dev,
+					"Setting regulator voltage(dis) failed for micbias with err = %d\n",
+					ret);
+				goto out;
+			}
+			ret = regulator_set_load(supply->supply, 0);
+			if (ret < 0)
+				dev_err(codec->dev,
+					"Setting regulator optimum mode(dis) failed for micbias with err = %d\n",
+					ret);
+		}
 		break;
 	default:
 		break;
@@ -1435,11 +1469,11 @@
 	if (enable) {
 		snd_soc_update_bits(codec,
 			MSM89XX_PMIC_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
+		msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_ON);
 		snd_soc_update_bits(codec,
 			MSM89XX_PMIC_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
 		snd_soc_update_bits(codec,
 			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
-		msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_ON);
 	} else {
 		snd_soc_update_bits(codec,
 			MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
@@ -3180,7 +3214,7 @@
 		.name = "msm_anlg_cdc_i2s_rx1",
 		.id = AIF1_PB,
 		.playback = {
-			.stream_name = "Playback",
+			.stream_name = "PDM Playback",
 			.rates = SDM660_CDC_RATES,
 			.formats = SDM660_CDC_FORMATS,
 			.rate_max = 192000,
@@ -3194,7 +3228,7 @@
 		.name = "msm_anlg_cdc_i2s_tx1",
 		.id = AIF1_CAP,
 		.capture = {
-			.stream_name = "Record",
+			.stream_name = "PDM Capture",
 			.rates = SDM660_CDC_RATES,
 			.formats = SDM660_CDC_FORMATS,
 			.rate_max = 48000,
@@ -3684,6 +3718,30 @@
 	return NULL;
 }
 
+static void msm_anlg_cdc_update_micbias_regulator(
+				const struct sdm660_cdc_priv *sdm660_cdc,
+				const char *name,
+				struct on_demand_supply *micbias_supply)
+{
+	int i;
+	struct sdm660_cdc_pdata *pdata = sdm660_cdc->dev->platform_data;
+
+	for (i = 0; i < sdm660_cdc->num_of_supplies; i++) {
+		if (sdm660_cdc->supplies[i].supply &&
+		    !strcmp(sdm660_cdc->supplies[i].supply, name)) {
+			micbias_supply->supply =
+				sdm660_cdc->supplies[i].consumer;
+			micbias_supply->min_uv = pdata->regulator[i].min_uv;
+			micbias_supply->max_uv = pdata->regulator[i].max_uv;
+			micbias_supply->optimum_ua =
+					pdata->regulator[i].optimum_ua;
+			return;
+		}
+	}
+
+	dev_err(sdm660_cdc->dev, "Error: regulator not found:%s\n", name);
+}
+
 static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
 {
 	struct msm_asoc_mach_data *pdata = NULL;
@@ -3759,8 +3817,8 @@
 	snd_soc_write(codec,
 		MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
 
-	atomic_set(&pdata->int_mclk0_enabled, false);
 	msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
+	atomic_set(&pdata->int_mclk0_enabled, false);
 	set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
 	snd_soc_card_change_online_state(codec->component.card, 0);
 
@@ -3771,7 +3829,6 @@
 {
 	struct sdm660_cdc_priv *sdm660_cdc_priv =
 		snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
 
 	dev_dbg(codec->dev, "%s: device up!\n", __func__);
 
@@ -3793,18 +3850,6 @@
 	else if (sdm660_cdc_priv->boost_option == BYPASS_ALWAYS)
 		msm_anlg_cdc_bypass_on(codec);
 
-	msm_anlg_cdc_configure_cap(codec, false, false);
-	wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
-	wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
-	ret = wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb,
-			    &intr_ids, wcd_mbhc_registers, true);
-	if (ret)
-		dev_err(codec->dev, "%s: mbhc initialization failed\n",
-			__func__);
-	else
-		wcd_mbhc_start(&sdm660_cdc_priv->mbhc,
-			sdm660_cdc_priv->mbhc.mbhc_cfg);
-
 	return 0;
 }
 
@@ -3818,17 +3863,24 @@
 	bool adsp_ready = false;
 	bool timedout;
 	unsigned long timeout;
+	static bool initial_boot = true;
 
 	codec = sdm660_cdc_priv->codec;
 	dev_dbg(codec->dev, "%s: Service opcode 0x%lx\n", __func__, opcode);
 
 	switch (opcode) {
 	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		if (initial_boot) {
+			initial_boot = false;
+			break;
+		}
 		dev_dbg(codec->dev,
 			"ADSP is about to power down. teardown/reset codec\n");
 		msm_anlg_cdc_device_down(codec);
 		break;
 	case AUDIO_NOTIFIER_SERVICE_UP:
+		if (initial_boot)
+			initial_boot = false;
 		dev_dbg(codec->dev,
 			"ADSP is about to power up. bring up codec\n");
 
@@ -4119,10 +4171,10 @@
 
 	wcd9xxx_spmi_set_codec(codec);
 
-	sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].supply =
-				msm_anlg_cdc_find_regulator(
+	msm_anlg_cdc_update_micbias_regulator(
 				sdm660_cdc,
-				on_demand_supply_name[ON_DEMAND_MICBIAS]);
+				on_demand_supply_name[ON_DEMAND_MICBIAS],
+				&sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS]);
 	atomic_set(&sdm660_cdc->on_demand_list[ON_DEMAND_MICBIAS].ref,
 		   0);
 
@@ -4158,6 +4210,8 @@
 	snd_soc_dapm_ignore_suspend(dapm, "PDM Playback");
 	snd_soc_dapm_ignore_suspend(dapm, "PDM Capture");
 
+	snd_soc_dapm_sync(dapm);
+
 	return 0;
 }
 
@@ -4186,7 +4240,7 @@
 		if (pdata->regulator[i].ondemand)
 			continue;
 		if (regulator_count_voltages(
-				sdm660_cdc->supplies[i].consumer) <=	0)
+				sdm660_cdc->supplies[i].consumer) <= 0)
 			continue;
 
 		ret = regulator_set_voltage(
@@ -4219,7 +4273,7 @@
 		if (pdata->regulator[i].ondemand)
 			continue;
 		if (regulator_count_voltages(
-				sdm660_cdc->supplies[i].consumer) <=	0)
+				sdm660_cdc->supplies[i].consumer) <= 0)
 			continue;
 		regulator_set_voltage(sdm660_cdc->supplies[i].consumer, 0,
 				pdata->regulator[i].max_uv);
@@ -4320,6 +4374,28 @@
 		if (regulator_count_voltages(
 			sdm660_cdc->supplies[i].consumer) <= 0)
 			continue;
+		if (pdata->regulator[i].ondemand) {
+			ret = regulator_set_voltage(
+					sdm660_cdc->supplies[i].consumer,
+					0, pdata->regulator[i].max_uv);
+			if (ret) {
+				dev_err(sdm660_cdc->dev,
+					"Setting regulator voltage failed for regulator %s err = %d\n",
+					sdm660_cdc->supplies[i].supply, ret);
+				goto err_supplies;
+			}
+			ret = regulator_set_load(
+				sdm660_cdc->supplies[i].consumer, 0);
+			if (ret < 0) {
+				dev_err(sdm660_cdc->dev,
+					"Setting regulator optimum mode failed for regulator %s err = %d\n",
+					sdm660_cdc->supplies[i].supply, ret);
+				goto err_supplies;
+			} else {
+				ret = 0;
+				continue;
+			}
+		}
 		ret = regulator_set_voltage(sdm660_cdc->supplies[i].consumer,
 					    pdata->regulator[i].min_uv,
 					    pdata->regulator[i].max_uv);
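The new ondemand branch above parks supplies that are only enabled around active use cases: the voltage window is opened down to 0 and the load request is dropped so the rail can stay off while idle, instead of being programmed to its operating window like the always-on supplies handled below it. The parking step in isolation (helper name hypothetical; the regulator calls are the standard consumer API):

#include <linux/regulator/consumer.h>

/* Park an on-demand supply: keep it configured but request nothing. */
static int park_on_demand_supply(struct regulator *reg, int max_uv)
{
	int ret;

	if (regulator_count_voltages(reg) <= 0)
		return 0;				/* fixed rail */

	ret = regulator_set_voltage(reg, 0, max_uv);	/* allow 0..max_uv */
	if (ret)
		return ret;

	ret = regulator_set_load(reg, 0);		/* no load while idle */
	return ret < 0 ? ret : 0;
}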
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
index 0c9e9a6..9563565 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.h
@@ -144,6 +144,9 @@
 struct on_demand_supply {
 	struct regulator *supply;
 	atomic_t ref;
+	int min_uv;
+	int max_uv;
+	int optimum_ua;
 };
 
 struct wcd_imped_i_ref {
diff --git a/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h b/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h
index 95dbc76..1a490a4 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h
+++ b/sound/soc/codecs/sdm660_cdc/msm-cdc-common.h
@@ -19,6 +19,7 @@
 		msm89xx_pmic_cdc_defaults[MSM89XX_PMIC_CDC_CACHE_SIZE];
 
 bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg);
+bool msm89xx_cdc_core_writeable_reg(struct device *dev, unsigned int reg);
 bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg);
 
 enum {
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index f140b19..3f9c0b4 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -30,6 +30,7 @@
 #include "msm-digital-cdc.h"
 #include "msm-cdc-common.h"
 #include "../../msm/sdm660-common.h"
+#include "../../../../drivers/base/regmap/internal.h"
 
 #define DRV_NAME "msm_digital_codec"
 #define MCLK_RATE_9P6MHZ        9600000
@@ -71,11 +72,13 @@
 {
 	int ret = -EINVAL;
 	struct msm_asoc_mach_data *pdata = NULL;
+	struct msm_dig_priv *msm_dig_cdc =
+				snd_soc_codec_get_drvdata(registered_digcodec);
 
 	pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
 
-	mutex_lock(&pdata->cdc_int_mclk0_mutex);
 	if (flag) {
+		mutex_lock(&pdata->cdc_int_mclk0_mutex);
 		if (atomic_read(&pdata->int_mclk0_enabled) == false) {
 			pdata->digital_cdc_core_clk.enable = 1;
 			ret = afe_set_lpass_clock_v2(
@@ -84,7 +87,12 @@
 			if (ret < 0) {
 				pr_err("%s:failed to enable the MCLK\n",
 				       __func__);
-				mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+				/*
+				 * Avoid access to lpass register
+				 * as clock enable failed during SSR.
+				 */
+				if (ret == -ENODEV)
+					msm_dig_cdc->regmap->cache_only = true;
 				return ret;
 			}
 			pr_debug("enabled digital codec core clk\n");
@@ -93,10 +101,10 @@
 					      50);
 		}
 	} else {
+		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
 		dev_dbg(registered_digcodec->dev,
 			"disable MCLK, workq to disable set already\n");
 	}
-	mutex_unlock(&pdata->cdc_int_mclk0_mutex);
 	return 0;
 }
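With the mutex taken in the enable path and released in the disable path, the two halves now pair up as the regmap bus lock: regmap invokes its .lock callback before every register access and .unlock afterwards, so the codec core clock can be voted on for exactly the duration of each hardware transaction. A generic sketch of that pattern (the context struct, names and the clk API below are illustrative; this driver votes through the AFE clock interface instead):

struct cdc_io_ctx {
	struct mutex lock;
	struct clk *mclk;
};

static void cdc_regmap_lock(void *arg)		/* wired via .lock/.lock_arg */
{
	struct cdc_io_ctx *ctx = arg;

	mutex_lock(&ctx->lock);
	clk_prepare_enable(ctx->mclk);		/* codec must be clocked for I/O */
}

static void cdc_regmap_unlock(void *arg)	/* wired via .unlock */
{
	struct cdc_io_ctx *ctx = arg;

	clk_disable_unprepare(ctx->mclk);
	mutex_unlock(&ctx->lock);
}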
 
@@ -107,6 +115,7 @@
 
 static void disable_digital_callback(void *flag)
 {
+	msm_digcdc_clock_control(false);
 	pr_debug("disable mclk happens in workq\n");
 }
 
@@ -973,6 +982,7 @@
 	struct snd_soc_codec *codec = registered_digcodec;
 	struct msm_dig_priv *msm_dig_cdc = snd_soc_codec_get_drvdata(codec);
 	struct msm_asoc_mach_data *pdata = NULL;
+	int ret = -EINVAL;
 
 	pdata = snd_soc_card_get_drvdata(codec->component.card);
 
@@ -1028,7 +1038,7 @@
 		break;
 	case DIG_CDC_EVENT_PRE_RX1_INT_ON:
 		snd_soc_update_bits(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x14);
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x3C, 0x28);
 		snd_soc_update_bits(codec,
 				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0x10);
 		snd_soc_update_bits(codec,
@@ -1036,7 +1046,7 @@
 		break;
 	case DIG_CDC_EVENT_PRE_RX2_INT_ON:
 		snd_soc_update_bits(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x14);
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x3C, 0x28);
 		snd_soc_update_bits(codec,
 				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0x10);
 		snd_soc_update_bits(codec,
@@ -1044,7 +1054,7 @@
 		break;
 	case DIG_CDC_EVENT_POST_RX1_INT_OFF:
 		snd_soc_update_bits(codec,
-				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x1C, 0x00);
+				MSM89XX_CDC_CORE_RX1_B3_CTL, 0x3C, 0x00);
 		snd_soc_update_bits(codec,
 				MSM89XX_CDC_CORE_RX1_B4_CTL, 0x18, 0xFF);
 		snd_soc_update_bits(codec,
@@ -1052,7 +1062,7 @@
 		break;
 	case DIG_CDC_EVENT_POST_RX2_INT_OFF:
 		snd_soc_update_bits(codec,
-				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x1C, 0x00);
+				MSM89XX_CDC_CORE_RX2_B3_CTL, 0x3C, 0x00);
 		snd_soc_update_bits(codec,
 				MSM89XX_CDC_CORE_RX2_B4_CTL, 0x18, 0xFF);
 		snd_soc_update_bits(codec,
@@ -1064,7 +1074,28 @@
 	case DIG_CDC_EVENT_SSR_UP:
 		regcache_cache_only(msm_dig_cdc->regmap, false);
 		regcache_mark_dirty(msm_dig_cdc->regmap);
+
+		mutex_lock(&pdata->cdc_int_mclk0_mutex);
+		pdata->digital_cdc_core_clk.enable = 1;
+		ret = afe_set_lpass_clock_v2(
+					AFE_PORT_ID_INT0_MI2S_RX,
+					&pdata->digital_cdc_core_clk);
+		if (ret < 0) {
+			pr_err("%s:failed to enable the MCLK\n",
+			       __func__);
+			mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+			break;
+		}
+		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+
 		regcache_sync(msm_dig_cdc->regmap);
+
+		mutex_lock(&pdata->cdc_int_mclk0_mutex);
+		pdata->digital_cdc_core_clk.enable = 0;
+		afe_set_lpass_clock_v2(
+				AFE_PORT_ID_INT0_MI2S_RX,
+				&pdata->digital_cdc_core_clk);
+		mutex_unlock(&pdata->cdc_int_mclk0_mutex);
 		break;
 	case DIG_CDC_EVENT_INVALID:
 	default:
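The SSR_UP handler now brackets regcache_sync() with an explicit core-clock vote: the flat cache was marked dirty while the ADSP was down, and replaying it is only safe while the digital codec clock is actually running. Reduced to the regcache steps (the clock helpers below are hypothetical wrappers; in this driver the vote goes through afe_set_lpass_clock_v2() as shown above):

regcache_cache_only(map, false);	/* hardware is reachable again      */
regcache_mark_dirty(map);		/* registers are back at defaults   */

if (!codec_mclk_enable(pdata)) {	/* hypothetical clock-vote helper   */
	regcache_sync(map);		/* writes back every dirty register */
	codec_mclk_disable(pdata);
}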
@@ -1207,6 +1238,8 @@
 	snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX2");
 	snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX3");
 
+	snd_soc_dapm_sync(dapm);
+
 	return 0;
 }
 
@@ -1929,8 +1962,12 @@
 			.stream_name = "AIF1 Playback",
 			.channels_min = 1,
 			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
 		},
 		 .ops = &msm_dig_dai_ops,
 	},
@@ -2012,12 +2049,13 @@
 const struct regmap_config msm_digital_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
-	.val_bits = 32,
+	.val_bits = 8,
 	.lock = enable_digital_callback,
 	.unlock = disable_digital_callback,
 	.cache_type = REGCACHE_FLAT,
 	.reg_defaults = msm89xx_cdc_core_defaults,
 	.num_reg_defaults = MSM89XX_CDC_CORE_MAX_REGISTER,
+	.writeable_reg = msm89xx_cdc_core_writeable_reg,
 	.readable_reg = msm89xx_cdc_core_readable_reg,
 	.volatile_reg = msm89xx_cdc_core_volatile_reg,
 	.reg_format_endian = REGMAP_ENDIAN_NATIVE,
@@ -2085,10 +2123,18 @@
 #ifdef CONFIG_PM
 static int msm_dig_suspend(struct device *dev)
 {
-	struct msm_asoc_mach_data *pdata =
-	snd_soc_card_get_drvdata(registered_digcodec->component.card);
+	struct msm_asoc_mach_data *pdata;
 	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
 
+	if (!registered_digcodec || !msm_dig_cdc) {
+		pr_debug("%s: digcodec not initialized, return\n", __func__);
+		return 0;
+	}
+	pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+	if (!pdata) {
+		pr_debug("%s: card not initialized, return\n", __func__);
+		return 0;
+	}
 	if (msm_dig_cdc->dapm_bias_off) {
 		pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
 			__func__, atomic_read(&pdata->int_mclk0_rsc_ref),
@@ -2115,8 +2161,8 @@
 }
 
 static const struct dev_pm_ops msm_dig_pm_ops = {
-	.suspend = msm_dig_suspend,
-	.resume = msm_dig_resume,
+	.suspend_late = msm_dig_suspend,
+	.resume_early = msm_dig_resume,
 };
 #endif
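Moving from .suspend/.resume to .suspend_late/.resume_early defers the digital codec's suspend work until after the normal device suspend phase, when the sound card and its streams have already been quiesced, and restores it before they resume. If the hibernation hooks are wanted too, the same pairing is usually written with the pm.h helper (sketch; it fills in the late/early callbacks shown above plus their freeze/thaw/poweroff/restore counterparts):

#ifdef CONFIG_PM
static const struct dev_pm_ops msm_dig_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(msm_dig_suspend, msm_dig_resume)
};
#endif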
 
diff --git a/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c b/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
index fff1fdc..7d8ac6d 100644
--- a/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
+++ b/sound/soc/codecs/sdm660_cdc/sdm660-regmap.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/regmap.h>
+#include "msm-cdc-common.h"
 #include "sdm660-cdc-registers.h"
 
 /*
@@ -444,16 +445,167 @@
 		[MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
 };
 
+static const u8 msm89xx_cdc_core_reg_writeable[MSM89XX_CDC_CORE_CACHE_SIZE] = {
+		[MSM89XX_CDC_CORE_CLK_RX_RESET_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_RESET_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_DMIC_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_OTHR_RESET_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX_CLK_EN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_OTHR_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_MCLK_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_PDM_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_SD_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_DMIC_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_RX_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CLK_TX2_I2S_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX1_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX2_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_RX3_VOL_CTL_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_TOP_GAIN_UPDATE] = 1,
+		[MSM89XX_CDC_CORE_TOP_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_COMP0_FS_CFG] = 1,
+		[MSM89XX_CDC_CORE_COMP0_DELAY_BUF_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_DESER1_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_DESER2_CTL] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B1_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B2_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_DEBUG_B3_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B5_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B6_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B7_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B7_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_B8_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_B8_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_GAIN_TIMER_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_GAIN_TIMER_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR1_COEF_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_IIR2_COEF_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX3_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_RX3_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ1_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B2_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_EQ2_B4_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_I2S_SD1_CTL] = 1,
+		[MSM89XX_CDC_CORE_CONN_TX_B3_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX1_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_TIMER] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_GAIN] = 1,
+		[MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG] = 1,
+		[MSM89XX_CDC_CORE_TX5_MUX_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_CLK_FS_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX5_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX1_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX2_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX3_DMIC_CTL] = 1,
+		[MSM89XX_CDC_CORE_TX4_DMIC_CTL] = 1,
+};
+
 bool msm89xx_cdc_core_readable_reg(struct device *dev, unsigned int reg)
 {
 	return msm89xx_cdc_core_reg_readable[reg];
 }
 
+bool msm89xx_cdc_core_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return msm89xx_cdc_core_reg_writeable[reg];
+}
+
 bool msm89xx_cdc_core_volatile_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	/* cache bypass for initial version */
-	default:
+	case MSM89XX_CDC_CORE_RX1_B1_CTL:
+	case MSM89XX_CDC_CORE_RX2_B1_CTL:
+	case MSM89XX_CDC_CORE_RX3_B1_CTL:
+	case MSM89XX_CDC_CORE_RX1_B6_CTL:
+	case MSM89XX_CDC_CORE_RX2_B6_CTL:
+	case MSM89XX_CDC_CORE_RX3_B6_CTL:
+	case MSM89XX_CDC_CORE_TX1_VOL_CTL_CFG:
+	case MSM89XX_CDC_CORE_TX2_VOL_CTL_CFG:
+	case MSM89XX_CDC_CORE_TX3_VOL_CTL_CFG:
+	case MSM89XX_CDC_CORE_TX4_VOL_CTL_CFG:
+	case MSM89XX_CDC_CORE_TX5_VOL_CTL_CFG:
+	case MSM89XX_CDC_CORE_IIR1_COEF_B1_CTL:
+	case MSM89XX_CDC_CORE_IIR2_COEF_B1_CTL:
+	case MSM89XX_CDC_CORE_CLK_MCLK_CTL:
+	case MSM89XX_CDC_CORE_CLK_PDM_CTL:
 		return true;
+	default:
+		return false;
 	}
 }
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index ae53294..93c2fd1 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/stringify.h>
 #include <linux/of.h>
+#include <linux/debugfs.h>
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
 #include <soc/qcom/ramdump.h>
@@ -182,6 +183,10 @@
 	struct work_struct ssr_work;
 	u16 ready_status;
 	struct completion ready_compl;
+
+	/* Debugfs related */
+	struct dentry *entry;
+	bool panic_on_error;
 };
 
 static char *wdsp_get_ssr_type_string(enum wdsp_ssr_type type)
@@ -656,6 +661,12 @@
 		goto err_read_dumps;
 	}
 
+	/*
+	 * If panic_on_error flag is explicitly set through the debugfs,
+	 * then cause a BUG here to aid debugging.
+	 */
+	BUG_ON(wdsp->panic_on_error);
+
 	rd_seg.address = (unsigned long) wdsp->dump_data.rd_v_addr;
 	rd_seg.size = img_section.size;
 	rd_seg.v_address = wdsp->dump_data.rd_v_addr;
@@ -949,6 +960,22 @@
 		 !strcmp(dev_name(dev), cmpnt->cdev_name)));
 }
 
+static void wdsp_mgr_debugfs_init(struct wdsp_mgr_priv *wdsp)
+{
+	wdsp->entry = debugfs_create_dir("wdsp_mgr", NULL);
+	if (IS_ERR_OR_NULL(wdsp->entry))
+		return;
+
+	debugfs_create_bool("panic_on_error", 0644,
+			    wdsp->entry, &wdsp->panic_on_error);
+}
+
+static void wdsp_mgr_debugfs_remove(struct wdsp_mgr_priv *wdsp)
+{
+	debugfs_remove_recursive(wdsp->entry);
+	wdsp->entry = NULL;
+}
+
 static int wdsp_mgr_bind(struct device *dev)
 {
 	struct wdsp_mgr_priv *wdsp = dev_get_drvdata(dev);
@@ -978,6 +1005,8 @@
 		}
 	}
 
+	wdsp_mgr_debugfs_init(wdsp);
+
 	/* Schedule the work to download image if binding was successful. */
 	if (!ret)
 		schedule_work(&wdsp->load_fw_work);
@@ -993,6 +1022,8 @@
 
 	component_unbind_all(dev, wdsp->ops);
 
+	wdsp_mgr_debugfs_remove(wdsp);
+
 	if (wdsp->dump_data.rd_dev) {
 		destroy_ramdump_device(wdsp->dump_data.rd_dev);
 		wdsp->dump_data.rd_dev = NULL;
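panic_on_error is a plain debugfs boolean: when a tester sets it, the next ramdump collection trips the BUG_ON() added above so the full system state is preserved for post-mortem instead of the subsystem quietly recovering. The plumbing for such a fault knob is small (generic sketch with hypothetical names; debugfs_remove_recursive() tolerates a NULL or error dentry on teardown):

#include <linux/debugfs.h>

static struct dentry *dbg_dir;
static bool panic_on_error;		/* toggled from userspace */

static void dbg_init(void)
{
	dbg_dir = debugfs_create_dir("my_drv", NULL);	/* hypothetical dir */
	if (IS_ERR_OR_NULL(dbg_dir))
		return;			/* debugfs disabled or create failed */

	debugfs_create_bool("panic_on_error", 0644, dbg_dir, &panic_on_error);
}

/* error path: */
if (fatal_error && panic_on_error)
	BUG();				/* halt so a complete dump is taken */

With debugfs mounted in the usual place, writing 1 to /sys/kernel/debug/wdsp_mgr/panic_on_error arms the check added in this patch.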
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index ebcb413..cb96f2b 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -1292,7 +1292,7 @@
 	wcd_program_btn_threshold(mbhc, false);
 
 
-	init_completion(&mbhc->btn_press_compl);
+	reinit_completion(&mbhc->btn_press_compl);
 
 	WCD_MBHC_RSC_UNLOCK(mbhc);
 	pr_debug("%s: leave\n", __func__);
@@ -1905,6 +1905,7 @@
 	}
 	mutex_init(&mbhc->hphl_pa_lock);
 	mutex_init(&mbhc->hphr_pa_lock);
+	init_completion(&mbhc->btn_press_compl);
 
 	/* Register event notifier */
 	mbhc->nblock.notifier_call = wcd_event_notify;
@@ -2057,6 +2058,9 @@
 	mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc);
 	if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier)
 		mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false);
+	if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug)
+		mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc,
+					&mbhc->correct_plug_swch);
 	mutex_destroy(&mbhc->codec_resource_lock);
 	mutex_destroy(&mbhc->hphl_pa_lock);
 	mutex_destroy(&mbhc->hphr_pa_lock);
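The completion change follows the usual split in the completion API: init_completion() is one-time setup (it also initialises the embedded wait-queue lock), while reinit_completion() only resets the done counter and is what a path re-arming an already-initialised completion should use; hence init_completion() moves into the one-time MBHC init and the point of use switches to reinit_completion(). The general shape (context struct and timeout value illustrative):

#include <linux/completion.h>

struct det_ctx {
	struct completion btn_press_compl;
};

static void det_setup(struct det_ctx *c)
{
	init_completion(&c->btn_press_compl);		/* once, at init time */
}

static int det_wait_for_press(struct det_ctx *c)
{
	reinit_completion(&c->btn_press_compl);		/* re-arm before use  */

	if (!wait_for_completion_timeout(&c->btn_press_compl,
					 msecs_to_jiffies(400)))
		return -ETIMEDOUT;
	return 0;
}

/* The button-press interrupt then calls complete(&c->btn_press_compl). */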
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 7e217a6..a08b598 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -82,8 +82,15 @@
 #define WCD_SPI_WORD_BYTE_CNT (4)
 #define WCD_SPI_RW_MULTI_MIN_LEN (16)
 
-/* Max size is closest multiple of 16 less than 64Kbytes */
-#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 16)
+/* Max size is 32 bytes less than 64Kbytes */
+#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
+
+/*
+ * Max size for the pre-allocated buffers is the max
+ * possible read/write length + 32 bytes for the SPI
+ * read/write command header itself.
+ */
+#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
 
 /* Alignment requirements */
 #define WCD_SPI_RW_MIN_ALIGN    WCD_SPI_WORD_BYTE_CNT
@@ -149,6 +156,10 @@
 
 	/* Completion object to indicate system resume completion */
 	struct completion resume_comp;
+
+	/* Buffers to hold memory used for transfers */
+	void *tx_buf;
+	void *rx_buf;
 };
 
 enum xfer_request {
@@ -230,17 +241,18 @@
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
 	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
-	u8 *tx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
 	u32 frame = 0;
 	int ret;
 
 	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
 		__func__, remote_addr);
 
-	tx_buf = kzalloc(WCD_SPI_READ_SINGLE_LEN,
-			 GFP_KERNEL | GFP_DMA);
-	if (!tx_buf)
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
 		return -ENOMEM;
+	}
 
 	frame |= WCD_SPI_READ_FRAME_OPCODE;
 	frame |= remote_addr & WCD_CMD_ADDR_MASK;
@@ -256,7 +268,6 @@
 	rx_xfer->len = sizeof(*val);
 
 	ret = spi_sync(spi, &wcd_spi->msg2);
-	kfree(tx_buf);
 
 	return ret;
 }
@@ -267,8 +278,8 @@
 {
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *xfer = &wcd_spi->xfer1;
-	u8 *tx_buf;
-	u8 *rx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
+	u8 *rx_buf = wcd_spi->rx_buf;
 	u32 frame = 0;
 	int ret;
 
@@ -278,15 +289,9 @@
 	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
 	frame |= remote_addr & WCD_CMD_ADDR_MASK;
 
-	tx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
-			 GFP_KERNEL | GFP_DMA);
-	if (!tx_buf)
-		return -ENOMEM;
-
-	rx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
-			 GFP_KERNEL | GFP_DMA);
-	if (!rx_buf) {
-		kfree(tx_buf);
+	if (!tx_buf || !rx_buf) {
+		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
+			(!tx_buf) ? "tx_buf" : "rx_buf");
 		return -ENOMEM;
 	}
 
@@ -306,8 +311,6 @@
 
 	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
 done:
-	kfree(tx_buf);
-	kfree(rx_buf);
 	return ret;
 }
 
@@ -344,7 +347,7 @@
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *xfer = &wcd_spi->xfer1;
 	u32 frame = 0;
-	u8 *tx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
 	int xfer_len, ret;
 
 	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
@@ -356,9 +359,11 @@
 	frame = cpu_to_be32(frame);
 	xfer_len = len + sizeof(frame);
 
-	tx_buf = kzalloc(xfer_len, GFP_KERNEL);
-	if (!tx_buf)
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
 		return -ENOMEM;
+	}
 
 	memcpy(tx_buf, &frame, sizeof(frame));
 	memcpy(tx_buf + sizeof(frame), data, len);
@@ -372,8 +377,6 @@
 		dev_err(&spi->dev,
 			"%s: Failed, addr = 0x%x, len = %zd\n",
 			__func__, remote_addr, len);
-	kfree(tx_buf);
-
 	return ret;
 }
 
@@ -1331,6 +1334,23 @@
 	spi_message_init(&wcd_spi->msg2);
 	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
 	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+
+	/* Pre-allocate the buffers */
+	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->tx_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->rx_buf) {
+		kfree(wcd_spi->tx_buf);
+		wcd_spi->tx_buf = NULL;
+		ret = -ENOMEM;
+		goto done;
+	}
 done:
 	return ret;
 }
@@ -1348,6 +1368,11 @@
 	spi_transfer_del(&wcd_spi->xfer1);
 	spi_transfer_del(&wcd_spi->xfer2[0]);
 	spi_transfer_del(&wcd_spi->xfer2[1]);
+
+	kfree(wcd_spi->tx_buf);
+	kfree(wcd_spi->rx_buf);
+	wcd_spi->tx_buf = NULL;
+	wcd_spi->rx_buf = NULL;
 }
 
 static const struct component_ops wcd_spi_component_ops = {
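Allocating tx_buf/rx_buf once at component bind replaces a kzalloc(GFP_KERNEL | GFP_DMA)/kfree pair on every register read, write and firmware-download chunk, and removes the chance of an allocation failure in the middle of a transfer; both buffers are sized WCD_SPI_RW_MAX_BUF_SIZE so they cover the largest multi-word transfer plus the command header. The buffer lifecycle in isolation (function names hypothetical, fields as declared above):

static int wcd_spi_bufs_alloc(struct wcd_spi_priv *wcd_spi)
{
	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->tx_buf)
		return -ENOMEM;

	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
				  GFP_KERNEL | GFP_DMA);
	if (!wcd_spi->rx_buf) {
		kfree(wcd_spi->tx_buf);
		wcd_spi->tx_buf = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void wcd_spi_bufs_free(struct wcd_spi_priv *wcd_spi)
{
	kfree(wcd_spi->tx_buf);
	kfree(wcd_spi->rx_buf);
	wcd_spi->tx_buf = NULL;
	wcd_spi->rx_buf = NULL;
}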
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index dedf4dc..90d16fb 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -188,7 +188,7 @@
 MODULE_PARM_DESC(sido_buck_svs_voltage,
 			"setting for SVS voltage for SIDO BUCK");
 
-#define TASHA_TX_UNMUTE_DELAY_MS	25
+#define TASHA_TX_UNMUTE_DELAY_MS	40
 
 static int tx_unmute_delay = TASHA_TX_UNMUTE_DELAY_MS;
 module_param(tx_unmute_delay, int, 0664);
@@ -4941,7 +4941,7 @@
 					 int src_num,
 					 int event)
 {
-	u16 src_paired_reg;
+	u16 src_paired_reg = 0;
 	struct tasha_priv *tasha;
 	u16 rx_path_cfg_reg = WCD9335_CDC_RX1_RX_PATH_CFG0;
 	u16 rx_path_ctl_reg = WCD9335_CDC_RX1_RX_PATH_CTL;
@@ -5902,8 +5902,6 @@
 					    CF_MIN_3DB_150HZ << 5);
 		/* Enable TX PGA Mute */
 		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
-		/* Enable APC */
-		snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x08);
 		break;
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
@@ -5930,7 +5928,6 @@
 		hpf_cut_off_freq =
 			tasha->tx_hpf_work[decimator].hpf_cut_off_freq;
 		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
-		snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x00);
 		if (cancel_delayed_work_sync(
 		    &tasha->tx_hpf_work[decimator].dwork)) {
 			if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index c0a32f3..ff08ccb 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -123,6 +123,7 @@
 #define WCD934X_DEC_PWR_LVL_DF 0x00
 #define WCD934X_STRING_LEN 100
 
+#define WCD934X_CDC_SIDETONE_IIR_COEFF_MAX 5
 #define WCD934X_DIG_CORE_REG_MIN  WCD934X_CDC_ANC0_CLK_RESET_CTL
 #define WCD934X_DIG_CORE_REG_MAX  0xFFF
 
@@ -505,7 +506,7 @@
 static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
 static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
 
-#define WCD934X_TX_UNMUTE_DELAY_MS 25
+#define WCD934X_TX_UNMUTE_DELAY_MS 40
 
 static int tx_unmute_delay = WCD934X_TX_UNMUTE_DELAY_MS;
 module_param(tx_unmute_delay, int, 0664);
@@ -651,6 +652,8 @@
 	struct tavil_idle_detect_config idle_det_cfg;
 
 	int power_active_ref;
+	int sidetone_coeff_array[IIR_MAX][BAND_MAX]
+		[WCD934X_CDC_SIDETONE_IIR_COEFF_MAX];
 };
 
 static const struct tavil_reg_mask_val tavil_spkr_default[] = {
@@ -795,11 +798,13 @@
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
 	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
 	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+	/* Add 1msec delay for VOUT to settle */
+	usleep_range(1000, 1100);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
 
 	return 0;
 }
@@ -5157,10 +5162,12 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
 	int iir_idx = ((struct soc_multi_mixer_control *)
 					kcontrol->private_value)->reg;
 	int band_idx = ((struct soc_multi_mixer_control *)
 					kcontrol->private_value)->shift;
+	int coeff_idx;
 
 	/*
 	 * Mask top bit it is reserved
@@ -5170,16 +5177,15 @@
 		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
 		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
 
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[0]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[1]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[2]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[3]);
-	set_iir_band_coeff(codec, iir_idx, band_idx,
-				ucontrol->value.integer.value[4]);
+	/* Store the coefficients in sidetone coeff array */
+	for (coeff_idx = 0; coeff_idx < WCD934X_CDC_SIDETONE_IIR_COEFF_MAX;
+		coeff_idx++) {
+		tavil->sidetone_coeff_array[iir_idx][band_idx][coeff_idx] =
+			ucontrol->value.integer.value[coeff_idx];
+		set_iir_band_coeff(codec, iir_idx, band_idx,
+			tavil->sidetone_coeff_array[iir_idx][band_idx]
+							[coeff_idx]);
+	}
 
 	pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n"
 		"%s: IIR #%d band #%d b1 = 0x%x\n"
@@ -5199,6 +5205,26 @@
 	return 0;
 }
 
+static void tavil_restore_iir_coeff(struct tavil_priv *tavil, int iir_idx)
+{
+	int band_idx = 0, coeff_idx = 0;
+	struct snd_soc_codec *codec = tavil->codec;
+
+	for (band_idx = 0; band_idx < BAND_MAX; band_idx++) {
+		snd_soc_write(codec,
+		(WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx),
+		(band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F);
+
+		for (coeff_idx = 0;
+			coeff_idx < WCD934X_CDC_SIDETONE_IIR_COEFF_MAX;
+			coeff_idx++) {
+			set_iir_band_coeff(codec, iir_idx, band_idx,
+				tavil->sidetone_coeff_array[iir_idx][band_idx]
+								[coeff_idx]);
+		}
+	}
+}
+
 static int tavil_compander_get(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *ucontrol)
 {
@@ -5471,7 +5497,7 @@
 	u32 adc, i, mic_bias_found = 0;
 	int ret = 0;
 	char *mad_input;
-	bool is_adc2_input = false;
+	bool is_adc_input = false;
 
 	tavil_mad_input = ucontrol->value.integer.value[0];
 
@@ -5519,8 +5545,7 @@
 		snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
 
 		mad_input_widget = mad_amic_input_widget;
-		if (adc == 2)
-			is_adc2_input = true;
+		is_adc_input = true;
 	} else {
 		/* DMIC type input widget*/
 		mad_input_widget = tavil_conn_mad_text[tavil_mad_input];
@@ -5528,7 +5553,7 @@
 
 	dev_dbg(codec->dev,
 		"%s: tavil input widget = %s, adc_input = %s\n", __func__,
-		mad_input_widget, is_adc2_input ? "true" : "false");
+		mad_input_widget, is_adc_input ? "true" : "false");
 
 	for (i = 0; i < card->num_of_dapm_routes; i++) {
 		if (!strcmp(card->of_dapm_routes[i].sink, mad_input_widget)) {
@@ -5573,8 +5598,8 @@
 			    0x0F, tavil_mad_input);
 	snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
 			    0x07, mic_bias_found);
-	/* for adc2 input, mad should be in micbias mode with BG enabled */
-	if (is_adc2_input)
+	/* for all adc inputs, mad should be in micbias mode with BG enabled */
+	if (is_adc_input)
 		snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
 				    0x88, 0x88);
 	else
@@ -8152,6 +8177,8 @@
 			     WCD934X_DIG_CORE_REG_MIN,
 			     WCD934X_DIG_CORE_REG_MAX);
 
+	tavil_restore_iir_coeff(tavil, IIR0);
+	tavil_restore_iir_coeff(tavil, IIR1);
 	return 0;
 }
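Caching the sidetone IIR coefficients in sidetone_coeff_array closes an SSR gap: the values previously lived only in codec hardware, so after an ADSP restart the filters silently reverted while userspace still believed its last settings were in effect. The put handler now shadows each coefficient as it is programmed, and tavil_restore_iir_coeff() replays the whole array once the register defaults have been restored. The underlying pattern is shadow-then-replay (all names below hypothetical):

/* Shadow write-only hardware state so it can be replayed after a reset. */
static void coeff_put(struct iir_priv *p, int iir, int band, const u32 *vals)
{
	int i;

	for (i = 0; i < COEFF_MAX; i++) {
		p->shadow[iir][band][i] = vals[i];	/* keep a copy      */
		hw_write_coeff(p, iir, band, vals[i]);	/* then program HW  */
	}
}

static void coeff_restore(struct iir_priv *p, int iir)
{
	int band, i;

	for (band = 0; band < BAND_MAX; band++)
		for (i = 0; i < COEFF_MAX; i++)
			hw_write_coeff(p, iir, band,
				       p->shadow[iir][band][i]);
}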
 
@@ -8274,6 +8301,9 @@
 
 	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
 	ret = __tavil_cdc_mclk_enable_locked(tavil, enable);
+	if (enable)
+		wcd_resmgr_set_sido_input_src(tavil->resmgr,
+						     SIDO_SOURCE_RCO_BG);
 	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
 
 	return ret;
@@ -8411,6 +8441,8 @@
 					__func__, ret);
 				goto done;
 			}
+			wcd_resmgr_set_sido_input_src(tavil->resmgr,
+							SIDO_SOURCE_RCO_BG);
 			ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
 							   WCD_CLK_RCO);
 			ret |= tavil_cdc_req_mclk_enable(tavil, false);
@@ -8571,6 +8603,7 @@
 	{WCD934X_TLMM_DMIC3_CLK_PINCFG, 0xFF, 0x0a},
 	{WCD934X_TLMM_DMIC3_DATA_PINCFG, 0xFF, 0x0a},
 	{WCD934X_CPE_SS_SVA_CFG, 0x60, 0x00},
+	{WCD934X_CPE_SS_CPAR_CFG, 0x10, 0x10},
 };
 
 static void tavil_codec_init_reg(struct tavil_priv *priv)
@@ -9065,8 +9098,9 @@
 
 	codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv);
 	priv = snd_soc_codec_get_drvdata(codec);
-	swrm_wcd_notify(priv->swr.ctrl_data[0].swr_pdev,
-			SWR_DEVICE_DOWN, NULL);
+	if (priv->swr.ctrl_data)
+		swrm_wcd_notify(priv->swr.ctrl_data[0].swr_pdev,
+				SWR_DEVICE_DOWN, NULL);
 	tavil_dsd_reset(priv->dsd_config);
 	snd_soc_card_change_online_state(codec->component.card, 0);
 	for (count = 0; count < NUM_CODEC_DAIS; count++)
@@ -9814,18 +9848,23 @@
 {
 	int val, rc;
 
-	__tavil_cdc_mclk_enable(tavil, true);
+	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+	__tavil_cdc_mclk_enable_locked(tavil, true);
 
 	regmap_update_bits(tavil->wcd9xxx->regmap,
 			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x10);
 	regmap_update_bits(tavil->wcd9xxx->regmap,
 			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
-
 	/*
 	 * 5ms sleep required after enabling efuse control
 	 * before checking the status.
 	 */
 	usleep_range(5000, 5500);
+	wcd_resmgr_set_sido_input_src(tavil->resmgr,
+					     SIDO_SOURCE_RCO_BG);
+
+	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
 	rc = regmap_read(tavil->wcd9xxx->regmap,
 			 WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
 	if (rc || (!(val & 0x01)))
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 8780888..825aaee 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -25,8 +25,7 @@
 #define WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
 #define WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
 
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
-					  int sido_src);
+
 static const char *wcd_resmgr_clk_type_to_str(enum wcd_clock_type clk_type)
 {
 	if (clk_type == WCD_CLK_OFF)
@@ -267,8 +266,6 @@
 					0x01, 0x01);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD934X_CODEC_RPM_CLK_GATE, 0x03, 0x00);
-			wcd_resmgr_set_sido_input_src(resmgr,
-						      SIDO_SOURCE_RCO_BG);
 		} else {
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
@@ -515,7 +512,7 @@
 	return ret;
 }
 
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
 					  int sido_src)
 {
 	if (!resmgr)
@@ -553,6 +550,7 @@
 		pr_debug("%s: sido input src to external\n", __func__);
 	}
 }
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src);
 
 /*
  * wcd_resmgr_set_sido_input_src_locked:
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.h b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
index f605a24..e831ba6 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -87,4 +87,7 @@
 void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr);
 void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
 					  int sido_src);
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src);
+
 #endif
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 0c2f41a..153cc2e 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -3538,6 +3538,8 @@
 	pr_debug("%s: enter payload_size = %d Enable %d\n",
 		 __func__, pld_size, enable);
 
+	memset(&cpe_lab_enable, 0, sizeof(cpe_lab_enable));
+
 	if (fill_lsm_cmd_header_v0_inband(&cpe_lab_enable.hdr, session->id,
 		(u8) pld_size, CPE_LSM_SESSION_CMD_SET_PARAMS_V2)) {
 		return -EINVAL;
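The added memset() matters because cpe_lab_enable is an on-stack command structure handed to the CPE firmware: without zeroing it first, padding bytes and any field not explicitly assigned would leak uninitialised kernel stack contents to the DSP. The safe shape for any such inband command (struct name hypothetical, helpers as used above):

struct lsm_cmd cmd;			/* stands in for cpe_lab_enable's type */

memset(&cmd, 0, sizeof(cmd));		/* clear padding and unset fields first */
if (fill_lsm_cmd_header_v0_inband(&cmd.hdr, session->id, (u8) pld_size,
				  CPE_LSM_SESSION_CMD_SET_PARAMS_V2))
	return -EINVAL;
/* fill the remaining payload fields explicitly, then send */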
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 062bae2..ef493a8 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -104,6 +104,7 @@
 	int state;
 	struct delayed_work ocp_ctl_work;
 	struct device_node *wsa_rst_np;
+	int pa_mute;
 };
 
 #define SWR_SLV_MAX_REG_ADDR	0x390
@@ -170,9 +171,41 @@
 	return 0;
 }
 
-static const struct snd_kcontrol_new wsa_analog_gain_controls[] = {
+static int wsa881x_get_mute(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wsa881x->pa_mute;
+
+	return 0;
+}
+
+static int wsa881x_set_mute(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
+	int value = ucontrol->value.integer.value[0];
+
+	dev_dbg(codec->dev, "%s: mute current %d, new %d\n",
+		__func__, wsa881x->pa_mute, value);
+
+	if (value)
+		snd_soc_update_bits(codec, WSA881X_SPKR_DRV_EN, 0x80, 0x00);
+	wsa881x->pa_mute = value;
+
+	return 0;
+}
+
+
+static const struct snd_kcontrol_new wsa_snd_controls[] = {
 	SOC_ENUM_EXT("WSA PA Gain", wsa_pa_gain_enum,
 		     wsa_pa_gain_get, wsa_pa_gain_put),
+	SOC_SINGLE_EXT("WSA PA Mute", SND_SOC_NOPM, 0, 1, 0,
+		wsa881x_get_mute, wsa881x_set_mute),
 };
 
 static int codec_debug_open(struct inode *inode, struct file *file)
@@ -1050,8 +1083,8 @@
 	wsa881x->tz_pdata.codec = codec;
 	wsa881x->tz_pdata.wsa_temp_reg_read = wsa881x_temp_reg_read;
 	wsa881x_init_thermal(&wsa881x->tz_pdata);
-	snd_soc_add_codec_controls(codec, wsa_analog_gain_controls,
-				   ARRAY_SIZE(wsa_analog_gain_controls));
+	snd_soc_add_codec_controls(codec, wsa_snd_controls,
+				   ARRAY_SIZE(wsa_snd_controls));
 	INIT_DELAYED_WORK(&wsa881x->ocp_ctl_work, wsa881x_ocp_ctl_work);
 	return 0;
 }
@@ -1092,54 +1125,6 @@
 	},
 };
 
-static int wsa881x_swr_startup(struct swr_device *swr_dev)
-{
-	int ret = 0;
-	u8 devnum = 0;
-	struct wsa881x_priv *wsa881x;
-
-	wsa881x = swr_get_dev_data(swr_dev);
-	if (!wsa881x) {
-		dev_err(&swr_dev->dev, "%s: wsa881x is NULL\n", __func__);
-		return -EINVAL;
-	}
-
-	/*
-	 * Add 5msec delay to provide sufficient time for
-	 * soundwire auto enumeration of slave devices as
-	 * as per HW requirement.
-	 */
-	usleep_range(5000, 5010);
-	ret = swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum);
-	if (ret) {
-		dev_dbg(&swr_dev->dev,
-			"%s get devnum %d for dev addr %lx failed\n",
-			__func__, devnum, swr_dev->addr);
-		goto err;
-	}
-	swr_dev->dev_num = devnum;
-
-	wsa881x->regmap = devm_regmap_init_swr(swr_dev,
-					       &wsa881x_regmap_config);
-	if (IS_ERR(wsa881x->regmap)) {
-		ret = PTR_ERR(wsa881x->regmap);
-		dev_err(&swr_dev->dev, "%s: regmap_init failed %d\n",
-			__func__, ret);
-		goto err;
-	}
-
-	ret = snd_soc_register_codec(&swr_dev->dev, &soc_codec_dev_wsa881x,
-				     NULL, 0);
-	if (ret) {
-		dev_err(&swr_dev->dev, "%s: Codec registration failed\n",
-			__func__);
-		goto err;
-	}
-
-err:
-	return ret;
-}
-
 static int wsa881x_gpio_ctrl(struct wsa881x_priv *wsa881x, bool enable)
 {
 	int ret = 0;
@@ -1201,6 +1186,8 @@
 {
 	int ret = 0;
 	struct wsa881x_priv *wsa881x;
+	u8 devnum = 0;
+	bool pin_state_current = false;
 
 	wsa881x = devm_kzalloc(&pdev->dev, sizeof(struct wsa881x_priv),
 			    GFP_KERNEL);
@@ -1231,6 +1218,9 @@
 		if (ret)
 			goto err;
 	}
+	if (wsa881x->wsa_rst_np)
+		pin_state_current = msm_cdc_pinctrl_get_state(
+						wsa881x->wsa_rst_np);
 	wsa881x_gpio_ctrl(wsa881x, true);
 	wsa881x->state = WSA881X_DEV_UP;
 
@@ -1257,8 +1247,45 @@
 						&codec_debug_ops);
 		}
 	}
+
+	/*
+	 * Add 5msec delay to provide sufficient time for
+	 * soundwire auto enumeration of slave devices
+	 * as per HW requirement.
+	 */
+	usleep_range(5000, 5010);
+	ret = swr_get_logical_dev_num(pdev, pdev->addr, &devnum);
+	if (ret) {
+		dev_dbg(&pdev->dev,
+			"%s get devnum %d for dev addr %lx failed\n",
+			__func__, devnum, pdev->addr);
+		goto dev_err;
+	}
+	pdev->dev_num = devnum;
+
+	wsa881x->regmap = devm_regmap_init_swr(pdev,
+					       &wsa881x_regmap_config);
+	if (IS_ERR(wsa881x->regmap)) {
+		ret = PTR_ERR(wsa881x->regmap);
+		dev_err(&pdev->dev, "%s: regmap_init failed %d\n",
+			__func__, ret);
+		goto dev_err;
+	}
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wsa881x,
+				     NULL, 0);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: Codec registration failed\n",
+			__func__);
+		goto dev_err;
+	}
+
 	return 0;
 
+dev_err:
+	if (pin_state_current == false)
+		wsa881x_gpio_ctrl(wsa881x, false);
+	swr_remove_device(pdev);
 err:
 	return ret;
 }
@@ -1273,6 +1300,7 @@
 		return -EINVAL;
 	}
 	debugfs_remove_recursive(debugfs_wsa881x_dent);
+	debugfs_wsa881x_dent = NULL;
 	snd_soc_unregister_codec(&pdev->dev);
 	if (wsa881x->pd_gpio)
 		gpio_free(wsa881x->pd_gpio);
@@ -1391,7 +1419,6 @@
 	.device_up = wsa881x_swr_up,
 	.device_down = wsa881x_swr_down,
 	.reset_device = wsa881x_swr_reset,
-	.startup = wsa881x_swr_startup,
 };
 
 static int __init wsa881x_codec_init(void)
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 755b62a..c319ccf 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -113,7 +113,7 @@
 				    SNDRV_PCM_FMTBIT_S24_3LE |
 				    SNDRV_PCM_FMTBIT_S32_LE),
 			.channels_min = 1,
-			.channels_max = 4,
+			.channels_max = 8,
 			.rate_min =     8000,
 			.rate_max =	48000,
 		},
@@ -2582,7 +2582,7 @@
 			.channels_min = 1,
 			.channels_max = 8,
 			.rate_min =     8000,
-			.rate_max =     48000,
+			.rate_max =     192000,
 		},
 		.ops = &msm_fe_Multimedia_dai_ops,
 		.compress_new = snd_soc_new_compress,
@@ -2608,6 +2608,39 @@
 		.name = "MultiMedia19",
 		.probe = fe_dai_probe,
 	},
+	{
+		.playback = {
+			.stream_name = "MultiMedia20 Playback",
+			.aif_name = "MM_DL20",
+			.rates = (SNDRV_PCM_RATE_8000_384000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     384000,
+		},
+		.capture = {
+			.stream_name = "MultiMedia20 Capture",
+			.aif_name = "MM_UL20",
+			.rates = (SNDRV_PCM_RATE_8000_48000 |
+					SNDRV_PCM_RATE_KNOT),
+			.formats = (SNDRV_PCM_FMTBIT_S16_LE |
+				    SNDRV_PCM_FMTBIT_S24_LE |
+				    SNDRV_PCM_FMTBIT_S24_3LE |
+				    SNDRV_PCM_FMTBIT_S32_LE),
+			.channels_min = 1,
+			.channels_max = 8,
+			.rate_min =     8000,
+			.rate_max =     48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+		.name = "MultiMedia20",
+		.probe = fe_dai_probe,
+	},
 };
 
 static int msm_fe_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index b75ba98..222c65a 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -496,6 +496,8 @@
 static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
 
 static struct platform_device *spdev;
@@ -2263,6 +2265,54 @@
 	return sample_rate;
 }
 
+static int mi2s_get_format(int value)
+{
+	int format;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+	int value;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		value = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 3;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
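mi2s_get_format() and mi2s_get_format_value() translate between the mixer enum index (the ordering of bit_format_text) and the SNDRV_PCM_FORMAT_* constants stored in mi2s_rx_cfg/mi2s_tx_cfg, with S16_LE as the fallback in both directions so a bad value can never leave the configuration inconsistent. A quick round-trip check makes that contract explicit (test scaffolding only, not part of the driver):

/* Illustrative self-check: index -> format -> index is the identity for
 * the four supported entries (0..3).
 */
static void check_mi2s_format_mapping(void)
{
	int v;

	for (v = 0; v <= 3; v++)
		WARN_ON(mi2s_get_format_value(mi2s_get_format(v)) != v);
}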
+
 static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
 				   struct snd_ctl_elem_value *ucontrol)
 {
@@ -2395,6 +2445,78 @@
 	return 1;
 }
 
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		idx, mi2s_rx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_rx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		idx, mi2s_tx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_tx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
 static int msm_hifi_ctrl(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2647,6 +2769,22 @@
 			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
 	SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
 			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
 	SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
 			msm_hifi_put),
 };
@@ -3111,48 +3249,64 @@
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[QUAT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[QUAT_MI2S].channels;
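Every MI2S backend fixup now pins the bit format in addition to rate and channel count, so whatever the frontend negotiated, the backend DAI is forced to the format chosen through the new "<port> Format" mixer controls; param_set_mask() is the machine-driver helper that narrows the hw_params format mask to that single format. Conceptually it is equivalent to the standard ALSA mask operations (sketch; needs <sound/pcm_params.h>, format value illustrative):

/* Narrow the candidate formats of a backend to exactly one. */
struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

snd_mask_none(fmt);					/* drop every candidate */
snd_mask_set(fmt, (unsigned int)SNDRV_PCM_FORMAT_S24_LE);	/* keep one */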
@@ -3932,7 +4086,6 @@
 			dev_err(rtd->card->dev,
 				"%s lpaif_tert_muxsel_virt_addr is NULL\n",
 				__func__);
-			auxpcm_intf_conf[index].ref_cnt++;
 		}
 	}
 	mutex_unlock(&auxpcm_intf_conf[index].lock);
@@ -3980,6 +4133,7 @@
 	u32 bit_per_sample;
 
 	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
 	case SNDRV_PCM_FORMAT_S24_3LE:
 	case SNDRV_PCM_FORMAT_S24_LE:
 		bit_per_sample = 32;
@@ -4008,9 +4162,6 @@
 		mi2s_clk[dai_id].clk_freq_in_hz =
 		    mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
 	}
-
-	if (!mi2s_intf_conf[dai_id].msm_is_mi2s_master)
-		mi2s_clk[dai_id].clk_freq_in_hz = 0;
 }
 
 static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
@@ -4059,6 +4210,13 @@
 		ret = -EINVAL;
 		goto err;
 	}
+
+	if (pinctrl_info->pinctrl == NULL) {
+		pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
 	curr_state = pinctrl_info->curr_state;
 	pinctrl_info->curr_state = new_state;
 	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4327,6 +4485,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -4341,11 +4500,10 @@
 		goto done;
 	}
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
-		if (ret) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret_pinctrl) {
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
-			goto done;
+				__func__, ret_pinctrl);
 		}
 	}
 
@@ -4404,6 +4562,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -4415,19 +4574,17 @@
 	mutex_lock(&mi2s_intf_conf[index].lock);
 	if (--mi2s_intf_conf[index].ref_cnt == 0) {
 		ret = msm_mi2s_set_sclk(substream, false);
-		if (ret < 0) {
+		if (ret < 0)
 			pr_err("%s:clock disable failed for MI2S (%d); ret=%d\n",
 				__func__, index, ret);
-			mi2s_intf_conf[index].ref_cnt++;
-		}
 	}
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
-		if (ret)
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
+				__func__, ret_pinctrl);
 	}
 }
 
@@ -5346,6 +5503,37 @@
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 	},
+	{
+		.name = MSM_DAILINK_NAME(Transcode Loopback Playback),
+		.stream_name = "Transcode Loopback Playback",
+		.cpu_dai_name = "MultiMedia14",
+		.platform_name = "msm-transcode-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		 /* this dai link has playback support */
+		.id = MSM_FRONTEND_DAI_MULTIMEDIA14,
+	},
+	{
+		.name = MSM_DAILINK_NAME(Transcode Loopback Capture),
+		.stream_name = "Transcode Loopback Capture",
+		.cpu_dai_name = "MultiMedia18",
+		.platform_name = "msm-transcode-loopback",
+		.dynamic = 1,
+		.dpcm_capture = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			 SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1,
+		.id = MSM_FRONTEND_DAI_MULTIMEDIA18,
+	},
 };
 
 static struct snd_soc_dai_link msm_common_be_dai_links[] = {
@@ -7266,10 +7454,12 @@
 	struct msm_asoc_mach_data *pdata =
 				snd_soc_card_get_drvdata(card);
 
-	gpio_free(pdata->us_euro_gpio);
+	if (gpio_is_valid(pdata->us_euro_gpio))
+		gpio_free(pdata->us_euro_gpio);
 	i2s_auxpcm_deinit();
 
 	snd_soc_unregister_card(card);
+	audio_notifier_deregister("msm8998");
 	return 0;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 36382ba..ceb6b50 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -4,12 +4,12 @@
 			msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
 			msm-lsm-client.o msm-pcm-host-voice-v2.o \
 			msm-audio-effects-q6-v2.o msm-pcm-loopback-v2.o \
-			msm-dai-slim.o \
+			msm-dai-slim.o msm-transcode-loopback-q6-v2.o \
 			adsp_err.o
 obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
 				 msm-dai-stub-v2.o
 obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
-obj-$(CONFIG_DTS_EAGLE) += msm-dts-eagle.o
+obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
 obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
 obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
 obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index 820aa1b..f5c6d6f 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -158,9 +158,6 @@
 	case ULP_LSM_CAL_TYPE:
 		size = sizeof(struct audio_cal_info_lsm);
 		break;
-	case DTS_EAGLE_CAL_TYPE:
-		size = 0;
-		break;
 	case AUDIO_CORE_METAINFO_CAL_TYPE:
 		size = sizeof(struct audio_cal_info_metainfo);
 		break;
@@ -307,9 +304,6 @@
 	case ULP_LSM_CAL_TYPE:
 		size = sizeof(struct audio_cal_type_lsm);
 		break;
-	case DTS_EAGLE_CAL_TYPE:
-		size = 0;
-		break;
 	case AUDIO_CORE_METAINFO_CAL_TYPE:
 		size = sizeof(struct audio_cal_type_metainfo);
 		break;
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
index 225f978..9f08222 100644
--- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -15,7 +15,6 @@
 #include <sound/q6asm-v2.h>
 #include <sound/compress_params.h>
 #include <sound/msm-audio-effects-q6-v2.h>
-#include <sound/msm-dts-eagle.h>
 #include <sound/devdep_params.h>
 
 #define MAX_ENABLE_CMD_SIZE 32
@@ -49,26 +48,6 @@
 	case EQ_MODULE:
 		switch (topology) {
 		case ASM_STREAM_POSTPROC_TOPO_ID_SA_PLUS:
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS:
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
-			return true;
-		default:
-			return false;
-		}
-	case DTS_EAGLE_MODULE:
-		switch (topology) {
-		case ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX:
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS:
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
-			return true;
-		default:
-			return false;
-		}
-	case SOFT_VOLUME2_MODULE:
-	case DTS_EAGLE_MODULE_ENABLE:
-		switch (topology) {
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS:
-		case ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER:
 			return true;
 		default:
 			return false;
@@ -275,7 +254,7 @@
 			break;
 		}
 	}
-	if (params_length && !msm_dts_eagle_is_hpx_on() && (rc == 0))
+	if (params_length && (rc == 0))
 		q6asm_send_audio_effects_params(ac, params,
 						params_length);
 	else
@@ -745,7 +724,7 @@
 			break;
 		}
 	}
-	if (params_length && !msm_dts_eagle_is_hpx_on() && (rc == 0))
+	if (params_length && (rc == 0))
 		q6asm_send_audio_effects_params(ac, params,
 						params_length);
 	else
@@ -880,7 +859,7 @@
 			break;
 		}
 	}
-	if (params_length && !msm_dts_eagle_is_hpx_on() && (rc == 0))
+	if (params_length && (rc == 0))
 		q6asm_send_audio_effects_params(ac, params,
 						params_length);
 	else
@@ -1219,7 +1198,7 @@
 			break;
 		}
 	}
-	if (params_length && !msm_dts_eagle_is_hpx_on() && (rc == 0))
+	if (params_length && (rc == 0))
 		q6asm_send_audio_effects_params(ac, params,
 						params_length);
 	else
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index e8e4e04..c885265 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -32,6 +32,7 @@
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/msm_audio_ion.h>
+#include <linux/msm_audio.h>
 
 #include <sound/timer.h>
 #include <sound/tlv.h>
@@ -42,9 +43,8 @@
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
 #include <sound/msm-audio-effects-q6-v2.h>
-#include <sound/msm-dts-eagle.h>
-
 #include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
 
 #define DSP_PP_BUFFERING_IN_MSEC	25
 #define PARTIAL_DRAIN_ACK_EARLY_BY_MSEC	150
@@ -79,15 +79,6 @@
 
 #define MAX_NUMBER_OF_STREAMS 2
 
-/*
- * Max size for getting DTS EAGLE Param through kcontrol
- * Safe for both 32 and 64 bit platforms
- * 64 = size of kcontrol value array on 64 bit platform
- * 4 = size of parameters Eagle expects before cast to 64 bits
- * 40 = size of dts_eagle_param_desc + module_id cast to 64 bits
- */
-#define DTS_EAGLE_MAX_PARAM_SIZE_FOR_ALSA ((64 * 4) - 40)
-
 struct msm_compr_gapless_state {
 	bool set_next_stream_id;
 	int32_t stream_opened[MAX_NUMBER_OF_STREAMS];
@@ -100,7 +91,8 @@
 
 static unsigned int supported_sample_rates[] = {
 	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000,
-	88200, 96000, 128000, 176400, 192000, 352800, 384000, 2822400, 5644800
+	88200, 96000, 128000, 144000, 176400, 192000, 352800, 384000, 2822400,
+	5644800
 };
 
 struct msm_compr_pdata {
@@ -186,7 +178,7 @@
 
 const u32 compr_codecs[] = {
 	SND_AUDIOCODEC_AC3, SND_AUDIOCODEC_EAC3, SND_AUDIOCODEC_DTS,
-	SND_AUDIOCODEC_DSD};
+	SND_AUDIOCODEC_DSD, SND_AUDIOCODEC_TRUEHD, SND_AUDIOCODEC_IEC61937};
 
 struct query_audio_effect {
 	uint32_t mod_id;
@@ -312,6 +304,39 @@
 	return ret;
 }
 
+static int msm_compr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable)
+{
+	int ret;
+
+	pr_debug("%s, enable adjust_session %d\n", __func__, enable);
+
+	ret = q6asm_send_mtmx_strtr_enable_adjust_session_clock(ac, enable);
+	if (ret)
+		pr_err("%s, adjust session clock can't be set error %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int msm_compr_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_session_lsw, uint32_t adjust_session_msw)
+{
+	int ret;
+
+	pr_debug("%s, adjust_session_time_msw 0x%x adjust_session_time_lsw 0x%x\n",
+		 __func__, adjust_session_msw, adjust_session_lsw);
+
+	ret = q6asm_adjust_session_clock(ac,
+			adjust_session_lsw,
+			adjust_session_msw);
+	if (ret)
+		pr_err("%s: adjust session clock failed, err %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
 static int msm_compr_set_volume(struct snd_compr_stream *cstream,
 				uint32_t volume_l, uint32_t volume_r)
 {
@@ -377,11 +402,6 @@
 	if (rc < 0)
 		pr_err("%s: Send vol gain command failed rc=%d\n",
 		       __func__, rc);
-	else
-		if (msm_dts_eagle_set_stream_gain(prtd->audio_client,
-						volume_l, volume_r))
-			pr_debug("%s: DTS_EAGLE send stream gain failed\n",
-				__func__);
 
 	return rc;
 }
@@ -545,12 +565,19 @@
 	unsigned long flags;
 	uint64_t read_size;
 	uint32_t *buff_addr;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
 
 	if (!prtd) {
 		pr_err("%s: prtd is NULL\n", __func__);
 		return;
 	}
 	cstream = prtd->cstream;
+	if (!cstream) {
+		pr_err("%s: cstream is NULL\n", __func__);
+		return;
+	}
+
 	ac = prtd->audio_client;
 
 	/*
@@ -718,6 +745,22 @@
 			prtd->gapless_state.gapless_transition = 0;
 		spin_unlock_irqrestore(&prtd->lock, flags);
 		break;
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+		pr_debug("%s: ASM_STREAM_EVENT(0x%x)\n", __func__, opcode);
+		rtd = cstream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctrl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+		break;
 	case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
 	case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: {
 		pr_debug("ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY\n");
@@ -821,6 +864,10 @@
 			}
 			atomic_set(&prtd->close, 0);
 			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS\n",
+				__func__);
+			break;
 		default:
 			break;
 		}
@@ -881,7 +928,7 @@
 			COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
 	prtd->compr_cap.max_fragments =
 			COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
-	prtd->compr_cap.num_codecs = 15;
+	prtd->compr_cap.num_codecs = 17;
 	prtd->compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
 	prtd->compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
 	prtd->compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
@@ -897,6 +944,8 @@
 	prtd->compr_cap.codecs[12] = SND_AUDIOCODEC_DTS;
 	prtd->compr_cap.codecs[13] = SND_AUDIOCODEC_DSD;
 	prtd->compr_cap.codecs[14] = SND_AUDIOCODEC_APTX;
+	prtd->compr_cap.codecs[15] = SND_AUDIOCODEC_TRUEHD;
+	prtd->compr_cap.codecs[16] = SND_AUDIOCODEC_IEC61937;
 }
 
 static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream,
@@ -1151,6 +1200,19 @@
 			pr_err("%s: CMD DSD Format block failed ret %d\n",
 				__func__, ret);
 		break;
+	case FORMAT_TRUEHD:
+		pr_debug("SND_AUDIOCODEC_TRUEHD\n");
+		/* no media format block needed */
+		break;
+	case FORMAT_IEC61937:
+		pr_debug("SND_AUDIOCODEC_IEC61937\n");
+		ret = q6asm_media_format_block_iec(prtd->audio_client,
+						   prtd->sample_rate,
+						   prtd->num_channels);
+		if (ret < 0)
+			pr_err("%s: CMD IEC61937 Format block failed ret %d\n",
+				__func__, ret);
+		break;
 	case FORMAT_APTX:
 		pr_debug("SND_AUDIOCODEC_APTX\n");
 		memset(&aptx_cfg, 0x0, sizeof(struct aptx_dec_bt_addr_cfg));
@@ -1187,26 +1249,6 @@
 	};
 
 	switch (ac->topology) {
-	case ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS: /* HPX + SA+ topology */
-
-		ret = q6asm_set_softvolume_v2(ac, &softvol,
-					      SOFT_VOLUME_INSTANCE_1);
-		if (ret < 0)
-			pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-			__func__, ret);
-
-		ret = q6asm_set_softvolume_v2(ac, &softvol,
-					      SOFT_VOLUME_INSTANCE_2);
-		if (ret < 0)
-			pr_err("%s: Send SoftVolume2 Param failed ret=%d\n",
-			__func__, ret);
-		/*
-		 * HPX module init is trigerred from HAL using ioctl
-		 * DTS_EAGLE_MODULE_ENABLE when stream starts
-		 */
-		break;
-	case ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX: /* HPX topology */
-		break;
 	default:
 		ret = q6asm_set_softvolume_v2(ac, &softvol,
 					      SOFT_VOLUME_INSTANCE_1);
@@ -1557,6 +1599,7 @@
 	pr_debug("%s: session ID %d\n", __func__, prtd->audio_client->session);
 	prtd->audio_client->perf_mode = false;
 	prtd->session_id = prtd->audio_client->session;
+	msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
 
 	return 0;
 }
@@ -1712,7 +1755,7 @@
 	q6asm_audio_client_buf_free_contiguous(dir, ac);
 
 	q6asm_audio_client_free(ac);
-
+	msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
 	kfree(pdata->audio_effects[soc_prtd->dai_link->id]);
 	pdata->audio_effects[soc_prtd->dai_link->id] = NULL;
 	kfree(pdata->dec_params[soc_prtd->dai_link->id]);
@@ -1830,8 +1873,11 @@
 	prtd->sample_rate = prtd->codec_param.codec.sample_rate;
 	pr_debug("%s: sample_rate %d\n", __func__, prtd->sample_rate);
 
-	if (prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
-	    prtd->codec_param.codec.compr_passthr <= COMPRESSED_PASSTHROUGH_DSD)
+	if ((prtd->codec_param.codec.compr_passthr >= LEGACY_PCM &&
+	     prtd->codec_param.codec.compr_passthr <=
+	     COMPRESSED_PASSTHROUGH_DSD) ||
+	    (prtd->codec_param.codec.compr_passthr ==
+	     COMPRESSED_PASSTHROUGH_IEC61937))
 		prtd->compr_passthr = prtd->codec_param.codec.compr_passthr;
 	else
 		prtd->compr_passthr = LEGACY_PCM;
@@ -1948,6 +1994,18 @@
 		break;
 	}
 
+	case SND_AUDIOCODEC_TRUEHD: {
+		pr_debug("%s: SND_AUDIOCODEC_TRUEHD\n", __func__);
+		prtd->codec = FORMAT_TRUEHD;
+		break;
+	}
+
+	case SND_AUDIOCODEC_IEC61937: {
+		pr_debug("%s: SND_AUDIOCODEC_IEC61937\n", __func__);
+		prtd->codec = FORMAT_IEC61937;
+		break;
+	}
+
 	case SND_AUDIOCODEC_APTX: {
 		pr_debug("%s: SND_AUDIOCODEC_APTX\n", __func__);
 		prtd->codec = FORMAT_APTX;
@@ -2815,20 +2873,15 @@
 				SND_AUDIOSTREAMFORMAT_RAW);
 		break;
 	case SND_AUDIOCODEC_AC3:
-		break;
 	case SND_AUDIOCODEC_EAC3:
-		break;
 	case SND_AUDIOCODEC_FLAC:
-		break;
 	case SND_AUDIOCODEC_VORBIS:
-		break;
 	case SND_AUDIOCODEC_ALAC:
-		break;
 	case SND_AUDIOCODEC_APE:
-		break;
 	case SND_AUDIOCODEC_DTS:
-		break;
 	case SND_AUDIOCODEC_DSD:
+	case SND_AUDIOCODEC_TRUEHD:
+	case SND_AUDIOCODEC_IEC61937:
 	case SND_AUDIOCODEC_APTX:
 		break;
 	default:
@@ -2887,6 +2940,14 @@
 	} else if (metadata->key == SNDRV_COMPRESS_START_DELAY) {
 		prtd->start_delay_lsw = metadata->value[0];
 		prtd->start_delay_msw = metadata->value[1];
+	} else if (metadata->key ==
+				SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK) {
+		return msm_compr_enable_adjust_session_clock(ac,
+				metadata->value[0]);
+	} else if (metadata->key == SNDRV_COMPRESS_ADJUST_SESSION_CLOCK) {
+		return msm_compr_adjust_session_clock(ac,
+				metadata->value[0],
+				metadata->value[1]);
 	}
 
 	return 0;
@@ -3109,23 +3170,6 @@
 						    &(audio_effects->equalizer),
 						     values);
 		break;
-	case DTS_EAGLE_MODULE:
-		pr_debug("%s: DTS_EAGLE_MODULE\n", __func__);
-		if (!msm_audio_effects_is_effmodule_supp_in_top(effects_module,
-						prtd->audio_client->topology))
-			return 0;
-		msm_dts_eagle_handle_asm(NULL, (void *)values, true,
-					 false, prtd->audio_client, NULL);
-		break;
-	case DTS_EAGLE_MODULE_ENABLE:
-		pr_debug("%s: DTS_EAGLE_MODULE_ENABLE\n", __func__);
-		if (msm_audio_effects_is_effmodule_supp_in_top(effects_module,
-						prtd->audio_client->topology))
-			msm_dts_eagle_enable_asm(prtd->audio_client,
-					(bool)values[0],
-					AUDPROC_MODULE_ID_DTS_HPX_PREMIX);
-
-		break;
 	case SOFT_VOLUME_MODULE:
 		pr_debug("%s: SOFT_VOLUME_MODULE\n", __func__);
 		break;
@@ -3154,7 +3198,6 @@
 	struct msm_compr_audio_effects *audio_effects = NULL;
 	struct snd_compr_stream *cstream = NULL;
 	struct msm_compr_audio *prtd = NULL;
-	long *values = &(ucontrol->value.integer.value[0]);
 
 	pr_debug("%s\n", __func__);
 	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
@@ -3174,28 +3217,6 @@
 		return -EINVAL;
 	}
 
-	switch (audio_effects->query.mod_id) {
-	case DTS_EAGLE_MODULE:
-		pr_debug("%s: DTS_EAGLE_MODULE handling queued get\n",
-			 __func__);
-		values[0] = (long)audio_effects->query.mod_id;
-		values[1] = (long)audio_effects->query.parm_id;
-		values[2] = (long)audio_effects->query.size;
-		values[3] = (long)audio_effects->query.offset;
-		values[4] = (long)audio_effects->query.device;
-		if (values[2] > DTS_EAGLE_MAX_PARAM_SIZE_FOR_ALSA) {
-			pr_err("%s: DTS_EAGLE_MODULE parameter's requested size (%li) too large (max size is %i)\n",
-				__func__, values[2],
-				DTS_EAGLE_MAX_PARAM_SIZE_FOR_ALSA);
-			return -EINVAL;
-		}
-		msm_dts_eagle_handle_asm(NULL, (void *)&values[1],
-					 true, true, prtd->audio_client, NULL);
-		break;
-	default:
-		pr_err("%s: Invalid effects config module\n", __func__);
-		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -3299,6 +3320,8 @@
 	switch (prtd->codec) {
 	case FORMAT_MP3:
 	case FORMAT_MPEG4_AAC:
+	case FORMAT_TRUEHD:
+	case FORMAT_IEC61937:
 	case FORMAT_APTX:
 		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
 			 prtd->codec);
@@ -3366,6 +3389,8 @@
 	case FORMAT_APE:
 	case FORMAT_DTS:
 	case FORMAT_DSD:
+	case FORMAT_TRUEHD:
+	case FORMAT_IEC61937:
 	case FORMAT_APTX:
 		pr_debug("%s: no runtime parameters for codec: %d\n", __func__,
 			 prtd->codec);
@@ -3419,21 +3444,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -3446,28 +3468,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -3478,21 +3497,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -3505,28 +3521,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -3589,6 +3602,176 @@
 	return rc;
 }
 
+static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d\n",
+			__func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
+					sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s: param length=%d exceeds limit\n",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_compr_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int fd;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&fd, ucontrol->value.bytes.data, sizeof(fd));
+	ret = q6asm_send_ion_fd(prtd->audio_client, fd);
+	if (ret < 0)
+		pr_err("%s: failed to register ion fd\n", __func__);
+done:
+	return ret;
+}
+
+static int msm_compr_rtic_event_ack_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+					snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_compr_audio *prtd;
+	int ret = 0;
+	int param_length = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memcpy(&param_length, ucontrol->value.bytes.data,
+		sizeof(param_length));
+	if ((param_length + sizeof(param_length)) >=
+		sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s: param length=%d exceeds limit\n",
+			__func__, param_length);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_rtic_event_ack(prtd->audio_client,
+			ucontrol->value.bytes.data + sizeof(param_length),
+			param_length);
+	if (ret < 0)
+		pr_err("%s: failed to send rtic event ack, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
 static int msm_compr_gapless_put(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
@@ -3863,6 +4046,117 @@
 	return 0;
 }
 
+static int msm_compr_add_audio_adsp_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_adsp_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+	fe_audio_adsp_stream_cmd_config_control[0].private_value =
+				rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_audio_adsp_stream_cmd_config_control,
+		ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_audio_adsp_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_callback_config_control[0].name = mixer_str;
+	fe_audio_adsp_callback_config_control[0].private_value =
+					rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_audio_adsp_callback_config_control,
+			ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
 static int msm_compr_add_dec_runtime_params_control(
 						struct snd_soc_pcm_runtime *rtd)
 {
@@ -4037,6 +4331,96 @@
 	return 0;
 }
 
+static int msm_compr_add_io_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_compr_add_event_ack_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback Event Ack";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_event_ack_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_compr_rtic_event_ack_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_event_ack_config_control[0].name = mixer_str;
+	fe_event_ack_config_control[0].private_value = rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_event_ack_config_control,
+				ARRAY_SIZE(fe_event_ack_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
 static int msm_compr_new(struct snd_soc_pcm_runtime *rtd)
 {
 	int rc;
@@ -4050,6 +4434,26 @@
 		pr_err("%s: Could not add Compr Audio Effects Control\n",
 			__func__);
 
+	rc = msm_compr_add_audio_adsp_stream_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ADSP Stream Cmd Control\n",
+			__func__);
+
+	rc = msm_compr_add_audio_adsp_stream_callback_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ADSP Stream Callback Control\n",
+			__func__);
+
+	rc = msm_compr_add_io_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr ion fd Control\n",
+			__func__);
+
+	rc = msm_compr_add_event_ack_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add Compr event ack Control\n",
+			__func__);
+
 	rc = msm_compr_add_query_audio_effect_control(rtd);
 	if (rc)
 		pr_err("%s: Could not add Compr Query Audio Effect Control\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
index 9b072ea..deb1798 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-hdmi-v2.c
@@ -174,7 +174,7 @@
 	{
 		.access = SNDRV_CTL_ELEM_ACCESS_READ,
 		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
-		.name	= "HDMI RX Drift",
+		.name	= "HDMI DRIFT",
 		.info	= msm_dai_q6_ext_disp_drift_info,
 		.get	= msm_dai_q6_ext_disp_drift_get,
 	},
@@ -191,7 +191,7 @@
 	{
 		.access = SNDRV_CTL_ELEM_ACCESS_READ,
 		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
-		.name	= "DISPLAY Port RX Drift",
+		.name	= "DISPLAY_PORT DRIFT",
 		.info	= msm_dai_q6_ext_disp_drift_info,
 		.get	= msm_dai_q6_ext_disp_drift_get,
 	},
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 0c46763..c8b01c6 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -2314,6 +2314,44 @@
 		     msm_dai_q6_afe_input_bit_format_put),
 };
 
+static int msm_dai_q6_slim_rx_drift_info(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count = sizeof(struct afe_param_id_dev_timing_stats);
+
+	return 0;
+}
+
+static int msm_dai_q6_slim_rx_drift_get(struct snd_kcontrol *kcontrol,
+		struct snd_ctl_elem_value *ucontrol)
+{
+	int ret = -EINVAL;
+	struct afe_param_id_dev_timing_stats timing_stats;
+	struct snd_soc_dai *dai = kcontrol->private_data;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_err("%s: afe port not started. dai_data->status_mask = %lu\n",
+			__func__, *dai_data->status_mask);
+		goto done;
+	}
+
+	memset(&timing_stats, 0, sizeof(struct afe_param_id_dev_timing_stats));
+	ret = afe_get_av_dev_drift(&timing_stats, dai->id);
+	if (ret) {
+		pr_err("%s: Error getting AFE Drift for port %d, err=%d\n",
+			__func__, dai->id, ret);
+
+		goto done;
+	}
+
+	memcpy(ucontrol->value.bytes.data, (void *)&timing_stats,
+			sizeof(struct afe_param_id_dev_timing_stats));
+done:
+	return ret;
+}
+
 static const char * const afe_cal_mode_text[] = {
 	"CAL_MODE_DEFAULT", "CAL_MODE_NONE"
 };
@@ -2366,6 +2404,29 @@
 			msm_dai_q6_usb_audio_endian_cfg_put),
 };
 
+static const struct snd_kcontrol_new avd_drift_config_controls[] = {
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_0_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_6_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+	{
+		.access = SNDRV_CTL_ELEM_ACCESS_READ,
+		.iface	= SNDRV_CTL_ELEM_IFACE_PCM,
+		.name	= "SLIMBUS_7_RX DRIFT",
+		.info	= msm_dai_q6_slim_rx_drift_info,
+		.get	= msm_dai_q6_slim_rx_drift_get,
+	},
+};
 static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
 {
 	struct msm_dai_q6_dai_data *dai_data;
@@ -2413,6 +2474,9 @@
 		rc = snd_ctl_add(dai->component->card->snd_card,
 				 snd_ctl_new1(&afe_enc_config_controls[2],
 				 dai_data));
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[2],
+					dai));
 		break;
 	case RT_PROXY_DAI_001_RX:
 		rc = snd_ctl_add(dai->component->card->snd_card,
@@ -2440,6 +2504,16 @@
 				 snd_ctl_new1(&usb_audio_cfg_controls[3],
 				 dai_data));
 		break;
+	case SLIMBUS_0_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[0],
+					dai));
+		break;
+	case SLIMBUS_6_RX:
+		rc = snd_ctl_add(dai->component->card->snd_card,
+				snd_ctl_new1(&avd_drift_config_controls[1],
+					dai));
+		break;
 	}
 	if (rc < 0)
 		dev_err(dai->dev, "%s: err add config ctl, DAI = %s\n",
@@ -4131,12 +4205,13 @@
 			.stream_name = "INT0 MI2S Playback",
 			.aif_name = "INT0_MI2S_RX",
 			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100,
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
 				SNDRV_PCM_FMTBIT_S24_LE |
 				SNDRV_PCM_FMTBIT_S24_3LE,
 			.rate_min =     8000,
-			.rate_max =     48000,
+			.rate_max =     192000,
 		},
 		.capture = {
 			.stream_name = "INT0 MI2S Capture",
@@ -4235,12 +4310,13 @@
 			.stream_name = "INT4 MI2S Playback",
 			.aif_name = "INT4_MI2S_RX",
 			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-			SNDRV_PCM_RATE_16000,
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
 				SNDRV_PCM_FMTBIT_S24_LE |
 				SNDRV_PCM_FMTBIT_S24_3LE,
 			.rate_min =     8000,
-			.rate_max =     48000,
+			.rate_max =     192000,
 		},
 		.capture = {
 			.stream_name = "INT4 MI2S Capture",
@@ -4934,6 +5010,44 @@
 	},
 };
 
+static int msm_dai_q6_tdm_set_clk_param(u32 group_id,
+					struct afe_clk_set *clk_set, u32 mode)
+{
+	switch (group_id) {
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT;
+		break;
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
+	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
+		if (mode)
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT;
+		else
+			clk_set->clk_id = Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int msm_dai_tdm_q6_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -4941,6 +5055,7 @@
 	uint32_t array_length = 0;
 	int i = 0;
 	int group_idx = 0;
+	u32 clk_mode = 0;
 
 	/* extract tdm group info into static */
 	rc = of_property_read_u32(pdev->dev.of_node,
@@ -5013,6 +5128,26 @@
 	dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n",
 		__func__, tdm_clk_set.clk_freq_in_hz);
 
+	/* extract tdm clk src master/slave info into static */
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-cpudai-tdm-clk-internal",
+		&clk_mode);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: Clk internal mode from DT file %s\n",
+			__func__, "qcom,msm-cpudai-tdm-clk-internal");
+		goto rtn;
+	}
+	dev_dbg(&pdev->dev, "%s: Clk internal mode from DT file %d\n",
+		__func__, clk_mode);
+
+	rc = msm_dai_q6_tdm_set_clk_param(tdm_group_cfg.group_id,
+					  &tdm_clk_set, clk_mode);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: group id not supported 0x%x\n",
+			__func__, tdm_group_cfg.group_id);
+		goto rtn;
+	}
+
 	/* other initializations within device group */
 	group_idx = msm_dai_q6_get_group_idx(tdm_group_cfg.group_id);
 	if (group_idx < 0) {
@@ -5808,48 +5943,6 @@
 {
 	int rc = 0;
 
-	switch (dai_data->group_cfg.tdm_cfg.group_id) {
-	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX:
-	case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX:
-		if (dai_data->clk_set.clk_freq_in_hz) {
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_PRI_TDM_IBIT;
-		} else
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_PRI_TDM_EBIT;
-		break;
-	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX:
-	case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX:
-		if (dai_data->clk_set.clk_freq_in_hz) {
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_SEC_TDM_IBIT;
-		} else
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_SEC_TDM_EBIT;
-		break;
-	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX:
-	case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX:
-		if (dai_data->clk_set.clk_freq_in_hz) {
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_TER_TDM_IBIT;
-		} else
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_TER_TDM_EBIT;
-		break;
-	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX:
-	case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX:
-		if (dai_data->clk_set.clk_freq_in_hz) {
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT;
-		} else
-			dai_data->clk_set.clk_id =
-				Q6AFE_LPASS_CLK_ID_QUAD_TDM_EBIT;
-		break;
-	default:
-		pr_err("%s: port id 0x%x not supported\n",
-			__func__, port_id);
-		return -EINVAL;
-	}
 	dai_data->clk_set.enable = enable;
 
 	rc = afe_set_lpass_clock_v2(port_id,
@@ -7837,6 +7930,7 @@
 	int rc = 0;
 	u32 tdm_dev_id = 0;
 	int port_idx = 0;
+	struct device_node *tdm_parent_node = NULL;
 
 	/* retrieve device/afe id */
 	rc = of_property_read_u32(pdev->dev.of_node,
@@ -7871,7 +7965,8 @@
 	memset(dai_data, 0, sizeof(*dai_data));
 
 	/* TDM CFG */
-	rc = of_property_read_u32(pdev->dev.of_node,
+	tdm_parent_node = of_get_parent(pdev->dev.of_node);
+	rc = of_property_read_u32(tdm_parent_node,
 		"qcom,msm-cpudai-tdm-sync-mode",
 		(u32 *)&dai_data->port_cfg.tdm.sync_mode);
 	if (rc) {
@@ -7882,7 +7977,7 @@
 	dev_dbg(&pdev->dev, "%s: Sync Mode from DT file 0x%x\n",
 		__func__, dai_data->port_cfg.tdm.sync_mode);
 
-	rc = of_property_read_u32(pdev->dev.of_node,
+	rc = of_property_read_u32(tdm_parent_node,
 		"qcom,msm-cpudai-tdm-sync-src",
 		(u32 *)&dai_data->port_cfg.tdm.sync_src);
 	if (rc) {
@@ -7893,7 +7988,7 @@
 	dev_dbg(&pdev->dev, "%s: Sync Src from DT file 0x%x\n",
 		__func__, dai_data->port_cfg.tdm.sync_src);
 
-	rc = of_property_read_u32(pdev->dev.of_node,
+	rc = of_property_read_u32(tdm_parent_node,
 		"qcom,msm-cpudai-tdm-data-out",
 		(u32 *)&dai_data->port_cfg.tdm.ctrl_data_out_enable);
 	if (rc) {
@@ -7904,7 +7999,7 @@
 	dev_dbg(&pdev->dev, "%s: Data Out from DT file 0x%x\n",
 		__func__, dai_data->port_cfg.tdm.ctrl_data_out_enable);
 
-	rc = of_property_read_u32(pdev->dev.of_node,
+	rc = of_property_read_u32(tdm_parent_node,
 		"qcom,msm-cpudai-tdm-invert-sync",
 		(u32 *)&dai_data->port_cfg.tdm.ctrl_invert_sync_pulse);
 	if (rc) {
@@ -7915,7 +8010,7 @@
 	dev_dbg(&pdev->dev, "%s: Invert Sync from DT file 0x%x\n",
 		__func__, dai_data->port_cfg.tdm.ctrl_invert_sync_pulse);
 
-	rc = of_property_read_u32(pdev->dev.of_node,
+	rc = of_property_read_u32(tdm_parent_node,
 		"qcom,msm-cpudai-tdm-data-delay",
 		(u32 *)&dai_data->port_cfg.tdm.ctrl_sync_data_delay);
 	if (rc) {
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-slim.c b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
index 779a2e6..8115fee 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-slim.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-slim.c
@@ -313,7 +313,7 @@
 	struct msm_slim_dai_data *dai_data = NULL;
 	struct slim_ch prop;
 	int rc;
-	u8 i;
+	u8 i, j;
 
 	dai_data = msm_slim_get_dai_data(drv_data, dai);
 	if (!dai_data) {
@@ -350,10 +350,6 @@
 		}
 	}
 
-	/* To decrement the channel ref count*/
-	for (i = 0; i < dai_data->ch_cnt; i++)
-		slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[i]);
-
 	prop.prot = SLIM_AUTO_ISO;
 	prop.baser = SLIM_RATE_4000HZ;
 	prop.dataf = SLIM_CH_DATAF_NOT_DEFINED;
@@ -377,6 +373,8 @@
 
 error_define_chan:
 error_chan_query:
+	for (j = 0; j < i; j++)
+		slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[j]);
 	return rc;
 }
 
@@ -386,6 +384,7 @@
 	struct msm_dai_slim_drv_data *drv_data = dev_get_drvdata(dai->dev);
 	struct msm_slim_dma_data *dma_data = NULL;
 	struct msm_slim_dai_data *dai_data;
+	int i, rc = 0;
 
 	dai_data = msm_slim_get_dai_data(drv_data, dai);
 	dma_data = snd_soc_dai_get_dma_data(dai, stream);
@@ -404,6 +403,15 @@
 		return;
 	}
 
+	for (i = 0; i < dai_data->ch_cnt; i++) {
+		rc = slim_dealloc_ch(drv_data->sdev, dai_data->chan_h[i]);
+		if (rc) {
+			dev_err(dai->dev,
+				"%s: dealloc_ch failed, err = %d\n",
+				__func__, rc);
+		}
+	}
+
 	snd_soc_dai_set_dma_data(dai, stream, NULL);
 	/* clear prepared state for the dai */
 	CLR_DAI_STATE(dai_data->status, DAI_STATE_PREPARED);
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c b/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
deleted file mode 100644
index 2ff1e02..0000000
--- a/sound/soc/msm/qdsp6v2/msm-dts-eagle.c
+++ /dev/null
@@ -1,1660 +0,0 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/msm_ion.h>
-#include <linux/mm.h>
-#include <linux/msm_audio_ion.h>
-#include <linux/vmalloc.h>
-#include <sound/core.h>
-#include <sound/soc.h>
-#include <sound/pcm.h>
-#include <sound/q6adm-v2.h>
-#include <sound/q6asm-v2.h>
-#include <sound/apr_audio-v2.h>
-#include <sound/q6audio-v2.h>
-#include <sound/audio_effects.h>
-#include <sound/hwdep.h>
-#include <sound/msm-dts-eagle.h>
-#include <sound/q6core.h>
-
-#include "msm-pcm-routing-v2.h"
-
-#define ION_MEM_SIZE  131072
-#define DEPC_MAX_SIZE 524288
-
-#define MPST				AUDPROC_MODULE_ID_DTS_HPX_POSTMIX
-#define MPRE				AUDPROC_MODULE_ID_DTS_HPX_PREMIX
-
-#define eagle_vol_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_VOLUME: " fmt "\n", ##__VA_ARGS__)
-#define eagle_vol_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_VOLUME: " fmt "\n", ##__VA_ARGS__)
-#define eagle_drv_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER: " fmt "\n", ##__VA_ARGS__)
-#define eagle_drv_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER: " fmt "\n", ##__VA_ARGS__)
-#define eagle_precache_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_SENDCACHE_PRE: " fmt "\n", ##__VA_ARGS__)
-#define eagle_precache_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_SENDCACHE_PRE: " fmt "\n", ##__VA_ARGS__)
-#define eagle_postcache_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_SENDCACHE_POST: " fmt "\n", ##__VA_ARGS__)
-#define eagle_postcache_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_SENDCACHE_POST: " fmt "\n", ##__VA_ARGS__)
-#define eagle_ioctl_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_IOCTL: " fmt "\n", ##__VA_ARGS__)
-#define eagle_ioctl_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_IOCTL: " fmt "\n", ##__VA_ARGS__)
-#define eagle_asm_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_ASM: " fmt "\n", ##__VA_ARGS__)
-#define eagle_asm_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_ASM: " fmt "\n", ##__VA_ARGS__)
-#define eagle_adm_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_DRIVER_ADM: " fmt "\n", ##__VA_ARGS__)
-#define eagle_adm_err(fmt, ...) \
-	pr_err("DTS_EAGLE_DRIVER_ADM: " fmt "\n", ##__VA_ARGS__)
-#define eagle_enable_dbg(fmt, ...) \
-	pr_debug("DTS_EAGLE_ENABLE: " fmt "\n", ##__VA_ARGS__)
-#define eagle_enable_err(fmt, ...) \
-	pr_err("DTS_EAGLE_ENABLE: " fmt "\n", ##__VA_ARGS__)
-#define eagle_ioctl_info(fmt, ...) \
-	pr_err("DTS_EAGLE_IOCTL: " fmt "\n", ##__VA_ARGS__)
-
-enum {
-	AUDIO_DEVICE_OUT_EARPIECE = 0,
-	AUDIO_DEVICE_OUT_SPEAKER,
-	AUDIO_DEVICE_OUT_WIRED_HEADSET,
-	AUDIO_DEVICE_OUT_WIRED_HEADPHONE,
-	AUDIO_DEVICE_OUT_BLUETOOTH_SCO,
-	AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
-	AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT,
-	AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
-	AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
-	AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,
-	AUDIO_DEVICE_OUT_AUX_DIGITAL,
-	AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
-	AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
-	AUDIO_DEVICE_OUT_USB_ACCESSORY,
-	AUDIO_DEVICE_OUT_USB_DEVICE,
-	AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-	AUDIO_DEVICE_OUT_ANC_HEADSET,
-	AUDIO_DEVICE_OUT_ANC_HEADPHONE,
-	AUDIO_DEVICE_OUT_PROXY,
-	AUDIO_DEVICE_OUT_FM,
-	AUDIO_DEVICE_OUT_FM_TX,
-
-	AUDIO_DEVICE_OUT_COUNT
-};
-
-#define AUDIO_DEVICE_COMBO 0x400000 /* bit 23 */
-
-enum { /* cache block */
-	CB_0 = 0,
-	CB_1,
-	CB_2,
-	CB_3,
-	CB_4,
-	CB_5,
-	CB_6,
-	CB_7,
-
-	CB_COUNT
-};
-
-enum { /* cache block description */
-	CBD_DEV_MASK = 0,
-	CBD_OFFSG,
-	CBD_CMD0,
-	CBD_SZ0,
-	CBD_OFFS1,
-	CBD_CMD1,
-	CBD_SZ1,
-	CBD_OFFS2,
-	CBD_CMD2,
-	CBD_SZ2,
-	CBD_OFFS3,
-	CBD_CMD3,
-	CBD_SZ3,
-
-	CBD_COUNT,
-};
-
-static s32 _fx_logN(s32 x)
-{
-	s32 t, y = 0xa65af;
-
-	if (x < 0x00008000) {
-		x <<= 16; y -= 0xb1721; }
-	if (x < 0x00800000) {
-		x <<= 8; y -= 0x58b91; }
-	if (x < 0x08000000) {
-		x <<= 4; y -= 0x2c5c8; }
-	if (x < 0x20000000) {
-		x <<= 2; y -= 0x162e4; }
-	if (x < 0x40000000) {
-		x <<= 1; y -= 0x0b172; }
-	t = x + (x >> 1);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x067cd; }
-	t = x + (x >> 2);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x03920; }
-	t = x + (x >> 3);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x01e27; }
-	t = x + (x >> 4);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x00f85; }
-	t = x + (x >> 5);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x007e1; }
-	t = x + (x >> 6);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x003f8; }
-	t = x + (x >> 7);
-	if ((t & 0x80000000) == 0) {
-		x = t; y -= 0x001fe; }
-	x = 0x80000000 - x;
-	y -= x >> 15;
-	return y;
-}
-
-static inline void *_getd(struct dts_eagle_param_desc *depd)
-{
-	return (void *)(((char *)depd) + sizeof(struct dts_eagle_param_desc));
-}
-
-static int _ref_cnt;
-/* dts eagle parameter cache */
-static char *_depc;
-static u32 _depc_size;
-static s32 _c_bl[CB_COUNT][CBD_COUNT];
-static u32 _device_primary;
-static u32 _device_all;
-/* ION states */
-static struct ion_client *_ion_client;
-static struct ion_handle *_ion_handle;
-static struct param_outband _po;
-static struct audio_client *_ac_NT;
-static struct ion_client *_ion_client_NT;
-static struct ion_handle *_ion_handle_NT;
-static struct param_outband _po_NT;
-
-#define SEC_BLOB_MAX_CNT 10
-#define SEC_BLOB_MAX_SIZE 0x4004 /*extra 4 for size*/
-static char *_sec_blob[SEC_BLOB_MAX_CNT];
-
-/* multi-copp support */
-static int _cidx[AFE_MAX_PORTS] = {-1};
-
-/* volume controls */
-#define VOL_CMD_CNT_MAX 10
-static u32 _vol_cmd_cnt;
-static s32 **_vol_cmds;
-struct vol_cmds_d {
-	s32 d[4];
-};
-static struct vol_cmds_d *_vol_cmds_d;
-static const s32 _log10_10_inv_x20 = 0x0008af84;
-
-/* hpx master control */
-static u32 _is_hpx_enabled;
-
-static void _volume_cmds_free(void)
-{
-	int i;
-
-	for (i = 0; i < _vol_cmd_cnt; i++)
-		kfree(_vol_cmds[i]);
-	_vol_cmd_cnt = 0;
-	kfree(_vol_cmds);
-	kfree(_vol_cmds_d);
-	_vol_cmds = NULL;
-	_vol_cmds_d = NULL;
-}
-
-static s32 _volume_cmds_alloc1(s32 size)
-{
-	_volume_cmds_free();
-	_vol_cmd_cnt = size;
-	_vol_cmds = kzalloc(_vol_cmd_cnt * sizeof(int *), GFP_KERNEL);
-	if (_vol_cmds) {
-		_vol_cmds_d = kzalloc(_vol_cmd_cnt * sizeof(struct vol_cmds_d),
-					GFP_KERNEL);
-	} else
-		_vol_cmd_cnt = 0;
-	if (_vol_cmds_d)
-		return 0;
-	_volume_cmds_free();
-	return -ENOMEM;
-}
-
-/* assumes size is equal or less than 0xFFF */
-static s32 _volume_cmds_alloc2(s32 idx, s32 size)
-{
-	kfree(_vol_cmds[idx]);
-	_vol_cmds[idx] = kzalloc(size, GFP_KERNEL);
-	if (_vol_cmds[idx])
-		return 0;
-	_vol_cmds_d[idx].d[0] = 0;
-	return -ENOMEM;
-}
-
-static void _init_cb_descs(void)
-{
-	int i;
-
-	for (i = 0; i < CB_COUNT; i++) {
-		_c_bl[i][CBD_DEV_MASK] = 0;
-		_c_bl[i][CBD_OFFSG] = _c_bl[i][CBD_OFFS1] =
-		_c_bl[i][CBD_OFFS2] = _c_bl[i][CBD_OFFS3] =
-		0xFFFFFFFF;
-		_c_bl[i][CBD_CMD0] = _c_bl[i][CBD_SZ0] =
-		_c_bl[i][CBD_CMD1] = _c_bl[i][CBD_SZ1] =
-		_c_bl[i][CBD_CMD2] = _c_bl[i][CBD_SZ2] =
-		_c_bl[i][CBD_CMD3] = _c_bl[i][CBD_SZ3] = 0;
-	}
-}
-
-static u32 _get_dev_mask_for_pid(int pid)
-{
-	switch (pid) {
-	case SLIMBUS_0_RX:
-		return (1 << AUDIO_DEVICE_OUT_EARPIECE) |
-			(1 << AUDIO_DEVICE_OUT_SPEAKER) |
-			(1 << AUDIO_DEVICE_OUT_WIRED_HEADSET) |
-			(1 << AUDIO_DEVICE_OUT_WIRED_HEADPHONE) |
-			(1 << AUDIO_DEVICE_OUT_ANC_HEADSET) |
-			(1 << AUDIO_DEVICE_OUT_ANC_HEADPHONE);
-		/* fallthrough */
-	case INT_BT_SCO_RX:
-		return (1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO) |
-			(1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET) |
-			(1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT);
-		/* fallthrough */
-	case RT_PROXY_PORT_001_RX:
-		return (1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP) |
-			(1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES) |
-			(1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER) |
-			(1 << AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET) |
-			(1 << AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET) |
-			(1 << AUDIO_DEVICE_OUT_USB_ACCESSORY) |
-			(1 << AUDIO_DEVICE_OUT_USB_DEVICE) |
-			(1 << AUDIO_DEVICE_OUT_PROXY);
-		/* fallthrough */
-	case HDMI_RX:
-		return 1 << AUDIO_DEVICE_OUT_AUX_DIGITAL;
-	case INT_FM_RX:
-		return 1 << AUDIO_DEVICE_OUT_FM;
-	case INT_FM_TX:
-		return 1 << AUDIO_DEVICE_OUT_FM_TX;
-	default:
-		return 0;
-	}
-}
-
-static int _get_pid_from_dev(u32 device)
-{
-	if (device & (1 << AUDIO_DEVICE_OUT_EARPIECE) ||
-	    device & (1 << AUDIO_DEVICE_OUT_SPEAKER) ||
-	    device & (1 << AUDIO_DEVICE_OUT_WIRED_HEADSET) ||
-	    device & (1 << AUDIO_DEVICE_OUT_WIRED_HEADPHONE) ||
-	    device & (1 << AUDIO_DEVICE_OUT_ANC_HEADSET) ||
-	    device & (1 << AUDIO_DEVICE_OUT_ANC_HEADPHONE)) {
-		return SLIMBUS_0_RX;
-	} else if (device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO) ||
-		   device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET) ||
-		   device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT)) {
-		return INT_BT_SCO_RX;
-	} else if (device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP) ||
-		   device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES) ||
-		   device & (1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER) ||
-		   device & (1 << AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET) ||
-		   device & (1 << AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET) ||
-		   device & (1 << AUDIO_DEVICE_OUT_USB_ACCESSORY) ||
-		   device & (1 << AUDIO_DEVICE_OUT_USB_DEVICE) ||
-		   device & (1 << AUDIO_DEVICE_OUT_PROXY)) {
-		return RT_PROXY_PORT_001_RX;
-	} else if (device & (1 << AUDIO_DEVICE_OUT_AUX_DIGITAL)) {
-		return HDMI_RX;
-	} else if (device & (1 << AUDIO_DEVICE_OUT_FM)) {
-		return INT_FM_RX;
-	} else if (device & (1 << AUDIO_DEVICE_OUT_FM_TX)) {
-		return INT_FM_TX;
-	}
-	return 0;
-}
-
-static s32 _get_cb_for_dev(int device)
-{
-	s32 i;
-
-	if (device & AUDIO_DEVICE_COMBO) {
-		for (i = 0; i < CB_COUNT; i++) {
-			if ((_c_bl[i][CBD_DEV_MASK] & device) == device)
-				return i;
-		}
-	} else {
-		for (i = 0; i < CB_COUNT; i++) {
-			if ((_c_bl[i][CBD_DEV_MASK] & device) &&
-			    !(_c_bl[i][CBD_DEV_MASK] & AUDIO_DEVICE_COMBO))
-				return i;
-		}
-	}
-	eagle_drv_err("%s: device %i not found", __func__, device);
-	return -EINVAL;
-}
-
-static int _is_port_open_and_eagle(int pid)
-{
-	if (msm_routing_check_backend_enabled(pid))
-		return 1;
-	return 1;
-}
-
-static int _isNTDevice(u32 device)
-{
-	if (device &
-		((1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO) |
-		(1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET) |
-		(1 << AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT) |
-		(1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP) |
-		(1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES) |
-		(1 << AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER) |
-		(1 << AUDIO_DEVICE_OUT_AUX_DIGITAL)))
-		return 1;
-	return 0;
-}
-
-static void _reg_ion_mem(void)
-{
-	int rc;
-
-	rc = msm_audio_ion_alloc("DTS_EAGLE", &_ion_client, &_ion_handle,
-			    ION_MEM_SIZE, &_po.paddr, &_po.size, &_po.kvaddr);
-	if (rc)
-		eagle_drv_err("%s: msm audio ion alloc failed with %i",
-				__func__, rc);
-}
-
-static void _unreg_ion_mem(void)
-{
-	int rc;
-
-	rc = msm_audio_ion_free(_ion_client, _ion_handle);
-	if (rc)
-		eagle_drv_err("%s: msm audio ion alloc failed with %i",
-				__func__, rc);
-}
-
-static void _reg_ion_mem_NT(void)
-{
-	int rc;
-
-	eagle_drv_dbg("%s: NT ion mem", __func__);
-	rc = msm_audio_ion_alloc("DTS_EAGLE", &_ion_client_NT,
-				 &_ion_handle_NT, ION_MEM_SIZE,
-				 &_po_NT.paddr, &_po_NT.size, &_po_NT.kvaddr);
-	if (rc) {
-		eagle_drv_err("%s: msm audio ion alloc failed",	__func__);
-		return;
-	}
-	rc = q6asm_memory_map(_ac_NT, _po_NT.paddr,
-			      IN, _po_NT.size, 1);
-	if (rc < 0) {
-		eagle_drv_err("%s: memory map failed", __func__);
-		msm_audio_ion_free(_ion_client_NT, _ion_handle_NT);
-		_ion_client_NT = NULL;
-		_ion_handle_NT = NULL;
-	}
-}
-
-static void _unreg_ion_mem_NT(void)
-{
-	int rc;
-
-	rc = q6asm_memory_unmap(_ac_NT,	_po_NT.paddr, IN);
-	if (rc < 0)
-		eagle_drv_err("%s: mem unmap failed", __func__);
-	rc = msm_audio_ion_free(_ion_client_NT, _ion_handle_NT);
-	if (rc < 0)
-		eagle_drv_err("%s: mem free failed", __func__);
-
-	_ion_client_NT = NULL;
-	_ion_handle_NT = NULL;
-}
-
-static struct audio_client *_getNTDeviceAC(void)
-{
-	return _ac_NT;
-}
-
-static void _set_audioclient(struct audio_client *ac)
-{
-	_ac_NT = ac;
-	_reg_ion_mem_NT();
-}
-
-static void _clear_audioclient(void)
-{
-	_unreg_ion_mem_NT();
-	_ac_NT = NULL;
-}
-
-
-static int _sendcache_pre(struct audio_client *ac)
-{
-	uint32_t offset, size;
-	int32_t cidx, cmd, err = 0;
-
-	cidx = _get_cb_for_dev(_device_primary);
-	if (cidx < 0) {
-		eagle_precache_err("%s: no cache for primary device %i found",
-			__func__, _device_primary);
-		return -EINVAL;
-	}
-	offset = _c_bl[cidx][CBD_OFFSG];
-	cmd = _c_bl[cidx][CBD_CMD0];
-	size = _c_bl[cidx][CBD_SZ0];
-	/* check for integer overflow */
-	if (offset > (UINT_MAX - size))
-		err = -EINVAL;
-	if ((_depc_size == 0) || !_depc || (size == 0) ||
-		cmd == 0 || ((offset + size) > _depc_size) || (err != 0)) {
-		eagle_precache_err("%s: primary device %i cache index %i general error - cache size = %u, cache ptr = %pK, offset = %u, size = %u, cmd = %i",
-			__func__, _device_primary, cidx, _depc_size, _depc,
-			offset, size, cmd);
-		return -EINVAL;
-	}
-
-	if ((offset < (UINT_MAX - 124)) && ((offset + 124) < _depc_size))
-		eagle_precache_dbg("%s: first 6 integers %i %i %i %i %i %i (30th %i)",
-			__func__, *((int *)&_depc[offset]),
-			*((int *)&_depc[offset+4]),
-			*((int *)&_depc[offset+8]),
-			*((int *)&_depc[offset+12]),
-			*((int *)&_depc[offset+16]),
-			*((int *)&_depc[offset+20]),
-			*((int *)&_depc[offset+120]));
-	eagle_precache_dbg("%s: sending full data block to port, with cache index = %d device mask 0x%X, param = 0x%X, offset = %u, and size = %u",
-		  __func__, cidx, _c_bl[cidx][CBD_DEV_MASK], cmd, offset, size);
-
-	if (q6asm_dts_eagle_set(ac, cmd, size, (void *)&_depc[offset],
-				NULL, MPRE))
-		eagle_precache_err("%s: q6asm_dts_eagle_set failed with id = %d and size = %u",
-			__func__, cmd, size);
-	else
-		eagle_precache_dbg("%s: q6asm_dts_eagle_set succeeded with id = %d and size = %u",
-			 __func__, cmd, size);
-	return 0;
-}
-
-static int _sendcache_post(int port_id, int copp_idx, int topology)
-{
-	int cidx = -1, cmd, mask, index, err = 0;
-	uint32_t offset, size;
-
-	if (port_id == -1) {
-		cidx = _get_cb_for_dev(_device_primary);
-		if (cidx < 0) {
-			eagle_postcache_err("%s: no cache for primary device %i found. Port id was 0x%X",
-				__func__, _device_primary, port_id);
-			return -EINVAL;
-		}
-		goto NT_MODE_GOTO;
-	}
-
-	index = adm_validate_and_get_port_index(port_id);
-	if (index < 0) {
-		eagle_postcache_err("%s: Invalid port idx %d port_id %#x",
-			__func__, index, port_id);
-		return -EINVAL;
-	}
-	eagle_postcache_dbg("%s: valid port idx %d for port_id %#x set to %i",
-		__func__, index, port_id, copp_idx);
-	_cidx[index] = copp_idx;
-
-	mask = _get_dev_mask_for_pid(port_id);
-	if (mask & _device_primary) {
-		cidx = _get_cb_for_dev(_device_primary);
-		if (cidx < 0) {
-			eagle_postcache_err("%s: no cache for primary device %i found. Port id was 0x%X",
-				__func__, _device_primary, port_id);
-			return -EINVAL;
-		}
-	} else if (mask & _device_all) {
-		cidx = _get_cb_for_dev(_device_all);
-		if (cidx < 0) {
-			eagle_postcache_err("%s: no cache for combo device %i found. Port id was 0x%X",
-				__func__, _device_all, port_id);
-			return -EINVAL;
-		}
-	} else {
-		eagle_postcache_err("%s: port id 0x%X not for primary or combo device %i",
-			__func__, port_id, _device_primary);
-		return -EINVAL;
-	}
-
-NT_MODE_GOTO:
-	offset = _c_bl[cidx][CBD_OFFSG] + _c_bl[cidx][CBD_OFFS2];
-	cmd = _c_bl[cidx][CBD_CMD2];
-	size = _c_bl[cidx][CBD_SZ2];
-
-	/* check for integer overflow */
-	if (offset > (UINT_MAX - size))
-		err = -EINVAL;
-	if ((_depc_size == 0) || !_depc || (err != 0) || (size == 0) ||
-		(cmd == 0) || (offset + size) > _depc_size) {
-		eagle_postcache_err("%s: primary device %i cache index %i port_id 0x%X general error - cache size = %u, cache ptr = %pK, offset = %u, size = %u, cmd = %i",
-			__func__, _device_primary, cidx, port_id,
-			_depc_size, _depc, offset, size, cmd);
-		return -EINVAL;
-	}
-
-	if ((offset < (UINT_MAX - 24)) && ((offset + 24) < _depc_size))
-		eagle_postcache_dbg("%s: first 6 integers %i %i %i %i %i %i",
-			__func__, *((int *)&_depc[offset]),
-			*((int *)&_depc[offset+4]),
-			*((int *)&_depc[offset+8]),
-			*((int *)&_depc[offset+12]),
-			*((int *)&_depc[offset+16]),
-			*((int *)&_depc[offset+20]));
-	eagle_postcache_dbg("%s: sending full data block to port, with cache index = %d device mask 0x%X, port_id = 0x%X, param = 0x%X, offset = %u, and size = %u",
-		__func__, cidx, _c_bl[cidx][CBD_DEV_MASK], port_id, cmd,
-		offset, size);
-
-	if (_ac_NT) {
-		eagle_postcache_dbg("%s: NT Route detected", __func__);
-		if (q6asm_dts_eagle_set(_getNTDeviceAC(), cmd, size,
-					(void *)&_depc[offset],
-					&_po_NT, MPST))
-			eagle_postcache_err("%s: q6asm_dts_eagle_set failed with id = 0x%X and size = %u",
-				__func__, cmd, size);
-	} else if (adm_dts_eagle_set(port_id, copp_idx, cmd,
-			      (void *)&_depc[offset], size) < 0)
-		eagle_postcache_err("%s: adm_dts_eagle_set failed with id = 0x%X and size = %u",
-			__func__, cmd, size);
-	else
-		eagle_postcache_dbg("%s: adm_dts_eagle_set succeeded with id = 0x%X and size = %u",
-			 __func__, cmd, size);
-	return 0;
-}
-
-static int _enable_post_get_control(struct snd_kcontrol *kcontrol,
-				    struct snd_ctl_elem_value *ucontrol)
-{
-	ucontrol->value.integer.value[0] = _is_hpx_enabled;
-	return 0;
-}
-
-static int _enable_post_put_control(struct snd_kcontrol *kcontrol,
-				    struct snd_ctl_elem_value *ucontrol)
-{
-	int idx = 0, be_index = 0, port_id, topology;
-	int flag = ucontrol->value.integer.value[0];
-	struct msm_pcm_routing_bdai_data msm_bedai;
-
-	eagle_drv_dbg("%s: flag %d", __func__, flag);
-
-	_is_hpx_enabled = flag ? true : false;
-	msm_pcm_routing_acquire_lock();
-	/* send cache postmix params when hpx is set On */
-	for (be_index = 0; be_index < MSM_BACKEND_DAI_MAX; be_index++) {
-		msm_pcm_routing_get_bedai_info(be_index, &msm_bedai);
-		port_id = msm_bedai.port_id;
-		if (!(((port_id == SLIMBUS_0_RX) ||
-		      (port_id == RT_PROXY_PORT_001_RX)) &&
-		      msm_bedai.active))
-			continue;
-		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
-			topology = adm_get_topology_for_port_copp_idx(
-								port_id, idx);
-			if (topology ==
-				ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX) {
-				msm_dts_eagle_enable_adm(port_id, idx,
-							 _is_hpx_enabled);
-			}
-		}
-	}
-	msm_pcm_routing_release_lock();
-	return 0;
-}
-
-static const struct snd_kcontrol_new _hpx_enabled_controls[] = {
-	SOC_SINGLE_EXT("Set HPX OnOff", SND_SOC_NOPM, 0, 1, 0,
-	_enable_post_get_control, _enable_post_put_control)
-};
-
-/**
- * msm_dts_ion_memmap() - helper function to map ION memory
- * @po_:	Out of band memory structure used as memory.
- *
- * Assign already allocated ION memory for mapping it to dsp.
- *
- * Return: No return value.
- */
-void msm_dts_ion_memmap(struct param_outband *po_)
-{
-	po_->size = ION_MEM_SIZE;
-	po_->kvaddr = _po.kvaddr;
-	po_->paddr = _po.paddr;
-}
-
-/**
- * msm_dts_eagle_enable_asm() - Enable/disable dts module
- * @ac:	Enable/disable module in ASM session associated with this audio client.
- * @enable:	Enable/disable the dts module.
- * @module:	module id.
- *
- * Enable/disable specified dts module id in asm.
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_enable_asm(struct audio_client *ac, u32 enable, int module)
-{
-	int ret = 0;
-
-	eagle_enable_dbg("%s: enable = %i on module %i",
-		 __func__, enable, module);
-	_is_hpx_enabled = enable;
-	ret = q6asm_dts_eagle_set(ac, AUDPROC_PARAM_ID_ENABLE,
-				      sizeof(enable), &enable,
-				      NULL, module);
-	if (_is_hpx_enabled) {
-		if (module == MPRE)
-			_sendcache_pre(ac);
-		else if (module == MPST)
-			_sendcache_post(-1, 0, 0);
-	}
-	return ret;
-}
-
-/**
- * msm_dts_eagle_enable_adm() - Enable/disable dts module in adm
- * @port_id:	Send enable/disable param to this port id.
- * @copp_idx:	Send enable/disable param to the relevant copp.
- * @enable:	Enable/disable the dts module.
- *
- * Enable/disable dts module in adm.
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_enable_adm(int port_id, int copp_idx, u32 enable)
-{
-	int ret = 0;
-
-	eagle_enable_dbg("%s: enable = %i", __func__, enable);
-	_is_hpx_enabled = enable;
-	ret = adm_dts_eagle_set(port_id, copp_idx, AUDPROC_PARAM_ID_ENABLE,
-			     (char *)&enable, sizeof(enable));
-	if (_is_hpx_enabled)
-		_sendcache_post(port_id, copp_idx, MPST);
-	return ret;
-}
-
-/**
- * msm_dts_eagle_add_controls() -  Add mixer control to Enable/Disable DTS HPX
- * @platform:	Add mixer controls to this platform.
- *
- * Add mixer control to Enable/Disable DTS HPX module in ADM.
- *
- * Return: No return value.
- */
-void msm_dts_eagle_add_controls(struct snd_soc_platform *platform)
-{
-	snd_soc_add_platform_controls(platform, _hpx_enabled_controls,
-				      ARRAY_SIZE(_hpx_enabled_controls));
-}
-
-/**
- * msm_dts_eagle_set_stream_gain() -  Set stream gain to DTS Premix module
- * @ac:	Set stream gain to ASM session associated with this audio client.
- * @lgain:	Left gain value.
- * @rgain:	Right gain value.
- *
- * Set stream gain to DTS Premix module in ASM.
- *
- * Return: failure or success.
- */
-int msm_dts_eagle_set_stream_gain(struct audio_client *ac, int lgain, int rgain)
-{
-	u32 i, val;
-	s32 idx, err = 0;
-
-	eagle_vol_dbg("%s: - entry: vol_cmd_cnt = %u, lgain = %i, rgain = %i",
-		 __func__, _vol_cmd_cnt, lgain, rgain);
-
-	if (_depc_size == 0) {
-		eagle_vol_dbg("%s: driver cache not initialized", __func__);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < _vol_cmd_cnt; i++) {
-		if (_vol_cmds_d[i].d[0] & 0x8000) {
-			idx = (sizeof(struct dts_eagle_param_desc)/sizeof(int))
-				+ (_vol_cmds_d[i].d[0] & 0x3FF);
-			val = _fx_logN(((s32)(lgain+rgain)) << 2);
-			val = ((long long)val * _log10_10_inv_x20) >> 16;
-			_vol_cmds[i][idx] = (s32)clamp((int)(((long long)val *
-						    _vol_cmds_d[i].d[1]) >> 16),
-						    _vol_cmds_d[i].d[2],
-						    _vol_cmds_d[i].d[3]);
-			 eagle_vol_dbg("%s: loop %u cmd desc found %i, idx = %i. volume info: lgain = %i, rgain = %i, volume = %i (scale %i, min %i, max %i)",
-				 __func__, i, _vol_cmds_d[i].d[0], idx, lgain,
-				 rgain, _vol_cmds[i][idx], _vol_cmds_d[i].d[1],
-				 _vol_cmds_d[i].d[2], _vol_cmds_d[i].d[3]);
-		}
-		idx = _get_cb_for_dev(_device_primary);
-		if (idx < 0) {
-			eagle_vol_err("%s: no cache for primary device %i found",
-				__func__, _device_primary);
-			return -EINVAL;
-		}
-		val = _c_bl[idx][CBD_OFFSG] + _vol_cmds[i][2];
-		/* check for integer overflow */
-		if (val > (UINT_MAX - _vol_cmds[i][1]))
-			err = -EINVAL;
-		if ((err != 0) || ((val + _vol_cmds[i][1]) > _depc_size)) {
-			eagle_vol_err("%s: volume size (%u) + offset (%i) out of bounds %i",
-				__func__, val, _vol_cmds[i][1], _depc_size);
-			return -EINVAL;
-		}
-		memcpy((void *)&_depc[val], &_vol_cmds[i][4], _vol_cmds[i][1]);
-		if (q6asm_dts_eagle_set(ac, _vol_cmds[i][0],
-			_vol_cmds[i][1], (void *)&_depc[val], NULL, MPRE))
-			eagle_vol_err("%s: loop %u - volume set failed with id 0x%X, size %i, offset %i, cmd_desc %i, scale %i, min %i, max %i, data(...) %i",
-				__func__, i, _vol_cmds[i][0], _vol_cmds[i][1],
-				_vol_cmds[i][2], _vol_cmds_d[i].d[0],
-				_vol_cmds_d[i].d[1], _vol_cmds_d[i].d[2],
-				_vol_cmds_d[i].d[3], _vol_cmds[i][4]);
-		else
-			eagle_vol_dbg("%s: loop %u - volume set succeeded with id 0x%X, size %i, offset %i, cmd_desc %i, scale %i, min %i, max %i, data(...) %i",
-				 __func__, i, _vol_cmds[i][0], _vol_cmds[i][1],
-				 _vol_cmds[i][2], _vol_cmds_d[i].d[0],
-				 _vol_cmds_d[i].d[1], _vol_cmds_d[i].d[2],
-				 _vol_cmds_d[i].d[3], _vol_cmds[i][4]);
-	}
-	return 0;
-}
-
-/**
- * msm_dts_eagle_handle_asm() - Set or Get params from ASM
- * @depd:	DTS Eagle Params structure.
- * @buf:	Buffer to get queried param value.
- * @for_pre:	For premix module or postmix module.
- * @get:	Getting param from DSP or setting param.
- * @ac:	Set/Get from ASM session associated with this audio client.
- * @po:	Out of band memory to set or get postmix params.
- *
- * Set or Get params from modules in ASM session.
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_handle_asm(struct dts_eagle_param_desc *depd, char *buf,
-			     bool for_pre, bool get, struct audio_client *ac,
-			     struct param_outband *po)
-{
-	struct dts_eagle_param_desc depd_ = {0};
-	s32 ret = 0, isALSA = 0, err = 0, i, mod = for_pre ? MPRE : MPST;
-	u32 offset;
-
-	eagle_asm_dbg("%s: set/get asm", __func__);
-
-	/* special handling for ALSA route, to accommodate 64 bit platforms */
-	if (depd == NULL) {
-		long *arg_ = (long *)buf;
-
-		depd = &depd_;
-		depd->id = (u32)*arg_++;
-		depd->size = (u32)*arg_++;
-		depd->offset = (s32)*arg_++;
-		depd->device = (u32)*arg_++;
-		buf = (char *)arg_;
-		isALSA = 1;
-	}
-
-	if (depd->size & 1) {
-		eagle_asm_err("%s: parameter size %u is not a multiple of 2",
-			__func__, depd->size);
-		return -EINVAL;
-	}
-
-	if (get) {
-		void *buf_, *buf_m = NULL;
-
-		eagle_asm_dbg("%s: get requested", __func__);
-		if (depd->offset == -1) {
-			eagle_asm_dbg("%s: get from dsp requested", __func__);
-			if (depd->size > 0 && depd->size <= DEPC_MAX_SIZE) {
-				buf_ = buf_m = vzalloc(depd->size);
-			} else {
-				eagle_asm_err("%s: get size %u invalid",
-					      __func__, depd->size);
-				return -EINVAL;
-			}
-			if (!buf_m) {
-				eagle_asm_err("%s: out of memory", __func__);
-				return -ENOMEM;
-			} else if (q6asm_dts_eagle_get(ac, depd->id,
-						       depd->size, buf_m,
-						       po, mod) < 0) {
-				eagle_asm_err("%s: asm get failed", __func__);
-				ret = -EFAULT;
-				goto DTS_EAGLE_IOCTL_GET_PARAM_PRE_EXIT;
-			}
-			eagle_asm_dbg("%s: get result: param id 0x%x value %d size %u",
-				 __func__, depd->id, *(int *)buf_m, depd->size);
-		} else {
-			s32 tgt = _get_cb_for_dev(depd->device);
-
-			if (tgt < 0) {
-				eagle_asm_err("%s: no cache for device %u found",
-					__func__, depd->device);
-				return -EINVAL;
-			}
-			offset = _c_bl[tgt][CBD_OFFSG] + depd->offset;
-			/* check for integer overflow */
-			if (offset > (UINT_MAX - depd->size))
-				err = -EINVAL;
-			if ((err != 0) || (offset + depd->size) > _depc_size) {
-				eagle_asm_err("%s: invalid size %u and/or offset %u",
-					__func__, depd->size, offset);
-				return -EINVAL;
-			}
-			buf_ = (u32 *)&_depc[offset];
-		}
-		if (isALSA) {
-			if (depd->size == 2) {
-				*(long *)buf = (long)*(__u16 *)buf_;
-				eagle_asm_dbg("%s: asm out 16 bit value %li",
-						__func__, *(long *)buf);
-			} else {
-				s32 *pbuf = (s32 *)buf_;
-				long *bufl = (long *)buf;
-
-				for (i = 0; i < (depd->size >> 2); i++) {
-					*bufl++ = (long)*pbuf++;
-					eagle_asm_dbg("%s: asm out value %li",
-							 __func__, *(bufl-1));
-				}
-			}
-		} else {
-			memcpy(buf, buf_, depd->size);
-		}
-DTS_EAGLE_IOCTL_GET_PARAM_PRE_EXIT:
-		vfree(buf_m);
-		return (int)ret;
-	} else {
-		s32 tgt = _get_cb_for_dev(depd->device);
-
-		if (tgt < 0) {
-			eagle_asm_err("%s: no cache for device %u found",
-				__func__, depd->device);
-			return -EINVAL;
-		}
-		offset = _c_bl[tgt][CBD_OFFSG] + depd->offset;
-		/* check for integer overflow */
-		if (offset > (UINT_MAX - depd->size))
-			err = -EINVAL;
-		if ((err != 0) || ((offset + depd->size) > _depc_size)) {
-			eagle_asm_err("%s: invalid size %u and/or offset %u for parameter (cache is size %u)",
-				__func__, depd->size, offset, _depc_size);
-			return -EINVAL;
-		}
-		if (isALSA) {
-			if (depd->size == 2) {
-				*(__u16 *)&_depc[offset] = (__u16)*(long *)buf;
-				eagle_asm_dbg("%s: asm in 16 bit value %li",
-						__func__, *(long *)buf);
-			} else {
-				s32 *pbuf = (s32 *)&_depc[offset];
-				long *bufl = (long *)buf;
-
-				for (i = 0; i < (depd->size >> 2); i++) {
-					*pbuf++ = (s32)*bufl++;
-					eagle_asm_dbg("%s: asm in value %i",
-							__func__, *(pbuf-1));
-				}
-			}
-		} else {
-			memcpy(&_depc[offset], buf, depd->size);
-		}
-		eagle_asm_dbg("%s: param info: param = 0x%X, size = %u, offset = %i, device = %u, cache block %i, global offset = %u, first bytes as integer = %i",
-			__func__, depd->id, depd->size, depd->offset,
-			depd->device,
-			tgt, offset, *(int *)&_depc[offset]);
-		if (q6asm_dts_eagle_set(ac, depd->id, depd->size,
-					(void *)&_depc[offset], po, mod))
-			eagle_asm_err("%s: q6asm_dts_eagle_set failed with id = 0x%X, size = %u, offset = %d",
-				__func__, depd->id, depd->size, depd->offset);
-		else
-			eagle_asm_dbg("%s: q6asm_dts_eagle_set succeeded with id = 0x%X, size = %u, offset = %d",
-				 __func__, depd->id, depd->size, depd->offset);
-	}
-
-	return (int)ret;
-}
-
-/**
- * msm_dts_eagle_handle_adm() - Set or Get params from ADM
- * @depd:	DTS Eagle Params structure used to set or get.
- * @buf:	Buffer to get queried param value in NT mode.
- * @for_pre:	For premix module or postmix module.
- * @get:	Getting param from DSP or setting param.
- *
- * Set or Get params from modules in ADM session.
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_handle_adm(struct dts_eagle_param_desc *depd, char *buf,
-			     bool for_pre, bool get)
-{
-	u32 pid = _get_pid_from_dev(depd->device), cidx;
-	s32 ret = 0;
-
-	eagle_adm_dbg("%s: set/get adm", __func__);
-
-	if (_isNTDevice(depd->device)) {
-		eagle_adm_dbg("%s: NT Route detected", __func__);
-		ret = msm_dts_eagle_handle_asm(depd, buf, for_pre, get,
-					       _getNTDeviceAC(), &_po_NT);
-		if (ret < 0)
-			eagle_adm_err("%s: NT Route set failed with id = 0x%X, size = %u, offset = %i, device = %u",
-				__func__, depd->id, depd->size, depd->offset,
-				depd->device);
-	} else if (get) {
-		cidx = adm_validate_and_get_port_index(pid);
-		eagle_adm_dbg("%s: get from qdsp requested (port id 0x%X)",
-			 __func__, pid);
-		if (adm_dts_eagle_get(pid, _cidx[cidx], depd->id,
-				      buf, depd->size) < 0) {
-			eagle_adm_err("%s: get from qdsp via adm with port id 0x%X failed",
-				 __func__, pid);
-			return -EFAULT;
-		}
-	} else if (_is_port_open_and_eagle(pid)) {
-		cidx = adm_validate_and_get_port_index(pid);
-		eagle_adm_dbg("%s: adm_dts_eagle_set called with id = 0x%X, size = %u, offset = %i, device = %u, port id = %u, copp index = %u",
-				__func__, depd->id, depd->size, depd->offset,
-				depd->device, pid, cidx);
-		ret = adm_dts_eagle_set(pid, _cidx[cidx], depd->id,
-					(void *)buf, depd->size);
-		if (ret < 0)
-			eagle_adm_err("%s: adm_dts_eagle_set failed", __func__);
-		else
-			eagle_adm_dbg("%s: adm_dts_eagle_set succeeded",
-				__func__);
-	} else {
-		ret = -EINVAL;
-		eagle_adm_dbg("%s: port id 0x%X not active or not Eagle",
-			 __func__, pid);
-	}
-	return (int)ret;
-}
-
-/**
- * msm_dts_eagle_ioctl() - ioctl handler function
- * @cmd:	cmd to handle.
- * @arg:	argument to the cmd.
- *
- * Handle DTS Eagle ioctl cmds.
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_ioctl(unsigned int cmd, unsigned long arg)
-{
-	s32 ret = 0;
-
-	switch (cmd) {
-	case DTS_EAGLE_IOCTL_GET_CACHE_SIZE: {
-		eagle_ioctl_info("%s: called with control 0x%X (get param cache size)",
-			__func__, cmd);
-		if (copy_to_user((void *)arg, &_depc_size,
-				 sizeof(_depc_size))) {
-			eagle_ioctl_err("%s: error writing size", __func__);
-			return -EFAULT;
-		}
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_CACHE_SIZE: {
-		u32 size = 0;
-
-		eagle_ioctl_info("%s: called with control 0x%X (allocate param cache)",
-			__func__, cmd);
-		if (copy_from_user((void *)&size, (void *)arg, sizeof(size))) {
-			eagle_ioctl_err("%s: error copying size (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &size, sizeof(size));
-			return -EFAULT;
-		} else if (size > DEPC_MAX_SIZE) {
-			eagle_ioctl_err("%s: cache size %u not allowed (min 0, max %u)",
-				__func__, size, DEPC_MAX_SIZE);
-			return -EINVAL;
-		}
-		if (_depc) {
-			eagle_ioctl_dbg("%s: previous param cache of size %u freed",
-				__func__, _depc_size);
-			_depc_size = 0;
-			vfree(_depc);
-			_depc = NULL;
-		}
-		if (size)
-			_depc = vzalloc(size);
-		else
-			eagle_ioctl_dbg("%s: %u bytes requested for param cache, nothing allocated",
-				__func__, size);
-		if (_depc) {
-			eagle_ioctl_dbg("%s: %u bytes allocated for param cache",
-				__func__, size);
-			_depc_size = size;
-		} else {
-			eagle_ioctl_err("%s: error allocating param cache (vzalloc failed on %u bytes)",
-				__func__, size);
-			_depc_size = 0;
-			return -ENOMEM;
-		}
-		break;
-	}
-	case DTS_EAGLE_IOCTL_GET_PARAM: {
-		struct dts_eagle_param_desc depd;
-		s32 for_pre = 0, get_from_core = 0, err = 0;
-		u32 offset;
-		void *buf, *buf_m = NULL;
-
-		eagle_ioctl_info("%s: control 0x%X (get param)",
-			__func__, cmd);
-		if (copy_from_user((void *)&depd, (void *)arg, sizeof(depd))) {
-			eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &depd, sizeof(depd));
-			return -EFAULT;
-		}
-		if (depd.device & DTS_EAGLE_FLAG_IOCTL_PRE) {
-			eagle_ioctl_dbg("%s: using for premix", __func__);
-			for_pre = 1;
-		}
-		if (depd.device & DTS_EAGLE_FLAG_IOCTL_GETFROMCORE) {
-			eagle_ioctl_dbg("%s: 'get from core' requested",
-				__func__);
-			get_from_core = 1;
-			depd.offset = -1;
-		}
-		depd.device &= DTS_EAGLE_FLAG_IOCTL_MASK;
-		if (depd.offset == -1) {
-			if (depd.size > 0 && depd.size <= DEPC_MAX_SIZE) {
-				buf = buf_m = vzalloc(depd.size);
-			} else {
-				eagle_ioctl_err("%s: get size %u invalid",
-						__func__, depd.size);
-				return -EINVAL;
-			}
-			if (!buf_m) {
-				eagle_ioctl_err("%s: out of memory", __func__);
-				return -ENOMEM;
-			}
-			if (get_from_core)
-				ret = core_dts_eagle_get(depd.id, depd.size,
-							 buf);
-			else
-				ret = msm_dts_eagle_handle_adm(&depd, buf,
-								for_pre, true);
-		} else {
-			s32 cb = _get_cb_for_dev(depd.device);
-
-			if (cb < 0) {
-				eagle_ioctl_err("%s: no cache for device %u found",
-					__func__, depd.device);
-				return -EINVAL;
-			}
-			offset = _c_bl[cb][CBD_OFFSG] + depd.offset;
-			/* check for integer overflow */
-			if (offset > (UINT_MAX - depd.size))
-				err = -EINVAL;
-			if ((err != 0) ||
-			    ((offset + depd.size) > _depc_size)) {
-				eagle_ioctl_err("%s: invalid size %u and/or offset %u",
-					__func__, depd.size, offset);
-				return -EINVAL;
-			}
-			buf = (void *)&_depc[offset];
-		}
-		if (ret < 0)
-			eagle_ioctl_err("%s: error %i getting data", __func__,
-				ret);
-		else if (copy_to_user((void *)(((char *)arg)+sizeof(depd)),
-						  buf, depd.size)) {
-			eagle_ioctl_err("%s: error copying get data", __func__);
-			ret = -EFAULT;
-		}
-		vfree(buf_m);
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_PARAM: {
-		struct dts_eagle_param_desc depd;
-		s32 just_set_cache = 0, for_pre = 0, err = 0;
-		u32 offset;
-		s32 tgt;
-
-		eagle_ioctl_info("%s: control 0x%X (set param)",
-			__func__, cmd);
-		if (copy_from_user((void *)&depd, (void *)arg, sizeof(depd))) {
-			eagle_ioctl_err("%s: error copying dts_eagle_param_desc (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &depd, sizeof(depd));
-			return -EFAULT;
-		}
-		if (depd.device & DTS_EAGLE_FLAG_IOCTL_PRE) {
-			eagle_ioctl_dbg("%s: using for premix", __func__);
-			for_pre = 1;
-		}
-		if (depd.device & DTS_EAGLE_FLAG_IOCTL_JUSTSETCACHE) {
-			eagle_ioctl_dbg("%s: 'just set cache' requested",
-				__func__);
-			just_set_cache = 1;
-		}
-		depd.device &= DTS_EAGLE_FLAG_IOCTL_MASK;
-		tgt = _get_cb_for_dev(depd.device);
-		if (tgt < 0) {
-			eagle_ioctl_err("%s: no cache for device %u found",
-				__func__, depd.device);
-			return -EINVAL;
-		}
-		offset = _c_bl[tgt][CBD_OFFSG] + depd.offset;
-		/* check for integer overflow */
-		if (offset > (UINT_MAX - depd.size))
-			err = -EINVAL;
-		if ((err != 0) || ((offset + depd.size) > _depc_size)) {
-			eagle_ioctl_err("%s: invalid size %u and/or offset %u for parameter (target cache block %i with offset %i, global cache is size %u)",
-				__func__, depd.size, offset, tgt,
-				_c_bl[tgt][CBD_OFFSG], _depc_size);
-			return -EINVAL;
-		}
-		if (copy_from_user((void *)&_depc[offset],
-				   (void *)(((char *)arg)+sizeof(depd)),
-					depd.size)) {
-			eagle_ioctl_err("%s: error copying param to cache (src:%pK, tgt:%pK, size:%u)",
-				__func__, ((char *)arg)+sizeof(depd),
-				&_depc[offset], depd.size);
-			return -EFAULT;
-		}
-		eagle_ioctl_dbg("%s: param info: param = 0x%X, size = %u, offset = %i, device = %u, cache block %i, global offset = %u, first bytes as integer = %i",
-			__func__, depd.id, depd.size, depd.offset,
-			depd.device, tgt, offset, *(int *)&_depc[offset]);
-		if (!just_set_cache) {
-			ret = msm_dts_eagle_handle_adm(&depd, &_depc[offset],
-						       for_pre, false);
-		}
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_CACHE_BLOCK: {
-		u32 b_[CBD_COUNT+1], *b = &b_[1], cb;
-
-		eagle_ioctl_info("%s: with control 0x%X (set param cache block)",
-			 __func__, cmd);
-		if (copy_from_user((void *)b_, (void *)arg, sizeof(b_))) {
-			eagle_ioctl_err("%s: error copying cache block data (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, b_, sizeof(b_));
-			return -EFAULT;
-		}
-		cb = b_[0];
-		if (cb >= CB_COUNT) {
-			eagle_ioctl_err("%s: cache block %u out of range (max %u)",
-				__func__, cb, CB_COUNT-1);
-			return -EINVAL;
-		}
-		eagle_ioctl_dbg("%s: cache block %i set: devices 0x%X, global offset %i, offsets 1:%u 2:%u 3:%u, cmds/sizes 0:0x%X %u 1:0x%X %u 2:0x%X %u 3:0x%X %u",
-		__func__, cb, _c_bl[cb][CBD_DEV_MASK], _c_bl[cb][CBD_OFFSG],
-		_c_bl[cb][CBD_OFFS1], _c_bl[cb][CBD_OFFS2],
-		_c_bl[cb][CBD_OFFS3], _c_bl[cb][CBD_CMD0], _c_bl[cb][CBD_SZ0],
-		_c_bl[cb][CBD_CMD1], _c_bl[cb][CBD_SZ1], _c_bl[cb][CBD_CMD2],
-		_c_bl[cb][CBD_SZ2], _c_bl[cb][CBD_CMD3], _c_bl[cb][CBD_SZ3]);
-		if ((b[CBD_OFFSG]+b[CBD_OFFS1]+b[CBD_SZ1]) > _depc_size ||
-			(b[CBD_OFFSG]+b[CBD_OFFS2]+b[CBD_SZ2]) > _depc_size ||
-			(b[CBD_OFFSG]+b[CBD_OFFS3]+b[CBD_SZ3]) > _depc_size) {
-			eagle_ioctl_err("%s: cache block bounds out of range",
-					__func__);
-			return -EINVAL;
-		}
-		memcpy(_c_bl[cb], b, sizeof(_c_bl[cb]));
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE: {
-		u32 data[2];
-
-		eagle_ioctl_dbg("%s: with control 0x%X (set active device)",
-			 __func__, cmd);
-		if (copy_from_user((void *)data, (void *)arg, sizeof(data))) {
-			eagle_ioctl_err("%s: error copying active device data (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, data, sizeof(data));
-			return -EFAULT;
-		}
-		if (data[1] != 0) {
-			_device_primary = data[0];
-			eagle_ioctl_dbg("%s: primary device %i", __func__,
-				 data[0]);
-		} else {
-			_device_all = data[0];
-			eagle_ioctl_dbg("%s: all devices 0x%X", __func__,
-				 data[0]);
-		}
-		break;
-	}
-	case DTS_EAGLE_IOCTL_GET_LICENSE: {
-		u32 target = 0, size = 0;
-		s32 size_only;
-
-		eagle_ioctl_dbg("%s: with control 0x%X (get license)",
-			 __func__, cmd);
-		if (copy_from_user((void *)&target, (void *)arg,
-				   sizeof(target))) {
-			eagle_ioctl_err("%s: error reading license index. (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &target, sizeof(target));
-			return -EFAULT;
-		}
-		size_only = target & (1<<31) ? 1 : 0;
-		target &= 0x7FFFFFFF;
-		if (target >= SEC_BLOB_MAX_CNT) {
-			eagle_ioctl_err("%s: license index %u out of bounds (max index is %i)",
-				   __func__, target, SEC_BLOB_MAX_CNT-1);
-			return -EINVAL;
-		}
-		if (_sec_blob[target] == NULL) {
-			eagle_ioctl_err("%s: license index %u never initialized",
-				   __func__, target);
-			return -EINVAL;
-		}
-		size = ((u32 *)_sec_blob[target])[0];
-		if ((size == 0) || (size > SEC_BLOB_MAX_SIZE)) {
-			eagle_ioctl_err("%s: license size %u for index %u invalid (min size is 1, max size is %u)",
-				   __func__, size, target, SEC_BLOB_MAX_SIZE);
-			return -EINVAL;
-		}
-		if (size_only) {
-			eagle_ioctl_dbg("%s: reporting size of license data only",
-					__func__);
-			if (copy_to_user((void *)(((char *)arg)+sizeof(target)),
-				 (void *)&size, sizeof(size))) {
-				eagle_ioctl_err("%s: error copying license size",
-						__func__);
-				return -EFAULT;
-			}
-		} else if (copy_to_user((void *)(((char *)arg)+sizeof(target)),
-			   (void *)&(((s32 *)_sec_blob[target])[1]), size)) {
-			eagle_ioctl_err("%s: error copying license data",
-				__func__);
-			return -EFAULT;
-		} else {
-			eagle_ioctl_info("%s: license file %u bytes long from license index %u returned to user",
-				  __func__, size, target);
-		}
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_LICENSE: {
-		u32 target[2] = {0, 0};
-
-		eagle_ioctl_dbg("%s: control 0x%X (set license)", __func__,
-				cmd);
-		if (copy_from_user((void *)target, (void *)arg,
-				   sizeof(target))) {
-			eagle_ioctl_err("%s: error reading license index (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, target, sizeof(target));
-			return -EFAULT;
-		}
-		if (target[0] >= SEC_BLOB_MAX_CNT) {
-			eagle_ioctl_err("%s: license index %u out of bounds (max index is %u)",
-				   __func__, target[0], SEC_BLOB_MAX_CNT-1);
-			return -EINVAL;
-		}
-		if (target[1] == 0) {
-			eagle_ioctl_dbg("%s: request to free license index %u",
-				 __func__, target[0]);
-			kfree(_sec_blob[target[0]]);
-			_sec_blob[target[0]] = NULL;
-			break;
-		}
-		if ((target[1] == 0) || (target[1] >= SEC_BLOB_MAX_SIZE)) {
-			eagle_ioctl_err("%s: license size %u for index %u invalid (min size is 1, max size is %u)",
-				__func__, target[1], target[0],
-				SEC_BLOB_MAX_SIZE);
-			return -EINVAL;
-		}
-		if (_sec_blob[target[0]] != NULL) {
-			if (((u32 *)_sec_blob[target[0]])[1] != target[1]) {
-				eagle_ioctl_dbg("%s: request new size for already allocated license index %u",
-					 __func__, target[0]);
-			}
-			kfree(_sec_blob[target[0]]);
-			_sec_blob[target[0]] = NULL;
-		}
-		eagle_ioctl_dbg("%s: allocating %u bytes for license index %u",
-				__func__, target[1], target[0]);
-		_sec_blob[target[0]] = kzalloc(target[1] + 4, GFP_KERNEL);
-		if (!_sec_blob[target[0]]) {
-			eagle_ioctl_err("%s: error allocating license index %u (kzalloc failed on %u bytes)",
-					__func__, target[0], target[1]);
-			return -ENOMEM;
-		}
-		((u32 *)_sec_blob[target[0]])[0] = target[1];
-		if (copy_from_user(
-				(void *)&(((u32 *)_sec_blob[target[0]])[1]),
-				(void *)(((char *)arg)+sizeof(target)),
-				target[1])) {
-			eagle_ioctl_err("%s: error copying license to index %u, size %u (src:%pK, tgt:%pK, size:%u)",
-					__func__, target[0], target[1],
-					((char *)arg)+sizeof(target),
-					&(((u32 *)_sec_blob[target[0]])[1]),
-					target[1]);
-			return -EFAULT;
-		}
-		eagle_ioctl_info("%s: license file %u bytes long copied to license index %u",
-				  __func__, target[1], target[0]);
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SEND_LICENSE: {
-		u32 target = 0;
-
-		eagle_ioctl_dbg("%s: control 0x%X (send license)", __func__,
-				cmd);
-		if (copy_from_user((void *)&target, (void *)arg,
-				   sizeof(target))) {
-			eagle_ioctl_err("%s: error reading license index (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &target, sizeof(target));
-			return -EFAULT;
-		}
-		if (target >= SEC_BLOB_MAX_CNT) {
-			eagle_ioctl_err("%s: license index %u out of bounds (max index is %i)",
-					__func__, target, SEC_BLOB_MAX_CNT-1);
-			return -EINVAL;
-		}
-		if (!_sec_blob[target] ||
-		    ((u32 *)_sec_blob[target])[0] == 0) {
-			eagle_ioctl_err("%s: license index %u is invalid",
-				__func__, target);
-			return -EINVAL;
-		}
-		if (core_dts_eagle_set(((s32 *)_sec_blob[target])[0],
-				(char *)&((s32 *)_sec_blob[target])[1]) < 0)
-			eagle_ioctl_err("%s: core_dts_eagle_set failed with id = %u",
-				__func__, target);
-		else
-			eagle_ioctl_info("%s: core_dts_eagle_set succeeded with id = %u",
-				 __func__, target);
-		break;
-	}
-	case DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS: {
-		s32 spec = 0;
-
-		eagle_ioctl_info("%s: control 0x%X (set volume commands)",
-				__func__, cmd);
-		if (copy_from_user((void *)&spec, (void *)arg,
-					sizeof(spec))) {
-			eagle_ioctl_err("%s: error reading volume command specifier (src:%pK, tgt:%pK, size:%zu)",
-				__func__, (void *)arg, &spec, sizeof(spec));
-			return -EFAULT;
-		}
-		if (spec & 0x80000000) {
-			u32 idx = (spec & 0x0000F000) >> 12;
-			s32 size = spec & 0x00000FFF;
-
-			eagle_ioctl_dbg("%s: setting volume command %i size: %i",
-				__func__, idx, size);
-			if (idx >= _vol_cmd_cnt) {
-				eagle_ioctl_err("%s: volume command index %u out of bounds (only %u allocated)",
-					__func__, idx, _vol_cmd_cnt);
-				return -EINVAL;
-			}
-			if (_volume_cmds_alloc2(idx, size) < 0) {
-				eagle_ioctl_err("%s: error allocating memory for volume controls",
-						__func__);
-				return -ENOMEM;
-			}
-			if (copy_from_user((void *)&_vol_cmds_d[idx],
-					(void *)(((char *)arg) + sizeof(int)),
-					sizeof(struct vol_cmds_d))) {
-				eagle_ioctl_err("%s: error reading volume command descriptor (src:%pK, tgt:%pK, size:%zu)",
-					__func__, ((char *)arg) + sizeof(int),
-					&_vol_cmds_d[idx],
-					sizeof(struct vol_cmds_d));
-				return -EFAULT;
-			}
-			eagle_ioctl_dbg("%s: setting volume command %i spec (size %zu): %i %i %i %i",
-				  __func__, idx, sizeof(struct vol_cmds_d),
-				  _vol_cmds_d[idx].d[0], _vol_cmds_d[idx].d[1],
-				  _vol_cmds_d[idx].d[2], _vol_cmds_d[idx].d[3]);
-			if (copy_from_user((void *)_vol_cmds[idx],
-					(void *)(((char *)arg) + (sizeof(int) +
-					sizeof(struct vol_cmds_d))), size)) {
-				eagle_ioctl_err("%s: error reading volume command string (src:%pK, tgt:%pK, size:%i)",
-					__func__, ((char *)arg) + (sizeof(int) +
-					sizeof(struct vol_cmds_d)),
-					_vol_cmds[idx], size);
-				return -EFAULT;
-			}
-		} else {
-			eagle_ioctl_dbg("%s: setting volume command size",
-					__func__);
-			if (spec < 0 || spec > VOL_CMD_CNT_MAX) {
-				eagle_ioctl_err("%s: volume command count %i out of bounds (min 0, max %i)",
-					__func__, spec, VOL_CMD_CNT_MAX);
-				return -EINVAL;
-			} else if (spec == 0) {
-				eagle_ioctl_dbg("%s: request to free volume commands",
-						__func__);
-				_volume_cmds_free();
-				break;
-			}
-			eagle_ioctl_dbg("%s: setting volume command size requested = %i",
-				  __func__, spec);
-			if (_volume_cmds_alloc1(spec) < 0) {
-				eagle_ioctl_err("%s: error allocating memory for volume controls",
-						__func__);
-				return -ENOMEM;
-			}
-		}
-		break;
-	}
-	default: {
-		eagle_ioctl_err("%s: control 0x%X (invalid control)",
-			 __func__, cmd);
-		ret = -EINVAL;
-	}
-	}
-	return (int)ret;
-}
-
-/**
- * msm_dts_eagle_compat_ioctl() - To handle 32bit to 64bit ioctl compatibility
- * @cmd:	cmd to handle.
- * @arg:	argument to the cmd.
- *
- * Handle DTS Eagle ioctl cmds from 32bit userspace.
- *
- * Return: Return failure if any.
- */
-#ifdef CONFIG_COMPAT
-int msm_dts_eagle_compat_ioctl(unsigned int cmd, unsigned long arg)
-{
-	switch (cmd) {
-	case DTS_EAGLE_IOCTL_GET_CACHE_SIZE32:
-		cmd = DTS_EAGLE_IOCTL_GET_CACHE_SIZE;
-		break;
-	case DTS_EAGLE_IOCTL_SET_CACHE_SIZE32:
-		cmd = DTS_EAGLE_IOCTL_SET_CACHE_SIZE;
-		break;
-	case DTS_EAGLE_IOCTL_GET_PARAM32:
-		cmd = DTS_EAGLE_IOCTL_GET_PARAM;
-		break;
-	case DTS_EAGLE_IOCTL_SET_PARAM32:
-		cmd = DTS_EAGLE_IOCTL_SET_PARAM;
-		break;
-	case DTS_EAGLE_IOCTL_SET_CACHE_BLOCK32:
-		cmd = DTS_EAGLE_IOCTL_SET_CACHE_BLOCK;
-		break;
-	case DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE32:
-		cmd = DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE;
-		break;
-	case DTS_EAGLE_IOCTL_GET_LICENSE32:
-		cmd = DTS_EAGLE_IOCTL_GET_LICENSE;
-		break;
-	case DTS_EAGLE_IOCTL_SET_LICENSE32:
-		cmd = DTS_EAGLE_IOCTL_SET_LICENSE;
-		break;
-	case DTS_EAGLE_IOCTL_SEND_LICENSE32:
-		cmd = DTS_EAGLE_IOCTL_SEND_LICENSE;
-		break;
-	case DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS32:
-		cmd = DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS;
-		break;
-	default:
-		break;
-	}
-	return msm_dts_eagle_ioctl(cmd, arg);
-}
-#endif
-/**
- * msm_dts_eagle_init_pre() - Initialize DTS premix module
- * @ac:	Initialize premix module in the ASM session.
- *
- * Initialize DTS premix module on provided ASM session
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_init_pre(struct audio_client *ac)
-{
-	return msm_dts_eagle_enable_asm(ac, _is_hpx_enabled,
-				 AUDPROC_MODULE_ID_DTS_HPX_PREMIX);
-}
-
-/**
- * msm_dts_eagle_deinit_pre() - Deinitialize DTS premix module
- * @ac:	Deinitialize premix module in the ASM session.
- *
- * Deinitialize DTS premix module on provided ASM session
- *
- * Return: Currently does nothing so 0.
- */
-int msm_dts_eagle_deinit_pre(struct audio_client *ac)
-{
-	return 0;
-}
-
-/**
- * msm_dts_eagle_init_post() - Initialize DTS postmix module
- * @port_id:	Port id for the ADM session.
- * @copp_idx:	Copp idx for the ADM session.
- *
- * Initialize DTS postmix module on ADM session
- *
- * Return: Return failure if any.
- */
-int msm_dts_eagle_init_post(int port_id, int copp_idx)
-{
-	return msm_dts_eagle_enable_adm(port_id, copp_idx, _is_hpx_enabled);
-}
-
-/**
- * msm_dts_eagle_deinit_post() - Deinitialize DTS postmix module
- * @port_id:	Port id for the ADM session.
- * @topology:	Topology in use.
- *
- * Deinitialize DTS postmix module on ADM session
- *
- * Return: Currently does nothing so 0.
- */
-int msm_dts_eagle_deinit_post(int port_id, int topology)
-{
-	return 0;
-}
-
-/**
- * msm_dts_eagle_init_master_module() - Initialize both DTS modules
- * @ac:	Initialize modules in the ASM session.
- *
- * Initialize DTS modules on ASM session
- *
- * Return: Success.
- */
-int msm_dts_eagle_init_master_module(struct audio_client *ac)
-{
-	_set_audioclient(ac);
-	msm_dts_eagle_enable_asm(ac, _is_hpx_enabled,
-				 AUDPROC_MODULE_ID_DTS_HPX_PREMIX);
-	msm_dts_eagle_enable_asm(ac, _is_hpx_enabled,
-				 AUDPROC_MODULE_ID_DTS_HPX_POSTMIX);
-	return 0;
-}
-
-/**
- * msm_dts_eagle_deinit_master_module() - Deinitialize both DTS modules
- * @ac:	Deinitialize modules in the ASM session.
- *
- * Deinitialize DTS modules on ASM session
- *
- * Return: Success.
- */
-int msm_dts_eagle_deinit_master_module(struct audio_client *ac)
-{
-	msm_dts_eagle_deinit_pre(ac);
-	msm_dts_eagle_deinit_post(-1, 0);
-	_clear_audioclient();
-	return 0;
-}
-
-/**
- * msm_dts_eagle_is_hpx_on() - Check if HPX effects are On
- *
- * Check if HPX effects are On
- *
- * Return: On/Off.
- */
-int msm_dts_eagle_is_hpx_on(void)
-{
-	return _is_hpx_enabled;
-}
-
-/**
- * msm_dts_eagle_pcm_new() - Create hwdep node
- * @runtime:	snd_soc_pcm_runtime structure.
- *
- * Create hwdep node
- *
- * Return: Success.
- */
-int msm_dts_eagle_pcm_new(struct snd_soc_pcm_runtime *runtime)
-{
-	if (!_ref_cnt++) {
-		_init_cb_descs();
-		_reg_ion_mem();
-	}
-	return 0;
-}
-
-/**
- * msm_dts_eagle_pcm_free() - remove hwdep node
- * @pcm:	snd_pcm structure.
- *
- * Remove hwdep node
- *
- * Return: void.
- */
-void msm_dts_eagle_pcm_free(struct snd_pcm *pcm)
-{
-	if (!--_ref_cnt)
-		_unreg_ion_mem();
-	vfree(_depc);
-}
-
-MODULE_DESCRIPTION("DTS EAGLE platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c b/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
index 5c6f1df..159f44c 100644
--- a/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dts-srs-tm-config.c
@@ -20,7 +20,6 @@
 #include <sound/control.h>
 #include <sound/q6adm-v2.h>
 #include <sound/asound.h>
-#include <sound/msm-dts-eagle.h>
 #include "msm-dts-srs-tm-config.h"
 #include "msm-pcm-routing-v2.h"
 
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 421769e..3a6cbe6 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1313,7 +1313,7 @@
 		}
 
 		size = sizeof(*user) + userarg32.payload_size;
-		user = kmalloc(size, GFP_KERNEL);
+		user = kzalloc(size, GFP_KERNEL);
 		if (!user) {
 			dev_err(rtd->dev,
 				"%s: Allocation failed event status size %d\n",
@@ -1335,7 +1335,7 @@
 			err = -EFAULT;
 		}
 		if (!err) {
-			user32 = kmalloc(size, GFP_KERNEL);
+			user32 = kzalloc(size, GFP_KERNEL);
 			if (!user32) {
 				dev_err(rtd->dev,
 					"%s: Allocation event user status size %d\n",
@@ -1380,7 +1380,7 @@
 		}
 
 		size = sizeof(*user) + userarg32.payload_size;
-		user = kmalloc(size, GFP_KERNEL);
+		user = kzalloc(size, GFP_KERNEL);
 		if (!user) {
 			dev_err(rtd->dev,
 				"%s: Allocation failed event status size %d\n",
@@ -1400,7 +1400,7 @@
 			err = -EFAULT;
 		}
 		if (!err) {
-			user32 = kmalloc(size, GFP_KERNEL);
+			user32 = kzalloc(size, GFP_KERNEL);
 			if (!user32) {
 				dev_err(rtd->dev,
 					"%s: Allocation event user status size %d\n",
@@ -1810,7 +1810,7 @@
 
 		size = sizeof(struct snd_lsm_event_status) +
 		userarg.payload_size;
-		user = kmalloc(size, GFP_KERNEL);
+		user = kzalloc(size, GFP_KERNEL);
 		if (!user) {
 			dev_err(rtd->dev,
 				"%s: Allocation failed event status size %d\n",
@@ -1871,7 +1871,7 @@
 
 		size = sizeof(struct snd_lsm_event_status_v3) +
 			userarg.payload_size;
-		user = kmalloc(size, GFP_KERNEL);
+		user = kzalloc(size, GFP_KERNEL);
 		if (!user) {
 			dev_err(rtd->dev,
 				"%s: Allocation failed event status size %d\n",
@@ -2243,21 +2243,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
-	sample_rate = ucontrol->value.integer.value[2];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.sample_rate = ucontrol->value.integer.value[2];
 
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -2270,28 +2267,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
index f668e95..7ef1ca8 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c
@@ -559,21 +559,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -586,28 +583,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -618,21 +612,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -645,28 +636,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 9b7c6fb..325d642 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -842,26 +842,21 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
-
-	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
-	if (ret < 0)
-		pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_put failed, err %d\n",
-			__func__, ret);
-
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
 	return ret;
 }
 
@@ -870,29 +865,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
-		pr_err("%s: msm_pcm_playback_app_type_cfg_ctl_get failed, err: %d\n",
-			__func__, ret);
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
-
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -903,26 +894,21 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
-
-	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
-	if (ret < 0)
-		pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_put failed, err: %d\n",
-			__func__, ret);
-
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
+	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
+						      be_id, &cfg_data);
+	if (ret < 0)
+		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
 
 	return ret;
 }
@@ -932,28 +918,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
-		pr_err("%s: msm_pcm_capture_app_type_cfg_ctl_get failed, err: %d\n",
-			__func__, ret);
+		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+		       __func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 1799d0d..74e99d3 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -30,6 +30,7 @@
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>
 #include <linux/msm_audio_ion.h>
+#include <linux/msm_audio.h>
 
 #include <linux/of_device.h>
 #include <sound/tlv.h>
@@ -37,6 +38,7 @@
 
 #include "msm-pcm-q6-v2.h"
 #include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
 
 enum stream_state {
 	IDLE = 0,
@@ -148,6 +150,8 @@
 	uint32_t idx = 0;
 	uint32_t size = 0;
 	uint8_t buf_index;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
 
 	switch (opcode) {
 	case ASM_DATA_EVENT_WRITE_DONE_V2: {
@@ -224,6 +228,29 @@
 		}
 		break;
 	}
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS: {
+		pr_debug("%s: ASM_STREAM_EVENT (0x%x)\n", __func__, opcode);
+		if (!substream) {
+			pr_err("%s: substream is NULL.\n", __func__);
+			return;
+		}
+
+		rtd = substream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+
+		break;
+	}
 	case APR_BASIC_RSP_RESULT: {
 		switch (payload[0]) {
 		case ASM_SESSION_CMD_RUN_V2:
@@ -253,6 +280,10 @@
 			}
 			atomic_set(&prtd->start, 1);
 			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS:",
+				__func__);
+			break;
 		default:
 			pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
 				__func__, payload[0], payload[1]);
@@ -661,6 +692,7 @@
 	prtd->set_channel_map = false;
 	prtd->reset_event = false;
 	runtime->private_data = prtd;
+	msm_adsp_init_mixer_ctl_pp_event_queue(soc_prtd);
 
 	return 0;
 }
@@ -803,6 +835,7 @@
 	}
 	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
 						SNDRV_PCM_STREAM_PLAYBACK);
+	msm_adsp_clean_mixer_ctl_pp_event_queue(soc_prtd);
 	kfree(prtd);
 	runtime->private_data = NULL;
 
@@ -1036,6 +1069,182 @@
 	.mmap		= msm_pcm_mmap,
 };
 
+static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *pcm = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_platform *platform = snd_soc_component_to_platform(pcm);
+	struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+	struct snd_pcm_substream *substream;
+	struct msm_audio *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+
+	if (!pdata) {
+		pr_err("%s pdata is NULL\n", __func__);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	if (!substream) {
+		pr_err("%s substream not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (!substream->runtime) {
+		pr_err("%s substream runtime not found\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = substream->runtime->private_data;
+	if (prtd->audio_client == NULL) {
+		pr_err("%s prtd is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d",
+			__func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
+					sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s param length=%d  exceeds limit",
+			__func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_pcm_adsp_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+	fe_audio_adsp_stream_cmd_config_control[0].private_value =
+		rtd->dai_link->id;
+	pr_debug("Registering new mixer ctl %s\n", mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_audio_adsp_stream_cmd_config_control,
+		ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("%s: added new pcm FE with name %s, id %d, cpu dai %s, device no %d\n",
+		 __func__, rtd->dai_link->name, rtd->dai_link->id,
+		 rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_audio_adsp_callback_config_control[0].name = mixer_str;
+	fe_audio_adsp_callback_config_control[0].private_value =
+		rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_audio_adsp_callback_config_control,
+			ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
 static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
 {
 	int rc = 0;
@@ -1347,21 +1556,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -1374,28 +1580,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_RX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -1406,21 +1609,18 @@
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
 	int be_id = ucontrol->value.integer.value[3];
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate = 48000;
 
-	app_type = ucontrol->value.integer.value[0];
-	acdb_dev_id = ucontrol->value.integer.value[1];
+	cfg_data.app_type = ucontrol->value.integer.value[0];
+	cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
 	if (ucontrol->value.integer.value[2] != 0)
-		sample_rate = ucontrol->value.integer.value[2];
+		cfg_data.sample_rate = ucontrol->value.integer.value[2];
 	pr_debug("%s: fe_id- %llu session_type- %d be_id- %d app_type- %d acdb_dev_id- %d sample_rate- %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 	ret = msm_pcm_routing_reg_stream_app_type_cfg(fe_id, session_type,
-						      be_id, app_type,
-						      acdb_dev_id, sample_rate);
+						      be_id, &cfg_data);
 	if (ret < 0)
 		pr_err("%s: msm_pcm_routing_reg_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
@@ -1433,28 +1633,25 @@
 {
 	u64 fe_id = kcontrol->private_value;
 	int session_type = SESSION_TYPE_TX;
-	int be_id = ucontrol->value.integer.value[3];
+	int be_id = 0;
+	struct msm_pcm_stream_app_type_cfg cfg_data = {0};
 	int ret = 0;
-	int app_type;
-	int acdb_dev_id;
-	int sample_rate;
 
 	ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, session_type,
-						      be_id, &app_type,
-						      &acdb_dev_id,
-						      &sample_rate);
+						      &be_id, &cfg_data);
 	if (ret < 0) {
 		pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
 			__func__, ret);
 		goto done;
 	}
 
-	ucontrol->value.integer.value[0] = app_type;
-	ucontrol->value.integer.value[1] = acdb_dev_id;
-	ucontrol->value.integer.value[2] = sample_rate;
+	ucontrol->value.integer.value[0] = cfg_data.app_type;
+	ucontrol->value.integer.value[1] = cfg_data.acdb_dev_id;
+	ucontrol->value.integer.value[2] = cfg_data.sample_rate;
+	ucontrol->value.integer.value[3] = be_id;
 	pr_debug("%s: fedai_id %llu, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fe_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data.app_type, cfg_data.acdb_dev_id, cfg_data.sample_rate);
 done:
 	return ret;
 }
@@ -1550,6 +1747,16 @@
 		pr_err("%s: Could not add pcm Compress Control %d\n",
 			__func__, ret);
 
+	ret = msm_pcm_add_audio_adsp_stream_cmd_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm ADSP Stream Cmd Control\n",
+			__func__);
+
+	ret = msm_pcm_add_audio_adsp_stream_callback_control(rtd);
+	if (ret)
+		pr_err("%s: Could not add pcm ADSP Stream Callback Control\n",
+			__func__);
+
 	return ret;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
index 1dfbd7a..7335951 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-devdep.c
@@ -16,8 +16,6 @@
 #include <linux/module.h>
 #include <sound/hwdep.h>
 #include <sound/devdep_params.h>
-#include <sound/msm-dts-eagle.h>
-
 #include "msm-pcm-routing-devdep.h"
 #include "msm-ds2-dap-config.h"
 
@@ -56,23 +54,6 @@
 	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER:
 		ret = msm_ds2_dap_ioctl(hw, file, cmd, argp);
 		break;
-	case DTS_EAGLE_IOCTL_GET_CACHE_SIZE:
-	case DTS_EAGLE_IOCTL_SET_CACHE_SIZE:
-	case DTS_EAGLE_IOCTL_GET_PARAM:
-	case DTS_EAGLE_IOCTL_SET_PARAM:
-	case DTS_EAGLE_IOCTL_SET_CACHE_BLOCK:
-	case DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE:
-	case DTS_EAGLE_IOCTL_GET_LICENSE:
-	case DTS_EAGLE_IOCTL_SET_LICENSE:
-	case DTS_EAGLE_IOCTL_SEND_LICENSE:
-	case DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS:
-		ret = msm_dts_eagle_ioctl(cmd, arg);
-		if (ret == -EPERM) {
-			pr_err("%s called with invalid control 0x%X\n",
-				__func__, cmd);
-			ret = -EINVAL;
-		}
-		break;
 	default:
 		pr_err("%s called with invalid control 0x%X\n", __func__, cmd);
 		ret = -EINVAL;
@@ -84,7 +65,6 @@
 void msm_pcm_routing_hwdep_free(struct snd_pcm *pcm)
 {
 	pr_debug("%s\n", __func__);
-	msm_dts_eagle_pcm_free(pcm);
 }
 
 #ifdef CONFIG_COMPAT
@@ -109,23 +89,6 @@
 	case SNDRV_DEVDEP_DAP_IOCTL_GET_VISUALIZER32:
 		ret = msm_ds2_dap_compat_ioctl(hw, file, cmd, argp);
 		break;
-	case DTS_EAGLE_IOCTL_GET_CACHE_SIZE32:
-	case DTS_EAGLE_IOCTL_SET_CACHE_SIZE32:
-	case DTS_EAGLE_IOCTL_GET_PARAM32:
-	case DTS_EAGLE_IOCTL_SET_PARAM32:
-	case DTS_EAGLE_IOCTL_SET_CACHE_BLOCK32:
-	case DTS_EAGLE_IOCTL_SET_ACTIVE_DEVICE32:
-	case DTS_EAGLE_IOCTL_GET_LICENSE32:
-	case DTS_EAGLE_IOCTL_SET_LICENSE32:
-	case DTS_EAGLE_IOCTL_SEND_LICENSE32:
-	case DTS_EAGLE_IOCTL_SET_VOLUME_COMMANDS32:
-		ret = msm_dts_eagle_compat_ioctl(cmd, arg);
-		if (ret == -EPERM) {
-			pr_err("%s called with invalid control 0x%X\n",
-				__func__, cmd);
-			ret = -EINVAL;
-		}
-		break;
 	default:
 		pr_err("%s called with invalid control 0x%X\n", __func__, cmd);
 		ret = -EINVAL;
@@ -171,6 +134,6 @@
 #ifdef CONFIG_COMPAT
 	hwdep->ops.ioctl_compat = msm_pcm_routing_hwdep_compat_ioctl;
 #endif
-	return msm_dts_eagle_pcm_new(runtime);
+	return rc;
 }
 #endif
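
The routing-driver diff below reworks msm_pcm_routing_reg_stream_app_type_cfg() and its getter to pass a single struct msm_pcm_stream_app_type_cfg pointer instead of separate app_type/acdb_dev_id/sample_rate arguments, with the getter now reporting the last configured BE ID. A minimal sketch of the caller side, using only the names visible in this diff (the wrapper function itself is hypothetical; error handling and locking are omitted):

	static int example_app_type_cfg_put(u64 fe_id,
					    struct snd_ctl_elem_value *ucontrol)
	{
		/* Defaults mirror the put handlers above: 48 kHz unless overridden. */
		struct msm_pcm_stream_app_type_cfg cfg_data = {0, 0, 48000};
		int be_id = ucontrol->value.integer.value[3];

		cfg_data.app_type = ucontrol->value.integer.value[0];
		cfg_data.acdb_dev_id = ucontrol->value.integer.value[1];
		if (ucontrol->value.integer.value[2] != 0)
			cfg_data.sample_rate = ucontrol->value.integer.value[2];

		/* One struct pointer now carries what used to be three scalars. */
		return msm_pcm_routing_reg_stream_app_type_cfg(fe_id, SESSION_TYPE_RX,
							       be_id, &cfg_data);
	}
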
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 465634b..019cbae 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -33,7 +33,6 @@
 #include <sound/pcm_params.h>
 #include <sound/q6core.h>
 #include <sound/audio_cal_utils.h>
-#include <sound/msm-dts-eagle.h>
 #include <sound/audio_effects.h>
 #include <sound/hwdep.h>
 
@@ -80,6 +79,13 @@
 static int msm_route_ext_ec_ref;
 static bool is_custom_stereo_on;
 static bool is_ds2_on;
+static bool swap_ch;
+
+#define WEIGHT_0_DB 0x4000
+/* all the FEs which can support channel mixer */
+static struct msm_pcm_channel_mixer channel_mixer[MSM_FRONTEND_DAI_MM_SIZE];
+/* input BE for each FE */
+static int channel_input[MSM_FRONTEND_DAI_MM_SIZE][ADM_MAX_CHANNELS];
 
 enum {
 	MADNONE,
@@ -136,7 +142,8 @@
 };
 static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX];
 
-static int msm_routing_send_device_pp_params(int port_id,  int copp_idx);
+static int msm_routing_send_device_pp_params(int port_id,  int copp_idx,
+					     int fe_id);
 
 static int msm_routing_get_bit_width(unsigned int format)
 {
@@ -208,10 +215,6 @@
 					__func__, topology, port_id, rc);
 		}
 		break;
-	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX:
-		pr_debug("%s: DTS_EAGLE_COPP_TOPOLOGY_ID\n", __func__);
-		msm_dts_eagle_init_post(port_id, copp_idx);
-		break;
 	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE:
 		pr_debug("%s: TOPOLOGY_ID_AUDIOSPHERE\n", __func__);
 		rc = msm_qti_pp_asphere_init(port_id, copp_idx);
@@ -246,10 +249,6 @@
 			msm_dolby_dap_deinit(port_id);
 		}
 		break;
-	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX:
-		pr_debug("%s: DTS_EAGLE_COPP_TOPOLOGY_ID\n", __func__);
-		msm_dts_eagle_deinit_post(port_id, topology);
-		break;
 	case ADM_CMD_COPP_OPEN_TOPOLOGY_ID_AUDIOSPHERE:
 		pr_debug("%s: TOPOLOGY_ID_AUDIOSPHERE\n", __func__);
 		msm_qti_pp_asphere_deinit(port_id);
@@ -291,253 +290,256 @@
 
 #define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
 struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
-	{ PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
-	{ PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
-	{ SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
-	{ SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
-	{ HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
-	{ INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
-	{ INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
-	{ INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
-	{ INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
-	{ RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_RX},
+	{ PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_PRI_I2S_TX},
+	{ SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_RX},
+	{ SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_0_TX},
+	{ HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_HDMI},
+	{ INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_RX},
+	{ INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_BT_SCO_TX},
+	{ INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_RX},
+	{ INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_INT_FM_TX},
+	{ RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_AFE_PCM_RX},
-	{ RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_AFE_PCM_TX},
-	{ AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_AUXPCM_RX},
-	{ AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_AUXPCM_TX},
-	{ VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_VOICE_PLAYBACK_TX},
-	{ VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_VOICE2_PLAYBACK_TX},
-	{ VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INCALL_RECORD_RX},
-	{ VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INCALL_RECORD_TX},
-	{ MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
-	{ MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
-	{ SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
-	{ SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
-	{ SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
-	{ SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
-	{ SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
-	{ SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
-	{ SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
-	{ SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
-	{ SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
-	{ SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
-	{ SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
-	{ SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
-	{ SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
-	{ SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
-	{ SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
-	{ SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
-	{ SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
-	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
-	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
-	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
-	{ AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_RX},
+	{ MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_MI2S_TX},
+	{ SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SEC_I2S_RX},
+	{ SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_RX},
+	{ SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_1_TX},
+	{ SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_RX},
+	{ SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_2_TX},
+	{ SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_RX},
+	{ SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_3_TX},
+	{ SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_RX},
+	{ SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_4_TX},
+	{ SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_RX},
+	{ SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_5_TX},
+	{ SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_RX},
+	{ SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_6_TX},
+	{ SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_RX},
+	{ SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_7_TX},
+	{ SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_RX},
+	{ SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_SLIMBUS_8_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_RX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_TX},
+	{ SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, {0}, LPASS_BE_STUB_1_TX},
+	{ AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_MI2S_RX},
-	{ AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_MI2S_TX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_RX,  0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX,  0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_MI2S_RX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_TX,  0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_MI2S_TX,  0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_MI2S_TX},
-	{ AFE_PORT_ID_PRIMARY_MI2S_RX,    0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_MI2S_RX,    0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_MI2S_RX},
-	{ AFE_PORT_ID_PRIMARY_MI2S_TX,    0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_MI2S_TX,    0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_MI2S_TX},
-	{ AFE_PORT_ID_TERTIARY_MI2S_RX,   0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_MI2S_RX,   0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_MI2S_RX},
-	{ AFE_PORT_ID_TERTIARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_MI2S_TX},
-	{ AUDIO_PORT_ID_I2S_RX,           0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AUDIO_PORT_ID_I2S_RX,           0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_AUDIO_I2S_RX},
-	{ AFE_PORT_ID_SECONDARY_PCM_RX,	  0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_PCM_RX,	  0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_AUXPCM_RX},
-	{ AFE_PORT_ID_SECONDARY_PCM_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_PCM_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_AUXPCM_TX},
-	{ AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
-	{ AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_SPDIF_RX},
+	{ AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_MI2S_RX_SD1},
-	{ AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUIN_MI2S_RX},
-	{ AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUIN_MI2S_TX},
-	{ AFE_PORT_ID_SENARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SENARY_MI2S_TX,   0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SENARY_MI2S_TX},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_0},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_0},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_1},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_1},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_2},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_2},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_3},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_3},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_4},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_4},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_5},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_5},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_6},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_6},
-	{ AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_RX_7},
-	{ AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_PRI_TDM_TX_7},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_0},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_0},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_1},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_1},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_2},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_2},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_3},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_3},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_4},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_4},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_5},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_5},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_6},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_6},
-	{ AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_RX_7},
-	{ AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_SEC_TDM_TX_7},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_0},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_0},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_1},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_1},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_2},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_2},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_3},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_3},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_4},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_4},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_5},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_5},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_6},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_6},
-	{ AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_RX_7},
-	{ AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_TDM_TX_7},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_0},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_0},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_1},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_1},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_2},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_2},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_3},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_3},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_4},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_4},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_5},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_5},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_6},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_6},
-	{ AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_RX_7},
-	{ AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_TDM_TX_7},
-	{ INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
-	{ AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_INT_BT_A2DP_RX},
+	{ AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_USB_AUDIO_RX},
-	{ AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_USB_AUDIO_TX},
-	{ DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
-	{ AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
+	  LPASS_BE_DISPLAY_PORT},
+	{ AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_AUXPCM_RX},
-	{ AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_TERT_AUXPCM_TX},
-	{ AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_AUXPCM_RX},
-	{ AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_QUAT_AUXPCM_TX},
-	{ AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT0_MI2S_RX},
-	{ AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT0_MI2S_TX},
-	{ AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT1_MI2S_RX},
-	{ AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT1_MI2S_TX},
-	{ AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT2_MI2S_RX},
-	{ AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT2_MI2S_TX},
-	{ AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT3_MI2S_RX},
-	{ AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT3_MI2S_TX},
-	{ AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT4_MI2S_RX},
-	{ AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT4_MI2S_TX},
-	{ AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT5_MI2S_RX},
-	{ AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT5_MI2S_TX},
-	{ AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT6_MI2S_RX},
-	{ AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+	{ AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, {0},
 	  LPASS_BE_INT6_MI2S_TX},
 };
 
@@ -603,6 +605,9 @@
 	/* MULTIMEDIA19 */
 	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
 	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+	/* MULTIMEDIA20 */
+	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
 	/* CS_VOICE */
 	{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
 	 {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -678,6 +683,8 @@
 static struct msm_pcm_stream_app_type_cfg
 	fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2][MSM_BACKEND_DAI_MAX];
 
+static int last_be_id_configured[MSM_FRONTEND_DAI_MAX][MAX_SESSION_TYPES];
+
 /* The caller of this should acquire routing lock */
 void msm_pcm_routing_get_bedai_info(int be_idx,
 				    struct msm_pcm_routing_bdai_data *be_dai)
@@ -744,15 +751,22 @@
 	return rc;
 }
 
-int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
-					    int be_id, int app_type,
-					    int acdb_dev_id, int sample_rate)
+int msm_pcm_routing_reg_stream_app_type_cfg(
+	int fedai_id, int session_type, int be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data)
 {
 	int ret = 0;
 
+	if (cfg_data == NULL) {
+		pr_err("%s: Received NULL pointer for cfg_data\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
 	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
 		__func__, fedai_id, session_type, be_id,
-		app_type, acdb_dev_id, sample_rate);
+		cfg_data->app_type, cfg_data->acdb_dev_id,
+		cfg_data->sample_rate);
 
 	if (!is_mm_lsm_fe_id(fedai_id)) {
 		pr_err("%s: Invalid machine driver ID %d\n",
@@ -774,15 +788,18 @@
 		goto done;
 	}
 
-	fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type = app_type;
-	fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id =
-		acdb_dev_id;
-	fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate =
-		sample_rate;
+	fe_dai_app_type_cfg[fedai_id][session_type][be_id] = *cfg_data;
+
+	/*
+	 * Remember the BE ID of the most recently set configuration so the
+	 * get mixer control knows which entry to return.
+	 */
+	last_be_id_configured[fedai_id][session_type] = be_id;
 
 done:
 	return ret;
 }
+EXPORT_SYMBOL(msm_pcm_routing_reg_stream_app_type_cfg);
 
 /**
  * msm_pcm_routing_get_stream_app_type_cfg
@@ -794,55 +811,48 @@
  * fedai_id - Passed value, front end ID for which app type config is wanted
  * session_type - Passed value, session type for which app type config
  *                is wanted
- * be_id - Passed value, back end device id for which app type config is wanted
- * app_type - Returned value, app type used by app type config
- * acdb_dev_id - Returned value, ACDB device ID used by app type config
- * sample_rate - Returned value, sample rate used by app type config
+ * be_id - Returned value, back end device id the app type config data is for
+ * cfg_data - Returned value, configuration data used by app type config
  */
-int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
-					    int be_id, int *app_type,
-					    int *acdb_dev_id, int *sample_rate)
+int msm_pcm_routing_get_stream_app_type_cfg(
+	int fedai_id, int session_type, int *bedai_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data)
 {
+	int be_id;
 	int ret = 0;
 
-	if (app_type == NULL) {
-		pr_err("%s: NULL pointer sent for app_type\n", __func__);
+	if (bedai_id == NULL) {
+		pr_err("%s: Received NULL pointer for backend ID\n", __func__);
 		ret = -EINVAL;
 		goto done;
-	} else if (acdb_dev_id == NULL) {
-		pr_err("%s: NULL pointer sent for acdb_dev_id\n", __func__);
-		ret = -EINVAL;
-		goto done;
-	} else if (sample_rate == NULL) {
-		pr_err("%s: NULL pointer sent for sample rate\n", __func__);
+	} else if (cfg_data == NULL) {
+		pr_err("%s: NULL pointer sent for cfg_data\n", __func__);
 		ret = -EINVAL;
 		goto done;
 	} else if (!is_mm_lsm_fe_id(fedai_id)) {
-		pr_err("%s: Invalid FE ID %d\n",
-			__func__, fedai_id);
+		pr_err("%s: Invalid FE ID %d\n", __func__, fedai_id);
 		ret = -EINVAL;
 		goto done;
 	} else if (session_type != SESSION_TYPE_RX &&
 		   session_type != SESSION_TYPE_TX) {
-		pr_err("%s: Invalid session type %d\n",
-			__func__, session_type);
+		pr_err("%s: Invalid session type %d\n", __func__, session_type);
 		ret = -EINVAL;
 		goto done;
-	} else if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
-		pr_err("%s: Received out of bounds be_id %d\n",
-			__func__, be_id);
-		return -EINVAL;
 	}
 
-	*app_type = fe_dai_app_type_cfg[fedai_id][session_type][be_id].app_type;
-	*acdb_dev_id =
-		fe_dai_app_type_cfg[fedai_id][session_type][be_id].acdb_dev_id;
-	*sample_rate =
-		fe_dai_app_type_cfg[fedai_id][session_type][be_id].sample_rate;
+	be_id = last_be_id_configured[fedai_id][session_type];
+	if (be_id < 0 || be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: Invalid BE ID %d\n", __func__, be_id);
+		ret = -EINVAL;
+		goto done;
+	}
 
+	*bedai_id = be_id;
+	*cfg_data = fe_dai_app_type_cfg[fedai_id][session_type][be_id];
 	pr_debug("%s: fedai_id %d, session_type %d, be_id %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
-		__func__, fedai_id, session_type, be_id,
-		*app_type, *acdb_dev_id, *sample_rate);
+		__func__, fedai_id, session_type, *bedai_id,
+		cfg_data->app_type, cfg_data->acdb_dev_id,
+		cfg_data->sample_rate);
 done:
 	return ret;
 }
@@ -1083,7 +1093,10 @@
 		port_type = MSM_AFE_PORT_TYPE_RX;
 	} else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) {
 		session_type = SESSION_TYPE_TX;
-		path_type = ADM_PATH_LIVE_REC;
+		if (passthr_mode != LEGACY_PCM)
+			path_type = ADM_PATH_COMPRESSED_TX;
+		else
+			path_type = ADM_PATH_LIVE_REC;
 		port_type = MSM_AFE_PORT_TYPE_TX;
 	} else {
 		pr_err("%s: invalid stream type %d\n", __func__, stream_type);
@@ -1100,7 +1113,7 @@
 	msm_qti_pp_send_eq_values(fe_id);
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
 		if (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))
-			msm_bedais[i].passthr_mode = passthr_mode;
+			msm_bedais[i].passthr_mode[fe_id] = passthr_mode;
 
 		if (!is_be_dai_extproc(i) &&
 			(afe_get_port_type(msm_bedais[i].port_id) ==
@@ -1204,7 +1217,7 @@
 			    COMPRESSED_PASSTHROUGH_GEN) {
 				msm_routing_send_device_pp_params(
 				msm_bedais[i].port_id,
-				copp_idx);
+				copp_idx, fe_id);
 			}
 		}
 	}
@@ -1255,6 +1268,62 @@
 	return session_id;
 }
 
+static int msm_pcm_routing_channel_mixer(int fe_id, bool perf_mode,
+				int dspst_id, int stream_type)
+{
+	int copp_idx = 0;
+	int sess_type = 0;
+	int i = 0, j = 0, be_id;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return 0;
+	}
+
+	if (!(channel_mixer[fe_id].enable)) {
+		pr_debug("%s: channel mixer not enabled for FE %d\n",
+			__func__, fe_id);
+		return 0;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK)
+		sess_type = SESSION_TYPE_RX;
+	else
+		sess_type = SESSION_TYPE_TX;
+
+	for (i = 0; i < ADM_MAX_CHANNELS && channel_input[fe_id][i] > 0;
+		++i) {
+		be_id = channel_input[fe_id][i] - 1;
+		channel_mixer[fe_id].input_channels[i] =
+						msm_bedais[be_id].channel;
+
+		if ((msm_bedais[be_id].active) &&
+			test_bit(fe_id,
+			&msm_bedais[be_id].fe_sessions[0])) {
+			unsigned long copp =
+				session_copp_map[fe_id][sess_type][be_id];
+			for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
+				if (test_bit(j, &copp)) {
+					copp_idx = j;
+					break;
+				}
+			}
+
+			pr_debug("%s: fe %d, be %d, channel %d, copp %d\n",
+				__func__,
+				fe_id, be_id, msm_bedais[be_id].channel,
+				copp_idx);
+			ret = adm_programable_channel_mixer(
+					msm_bedais[be_id].port_id,
+					copp_idx, dspst_id, sess_type,
+					channel_mixer + fe_id, i);
+		}
+	}
+
+	return ret;
+}
+
 int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
 					int dspst_id, int stream_type)
 {
@@ -1263,6 +1332,7 @@
 	u32 channels, sample_rate;
 	uint16_t bits_per_sample = 16;
 	uint32_t passthr_mode = LEGACY_PCM;
+	int ret = 0;
 
 	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
 		/* bad ID assigned in machine driver */
@@ -1302,7 +1372,7 @@
 				channels = msm_bedais[i].channel;
 			else
 				channels = msm_bedais[i].adm_override_ch;
-			msm_bedais[i].passthr_mode =
+			msm_bedais[i].passthr_mode[fedai_id] =
 				LEGACY_PCM;
 
 			bits_per_sample = msm_routing_get_bit_width(
@@ -1373,7 +1443,7 @@
 				}
 			}
 			if ((perf_mode == LEGACY_PCM_MODE) &&
-				(msm_bedais[i].passthr_mode ==
+				(msm_bedais[i].passthr_mode[fedai_id] ==
 				LEGACY_PCM))
 				msm_pcm_routing_cfg_pp(msm_bedais[i].port_id,
 						       copp_idx, topology,
@@ -1386,8 +1456,11 @@
 		adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
 		msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
 	}
+
+	ret = msm_pcm_routing_channel_mixer(fedai_id, perf_mode,
+				dspst_id, stream_type);
 	mutex_unlock(&routing_lock);
-	return 0;
+	return ret;
 }
 
 int msm_pcm_routing_reg_phy_stream_v2(int fedai_id, int perf_mode,
@@ -1458,7 +1531,7 @@
 			if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID ||
 				topology == DS2_ADM_COPP_TOPOLOGY_ID) &&
 			    (fdai->perf_mode == LEGACY_PCM_MODE) &&
-			    (msm_bedais[i].passthr_mode ==
+			    (msm_bedais[i].passthr_mode[fedai_id] ==
 					LEGACY_PCM))
 				msm_pcm_routing_deinit_pp(msm_bedais[i].port_id,
 							  topology);
@@ -1493,7 +1566,7 @@
 	u32 channels, sample_rate;
 	uint16_t bits_per_sample = 16;
 	struct msm_pcm_routing_fdai_data *fdai;
-	uint32_t passthr_mode = msm_bedais[reg].passthr_mode;
+	uint32_t passthr_mode;
 	bool is_lsm;
 
 	pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
@@ -1510,6 +1583,7 @@
 		return;
 	}
 
+	passthr_mode = msm_bedais[reg].passthr_mode[val];
 	if (afe_get_port_type(msm_bedais[reg].port_id) ==
 		MSM_AFE_PORT_TYPE_RX) {
 		session_type = SESSION_TYPE_RX;
@@ -1519,7 +1593,10 @@
 			path_type = ADM_PATH_PLAYBACK;
 	} else {
 		session_type = SESSION_TYPE_TX;
-		path_type = ADM_PATH_LIVE_REC;
+		if (passthr_mode != LEGACY_PCM)
+			path_type = ADM_PATH_COMPRESSED_TX;
+		else
+			path_type = ADM_PATH_LIVE_REC;
 	}
 	is_lsm = (val >= MSM_FRONTEND_DAI_LSM1) &&
 			 (val <= MSM_FRONTEND_DAI_LSM8);
@@ -2618,6 +2695,649 @@
 	return 1;
 }
 
+static int msm_pcm_get_channel_rule_index(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] = channel_mixer[fe_id].rule;
+
+	return 0;
+}
+
+static int msm_pcm_put_channel_rule_index(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	channel_mixer[fe_id].rule = ucontrol->value.integer.value[0];
+
+	return 1;
+}
+
+static int msm_pcm_get_out_chs(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	ucontrol->value.integer.value[0] =
+		channel_mixer[fe_id].output_channel;
+	return 0;
+}
+
+static int msm_pcm_put_out_chs(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: fe_id is %d, output channels = %d\n", __func__,
+			fe_id,
+			(unsigned int)(ucontrol->value.integer.value[0]));
+	channel_mixer[fe_id].output_channel =
+			(unsigned int)(ucontrol->value.integer.value[0]);
+
+	return 1;
+}
+
+static const char *const ch_mixer[] = {"Disable", "Enable"};
+
+/* If a new backend is added, this array needs to be updated as well */
+static const char *const be_name[] = {
+"ZERO", "PRI_I2S_RX", "PRI_I2S_TX", "SLIM_0_RX",
+"SLIM_0_TX", "HDMI_RX", "INT_BT_SCO_RX", "INT_BT_SCO_TX",
+"INT_FM_RX", "INT_FM_TX", "AFE_PCM_RX", "AFE_PCM_TX",
+"AUXPCM_RX", "AUXPCM_TX", "VOICE_PLAYBACK_TX", "VOICE2_PLAYBACK_TX",
+"INCALL_RECORD_RX", "INCALL_RECORD_TX", "MI2S_RX", "MI2S_TX",
+"SEC_I2S_RX", "SLIM_1_RX", "SLIM_1_TX", "SLIM_2_RX",
+"SLIM_2_TX", "SLIM_3_RX", "SLIM_3_TX", "SLIM_4_RX",
+"SLIM_4_TX", "SLIM_5_RX", "SLIM_5_TX", "SLIM_6_RX",
+"SLIM_6_TX", "SLIM_7_RX", "SLIM_7_TX", "SLIM_8_RX",
+"SLIM_8_TX", "EXTPROC_RX", "EXTPROC_TX", "EXPROC_EC_TX",
+"QUAT_MI2S_RX", "QUAT_MI2S_TX", "SECOND_MI2S_RX", "SECOND_MI2S_TX",
+"PRI_MI2S_RX", "PRI_MI2S_TX", "TERT_MI2S_RX", "TERT_MI2S_TX",
+"AUDIO_I2S_RX", "SEC_AUXPCM_RX", "SEC_AUXPCM_TX", "SPDIF_RX",
+"SECOND_MI2S_RX_SD1", "QUIN_MI2S_RX", "QUIN_MI2S_TX", "SENARY_MI2S_TX",
+"PRI_TDM_RX_0", "PRI_TDM_TX_0", "PRI_TDM_RX_1", "PRI_TDM_TX_1",
+"PRI_TDM_RX_2", "PRI_TDM_TX_2", "PRI_TDM_RX_3", "PRI_TDM_TX_3",
+"PRI_TDM_RX_4", "PRI_TDM_TX_4", "PRI_TDM_RX_5", "PRI_TDM_TX_5",
+"PRI_TDM_RX_6", "PRI_TDM_TX_6", "PRI_TDM_RX_7", "PRI_TDM_TX_7",
+"SEC_TDM_RX_0", "SEC_TDM_TX_0", "SEC_TDM_RX_1", "SEC_TDM_TX_1",
+"SEC_TDM_RX_2", "SEC_TDM_TX_2", "SEC_TDM_RX_3", "SEC_TDM_TX_3",
+"SEC_TDM_RX_4", "SEC_TDM_TX_4", "SEC_TDM_RX_5", "SEC_TDM_TX_5",
+"SEC_TDM_RX_6", "SEC_TDM_TX_6", "SEC_TDM_RX_7", "SEC_TDM_TX_7",
+"TERT_TDM_RX_0", "TERT_TDM_TX_0", "TERT_TDM_RX_1", "TERT_TDM_TX_1",
+"TERT_TDM_RX_2", "TERT_TDM_TX_2", "TERT_TDM_RX_3", "TERT_TDM_TX_3",
+"TERT_TDM_RX_4", "TERT_TDM_TX_4", "TERT_TDM_RX_5", "TERT_TDM_TX_5",
+"TERT_TDM_RX_6", "TERT_TDM_TX_6", "TERT_TDM_RX_7", "TERT_TDM_TX_7",
+"QUAT_TDM_RX_0", "QUAT_TDM_TX_0", "QUAT_TDM_RX_1", "QUAT_TDM_TX_1",
+"QUAT_TDM_RX_2", "QUAT_TDM_TX_2", "QUAT_TDM_RX_3", "QUAT_TDM_TX_3",
+"QUAT_TDM_RX_4", "QUAT_TDM_TX_4", "QUAT_TDM_RX_5", "QUAT_TDM_TX_5",
+"QUAT_TDM_RX_6", "QUAT_TDM_TX_6", "QUAT_TDM_RX_7", "QUAT_TDM_TX_7",
+"INT_BT_A2DP_RX", "USB_RX", "USB_TX", "DISPLAY_PORT_RX",
+"TERT_AUXPCM_RX", "TERT_AUXPCM_TX", "QUAT_AUXPCM_RX", "QUAT_AUXPCM_TX",
+"INT0_MI2S_RX", "INT0_MI2S_TX", "INT1_MI2S_RX", "INT1_MI2S_TX",
+"INT2_MI2S_RX", "INT2_MI2S_TX", "INT3_MI2S_RX", "INT3_MI2S_TX",
+"INT4_MI2S_RX", "INT4_MI2S_TX", "INT5_MI2S_RX", "INT5_MI2S_TX",
+"INT6_MI2S_RX", "INT6_MI2S_TX"
+};
+
+static SOC_ENUM_SINGLE_DECL(mm1_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm2_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA2, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm3_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA3, ch_mixer);
+static SOC_ENUM_SINGLE_DECL(mm4_channel_mux,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA4, ch_mixer);
+
+static SOC_ENUM_DOUBLE_DECL(mm1_ch1_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 0, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch2_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 1, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch3_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 2, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch4_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 3, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch5_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 4, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch6_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 5, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch7_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 6, be_name);
+static SOC_ENUM_DOUBLE_DECL(mm1_ch8_enum,
+	SND_SOC_NOPM, MSM_FRONTEND_DAI_MULTIMEDIA1, 7, be_name);
+
+static int msm_pcm_get_ctl_enum_info(struct snd_ctl_elem_info *uinfo,
+		unsigned int channels,
+		unsigned int items, const char *const names[])
+{
+	if (uinfo->value.enumerated.item >= items)
+		uinfo->value.enumerated.item = items - 1;
+
+	WARN(strlen(names[uinfo->value.enumerated.item]) >=
+		sizeof(uinfo->value.enumerated.name),
+		"ALSA: too long item name '%s'\n",
+		names[uinfo->value.enumerated.item]);
+	strlcpy(uinfo->value.enumerated.name,
+		names[uinfo->value.enumerated.item],
+		sizeof(uinfo->value.enumerated.name));
+	return 0;
+}
+
+static int msm_pcm_channel_mixer_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+
+	uinfo->value.enumerated.items = ARRAY_SIZE(ch_mixer);
+	msm_pcm_get_ctl_enum_info(uinfo, 1, e->items, e->texts);
+
+	return 0;
+}
+static int msm_pcm_channel_mixer_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_enum *)
+			kcontrol->private_value)->shift_l;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: FE %d %s\n", __func__,
+		fe_id,
+		channel_mixer[fe_id].enable ? "Enabled" : "Disabled");
+	ucontrol->value.enumerated.item[0] = channel_mixer[fe_id].enable;
+	return 0;
+}
+
+static int msm_pcm_channel_mixer_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0;
+
+	fe_id = ((struct soc_enum *)
+			kcontrol->private_value)->shift_l;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+
+	channel_mixer[fe_id].enable = ucontrol->value.enumerated.item[0];
+	pr_debug("%s: %s FE %d\n", __func__,
+		channel_mixer[fe_id].enable ? "Enable" : "Disable",
+		fe_id);
+	return 0;
+}
+
+static int msm_pcm_channel_input_be_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+
+	uinfo->value.enumerated.items = ARRAY_SIZE(be_name);
+	msm_pcm_get_ctl_enum_info(uinfo, 1, e->items, e->texts);
+
+	return 0;
+}
+
+static int msm_pcm_channel_input_be_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	u16 fe_id = 0, in_ch = 0;
+
+	fe_id = e->shift_l;
+	in_ch = e->shift_r;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (in_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, in_ch);
+		return -EINVAL;
+	}
+
+	channel_input[fe_id][in_ch] = ucontrol->value.enumerated.item[0];
+	return 1;
+}
+
+static int msm_pcm_channel_input_be_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+	u16 fe_id = 0, in_ch = 0;
+
+	fe_id = e->shift_l;
+	in_ch = e->shift_r;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (in_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid input channel %d\n", __func__, in_ch);
+		return -EINVAL;
+	}
+
+	ucontrol->value.enumerated.item[0] = channel_input[fe_id][in_ch];
+	return 1;
+}
+
+
+static int msm_pcm_channel_weight_info(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = ADM_MAX_CHANNELS;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = WEIGHT_0_DB;
+
+	return 0;
+}
+
+static int msm_pcm_channel_weight_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0, out_ch = 0;
+	int i, weight;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	out_ch = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->rshift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (out_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid output channel %d\n", __func__, out_ch);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: FE_ID: %d, channel weight %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld\n",
+		__func__, fe_id,
+		ucontrol->value.integer.value[0],
+		ucontrol->value.integer.value[1],
+		ucontrol->value.integer.value[2],
+		ucontrol->value.integer.value[3],
+		ucontrol->value.integer.value[4],
+		ucontrol->value.integer.value[5],
+		ucontrol->value.integer.value[6],
+		ucontrol->value.integer.value[7]);
+
+	for (i = 0; i < ADM_MAX_CHANNELS; ++i) {
+		weight = ucontrol->value.integer.value[i];
+		channel_mixer[fe_id].channel_weight[out_ch][i] = weight;
+		pr_debug("%s: FE_ID %d, output %d input %d weight %d\n",
+			__func__, fe_id, out_ch, i,
+			channel_mixer[fe_id].channel_weight[out_ch][i]);
+	}
+
+	return 0;
+}
+
+static int msm_pcm_channel_weight_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	u16 fe_id = 0, out_ch = 0;
+	int i;
+
+	fe_id = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->shift;
+	out_ch = ((struct soc_multi_mixer_control *)
+			kcontrol->private_value)->rshift;
+	if (fe_id >= MSM_FRONTEND_DAI_MM_SIZE) {
+		pr_err("%s: invalid FE %d\n", __func__, fe_id);
+		return -EINVAL;
+	}
+	if (out_ch >= ADM_MAX_CHANNELS) {
+		pr_err("%s: invalid output channel %d\n", __func__, out_ch);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ADM_MAX_CHANNELS; ++i)
+		ucontrol->value.integer.value[i] =
+			channel_mixer[fe_id].channel_weight[out_ch][i];
+
+	pr_debug("%s: FE_ID: %d, weight %ld, %ld, %ld, %ld, %ld, %ld, %ld, %ld\n",
+		__func__, fe_id,
+		ucontrol->value.integer.value[0],
+		ucontrol->value.integer.value[1],
+		ucontrol->value.integer.value[2],
+		ucontrol->value.integer.value[3],
+		ucontrol->value.integer.value[4],
+		ucontrol->value.integer.value[5],
+		ucontrol->value.integer.value[6],
+		ucontrol->value.integer.value[7]);
+
+	return 0;
+}
+
+static const struct snd_kcontrol_new channel_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA1, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia2 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA2, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia3 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA3, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia4 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA4, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia5 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA5, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+	SOC_SINGLE_EXT("MultiMedia6 Channel Rule", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA6, 8, 0,
+			msm_pcm_get_channel_rule_index,
+			msm_pcm_put_channel_rule_index),
+
+	SOC_SINGLE_EXT("MultiMedia1 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA1, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia2 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA2, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia3 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA3, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia4 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA4, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia5 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA5, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	SOC_SINGLE_EXT("MultiMedia6 Channels", SND_SOC_NOPM,
+			MSM_FRONTEND_DAI_MULTIMEDIA6, 8, 0,
+			msm_pcm_get_out_chs,
+			msm_pcm_put_out_chs),
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm1_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm2_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm3_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia4 Channel Mixer",
+	.info = msm_pcm_channel_mixer_info,
+	.get = msm_pcm_channel_mixer_get,
+	.put = msm_pcm_channel_mixer_put,
+	.private_value = (unsigned long)&(mm4_channel_mux)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 1, }
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel3",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 2,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel4",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 3,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel5",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 4,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel6",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 5,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel7",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 6,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Output Channel8",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{ .shift = MSM_FRONTEND_DAI_MULTIMEDIA1, .rshift = 7,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 1,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia2 Output Channel3",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA2, .rshift = 2,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Output Channel1",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA3, .rshift = 0,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia3 Output Channel2",
+	.info = msm_pcm_channel_weight_info,
+	.get = msm_pcm_channel_weight_get,
+	.put = msm_pcm_channel_weight_put,
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control)
+		{.shift = MSM_FRONTEND_DAI_MULTIMEDIA3, .rshift = 1,}
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel1",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch1_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel2",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch2_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel3",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch3_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel4",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch4_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel5",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch5_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel6",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch6_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel7",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch7_enum)
+	},
+	{
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+	.name = "MultiMedia1 Channel8",
+	.info = msm_pcm_channel_input_be_info,
+	.get = msm_pcm_channel_input_be_get,
+	.put = msm_pcm_channel_input_be_put,
+	.private_value = (unsigned long)&(mm1_ch8_enum)
+	},
+};
 static int msm_ec_ref_ch_get(struct snd_kcontrol *kcontrol,
 			       struct snd_ctl_elem_value *ucontrol)
 {
@@ -5378,6 +6098,57 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -5427,6 +6198,9 @@
 	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new quat_tdm_tx_0_mixer_controls[] = {
@@ -5529,6 +6303,9 @@
 	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
 	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = {
@@ -5580,6 +6357,9 @@
 	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
 	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = {
@@ -5631,6 +6411,9 @@
 	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
 	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia20", MSM_BACKEND_DAI_QUAT_TDM_RX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 };
 
 static const struct snd_kcontrol_new mmul1_mixer_controls[] = {
@@ -6402,6 +7185,9 @@
 	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
 	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
 	MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
@@ -6442,6 +7228,70 @@
 	MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
 	msm_routing_put_audio_mixer),
 };
+
+static const struct snd_kcontrol_new mmul20_mixer_controls[] = {
+	SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_TX_2,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3,
+	MSM_FRONTEND_DAI_MULTIMEDIA20, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
 	SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
 	MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -7898,6 +8748,9 @@
 	SOC_SINGLE_EXT("SEC_AUX_PCM_UL_TX", MSM_BACKEND_DAI_AUXPCM_RX,
 	MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_AUXPCM_RX,
+	MSM_BACKEND_DAI_QUAT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new sec_auxpcm_rx_port_mixer_controls[] = {
@@ -8085,6 +8938,9 @@
 	SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
 	MSM_BACKEND_DAI_SLIMBUS_0_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_PRI_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new usb_rx_port_mixer_controls[] = {
@@ -8115,6 +8971,9 @@
 	SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
 	MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new pri_tdm_rx_0_port_mixer_controls[] = {
@@ -9205,6 +10064,9 @@
 	SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
 	MSM_BACKEND_DAI_SECONDARY_MI2S_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new sec_mi2s_rx_port_mixer_controls[] = {
@@ -9226,6 +10088,9 @@
 	SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
 	MSM_BACKEND_DAI_INT_FM_TX, 1, 0, msm_routing_get_port_mixer,
 	msm_routing_put_port_mixer),
+	SOC_SINGLE_EXT("SLIM_8_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+	MSM_BACKEND_DAI_SLIMBUS_8_TX, 1, 0, msm_routing_get_port_mixer,
+	msm_routing_put_port_mixer),
 };
 
 static const struct snd_kcontrol_new lsm1_mixer_controls[] = {
@@ -10467,6 +11332,7 @@
 	SND_SOC_DAPM_AIF_IN("MM_DL14", "MultiMedia14 Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL20", "MultiMedia20 Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0),
@@ -10479,6 +11345,7 @@
 	SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("MM_UL20", "MultiMedia20 Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("CS-VOICE_DL1", "CS-VOICE Playback", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_OUT("CS-VOICE_UL1", "CS-VOICE Capture", 0, 0, 0, 0),
 	SND_SOC_DAPM_AIF_IN("VOICE2_DL", "Voice2 Playback", 0, 0, 0, 0),
@@ -11176,6 +12043,9 @@
 	SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				tert_tdm_rx_3_mixer_controls,
 				ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_4_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
 	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				quat_tdm_rx_0_mixer_controls,
 				ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
@@ -11213,6 +12083,8 @@
 	mmul18_mixer_controls, ARRAY_SIZE(mmul18_mixer_controls)),
 	SND_SOC_DAPM_MIXER("MultiMedia19 Mixer", SND_SOC_NOPM, 0, 0,
 	mmul19_mixer_controls, ARRAY_SIZE(mmul19_mixer_controls)),
+	SND_SOC_DAPM_MIXER("MultiMedia20 Mixer", SND_SOC_NOPM, 0, 0,
+	mmul20_mixer_controls, ARRAY_SIZE(mmul20_mixer_controls)),
 	SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
 	auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
 	SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -11782,6 +12654,7 @@
 	{"MultiMedia18 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"MultiMedia19 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia18 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
 	{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
 	{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -12200,6 +13073,24 @@
 	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
 	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
 
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
+
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -12216,6 +13107,7 @@
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"},
 	{"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"},
 
 	{"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12288,6 +13180,7 @@
 	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia14", "MM_DL14"},
 	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"},
 	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"},
 	{"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"},
 
 	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12306,6 +13199,7 @@
 	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia14", "MM_DL14"},
 	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"},
 	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"},
 	{"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"},
 
 	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia1", "MM_DL1"},
@@ -12324,6 +13218,7 @@
 	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia14", "MM_DL14"},
 	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"},
 	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"},
 	{"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"},
 
 	{"MultiMedia1 Mixer", "PRI_TX", "PRI_I2S_TX"},
@@ -12361,6 +13256,7 @@
 	{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia2 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
 	{"MultiMedia3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -12374,6 +13270,7 @@
 	{"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
 	{"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
 	{"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"MultiMedia6 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 
 	{"MultiMedia1 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
 	{"MultiMedia1 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
@@ -12503,6 +13400,27 @@
 	{"MultiMedia9 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
 	{"MultiMedia9 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
 
+	{"MultiMedia20 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia20 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+	{"MultiMedia20 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+	{"MultiMedia20 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"},
+	{"MultiMedia20 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"},
+
 	{"MultiMedia1 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
 	{"MultiMedia2 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
 	{"MultiMedia4 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"},
@@ -12621,6 +13539,7 @@
 	{"MM_UL17", NULL, "MultiMedia17 Mixer"},
 	{"MM_UL18", NULL, "MultiMedia18 Mixer"},
 	{"MM_UL19", NULL, "MultiMedia19 Mixer"},
+	{"MM_UL20", NULL, "MultiMedia20 Mixer"},
 
 	{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -12954,6 +13873,7 @@
 	{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICE2_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICEMMODE1_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICEMMODE2_UL", NULL, "VOC_EXT_EC MUX"},
 
@@ -13269,6 +14189,7 @@
 	{"SLIM7_UL_HL", NULL, "HFP_SLIM7_UL_HL"},
 	{"HFP_SLIM7_UL_HL", "Switch", "SLIMBUS_7_TX"},
 	{"AUX_PCM_RX", NULL, "AUXPCM_DL_HL"},
+	{"AUX_PCM_RX", NULL, "INTHFP_DL_HL"},
 	{"AUXPCM_UL_HL", NULL, "AUX_PCM_TX"},
 	{"MI2S_RX", NULL, "MI2S_DL_HL"},
 	{"MI2S_UL_HL", NULL, "MI2S_TX"},
@@ -13673,6 +14594,7 @@
 	{"AUX_PCM_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"AUX_PCM_RX Port Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"},
 	{"AUX_PCM_RX Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"},
+	{"AUX_PCM_RX Port Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"},
 	{"AUX_PCM_RX", NULL, "AUX_PCM_RX Port Mixer"},
 
 	{"SEC_AUXPCM_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
@@ -13828,6 +14750,7 @@
 	{"PRI_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"PRI_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"PRI_MI2S_RX Port Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+	{"PRI_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"PRI_MI2S_RX", NULL, "PRI_MI2S_RX Port Mixer"},
 
 	{"SEC_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
@@ -13836,6 +14759,7 @@
 	{"SEC_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
 	{"SEC_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"SEC_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+	{"SEC_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"SEC_MI2S_RX", NULL, "SEC_MI2S_RX Port Mixer"},
 
 	{"TERT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
@@ -13843,6 +14767,7 @@
 	{"TERT_MI2S_RX Port Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
 	{"TERT_MI2S_RX Port Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
 	{"TERT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+	{"TERT_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"TERT_MI2S_RX", NULL, "TERT_MI2S_RX Port Mixer"},
 
 	{"QUAT_MI2S_RX Port Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
@@ -13852,6 +14777,7 @@
 	{"QUAT_MI2S_RX Port Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"QUAT_MI2S_RX Port Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
 	{"QUAT_MI2S_RX Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"},
+	{"QUAT_MI2S_RX Port Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"QUAT_MI2S_RX", NULL, "QUAT_MI2S_RX Port Mixer"},
 
 	/* Backend Enablement */
@@ -13907,6 +14833,7 @@
 	{"BE_OUT", NULL, "TERT_TDM_RX_1"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_2"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_3"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_4"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_0"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_1"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_2"},
@@ -14044,7 +14971,7 @@
 			clear_bit(idx,
 				  &session_copp_map[i][session_type][be_id]);
 			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
-				(bedai->passthr_mode == LEGACY_PCM))
+				(bedai->passthr_mode[i] == LEGACY_PCM))
 				msm_pcm_routing_deinit_pp(bedai->port_id,
 							  topology);
 		}
@@ -14053,6 +14980,10 @@
 	bedai->active = 0;
 	bedai->sample_rate = 0;
 	bedai->channel = 0;
+	for (i = 0; i < MSM_FRONTEND_DAI_MAX; i++) {
+		if (bedai->passthr_mode[i] != LISTEN)
+			bedai->passthr_mode[i] = LEGACY_PCM;
+	}
 	mutex_unlock(&routing_lock);
 
 	return 0;
@@ -14062,7 +14993,8 @@
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	unsigned int be_id = rtd->dai_link->id;
-	int i, path_type, session_type, topology;
+	int i, path_type, topology;
+	int session_type = INVALID_SESSION;
 	struct msm_pcm_routing_bdai_data *bedai;
 	u32 channels, sample_rate;
 	uint16_t bits_per_sample = 16, voc_path_type;
@@ -14081,17 +15013,6 @@
 
 	bedai = &msm_bedais[be_id];
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (bedai->passthr_mode != LEGACY_PCM)
-			path_type = ADM_PATH_COMPRESSED_RX;
-		else
-			path_type = ADM_PATH_PLAYBACK;
-		session_type = SESSION_TYPE_RX;
-	} else {
-		path_type = ADM_PATH_LIVE_REC;
-		session_type = SESSION_TYPE_TX;
-	}
-
 	mutex_lock(&routing_lock);
 	if (bedai->active == 1)
 		goto done; /* Ignore prepare if back-end already active */
@@ -14108,6 +15029,17 @@
 				route_check_fe_id_adm_support(i)))
 			continue;
 
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+			if (bedai->passthr_mode[i] != LEGACY_PCM)
+				path_type = ADM_PATH_COMPRESSED_RX;
+			else
+				path_type = ADM_PATH_PLAYBACK;
+			session_type = SESSION_TYPE_RX;
+		} else {
+			path_type = ADM_PATH_LIVE_REC;
+			session_type = SESSION_TYPE_TX;
+		}
+
 		is_lsm = (i >= MSM_FRONTEND_DAI_LSM1) &&
 				 (i <= MSM_FRONTEND_DAI_LSM8);
 		fdai = &fe_dai_map[i][session_type];
@@ -14186,9 +15118,9 @@
 
 			msm_pcm_routing_build_matrix(i, session_type, path_type,
 						     fdai->perf_mode,
-						     bedai->passthr_mode);
+						     bedai->passthr_mode[i]);
 			if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
-				(bedai->passthr_mode == LEGACY_PCM))
+				(bedai->passthr_mode[i] == LEGACY_PCM))
 				msm_pcm_routing_cfg_pp(bedai->port_id, copp_idx,
 						       topology, channels);
 		}
@@ -14200,10 +15132,10 @@
 			pr_debug("%s voice session_id: 0x%x\n", __func__,
 				 session_id);
 
-			if (session_type == SESSION_TYPE_TX)
-				voc_path_type = TX_PATH;
-			else
+			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 				voc_path_type = RX_PATH;
+			else
+				voc_path_type = TX_PATH;
 
 			voc_set_route_flag(session_id, voc_path_type, 1);
 
@@ -14253,7 +15185,8 @@
 	return 0;
 }
 
-static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
+static int msm_routing_send_device_pp_params(int port_id, int copp_idx,
+					     int fe_id)
 {
 	int index, topo_id, be_idx;
 	unsigned long pp_config = 0;
@@ -14296,8 +15229,8 @@
 		return -EINVAL;
 	}
 
-	if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
-		(msm_bedais[be_idx].passthr_mode == LISTEN))
+	if ((msm_bedais[be_idx].passthr_mode[fe_id] == LEGACY_PCM) ||
+		(msm_bedais[be_idx].passthr_mode[fe_id] == LISTEN))
 		compr_passthr_mode = false;
 
 	pp_config = msm_bedais_pp_params[index].pp_params_config;
@@ -14356,12 +15289,12 @@
 		return -EINVAL;
 	}
 
-	if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
-		(msm_bedais[be_idx].passthr_mode == LISTEN))
-		compr_passthr_mode = false;
-
 	for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
 				MSM_FRONTEND_DAI_MM_SIZE) {
+		if ((msm_bedais[be_idx].passthr_mode[i] == LEGACY_PCM) ||
+			(msm_bedais[be_idx].passthr_mode[i] == LISTEN))
+			compr_passthr_mode = false;
+
 		for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
 			unsigned long copp =
 				session_copp_map[i]
@@ -14374,7 +15307,7 @@
 				continue;
 		pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n",
 			 __func__, port_id, copp, msm_bedais[be_idx].active,
-			 msm_bedais[be_idx].passthr_mode);
+			 msm_bedais[be_idx].passthr_mode[i]);
 		switch (pp_id) {
 		case ADM_PP_PARAM_MUTE_ID:
 			pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
@@ -14507,6 +15440,67 @@
 	},
 };
 
+static int msm_routing_stereo_channel_reverse_control_get(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	ucontrol->value.integer.value[0] = swap_ch;
+	pr_debug("%s: Swap channel value: %ld\n", __func__,
+				ucontrol->value.integer.value[0]);
+	return 0;
+}
+
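+/*
+ * Apply the requested channel swap to every active RX copp by walking all
+ * active backends and the front-end sessions routed to them.
+ */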
+static int msm_routing_stereo_channel_reverse_control_put(
+			struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	int i, idx, be_index, port_id;
+	int ret = 0;
+	unsigned long copp;
+
+	pr_debug("%s Swap channel value:%ld\n", __func__,
+				ucontrol->value.integer.value[0]);
+
+	swap_ch = ucontrol->value.integer.value[0];
+
+	mutex_lock(&routing_lock);
+	for (be_index = 0; be_index < MSM_BACKEND_DAI_MAX; be_index++) {
+		port_id = msm_bedais[be_index].port_id;
+		if (!msm_bedais[be_index].active)
+			continue;
+
+		for_each_set_bit(i, &msm_bedais[be_index].fe_sessions[0],
+				MSM_FRONTEND_DAI_MM_SIZE) {
+			copp = session_copp_map[i][SESSION_TYPE_RX][be_index];
+			for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
+				if (!test_bit(idx, &copp))
+					continue;
+
+				pr_debug("%s: swap channel control of portid:%d, coppid:%d\n",
+					 __func__, port_id, idx);
+				ret = adm_swap_speaker_channels(
+					port_id, idx,
+					msm_bedais[be_index].sample_rate,
+					swap_ch);
+				if (ret) {
+					pr_err("%s:Swap_channel failed, err=%d\n",
+						 __func__, ret);
+					goto done;
+				}
+			}
+		}
+	}
+done:
+	mutex_unlock(&routing_lock);
+	return ret;
+}
+
+static const struct snd_kcontrol_new stereo_channel_reverse_control[] = {
+	SOC_SINGLE_EXT("Swap channel", SND_SOC_NOPM, 0,
+	1, 0, msm_routing_stereo_channel_reverse_control_get,
+	msm_routing_stereo_channel_reverse_control_put),
+};
+
 static const struct snd_pcm_ops msm_routing_pcm_ops = {
 	.hw_params	= msm_pcm_routing_hw_params,
 	.close          = msm_pcm_routing_close,
@@ -14545,6 +15539,9 @@
 	snd_soc_add_platform_controls(platform, ec_ref_param_controls,
 				ARRAY_SIZE(ec_ref_param_controls));
 
+	snd_soc_add_platform_controls(platform, channel_mixer_controls,
+				ARRAY_SIZE(channel_mixer_controls));
+
 	msm_qti_pp_add_controls(platform);
 
 	msm_dts_srs_tm_add_controls(platform);
@@ -14563,8 +15560,6 @@
 		msm_routing_be_dai_name_table_mixer_controls,
 		ARRAY_SIZE(msm_routing_be_dai_name_table_mixer_controls));
 
-	msm_dts_eagle_add_controls(platform);
-
 	snd_soc_add_platform_controls(platform, msm_source_tracking_controls,
 				ARRAY_SIZE(msm_source_tracking_controls));
 	snd_soc_add_platform_controls(platform, adm_channel_config_controls,
@@ -14572,6 +15567,8 @@
 
 	snd_soc_add_platform_controls(platform, aptx_dec_license_controls,
 					ARRAY_SIZE(aptx_dec_license_controls));
+	snd_soc_add_platform_controls(platform, stereo_channel_reverse_control,
+				ARRAY_SIZE(stereo_channel_reverse_control));
 	return 0;
 }
 
@@ -14699,6 +15696,7 @@
 		(routing_cb)msm_pcm_get_dev_acdb_id_by_port_id);
 
 	memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+	memset(&last_be_id_configured, 0, sizeof(last_be_id_configured));
 
 	return platform_driver_register(&msm_routing_pcm_driver);
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index fcd155e..19e7260 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -192,6 +192,7 @@
 	MSM_FRONTEND_DAI_MULTIMEDIA17,
 	MSM_FRONTEND_DAI_MULTIMEDIA18,
 	MSM_FRONTEND_DAI_MULTIMEDIA19,
+	MSM_FRONTEND_DAI_MULTIMEDIA20,
 	MSM_FRONTEND_DAI_CS_VOICE,
 	MSM_FRONTEND_DAI_VOIP,
 	MSM_FRONTEND_DAI_AFE_RX,
@@ -217,8 +218,8 @@
 	MSM_FRONTEND_DAI_MAX,
 };
 
-#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA19 + 1)
-#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA19
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA20 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA20
 
 enum {
 	MSM_BACKEND_DAI_PRI_I2S_RX = 0,
@@ -384,6 +385,7 @@
 #define INVALID_SESSION -1
 #define SESSION_TYPE_RX 0
 #define SESSION_TYPE_TX 1
+#define MAX_SESSION_TYPES 2
 #define INT_RX_VOL_MAX_STEPS 0x2000
 #define INT_RX_VOL_GAIN 0x2000
 
@@ -423,7 +425,7 @@
 	unsigned int  channel;
 	unsigned int  format;
 	unsigned int  adm_override_ch;
-	u32 passthr_mode;
+	u32 passthr_mode[MSM_FRONTEND_DAI_MAX];
 	char *name;
 };
 
@@ -475,10 +477,10 @@
 void msm_pcm_routing_acquire_lock(void);
 void msm_pcm_routing_release_lock(void);
 
-int msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int session_type,
-					     int be_id, int app_type,
-					     int acdb_dev_id, int sample_rate);
-int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
-					    int be_id, int *app_type,
-					    int *acdb_dev_id, int *sample_rate);
+int msm_pcm_routing_reg_stream_app_type_cfg(
+	int fedai_id, int session_type, int be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data);
+int msm_pcm_routing_get_stream_app_type_cfg(
+	int fedai_id, int session_type, int *be_id,
+	struct msm_pcm_stream_app_type_cfg *cfg_data);
 #endif /*_MSM_PCM_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index c60b27f..a885e1e 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -45,21 +45,6 @@
 	EQ_BAND_MAX,
 };
 
-struct msm_audio_eq_band {
-	uint16_t     band_idx; /* The band index, 0 .. 11 */
-	uint32_t     filter_type; /* Filter band type */
-	uint32_t     center_freq_hz; /* Filter band center frequency */
-	uint32_t     filter_gain; /* Filter band initial gain (dB) */
-			/* Range is +12 dB to -12 dB with 1dB increments. */
-	uint32_t     q_factor;
-} __packed;
-
-struct msm_audio_eq_stream_config {
-	uint32_t	enable; /* Number of consequtive bands specified */
-	uint32_t	num_bands;
-	struct msm_audio_eq_band	eq_bands[EQ_BAND_MAX];
-} __packed;
-
 /* Audio Sphere data structures */
 struct msm_audio_pp_asphere_state_s {
 	uint32_t enabled;
@@ -821,6 +806,286 @@
 	return 0;
 }
 
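+/*
+ * Attach an event queue to the DSP_STREAM_CALLBACK mixer control of this
+ * PCM device so that ADSP stream PP events can be buffered for userspace.
+ */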
+int msm_adsp_init_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_kcontrol *kctl;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (kctl->private_data != NULL) {
+		pr_err("%s: kctl_prtd is not NULL at initialization.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	kctl_prtd = kzalloc(sizeof(struct dsp_stream_callback_prtd),
+			GFP_KERNEL);
+	if (!kctl_prtd) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	spin_lock_init(&kctl_prtd->prtd_spin_lock);
+	INIT_LIST_HEAD(&kctl_prtd->event_queue);
+	kctl_prtd->event_count = 0;
+	kctl->private_data = kctl_prtd;
+
+done:
+	return ret;
+}
+
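+/*
+ * Drain any queued ADSP stream PP events and free the queue attached to
+ * the DSP_STREAM_CALLBACK mixer control of this PCM device.
+ */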
+int msm_adsp_clean_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_kcontrol *kctl;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct dsp_stream_callback_list *node, *n;
+	unsigned long spin_flags;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+
+	if (!rtd) {
+		pr_err("%s: rtd is NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kctl->private_data;
+	if (kctl_prtd != NULL) {
+		spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+		/* clean the queue */
+		list_for_each_entry_safe(node, n,
+				&kctl_prtd->event_queue, list) {
+			list_del(&node->list);
+			kctl_prtd->event_count--;
+			pr_debug("%s: %d remaining events after del.\n",
+				__func__, kctl_prtd->event_count);
+			kfree(node);
+		}
+		spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+	}
+
+	kfree(kctl_prtd);
+	kctl->private_data = NULL;
+
+done:
+	return ret;
+}
+
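+/*
+ * Queue an ADSP stream PP event payload on the per-control event queue,
+ * dropping the oldest entry if the queue is full, and notify userspace
+ * through the DSP_STREAM_CALLBACK mixer control.
+ */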
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+			uint32_t *payload)
+{
+	/* adsp pp event notifier */
+	struct snd_kcontrol *kctl;
+	struct snd_ctl_elem_value control;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct dsp_stream_callback_list *new_event;
+	struct dsp_stream_callback_list *oldest_event;
+	unsigned long spin_flags;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+	struct msm_adsp_event_data *event_data = NULL;
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	struct snd_ctl_elem_info kctl_info;
+
+	if (!rtd || !payload) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!rtd) ? "rtd" : "payload");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (rtd->card->snd_card == NULL) {
+		pr_err("%s: snd_card is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_ATOMIC);
+	if (!mixer_str) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+		rtd->pcm->device);
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	kfree(mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	event_data = (struct msm_adsp_event_data *)payload;
+	kctl->info(kctl, &kctl_info);
+	if (sizeof(struct msm_adsp_event_data)
+		+ event_data->payload_len > kctl_info.count) {
+		pr_err("%s: payload length exceeds limit of %u bytes.\n",
+			__func__, kctl_info.count);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kctl->private_data;
+	if (kctl_prtd == NULL) {
+		/* queue is not initialized */
+		ret = -EINVAL;
+		pr_err("%s: event queue is not initialized.\n", __func__);
+		goto done;
+	}
+
+	new_event = kzalloc(sizeof(struct dsp_stream_callback_list)
+			+ event_data->payload_len,
+			GFP_ATOMIC);
+	if (new_event == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	memcpy((void *)&new_event->event, (void *)payload,
+		   event_data->payload_len
+		   + sizeof(struct msm_adsp_event_data));
+
+	spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+	while (kctl_prtd->event_count >= DSP_STREAM_CALLBACK_QUEUE_SIZE) {
+		pr_info("%s: queue of size %d is full. delete oldest one.\n",
+			__func__, DSP_STREAM_CALLBACK_QUEUE_SIZE);
+		oldest_event = list_first_entry(&kctl_prtd->event_queue,
+				struct dsp_stream_callback_list, list);
+		pr_info("%s: event deleted: type %d length %d\n",
+			__func__, oldest_event->event.event_type,
+			oldest_event->event.payload_len);
+		list_del(&oldest_event->list);
+		kctl_prtd->event_count--;
+		kfree(oldest_event);
+	}
+
+	list_add_tail(&new_event->list, &kctl_prtd->event_queue);
+	kctl_prtd->event_count++;
+	spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+
+	control.id = kctl->id;
+	snd_ctl_notify(rtd->card->snd_card,
+			SNDRV_CTL_EVENT_MASK_INFO,
+			&control.id);
+
+done:
+	return ret;
+}
+
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count =
+		sizeof(((struct snd_ctl_elem_value *)0)->value.bytes.data);
+
+	return 0;
+}
+
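+/* Dequeue the oldest buffered ADSP event and copy it back to userspace. */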
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	uint32_t payload_size = 0;
+	struct dsp_stream_callback_list *oldest_event;
+	unsigned long spin_flags;
+	struct dsp_stream_callback_prtd *kctl_prtd = NULL;
+	int ret = 0;
+
+	kctl_prtd = (struct dsp_stream_callback_prtd *)
+			kcontrol->private_data;
+	if (kctl_prtd == NULL) {
+		pr_err("%s: ASM Stream PP event queue is not initialized.\n",
+			__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	spin_lock_irqsave(&kctl_prtd->prtd_spin_lock, spin_flags);
+	pr_debug("%s: %d events in queue.\n", __func__, kctl_prtd->event_count);
+	if (list_empty(&kctl_prtd->event_queue)) {
+		pr_err("%s: ASM Stream PP event queue is empty.\n", __func__);
+		ret = -EINVAL;
+		spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+		goto done;
+	}
+
+	oldest_event = list_first_entry(&kctl_prtd->event_queue,
+			struct dsp_stream_callback_list, list);
+	list_del(&oldest_event->list);
+	kctl_prtd->event_count--;
+	spin_unlock_irqrestore(&kctl_prtd->prtd_spin_lock, spin_flags);
+
+	payload_size = oldest_event->event.payload_len;
+	pr_debug("%s: event fetched: type %d length %d\n",
+			__func__, oldest_event->event.event_type,
+			oldest_event->event.payload_len);
+	memcpy(ucontrol->value.bytes.data, &oldest_event->event,
+		sizeof(struct msm_adsp_event_data) + payload_size);
+	kfree(oldest_event);
+
+done:
+	return ret;
+}
+
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	uinfo->count =
+		sizeof(((struct snd_ctl_elem_value *)0)->value.bytes.data);
+
+	return 0;
+}
+
 static int msm_multichannel_ec_primary_mic_ch_put(struct snd_kcontrol *kcontrol,
 			struct snd_ctl_elem_value *ucontrol)
 {
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
index 805fb3e..b67e873 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
@@ -13,7 +13,16 @@
 #define _MSM_QTI_PP_H_
 
 #include <sound/soc.h>
-
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+			uint32_t *payload);
+int msm_adsp_init_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd);
+int msm_adsp_clean_mixer_ctl_pp_event_queue(struct snd_soc_pcm_runtime *rtd);
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo);
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol);
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_info *uinfo);
 #ifdef CONFIG_QTI_PP
 void msm_qti_pp_send_eq_values(int fedai_id);
 int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
diff --git a/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
new file mode 100644
index 0000000..b1bb272
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/msm-transcode-loopback-q6-v2.c
@@ -0,0 +1,971 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/msm_audio_ion.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6asm-v2.h>
+#include <sound/pcm_params.h>
+#include <sound/timer.h>
+#include <sound/tlv.h>
+#include <sound/apr_audio-v2.h>
+#include <sound/compress_params.h>
+#include <sound/compress_offload.h>
+#include <sound/compress_driver.h>
+#include <linux/msm_audio.h>
+
+#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
+
+#define LOOPBACK_SESSION_MAX_NUM_STREAMS 2
+
+static DEFINE_MUTEX(transcode_loopback_session_lock);
+
+struct trans_loopback_pdata {
+	struct snd_compr_stream *cstream[MSM_FRONTEND_DAI_MAX];
+};
+
+struct loopback_stream {
+	struct snd_compr_stream *cstream;
+	uint32_t codec_format;
+	bool start;
+};
+
+enum loopback_session_state {
+	/* One or both streams not opened */
+	LOOPBACK_SESSION_CLOSE = 0,
+	/* Loopback streams opened */
+	LOOPBACK_SESSION_READY,
+	/* Loopback streams opened and formats configured */
+	LOOPBACK_SESSION_START,
+	/* Trigger issued on either stream while in START state */
+	LOOPBACK_SESSION_RUN
+};
+
+struct msm_transcode_loopback {
+	struct loopback_stream source;
+	struct loopback_stream sink;
+
+	struct snd_compr_caps source_compr_cap;
+	struct snd_compr_caps sink_compr_cap;
+
+	uint32_t instance;
+	uint32_t num_streams;
+	int session_state;
+
+	struct mutex lock;
+
+	int session_id;
+	struct audio_client *audio_client;
+};
+
+/* Transcode loopback global info struct */
+static struct msm_transcode_loopback transcode_info;
+
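+/*
+ * ASM event callback for the transcode loopback session. Media format and
+ * encoder/decoder events are forwarded to the DSP_STREAM_CALLBACK mixer
+ * control; basic command responses are only logged.
+ */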
+static void loopback_event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_transcode_loopback *trans =
+			(struct msm_transcode_loopback *)priv;
+	struct snd_soc_pcm_runtime *rtd;
+	struct snd_compr_stream *cstream;
+	struct audio_client *ac;
+	int stream_id;
+	int ret;
+
+	if (!trans || !payload) {
+		pr_err("%s: rtd or payload is NULL\n", __func__);
+		return;
+	}
+
+	cstream = trans->source.cstream;
+	ac = trans->audio_client;
+
+	/*
+	 * The token carried by the rest of the compressed commands encodes
+	 * the session id, stream id, direction, etc.
+	 */
+	stream_id = q6asm_get_stream_id_from_token(token);
+
+	switch (opcode) {
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+	case ASM_IEC_61937_MEDIA_FMT_EVENT:
+		pr_debug("%s: ASM_IEC_61937_MEDIA_FMT_EVENT\n", __func__);
+		rtd = cstream->private_data;
+		if (!rtd) {
+			pr_err("%s: rtd is NULL\n", __func__);
+			return;
+		}
+
+		ret = msm_adsp_inform_mixer_ctl(rtd, payload);
+		if (ret) {
+			pr_err("%s: failed to inform mixer ctrl. err = %d\n",
+				__func__, ret);
+			return;
+		}
+		break;
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN_V2:
+			pr_debug("%s: ASM_SESSION_CMD_RUN_V2:", __func__);
+			pr_debug("token 0x%x, stream id %d\n", token,
+				  stream_id);
+			break;
+		case ASM_STREAM_CMD_CLOSE:
+			pr_debug("%s: ASM_DATA_CMD_CLOSE:", __func__);
+			pr_debug("token 0x%x, stream id %d\n", token,
+				  stream_id);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	default:
+		pr_debug("%s: Not Supported Event opcode[0x%x]\n",
+			  __func__, opcode);
+		break;
+	}
+}
+
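+/*
+ * Fill in the compressed capabilities advertised for the capture (source)
+ * and playback (sink) ends of the loopback session.
+ */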
+static void populate_codec_list(struct msm_transcode_loopback *trans,
+				struct snd_compr_stream *cstream)
+{
+	struct snd_compr_caps compr_cap;
+
+	pr_debug("%s\n", __func__);
+
+	memset(&compr_cap, 0, sizeof(struct snd_compr_caps));
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		compr_cap.direction = SND_COMPRESS_CAPTURE;
+		compr_cap.num_codecs = 3;
+		compr_cap.codecs[0] = SND_AUDIOCODEC_PCM;
+		compr_cap.codecs[1] = SND_AUDIOCODEC_AC3;
+		compr_cap.codecs[2] = SND_AUDIOCODEC_EAC3;
+		memcpy(&trans->source_compr_cap, &compr_cap,
+				sizeof(struct snd_compr_caps));
+	}
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		compr_cap.direction = SND_COMPRESS_PLAYBACK;
+		compr_cap.num_codecs = 1;
+		compr_cap.codecs[0] = SND_AUDIOCODEC_PCM;
+		memcpy(&trans->sink_compr_cap, &compr_cap,
+				sizeof(struct snd_compr_caps));
+	}
+}
+
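+/*
+ * Open one end of the loopback: record the stream as source (capture) or
+ * sink (playback) and move the session to READY once both ends are open.
+ */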
+static int msm_transcode_loopback_open(struct snd_compr_stream *cstream)
+{
+	int ret = 0;
+	struct snd_compr_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd;
+	struct msm_transcode_loopback *trans = &transcode_info;
+	struct trans_loopback_pdata *pdata;
+
+	if (cstream == NULL) {
+		pr_err("%s: Invalid substream\n", __func__);
+		return -EINVAL;
+	}
+	runtime = cstream->runtime;
+	rtd = snd_pcm_substream_chip(cstream);
+	pdata = snd_soc_platform_get_drvdata(rtd->platform);
+	pdata->cstream[rtd->dai_link->id] = cstream;
+
+	mutex_lock(&trans->lock);
+	if (trans->num_streams > LOOPBACK_SESSION_MAX_NUM_STREAMS) {
+		pr_err("msm_transcode_open failed..invalid stream\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		if (trans->source.cstream == NULL) {
+			trans->source.cstream = cstream;
+			trans->num_streams++;
+		} else {
+			pr_err("%s: capture stream already opened\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+	} else if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		if (trans->sink.cstream == NULL) {
+			trans->sink.cstream = cstream;
+			trans->num_streams++;
+		} else {
+			pr_debug("%s: playback stream already opened\n",
+				__func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+	pr_debug("%s: num stream%d, stream name %s\n", __func__,
+		 trans->num_streams, cstream->name);
+
+	populate_codec_list(trans, cstream);
+
+	if (trans->num_streams == LOOPBACK_SESSION_MAX_NUM_STREAMS) {
+		pr_debug("%s: Moving loopback session to READY state %d\n",
+			 __func__, trans->session_state);
+		trans->session_state = LOOPBACK_SESSION_READY;
+	}
+
+	runtime->private_data = trans;
+	if (trans->num_streams == 1)
+		msm_adsp_init_mixer_ctl_pp_event_queue(rtd);
+exit:
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
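+/* Close the ASM session and deregister both routing streams, if active. */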
+static void stop_transcoding(struct msm_transcode_loopback *trans)
+{
+	struct snd_soc_pcm_runtime *soc_pcm_rx;
+	struct snd_soc_pcm_runtime *soc_pcm_tx;
+
+	if (trans->audio_client != NULL) {
+		q6asm_cmd(trans->audio_client, CMD_CLOSE);
+
+		if (trans->sink.cstream != NULL) {
+			soc_pcm_rx = trans->sink.cstream->private_data;
+			msm_pcm_routing_dereg_phy_stream(
+					soc_pcm_rx->dai_link->id,
+					SND_COMPRESS_PLAYBACK);
+		}
+		if (trans->source.cstream != NULL) {
+			soc_pcm_tx = trans->source.cstream->private_data;
+			msm_pcm_routing_dereg_phy_stream(
+					soc_pcm_tx->dai_link->id,
+					SND_COMPRESS_CAPTURE);
+		}
+		q6asm_audio_client_free(trans->audio_client);
+		trans->audio_client = NULL;
+	}
+}
+
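+/* Release one end of the loopback and tear the whole session down. */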
+static int msm_transcode_loopback_free(struct snd_compr_stream *cstream)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(cstream);
+	int ret = 0;
+
+	mutex_lock(&trans->lock);
+
+	pr_debug("%s: Transcode loopback end:%d, streams %d\n", __func__,
+		  cstream->direction, trans->num_streams);
+	trans->num_streams--;
+	stop_transcoding(trans);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK)
+		memset(&trans->sink, 0, sizeof(struct loopback_stream));
+	else if (cstream->direction == SND_COMPRESS_CAPTURE)
+		memset(&trans->source, 0, sizeof(struct loopback_stream));
+
+	trans->session_state = LOOPBACK_SESSION_CLOSE;
+	if (trans->num_streams == 1)
+		msm_adsp_clean_mixer_ctl_pp_event_queue(rtd);
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
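+/* START/STOP triggers map to ASM RUN/PAUSE for the loopback session. */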
+static int msm_transcode_loopback_trigger(struct snd_compr_stream *cstream,
+					  int cmd)
+{
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+
+		if (trans->session_state == LOOPBACK_SESSION_START) {
+			pr_debug("%s: Issue Loopback session %d RUN\n",
+				  __func__, trans->instance);
+			q6asm_run_nowait(trans->audio_client, 0, 0, 0);
+			trans->session_state = LOOPBACK_SESSION_RUN;
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("%s: Issue Loopback session %d STOP\n", __func__,
+			  trans->instance);
+		if (trans->session_state == LOOPBACK_SESSION_RUN)
+			q6asm_cmd_nowait(trans->audio_client, CMD_PAUSE);
+		trans->session_state = LOOPBACK_SESSION_START;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
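+/*
+ * Record the codec format for each end; once both ends are configured,
+ * open the ASM transcode loopback session and register the ADM routing
+ * for the capture and playback paths.
+ */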
+static int msm_transcode_loopback_set_params(struct snd_compr_stream *cstream,
+				struct snd_compr_params *codec_param)
+{
+
+	struct snd_compr_runtime *runtime = cstream->runtime;
+	struct msm_transcode_loopback *trans = runtime->private_data;
+	struct snd_soc_pcm_runtime *soc_pcm_rx;
+	struct snd_soc_pcm_runtime *soc_pcm_tx;
+	uint32_t bit_width = 16;
+	int ret = 0;
+
+	if (trans == NULL) {
+		pr_err("%s: Invalid param\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&trans->lock);
+
+	if (cstream->direction == SND_COMPRESS_PLAYBACK) {
+		if (codec_param->codec.id == SND_AUDIOCODEC_PCM) {
+			trans->sink.codec_format =
+				FORMAT_LINEAR_PCM;
+			switch (codec_param->codec.format) {
+			case SNDRV_PCM_FORMAT_S32_LE:
+				bit_width = 32;
+				break;
+			case SNDRV_PCM_FORMAT_S24_LE:
+				bit_width = 24;
+				break;
+			case SNDRV_PCM_FORMAT_S24_3LE:
+				bit_width = 24;
+				break;
+			case SNDRV_PCM_FORMAT_S16_LE:
+			default:
+				bit_width = 16;
+				break;
+			}
+		} else {
+			pr_debug("%s: unknown sink codec\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		trans->sink.start = true;
+	}
+
+	if (cstream->direction == SND_COMPRESS_CAPTURE) {
+		switch (codec_param->codec.id) {
+		case SND_AUDIOCODEC_PCM:
+			pr_debug("Source SND_AUDIOCODEC_PCM\n");
+			trans->source.codec_format =
+				FORMAT_LINEAR_PCM;
+			break;
+		case SND_AUDIOCODEC_AC3:
+			pr_debug("Source SND_AUDIOCODEC_AC3\n");
+			trans->source.codec_format =
+				FORMAT_AC3;
+			break;
+		case SND_AUDIOCODEC_EAC3:
+			pr_debug("Source SND_AUDIOCODEC_EAC3\n");
+			trans->source.codec_format =
+				FORMAT_EAC3;
+			break;
+		default:
+			pr_debug("%s: unknown source codec\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		trans->source.start = true;
+	}
+
+	pr_debug("%s: trans->source.start %d trans->sink.start %d trans->source.cstream %pK trans->sink.cstream %pK trans->session_state %d\n",
+			__func__, trans->source.start, trans->sink.start,
+			trans->source.cstream, trans->sink.cstream,
+			trans->session_state);
+
+	if ((trans->session_state == LOOPBACK_SESSION_READY) &&
+			trans->source.start && trans->sink.start) {
+		pr_debug("%s: Moving loopback session to start state\n",
+			  __func__);
+		trans->session_state = LOOPBACK_SESSION_START;
+	}
+
+	if (trans->session_state == LOOPBACK_SESSION_START) {
+		if (trans->audio_client != NULL) {
+			pr_debug("%s: ASM client already opened, closing\n",
+				 __func__);
+			stop_transcoding(trans);
+		}
+
+		trans->audio_client = q6asm_audio_client_alloc(
+				(app_cb)loopback_event_handler, trans);
+		if (!trans->audio_client) {
+			pr_err("%s: Could not allocate memory\n", __func__);
+			ret = -EINVAL;
+			goto exit;
+		}
+		pr_debug("%s: ASM client allocated, callback %pK\n", __func__,
+						loopback_event_handler);
+		trans->session_id = trans->audio_client->session;
+		trans->audio_client->perf_mode = false;
+		ret = q6asm_open_transcode_loopback(trans->audio_client,
+					bit_width,
+					trans->source.codec_format,
+					trans->sink.codec_format);
+		if (ret < 0) {
+			pr_err("%s: Session transcode loopback open failed\n",
+				__func__);
+			q6asm_audio_client_free(trans->audio_client);
+			trans->audio_client = NULL;
+			goto exit;
+		}
+
+		pr_debug("%s: Starting ADM open for loopback\n", __func__);
+		soc_pcm_rx = trans->sink.cstream->private_data;
+		soc_pcm_tx = trans->source.cstream->private_data;
+		if (trans->source.codec_format != FORMAT_LINEAR_PCM)
+			msm_pcm_routing_reg_phy_compr_stream(
+					soc_pcm_tx->dai_link->id,
+					trans->audio_client->perf_mode,
+					trans->session_id,
+					SNDRV_PCM_STREAM_CAPTURE,
+					true);
+		else
+			msm_pcm_routing_reg_phy_stream(
+					soc_pcm_tx->dai_link->id,
+					trans->audio_client->perf_mode,
+					trans->session_id,
+					SNDRV_PCM_STREAM_CAPTURE);
+
+		msm_pcm_routing_reg_phy_stream(
+					soc_pcm_rx->dai_link->id,
+					trans->audio_client->perf_mode,
+					trans->session_id,
+					SNDRV_PCM_STREAM_PLAYBACK);
+		pr_debug("%s: Successfully opened ADM sessions\n", __func__);
+	}
+exit:
+	mutex_unlock(&trans->lock);
+	return ret;
+}
+
+static int msm_transcode_loopback_get_caps(struct snd_compr_stream *cstream,
+				struct snd_compr_caps *arg)
+{
+	struct snd_compr_runtime *runtime;
+	struct msm_transcode_loopback *trans;
+
+	if (!arg || !cstream) {
+		pr_err("%s: Invalid arguments\n", __func__);
+		return -EINVAL;
+	}
+
+	runtime = cstream->runtime;
+	trans = runtime->private_data;
+	pr_debug("%s\n", __func__);
+	if (cstream->direction == SND_COMPRESS_CAPTURE)
+		memcpy(arg, &trans->source_compr_cap,
+		       sizeof(struct snd_compr_caps));
+	else
+		memcpy(arg, &trans->sink_compr_cap,
+		       sizeof(struct snd_compr_caps));
+	return 0;
+}
+
+static int msm_transcode_stream_cmd_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int ret = 0;
+	struct msm_adsp_event_data *event_data = NULL;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null.\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
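+	/*
+	 * Control payload is an msm_adsp_event_data header followed by the
+	 * event payload itself.
+	 */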
+	event_data = (struct msm_adsp_event_data *)ucontrol->value.bytes.data;
+	if ((event_data->event_type < ADSP_STREAM_PP_EVENT) ||
+	    (event_data->event_type >= ADSP_STREAM_EVENT_MAX)) {
+		pr_err("%s: invalid event_type=%d",
+			 __func__, event_data->event_type);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if ((sizeof(struct msm_adsp_event_data) + event_data->payload_len) >=
+					sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s param length=%d  exceeds limit",
+			 __func__, event_data->payload_len);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_stream_cmd(prtd->audio_client, event_data);
+	if (ret < 0)
+		pr_err("%s: failed to send stream event cmd, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_transcode_ion_fd_map_put(struct snd_kcontrol *kcontrol,
+				    struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+				snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int fd;
+	int ret = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received out of bounds invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
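+	/* First sizeof(int) bytes of the control payload carry the ion fd */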
+	memcpy(&fd, ucontrol->value.bytes.data, sizeof(fd));
+	ret = q6asm_send_ion_fd(prtd->audio_client, fd);
+	if (ret < 0)
+		pr_err("%s: failed to register ion fd\n", __func__);
+done:
+	return ret;
+}
+
+static int msm_transcode_rtic_event_ack_put(struct snd_kcontrol *kcontrol,
+					struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+	unsigned long fe_id = kcontrol->private_value;
+	struct trans_loopback_pdata *pdata = (struct trans_loopback_pdata *)
+					snd_soc_component_get_drvdata(comp);
+	struct snd_compr_stream *cstream = NULL;
+	struct msm_transcode_loopback *prtd;
+	int ret = 0;
+	int param_length = 0;
+
+	if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+		pr_err("%s Received invalid fe_id %lu\n",
+			__func__, fe_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	cstream = pdata->cstream[fe_id];
+	if (cstream == NULL) {
+		pr_err("%s cstream is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	prtd = cstream->runtime->private_data;
+	if (!prtd) {
+		pr_err("%s: prtd is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (prtd->audio_client == NULL) {
+		pr_err("%s: audio_client is null\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
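+	/* Payload layout: a length field followed by the event ack data */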
+	memcpy(&param_length, ucontrol->value.bytes.data,
+		sizeof(param_length));
+	if ((param_length + sizeof(param_length))
+		>= sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s param length=%d  exceeds limit",
+			__func__, param_length);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = q6asm_send_rtic_event_ack(prtd->audio_client,
+			ucontrol->value.bytes.data + sizeof(param_length),
+			param_length);
+	if (ret < 0)
+		pr_err("%s: failed to send rtic event ack, err = %d\n",
+			__func__, ret);
+done:
+	return ret;
+}
+
+static int msm_transcode_stream_cmd_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CMD;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_loopback_stream_cmd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_stream_cmd_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
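+	/* Control name + ' ' + device number string + NUL terminator */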
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_loopback_stream_cmd_config_control[0].name = mixer_str;
+	fe_loopback_stream_cmd_config_control[0].private_value =
+				rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+		fe_loopback_stream_cmd_config_control,
+		ARRAY_SIZE(fe_loopback_stream_cmd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_stream_callback_control(
+			struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol *kctl;
+
+	struct snd_kcontrol_new fe_loopback_callback_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_callback_info,
+		.get = msm_adsp_stream_callback_get,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s: rtd is  NULL\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_loopback_callback_config_control[0].name = mixer_str;
+	fe_loopback_callback_config_control[0].private_value =
+					rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+			fe_loopback_callback_config_control,
+			ARRAY_SIZE(fe_loopback_callback_config_control));
+	if (ret < 0) {
+		pr_err("%s: failed to add ctl %s. err = %d\n",
+			__func__, mixer_str, ret);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+	if (!kctl) {
+		pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+		ret = -EINVAL;
+		goto free_mixer_str;
+	}
+
+	kctl->private_data = NULL;
+free_mixer_str:
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_add_ion_fd_cmd_control(struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback ION FD";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_ion_fd_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_ion_fd_map_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_ion_fd_config_control[0].name = mixer_str;
+	fe_ion_fd_config_control[0].private_value = rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_ion_fd_config_control,
+				ARRAY_SIZE(fe_ion_fd_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_add_event_ack_cmd_control(
+					struct snd_soc_pcm_runtime *rtd)
+{
+	const char *mixer_ctl_name = "Playback Event Ack";
+	const char *deviceNo = "NN";
+	char *mixer_str = NULL;
+	int ctl_len = 0, ret = 0;
+	struct snd_kcontrol_new fe_event_ack_config_control[1] = {
+		{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "?",
+		.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+		.info = msm_adsp_stream_cmd_info,
+		.put = msm_transcode_rtic_event_ack_put,
+		.private_value = 0,
+		}
+	};
+
+	if (!rtd) {
+		pr_err("%s NULL rtd\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+	mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+	if (!mixer_str) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+	fe_event_ack_config_control[0].name = mixer_str;
+	fe_event_ack_config_control[0].private_value = rtd->dai_link->id;
+	pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+	ret = snd_soc_add_platform_controls(rtd->platform,
+				fe_event_ack_config_control,
+				ARRAY_SIZE(fe_event_ack_config_control));
+	if (ret < 0)
+		pr_err("%s: failed to add ctl %s\n", __func__, mixer_str);
+
+	kfree(mixer_str);
+done:
+	return ret;
+}
+
+static int msm_transcode_loopback_new(struct snd_soc_pcm_runtime *rtd)
+{
+	int rc;
+
+	rc = msm_transcode_stream_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: ADSP Stream Cmd Control open failed\n", __func__);
+
+	rc = msm_transcode_stream_callback_control(rtd);
+	if (rc)
+		pr_err("%s: ADSP Stream callback Control open failed\n",
+			__func__);
+
+	rc = msm_transcode_add_ion_fd_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode ion fd Control\n",
+			__func__);
+
+	rc = msm_transcode_add_event_ack_cmd_control(rtd);
+	if (rc)
+		pr_err("%s: Could not add transcode event ack Control\n",
+			__func__);
+
+	return 0;
+}
+
+static struct snd_compr_ops msm_transcode_loopback_ops = {
+	.open			= msm_transcode_loopback_open,
+	.free			= msm_transcode_loopback_free,
+	.trigger		= msm_transcode_loopback_trigger,
+	.set_params		= msm_transcode_loopback_set_params,
+	.get_caps		= msm_transcode_loopback_get_caps,
+};
+
+static int msm_transcode_loopback_probe(struct snd_soc_platform *platform)
+{
+	struct trans_loopback_pdata *pdata = NULL;
+
+	pr_debug("%s\n", __func__);
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	snd_soc_platform_set_drvdata(platform, pdata);
+	return 0;
+}
+
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.probe		= msm_transcode_loopback_probe,
+	.compr_ops	= &msm_transcode_loopback_ops,
+	.pcm_new	= msm_transcode_loopback_new,
+};
+
+static int msm_transcode_dev_probe(struct platform_device *pdev)
+{
+
+	pr_debug("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+	if (pdev->dev.of_node)
+		dev_set_name(&pdev->dev, "%s", "msm-transcode-loopback");
+
+	return snd_soc_register_platform(&pdev->dev,
+					&msm_soc_platform);
+}
+
+static int msm_transcode_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_transcode_loopback_dt_match[] = {
+	{.compatible = "qcom,msm-transcode-loopback"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_transcode_loopback_dt_match);
+
+static struct platform_driver msm_transcode_loopback_driver = {
+	.driver = {
+		.name = "msm-transcode-loopback",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_transcode_loopback_dt_match,
+	},
+	.probe = msm_transcode_dev_probe,
+	.remove = msm_transcode_remove,
+};
+
+static int __init msm_soc_platform_init(void)
+{
+	memset(&transcode_info, 0, sizeof(struct msm_transcode_loopback));
+	mutex_init(&transcode_info.lock);
+	return platform_driver_register(&msm_transcode_loopback_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	mutex_destroy(&transcode_info.lock);
+	platform_driver_unregister(&msm_transcode_loopback_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("Transcode loopback platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 90d640d..1590605 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -24,7 +24,6 @@
 #include <sound/q6afe-v2.h>
 #include <sound/audio_cal_utils.h>
 #include <sound/asound.h>
-#include <sound/msm-dts-eagle.h>
 #include "msm-dts-srs-tm-config.h"
 #include <sound/adsp_err.h>
 
@@ -265,223 +264,6 @@
 	return idx;
 }
 
-int adm_dts_eagle_set(int port_id, int copp_idx, int param_id,
-		      void *data, uint32_t size)
-{
-	struct adm_cmd_set_pp_params_v5	admp;
-	int p_idx, ret = 0, *ob_params;
-
-	pr_debug("DTS_EAGLE_ADM: %s - port id %i, copp idx %i, param id 0x%X size %u\n",
-		__func__, port_id, copp_idx, param_id, size);
-
-	port_id = afe_convert_virtual_to_portid(port_id);
-	p_idx = adm_validate_and_get_port_index(port_id);
-	pr_debug("DTS_EAGLE_ADM: %s - after lookup, port id %i, port idx %i\n",
-		__func__, port_id, p_idx);
-
-	if (p_idx < 0) {
-		pr_err("DTS_EAGLE_ADM: %s: invalid port index 0x%x, port id 0x%x\n",
-			__func__, p_idx, port_id);
-		return -EINVAL;
-	}
-
-	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
-		pr_err("DTS_EAGLE_ADM: %s: Invalid copp_idx: %d\n", __func__,
-			copp_idx);
-		return -EINVAL;
-	}
-
-	ob_params = (int *)this_adm.outband_memmap.kvaddr;
-	if (ob_params == NULL) {
-		pr_err("DTS_EAGLE_ADM: %s - NULL memmap. Non Eagle topology selected?\n",
-			__func__);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	/* check for integer overflow */
-	if (size > (UINT_MAX - APR_CMD_OB_HDR_SZ))
-		ret = -EINVAL;
-	if ((ret < 0) ||
-	    (size + APR_CMD_OB_HDR_SZ > this_adm.outband_memmap.size)) {
-		pr_err("DTS_EAGLE_ADM - %s: ion alloc of size %zu too small for size requested %u\n",
-			__func__, this_adm.outband_memmap.size,
-			size + APR_CMD_OB_HDR_SZ);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	*ob_params++ = AUDPROC_MODULE_ID_DTS_HPX_POSTMIX;
-	*ob_params++ = param_id;
-	*ob_params++ = size;
-	memcpy(ob_params, data, size);
-
-	admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
-		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-	admp.hdr.pkt_size = sizeof(admp);
-	admp.hdr.src_svc = APR_SVC_ADM;
-	admp.hdr.src_domain = APR_DOMAIN_APPS;
-	admp.hdr.src_port = port_id;
-	admp.hdr.dest_svc = APR_SVC_ADM;
-	admp.hdr.dest_domain = APR_DOMAIN_ADSP;
-	admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]);
-	admp.hdr.token = p_idx << 16 | copp_idx;
-	admp.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
-	admp.payload_addr_lsw = lower_32_bits(this_adm.outband_memmap.paddr);
-	admp.payload_addr_msw = msm_audio_populate_upper_32_bits(
-						this_adm.outband_memmap.paddr);
-	admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[
-					  ADM_DTS_EAGLE]);
-	admp.payload_size = size + sizeof(struct adm_param_data_v5);
-
-	pr_debug("DTS_EAGLE_ADM: %s - Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
-			__func__, admp.hdr.dest_port,
-			admp.payload_size, AUDPROC_MODULE_ID_DTS_HPX_POSTMIX,
-			param_id);
-	atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1);
-	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp);
-	if (ret < 0) {
-		pr_err("DTS_EAGLE_ADM: %s - ADM enable for port %d failed\n",
-			__func__, port_id);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx],
-			atomic_read(&this_adm.copp.stat
-			[p_idx][copp_idx]) >= 0,
-			msecs_to_jiffies(TIMEOUT_MS));
-	if (!ret) {
-		pr_err("DTS_EAGLE_ADM: %s - set params timed out port = %d\n",
-			__func__, port_id);
-		ret = -EINVAL;
-	} else if (atomic_read(&this_adm.copp.stat
-				[p_idx][copp_idx]) > 0) {
-		pr_err("%s: DSP returned error[%s]\n",
-				__func__, adsp_err_get_err_str(
-				atomic_read(&this_adm.copp.stat
-				[p_idx][copp_idx])));
-		ret = adsp_err_get_lnx_err_code(
-				atomic_read(&this_adm.copp.stat
-				[p_idx][copp_idx]));
-	} else {
-		ret = 0;
-	}
-
-fail_cmd:
-	return ret;
-}
-
-int adm_dts_eagle_get(int port_id, int copp_idx, int param_id,
-		      void *data, uint32_t size)
-{
-	struct adm_cmd_get_pp_params_v5	admp;
-	int p_idx, ret = 0, *ob_params;
-	uint32_t orig_size = size;
-
-	pr_debug("DTS_EAGLE_ADM: %s - port id %i, copp idx %i, param id 0x%X\n",
-		 __func__, port_id, copp_idx, param_id);
-
-	port_id = afe_convert_virtual_to_portid(port_id);
-	p_idx = adm_validate_and_get_port_index(port_id);
-	if (p_idx < 0) {
-		pr_err("DTS_EAGLE_ADM: %s - invalid port index %i, port id %i, copp idx %i\n",
-				__func__, p_idx, port_id, copp_idx);
-		return -EINVAL;
-	}
-
-	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
-		pr_err("DTS_EAGLE_ADM: %s: Invalid copp_idx: %d\n", __func__,
-			copp_idx);
-		return -EINVAL;
-	}
-
-	if ((size == 0) || !data) {
-		pr_err("DTS_EAGLE_ADM: %s - invalid size %u or pointer %pK.\n",
-			__func__, size, data);
-		return -EINVAL;
-	}
-
-	size = (size+3) & 0xFFFFFFFC;
-
-	ob_params = (int *)(this_adm.outband_memmap.kvaddr);
-	if (ob_params == NULL) {
-		pr_err("DTS_EAGLE_ADM: %s - NULL memmap. Non Eagle topology selected?",
-			__func__);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	/* check for integer overflow */
-	if (size > (UINT_MAX - APR_CMD_OB_HDR_SZ))
-		ret = -EINVAL;
-	if ((ret < 0) ||
-	    (size + APR_CMD_OB_HDR_SZ > this_adm.outband_memmap.size)) {
-		pr_err("DTS_EAGLE_ADM - %s: ion alloc of size %zu too small for size requested %u\n",
-			__func__, this_adm.outband_memmap.size,
-			size + APR_CMD_OB_HDR_SZ);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	*ob_params++ = AUDPROC_MODULE_ID_DTS_HPX_POSTMIX;
-	*ob_params++ = param_id;
-	*ob_params++ = size;
-
-	admp.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
-			     APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-	admp.hdr.pkt_size = sizeof(admp);
-	admp.hdr.src_svc = APR_SVC_ADM;
-	admp.hdr.src_domain = APR_DOMAIN_APPS;
-	admp.hdr.src_port = port_id;
-	admp.hdr.dest_svc = APR_SVC_ADM;
-	admp.hdr.dest_domain = APR_DOMAIN_ADSP;
-	admp.hdr.dest_port = atomic_read(&this_adm.copp.id[p_idx][copp_idx]);
-	admp.hdr.token = p_idx << 16 | copp_idx;
-	admp.hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
-	admp.data_payload_addr_lsw =
-				lower_32_bits(this_adm.outband_memmap.paddr);
-	admp.data_payload_addr_msw =
-				msm_audio_populate_upper_32_bits(
-						this_adm.outband_memmap.paddr);
-	admp.mem_map_handle = atomic_read(&this_adm.mem_map_handles[
-					  ADM_DTS_EAGLE]);
-	admp.module_id = AUDPROC_MODULE_ID_DTS_HPX_POSTMIX;
-	admp.param_id = param_id;
-	admp.param_max_size = size + sizeof(struct adm_param_data_v5);
-	admp.reserved = 0;
-
-	atomic_set(&this_adm.copp.stat[p_idx][copp_idx], -1);
-
-	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&admp);
-	if (ret < 0) {
-		pr_err("DTS_EAGLE_ADM: %s - Failed to get EAGLE Params on port %d\n",
-			__func__, port_id);
-		ret = -EINVAL;
-		goto fail_cmd;
-	}
-	ret = wait_event_timeout(this_adm.copp.wait[p_idx][copp_idx],
-			atomic_read(&this_adm.copp.stat
-			[p_idx][copp_idx]) >= 0,
-			msecs_to_jiffies(TIMEOUT_MS));
-	if (!ret) {
-		pr_err("DTS_EAGLE_ADM: %s - EAGLE get params timed out port = %d\n",
-			__func__, port_id);
-		ret = -EINVAL;
-		goto fail_cmd;
-	} else if (atomic_read(&this_adm.copp.stat
-				[p_idx][copp_idx]) > 0) {
-		pr_err("%s: DSP returned error[%s]\n",
-				__func__, adsp_err_get_err_str(
-				atomic_read(&this_adm.copp.stat
-				[p_idx][copp_idx])));
-		ret = adsp_err_get_lnx_err_code(
-				atomic_read(&this_adm.copp.stat
-					[p_idx][copp_idx]));
-		goto fail_cmd;
-	}
-
-	memcpy(data, ob_params, orig_size);
-	ret = 0;
-fail_cmd:
-	return ret;
-}
-
 int srs_trumedia_open(int port_id, int copp_idx, __s32 srs_tech_id,
 		      void *srs_params)
 {
@@ -748,6 +530,267 @@
 	return ret;
 }
 
+static int adm_populate_channel_weight(u16 *ptr,
+					struct msm_pcm_channel_mixer *ch_mixer,
+					int channel_index)
+{
+	u16 i, j, start_index = 0;
+
+	if (channel_index > ch_mixer->output_channel) {
+		pr_err("%s: channel index %d is larger than output_channel %d\n",
+			 __func__, channel_index, ch_mixer->output_channel);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ch_mixer->output_channel; i++) {
+		pr_debug("%s: weight for output %d:", __func__, i);
+		for (j = 0; j < ADM_MAX_CHANNELS; j++)
+			pr_debug(" %d",
+				ch_mixer->channel_weight[i][j]);
+		pr_debug("\n");
+	}
+
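+	/*
+	 * Weights for this input stream follow those of the preceding
+	 * streams in the flattened channel_weight matrix.
+	 */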
+	for (i = 0; i < channel_index; ++i)
+		start_index += ch_mixer->input_channels[i];
+
+	for (i = 0; i < ch_mixer->output_channel; ++i) {
+		for (j = start_index;
+			j < start_index +
+			ch_mixer->input_channels[channel_index]; j++) {
+			*ptr = ch_mixer->channel_weight[i][j];
+			 pr_debug("%s: ptr[%d][%d] = %d\n",
+				__func__, i, j, *ptr);
+			 ptr++;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * adm_programable_channel_mixer
+ *
+ * Sends the ADM channel mixer command that mixes COPP data for the
+ * given session.
+ *
+ * port_id - port whose backend COPP is targeted
+ * copp_idx - index of the COPP on that port
+ * session_id - ASM session whose data is mixed
+ * session_type - session direction, RX or TX
+ * ch_mixer - channel mixer configuration to apply
+ * channel_index - index of the input stream within ch_mixer
+ */
+int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id,
+				  int session_type,
+				  struct msm_pcm_channel_mixer *ch_mixer,
+				  int channel_index)
+{
+	struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL;
+	struct adm_param_data_v5 data_v5;
+	int ret = 0, port_idx, sz = 0, param_size = 0;
+	u16 *adm_pspd_params;
+	u16 *ptr;
+	int index = 0;
+
+	pr_debug("%s: port_id = %d\n", __func__, port_id);
+	port_id = afe_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		return -EINVAL;
+	}
+	/*
+	 * The first 8 bytes hold a 4-byte rule number, a 2-byte output
+	 * channel count and a 2-byte input channel count.
+	 * 2 * ch_mixer->output_channel bytes carry the output channel map.
+	 * 2 * ch_mixer->input_channels[channel_index] bytes carry the input
+	 * channel map.
+	 * 2 * ch_mixer->input_channels[channel_index] *
+	 * ch_mixer->output_channel bytes carry the channel mixer weighting
+	 * coefficients.
+	 * param_size must be rounded up to a multiple of 4 bytes.
+	 */
+
+	param_size = 2 * (4 + ch_mixer->output_channel +
+			ch_mixer->input_channels[channel_index] +
+			ch_mixer->input_channels[channel_index] *
+			ch_mixer->output_channel);
+	param_size = roundup(param_size, 4);
+
+	sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) +
+			sizeof(struct default_chmixer_param_id_coeff) +
+			sizeof(struct adm_param_data_v5) + param_size;
+	pr_debug("%s: sz = %d\n", __func__, sz);
+	adm_params = kzalloc(sz, GFP_KERNEL);
+	if (!adm_params)
+		return -ENOMEM;
+
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->direction = session_type;
+	adm_params->sessionid = session_id;
+	pr_debug("%s: copp_id = %d, session id  %d\n", __func__,
+		atomic_read(&this_adm.copp.id[port_idx][copp_idx]),
+			session_id);
+	adm_params->deviceid = atomic_read(
+				&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->reserved = 0;
+
+	data_v5.module_id = MTMX_MODULE_ID_DEFAULT_CHMIXER;
+	data_v5.param_id =  DEFAULT_CHMIXER_PARAM_ID_COEFF;
+	data_v5.reserved = 0;
+	data_v5.param_size = param_size;
+	adm_params->payload_size =
+			sizeof(struct default_chmixer_param_id_coeff) +
+			sizeof(struct adm_param_data_v5) + data_v5.param_size;
+	adm_pspd_params = (u16 *)((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5));
+	memcpy(adm_pspd_params, &data_v5, sizeof(data_v5));
+
+	adm_pspd_params = (u16 *)((u8 *)adm_params +
+			sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5)
+			+ sizeof(data_v5));
+
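+	/*
+	 * The 4-byte rule number spans adm_pspd_params[0..1] (the high word
+	 * stays zero from kzalloc); the output and input channel counts
+	 * follow at indices 2 and 3.
+	 */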
+	adm_pspd_params[0] = ch_mixer->rule;
+	adm_pspd_params[2] = ch_mixer->output_channel;
+	adm_pspd_params[3] = ch_mixer->input_channels[channel_index];
+	index = 4;
+
+	if (ch_mixer->output_channel == 1) {
+		adm_pspd_params[index] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->output_channel == 2) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+	} else if (ch_mixer->output_channel == 3) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->output_channel == 4) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 5) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 6) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->output_channel == 8) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
+		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
+	}
+
+	index = index + ch_mixer->output_channel;
+	if (ch_mixer->input_channels[channel_index] == 1) {
+		adm_pspd_params[index] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->input_channels[channel_index] == 2) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+	} else if (ch_mixer->input_channels[channel_index] == 3) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+	} else if (ch_mixer->input_channels[channel_index] == 4) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 5) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 6) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+	} else if (ch_mixer->input_channels[channel_index] == 8) {
+		adm_pspd_params[index] = PCM_CHANNEL_FL;
+		adm_pspd_params[index + 1] = PCM_CHANNEL_FR;
+		adm_pspd_params[index + 2] = PCM_CHANNEL_LFE;
+		adm_pspd_params[index + 3] = PCM_CHANNEL_FC;
+		adm_pspd_params[index + 4] = PCM_CHANNEL_LS;
+		adm_pspd_params[index + 5] = PCM_CHANNEL_RS;
+		adm_pspd_params[index + 6] = PCM_CHANNEL_LB;
+		adm_pspd_params[index + 7] = PCM_CHANNEL_RB;
+	}
+
+	index = index + ch_mixer->input_channels[channel_index];
+	ret = adm_populate_channel_weight(&adm_pspd_params[index],
+					ch_mixer, channel_index);
+	if (ret) {
+		pr_err("%s: failed to get channel weight, error %d\n",
+			__func__, ret);
+		goto fail_cmd;
+	}
+
+	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	adm_params->hdr.src_svc = APR_SVC_ADM;
+	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
+	adm_params->hdr.src_port = port_id;
+	adm_params->hdr.dest_svc = APR_SVC_ADM;
+	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
+	adm_params->hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	adm_params->hdr.token = port_idx << 16 | copp_idx;
+	adm_params->hdr.opcode = ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5;
+	adm_params->hdr.pkt_size = sz;
+	adm_params->payload_addr_lsw = 0;
+	adm_params->payload_addr_msw = 0;
+	adm_params->mem_map_handle = 0;
+	adm_params->reserved = 0;
+
+	ptr = (u16 *)adm_params;
+	for (index = 0; index < (sz / 2); index++)
+		pr_debug("%s: adm_params[%d] = 0x%x\n",
+			__func__, index, (unsigned int)ptr[index]);
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], 0);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
+	if (ret < 0) {
+		pr_err("%s: Set params failed port %d rc %d\n", __func__,
+			port_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+			atomic_read(
+			&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+			msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: set params timed out port = %d\n",
+			__func__, port_id);
+		ret = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	ret = 0;
+fail_cmd:
+	kfree(adm_params);
+
+	return ret;
+}
+
 int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx,
 				    unsigned int session_id, char *params,
 				    uint32_t params_length)
@@ -2352,13 +2395,6 @@
 		 __func__, port_id, path, rate, channel_mode, perf_mode,
 		 topology);
 
-	/* For DTS EAGLE only, force 24 bit */
-	if ((topology == ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX) &&
-		(perf_mode == LEGACY_PCM_MODE)) {
-		bit_width = 24;
-		pr_debug("%s: Force open adm in 24-bit for DTS HPX topology 0x%x\n",
-			__func__, topology);
-	}
 	port_id = q6audio_convert_virtual_to_portid(port_id);
 	port_idx = adm_validate_and_get_port_index(port_id);
 	if (port_idx < 0) {
@@ -2380,8 +2416,7 @@
 		flags = ADM_ULL_POST_PROCESSING_DEVICE_SESSION;
 		if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
 		    (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
-		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID) ||
-		    (topology == ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX))
+		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
 			topology = DEFAULT_COPP_TOPOLOGY;
 	} else if (perf_mode == ULTRA_LOW_LATENCY_PCM_MODE) {
 		flags = ADM_ULTRA_LOW_LATENCY_DEVICE_SESSION;
@@ -2392,11 +2427,11 @@
 		flags = ADM_LOW_LATENCY_DEVICE_SESSION;
 		if ((topology == DOLBY_ADM_COPP_TOPOLOGY_ID) ||
 		    (topology == DS2_ADM_COPP_TOPOLOGY_ID) ||
-		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID) ||
-		    (topology == ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX))
+		    (topology == SRS_TRUMEDIA_TOPOLOGY_ID))
 			topology = DEFAULT_COPP_TOPOLOGY;
 	} else {
-		if (path == ADM_PATH_COMPRESSED_RX)
+		if ((path == ADM_PATH_COMPRESSED_RX) ||
+		    (path == ADM_PATH_COMPRESSED_TX))
 			flags = 0;
 		else
 			flags = ADM_LEGACY_DEVICE_SESSION;
@@ -2433,7 +2468,8 @@
 			   acdb_id);
 		set_bit(ADM_STATUS_CALIBRATION_REQUIRED,
 		(void *)&this_adm.copp.adm_status[port_idx][copp_idx]);
-		if (path != ADM_PATH_COMPRESSED_RX)
+		if ((path != ADM_PATH_COMPRESSED_RX) &&
+		    (path != ADM_PATH_COMPRESSED_TX))
 			send_adm_custom_topology();
 	}
 
@@ -2463,22 +2499,6 @@
 		(uint32_t)this_adm.outband_memmap.size);
 		}
 	}
-		if ((topology == ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX) &&
-		    (perf_mode == LEGACY_PCM_MODE)) {
-			int res = 0;
-
-			atomic_set(&this_adm.mem_map_index, ADM_DTS_EAGLE);
-			msm_dts_ion_memmap(&this_adm.outband_memmap);
-			res = adm_memory_map_regions(
-				      &this_adm.outband_memmap.paddr,
-				      0,
-				      (uint32_t *)&this_adm.outband_memmap.size,
-				      1);
-			if (res < 0)
-				pr_err("%s: DTS_EAGLE mmap did not work!",
-					__func__);
-		}
-		memset(&open, 0, sizeof(struct adm_cmd_device_open_v5));
 		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
 						   APR_HDR_LEN(APR_HDR_SIZE),
 						   APR_PKT_VER);
@@ -2721,6 +2741,10 @@
 		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
 		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
 		break;
+	case ADM_PATH_COMPRESSED_TX:
+		route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+		route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_TX;
+		break;
 	default:
 		pr_err("%s: Wrong path set[%d]\n", __func__, path);
 		break;
@@ -2824,10 +2848,6 @@
 					__func__, port_idx, copp_idx);
 				continue;
 			}
-			if (atomic_read(
-				&this_adm.copp.topology[port_idx][copp_idx]) ==
-				ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX)
-				continue;
 			rtac_add_adm_device(payload_map.port_id[i],
 					    atomic_read(&this_adm.copp.id
 							[port_idx][copp_idx]),
@@ -2941,21 +2961,6 @@
 			}
 		}
 
-		if ((perf_mode == LEGACY_PCM_MODE) &&
-		    (this_adm.outband_memmap.paddr != 0) &&
-		    (atomic_read(
-			&this_adm.copp.topology[port_idx][copp_idx]) ==
-			ADM_CMD_COPP_OPEN_TOPOLOGY_ID_DTS_HPX)) {
-			atomic_set(&this_adm.mem_map_index, ADM_DTS_EAGLE);
-			ret = adm_memory_unmap_regions();
-			if (ret < 0) {
-				pr_err("%s: adm mem unmmap err %d",
-					__func__, ret);
-			} else {
-				atomic_set(&this_adm.mem_map_handles
-					   [ADM_DTS_EAGLE], 0);
-			}
-		}
 
 		if ((afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX) &&
 		    this_adm.sourceTrackingData.memmap.paddr) {
@@ -3432,10 +3437,6 @@
 		{NULL, NULL, NULL, NULL, NULL, NULL} },
 		{NULL, NULL, cal_utils_match_buf_num} },
 
-		{{DTS_EAGLE_CAL_TYPE,
-		{NULL, NULL, NULL, NULL, NULL, NULL} },
-		{NULL, NULL, cal_utils_match_buf_num} },
-
 		{{SRS_TRUMEDIA_CAL_TYPE,
 		{NULL, NULL, NULL, NULL, NULL, NULL} },
 		{NULL, NULL, cal_utils_match_buf_num} },
@@ -4309,6 +4310,136 @@
 	return ret;
 }
 
+/**
+ * adm_swap_speaker_channels
+ *
+ * Sends an MFC command to swap the left and right speaker channels.
+ * Returns zero on success and nonzero on failure.
+ *
+ * port_id - port for which the channel swap is wanted
+ * copp_idx - index of the COPP on that port
+ * sample_rate - sample rate used by the app type config
+ * spk_swap - true if the swap flag is set
+ */
+int adm_swap_speaker_channels(int port_id, int copp_idx,
+			int sample_rate, bool spk_swap)
+{
+	struct audproc_mfc_output_media_fmt mfc_cfg;
+	uint16_t num_channels;
+	int port_idx;
+	int ret  = 0;
+
+	pr_debug("%s: Enter, port_id %d, copp_idx %d\n",
+		  __func__, port_id, copp_idx);
+	port_id = q6audio_convert_virtual_to_portid(port_id);
+	port_idx = adm_validate_and_get_port_index(port_id);
+	if (port_idx < 0 || port_idx >= AFE_MAX_PORTS) {
+		pr_err("%s: Invalid port_id %#x\n", __func__, port_id);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (copp_idx < 0 || copp_idx >= MAX_COPPS_PER_PORT) {
+		pr_err("%s: Invalid copp_num: %d\n", __func__, copp_idx);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	num_channels = atomic_read(
+				&this_adm.copp.channels[port_idx][copp_idx]);
+	if (num_channels != 2) {
+		pr_debug("%s: Invalid number of channels: %d\n",
+			__func__, num_channels);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	memset(&mfc_cfg, 0, sizeof(mfc_cfg));
+	mfc_cfg.params.hdr.hdr_field =
+				APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	mfc_cfg.params.hdr.pkt_size =
+				sizeof(mfc_cfg);
+	mfc_cfg.params.hdr.src_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.src_domain = APR_DOMAIN_APPS;
+	mfc_cfg.params.hdr.src_port = port_id;
+	mfc_cfg.params.hdr.dest_svc = APR_SVC_ADM;
+	mfc_cfg.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+	mfc_cfg.params.hdr.dest_port =
+			atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+	mfc_cfg.params.hdr.token = port_idx << 16 | copp_idx;
+	mfc_cfg.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+	mfc_cfg.params.payload_addr_lsw = 0;
+	mfc_cfg.params.payload_addr_msw = 0;
+	mfc_cfg.params.mem_map_handle = 0;
+	mfc_cfg.params.payload_size = sizeof(mfc_cfg) -
+				sizeof(mfc_cfg.params);
+	mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC;
+	mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT;
+	mfc_cfg.data.param_size = mfc_cfg.params.payload_size -
+				sizeof(mfc_cfg.data);
+	mfc_cfg.data.reserved = 0;
+	mfc_cfg.sampling_rate = sample_rate;
+	mfc_cfg.bits_per_sample =
+		atomic_read(&this_adm.copp.bit_width[port_idx][copp_idx]);
+	mfc_cfg.num_channels = num_channels;
+
+	/* Currently applying speaker swap for only 2 channel use case */
+	if (spk_swap) {
+		mfc_cfg.channel_type[0] =
+			(uint16_t) PCM_CHANNEL_FR;
+		mfc_cfg.channel_type[1] =
+			(uint16_t) PCM_CHANNEL_FL;
+	} else {
+		mfc_cfg.channel_type[0] =
+			(uint16_t) PCM_CHANNEL_FL;
+		mfc_cfg.channel_type[1] =
+			(uint16_t) PCM_CHANNEL_FR;
+	}
+
+	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+	pr_debug("%s: mfc config: port_idx %d copp_idx  %d copp SR %d copp BW %d copp chan %d\n",
+		__func__, port_idx, copp_idx, mfc_cfg.sampling_rate,
+		mfc_cfg.bits_per_sample, mfc_cfg.num_channels);
+
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&mfc_cfg);
+	if (ret < 0) {
+		pr_err("%s: port_id: for[0x%x] failed %d\n",
+		__func__, port_id, ret);
+		goto done;
+	}
+	/* Wait for the callback with copp id */
+	ret = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+		atomic_read(&this_adm.copp.stat
+		[port_idx][copp_idx]) >= 0,
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: mfc_cfg Set params timed out for port_id: for [0x%x]\n",
+					__func__, port_id);
+		ret = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&this_adm.copp.stat
+			[port_idx][copp_idx])));
+		ret = adsp_err_get_lnx_err_code(
+			atomic_read(&this_adm.copp.stat
+				[port_idx][copp_idx]));
+		goto done;
+	}
+
+	pr_debug("%s: mfc_cfg Set params returned success", __func__);
+	ret = 0;
+
+done:
+	return ret;
+}
+EXPORT_SYMBOL(adm_swap_speaker_channels);
+
 int adm_set_sound_focus(int port_id, int copp_idx,
 			struct sound_focus_param soundFocusData)
 {
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 176b8aa..ebb8eff 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -5191,7 +5191,7 @@
 					AFE_API_VERSION_LOOPBACK_CONFIG;
 	cmd_sidetone.cfg_data.dst_port_id = rx_port_id;
 	cmd_sidetone.cfg_data.routing_mode = LB_MODE_SIDETONE;
-	cmd_sidetone.cfg_data.enable = ((enable == 1) ? sidetone_enable : 0);
+	cmd_sidetone.cfg_data.enable = enable;
 
 	pr_debug("%s rx(0x%x) tx(0x%x) enable(%d) mid(0x%x) gain(%d) sidetone_enable(%d)\n",
 		  __func__, rx_port_id, tx_port_id,
@@ -6695,8 +6695,6 @@
 	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 	memcpy(&this_afe.prot_cfg, &cal_data->cal_info,
 		sizeof(this_afe.prot_cfg));
-	this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
-	this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
 	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 done:
 	return ret;
@@ -6839,8 +6837,6 @@
 		cal_data->cal_info.r0[SP_V2_SPKR_1] = -1;
 		cal_data->cal_info.r0[SP_V2_SPKR_2] = -1;
 	}
-	this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
-	this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
 	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 	__pm_relax(&wl.ws);
 done:
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index b52c83b..e7e1618 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -38,8 +38,8 @@
 #include <sound/q6asm-v2.h>
 #include <sound/q6audio-v2.h>
 #include <sound/audio_cal_utils.h>
-#include <sound/msm-dts-eagle.h>
 #include <sound/adsp_err.h>
+#include <sound/compress_params.h>
 
 #define TRUE        0x01
 #define FALSE       0x00
@@ -155,6 +155,38 @@
 static char *out_buffer;
 static char *in_buffer;
 
+static uint32_t adsp_reg_event_opcode[] = {
+	ASM_STREAM_CMD_REGISTER_PP_EVENTS,
+	ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS,
+	ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE };
+
+static uint32_t adsp_raise_event_opcode[] = {
+	ASM_STREAM_PP_EVENT,
+	ASM_STREAM_CMD_ENCDEC_EVENTS,
+	ASM_IEC_61937_MEDIA_FMT_EVENT };
+
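+/*
+ * The index returned by the lookups below doubles as the event_type in
+ * struct msm_adsp_event_data and indexes the opcode tables above.
+ */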
+static int is_adsp_reg_event(uint32_t cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adsp_reg_event_opcode); i++) {
+		if (cmd == adsp_reg_event_opcode[i])
+			return i;
+	}
+	return -EINVAL;
+}
+
+static int is_adsp_raise_event(uint32_t cmd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adsp_raise_event_opcode); i++) {
+		if (cmd == adsp_raise_event_opcode[i])
+			return i;
+	}
+	return -EINVAL;
+}
+
 static inline void q6asm_set_flag_in_token(union asm_token_struct *asm_token,
 					   int flag, int flag_offset)
 {
@@ -1091,6 +1123,72 @@
 	return NULL;
 }
 
+int q6asm_send_stream_cmd(struct audio_client *ac,
+			  struct msm_adsp_event_data *data)
+{
+	char *asm_params = NULL;
+	struct apr_hdr hdr;
+	int sz, rc;
+
+	if (!data || !ac) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!data) ? "data" : "ac");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (data->event_type >= ARRAY_SIZE(adsp_reg_event_opcode)) {
+		pr_err("%s: event %u out of boundary of array size of (%lu)\n",
+		       __func__, data->event_type,
+		       (long)ARRAY_SIZE(adsp_reg_event_opcode));
+		rc = -EINVAL;
+		goto done;
+	}
+
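+	/* Packet: APR header immediately followed by the raw event payload */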
+	sz = sizeof(struct apr_hdr) + data->payload_len;
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	q6asm_add_hdr_async(ac, &hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state_pp, -1);
+	hdr.opcode = adsp_reg_event_opcode[data->event_type];
+	memcpy(asm_params, &hdr, sizeof(struct apr_hdr));
+	memcpy(asm_params + sizeof(struct apr_hdr),
+		data->payload, data->payload_len);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: stream event cmd apr pkt failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_send_param;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state_pp) >= 0), 1 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout for stream event cmd resp\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_send_param;
+	}
+
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
+		pr_err("%s: DSP returned error[%s] for stream event cmd\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state_pp)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state_pp));
+		goto fail_send_param;
+	}
+
+	rc = 0;
+fail_send_param:
+	kfree(asm_params);
+done:
+	return rc;
+}
+
 struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
 {
 	struct audio_client *ac;
@@ -1162,6 +1260,7 @@
 		spin_lock_init(&ac->port[lcnt].dsp_lock);
 	}
 	atomic_set(&ac->cmd_state, 0);
+	atomic_set(&ac->cmd_state_pp, 0);
 	atomic_set(&ac->mem_state, 0);
 
 	rc = send_asm_custom_topology(ac);
@@ -1441,7 +1540,6 @@
 			}
 			pr_debug("%s: Clearing custom topology\n", __func__);
 		}
-		this_mmap.apr = NULL;
 
 		cal_utils_clear_cal_block_q6maps(ASM_MAX_CAL_TYPES, cal_data);
 		common_client.mmap_apr = NULL;
@@ -1607,6 +1705,8 @@
 	int32_t  ret = 0;
 	union asm_token_struct asm_token;
 	uint8_t buf_index;
+	struct msm_adsp_event_data *pp_event_package = NULL;
+	uint32_t payload_size = 0;
 
 	if (ac == NULL) {
 		pr_err("%s: ac NULL\n", __func__);
@@ -1639,8 +1739,10 @@
 	if (data->opcode == RESET_EVENTS) {
 		mutex_lock(&ac->cmd_lock);
 		atomic_set(&ac->reset, 1);
-		if (ac->apr == NULL)
+		if (ac->apr == NULL) {
 			ac->apr = ac->apr2;
+			ac->apr2 = NULL;
+		}
 		pr_debug("%s: Reset event is received: %d %d apr[%pK]\n",
 			__func__,
 			data->reset_event, data->reset_proc, ac->apr);
@@ -1652,6 +1754,7 @@
 		atomic_set(&ac->time_flag, 0);
 		atomic_set(&ac->cmd_state, 0);
 		atomic_set(&ac->mem_state, 0);
+		atomic_set(&ac->cmd_state_pp, 0);
 		wake_up(&ac->time_wait);
 		wake_up(&ac->cmd_wait);
 		wake_up(&ac->mem_wait);
@@ -1705,7 +1808,11 @@
 		case ASM_STREAM_CMD_OPEN_LOOPBACK_V2:
 		case ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK:
 		case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2:
+		case ASM_DATA_CMD_IEC_60958_MEDIA_FMT:
 		case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+		case ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2:
+		case ASM_STREAM_CMD_REGISTER_ENCDEC_EVENTS:
+		case ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE:
 		case ASM_DATA_CMD_REMOVE_INITIAL_SILENCE:
 		case ASM_DATA_CMD_REMOVE_TRAILING_SILENCE:
 		case ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS:
@@ -1719,14 +1826,31 @@
 				pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
 					__func__, payload[0], payload[1]);
 				if (wakeup_flag) {
-					atomic_set(&ac->cmd_state, payload[1]);
+					if ((is_adsp_reg_event(payload[0]) >= 0)
+					      || (payload[0] ==
+					      ASM_STREAM_CMD_SET_PP_PARAMS_V2))
+						atomic_set(&ac->cmd_state_pp,
+								payload[1]);
+					else
+						atomic_set(&ac->cmd_state,
+								payload[1]);
 					wake_up(&ac->cmd_wait);
 				}
 				return 0;
 			}
-			if (atomic_read(&ac->cmd_state) && wakeup_flag) {
-				atomic_set(&ac->cmd_state, 0);
-				wake_up(&ac->cmd_wait);
+			if ((is_adsp_reg_event(payload[0]) >= 0) ||
+			    (payload[0] == ASM_STREAM_CMD_SET_PP_PARAMS_V2)) {
+				if (atomic_read(&ac->cmd_state_pp) &&
+					wakeup_flag) {
+					atomic_set(&ac->cmd_state_pp, 0);
+					wake_up(&ac->cmd_wait);
+				}
+			} else {
+				if (atomic_read(&ac->cmd_state) &&
+					wakeup_flag) {
+					atomic_set(&ac->cmd_state, 0);
+					wake_up(&ac->cmd_wait);
+				}
 			}
 			if (ac->cb)
 				ac->cb(data->opcode, data->token,
@@ -1773,6 +1897,17 @@
 							data->payload_size);
 			}
 			break;
+		case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+			pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS session %d opcode 0x%x token 0x%x src %d dest %d\n",
+				__func__, ac->session,
+				data->opcode, data->token,
+				data->src_port, data->dest_port);
+			if (payload[1] != 0)
+				pr_err("%s: ASM get param error = %d, resuming\n",
+					__func__, payload[1]);
+			atomic_set(&ac->cmd_state_pp, payload[1]);
+			wake_up(&ac->cmd_wait);
+			break;
 		default:
 			pr_debug("%s: command[0x%x] not expecting rsp\n",
 							__func__, payload[0]);
@@ -1944,6 +2079,39 @@
 	case ASM_SESSION_CMDRSP_GET_MTMX_STRTR_PARAMS_V2:
 		q6asm_process_mtmx_get_param_rsp(ac, (void *) payload);
 		break;
+	case ASM_STREAM_PP_EVENT:
+	case ASM_STREAM_CMD_ENCDEC_EVENTS:
+	case ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE:
+		pr_debug("%s: ASM_STREAM_EVENT payload[0][0x%x] payload[1][0x%x]",
+				 __func__, payload[0], payload[1]);
+		i = is_adsp_raise_event(data->opcode);
+		if (i < 0)
+			return 0;
+
+		/* repack payload for asm_stream_pp_event
+		 * package is composed of event type + size + actual payload
+		 */
+		payload_size = data->payload_size;
+		pp_event_package = kzalloc(payload_size
+				+ sizeof(struct msm_adsp_event_data),
+				GFP_ATOMIC);
+		if (!pp_event_package)
+			return -ENOMEM;
+
+		pp_event_package->event_type = i;
+		pp_event_package->payload_len = payload_size;
+		memcpy((void *)pp_event_package->payload,
+			data->payload, payload_size);
+		ac->cb(data->opcode, data->token,
+			(void *)pp_event_package, ac->priv);
+		kfree(pp_event_package);
+		return 0;
+	case ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2:
+		pr_debug("%s: ASM_SESSION_CMDRSP_ADJUST_SESSION_CLOCK_V2 sesion %d status 0x%x msw %u lsw %u\n",
+			 __func__, ac->session, payload[0], payload[2],
+			 payload[1]);
+		wake_up(&ac->cmd_wait);
+		break;
 	case ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2:
 		pr_debug("%s: ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2 session %d status 0x%x msw %u lsw %u\n",
 				__func__, ac->session, payload[0], payload[2],
@@ -2306,9 +2474,6 @@
 	open.src_endpointype = ASM_END_POINT_DEVICE_MATRIX;
 
 	open.preprocopo_id = q6asm_get_asm_topology_cal();
-	if ((open.preprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX) ||
-	    (open.preprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS))
-		open.preprocopo_id = ASM_STREAM_POSTPROCOPO_ID_NONE;
 	open.bits_per_sample = bits_per_sample;
 	open.mode_flags = 0x0;
 
@@ -2483,7 +2648,12 @@
 	case FORMAT_GEN_COMPR:
 		open.fmt_id = ASM_MEDIA_FMT_GENERIC_COMPRESSED;
 		break;
-
+	case FORMAT_TRUEHD:
+		open.fmt_id = ASM_MEDIA_FMT_TRUEHD;
+		break;
+	case FORMAT_IEC61937:
+		open.fmt_id = ASM_MEDIA_FMT_IEC;
+		break;
 	default:
 		pr_err("%s: Invalid format[%d]\n", __func__, format);
 		rc = -EINVAL;
@@ -2501,6 +2671,10 @@
 		open.flags = 0x8;
 		pr_debug("%s: Flag 8 - COMPRESSED_PASSTHROUGH_CONVERT\n",
 			 __func__);
+	} else if (passthrough_flag == COMPRESSED_PASSTHROUGH_IEC61937) {
+		open.flags = 0x1;
+		pr_debug("%s: Flag 1 - COMPRESSED_PASSTHROUGH_IEC61937\n",
+			 __func__);
 	} else {
 		pr_err("%s: Invalid passthrough type[%d]\n",
 			__func__, passthrough_flag);
@@ -2594,16 +2768,9 @@
 	open.bits_per_sample = bits_per_sample;
 
 	open.postprocopo_id = q6asm_get_asm_topology_cal();
-	if ((ac->perf_mode != LEGACY_PCM_MODE) &&
-	    ((open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX) ||
-	     (open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS)))
+	if (ac->perf_mode != LEGACY_PCM_MODE)
 		open.postprocopo_id = ASM_STREAM_POSTPROCOPO_ID_NONE;
 
-	/* For DTS EAGLE only, force 24 bit */
-	if ((open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX) ||
-	     (open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_HPX_PLUS))
-		open.bits_per_sample = 24;
-
 	pr_debug("%s: perf_mode %d asm_topology 0x%x bps %d\n", __func__,
 		 ac->perf_mode, open.postprocopo_id, open.bits_per_sample);
 
@@ -2827,10 +2994,6 @@
 	ac->topology = open.postprocopo_id;
 	ac->app_type = q6asm_get_asm_app_type_cal();
 
-	/* For DTS EAGLE only, force 24 bit */
-	if ((open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_DTS_HPX) ||
-	     (open.postprocopo_id == ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER))
-		open.bits_per_sample = 24;
 
 	switch (wr_format) {
 	case FORMAT_LINEAR_PCM:
@@ -3081,6 +3244,102 @@
 	return rc;
 }
 
+
+int q6asm_open_transcode_loopback(struct audio_client *ac,
+			uint16_t bits_per_sample,
+			uint32_t source_format, uint32_t sink_format)
+{
+	int rc = 0x00;
+	struct asm_stream_cmd_open_transcode_loopback_t open;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+	q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	open.hdr.opcode = ASM_STREAM_CMD_OPEN_TRANSCODE_LOOPBACK;
+
+	open.mode_flags = 0;
+	open.src_endpoint_type = 0;
+	open.sink_endpoint_type = 0;
+	switch (source_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.src_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+		break;
+	case FORMAT_AC3:
+		open.src_format_id = ASM_MEDIA_FMT_AC3;
+		break;
+	case FORMAT_EAC3:
+		open.src_format_id = ASM_MEDIA_FMT_EAC3;
+		break;
+	default:
+		pr_err("%s: Unsupported src fmt [%d]\n",
+		       __func__, source_format);
+		return -EINVAL;
+	}
+	switch (sink_format) {
+	case FORMAT_LINEAR_PCM:
+	case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+		open.sink_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3;
+		break;
+	default:
+		pr_err("%s: Unsupported sink fmt [%d]\n",
+		       __func__, sink_format);
+		return -EINVAL;
+	}
+
+	/* source endpoint : matrix */
+	open.audproc_topo_id = q6asm_get_asm_topology_cal();
+
+	ac->app_type = q6asm_get_asm_app_type_cal();
+	if (ac->perf_mode == LOW_LATENCY_PCM_MODE)
+		open.mode_flags |= ASM_LOW_LATENCY_STREAM_SESSION;
+	else
+		open.mode_flags |= ASM_LEGACY_STREAM_SESSION;
+	ac->topology = open.audproc_topo_id;
+	open.bits_per_sample = bits_per_sample;
+	open.reserved = 0;
+	pr_debug("%s: opening a transcode_loopback with mode_flags =[%d] session[%d]\n",
+		__func__, open.mode_flags, ac->session);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+	if (rc < 0) {
+		pr_err("%s: open failed op[0x%x]rc[%d]\n",
+				__func__, open.hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for open_transcode_loopback\n",
+			__func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+					atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+
+	return 0;
+fail_cmd:
+	return rc;
+}
+
 static
 int q6asm_set_shared_circ_buff(struct audio_client *ac,
 			       struct asm_stream_cmd_open_shared_io *open,
@@ -3131,11 +3390,12 @@
 	open->shared_circ_buf_start_phy_addr_lsw =
 			lower_32_bits(buf_circ->phys);
 	open->shared_circ_buf_start_phy_addr_msw =
-			upper_32_bits(buf_circ->phys);
+			msm_audio_populate_upper_32_bits(buf_circ->phys);
 	open->shared_circ_buf_size = bufsz * bufcnt;
 
 	open->map_region_circ_buf.shm_addr_lsw = lower_32_bits(buf_circ->phys);
-	open->map_region_circ_buf.shm_addr_msw = upper_32_bits(buf_circ->phys);
+	open->map_region_circ_buf.shm_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_circ->phys);
 	open->map_region_circ_buf.mem_size_bytes = bytes_to_alloc;
 
 	mutex_unlock(&ac->cmd_lock);
@@ -3177,10 +3437,12 @@
 	open->shared_pos_buf_num_regions = 1;
 	open->shared_pos_buf_property_flag = 0x00;
 	open->shared_pos_buf_phy_addr_lsw = lower_32_bits(buf_pos->phys);
-	open->shared_pos_buf_phy_addr_msw = upper_32_bits(buf_pos->phys);
+	open->shared_pos_buf_phy_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_pos->phys);
 
 	open->map_region_pos_buf.shm_addr_lsw = lower_32_bits(buf_pos->phys);
-	open->map_region_pos_buf.shm_addr_msw = upper_32_bits(buf_pos->phys);
+	open->map_region_pos_buf.shm_addr_msw =
+			msm_audio_populate_upper_32_bits(buf_pos->phys);
 	open->map_region_pos_buf.mem_size_bytes = bytes_to_alloc;
 
 done:
@@ -4184,6 +4446,20 @@
 			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
 		lchannel_mapping[5] = use_back_flavor ?
 			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+	} else if (channels == 7) {
+		/*
+		 * Configured for 5.1 channel mapping + 1 channel for debug
+		 * Can be customized based on DSP.
+		 */
+		lchannel_mapping[0] = PCM_CHANNEL_FL;
+		lchannel_mapping[1] = PCM_CHANNEL_FR;
+		lchannel_mapping[2] = PCM_CHANNEL_FC;
+		lchannel_mapping[3] = PCM_CHANNEL_LFE;
+		lchannel_mapping[4] = use_back_flavor ?
+			PCM_CHANNEL_LB : PCM_CHANNEL_LS;
+		lchannel_mapping[5] = use_back_flavor ?
+			PCM_CHANNEL_RB : PCM_CHANNEL_RS;
+		lchannel_mapping[6] = PCM_CHANNEL_CS;
 	} else if (channels == 8) {
 		lchannel_mapping[0] = PCM_CHANNEL_FL;
 		lchannel_mapping[1] = PCM_CHANNEL_FR;
@@ -5268,6 +5544,62 @@
 }
 EXPORT_SYMBOL(q6asm_media_format_block_gen_compr);
 
+
+/*
+ * q6asm_media_format_block_iec - set up IEC61937 (compressed) or IEC60958
+ *                                (pcm) format params. Both audio standards
+ *                                use the same format and are used for
+ *                                HDMI or SPDIF.
+ *
+ * @ac: Client session handle
+ * @rate: sample rate
+ * @channels: number of channels
+ */
+int q6asm_media_format_block_iec(struct audio_client *ac,
+				uint32_t rate, uint32_t channels)
+{
+	struct asm_iec_compressed_fmt_blk_t fmt;
+	int rc = 0;
+
+	pr_debug("%s: session[%d]rate[%d]ch[%d]\n",
+		 __func__, ac->session, rate,
+		 channels);
+
+	memset(&fmt, 0, sizeof(fmt));
+	q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+	fmt.hdr.opcode = ASM_DATA_CMD_IEC_60958_MEDIA_FMT;
+	fmt.num_channels = channels;
+	fmt.sampling_rate = rate;
+
+	atomic_set(&ac->cmd_state, -1);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+	if (rc < 0) {
+		pr_err("%s: Command open failed %d\n", __func__, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout. waited for format update\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+			__func__, adsp_err_get_err_str(
+			atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return rc;
+}
+EXPORT_SYMBOL(q6asm_media_format_block_iec);
+
 static int __q6asm_media_format_block_multi_aac(struct audio_client *ac,
 				struct asm_aac_cfg *cfg, int stream_id)
 {
@@ -6404,7 +6736,7 @@
 	memset(&multi_ch_gain, 0, sizeof(multi_ch_gain));
 	sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
 	q6asm_add_hdr_async(ac, &multi_ch_gain.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	multi_ch_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	multi_ch_gain.param.data_payload_addr_lsw = 0;
 	multi_ch_gain.param.data_payload_addr_msw = 0;
@@ -6430,20 +6762,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 				multi_ch_gain.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] , set-params paramid[0x%x]\n",
 					__func__, adsp_err_get_err_str(
-					atomic_read(&ac->cmd_state)),
+					atomic_read(&ac->cmd_state_pp)),
 					multi_ch_gain.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 	rc = 0;
@@ -6498,7 +6830,7 @@
 	memset(&multich_gain, 0, sizeof(multich_gain));
 	sz = sizeof(struct asm_volume_ctrl_multichannel_gain);
 	q6asm_add_hdr_async(ac, &multich_gain.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, 1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	multich_gain.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	multich_gain.param.data_payload_addr_lsw = 0;
 	multich_gain.param.data_payload_addr_msw = 0;
@@ -6536,17 +6868,17 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) <= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 				multich_gain.data.param_id);
 		rc = -EINVAL;
 		goto done;
 	}
-	if (atomic_read(&ac->cmd_state) < 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%d] , set-params paramid[0x%x]\n",
-					__func__, atomic_read(&ac->cmd_state),
-					multich_gain.data.param_id);
+		       __func__, atomic_read(&ac->cmd_state_pp),
+		       multich_gain.data.param_id);
 		rc = -EINVAL;
 		goto done;
 	}
@@ -6574,7 +6906,7 @@
 
 	sz = sizeof(struct asm_volume_ctrl_mute_config);
 	q6asm_add_hdr_async(ac, &mute.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	mute.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	mute.param.data_payload_addr_lsw = 0;
 	mute.param.data_payload_addr_msw = 0;
@@ -6596,20 +6928,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 				mute.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)),
+				atomic_read(&ac->cmd_state_pp)),
 				mute.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 	rc = 0;
@@ -6617,229 +6949,6 @@
 	return rc;
 }
 
-int q6asm_dts_eagle_set(struct audio_client *ac, int param_id, uint32_t size,
-			void *data, struct param_outband *po, int m_id)
-{
-	int rc = 0, *ob_params = NULL;
-	uint32_t sz = sizeof(struct asm_dts_eagle_param) + (po ? 0 : size);
-	struct asm_dts_eagle_param *ad;
-
-	if (!ac || ac->apr == NULL || (size == 0) || !data) {
-		pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %pK.\n",
-			__func__, size, data);
-		return -EINVAL;
-	}
-
-	ad = kzalloc(sz, GFP_KERNEL);
-	if (!ad)
-		return -ENOMEM;
-
-	pr_debug("DTS_EAGLE_ASM - %s: ac %pK param_id 0x%x size %u data %pK m_id 0x%x\n",
-		__func__, ac, param_id, size, data, m_id);
-	q6asm_add_hdr_async(ac, &ad->hdr, sz, 1);
-	ad->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
-	ad->param.data_payload_addr_lsw = 0;
-	ad->param.data_payload_addr_msw = 0;
-
-	ad->param.mem_map_handle = 0;
-	ad->param.data_payload_size = size +
-					sizeof(struct asm_stream_param_data_v2);
-	ad->data.module_id = m_id;
-	ad->data.param_id = param_id;
-	ad->data.param_size = size;
-	ad->data.reserved = 0;
-	atomic_set(&ac->cmd_state, -1);
-
-	if (po) {
-		struct list_head *ptr, *next;
-		struct asm_buffer_node *node;
-
-		pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %pK, physical %lu)\n",
-			__func__, po->kvaddr, (long)po->paddr);
-		ad->param.data_payload_addr_lsw = lower_32_bits(po->paddr);
-		ad->param.data_payload_addr_msw =
-				msm_audio_populate_upper_32_bits(po->paddr);
-		list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
-			node = list_entry(ptr, struct asm_buffer_node, list);
-			if (node->buf_phys_addr == po->paddr) {
-				ad->param.mem_map_handle = node->mmap_hdl;
-				break;
-			}
-		}
-		if (ad->param.mem_map_handle == 0) {
-			pr_err("DTS_EAGLE_ASM - %s: mem map handle not found\n",
-				__func__);
-			rc = -EINVAL;
-			goto fail_cmd;
-		}
-		/* check for integer overflow */
-		if (size > (UINT_MAX - APR_CMD_OB_HDR_SZ))
-			rc = -EINVAL;
-		if ((rc < 0) || (size + APR_CMD_OB_HDR_SZ > po->size)) {
-			pr_err("DTS_EAGLE_ASM - %s: ion alloc of size %zu too small for size requested %u\n",
-				__func__, po->size, size + APR_CMD_OB_HDR_SZ);
-			rc = -EINVAL;
-			goto fail_cmd;
-		}
-		ob_params = (int *)po->kvaddr;
-		*ob_params++ = m_id;
-		*ob_params++ = param_id;
-		*ob_params++ = size;
-		memcpy(ob_params, data, size);
-	} else {
-		pr_debug("DTS_EAGLE_ASM - %s: using in band\n", __func__);
-		memcpy(((char *)ad) + sizeof(struct asm_dts_eagle_param),
-			data, size);
-	}
-	rc = apr_send_pkt(ac->apr, (uint32_t *)ad);
-	if (rc < 0) {
-		pr_err("DTS_EAGLE_ASM - %s: set-params send failed paramid[0x%x]\n",
-			__func__, ad->data.param_id);
-		rc = -EINVAL;
-		goto fail_cmd;
-	}
-
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
-	if (!rc) {
-		pr_err("DTS_EAGLE_ASM - %s: timeout, set-params paramid[0x%x]\n",
-			__func__, ad->data.param_id);
-		rc = -ETIMEDOUT;
-		goto fail_cmd;
-	}
-
-	if (atomic_read(&ac->cmd_state) > 0) {
-		pr_err("%s: DSP returned error[%s]\n",
-				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)));
-		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
-		goto fail_cmd;
-	}
-	rc = 0;
-fail_cmd:
-	kfree(ad);
-	return rc;
-}
-
-int q6asm_dts_eagle_get(struct audio_client *ac, int param_id, uint32_t size,
-			void *data, struct param_outband *po, int m_id)
-{
-	struct asm_dts_eagle_param_get *ad;
-	int rc = 0, *ob_params = NULL;
-	uint32_t sz = sizeof(struct asm_dts_eagle_param) + APR_CMD_GET_HDR_SZ +
-		 (po ? 0 : size);
-
-	if (!ac || ac->apr == NULL || (size == 0) || !data) {
-		pr_err("DTS_EAGLE_ASM - %s: APR handle NULL, invalid size %u or pointer %pK\n",
-			__func__, size, data);
-		return -EINVAL;
-	}
-	ad = kzalloc(sz, GFP_KERNEL);
-	if (!ad)
-		return -ENOMEM;
-
-	pr_debug("DTS_EAGLE_ASM - %s: ac %pK param_id 0x%x size %u data %pK m_id 0x%x\n",
-		__func__, ac, param_id, size, data, m_id);
-	q6asm_add_hdr(ac, &ad->hdr, sz, TRUE);
-	ad->hdr.opcode = ASM_STREAM_CMD_GET_PP_PARAMS_V2;
-	ad->param.data_payload_addr_lsw = 0;
-	ad->param.data_payload_addr_msw = 0;
-	ad->param.mem_map_handle = 0;
-	ad->param.module_id = m_id;
-	ad->param.param_id = param_id;
-	ad->param.param_max_size = size + APR_CMD_GET_HDR_SZ;
-	ad->param.reserved = 0;
-	atomic_set(&ac->cmd_state, -1);
-
-	generic_get_data = kzalloc(size + sizeof(struct generic_get_data_),
-				   GFP_KERNEL);
-	if (!generic_get_data) {
-		rc = -ENOMEM;
-		goto fail_cmd;
-	}
-
-	if (po) {
-		struct list_head *ptr, *next;
-		struct asm_buffer_node *node;
-
-		pr_debug("DTS_EAGLE_ASM - %s: using out of band memory (virtual %pK, physical %lu)\n",
-			 __func__, po->kvaddr, (long)po->paddr);
-		ad->param.data_payload_addr_lsw = lower_32_bits(po->paddr);
-		ad->param.data_payload_addr_msw =
-				msm_audio_populate_upper_32_bits(po->paddr);
-		list_for_each_safe(ptr, next, &ac->port[IN].mem_map_handle) {
-			node = list_entry(ptr, struct asm_buffer_node, list);
-			if (node->buf_phys_addr == po->paddr) {
-				ad->param.mem_map_handle = node->mmap_hdl;
-				break;
-			}
-		}
-		if (ad->param.mem_map_handle == 0) {
-			pr_err("DTS_EAGLE_ASM - %s: mem map handle not found\n",
-				__func__);
-			rc = -EINVAL;
-			goto fail_cmd;
-		}
-		/* check for integer overflow */
-		if (size > (UINT_MAX - APR_CMD_OB_HDR_SZ))
-			rc = -EINVAL;
-		if ((rc < 0) || (size + APR_CMD_OB_HDR_SZ > po->size)) {
-			pr_err("DTS_EAGLE_ASM - %s: ion alloc of size %zu too small for size requested %u\n",
-				__func__, po->size, size + APR_CMD_OB_HDR_SZ);
-			rc = -EINVAL;
-			goto fail_cmd;
-		}
-		ob_params = (int *)po->kvaddr;
-		*ob_params++ = m_id;
-		*ob_params++ = param_id;
-		*ob_params++ = size;
-		generic_get_data->is_inband = 0;
-	} else {
-		pr_debug("DTS_EAGLE_ASM - %s: using in band\n", __func__);
-		generic_get_data->is_inband = 1;
-	}
-
-	rc = apr_send_pkt(ac->apr, (uint32_t *)ad);
-	if (rc < 0) {
-		pr_err("DTS_EAGLE_ASM - %s: Commmand 0x%x failed\n", __func__,
-			ad->hdr.opcode);
-		goto fail_cmd;
-	}
-
-	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
-	if (!rc) {
-		pr_err("DTS_EAGLE_ASM - %s: timeout in get\n",
-			__func__);
-		rc = -ETIMEDOUT;
-		goto fail_cmd;
-	}
-
-	if (atomic_read(&ac->cmd_state) > 0) {
-		pr_err("%s: DSP returned error[%s]\n",
-				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)));
-		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
-		goto fail_cmd;
-	}
-
-	if (generic_get_data->valid) {
-		rc = 0;
-		memcpy(data, po ? ob_params : generic_get_data->ints, size);
-	} else {
-		rc = -EINVAL;
-		pr_err("DTS_EAGLE_ASM - %s: EAGLE get params problem getting data - check callback error value\n",
-			__func__);
-	}
-fail_cmd:
-	kfree(ad);
-	kfree(generic_get_data);
-	generic_get_data = NULL;
-	return rc;
-}
-
 static int __q6asm_set_volume(struct audio_client *ac, int volume, int instance)
 {
 	struct asm_volume_ctrl_master_gain vol;
@@ -6870,7 +6979,7 @@
 
 	sz = sizeof(struct asm_volume_ctrl_master_gain);
 	q6asm_add_hdr_async(ac, &vol.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	vol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	vol.param.data_payload_addr_lsw = 0;
 	vol.param.data_payload_addr_msw = 0;
@@ -6892,20 +7001,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 				vol.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)),
+				atomic_read(&ac->cmd_state_pp)),
 				vol.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 
@@ -6987,6 +7096,156 @@
 	return rc;
 }
 
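+/*
+ * q6asm_send_ion_fd - import an ION buffer and register its physical
+ *                     address with the DSP as RTIC shared memory for
+ *                     this ASM session.
+ *
+ * @ac: audio client handle
+ * @fd: ION fd of the shared buffer exported to userspace
+ */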
+int q6asm_send_ion_fd(struct audio_client *ac, int fd)
+{
+	struct ion_client *client;
+	struct ion_handle *handle;
+	ion_phys_addr_t paddr;
+	size_t pa_len = 0;
+	void *vaddr;
+	int ret;
+	int sz = 0;
+	struct avs_rtic_shared_mem_addr shm;
+
+	if (ac == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	if (ac->apr == NULL) {
+		pr_err("%s: AC APR handle NULL\n", __func__);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = msm_audio_ion_import("audio_mem_client",
+				   &client,
+				   &handle,
+				   fd,
+				   NULL,
+				   0,
+				   &paddr,
+				   &pa_len,
+				   &vaddr);
+	if (ret) {
+		pr_err("%s: audio ION import failed, rc = %d\n",
+		       __func__, ret);
+		ret = -ENOMEM;
+		goto fail_cmd;
+	}
+	/* get payload length */
+	sz = sizeof(struct avs_rtic_shared_mem_addr);
+	q6asm_add_hdr_async(ac, &shm.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	shm.shm_buf_addr_lsw = lower_32_bits(paddr);
+	shm.shm_buf_addr_msw = msm_audio_populate_upper_32_bits(paddr);
+	shm.buf_size = pa_len;
+	shm.shm_buf_num_regions = 1;
+	shm.shm_buf_mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
+	shm.shm_buf_flag = 0x00;
+	shm.encdec.param_id = AVS_PARAM_ID_RTIC_SHARED_MEMORY_ADDR;
+	shm.encdec.param_size = sizeof(struct avs_rtic_shared_mem_addr) -
+						sizeof(struct apr_hdr) -
+			sizeof(struct asm_stream_cmd_set_encdec_param_v2);
+	shm.encdec.service_id = OUT;
+	shm.encdec.reserved = 0;
+	shm.map_region.shm_addr_lsw = shm.shm_buf_addr_lsw;
+	shm.map_region.shm_addr_msw = shm.shm_buf_addr_msw;
+	shm.map_region.mem_size_bytes = pa_len;
+	shm.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
+	ret = apr_send_pkt(ac->apr, (uint32_t *) &shm);
+	if (ret < 0) {
+		pr_err("%s: set-params send failed paramid[0x%x] rc %d\n",
+		       __func__, shm.encdec.param_id, ret);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
+	if (!ret) {
+		pr_err("%s: timeout, shm.encdec paramid[0x%x]\n", __func__,
+		       shm.encdec.param_id);
+		ret = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s] shm.encdec paramid[0x%x]\n",
+		       __func__,
+		       adsp_err_get_err_str(atomic_read(&ac->cmd_state)),
+		       shm.encdec.param_id);
+		ret = adsp_err_get_lnx_err_code(atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	ret = 0;
+fail_cmd:
+	return ret;
+}
+
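+/*
+ * q6asm_send_rtic_event_ack - acknowledge an RTIC event raised by the DSP
+ *                             by sending AVS_PARAM_ID_RTIC_EVENT_ACK with
+ *                             the caller supplied payload.
+ *
+ * @ac: audio client handle
+ * @param: ack payload appended after the encdec parameter header
+ * @params_length: size of the ack payload in bytes
+ */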
+int q6asm_send_rtic_event_ack(struct audio_client *ac,
+			      void *param, uint32_t params_length)
+{
+	char *asm_params = NULL;
+	int sz, rc;
+	struct avs_param_rtic_event_ack ack;
+
+	if (!param || !ac) {
+		pr_err("%s: %s is NULL\n", __func__,
+			(!param) ? "param" : "ac");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	sz = sizeof(struct avs_param_rtic_event_ack) + params_length;
+	asm_params = kzalloc(sz, GFP_KERNEL);
+	if (!asm_params) {
+		rc = -ENOMEM;
+		goto done;
+	}
+
+	q6asm_add_hdr_async(ac, &ack.hdr,
+			    sizeof(struct avs_param_rtic_event_ack) +
+			    params_length, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	ack.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
+	ack.encdec.param_id = AVS_PARAM_ID_RTIC_EVENT_ACK;
+	ack.encdec.param_size = params_length;
+	ack.encdec.reserved = 0;
+	ack.encdec.service_id = OUT;
+	memcpy(asm_params, &ack, sizeof(struct avs_param_rtic_event_ack));
+	memcpy(asm_params + sizeof(struct avs_param_rtic_event_ack),
+		param, params_length);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+	if (rc < 0) {
+		pr_err("%s: apr pkt failed for rtic event ack\n", __func__);
+		rc = -EINVAL;
+		goto fail_send_param;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) >= 0), 1 * HZ);
+	if (!rc) {
+		pr_err("%s: timeout for rtic event ack cmd\n", __func__);
+		rc = -ETIMEDOUT;
+		goto fail_send_param;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s] for rtic event ack cmd\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_send_param;
+	}
+	rc = 0;
+
+fail_send_param:
+	kfree(asm_params);
+done:
+	return rc;
+}
+
 int q6asm_set_softpause(struct audio_client *ac,
 			struct asm_softpause_params *pause_param)
 {
@@ -7007,7 +7266,7 @@
 
 	sz = sizeof(struct asm_soft_pause_params);
 	q6asm_add_hdr_async(ac, &softpause.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	softpause.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 
 	softpause.param.data_payload_addr_lsw = 0;
@@ -7034,20 +7293,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 						softpause.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)),
+				atomic_read(&ac->cmd_state_pp)),
 				softpause.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 	rc = 0;
@@ -7087,7 +7346,7 @@
 
 	sz = sizeof(struct asm_soft_step_volume_params);
 	q6asm_add_hdr_async(ac, &softvol.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	softvol.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	softvol.param.data_payload_addr_lsw = 0;
 	softvol.param.data_payload_addr_msw = 0;
@@ -7112,20 +7371,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 						softvol.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)),
+				atomic_read(&ac->cmd_state_pp)),
 				softvol.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 	rc = 0;
@@ -7174,7 +7433,7 @@
 	sz = sizeof(struct asm_eq_params);
 	eq_params = (struct msm_audio_eq_stream_config *) eq_p;
 	q6asm_add_hdr(ac, &eq.hdr, sz, TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 
 	eq.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	eq.param.data_payload_addr_lsw = 0;
@@ -7219,20 +7478,20 @@
 	}
 
 	rc = wait_event_timeout(ac->cmd_wait,
-			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+			(atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__,
 						eq.data.param_id);
 		rc = -ETIMEDOUT;
 		goto fail_cmd;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)),
+				atomic_read(&ac->cmd_state_pp)),
 				eq.data.param_id);
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_cmd;
 	}
 	rc = 0;
@@ -7850,7 +8109,7 @@
 	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
 				sizeof(struct asm_stream_cmd_set_pp_params_v2) +
 				params_length), TRUE);
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	payload_params.data_payload_addr_lsw = 0;
 	payload_params.data_payload_addr_msw = 0;
@@ -7869,18 +8128,18 @@
 		goto fail_send_param;
 	}
 	rc = wait_event_timeout(ac->cmd_wait,
-				(atomic_read(&ac->cmd_state) >= 0), 1*HZ);
+				(atomic_read(&ac->cmd_state_pp) >= 0), 1*HZ);
 	if (!rc) {
 		pr_err("%s: timeout, audio effects set-params\n", __func__);
 		rc = -ETIMEDOUT;
 		goto fail_send_param;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%s] set-params\n",
 				__func__, adsp_err_get_err_str(
-				atomic_read(&ac->cmd_state)));
+				atomic_read(&ac->cmd_state_pp)));
 		rc = adsp_err_get_lnx_err_code(
-				atomic_read(&ac->cmd_state));
+				atomic_read(&ac->cmd_state_pp));
 		goto fail_send_param;
 	}
 
@@ -8134,6 +8393,80 @@
 	return rc;
 }
 
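+/*
+ * q6asm_send_mtmx_strtr_enable_adjust_session_clock - enable or disable
+ * session time adjustment in the matrix/stream router AVSYNC module.
+ *
+ * @ac: audio client handle
+ * @enable: true to allow the DSP to adjust the session clock
+ */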
+int q6asm_send_mtmx_strtr_enable_adjust_session_clock(struct audio_client *ac,
+		bool enable)
+{
+	struct asm_mtmx_strtr_params matrix;
+	struct asm_session_mtmx_param_adjust_session_time_ctl_t adjust_time;
+	int sz = 0;
+	int rc  = 0;
+
+	pr_debug("%s: adjust session enable %d\n", __func__, enable);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	adjust_time.enable = enable;
+	memset(&matrix, 0, sizeof(struct asm_mtmx_strtr_params));
+	sz = sizeof(struct asm_mtmx_strtr_params);
+	q6asm_add_hdr(ac, &matrix.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	matrix.hdr.opcode = ASM_SESSION_CMD_SET_MTMX_STRTR_PARAMS_V2;
+
+	matrix.param.data_payload_addr_lsw = 0;
+	matrix.param.data_payload_addr_msw = 0;
+	matrix.param.mem_map_handle = 0;
+	matrix.param.data_payload_size =
+		sizeof(struct asm_stream_param_data_v2) +
+		sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t);
+	matrix.param.direction = 0; /* RX */
+	matrix.data.module_id = ASM_SESSION_MTMX_STRTR_MODULE_ID_AVSYNC;
+	matrix.data.param_id = ASM_SESSION_MTMX_PARAM_ADJUST_SESSION_TIME_CTL;
+	matrix.data.param_size =
+		sizeof(struct asm_session_mtmx_param_adjust_session_time_ctl_t);
+	matrix.data.reserved = 0;
+	matrix.config.adj_time_param.enable = adjust_time.enable;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &matrix);
+	if (rc < 0) {
+		pr_err("%s: enable adjust session send failed paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, enable adjust session paramid [0x%x]\n",
+			__func__, matrix.data.param_id);
+		rc = -ETIMEDOUT;
+		goto exit;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto exit;
+	}
+	rc = 0;
+exit:
+	return rc;
+}
+
 static int __q6asm_cmd(struct audio_client *ac, int cmd, uint32_t stream_id)
 {
 	struct apr_hdr hdr;
@@ -8523,6 +8856,68 @@
 	return -EINVAL;
 }
 
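+/*
+ * q6asm_adjust_session_clock - request a session clock adjustment via
+ * ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2.
+ *
+ * @ac: audio client handle
+ * @adjust_time_lsw: lower 32 bits of the adjustment value
+ * @adjust_time_msw: upper 32 bits of the adjustment value
+ */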
+int q6asm_adjust_session_clock(struct audio_client *ac,
+		uint32_t adjust_time_lsw,
+		uint32_t adjust_time_msw)
+{
+	int rc = 0;
+	int sz = 0;
+	struct asm_session_cmd_adjust_session_clock_v2 adjust_clock;
+
+	pr_debug("%s: adjust_time_lsw is %x, adjust_time_msw is %x\n", __func__,
+		  adjust_time_lsw, adjust_time_msw);
+
+	if (!ac) {
+		pr_err("%s: audio client handle is NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	if (ac->apr == NULL) {
+		pr_err("%s: ac->apr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	sz = sizeof(struct asm_session_cmd_adjust_session_clock_v2);
+	q6asm_add_hdr(ac, &adjust_clock.hdr, sz, TRUE);
+	atomic_set(&ac->cmd_state, -1);
+	adjust_clock.hdr.opcode = ASM_SESSION_CMD_ADJUST_SESSION_CLOCK_V2;
+
+	adjust_clock.adjustime_lsw = adjust_time_lsw;
+	adjust_clock.adjustime_msw = adjust_time_msw;
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &adjust_clock);
+	if (rc < 0) {
+		pr_err("%s: adjust_clock send failed paramid [0x%x]\n",
+			__func__, adjust_clock.hdr.opcode);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) >= 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout, adjust_clock paramid[0x%x]\n",
+			__func__, adjust_clock.hdr.opcode);
+		rc = -ETIMEDOUT;
+		goto fail_cmd;
+	}
+
+	if (atomic_read(&ac->cmd_state) > 0) {
+		pr_err("%s: DSP returned error[%s]\n",
+				__func__, adsp_err_get_err_str(
+				atomic_read(&ac->cmd_state)));
+		rc = adsp_err_get_lnx_err_code(
+				atomic_read(&ac->cmd_state));
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	return rc;
+}
+
 /*
  * q6asm_get_path_delay() - get the path delay for an audio session
  * @ac: audio client handle
@@ -8733,7 +9128,7 @@
 	q6asm_add_hdr_async(ac, &hdr, (sizeof(struct apr_hdr) +
 		sizeof(struct asm_stream_cmd_set_pp_params_v2)), TRUE);
 
-	atomic_set(&ac->cmd_state, -1);
+	atomic_set(&ac->cmd_state_pp, -1);
 	hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2;
 	payload_params.data_payload_addr_lsw =
 			lower_32_bits(cal_block->cal_data.paddr);
@@ -8759,15 +9154,15 @@
 		goto free;
 	}
 	rc = wait_event_timeout(ac->cmd_wait,
-				(atomic_read(&ac->cmd_state) >= 0), 5 * HZ);
+				(atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ);
 	if (!rc) {
 		pr_err("%s: timeout, audio audstrm cal send\n", __func__);
 		rc = -ETIMEDOUT;
 		goto free;
 	}
-	if (atomic_read(&ac->cmd_state) > 0) {
+	if (atomic_read(&ac->cmd_state_pp) > 0) {
 		pr_err("%s: DSP returned error[%d] audio audstrm cal send\n",
-				__func__, atomic_read(&ac->cmd_state));
+				__func__, atomic_read(&ac->cmd_state_pp));
 		rc = -EINVAL;
 		goto free;
 	}
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index d6ad97d..f6675a2 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -168,7 +168,7 @@
 			generic_get_data->valid = 1;
 			generic_get_data->size_in_ints =
 				data->payload_size/sizeof(int);
-			pr_debug("DTS_EAGLE_CORE callback size = %i\n",
+			pr_debug("callback size = %i\n",
 				 data->payload_size);
 			memcpy(generic_get_data->ints, data->payload,
 				data->payload_size);
@@ -350,119 +350,6 @@
 	return ret;
 }
 
-int core_dts_eagle_set(int size, char *data)
-{
-	struct adsp_dts_eagle *payload = NULL;
-	int rc = 0, size_aligned4byte;
-
-	pr_debug("DTS_EAGLE_CORE - %s\n", __func__);
-	if (size <= 0 || !data) {
-		pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %pK.\n",
-			__func__, size, data);
-		return -EINVAL;
-	}
-
-	size_aligned4byte = (size+3) & 0xFFFFFFFC;
-	mutex_lock(&(q6core_lcl.cmd_lock));
-	ocm_core_open();
-	if (q6core_lcl.core_handle_q) {
-		payload = kzalloc(sizeof(struct adsp_dts_eagle) +
-				  size_aligned4byte, GFP_KERNEL);
-		if (!payload) {
-			rc = -ENOMEM;
-			goto exit;
-		}
-		payload->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
-						APR_HDR_LEN(APR_HDR_SIZE),
-						APR_PKT_VER);
-		payload->hdr.pkt_size = sizeof(struct adsp_dts_eagle) +
-					       size_aligned4byte;
-		payload->hdr.src_port = 0;
-		payload->hdr.dest_port = 0;
-		payload->hdr.token = 0;
-		payload->hdr.opcode = ADSP_CMD_SET_DTS_EAGLE_DATA_ID;
-		payload->id = DTS_EAGLE_LICENSE_ID;
-		payload->overwrite = 1;
-		payload->size = size;
-		memcpy(payload->data, data, size);
-		rc = apr_send_pkt(q6core_lcl.core_handle_q,
-				(uint32_t *)payload);
-		if (rc < 0) {
-			pr_err("DTS_EAGLE_CORE - %s: failed op[0x%x]rc[%d]\n",
-				__func__, payload->hdr.opcode, rc);
-		}
-		kfree(payload);
-	}
-
-exit:
-	mutex_unlock(&(q6core_lcl.cmd_lock));
-	return rc;
-}
-
-int core_dts_eagle_get(int id, int size, char *data)
-{
-	struct apr_hdr ah;
-	int rc = 0;
-
-	pr_debug("DTS_EAGLE_CORE - %s\n", __func__);
-	if (size <= 0 || !data) {
-		pr_err("DTS_EAGLE_CORE - %s: invalid size %i or pointer %pK.\n",
-			__func__, size, data);
-		return -EINVAL;
-	}
-	mutex_lock(&(q6core_lcl.cmd_lock));
-	ocm_core_open();
-	if (q6core_lcl.core_handle_q) {
-		ah.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
-				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-		ah.pkt_size = sizeof(struct apr_hdr);
-		ah.src_port = 0;
-		ah.dest_port = 0;
-		ah.token = 0;
-		ah.opcode = id;
-
-		q6core_lcl.bus_bw_resp_received = 0;
-		generic_get_data = kzalloc(sizeof(struct generic_get_data_)
-					   + size, GFP_KERNEL);
-		if (!generic_get_data) {
-			rc = -ENOMEM;
-			goto exit;
-		}
-
-		rc = apr_send_pkt(q6core_lcl.core_handle_q,
-				(uint32_t *)&ah);
-		if (rc < 0) {
-			pr_err("DTS_EAGLE_CORE - %s: failed op[0x%x]rc[%d]\n",
-				__func__, ah.opcode, rc);
-			goto exit;
-		}
-
-		rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
-				(q6core_lcl.bus_bw_resp_received == 1),
-				msecs_to_jiffies(TIMEOUT_MS));
-		if (!rc) {
-			pr_err("DTS_EAGLE_CORE - %s: EAGLE get params timed out\n",
-				__func__);
-			rc = -EINVAL;
-			goto exit;
-		}
-		if (generic_get_data->valid) {
-			rc = 0;
-			memcpy(data, generic_get_data->ints, size);
-		} else {
-			rc = -EINVAL;
-			pr_err("DTS_EAGLE_CORE - %s: EAGLE get params problem getting data - check callback error value\n",
-				__func__);
-		}
-	}
-
-exit:
-	kfree(generic_get_data);
-	generic_get_data = NULL;
-	mutex_unlock(&(q6core_lcl.cmd_lock));
-	return rc;
-}
-
 uint32_t core_set_dolby_manufacturer_id(int manufacturer_id)
 {
 	struct adsp_dolby_manufacturer_id payload;
@@ -496,7 +383,7 @@
 
 bool q6core_is_adsp_ready(void)
 {
-	int rc;
+	int rc = 0;
 	bool ret = false;
 	struct apr_hdr hdr;
 
@@ -509,21 +396,23 @@
 
 	mutex_lock(&(q6core_lcl.cmd_lock));
 	ocm_core_open();
-	q6core_lcl.bus_bw_resp_received = 0;
-	rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
-	if (rc < 0) {
-		pr_err("%s: Get ADSP state APR packet send event %d\n",
-			__func__, rc);
-		goto bail;
-	}
+	if (q6core_lcl.core_handle_q) {
+		q6core_lcl.bus_bw_resp_received = 0;
+		rc = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *)&hdr);
+		if (rc < 0) {
+			pr_err("%s: Get ADSP state APR packet send event %d\n",
+				__func__, rc);
+			goto bail;
+		}
 
-	rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
-				(q6core_lcl.bus_bw_resp_received == 1),
-				msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
-	if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
-		/* ensure to read updated param by callback thread */
-		rmb();
-		ret = !!q6core_lcl.param;
+		rc = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+					(q6core_lcl.bus_bw_resp_received == 1),
+					msecs_to_jiffies(Q6_READY_TIMEOUT_MS));
+		if (rc > 0 && q6core_lcl.bus_bw_resp_received) {
+			/* ensure to read updated param by callback thread */
+			rmb();
+			ret = !!q6core_lcl.param;
+		}
 	}
 bail:
 	pr_debug("%s: leave, rc %d, adsp ready %d\n", __func__, rc, ret);
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index 08ddde4..799d1be 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -1240,14 +1240,12 @@
 	mutex_lock(&lsm_common.cal_data[LSM_CAL_IDX]->lock);
 	cal_block = cal_utils_get_only_cal_block(
 		lsm_common.cal_data[LSM_CAL_IDX]);
-	if (cal_block == NULL)
-		goto unlock;
 
-	if (cal_block->cal_data.size <= 0) {
+	if (!cal_block || cal_block->cal_data.size <= 0) {
 		pr_debug("%s: No cal to send!\n", __func__);
-		rc = -EINVAL;
 		goto unlock;
 	}
+
 	if (cal_block->cal_data.size != client->lsm_cal_size) {
 		pr_err("%s: Cal size %zd doesn't match lsm cal size %d\n",
 			__func__, cal_block->cal_data.size,
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index b829c65..15c9e13 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -478,8 +478,10 @@
 
 	for (i = 0; i < MAX_VOC_SESSIONS; i++) {
 		v = &common.voice[i];
-		if (v != NULL)
+		if (v != NULL) {
 			v->voc_state = VOC_ERROR;
+			v->rec_info.recording = 0;
+		}
 	}
 }
 
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index f1fbce3..eddcb45 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -2412,9 +2412,6 @@
 		mi2s_clk[dai_id].clk_freq_in_hz =
 		    mi2s_tx_cfg[dai_id].sample_rate * 2 * bit_per_sample;
 	}
-
-	if (!mi2s_intf_conf[dai_id].msm_is_mi2s_master)
-		mi2s_clk[dai_id].clk_freq_in_hz = 0;
 }
 
 static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable)
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index b603b8a..2c3d7fc 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -1609,6 +1609,9 @@
 		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
+	} else {
+		snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT1");
+		snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT2");
 	}
 
 	snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index b924cad..802137b 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -1301,15 +1301,6 @@
 	msm_anlg_cdc_spk_ext_pa_cb(enable_spk_ext_pa, ana_cdc);
 	msm_dig_cdc_hph_comp_cb(msm_config_hph_compander_gpio, dig_cdc);
 
-	mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
-	if (mbhc_cfg_ptr->calibration) {
-		ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
-		if (ret) {
-			pr_err("%s: msm_anlg_cdc_hs_detect failed\n", __func__);
-			kfree(mbhc_cfg_ptr->calibration);
-			return ret;
-		}
-	}
 	card = rtd->card->snd_card;
 	if (!codec_root)
 		codec_root = snd_register_module_info(card->module, "codecs",
@@ -1569,6 +1560,36 @@
 	return ret;
 }
 
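+/*
+ * Headset detection is started from the card's late_probe, once the
+ * INT0 MI2S RX back end and the analog codec DAI are registered, instead
+ * of from the codec init path.
+ */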
+static int msm_snd_card_late_probe(struct snd_soc_card *card)
+{
+	const char *be_dl_name = LPASS_BE_INT0_MI2S_RX;
+	struct snd_soc_codec *ana_cdc;
+	struct snd_soc_pcm_runtime *rtd;
+	int ret = 0;
+
+	rtd = snd_soc_get_pcm_runtime(card, be_dl_name);
+	if (!rtd) {
+		dev_err(card->dev,
+			"%s: snd_soc_get_pcm_runtime for %s failed!\n",
+			__func__, be_dl_name);
+		return -EINVAL;
+	}
+
+	ana_cdc = rtd->codec_dais[ANA_CDC]->codec;
+	mbhc_cfg_ptr->calibration = def_msm_int_wcd_mbhc_cal();
+	if (!mbhc_cfg_ptr->calibration)
+		return -ENOMEM;
+
+	ret = msm_anlg_cdc_hs_detect(ana_cdc, mbhc_cfg_ptr);
+	if (ret) {
+		dev_err(card->dev,
+			"%s: msm_anlg_cdc_hs_detect failed\n", __func__);
+		kfree(mbhc_cfg_ptr->calibration);
+	}
+
+	return ret;
+}
+
 static struct snd_soc_ops msm_tdm_be_ops = {
 	.hw_params = msm_tdm_snd_hw_params
 };
@@ -2930,6 +2951,7 @@
 	.name		= "sdm660-snd-card",
 	.dai_link	= msm_int_dai,
 	.num_links	= ARRAY_SIZE(msm_int_dai),
+	.late_probe	= msm_snd_card_late_probe,
 };
 
 static void msm_disable_int_mclk0(struct work_struct *work)
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 130cc56..e699760 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -3934,6 +3934,13 @@
 		ret = -EINVAL;
 		goto err;
 	}
+
+	if (pinctrl_info->pinctrl == NULL) {
+		pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
 	curr_state = pinctrl_info->curr_state;
 	pinctrl_info->curr_state = new_state;
 	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4202,6 +4209,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -4216,12 +4224,10 @@
 		goto err;
 	}
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
-		if (ret) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
-			goto err;
-		}
+				__func__, ret_pinctrl);
 	}
 	/*
 	 * Muxtex protection in case the same MI2S
@@ -4278,6 +4284,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -4298,10 +4305,10 @@
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
-		if (ret)
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
+				__func__, ret_pinctrl);
 	}
 }
 
@@ -5530,6 +5537,22 @@
 		.ignore_pmdown_time = 1,
 		.ignore_suspend = 1,
 	},
+	/* Slimbus VI Recording */
+	{
+		.name = LPASS_BE_SLIMBUS_TX_VI,
+		.stream_name = "Slimbus4 Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16393",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "tavil_codec",
+		.codec_dai_name = "tavil_vifeedback",
+		.id = MSM_BACKEND_DAI_SLIMBUS_4_TX,
+		.be_hw_params_fixup = msm_be_hw_params_fixup,
+		.ops = &msm_be_ops,
+		.ignore_suspend = 1,
+		.no_pcm = 1,
+		.dpcm_capture = 1,
+		.ignore_pmdown_time = 1,
+	},
 };
 
 static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e6a67cdd..6b23bf5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2093,6 +2093,9 @@
 	list_for_each_entry(rtd, &card->rtd_list, list)
 		flush_delayed_work(&rtd->delayed_work);
 
+	/* free the ALSA card first; this syncs with pending operations */
+	snd_card_free(card->snd_card);
+
 	/* remove and free each DAI */
 	soc_remove_dai_links(card);
 	soc_remove_pcm_runtimes(card);
@@ -2107,9 +2110,7 @@
 	if (card->remove)
 		card->remove(card);
 
-	snd_card_free(card->snd_card);
 	return 0;
-
 }
 
 /* removes a socdev */
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index d2ac038..083887b 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,7 +13,8 @@
 			pcm.o \
 			proc.o \
 			quirks.o \
-			stream.o
+			stream.o \
+			badd.o
 
 snd-usbmidi-lib-objs := midi.o
 
diff --git a/sound/usb/badd.c b/sound/usb/badd.c
new file mode 100644
index 0000000..cc6c26c
--- /dev/null
+++ b/sound/usb/badd.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
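+/*
+ * BADD devices do not carry class-specific descriptors, so provide the
+ * fixed UAC3 terminal, unit and clock source descriptors that the mixer
+ * and stream parsing code expect.
+ */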
+struct uac3_input_terminal_descriptor badd_baif_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_input_terminal_descriptor badd_baof_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAOF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baif_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAIF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,		/* No associated terminal */
+	.bSourceID = BADD_FU_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,	/* No controls */
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baof_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAOF,
+	.bSourceID = BADD_FU_ID_BAOF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+__u8 monoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00};
+
+__u8 stereoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00
+};
+
+__u8 badd_mu_src_ids[] = {BADD_IN_TERM_ID_BAOF, BADD_FU_ID_BAIOF};
+
+struct uac3_mixer_unit_descriptor badd_baiof_mu_desc = {
+	.bLength = UAC3_DT_MIXER_UNIT_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_MIXER_UNIT_V3,
+	.bUnitID = BADD_MU_ID_BAIOF,
+	.bNrInPins = 0x02,
+	.baSourceID = badd_mu_src_ids,
+	.bmMixerControls = 0x00,
+	.bmControls = 0x00000000,
+	.wMixerDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baif_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baof_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAOF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baiof_fu_desc = {
+	.bLength = 0x0f,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIOF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.bmaControls = monoControls,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_clock_source_descriptor badd_clock_desc = {
+	.bLength = UAC3_DT_CLOCK_SRC_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_CLOCK_SOURCE,
+	.bClockID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000001,
+	.bReferenceTerminal = 0x00,
+	.wClockSourceStr = 0x0000
+};
+
+void *badd_desc_list[] = {
+	&badd_baif_in_term_desc,
+	&badd_baof_in_term_desc,
+	&badd_baiof_mu_desc,
+	&badd_baif_fu_desc,
+	&badd_baof_fu_desc,
+	&badd_baiof_fu_desc,
+	&badd_clock_desc
+};
+
diff --git a/sound/usb/card.c b/sound/usb/card.c
index ccf06de..eaf18aa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -45,6 +45,7 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
 #include <linux/module.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/control.h>
 #include <sound/core.h>
@@ -285,7 +286,6 @@
 	struct usb_host_interface *host_iface;
 	struct usb_interface_descriptor *altsd;
 	struct usb_interface *usb_iface;
-	void *control_header;
 	int i, protocol;
 
 	usb_iface = usb_ifnum_to_if(dev, ctrlif);
@@ -302,16 +302,13 @@
 		return -EINVAL;
 	}
 
-	control_header = snd_usb_find_csint_desc(host_iface->extra,
-						 host_iface->extralen,
-						 NULL, UAC_HEADER);
 	altsd = get_iface_desc(host_iface);
 	protocol = altsd->bInterfaceProtocol;
 
-	if (!control_header) {
-		dev_err(&dev->dev, "cannot find UAC_HEADER\n");
-		return -EINVAL;
-	}
+	/*
+	 * UAC 1.0 devices use AC HEADER Desc for linking AS interfaces;
+	 * UAC 2.0 and 3.0 devices use IAD for linking AS interfaces
+	 */
 
 	switch (protocol) {
 	default:
@@ -321,8 +318,17 @@
 		/* fall through */
 
 	case UAC_VERSION_1: {
-		struct uac1_ac_header_descriptor *h1 = control_header;
+		void *control_header;
+		struct uac1_ac_header_descriptor *h1;
 
+		control_header = snd_usb_find_csint_desc(host_iface->extra,
+					host_iface->extralen, NULL, UAC_HEADER);
+		if (!control_header) {
+			dev_err(&dev->dev, "cannot find UAC_HEADER\n");
+			return -EINVAL;
+		}
+
+		h1 = control_header;
 		if (!h1->bInCollection) {
 			dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
 			return -EINVAL;
@@ -339,7 +345,8 @@
 		break;
 	}
 
-	case UAC_VERSION_2: {
+	case UAC_VERSION_2:
+	case UAC_VERSION_3: {
 		struct usb_interface_assoc_descriptor *assoc =
 						usb_iface->intf_assoc;
 		if (!assoc) {
@@ -358,7 +365,8 @@
 		}
 
 		if (!assoc) {
-			dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
+			dev_err(&dev->dev, "Audio class V%d interfaces need an interface association\n",
+					protocol);
 			return -EINVAL;
 		}
 
@@ -606,6 +614,15 @@
 	struct usb_host_interface *alts;
 	int ifnum;
 	u32 id;
+	struct usb_interface_assoc_descriptor *assoc;
+
+	assoc = intf->intf_assoc;
+	if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
+	    assoc->bFunctionProtocol == UAC_VERSION_3 &&
+	    assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+		dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
+		return -EINVAL;
+	}
 
 	alts = &intf->altsetting[0];
 	ifnum = get_iface_desc(alts)->bInterfaceNumber;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 26dd5f2..8238180 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -428,6 +428,10 @@
 
 	case UAC_VERSION_2:
 		return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+
+	/* Clock rate is fixed at 48 kHz for BADD devices */
+	case UAC_VERSION_3:
+		return 0;
 	}
 }
 
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 2c44386..eaf2615 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -69,6 +70,34 @@
 		format <<= 1;
 		break;
 	}
+
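+	/*
+	 * BADD: the sample size is implied by the fixed wMaxPacketSize
+	 * values defined for each mono/stereo, sync/async configuration.
+	 */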
+	case UAC_VERSION_3: {
+		switch (fp->maxpacksize) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			sample_width = BIT_RES_16_BIT;
+			sample_bytes = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			sample_width = BIT_RES_24_BIT;
+			sample_bytes = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+		default:
+			usb_audio_err(chip, "%u:%d : Invalid wMaxPacketSize\n",
+				fp->iface, fp->altsetting);
+			return pcm_formats;
+		}
+		format = 1 << format;
+		break;
+	}
 	}
 
 	if ((pcm_formats == 0) &&
@@ -364,17 +393,34 @@
 	return ret;
 }
 
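+/*
+ * BADD streams run at a single fixed sample rate, so build a one-entry
+ * rate table instead of parsing rate descriptors.
+ */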
+static int badd_set_audio_rate_v3(struct snd_usb_audio *chip,
+		   struct audioformat *fp)
+{
+	unsigned int rate;
+
+	fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+	if (fp->rate_table == NULL)
+		return -ENOMEM;
+
+	fp->nr_rates = 1;
+	rate = BADD_SAMPLING_RATE;
+	fp->rate_min = fp->rate_max = fp->rate_table[0] = rate;
+	fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+	return 0;
+}
+
 /*
  * parse the format type I and III descriptors
  */
 static int parse_audio_format_i(struct snd_usb_audio *chip,
 				struct audioformat *fp, unsigned int format,
+				u8 format_type,
 				struct uac_format_type_i_continuous_descriptor *fmt)
 {
 	snd_pcm_format_t pcm_format;
 	int ret;
 
-	if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
+	if (format_type == UAC_FORMAT_TYPE_III) {
 		/* FIXME: the format type is really IECxxx
 		 *        but we give normal PCM format to get the existing
 		 *        apps working...
@@ -413,6 +459,9 @@
 		/* fp->channels is already set in this case */
 		ret = parse_audio_format_rates_v2(chip, fp);
 		break;
+	case UAC_VERSION_3:
+		ret = badd_set_audio_rate_v3(chip, fp);
+		break;
 	}
 
 	if (fp->channels < 1) {
@@ -484,11 +533,18 @@
 			       int stream)
 {
 	int err;
+	int format_type = -EINVAL;
 
-	switch (fmt->bFormatType) {
+	if ((fp->protocol == UAC_VERSION_1) ||
+			(fp->protocol == UAC_VERSION_2))
+		format_type = fmt->bFormatType;
+	else
+		format_type = UAC_FORMAT_TYPE_I; /* only BADD is supported */
+
+	switch (format_type) {
 	case UAC_FORMAT_TYPE_I:
 	case UAC_FORMAT_TYPE_III:
-		err = parse_audio_format_i(chip, fp, format, fmt);
+		err = parse_audio_format_i(chip, fp, format, format_type, fmt);
 		break;
 	case UAC_FORMAT_TYPE_II:
 		err = parse_audio_format_ii(chip, fp, format, fmt);
@@ -497,10 +553,10 @@
 		usb_audio_info(chip,
 			 "%u:%d : format type %d is not supported yet\n",
 			 fp->iface, fp->altsetting,
-			 fmt->bFormatType);
+			 format_type);
 		return -ENOTSUPP;
 	}
-	fp->fmt_type = fmt->bFormatType;
+	fp->fmt_type = format_type;
 	if (err < 0)
 		return err;
 #if 1
@@ -511,7 +567,7 @@
 	if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
 	    chip->usb_id == USB_ID(0x041e, 0x3020) ||
 	    chip->usb_id == USB_ID(0x041e, 0x3061)) {
-		if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
+		if (format_type == UAC_FORMAT_TYPE_I &&
 		    fp->rates != SNDRV_PCM_RATE_48000 &&
 		    fp->rates != SNDRV_PCM_RATE_96000)
 			return -ENOTSUPP;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 932ce3e..c3bf5ff 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -51,6 +51,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -185,6 +186,17 @@
 	/* we just parse the header */
 	struct uac_feature_unit_descriptor *hdr = NULL;
 
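+	/*
+	 * For UAC3 BADD devices the units are not described on the wire;
+	 * look the unit up in the static BADD descriptor list instead.
+	 */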
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		int i;
+
+		for (i = 0; i < NUM_BADD_DESCS; i++) {
+			hdr = (void *)badd_desc_list[i];
+			if (hdr->bUnitID == unit)
+				return hdr;
+		}
+
+		return NULL;
+	}
 	while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
 					USB_DT_CS_INTERFACE)) != NULL) {
 		if (hdr->bLength >= 4 &&
@@ -718,7 +730,7 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le16_to_cpu(d->wChannelConfig);
 				term->name = d->iTerminal;
-			} else { /* UAC_VERSION_2 */
+			} else if (state->mixer->protocol == UAC_VERSION_2) {
 				struct uac2_input_terminal_descriptor *d = p1;
 
 				/* call recursively to verify that the
@@ -735,6 +747,24 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
+			} else { /* UAC_VERSION_3 */
+				struct uac3_input_terminal_descriptor *d = p1;
+
+				err = check_input_term(state,
+							d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				term->id = id;
+				term->type = d->wTerminalType;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wTerminalDescrStr;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -752,41 +782,81 @@
 			return 0;
 		}
 		case UAC_SELECTOR_UNIT:
-		case UAC2_CLOCK_SELECTOR: {
-			struct uac_selector_unit_descriptor *d = p1;
-			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
-			if (err < 0)
-				return err;
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
-			term->id = id;
-			term->name = uac_selector_unit_iSelector(d);
+		/* UAC3_MIXER_UNIT_V3 */
+		case UAC2_CLOCK_SELECTOR:
+		/* UAC3_CLOCK_SOURCE */ {
+			if (state->mixer->protocol == UAC_VERSION_3
+				&& hdr[2] == UAC3_CLOCK_SOURCE) {
+				struct uac3_clock_source_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				term->id = id;
+				term->name = d->wClockSourceStr;
+			} else if (state->mixer->protocol == UAC_VERSION_3
+					&& hdr[2] == UAC3_MIXER_UNIT_V3) {
+				struct uac3_mixer_unit_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wMixerDescrStr;
+			} else {
+				struct uac_selector_unit_descriptor *d = p1;
+				/* call recursively to retrieve channel info */
+				err = check_input_term(state,
+							d->baSourceID[0], term);
+				if (err < 0)
+					return err;
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
+				term->id = id;
+				term->name = uac_selector_unit_iSelector(d);
+			}
 			return 0;
 		}
 		case UAC1_PROCESSING_UNIT:
 		case UAC1_EXTENSION_UNIT:
 		/* UAC2_PROCESSING_UNIT_V2 */
 		/* UAC2_EFFECT_UNIT */
+		/* UAC3_FEATURE_UNIT_V3 */
 		case UAC2_EXTENSION_UNIT_V2: {
-			struct uac_processing_unit_descriptor *d = p1;
+			if (state->mixer->protocol == UAC_VERSION_3) {
+				struct uac_feature_unit_descriptor *d = p1;
 
-			if (state->mixer->protocol == UAC_VERSION_2 &&
-				hdr[2] == UAC2_EFFECT_UNIT) {
-				/* UAC2/UAC1 unit IDs overlap here in an
-				 * uncompatible way. Ignore this unit for now.
-				 */
+				id = d->bSourceID;
+			} else {
+				struct uac_processing_unit_descriptor *d = p1;
+
+				if (state->mixer->protocol == UAC_VERSION_2 &&
+					hdr[2] == UAC2_EFFECT_UNIT) {
+					/* UAC2/UAC1 unit IDs overlap here in an
+					 * incompatible way. Ignore this unit
+					 * for now.
+					 */
+					return 0;
+				}
+
+				if (d->bNrInPins) {
+					id = d->baSourceID[0];
+					break; /* continue to parse */
+				}
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
+				term->channels =
+					uac_processing_unit_bNrChannels(d);
+				term->chconfig =
+					uac_processing_unit_wChannelConfig(
+						d, state->mixer->protocol);
+				term->name = uac_processing_unit_iProcessing(
+						d, state->mixer->protocol);
 				return 0;
 			}
-
-			if (d->bNrInPins) {
-				id = d->baSourceID[0];
-				break; /* continue to parse */
-			}
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
-			term->channels = uac_processing_unit_bNrChannels(d);
-			term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
-			term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
-			return 0;
+			break;
 		}
 		case UAC2_CLOCK_SOURCE: {
 			struct uac_clock_source_descriptor *d = p1;
@@ -1233,12 +1303,18 @@
 	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
-	int nameid = uac_feature_unit_iFeature(desc);
+	int nameid;
 	struct snd_kcontrol *kctl;
 	struct usb_mixer_elem_info *cval;
 	const struct usbmix_name_map *map;
 	unsigned int range;
 
+	if (state->mixer->protocol == UAC_VERSION_3)
+		nameid = ((struct uac3_feature_unit_descriptor *)
+				raw_desc)->wFeatureDescrStr;
+	else
+		nameid = uac_feature_unit_iFeature(desc);
+
 	control++; /* change from zero-based to 1-based value */
 
 	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
@@ -1259,7 +1335,7 @@
 	ctl_info = &audio_feature_info[control-1];
 	if (state->mixer->protocol == UAC_VERSION_1)
 		cval->val_type = ctl_info->type;
-	else /* UAC_VERSION_2 */
+	else /* UAC_VERSION_2 or UAC_VERSION_3 */
 		cval->val_type = ctl_info->type_uac2 >= 0 ?
 			ctl_info->type_uac2 : ctl_info->type;
 
@@ -1447,6 +1523,62 @@
 	return snd_usb_mixer_add_control(&cval->head, kctl);
 }
 
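+/*
+ * Infer the channel count of a BADD streaming interface in the same
+ * interface association: walk the alternate settings of the interface
+ * matching the requested direction and compare wMaxPacketSize against the
+ * known BADD packet sizes to tell mono from stereo. Returns a negative
+ * error code when no alternate setting matches.
+ */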
+static int find_num_channels(struct mixer_build *state, int dir)
+{
+	int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
+	int ctrlif = get_iface_desc(state->mixer->hostif)->bInterfaceNumber;
+	struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev, ctrlif);
+	struct usb_interface_assoc_descriptor *assoc = usb_iface->intf_assoc;
+	struct usb_host_interface *alts;
+
+	for (i = 0; i < assoc->bInterfaceCount; i++) {
+		int intf = assoc->bFirstInterface + i;
+
+		if (intf != ctrlif) {
+			struct usb_interface *iface =
+				usb_ifnum_to_if(state->mixer->chip->dev, intf);
+
+			alts = &iface->altsetting[1];
+			if (dir == USB_DIR_OUT &&
+				get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN)
+				continue;
+			if (dir == USB_DIR_IN &&
+				!(get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN))
+				continue;
+			num = iface->num_altsetting;
+			for (j = 1; j < num; j++) {
+				num_ch = NUM_CHANNELS_MONO;
+				alts = &iface->altsetting[j];
+				wMaxPacketSize = le16_to_cpu(
+							get_endpoint(alts, 0)->
+							wMaxPacketSize);
+				switch (wMaxPacketSize) {
+				case BADD_MAXPSIZE_SYNC_MONO_16:
+				case BADD_MAXPSIZE_SYNC_MONO_24:
+				case BADD_MAXPSIZE_ASYNC_MONO_16:
+				case BADD_MAXPSIZE_ASYNC_MONO_24:
+					break;
+				case BADD_MAXPSIZE_SYNC_STEREO_16:
+				case BADD_MAXPSIZE_SYNC_STEREO_24:
+				case BADD_MAXPSIZE_ASYNC_STEREO_16:
+				case BADD_MAXPSIZE_ASYNC_STEREO_24:
+					num_ch = NUM_CHANNELS_STEREO;
+					break;
+				}
+				if (num_ch == NUM_CHANNELS_MONO)
+					continue;
+				else
+					break;
+			}
+		}
+	}
+
+	return num_ch;
+}
+
 /*
  * parse a feature unit
  *
@@ -1478,7 +1610,7 @@
 				      unitid);
 			return -EINVAL;
 		}
-	} else {
+	} else if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_feature_unit_descriptor *ftr = _ftr;
 		csize = 4;
 		channels = (hdr->bLength - 6) / 4 - 1;
@@ -1489,11 +1621,118 @@
 				      unitid);
 			return -EINVAL;
 		}
+	} else {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+							usb_iface->intf_assoc;
+
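+		/*
+		 * BADD feature units are not read from a descriptor here;
+		 * derive the channel count, control bitmaps and cluster IDs
+		 * from the fixed BADD unit IDs and the function profile
+		 * (bFunctionSubClass) of the interface association.
+		 */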
+		csize = 4;
+		switch (unitid) {
+		case BADD_FU_ID_BAIOF:
+			channels = NUM_CHANNELS_MONO;
+			bmaControls = monoControls;
+			badd_baif_in_term_desc.wClusterDescrID =
+						CLUSTER_ID_MONO;
+			break;
+
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADPHONE:
+			case PROF_HEADSET_ADAPTER:
+				channels = NUM_CHANNELS_STEREO;
+				bmaControls = stereoControls;
+				badd_baiof_mu_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state,
+								USB_DIR_OUT);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Can't find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						monoControls : stereoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		case BADD_FU_ID_BAIF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state, USB_DIR_IN);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Can't find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						 monoControls : stereoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		default:
+			usb_audio_err(state->chip, "Invalid unit %u\n", unitid);
+			return -EINVAL;
+		}
 	}
 
 	/* parse the source unit */
-	if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
-		return err;
+	if (state->mixer->protocol != UAC_VERSION_3) {
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
+			return err;
+	} else {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
+
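+		/*
+		 * Fix up the source of the BADD output feature unit before
+		 * parsing upstream: headset profiles route through the BAIOF
+		 * mixer unit, all other profiles connect directly to the
+		 * BAOF input terminal.
+		 */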
+		switch (unitid) {
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+				hdr->bSourceID = BADD_MU_ID_BAIOF;
+				break;
+			default:
+				hdr->bSourceID = BADD_IN_TERM_ID_BAOF;
+				break;
+			}
+		}
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
+			return err;
+	}
 
 	/* determine the input source type and name */
 	err = check_input_term(state, hdr->bSourceID, &iterm);
@@ -1547,7 +1786,7 @@
 				build_feature_ctl(state, _ftr, 0, i, &iterm,
 						  unitid, 0);
 		}
-	} else { /* UAC_VERSION_2 */
+	} else { /* UAC_VERSION_2 or UAC_VERSION_3 */
 		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
 			unsigned int ch_bits = 0;
 			unsigned int ch_read_only = 0;
@@ -1665,12 +1904,20 @@
 	int input_pins, num_ins, num_outs;
 	int pin, ich, err;
 
-	if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
-	    !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
-		usb_audio_err(state->chip,
-			      "invalid MIXER UNIT descriptor %d\n",
-			      unitid);
-		return -EINVAL;
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		input_pins = badd_baiof_mu_desc.bNrInPins;
+		num_outs =
+		   (badd_baiof_mu_desc.wClusterDescrID == CLUSTER_ID_MONO) ?
+		    NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
+	} else {
+		input_pins = desc->bNrInPins;
+		num_outs = uac_mixer_unit_bNrChannels(desc);
+		if (desc->bLength < 11 || !input_pins || !num_outs) {
+			usb_audio_err(state->chip,
+				      "invalid MIXER UNIT descriptor %d\n",
+				      unitid);
+			return -EINVAL;
+		}
 	}
 
 	num_ins = 0;
@@ -1690,9 +1937,14 @@
 			int och, ich_has_controls = 0;
 
 			for (och = 0; och < num_outs; och++) {
-				__u8 *c = uac_mixer_unit_bmControls(desc,
-						state->mixer->protocol);
+				__u8 *c = NULL;
 
+				if (state->mixer->protocol == UAC_VERSION_3)
+					c =
+					  &(badd_baiof_mu_desc.bmMixerControls);
+				else
+					c = uac_mixer_unit_bmControls(desc,
+							state->mixer->protocol);
 				if (check_matrix_bitmap(c, ich, och, num_outs)) {
 					ich_has_controls = 1;
 					break;
@@ -2201,16 +2453,28 @@
 	case UAC2_CLOCK_SOURCE:
 		return parse_clock_source_unit(state, unitid, p1);
 	case UAC_SELECTOR_UNIT:
+	/*   UAC3_MIXER_UNIT_V3 has the same value */
 	case UAC2_CLOCK_SELECTOR:
-		return parse_audio_selector_unit(state, unitid, p1);
+	/*   UAC3_CLOCK_SOURCE has the same value */
+		if (state->mixer->protocol == UAC_VERSION_3 &&
+			p1[2] == UAC3_CLOCK_SOURCE)
+			return 0; /* NOP */
+		else if (state->mixer->protocol == UAC_VERSION_3
+			&& p1[2] == UAC3_MIXER_UNIT_V3)
+			return parse_audio_mixer_unit(state, unitid, p1);
+		else
+			return parse_audio_selector_unit(state, unitid, p1);
 	case UAC_FEATURE_UNIT:
 		return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_PROCESSING_UNIT:
 	/*   UAC2_EFFECT_UNIT has the same value */
+	/*   UAC3_FEATURE_UNIT_V3 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
 			return parse_audio_processing_unit(state, unitid, p1);
-		else
+		else if (state->mixer->protocol == UAC_VERSION_2)
 			return 0; /* FIXME - effect units not implemented yet */
+		else
+			return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_EXTENSION_UNIT:
 	/*   UAC2_PROCESSING_UNIT_V2 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
@@ -2245,6 +2509,23 @@
 	return 0;
 }
 
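+/*
+ * BADD devices expose no output terminal descriptors; synthesize one from
+ * the prebuilt BADD templates (streaming vs. physical terminal), mark it
+ * as visited and parse the topology starting at its source unit.
+ */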
+static int make_out_term(struct mixer_build state, int wTerminalType)
+{
+	struct uac3_output_terminal_descriptor *desc = NULL;
+
+	if (wTerminalType == UAC_TERMINAL_STREAMING)
+		desc = &badd_baif_out_term_desc;
+	else {
+		desc = &badd_baof_out_term_desc;
+		desc->wTerminalType = wTerminalType;
+	}
+	set_bit(desc->bTerminalID, state.unitbitmap);
+	state.oterm.id = desc->bTerminalID;
+	state.oterm.type = desc->wTerminalType;
+	state.oterm.name = desc->wTerminalDescrStr;
+	return parse_audio_unit(&state, desc->bSourceID);
+}
+
 /*
  * create mixer controls
  *
@@ -2253,9 +2534,8 @@
 static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 {
 	struct mixer_build state;
-	int err;
+	int err = -EINVAL;
 	const struct usbmix_ctl_map *map;
-	void *p;
 
 	memset(&state, 0, sizeof(state));
 	state.chip = mixer->chip;
@@ -2273,44 +2553,108 @@
 		}
 	}
 
-	p = NULL;
-	while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
-					    mixer->hostif->extralen,
-					    p, UAC_OUTPUT_TERMINAL)) != NULL) {
-		if (mixer->protocol == UAC_VERSION_1) {
-			struct uac1_output_terminal_descriptor *desc = p;
+	if (mixer->protocol == UAC_VERSION_3) {
+		struct usb_interface *usb_iface	=
+			usb_ifnum_to_if(mixer->chip->dev,
+			get_iface_desc(mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
 
-			if (desc->bLength < sizeof(*desc))
-				continue; /* invalid descriptor? */
-			/* mark terminal ID as visited */
-			set_bit(desc->bTerminalID, state.unitbitmap);
-			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
-			state.oterm.name = desc->iTerminal;
-			err = parse_audio_unit(&state, desc->bSourceID);
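+		/*
+		 * For BADD there are no output terminal descriptors to walk;
+		 * pick the output terminal(s) to synthesize from the function
+		 * profile (bFunctionSubClass) advertised in the interface
+		 * association descriptor.
+		 */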
+		switch (assoc->bFunctionSubClass) {
+		case PROF_GENERIC_IO: {
+			if (assoc->bInterfaceCount == 0x02) {
+				if (get_endpoint(mixer->hostif,
+					0)->bEndpointAddress & USB_DIR_IN)
+					err = make_out_term(state,
+							UAC_TERMINAL_STREAMING);
+				else
+					err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+			} else {
+				err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+				if (err < 0 && err != -EINVAL)
+					return err;
+				err = make_out_term(state,
+						UAC_TERMINAL_STREAMING);
+			}
+			break;
+		}
+
+		case PROF_HEADPHONE:
+			err = make_out_term(state,
+					UAC_OUTPUT_TERMINAL_HEADPHONES);
+			break;
+		case PROF_SPEAKER:
+			err = make_out_term(state, UAC_OUTPUT_TERMINAL_SPEAKER);
+			break;
+		case PROF_MICROPHONE:
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_HEADSET:
+		case PROF_HEADSET_ADAPTER:
+			err = make_out_term(state, UAC_BIDIR_TERMINAL_HEADSET);
 			if (err < 0 && err != -EINVAL)
 				return err;
-		} else { /* UAC_VERSION_2 */
-			struct uac2_output_terminal_descriptor *desc = p;
-
-			if (desc->bLength < sizeof(*desc))
-				continue; /* invalid descriptor? */
-			/* mark terminal ID as visited */
-			set_bit(desc->bTerminalID, state.unitbitmap);
-			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
-			state.oterm.name = desc->iTerminal;
-			err = parse_audio_unit(&state, desc->bSourceID);
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_SPEAKERPHONE:
+			err = make_out_term(state,
+					UAC_BIDIR_TERMINAL_SPEAKERPHONE);
 			if (err < 0 && err != -EINVAL)
 				return err;
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		}
+		if (err < 0 && err != -EINVAL)
+			return err;
+	} else {
+		void *p;
 
-			/*
-			 * For UAC2, use the same approach to also add the
-			 * clock selectors
-			 */
-			err = parse_audio_unit(&state, desc->bCSourceID);
-			if (err < 0 && err != -EINVAL)
-				return err;
+		p = NULL;
+		while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
+						mixer->hostif->extralen, p,
+						UAC_OUTPUT_TERMINAL)) != NULL) {
+			if (mixer->protocol == UAC_VERSION_1) {
+				struct uac1_output_terminal_descriptor *desc =
+									      p;
+
+				if (desc->bLength < sizeof(*desc))
+					continue; /* invalid descriptor? */
+				/* mark terminal ID as visited */
+				set_bit(desc->bTerminalID, state.unitbitmap);
+				state.oterm.id = desc->bTerminalID;
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
+				state.oterm.name = desc->iTerminal;
+				err = parse_audio_unit(&state, desc->bSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+			} else { /* UAC_VERSION_2 */
+				struct uac2_output_terminal_descriptor *desc =
+									      p;
+
+				if (desc->bLength < sizeof(*desc))
+					continue; /* invalid descriptor? */
+				/* mark terminal ID as visited */
+				set_bit(desc->bTerminalID, state.unitbitmap);
+				state.oterm.id = desc->bTerminalID;
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
+				state.oterm.name = desc->iTerminal;
+				err = parse_audio_unit(&state, desc->bSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+
+				/*
+				 * For UAC2, use the same approach to also add
+				 * the clock selectors
+				 */
+				err = parse_audio_unit(&state,
+							desc->bCSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+			}
 		}
 	}
 
@@ -2552,6 +2896,9 @@
 	case UAC_VERSION_2:
 		mixer->protocol = UAC_VERSION_2;
 		break;
+	case UAC_VERSION_3:
+		mixer->protocol = UAC_VERSION_3;
+		break;
 	}
 
 	if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 7437cd5..5bc84b4 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -282,8 +283,6 @@
 		0 /* terminator */
 	};
 	struct snd_pcm_chmap_elem *chmap;
-	const unsigned int *maps;
-	int c;
 
 	if (channels > ARRAY_SIZE(chmap->map))
 		return NULL;
@@ -292,26 +291,41 @@
 	if (!chmap)
 		return NULL;
 
-	maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 	chmap->channels = channels;
-	c = 0;
 
-	if (bits) {
-		for (; bits && *maps; maps++, bits >>= 1)
-			if (bits & 1)
-				chmap->map[c++] = *maps;
+	if (protocol == UAC_VERSION_3) {
+		switch (channels) {
+		case 1:
+			chmap->map[0] = SNDRV_CHMAP_MONO;
+			break;
+		case 2:
+			chmap->map[0] = SNDRV_CHMAP_FL;
+			chmap->map[1] = SNDRV_CHMAP_FR;
+			break;
+		}
 	} else {
-		/* If we're missing wChannelConfig, then guess something
-		    to make sure the channel map is not skipped entirely */
-		if (channels == 1)
-			chmap->map[c++] = SNDRV_CHMAP_MONO;
-		else
-			for (; c < channels && *maps; maps++)
-				chmap->map[c++] = *maps;
-	}
+		int c = 0;
+		const unsigned int *maps =
+			protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 
-	for (; c < channels; c++)
-		chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+		if (bits) {
+			for (; bits && *maps; maps++, bits >>= 1)
+				if (bits & 1)
+					chmap->map[c++] = *maps;
+		} else {
+			/*
+			 * If we're missing wChannelConfig, then guess something
+			 * to make sure the channel map is not skipped entirely
+			 */
+			if (channels == 1)
+				chmap->map[c++] = SNDRV_CHMAP_MONO;
+			else
+				for (; c < channels && *maps; maps++)
+					chmap->map[c++] = *maps;
+		}
+		for (; c < channels; c++)
+			chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+	}
 
 	return chmap;
 }
@@ -409,6 +423,9 @@
 	struct usb_interface_descriptor *altsd = get_iface_desc(alts);
 	int attributes = 0;
 
+	if (protocol == UAC_VERSION_3)
+		return 0;
+
 	csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
 
 	/* Creamware Noah has this descriptor after the 2nd endpoint */
@@ -492,7 +509,7 @@
 	unsigned int format = 0, num_channels = 0;
 	struct audioformat *fp = NULL;
 	int num, protocol, clock = 0;
-	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_continuous_descriptor *fmt = NULL;
 	unsigned int chconfig;
 
 	dev = chip->dev;
@@ -629,38 +646,78 @@
 				iface_no, altno, as->bTerminalLink);
 			continue;
 		}
+
+		case UAC_VERSION_3: {
+			int wMaxPacketSize;
+
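+			/*
+			 * For BADD, don't look for format or clock
+			 * descriptors: assume PCM, use the fixed BADD clock
+			 * source ID and derive the channel layout from the
+			 * endpoint's wMaxPacketSize.
+			 */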
+			format = UAC_FORMAT_TYPE_I_PCM;
+			clock = BADD_CLOCK_SOURCE;
+			wMaxPacketSize = le16_to_cpu(get_endpoint(alts, 0)
+							->wMaxPacketSize);
+			switch (wMaxPacketSize) {
+			case BADD_MAXPSIZE_SYNC_MONO_16:
+			case BADD_MAXPSIZE_SYNC_MONO_24:
+			case BADD_MAXPSIZE_ASYNC_MONO_16:
+			case BADD_MAXPSIZE_ASYNC_MONO_24: {
+				num_channels = NUM_CHANNELS_MONO;
+				chconfig = BADD_CH_CONFIG_MONO;
+				break;
+			}
+
+			case BADD_MAXPSIZE_SYNC_STEREO_16:
+			case BADD_MAXPSIZE_SYNC_STEREO_24:
+			case BADD_MAXPSIZE_ASYNC_STEREO_16:
+			case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+				num_channels = NUM_CHANNELS_STEREO;
+				chconfig = BADD_CH_CONFIG_STEREO;
+				break;
+			}
+			default:
+				dev_err(&dev->dev,
+					"%u:%d: invalid wMaxPacketSize\n",
+					iface_no, altno);
+				continue;
+			}
+		}
 		}
 
-		/* get format type */
-		fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
-		if (!fmt) {
-			dev_err(&dev->dev,
-				"%u:%d : no UAC_FORMAT_TYPE desc\n",
-				iface_no, altno);
-			continue;
-		}
-		if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) ||
-		    ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) {
-			dev_err(&dev->dev,
-				"%u:%d : invalid UAC_FORMAT_TYPE desc\n",
-				iface_no, altno);
-			continue;
-		}
+		if ((protocol == UAC_VERSION_1) ||
+			(protocol == UAC_VERSION_2)) {
+			/* get format type */
+			fmt = snd_usb_find_csint_desc(alts->extra,
+					alts->extralen, NULL, UAC_FORMAT_TYPE);
+			if (!fmt) {
+				dev_err(&dev->dev,
+					"%u:%d : no UAC_FORMAT_TYPE desc\n",
+					iface_no, altno);
+				continue;
+			}
+			if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8))
+			|| ((protocol == UAC_VERSION_2) &&
+				(fmt->bLength < 6))) {
+				dev_err(&dev->dev,
+					"%u:%d :invalid UAC_FORMAT_TYPE desc\n",
+					iface_no, altno);
+				continue;
+			}
 
-		/*
-		 * Blue Microphones workaround: The last altsetting is identical
-		 * with the previous one, except for a larger packet size, but
-		 * is actually a mislabeled two-channel setting; ignore it.
-		 */
-		if (fmt->bNrChannels == 1 &&
-		    fmt->bSubframeSize == 2 &&
-		    altno == 2 && num == 3 &&
-		    fp && fp->altsetting == 1 && fp->channels == 1 &&
-		    fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
-		    protocol == UAC_VERSION_1 &&
-		    le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+			/*
+			 * Blue Microphones workaround: The last altsetting is
+			 * identical with the previous one, except for a larger
+			 * packet size, but is actually a mislabeled two-channel
+			 * setting; ignore it.
+			 */
+			if (fmt->bNrChannels == 1 &&
+			    fmt->bSubframeSize == 2 &&
+			    altno == 2 && num == 3 &&
+			    fp && fp->altsetting == 1 && fp->channels == 1 &&
+			    fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
+			    protocol == UAC_VERSION_1 &&
+			    le16_to_cpu(
+				get_endpoint(alts, 0)->wMaxPacketSize) ==
 							fp->maxpacksize * 2)
-			continue;
+				continue;
+		}
 
 		fp = kzalloc(sizeof(*fp), GFP_KERNEL);
 		if (! fp) {
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5a1974e..801508c 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -27,6 +27,7 @@
 #include <soc/qcom/msm_qmi_interface.h>
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
+#include <linux/usb/audio-v3.h>
 
 #include "usbaudio.h"
 #include "card.h"
@@ -427,12 +428,14 @@
 	protocol = altsd->bInterfaceProtocol;
 
 	/* get format type */
-	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
-			UAC_FORMAT_TYPE);
-	if (!fmt) {
-		pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
-			subs->interface, subs->altset_idx);
-		goto err;
+	if (protocol != UAC_VERSION_3) {
+		fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+				UAC_FORMAT_TYPE);
+		if (!fmt) {
+			pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n",
+				__func__, subs->interface, subs->altset_idx);
+			goto err;
+		}
 	}
 
 	if (!uadev[card_num].ctrl_intf) {
@@ -440,12 +443,15 @@
 		goto err;
 	}
 
-	hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
-					uadev[card_num].ctrl_intf->extralen,
-					NULL, UAC_HEADER);
-	if (!hdr_ptr) {
-		pr_err("%s: no UAC_HEADER desc\n", __func__);
-		goto err;
+	if (protocol != UAC_VERSION_3) {
+		hdr_ptr = snd_usb_find_csint_desc(
+				uadev[card_num].ctrl_intf->extra,
+				uadev[card_num].ctrl_intf->extralen,
+				NULL, UAC_HEADER);
+		if (!hdr_ptr) {
+			pr_err("%s: no UAC_HEADER desc\n", __func__);
+			goto err;
+		}
 	}
 
 	if (protocol == UAC_VERSION_1) {
@@ -473,6 +479,31 @@
 		resp->usb_audio_spec_revision =
 			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
 		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_3) {
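+		/*
+		 * For BADD (UAC3) there is no format descriptor; infer the
+		 * subslot size (16 or 24 bit) from the endpoint's
+		 * wMaxPacketSize instead.
+		 */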
+		switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+
+		default:
+			pr_err("%d: %u: Invalid wMaxPacketSize\n",
+				subs->interface, subs->altset_idx);
+			ret = -EINVAL;
+			goto err;
+		}
+		resp->usb_audio_subslot_size_valid = 1;
 	} else {
 		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
 		goto err;
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 834137e..1ab58f7 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -168,6 +168,13 @@
 	if (irq->hw) {
 		val |= GICH_LR_HW;
 		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq->pending)
+			val &= ~GICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= GICH_LR_EOI;
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index e6b03fd..f132006 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -151,6 +151,13 @@
 	if (irq->hw) {
 		val |= ICH_LR_HW;
 		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+		/*
+		 * Never set pending+active on a HW interrupt, as the
+		 * pending state is kept at the physical distributor
+		 * level.
+		 */
+		if (irq->active && irq->pending)
+			val &= ~ICH_LR_PENDING_BIT;
 	} else {
 		if (irq->config == VGIC_CONFIG_LEVEL)
 			val |= ICH_LR_EOI;