Merge "ARM: dts: msm: Add device tree support for msm8953"
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index d4a352b..18386ab 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -39,6 +39,8 @@
 
 		- System Trace Macrocell:
 			"arm,coresight-stm", "arm,primecell"; [1]
+		- Trigger Generation Unit:
+			 "arm,primecell";
 
 	* reg: physical base address and length of the register
 	  set(s) of the component.
@@ -86,6 +88,16 @@
 
 	* qcom,dummy-sink: Configure the device as sink.
 
+* Additional required properties for coresight-tgu devices:
+	* tgu-steps: must be present. Indicates the number of steps supported
+	  by the TGU.
+	* tgu-conditions: must be present. Indicates the number of conditions
+	  supported by the TGU.
+	* tgu-regs: must be present. Indicates the number of registers supported
+	  by the TGU.
+	* tgu-timer-counters: must be present. Indicates the number of timers and
+	  counters available in the TGU to do a comparison.
+
 * Optional properties for all components:
 	* reg-names: names corresponding to each reg property value.
 
@@ -388,7 +400,7 @@
 		};
 	};
 
-4. CTIs
+5. CTIs
 	cti0: cti@6010000 {
 		compatible = "arm,coresight-cti", "arm,primecell";
 		reg = <0x6010000 0x1000>;
@@ -400,5 +412,21 @@
 		clock-names = "apb_pclk";
 	};
 
+6. TGUs
+	ipcb_tgu: tgu@6b0c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b999>;
+		reg = <0x06B0C000 0x1000>;
+		reg-names = "tgu-base";
+		tgu-steps = <3>;
+		tgu-conditions = <4>;
+		tgu-regs = <4>;
+		tgu-timer-counters = <8>;
+
+		coresight-name = "coresight-tgu-ipcb";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
 [1]. There is currently two version of STM: STM32 and STM500.  Both
 have the same HW interface and as such don't need an explicit binding name.
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index bc82bdc..765b5e4 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -92,6 +92,12 @@
 - SDM670
   compatible = "qcom,sdm670"
 
+- QCS605
+  compatible = "qcom,qcs605"
+
+- SDA670
+  compatible = "qcom,sda670"
+
 - MSM8952
   compatible = "qcom,msm8952"
 
@@ -276,6 +282,10 @@
 compatible = "qcom,sdm670-rumi"
 compatible = "qcom,sdm670-cdp"
 compatible = "qcom,sdm670-mtp"
+compatible = "qcom,qcs605-cdp"
+compatible = "qcom,qcs605-mtp"
+compatible = "qcom,sda670-cdp"
+compatible = "qcom,sda670-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index c8cffb5..2782428 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -135,6 +135,18 @@
 				power collapse feature available or not.
 - qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
 				feature available or not.
+- qcom,sde-has-dest-scaler:	Boolean property to indicate if the destination scaler
+				feature is available or not.
+- qcom,sde-max-dest-scaler-input-linewidth: A u32 value indicating the
+				maximum input line width of the destination scaler.
+- qcom,sde-max-dest-scaler-output-linewidth: A u32 value indicating the
+				maximum output line width of the destination scaler.
+- qcom,sde-dest-scaler-top-off: A u32 value providing the offset of the
+				destination scaler top block from the mdp base.
+- qcom,sde-dest-scaler-top-size: A u32 value indicating the address range of the destination scaler top block.
+- qcom,sde-dest-scaler-off:	Array of u32 offsets of the qseed3 scaler blocks,
+				relative to the destination scaler top offset.
+- qcom,sde-dest-scaler-size:    A u32 value indicating the address range of each scaler block.
 - qcom,sde-sspp-clk-ctrl:	Array of offsets describing clk control
 				offsets for dynamic clock gating. 1st value
 				in the array represents offset of the control
@@ -354,6 +366,7 @@
 				control register. Number of offsets defined should
 				match the number of xin-ids defined in
 				property: qcom,sde-inline-rot-xin
+- #power-domain-cells:		Number of cells in a power-domain specifier; must be 0.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -434,6 +447,7 @@
     interrupt-controller;
     #interrupt-cells = <1>;
     iommus = <&mdp_smmu 0>;
+    #power-domain-cells = <0>;
 
     qcom,sde-off = <0x1000>;
     qcom,sde-ctl-off = <0x00002000 0x00002200 0x00002400
@@ -444,6 +458,8 @@
     qcom,sde-dspp-off = <0x00055000 0x00057000>;
     qcom,sde-dspp-ad-off = <0x24000 0x22800>;
     qcom,sde-dspp-ad-version = <0x00030000>;
+    qcom,sde-dest-scaler-top-off = <0x00061000>;
+    qcom,sde-dest-scaler-off = <0x800 0x1000>;
     qcom,sde-wb-off = <0x00066000>;
     qcom,sde-wb-xin-id = <6>;
     qcom,sde-intf-off = <0x0006b000 0x0006b800
@@ -505,6 +521,8 @@
     qcom,sde-cdm-size = <0x100>;
     qcom,sde-pp-size = <0x100>;
     qcom,sde-wb-size = <0x100>;
+    qcom,sde-dest-scaler-top-size = <0xc>;
+    qcom,sde-dest-scaler-size = <0x800>;
     qcom,sde-len = <0x100>;
     qcom,sde-wb-linewidth = <2560>;
     qcom,sde-sspp-scale-size = <0x100>;
@@ -514,6 +532,9 @@
     qcom,sde-highest-bank-bit = <15>;
     qcom,sde-has-mixer-gc;
     qcom,sde-has-idle-pc;
+    qcom,sde-has-dest-scaler;
+    qcom,sde-max-dest-scaler-input-linewidth = <2048>;
+    qcom,sde-max-dest-scaler-output-linewidth = <2560>;
     qcom,sde-sspp-max-rects = <1 1 1 1
 				1 1 1 1
 				1 1
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 8fcdd82..d2e635a 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -100,6 +100,10 @@
 		  Some hardware may not have full support for atos debugging
 		  in tandem with other features like power collapse.
 
+- qcom,mmu500-errata-1:
+		  An array of <sid mask> tuples indicating the SIDs for which
+		  the MMU-500 erratum workaround is required.
+
 - qcom,deferred-regulator-disable-delay : The time delay for deferred regulator
                   disable in ms. In case of unmap call, regulator is
                   enabled/disabled. This may introduce additional delay. For
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
index 933ad85..83a58df 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -278,6 +278,46 @@
   Value type: <u32>
   Definition: should specify the power on sequence delay time in ms.
 
+- spiop-read
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read operation related data.
+
+- spiop-readseq
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read sequence operation related data.
+
+- spiop-queryid
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI query eeprom id operation related data.
+
+- spiop-pprog
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI page program operation related data.
+
+- spiop-wenable
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI write enable operation related data.
+
+- spiop-readst
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read status operation related data.
+
+- spiop-erase
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI erase operation related data.
+
+- eeprom-idx
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides eeprom id related data.
+
 - xxxx-supply
   Usage: required
   Value type: <phandle>
@@ -385,6 +425,10 @@
 		cell-index = <0>;
 		reg = <0x0>;
 		qcom,eeprom-name = "msm_eeprom";
+		eeprom-id0 = <0xF8 0x15>;
+		eeprom-id1 = <0xEF 0x15>;
+		eeprom-id2 = <0xC2 0x36>;
+		eeprom-id3 = <0xC8 0x15>;
 		compatible = "qcom,eeprom";
 		qcom,slave-addr = <0x60>;
 		qcom,num-blocks = <2>;
@@ -400,6 +444,13 @@
 		qcom,cmm-data-compressed;
 		qcom,cmm-data-offset = <0>;
 		qcom,cmm-data-size = <0>;
+		spiop-read = <0x03 3 0 0 0>;
+		spiop-readseq = <0x03 3 0 0 0>;
+		spiop-queryid = <0x90 3 0 0 0>;
+		spiop-pprog = <0x02 3 0 3 100>;
+		spiop-wenable = <0x06 0 0 0 0>;
+		spiop-readst = <0x05 0 0 0 0>;
+		spiop-erase = <0x20 3 0 10 100>;
 		qcom,cam-power-seq-type = "sensor_vreg",
 			"sensor_vreg", "sensor_clk",
 			"sensor_gpio", "sensor_gpio";
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
index 72e2ab4..728c5f9 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -56,6 +56,11 @@
   Value type: <string>
   Definition: Should specify a string label to identify the context bank.
 
+- qcom,secure-cb
+  Usage: optional
+  Value type: boolean
+  Definition: Specifies if the context bank is a secure context bank.
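+
+  An illustrative secure context bank node is sketched below; the node name,
+  compatible string and label are assumptions, and only qcom,secure-cb itself
+  is taken from this binding:
+
+	qcom,secure_cb {
+		compatible = "qcom,msm-cam-smmu-cb";
+		label = "secure_cb";
+		qcom,secure-cb;
+	};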
+
 =============================================
 Third Level Node - CAM SMMU memory map device
 =============================================
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 46649af..db34047 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -123,6 +123,7 @@
 				swizzle configuration value.
 - qcom,rot-reg-bus:		Property to provide Bus scaling for register
 				access for rotator blocks.
+- power-domains:		A phandle to the respective power-domain node.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -150,6 +151,8 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
+		power-domains = <&mdss_mdp>;
+
 		qcom,mdss-mdp-reg-offset = <0x00001000>;
 
 		rot-vdd-supply = <&gdsc_mdss>;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index a3fd951..d205b0b 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -309,6 +309,13 @@
 		    is specified to make it fully functional. Value has no
 		    unit. Allowed range is 0 to 62200 in micro units.
 
+- qcom,ki-coeff-full-dischg
+	Usage:	    optional
+	Value type: <u32>
+	Definition: Ki coefficient value for full SOC that will be applied
+		    during discharging. If not specified, a value of 0 will be
+		    set. Allowed range is from 245 to 62256.
+
 - qcom,fg-rconn-mohms
 	Usage:      optional
 	Value type: <u32>
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 4d05e50..57a227e 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2459,6 +2459,10 @@
 		6-pole-jack : Jack on the hardware is 6-pole.
 - clock-names : clock name defined for external clock.
 - clocks : external clock defined for codec clock.
+- qcom,msm-mbhc-hs-mic-max-threshold-mv : headset detection threshold. When micbias is
+					  not set to 2.7V, the driver needs to scale it.
+- qcom,msm-mbhc-hs-mic-min-threshold-mv : headphone detection threshold. When micbias is
+					  not set to 2.7V, the driver needs to scale it.
 - qcom,hph-en1-gpio : GPIO to enable HiFi amplifiers.
 - qcom,hph-en0-gpio : GPIO to enable HiFi audio route to headset.
 - qcom,wsa-max-devs : Maximum number of WSA881x devices present in the target
@@ -2516,6 +2520,8 @@
 
 		qcom,msm-mbhc-hphl-swh = <0>;
 		qcom,msm-mbhc-gnd-swh = <0>;
+		qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
+		qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
 		qcom,hph-en0-gpio = <&tavil_hph_en0>;
 		qcom,hph-en1-gpio = <&tavil_hph_en1>;
 		qcom,tavil-mclk-clk-freq = <9600000>;
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 609d853..6838afd 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -58,6 +58,7 @@
 	gating. Default it is enabled.
  - snps,xhci-imod-value: Interrupt moderation interval for host mode
 	(in increments of 250nsec).
+ - usb-core-id: Identifier used to differentiate between multiple controllers present on a device.
 
 This is usually a subnode to DWC3 glue to which it is connected.
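+
+An illustrative dwc3 node fragment is sketched below; the node name, reg and
+the usb-core-id value are assumptions, only the usb-core-id property itself
+comes from this binding:
+
+	dwc3@a600000 {
+		compatible = "snps,dwc3";
+		reg = <0xa600000 0xcd00>;
+		usb-core-id = <0>;
+	};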
 
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 6109fad..d23cb46 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -159,6 +159,7 @@
    "efuse_addr": EFUSE address to read and update analog tune parameter.
    "emu_phy_base" : phy base address used for programming emulation target phy.
    "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+   "eud_base" : EUD device register address space, used for the EUD pet functionality.
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
diff --git a/Makefile b/Makefile
index 3849d63..665104d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 41
+SUBLEVEL = 51
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d..0bc66e1 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
 #ifndef _ALPHA_TYPES_H
 #define _ALPHA_TYPES_H
 
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd4..8d1024d 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
  * need to be careful to avoid a name clashes.
  */
 
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this.  They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
 #include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
 #endif
 
 #endif /* _UAPI_ALPHA_TYPES_H */
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff..4fd6272 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -89,7 +89,9 @@
 #define ARC_REG_SLC_FLUSH	0x904
 #define ARC_REG_SLC_INVALIDATE	0x905
 #define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_START1	0x915
 #define ARC_REG_SLC_RGN_END	0x916
+#define ARC_REG_SLC_RGN_END1	0x917
 
 /* Bit val in SLC_CONTROL */
 #define SLC_CTRL_IM		0x040
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 8147583..bbdfeb3 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -562,6 +562,7 @@
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned int ctrl;
+	phys_addr_t end;
 
 	spin_lock_irqsave(&lock, flags);
 
@@ -591,8 +592,16 @@
 	 * END needs to be setup before START (latter triggers the operation)
 	 * END can't be same as START, so add (l2_line_sz - 1) to sz
 	 */
-	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
-	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+	end = paddr + sz + l2_line_sz - 1;
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
 
 	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
 
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index 895fa6c..563901e 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -75,7 +75,7 @@
 					pinctrl-names = "default";
 					pinctrl-0 = <&pca0_pins>;
 					interrupt-parent = <&gpio0>;
-					interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+					interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 					gpio-controller;
 					#gpio-cells = <2>;
 					interrupt-controller;
@@ -87,7 +87,7 @@
 					compatible = "nxp,pca9555";
 					pinctrl-names = "default";
 					interrupt-parent = <&gpio0>;
-					interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+					interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 					gpio-controller;
 					#gpio-cells = <2>;
 					interrupt-controller;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-smp2p.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-smp2p.dtsi
new file mode 100644
index 0000000..f9ad6f4
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-smp2p.dtsi
@@ -0,0 +1,109 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,smp2p-modem@17811008 {
+		compatible = "qcom,smp2p";
+		reg = <0x17811008 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <GIC_SPI 113 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+		gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+		gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+		gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+		gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+	};
+
+	/* ssr - inbound entry from mss */
+	smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to mss */
+	smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
+
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index 2ef277f..d538efe 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -34,6 +34,13 @@
 			reg = <0x8fd00000 0x300000>;
 			label = "peripheral2_mem";
 		};
+
+		mss_mem: mss_region@87800000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0x87800000 0x8000000>;
+			label = "mss_mem";
+		};
 	};
 
 	cpus {
@@ -324,8 +331,134 @@
 		#interrupt-cells = <4>;
 		cell-index = <0>;
 	};
+
+	qcom,ipc-spinlock@1f40000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1f40000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,smem@8fe40000 {
+		compatible = "qcom,smem";
+		reg = <0x8fe40000 0xc0000>,
+			<0x17811008 0x4>,
+			<0x1fd4000 0x8>;
+		reg-names = "smem", "irq-reg-base",
+			"smem_targ_info_reg";
+		qcom,mpu-enabled;
+	};
+
+	qcom,glink-smem-native-xprt-modem@8fe40000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x8fe40000 0xc0000>,
+			<0x17811008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <GIC_SPI 111 IRQ_TYPE_EDGE_RISING>;
+		label = "mpss";
+	};
+
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+	};
+
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-at-mdm0 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DS";
+			qcom,glinkpkt-dev-name = "at_mdm0";
+		};
+
+		qcom,glinkpkt-loopback_cntl {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+		};
+
+		qcom,glinkpkt-loopback_data {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+		};
+
+		qcom,glinkpkt-data40-cntl {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA40_CNTL";
+			qcom,glinkpkt-dev-name = "smdcntl8";
+		};
+
+		qcom,glinkpkt-data1 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA1";
+			qcom,glinkpkt-dev-name = "smd7";
+		};
+
+		qcom,glinkpkt-data4 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA4";
+			qcom,glinkpkt-dev-name = "smd8";
+		};
+
+		qcom,glinkpkt-data11 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA11";
+			qcom,glinkpkt-dev-name = "smd11";
+		};
+	};
+
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x4080000 0x100>;
+		interrupts = <0 250 1>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		vdd_cx-supply = <&pmxpoorwills_s5_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+
+		qcom,pas-id = <0>;
+		qcom,smem-id = <421>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,firmware-name = "modem";
+		memory-region = <&mss_mem>;
+		status = "ok";
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+	};
 };
 
 #include "pmxpoorwills.dtsi"
 #include "sdxpoorwills-regulator.dtsi"
+#include "sdxpoorwills-smp2p.dtsi"
 #include "sdxpoorwills-usb.dtsi"
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915..10d3074 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
 };
 
 &pio {
-	mmc2_pins_nrst: mmc2@0 {
+	mmc2_pins_nrst: mmc2-rst-pin {
 		allwinner,pins = "PC16";
 		allwinner,function = "gpio_out";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts
index 4cab64c..e3a51e3 100644
--- a/arch/arm/boot/dts/tango4-vantage-1172.dts
+++ b/arch/arm/boot/dts/tango4-vantage-1172.dts
@@ -21,7 +21,7 @@
 };
 
 &eth0 {
-	phy-connection-type = "rgmii";
+	phy-connection-type = "rgmii-id";
 	phy-handle = <&eth0_phy>;
 	#address-cells = <1>;
 	#size-cells = <0>;
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index bde40e0..1826b6f 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -195,6 +195,7 @@
 CONFIG_INPUT_GPIO=m
 CONFIG_SERIO_LIBPS2=y
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
@@ -280,10 +281,15 @@
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index c239ca8..ce61464 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -189,9 +189,11 @@
 # CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MSM_V2=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
 CONFIG_SPI_SPIDEV=m
@@ -203,6 +205,7 @@
 CONFIG_POWER_SUPPLY=y
 CONFIG_THERMAL=y
 CONFIG_THERMAL_TSENS=y
+CONFIG_MFD_SYSCON=y
 CONFIG_MSM_CDC_PINCTRL=y
 CONFIG_MSM_CDC_SUPPLY=y
 CONFIG_REGULATOR=y
@@ -260,6 +263,7 @@
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_RTC_CLASS=y
 CONFIG_DMADEVICES=y
+CONFIG_QCOM_SPS_DMA=y
 CONFIG_UIO=y
 CONFIG_STAGING=y
 CONFIG_GSI=y
@@ -270,14 +274,20 @@
 CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MAILBOX=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SCM=y
 CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
 CONFIG_TRACER_PKT=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
 CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f..22b7311 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+					       const char *name)
+{
+	if (!strcmp(sym, "sys_mmap2"))
+		sym = "sys_mmap_pgoff";
+	else if (!strcmp(sym, "sys_statfs64_wrapper"))
+		sym = "sys_statfs64";
+	else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+		sym = "sys_fstatfs64";
+	else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+		sym = "sys_fadvise64_64";
+
+	/* Ignore case since sym may start with "SyS" instead of "sys" */
+	return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 332ce3b..2206e0e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -829,22 +829,22 @@
  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
  * underlying level-2 and level-3 tables before freeing the actual level-1 table
  * and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
  */
 void kvm_free_stage2_pgd(struct kvm *kvm)
 {
-	if (kvm->arch.pgd == NULL)
-		return;
+	void *pgd = NULL;
 
 	spin_lock(&kvm->mmu_lock);
-	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	if (kvm->arch.pgd) {
+		unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+		pgd = READ_ONCE(kvm->arch.pgd);
+		kvm->arch.pgd = NULL;
+	}
 	spin_unlock(&kvm->mmu_lock);
 
 	/* Free the HW pgd, one page at a time */
-	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
-	kvm->arch.pgd = NULL;
+	if (pgd)
+		free_pages_exact(pgd, S2_PGD_SIZE);
 }
 
 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1664,12 +1664,16 @@
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index aec74bf..a9ef54d 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -314,8 +314,11 @@
 	 * signal first. We do not need to release the mmap_sem because
 	 * it would already be released in __lock_page_or_retry in
 	 * mm/filemap.c. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/*
 	 * Major/minor page fault accounting is only done on the
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 49a5d8c..68e6f88 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -170,6 +170,7 @@
 				interrupt-controller;
 				reg = <0x1d00000 0x10000>, /* GICD */
 				      <0x1d40000 0x40000>; /* GICR */
+				interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
 
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 8d99df3..aefdb52 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -92,7 +92,14 @@
 		sdm670-usbc-external-codec-cdp-overlay.dtbo \
 		sdm670-usbc-external-codec-mtp-overlay.dtbo \
 		sdm670-usbc-external-codec-pm660a-cdp-overlay.dtbo \
-		sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo
+		sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo \
+		sda670-cdp-overlay.dtbo \
+		sda670-mtp-overlay.dtbo \
+		sda670-pm660a-cdp-overlay.dtbo \
+		sda670-pm660a-mtp-overlay.dtbo \
+		qcs605-cdp-overlay.dtbo \
+		qcs605-mtp-overlay.dtbo \
+		qcs605-external-codec-mtp-overlay.dtbo
 
 sdm670-cdp-overlay.dtbo-base := sdm670.dtb
 sdm670-mtp-overlay.dtbo-base := sdm670.dtb
@@ -111,6 +118,14 @@
 sdm670-usbc-external-codec-mtp-overlay.dtbo-base := sdm670.dtb
 sdm670-usbc-external-codec-pm660a-cdp-overlay.dtbo-base := sdm670.dtb
 sdm670-usbc-external-codec-pm660a-mtp-overlay.dtbo-base := sdm670.dtb
+sda670-cdp-overlay.dtbo-base := sda670.dtb
+sda670-mtp-overlay.dtbo-base := sda670.dtb
+sda670-pm660a-cdp-overlay.dtbo-base := sda670.dtb
+sda670-pm660a-mtp-overlay.dtbo-base := sda670.dtb
+qcs605-cdp-overlay.dtbo-base := qcs605.dtb
+qcs605-mtp-overlay.dtbo-base := qcs605.dtb
+qcs605-external-codec-mtp-overlay.dtbo-base := qcs605.dtb
+
 else
 dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
 	sdm670-mtp.dtb \
@@ -128,7 +143,14 @@
 	sdm670-usbc-external-codec-pm660a-mtp.dtb \
 	sdm670-usbc-mtp.dtb \
 	sdm670-usbc-pm660a-cdp.dtb \
-	sdm670-usbc-pm660a-mtp.dtb
+	sdm670-usbc-pm660a-mtp.dtb \
+	sda670-mtp.dtb \
+	sda670-cdp.dtb \
+	sda670-pm660a-mtp.dtb \
+	sda670-pm660a-cdp.dtb \
+	qcs605-mtp.dtb \
+	qcs605-cdp.dtb \
+	qcs605-external-codec-mtp.dtb
 endif
 
 ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
index 0ca1175..0e60a0c 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -88,7 +88,7 @@
 					15 01 00 00 00 00 02 5d 81
 					15 01 00 00 00 00 02 5e 00
 					15 01 00 00 00 00 02 5f 01
-					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 72 11
 					15 01 00 00 00 00 02 68 03
 					/* CMD2_P4 */
 					15 01 00 00 00 00 02 ff 24
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
index ac8a956..2c54504 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -74,7 +74,7 @@
 					15 01 00 00 00 00 02 5d 81
 					15 01 00 00 00 00 02 5e 00
 					15 01 00 00 00 00 02 5f 01
-					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 72 11
 					15 01 00 00 00 00 02 68 03
 					/* CMD2_P4 */
 					15 01 00 00 00 00 02 ff 24
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
index 87cabae..5b5fbb8 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -89,7 +89,7 @@
 					15 01 00 00 00 00 02 5D 81
 					15 01 00 00 00 00 02 5E 00
 					15 01 00 00 00 00 02 5F 01
-					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 72 11
 					15 01 00 00 00 00 02 68 03
 					/* CMD2_P4 */
 					15 01 00 00 00 00 02 ff 24
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index 0d0e7f7..95e0e5a 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -29,7 +29,6 @@
 		qcom,mdss-dsi-lane-1-state;
 		qcom,mdss-dsi-lane-2-state;
 		qcom,mdss-dsi-lane-3-state;
-		qcom,cmd-sync-wait-broadcast;
 		qcom,mdss-dsi-dma-trigger = "trigger_sw";
 		qcom,mdss-dsi-mdp-trigger = "none";
 		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
@@ -76,7 +75,7 @@
 					15 01 00 00 00 00 02 5D 81
 					15 01 00 00 00 00 02 5E 00
 					15 01 00 00 00 00 02 5F 01
-					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 72 11
 					15 01 00 00 00 00 02 68 03
 					/* CMD2_P4 */
 					15 01 00 00 00 00 02 FF 24
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 2156a5d..a48fba0 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -355,4 +355,7 @@
 			<0x1400 0x3ff 0x3>,
 			<0x1800 0x3ff 0x3>,
 			<0x1c00 0x3ff 0x3>;
+
+	qcom,mmu500-errata-1 =	<0x800 0x3ff>,
+				<0xc00 0x3ff>;
 };
diff --git a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
index 0d2f9e8..2fd1bc4 100644
--- a/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-audio-lpass.dtsi
@@ -353,6 +353,20 @@
 		qcom,msm-cpudai-afe-clk-ver = <2>;
 	};
 
+	dai_quin_auxpcm: qcom,msm-quin-auxpcm {
+		compatible = "qcom,msm-auxpcm-dev";
+		qcom,msm-cpudai-auxpcm-mode = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-sync = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-frame = <5>, <4>;
+		qcom,msm-cpudai-auxpcm-quant = <2>, <2>;
+		qcom,msm-cpudai-auxpcm-num-slots = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-slot-mapping = <1>, <1>;
+		qcom,msm-cpudai-auxpcm-data = <0>, <0>;
+		qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
+		qcom,msm-auxpcm-interface = "quinary";
+		qcom,msm-cpudai-afe-clk-ver = <2>;
+	};
+
 	hdmi_dba: qcom,msm-hdmi-dba-codec-rx {
 		compatible = "qcom,msm-hdmi-dba-codec-rx";
 		qcom,dba-bridge-chip = "adv7533";
@@ -522,4 +536,42 @@
 			qcom,msm-cpudai-tdm-data-align = <0>;
 		};
 	};
+
+	qcom,msm-dai-tdm-quin-rx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37184>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36928>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quin_tdm_rx_0: qcom,msm-dai-q6-tdm-quin-rx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36928>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
+
+	qcom,msm-dai-tdm-quin-tx {
+		compatible = "qcom,msm-dai-tdm";
+		qcom,msm-cpudai-tdm-group-id = <37185>;
+		qcom,msm-cpudai-tdm-group-num-ports = <1>;
+		qcom,msm-cpudai-tdm-group-port-id = <36929>;
+		qcom,msm-cpudai-tdm-clk-rate = <1536000>;
+		qcom,msm-cpudai-tdm-clk-internal = <1>;
+		qcom,msm-cpudai-tdm-sync-mode = <1>;
+		qcom,msm-cpudai-tdm-sync-src = <1>;
+		qcom,msm-cpudai-tdm-data-out = <0>;
+		qcom,msm-cpudai-tdm-invert-sync = <1>;
+		qcom,msm-cpudai-tdm-data-delay = <1>;
+		dai_quin_tdm_tx_0: qcom,msm-dai-q6-tdm-quin-tx-0 {
+			compatible = "qcom,msm-dai-q6-tdm";
+			qcom,msm-cpudai-tdm-dev-id = <36929>;
+			qcom,msm-cpudai-tdm-data-align = <0>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pm660.dtsi b/arch/arm64/boot/dts/qcom/pm660.dtsi
index 80103cb..1fdb3f6 100644
--- a/arch/arm64/boot/dts/qcom/pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm660.dtsi
@@ -353,15 +353,6 @@
 			};
 		};
 
-		pm660_rradc: rradc@4500 {
-			compatible = "qcom,rradc";
-			reg = <0x4500 0x100>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-			#io-channel-cells = <1>;
-			qcom,pmic-revid = <&pm660_revid>;
-		};
-
 		bcl_sensor: bcl@4200 {
 			compatible = "qcom,msm-bcl-lmh";
 			reg = <0x4200 0xff>,
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 450295e..013ac48 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -192,6 +192,33 @@
 			qcom,fast-avg-setup = <0>;
 			#thermal-sensor-cells = <1>;
 		};
+
+		pm8998_div_clk1: qcom,clkdiv@5b00 {
+			compatible = "qcom,qpnp-clkdiv";
+			reg = <0x5b00 0x100>;
+			#clock-cells = <1>;
+			qcom,cxo-freq = <19200000>;
+			qcom,clkdiv-id = <1>;
+			qcom,clkdiv-init-freq = <19200000>;
+		};
+
+		pm8998_div_clk2: qcom,clkdiv@5c00 {
+			compatible = "qcom,qpnp-clkdiv";
+			reg = <0x5c00 0x100>;
+			#clock-cells = <1>;
+			qcom,cxo-freq = <19200000>;
+			qcom,clkdiv-id = <2>;
+			qcom,clkdiv-init-freq = <19200000>;
+		};
+
+		pm8998_div_clk3: qcom,clkdiv@5d00 {
+			compatible = "qcom,qpnp-clkdiv";
+			reg = <0x5d00 0x100>;
+			#clock-cells = <1>;
+			qcom,cxo-freq = <19200000>;
+			qcom,clkdiv-id = <3>;
+			qcom,clkdiv-init-freq = <19200000>;
+		};
 	};
 
 	qcom,pm8998@1 {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
new file mode 100644
index 0000000..fe7a027
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp-overlay.dts
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-cdp.dts b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
new file mode 100644
index 0000000..7b38a58
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-cdp.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L CDP";
+	compatible = "qcom,qcs605-cdp", "qcom,qcs605", "qcom,cdp";
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
new file mode 100644
index 0000000..1f439ae
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp-overlay.dts
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L Ext. Audio Codec MTP";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <8 1>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp.dts
new file mode 100644
index 0000000..abc3f2d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-external-codec-mtp.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+#include "sdm670-mtp.dtsi"
+#include "sdm670-external-codec.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L Ext. Audio Codec MTP";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,board-id = <8 1>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
new file mode 100644
index 0000000..7327440
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp-overlay.dts
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,msm-id = <347 0x0>;
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605-mtp.dts b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
new file mode 100644
index 0000000..bc7b376
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605-mtp.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 PM660 + PM660L MTP";
+	compatible = "qcom,qcs605-mtp", "qcom,qcs605", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dts b/arch/arm64/boot/dts/qcom/qcs605.dts
new file mode 100644
index 0000000..28c417f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "qcs605.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605 SoC";
+	compatible = "qcom,qcs605";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
new file mode 100644
index 0000000..12da650
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sda670.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. QCS605";
+	qcom,msm-id = <347 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
new file mode 100644
index 0000000..141ed59
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp-overlay.dts
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
+	compatible = "qcom,sda670-cdp", "qcom,sda670", "qcom,cdp";
+	qcom,msm-id = <337 0x0>;
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
new file mode 100644
index 0000000..fcb340e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-cdp.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L CDP";
+	compatible = "qcom,sda670-cdp", "qcom,sda670", "qcom,cdp";
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
new file mode 100644
index 0000000..af8e8f1
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp-overlay.dts
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
+	compatible = "qcom,sda670-mtp", "qcom,sda670", "qcom,mtp";
+	qcom,msm-id = <337 0x0>;
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
new file mode 100644
index 0000000..2123b44
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-mtp.dts
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660L MTP";
+	compatible = "qcom,sda670-mtp", "qcom,sda670", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+		       <0x0001001b 0x0102001a 0x0 0x0>,
+		       <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
new file mode 100644
index 0000000..3e1365d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp-overlay.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
+	compatible = "qcom,sda670-cdp", "qcom,sda670", "qcom,cdp";
+	qcom,msm-id = <337 0x0>;
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
new file mode 100644
index 0000000..6cbf224
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-cdp.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sdm670-cdp.dtsi"
+#include "pm660a.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A CDP";
+	compatible = "qcom,sda670-cdp", "qcom,sda670", "qcom,cdp";
+	qcom,board-id = <1 0>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
new file mode 100644
index 0000000..9855b11
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp-overlay.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,dispcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include "sdm670-mtp.dtsi"
+#include "pm660a.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
+	compatible = "qcom,sda670-mtp", "qcom,sda670", "qcom,mtp";
+	qcom,msm-id = <337 0x0>;
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
new file mode 100644
index 0000000..ffb6aa3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670-pm660a-mtp.dts
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+#include "sdm670-mtp.dtsi"
+#include "pm660a.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 PM660 + PM660A MTP";
+	compatible = "qcom,sda670-mtp", "qcom,sda670", "qcom,mtp";
+	qcom,board-id = <8 0>;
+	qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>,
+		       <0x0001001b 0x0002001a 0x0 0x0>,
+		       <0x0001001b 0x0202001a 0x0 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670.dts b/arch/arm64/boot/dts/qcom/sda670.dts
new file mode 100644
index 0000000..8852e30
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sda670.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670 SoC";
+	compatible = "qcom,sda670";
+	qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sda670.dtsi b/arch/arm64/boot/dts/qcom/sda670.dtsi
new file mode 100644
index 0000000..d19aac3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sda670.dtsi
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm670.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDA670";
+	qcom,msm-id = <337 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
new file mode 100644
index 0000000..dfb8142
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio-overlay.dtsi
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-wcd.dtsi"
+#include "sdm670-wsa881x.dtsi"
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
+&tavil_snd {
+	qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>;
+	qcom,audio-routing =
+		"AIF4 VI", "MCLK",
+		"RX_BIAS", "MCLK",
+		"MADINPUT", "MCLK",
+		"hifi amp", "LINEOUT1",
+		"hifi amp", "LINEOUT2",
+		"AMIC2", "MIC BIAS2",
+		"MIC BIAS2", "Headset Mic",
+		"AMIC3", "MIC BIAS2",
+		"MIC BIAS2", "ANCRight Headset Mic",
+		"AMIC4", "MIC BIAS2",
+		"MIC BIAS2", "ANCLeft Headset Mic",
+		"AMIC5", "MIC BIAS3",
+		"MIC BIAS3", "Handset Mic",
+		"DMIC0", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic0",
+		"DMIC1", "MIC BIAS1",
+		"MIC BIAS1", "Digital Mic1",
+		"DMIC2", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic2",
+		"DMIC3", "MIC BIAS3",
+		"MIC BIAS3", "Digital Mic3",
+		"DMIC4", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic4",
+		"DMIC5", "MIC BIAS4",
+		"MIC BIAS4", "Digital Mic5",
+		"SpkrLeft IN", "SPK1 OUT",
+		"SpkrRight IN", "SPK2 OUT";
+
+	qcom,msm-mbhc-hphl-swh = <1>;
+	qcom,msm-mbhc-gnd-swh = <1>;
+	qcom,hph-en0-gpio = <&tavil_hph_en0>;
+	qcom,hph-en1-gpio = <&tavil_hph_en1>;
+	qcom,msm-mclk-freq = <9600000>;
+	asoc-codec = <&stub_codec>;
+	asoc-codec-names = "msm-stub-codec.1";
+	qcom,wsa-max-devs = <2>;
+	qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+		<&wsa881x_0213>, <&wsa881x_0214>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+		"SpkrLeft", "SpkrRight";
+};
+
+&int_codec {
+	qcom,audio-routing =
+		"RX_BIAS", "INT_MCLK0",
+		"SPK_RX_BIAS", "INT_MCLK0",
+		"INT_LDO_H", "INT_MCLK0",
+		"MIC BIAS External", "Handset Mic",
+		"MIC BIAS External2", "Headset Mic",
+		"MIC BIAS External", "Secondary Mic",
+		"AMIC1", "MIC BIAS External",
+		"AMIC2", "MIC BIAS External2",
+		"AMIC3", "MIC BIAS External",
+		"DMIC1", "MIC BIAS External",
+		"MIC BIAS External", "Digital Mic1",
+		"DMIC2", "MIC BIAS External",
+		"MIC BIAS External", "Digital Mic2",
+		"DMIC3", "MIC BIAS External",
+		"MIC BIAS External", "Digital Mic3",
+		"DMIC4", "MIC BIAS External",
+		"MIC BIAS External", "Digital Mic4",
+		"SpkrLeft IN", "SPK1 OUT",
+		"SpkrRight IN", "SPK2 OUT",
+		"PDM_IN_RX1", "PDM_OUT_RX1",
+		"PDM_IN_RX2", "PDM_OUT_RX2",
+		"PDM_IN_RX3", "PDM_OUT_RX3",
+		"ADC1_IN", "ADC1_OUT",
+		"ADC2_IN", "ADC2_OUT",
+		"ADC3_IN", "ADC3_OUT";
+
+	qcom,msm-mi2s-master = <1>, <1>, <1>, <1>, <1>;
+	qcom,msm-mclk-freq = <9600000>;
+	qcom,msm-mbhc-hphl-swh = <1>;
+	qcom,msm-mbhc-gnd-swh = <1>;
+	qcom,msm-micbias2-ext-cap;
+	qcom,msm-hs-micbias-type = "external";
+	qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
+	qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
+	qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
+
+	asoc-codec = <&stub_codec>, <&msm_digital_codec>,
+		     <&pmic_analog_codec>, <&msm_sdw_codec>;
+	asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
+			   "analog-codec", "msm_sdw_codec";
+
+	qcom,wsa-max-devs = <2>;
+	qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
+			<&wsa881x_213_en>, <&wsa881x_214_en>;
+	qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+				  "SpkrLeft", "SpkrRight";
+};
+
+&soc {
+	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+	};
+
+	cdc_pdm_gpios: cdc_pdm_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_pdm_clk_active &cdc_pdm_sync_active
+			     &cdc_pdm_rx0_active &cdc_pdm_rx1_2_active
+			     &cdc_pdm_2_gpios_active>;
+		pinctrl-1 = <&cdc_pdm_clk_sleep &cdc_pdm_sync_sleep
+			     &cdc_pdm_rx0_sleep &cdc_pdm_rx1_2_sleep
+			     &cdc_pdm_2_gpios_sleep>;
+		qcom,lpi-gpios;
+	};
+
+	cdc_comp_gpios: cdc_comp_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_rx0_comp_active &cdc_rx1_comp_active>;
+		pinctrl-1 = <&cdc_rx0_comp_sleep &cdc_rx1_comp_sleep>;
+		qcom,lpi-gpios;
+	};
+
+	cdc_dmic_gpios: cdc_dmic_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&cdc_dmic12_gpios_active
+			     &cdc_dmic34_gpios_active>;
+		pinctrl-1 = <&cdc_dmic12_gpios_sleep
+			     &cdc_dmic34_gpios_sleep>;
+		qcom,lpi-gpios;
+	};
+
+	cdc_sdw_gpios: sdw_clk_data_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&sdw_clk_active &sdw_data_active>;
+		pinctrl-1 = <&sdw_clk_sleep &sdw_data_sleep>;
+	};
+
+	wsa_spkr_en1: wsa_spkr_en1_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&spkr_1_sd_n_active>;
+		pinctrl-1 = <&spkr_1_sd_n_sleep>;
+	};
+
+	wsa_spkr_en2: wsa_spkr_en2_pinctrl {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&spkr_2_sd_n_active>;
+		pinctrl-1 = <&spkr_2_sd_n_sleep>;
+	};
+
+	msm_sdw_codec: msm-sdw-codec@62ec1000 {
+		status = "okay";
+		compatible = "qcom,msm-sdw-codec";
+		reg = <0x62ec1000 0x0>;
+		interrupts = <0 88 0>;
+		interrupt-names = "swr_master_irq";
+		qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
+
+		swr_master {
+			compatible = "qcom,swr-wcd";
+			#address-cells = <2>;
+			#size-cells = <0>;
+
+			wsa881x_211_en: wsa881x_en@20170211 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x20170211>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+			};
+
+			wsa881x_212_en: wsa881x_en@20170212 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x20170212>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+			};
+
+			wsa881x_213_en: wsa881x_en@21170213 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x21170213>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+			};
+
+			wsa881x_214_en: wsa881x_en@21170214 {
+				compatible = "qcom,wsa881x";
+				reg = <0x0 0x21170214>;
+				qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+			};
+		};
+	};
+
+	wcd9xxx_intc: wcd9xxx-irq {
+		status = "disabled";
+		compatible = "qcom,wcd9xxx-irq";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&tlmm>;
+		qcom,gpio-connect = <&tlmm 80 0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&wcd_intr_default>;
+	};
+
+	clock_audio_lnbb: audio_ext_clk_lnbb {
+		status = "disabled";
+		compatible = "qcom,audio-ref-clk";
+		clock-names = "osr_clk";
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
+		qcom,node_has_rpm_clock;
+		#clock-cells = <1>;
+	};
+
+	wcd_rst_gpio: msm_cdc_pinctrl@64 {
+		status = "disabled";
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&lpi_cdc_reset_active>;
+		pinctrl-1 = <&lpi_cdc_reset_sleep>;
+		qcom,lpi-gpios;
+	};
+
+	wdsp_mgr: qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
+
+	wdsp_glink: qcom,wcd-dsp-glink {
+		compatible = "qcom,wcd-dsp-glink";
+	};
+};
+
+&slim_aud {
+	wcd934x_cdc: tavil_codec {
+		status = "disabled";
+		compatible = "qcom,tavil-slim-pgd";
+		elemental-addr = [00 01 50 02 17 02];
+
+		interrupt-parent = <&wcd9xxx_intc>;
+		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+			      17 18 19 20 21 22 23 24 25 26 27 28 29
+			      30 31>;
+
+		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+		clock-names = "wcd_clk";
+		clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
+
+		cdc-vdd-mic-bias-supply = <&pm660l_bob>;
+		qcom,cdc-vdd-mic-bias-voltage = <3312000 3312000>;
+		qcom,cdc-vdd-mic-bias-current = <30400>;
+
+		qcom,cdc-static-supplies = "cdc-vdd-mic-bias";
+
+		qcom,cdc-micbias1-mv = <1800>;
+		qcom,cdc-micbias2-mv = <1800>;
+		qcom,cdc-micbias3-mv = <1800>;
+		qcom,cdc-micbias4-mv = <1800>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+		qcom,cdc-slim-ifd = "tavil-slim-ifd";
+		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+		qcom,cdc-dmic-sample-rate = <4800000>;
+		qcom,cdc-mad-dmic-rate = <600000>;
+
+		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <0>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <24000000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
+	};
+};
+
+&pm660l_3 {
+	pmic_analog_codec: analog-codec@f000 {
+		status = "okay";
+		compatible = "qcom,pmic-analog-codec";
+		reg = <0xf000 0x200>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+		interrupt-parent = <&spmi_bus>;
+		interrupts = <0x3 0xf0 0x0 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x1 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x2 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x3 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x4 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x5 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x6 IRQ_TYPE_NONE>,
+			     <0x3 0xf0 0x7 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x0 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x1 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x2 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x3 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x4 IRQ_TYPE_NONE>,
+			     <0x3 0xf1 0x5 IRQ_TYPE_NONE>;
+		interrupt-names = "spk_cnp_int",
+				   "spk_clip_int",
+				   "spk_ocp_int",
+				   "ins_rem_det1",
+				   "but_rel_det",
+				   "but_press_det",
+				   "ins_rem_det",
+				   "mbhc_int",
+				   "ear_ocp_int",
+				   "hphr_ocp_int",
+				   "hphl_ocp_det",
+				   "ear_cnp_int",
+				   "hphr_cnp_int",
+				   "hphl_cnp_int";
+
+		cdc-vdda-cp-supply = <&pm660_s4>;
+		qcom,cdc-vdda-cp-voltage = <1900000 2050000>;
+		qcom,cdc-vdda-cp-current = <50000>;
+
+		cdc-vdd-pa-supply = <&pm660_s4>;
+		qcom,cdc-vdd-pa-voltage = <2040000 2040000>;
+		qcom,cdc-vdd-pa-current = <260000>;
+
+		cdc-vdd-mic-bias-supply = <&pm660l_l7>;
+		qcom,cdc-vdd-mic-bias-voltage = <3088000 3088000>;
+		qcom,cdc-vdd-mic-bias-current = <5000>;
+
+		qcom,cdc-mclk-clk-rate = <9600000>;
+
+		qcom,cdc-static-supplies = "cdc-vdda-cp",
+					   "cdc-vdd-pa";
+
+		qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+
+		/*
+		 * No unit address ("@...") is used here because the driver
+		 * looks up this child node by the name msm-dig-codec.
+		 */
+		msm_digital_codec: msm-dig-codec {
+			compatible = "qcom,msm-digital-codec";
+			reg = <0x62ec0000 0x0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
index 3bd0350..ef92cdd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-audio.dtsi
@@ -39,42 +39,6 @@
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
-		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
-		qcom,audio-routing =
-			"AIF4 VI", "MCLK",
-			"RX_BIAS", "MCLK",
-			"MADINPUT", "MCLK",
-			"hifi amp", "LINEOUT1",
-			"hifi amp", "LINEOUT2",
-			"AMIC2", "MIC BIAS2",
-			"MIC BIAS2", "Headset Mic",
-			"AMIC3", "MIC BIAS2",
-			"MIC BIAS2", "ANCRight Headset Mic",
-			"AMIC4", "MIC BIAS2",
-			"MIC BIAS2", "ANCLeft Headset Mic",
-			"AMIC5", "MIC BIAS3",
-			"MIC BIAS3", "Handset Mic",
-			"DMIC0", "MIC BIAS1",
-			"MIC BIAS1", "Digital Mic0",
-			"DMIC1", "MIC BIAS1",
-			"MIC BIAS1", "Digital Mic1",
-			"DMIC2", "MIC BIAS3",
-			"MIC BIAS3", "Digital Mic2",
-			"DMIC3", "MIC BIAS3",
-			"MIC BIAS3", "Digital Mic3",
-			"DMIC4", "MIC BIAS4",
-			"MIC BIAS4", "Digital Mic4",
-			"DMIC5", "MIC BIAS4",
-			"MIC BIAS4", "Digital Mic5",
-			"SpkrLeft IN", "SPK1 OUT",
-			"SpkrRight IN", "SPK2 OUT";
-
-		qcom,msm-mbhc-hphl-swh = <1>;
-		qcom,msm-mbhc-gnd-swh = <1>;
-		qcom,hph-en0-gpio = <&tavil_hph_en0>;
-		qcom,hph-en1-gpio = <&tavil_hph_en1>;
-		qcom,msm-mclk-freq = <9600000>;
-		qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 			<&loopback>, <&compress>, <&hostless>,
 			<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -87,9 +51,10 @@
 			"msm-pcm-routing", "msm-cpe-lsm",
 			"msm-compr-dsp", "msm-pcm-dsp-noirq";
 		asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
-			<&dai_mi2s2>, <&dai_mi2s3>,
+			<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
 			<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
 			<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+			<&dai_quin_auxpcm>,
 			<&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
 			<&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
 			<&sb_4_rx>, <&sb_4_tx>, <&sb_5_rx>, <&sb_5_tx>,
@@ -103,11 +68,14 @@
 			<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
 			<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
 			<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
-			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
+			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
 		asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+			"msm-dai-q6-mi2s.5",
 			"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
 			"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+			"msm-dai-q6-auxpcm.5",
 			"msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
 			"msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
 			"msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
@@ -125,59 +93,17 @@
 			"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
 			"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
 			"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
-			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
-		asoc-codec = <&stub_codec>;
-		asoc-codec-names = "msm-stub-codec.1";
-		qcom,wsa-max-devs = <2>;
-		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
-				<&wsa881x_0213>, <&wsa881x_0214>;
-		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
-					"SpkrLeft", "SpkrRight";
+			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
+			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
 	};
 
-int_codec: sound {
+	int_codec: sound {
 		status = "okay";
 		compatible = "qcom,sdm670-asoc-snd";
-		qcom,model = "sdm670-snd-card";
+		qcom,model = "sdm670-snd-card-mtp";
 		qcom,wcn-btfm;
 		qcom,mi2s-audio-intf;
 		qcom,auxpcm-audio-intf;
-		qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
-		qcom,msm-mclk-freq = <9600000>;
-		qcom,msm-mbhc-hphl-swh = <1>;
-		qcom,msm-mbhc-gnd-swh = <1>;
-		qcom,msm-micbias2-ext-cap;
-		qcom,msm-hs-micbias-type = "external";
-		qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
-		qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
-		qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
-		qcom,audio-routing =
-			"RX_BIAS", "INT_MCLK0",
-			"SPK_RX_BIAS", "INT_MCLK0",
-			"INT_LDO_H", "INT_MCLK0",
-			"MIC BIAS External", "Handset Mic",
-			"MIC BIAS External2", "Headset Mic",
-			"MIC BIAS External", "Secondary Mic",
-			"AMIC1", "MIC BIAS External",
-			"AMIC2", "MIC BIAS External2",
-			"AMIC3", "MIC BIAS External",
-			"DMIC1", "MIC BIAS External",
-			"MIC BIAS External", "Digital Mic1",
-			"DMIC2", "MIC BIAS External",
-			"MIC BIAS External", "Digital Mic2",
-			"DMIC3", "MIC BIAS External",
-			"MIC BIAS External", "Digital Mic3",
-			"DMIC4", "MIC BIAS External",
-			"MIC BIAS External", "Digital Mic4",
-			"SpkrLeft IN", "SPK1 OUT",
-			"SpkrRight IN", "SPK2 OUT",
-			"PDM_IN_RX1", "PDM_OUT_RX1",
-			"PDM_IN_RX2", "PDM_OUT_RX2",
-			"PDM_IN_RX3", "PDM_OUT_RX3",
-			"ADC1_IN", "ADC1_OUT",
-			"ADC2_IN", "ADC2_OUT",
-			"ADC3_IN", "ADC3_OUT";
-
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 			<&loopback>, <&compress>, <&hostless>,
 			<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -190,12 +116,13 @@
 			"msm-pcm-routing", "msm-compr-dsp",
 			"msm-pcm-dsp-noirq";
 		asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
-			<&dai_mi2s2>, <&dai_mi2s3>,
+			<&dai_mi2s2>, <&dai_mi2s3>, <&dai_mi2s4>,
 			<&dai_int_mi2s0>, <&dai_int_mi2s1>,
 			<&dai_int_mi2s2>, <&dai_int_mi2s3>,
 			<&dai_int_mi2s4>, <&dai_int_mi2s5>,
 			<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
 			<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
+			<&dai_quin_auxpcm>,
 			<&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
 			<&afe_proxy_tx>, <&incall_record_rx>,
 			<&incall_record_tx>, <&incall_music_rx>,
@@ -205,14 +132,17 @@
 			<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_tx_0>,
 			<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
 			<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
-			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
+			<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>,
+			<&dai_quin_tdm_rx_0>, <&dai_quin_tdm_tx_0>;
 		asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
 			"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
+			"msm-dai-q6-mi2s.5",
 			"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
 			"msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
 			"msm-dai-q6-mi2s.11", "msm-dai-q6-mi2s.12",
 			"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
 			"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
+			"msm-dai-q6-auxpcm.5",
 			"msm-dai-q6-dev.224", "msm-dai-q6-dev.225",
 			"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
 			"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
@@ -223,136 +153,8 @@
 			"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36865",
 			"msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881",
 			"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
-			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
-		asoc-codec = <&stub_codec>, <&msm_digital_codec>,
-				<&pmic_analog_codec>, <&msm_sdw_codec>;
-		asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
-				"analog-codec", "msm_sdw_codec";
-
-		qcom,wsa-max-devs = <2>;
-		qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
-				<&wsa881x_213_en>, <&wsa881x_214_en>;
-		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
-					"SpkrLeft", "SpkrRight";
-	};
-
-	cdc_pdm_gpios: cdc_pdm_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&cdc_pdm_clk_active &cdc_pdm_sync_active
-			     &cdc_pdm_rx0_active &cdc_pdm_rx1_2_active
-			     &cdc_pdm_2_gpios_active>;
-		pinctrl-1 = <&cdc_pdm_clk_sleep &cdc_pdm_sync_sleep
-			     &cdc_pdm_rx0_sleep &cdc_pdm_rx1_2_sleep
-			     &cdc_pdm_2_gpios_sleep>;
-		qcom,lpi-gpios;
-	};
-
-	cdc_comp_gpios: cdc_comp_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&cdc_rx0_comp_active &cdc_rx1_comp_active>;
-		pinctrl-1 = <&cdc_rx0_comp_sleep &cdc_rx1_comp_sleep>;
-		qcom,lpi-gpios;
-	};
-
-	cdc_dmic_gpios: cdc_dmic_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&cdc_dmic12_gpios_active
-				&cdc_dmic34_gpios_active>;
-		pinctrl-1 = <&cdc_dmic12_gpios_sleep
-				&cdc_dmic34_gpios_sleep>;
-		qcom,lpi-gpios;
-	};
-
-	cdc_sdw_gpios: sdw_clk_data_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&sdw_clk_active &sdw_data_active>;
-		pinctrl-1 = <&sdw_clk_sleep &sdw_data_sleep>;
-	};
-
-	wsa_spkr_en1: wsa_spkr_en1_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&spkr_1_sd_n_active>;
-		pinctrl-1 = <&spkr_1_sd_n_sleep>;
-	};
-
-	wsa_spkr_en2: wsa_spkr_en2_pinctrl {
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&spkr_2_sd_n_active>;
-		pinctrl-1 = <&spkr_2_sd_n_sleep>;
-	};
-
-	msm_sdw_codec: msm-sdw-codec@62ec1000 {
-		status = "okay";
-		compatible = "qcom,msm-sdw-codec";
-		reg = <0x62ec1000 0x0>;
-		interrupts = <0 161 0>;
-		interrupt-names = "swr_master_irq";
-		qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
-
-		swr_master {
-			compatible = "qcom,swr-wcd";
-			#address-cells = <2>;
-			#size-cells = <0>;
-
-			wsa881x_211_en: wsa881x_en@20170211 {
-				compatible = "qcom,wsa881x";
-				reg = <0x0 0x20170211>;
-				qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
-			};
-
-			wsa881x_212_en: wsa881x_en@20170212 {
-				compatible = "qcom,wsa881x";
-				reg = <0x0 0x20170212>;
-				qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
-			};
-
-			wsa881x_213_en: wsa881x_en@21170213 {
-				compatible = "qcom,wsa881x";
-				reg = <0x0 0x21170213>;
-				qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
-			};
-
-			wsa881x_214_en: wsa881x_en@21170214 {
-				compatible = "qcom,wsa881x";
-				reg = <0x0 0x21170214>;
-				qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
-			};
-		};
-	};
-
-	wcd9xxx_intc: wcd9xxx-irq {
-		status = "disabled";
-		compatible = "qcom,wcd9xxx-irq";
-		interrupt-controller;
-		#interrupt-cells = <1>;
-		interrupt-parent = <&tlmm>;
-		qcom,gpio-connect = <&tlmm 80 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&wcd_intr_default>;
-	};
-
-	clock_audio_lnbb: audio_ext_clk_lnbb {
-		status = "disabled";
-		compatible = "qcom,audio-ref-clk";
-		clock-names = "osr_clk";
-		clocks = <&clock_rpmh RPMH_LN_BB_CLK2>;
-		qcom,node_has_rpm_clock;
-		#clock-cells = <1>;
-	};
-
-	wcd_rst_gpio: msm_cdc_pinctrl@64 {
-		status = "disabled";
-		compatible = "qcom,msm-cdc-pinctrl";
-		pinctrl-names = "aud_active", "aud_sleep";
-		pinctrl-0 = <&lpi_cdc_reset_active>;
-		pinctrl-1 = <&lpi_cdc_reset_sleep>;
-		qcom,lpi-gpios;
+			"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913",
+			"msm-dai-q6-tdm.36928", "msm-dai-q6-tdm.36929";
 	};
 
 	cpe: qcom,msm-cpe-lsm {
@@ -363,18 +165,6 @@
 		compatible = "qcom,msm-cpe-lsm";
 		qcom,msm-cpe-lsm-id = <3>;
 	};
-
-	wdsp_mgr: qcom,wcd-dsp-mgr {
-		compatible = "qcom,wcd-dsp-mgr";
-		qcom,wdsp-components = <&wcd934x_cdc 0>,
-				       <&wcd_spi_0 1>,
-				       <&glink_spi_xprt_wdsp 2>;
-		qcom,img-filename = "cpe_9340";
-	};
-
-	wdsp_glink: qcom,wcd-dsp-glink {
-		compatible = "qcom,wcd-dsp-glink";
-	};
 };
 
 &slim_aud {
@@ -384,56 +174,6 @@
 		compatible = "qcom,msm-dai-slim";
 		elemental-addr = [ff ff ff fe 17 02];
 	};
-
-	wcd934x_cdc: tavil_codec {
-		status = "disabled";
-		compatible = "qcom,tavil-slim-pgd";
-		elemental-addr = [00 01 50 02 17 02];
-
-		interrupt-parent = <&wcd9xxx_intc>;
-		interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
-			      17 18 19 20 21 22 23 24 25 26 27 28 29
-			      30 31>;
-
-		qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
-
-		clock-names = "wcd_clk";
-		clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>;
-
-		cdc-vdd-mic-bias-supply = <&pm660l_bob>;
-		qcom,cdc-vdd-mic-bias-voltage = <3300000 3300000>;
-		qcom,cdc-vdd-mic-bias-current = <30400>;
-
-		qcom,cdc-static-supplies = "cdc-vdd-mic-bias";
-
-		qcom,cdc-micbias1-mv = <1800>;
-		qcom,cdc-micbias2-mv = <1800>;
-		qcom,cdc-micbias3-mv = <1800>;
-		qcom,cdc-micbias4-mv = <1800>;
-
-		qcom,cdc-mclk-clk-rate = <9600000>;
-		qcom,cdc-slim-ifd = "tavil-slim-ifd";
-		qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
-		qcom,cdc-dmic-sample-rate = <4800000>;
-		qcom,cdc-mad-dmic-rate = <600000>;
-
-		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
-
-		wcd_spi_0: wcd_spi {
-			compatible = "qcom,wcd-spi-v2";
-			qcom,master-bus-num = <8>;
-			qcom,chip-select = <0>;
-			qcom,max-frequency = <24000000>;
-			qcom,mem-base-addr = <0x100000>;
-		};
-
-		wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
-			compatible = "qcom,msm-cdc-pinctrl";
-			pinctrl-names = "aud_active", "aud_sleep";
-			pinctrl-0 = <&wcd_usbc_analog_en1_active>;
-			pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
-		};
-	};
 };
 
 &msm_dai_mi2s {
@@ -486,83 +226,3 @@
 		qcom,msm-mi2s-tx-lines = <3>;
 	};
 };
-
-&pm660l_3 {
-	pmic_analog_codec: analog-codec@f000 {
-		status = "okay";
-		compatible = "qcom,pmic-analog-codec";
-		reg = <0xf000 0x200>;
-		#address-cells = <2>;
-		#size-cells = <0>;
-		interrupt-parent = <&spmi_bus>;
-		interrupts = <0x3 0xf0 0x0 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x1 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x2 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x3 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x4 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x5 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x6 IRQ_TYPE_NONE>,
-				<0x3 0xf0 0x7 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x0 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x1 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x2 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x3 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x4 IRQ_TYPE_NONE>,
-				<0x3 0xf1 0x5 IRQ_TYPE_NONE>;
-		interrupt-names = "spk_cnp_int",
-				  "spk_clip_int",
-				  "spk_ocp_int",
-				  "ins_rem_det1",
-				  "but_rel_det",
-				  "but_press_det",
-				  "ins_rem_det",
-				  "mbhc_int",
-				  "ear_ocp_int",
-				  "hphr_ocp_int",
-				  "hphl_ocp_det",
-				  "ear_cnp_int",
-				  "hphr_cnp_int",
-				  "hphl_cnp_int";
-
-
-		cdc-vdda-cp-supply = <&pm660_s4>;
-		qcom,cdc-vdda-cp-voltage = <1900000 2050000>;
-		qcom,cdc-vdda-cp-current = <50000>;
-
-		cdc-vdd-pa-supply = <&pm660_s4>;
-		qcom,cdc-vdd-pa-voltage = <2040000 2040000>;
-		qcom,cdc-vdd-pa-current = <260000>;
-
-		cdc-vdd-mic-bias-supply = <&pm660l_l7>;
-		qcom,cdc-vdd-mic-bias-voltage = <3088000 3088000>;
-		qcom,cdc-vdd-mic-bias-current = <5000>;
-
-		qcom,cdc-mclk-clk-rate = <9600000>;
-
-		qcom,cdc-static-supplies = "cdc-vdda-cp",
-					   "cdc-vdd-pa";
-
-		qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
-
-		/*
-		 * Not marking address @ as driver searches this child
-		 * with name msm-dig-codec
-		 */
-		msm_digital_codec: msm-dig-codec {
-			compatible = "qcom,msm-digital-codec";
-			reg = <0x62ec0000 0x0>;
-		};
-	};
-};
-
-&pm660_gpios {
-	gpio@c200 {
-		status = "ok";
-		qcom,mode = <1>;
-		qcom,pull = <4>;
-		qcom,vin-sel = <0>;
-		qcom,src-sel = <2>;
-		qcom,master-en = <1>;
-		qcom,out-strength = <2>;
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
new file mode 100644
index 0000000..8749145
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-bus.dtsi
@@ -0,0 +1,1873 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
+
+&soc {
+	ad_hoc_bus: ad-hoc-bus {
+		compatible = "qcom,msm-bus-device";
+		reg = <0x016E0000 0x40000>,
+			<0x1700000 0x40000>,
+			<0x1500000 0x40000>,
+			<0x14E0000 0x40000>,
+			<0x17900000 0x40000>,
+			<0x1380000 0x40000>,
+			<0x1380000 0x40000>,
+			<0x1740000 0x40000>,
+			<0x1620000 0x40000>,
+			<0x1620000 0x40000>,
+			<0x1620000 0x40000>;
+
+		reg-names = "aggre1_noc-base", "aggre2_noc-base",
+			"config_noc-base", "dc_noc-base",
+			"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
+			"mmss_noc-base", "system_noc-base", "ipa_virt-base",
+			"camnoc_virt-base";
+
+		mbox-names = "apps_rsc", "disp_rsc";
+		mboxes = <&apps_rsc 0 &disp_rsc 0>;
+
+	/*RSCs*/
+		rsc_apps: rsc-apps {
+			cell-id = <MSM_BUS_RSC_APPS>;
+			label = "apps_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <2>;
+		};
+
+		rsc_disp: rsc-disp {
+			cell-id = <MSM_BUS_RSC_DISP>;
+			label = "disp_rsc";
+			qcom,rsc-dev;
+			qcom,req_state = <3>;
+		};
+
+	/*BCMs*/
+		bcm_acv: bcm-acv {
+			cell-id = <MSM_BUS_BCM_ACV>;
+			label = "ACV";
+			qcom,bcm-name = "ACV";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_alc: bcm-alc {
+			cell-id = <MSM_BUS_BCM_ALC>;
+			label = "ALC";
+			qcom,bcm-name = "ALC";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mc0: bcm-mc0 {
+			cell-id = <MSM_BUS_BCM_MC0>;
+			label = "MC0";
+			qcom,bcm-name = "MC0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh0: bcm-sh0 {
+			cell-id = <MSM_BUS_BCM_SH0>;
+			label = "SH0";
+			qcom,bcm-name = "SH0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm0: bcm-mm0 {
+			cell-id = <MSM_BUS_BCM_MM0>;
+			label = "MM0";
+			qcom,bcm-name = "MM0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh1: bcm-sh1 {
+			cell-id = <MSM_BUS_BCM_SH1>;
+			label = "SH1";
+			qcom,bcm-name = "SH1";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm1: bcm-mm1 {
+			cell-id = <MSM_BUS_BCM_MM1>;
+			label = "MM1";
+			qcom,bcm-name = "MM1";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh2: bcm-sh2 {
+			cell-id = <MSM_BUS_BCM_SH2>;
+			label = "SH2";
+			qcom,bcm-name = "SH2";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm2: bcm-mm2 {
+			cell-id = <MSM_BUS_BCM_MM2>;
+			label = "MM2";
+			qcom,bcm-name = "MM2";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh3: bcm-sh3 {
+			cell-id = <MSM_BUS_BCM_SH3>;
+			label = "SH3";
+			qcom,bcm-name = "SH3";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm3: bcm-mm3 {
+			cell-id = <MSM_BUS_BCM_MM3>;
+			label = "MM3";
+			qcom,bcm-name = "MM3";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh5: bcm-sh5 {
+			cell-id = <MSM_BUS_BCM_SH5>;
+			label = "SH5";
+			qcom,bcm-name = "SH5";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn0: bcm-sn0 {
+			cell-id = <MSM_BUS_BCM_SN0>;
+			label = "SN0";
+			qcom,bcm-name = "SN0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_ce0: bcm-ce0 {
+			cell-id = <MSM_BUS_BCM_CE0>;
+			label = "CE0";
+			qcom,bcm-name = "CE0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_ip0: bcm-ip0 {
+			cell-id = <MSM_BUS_BCM_IP0>;
+			label = "IP0";
+			qcom,bcm-name = "IP0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_cn0: bcm-cn0 {
+			cell-id = <MSM_BUS_BCM_CN0>;
+			label = "CN0";
+			qcom,bcm-name = "CN0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_qup0: bcm-qup0 {
+			cell-id = <MSM_BUS_BCM_QUP0>;
+			label = "QUP0";
+			qcom,bcm-name = "QUP0";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn1: bcm-sn1 {
+			cell-id = <MSM_BUS_BCM_SN1>;
+			label = "SN1";
+			qcom,bcm-name = "SN1";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn2: bcm-sn2 {
+			cell-id = <MSM_BUS_BCM_SN2>;
+			label = "SN2";
+			qcom,bcm-name = "SN2";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn3: bcm-sn3 {
+			cell-id = <MSM_BUS_BCM_SN3>;
+			label = "SN3";
+			qcom,bcm-name = "SN3";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn4: bcm-sn4 {
+			cell-id = <MSM_BUS_BCM_SN4>;
+			label = "SN4";
+			qcom,bcm-name = "SN4";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn5: bcm-sn5 {
+			cell-id = <MSM_BUS_BCM_SN5>;
+			label = "SN5";
+			qcom,bcm-name = "SN5";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn8: bcm-sn8 {
+			cell-id = <MSM_BUS_BCM_SN8>;
+			label = "SN8";
+			qcom,bcm-name = "SN8";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn10: bcm-sn10 {
+			cell-id = <MSM_BUS_BCM_SN10>;
+			label = "SN10";
+			qcom,bcm-name = "SN10";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn11: bcm-sn11 {
+			cell-id = <MSM_BUS_BCM_SN11>;
+			label = "SN11";
+			qcom,bcm-name = "SN11";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sn13: bcm-sn13 {
+			cell-id = <MSM_BUS_BCM_SN13>;
+			label = "SN13";
+			qcom,bcm-name = "SN13";
+			qcom,rscs = <&rsc_apps>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mc0_display: bcm-mc0_display {
+			cell-id = <MSM_BUS_BCM_MC0_DISPLAY>;
+			label = "MC0_DISPLAY";
+			qcom,bcm-name = "MC0";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+		bcm_sh0_display: bcm-sh0_display {
+			cell-id = <MSM_BUS_BCM_SH0_DISPLAY>;
+			label = "SH0_DISPLAY";
+			qcom,bcm-name = "SH0";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm0_display: bcm-mm0_display {
+			cell-id = <MSM_BUS_BCM_MM0_DISPLAY>;
+			label = "MM0_DISPLAY";
+			qcom,bcm-name = "MM0";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm1_display: bcm-mm1_display {
+			cell-id = <MSM_BUS_BCM_MM1_DISPLAY>;
+			label = "MM1_DISPLAY";
+			qcom,bcm-name = "MM1";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm2_display: bcm-mm2_display {
+			cell-id = <MSM_BUS_BCM_MM2_DISPLAY>;
+			label = "MM2_DISPLAY";
+			qcom,bcm-name = "MM2";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+		bcm_mm3_display: bcm-mm3_display {
+			cell-id = <MSM_BUS_BCM_MM3_DISPLAY>;
+			label = "MM3_DISPLAY";
+			qcom,bcm-name = "MM3";
+			qcom,rscs = <&rsc_disp>;
+			qcom,bcm-dev;
+		};
+
+
+		/*Buses*/
+		fab_aggre1_noc: fab-aggre1_noc {
+			cell-id = <MSM_BUS_FAB_A1_NOC>;
+			label = "fab-aggre1_noc";
+			qcom,fab-dev;
+			qcom,base-name = "aggre1_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <16384>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_aggre2_noc: fab-aggre2_noc {
+			cell-id = <MSM_BUS_FAB_A2_NOC>;
+			label = "fab-aggre2_noc";
+			qcom,fab-dev;
+			qcom,base-name = "aggre2_noc-base";
+			qcom,qos-off = <2048>;
+			qcom,base-offset = <12288>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_camnoc_virt: fab-camnoc_virt {
+			cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+			label = "fab-camnoc_virt";
+			qcom,fab-dev;
+			qcom,base-name = "camnoc_virt-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
+		fab_config_noc: fab-config_noc {
+			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
+			label = "fab-config_noc";
+			qcom,fab-dev;
+			qcom,base-name = "config_noc-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_dc_noc: fab-dc_noc {
+			cell-id = <MSM_BUS_FAB_DC_NOC>;
+			label = "fab-dc_noc";
+			qcom,fab-dev;
+			qcom,base-name = "dc_noc-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_gladiator_noc: fab-gladiator_noc {
+			cell-id = <MSM_BUS_FAB_GNOC>;
+			label = "fab-gladiator_noc";
+			qcom,fab-dev;
+			qcom,base-name = "gladiator_noc-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_ipa_virt: fab-ipa_virt {
+			cell-id = <MSM_BUS_FAB_IPA_VIRT>;
+			label = "fab-ipa_virt";
+			qcom,fab-dev;
+			qcom,base-name = "ipa_virt-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
+		fab_mc_virt: fab-mc_virt {
+			cell-id = <MSM_BUS_FAB_MC_VIRT>;
+			label = "fab-mc_virt";
+			qcom,fab-dev;
+			qcom,base-name = "mc_virt-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
+		fab_mem_noc: fab-mem_noc {
+			cell-id = <MSM_BUS_FAB_MEM_NOC>;
+			label = "fab-mem_noc";
+			qcom,fab-dev;
+			qcom,base-name = "mem_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <65536>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_mmss_noc: fab-mmss_noc {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC>;
+			label = "fab-mmss_noc";
+			qcom,fab-dev;
+			qcom,base-name = "mmss_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <36864>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_system_noc: fab-system_noc {
+			cell-id = <MSM_BUS_FAB_SYS_NOC>;
+			label = "fab-system_noc";
+			qcom,fab-dev;
+			qcom,base-name = "system_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <36864>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_mc_virt_display: fab-mc_virt_display {
+			cell-id = <MSM_BUS_FAB_MC_VIRT_DISPLAY>;
+			label = "fab-mc_virt_display";
+			qcom,fab-dev;
+			qcom,base-name = "mc_virt-base";
+			qcom,qos-off = <0>;
+			qcom,base-offset = <0>;
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
+		fab_mem_noc_display: fab-mem_noc_display {
+			cell-id = <MSM_BUS_FAB_MEM_NOC_DISPLAY>;
+			label = "fab-mem_noc_display";
+			qcom,fab-dev;
+			qcom,base-name = "mem_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <65536>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+		fab_mmss_noc_display: fab-mmss_noc_display {
+			cell-id = <MSM_BUS_FAB_MMSS_NOC_DISPLAY>;
+			label = "fab-mmss_noc_display";
+			qcom,fab-dev;
+			qcom,base-name = "mmss_noc-base";
+			qcom,qos-off = <4096>;
+			qcom,base-offset = <36864>;
+			qcom,bypass-qos-prg;
+			qcom,bus-type = <1>;
+			clocks = <>;
+		};
+
+
+		/*Masters*/
+
+		mas_qhm_a1noc_cfg: mas-qhm-a1noc-cfg {
+			cell-id = <MSM_BUS_MASTER_A1NOC_CFG>;
+			label = "mas-qhm-a1noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_aggre1_noc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_qhm_qup1: mas-qhm-qup1 {
+			cell-id = <MSM_BUS_MASTER_BLSP_1>;
+			label = "mas-qhm-qup1";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <16>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,bcms = <&bcm_qup0>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_qhm_tsif: mas-qhm-tsif {
+			cell-id = <MSM_BUS_MASTER_TSIF>;
+			label = "mas-qhm-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+		};
+
+		mas_xm_emmc: mas-xm-emmc {
+			cell-id = <MSM_BUS_MASTER_EMMC>;
+			label = "mas-xm-emmc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_xm_sdc2: mas-xm-sdc2 {
+			cell-id = <MSM_BUS_MASTER_SDCC_2>;
+			label = "mas-xm-sdc2";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,ap-owned;
+			qcom,prio = <1>;
+		};
+
+		mas_xm_sdc4: mas-xm-sdc4 {
+			cell-id = <MSM_BUS_MASTER_SDCC_4>;
+			label = "mas-xm-sdc4";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,ap-owned;
+			qcom,prio = <1>;
+		};
+
+		mas_xm_ufs_mem: mas-xm-ufs-mem {
+			cell-id = <MSM_BUS_MASTER_UFS_MEM>;
+			label = "mas-xm-ufs-mem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_a1noc_snoc>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg {
+			cell-id = <MSM_BUS_MASTER_A2NOC_CFG>;
+			label = "mas-qhm-a2noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_aggre2_noc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+		};
+
+		mas_qhm_qdss_bam: mas-qhm-qdss-bam {
+			cell-id = <MSM_BUS_MASTER_QDSS_BAM>;
+			label = "mas-qhm-qdss-bam";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <17>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_qhm_qup2: mas-qhm-qup2 {
+			cell-id = <MSM_BUS_MASTER_BLSP_2>;
+			label = "mas-qhm-qup2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_qup0>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_qnm_cnoc: mas-qnm-cnoc {
+			cell-id = <MSM_BUS_MASTER_CNOC_A2NOC>;
+			label = "mas-qnm-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
+			qcom,prio = <1>;
+		};
+
+		mas_qxm_crypto: mas-qxm-crypto {
+			cell-id = <MSM_BUS_MASTER_CRYPTO_CORE_0>;
+			label = "mas-qxm-crypto";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_ce0>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_qxm_ipa: mas-qxm-ipa {
+			cell-id = <MSM_BUS_MASTER_IPA>;
+			label = "mas-qxm-ipa";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_xm_qdss_etr: mas-xm-qdss-etr {
+			cell-id = <MSM_BUS_MASTER_QDSS_ETR>;
+			label = "mas-xm-qdss-etr";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <16>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_xm_usb3_0: mas-xm-usb3-0 {
+			cell-id = <MSM_BUS_MASTER_USB3>;
+			label = "mas-xm-usb3-0";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <22>;
+			qcom,connections = <&slv_qns_a2noc_snoc>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+			label = "mas-qxm-camnoc-hf0-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+			label = "mas-qxm-camnoc-hf1-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+			label = "mas-qxm-camnoc-sf-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qhm_spdm: mas-qhm-spdm {
+			cell-id = <MSM_BUS_MASTER_SPDM>;
+			label = "mas-qhm-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_cnoc_a2noc>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qhm_tic: mas-qhm-tic {
+			cell-id = <MSM_BUS_MASTER_TIC>;
+			label = "mas-qhm-tic";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				&slv_qhs_camera_cfg &slv_qhs_sdc4
+				&slv_qhs_sdc2 &slv_qhs_mnoc_cfg
+				&slv_qhs_ufs_mem_cfg &slv_qhs_glm
+				&slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				&slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				&slv_qhs_tcsr &slv_qhs_dcc_cfg
+				&slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
+				&slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
+				&slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
+				&slv_qhs_tsif &slv_qhs_compute_dsp_cfg
+				&slv_qhs_aop &slv_qhs_qupv3_north
+				&slv_srvc_cnoc &slv_qhs_usb3_0
+				&slv_qhs_ipa &slv_qhs_cpr_cx
+				&slv_qhs_a1_noc_cfg &slv_qhs_aoss
+				&slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
+				&slv_qhs_qupv3_south &slv_qhs_spdm
+				&slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
+				&slv_qhs_tlmm_north &slv_qhs_clk_ctl
+				&slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qnm_snoc: mas-qnm-snoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_MAS>;
+			label = "mas-qnm-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				&slv_qhs_camera_cfg &slv_qhs_sdc4
+				&slv_qhs_sdc2 &slv_qhs_mnoc_cfg
+				&slv_qhs_ufs_mem_cfg &slv_qhs_glm
+				&slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				&slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				&slv_qhs_tcsr &slv_qhs_dcc_cfg
+				&slv_qhs_ddrss_cfg &slv_qhs_snoc_cfg
+				&slv_qhs_phy_refgen_south &slv_qhs_gpuss_cfg
+				&slv_qhs_venus_cfg &slv_qhs_tsif
+				&slv_qhs_compute_dsp_cfg &slv_qhs_aop
+				&slv_qhs_qupv3_north &slv_srvc_cnoc
+				&slv_qhs_usb3_0 &slv_qhs_ipa
+				&slv_qhs_cpr_cx &slv_qhs_a1_noc_cfg
+				&slv_qhs_aoss &slv_qhs_prng
+				&slv_qhs_vsense_ctrl_cfg &slv_qhs_qupv3_south
+				&slv_qhs_spdm &slv_qhs_crypto0_cfg
+				&slv_qhs_pimem_cfg &slv_qhs_tlmm_north
+				&slv_qhs_clk_ctl &slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_xm_qdss_dap: mas-xm-qdss-dap {
+			cell-id = <MSM_BUS_MASTER_QDSS_DAP>;
+			label = "mas-xm-qdss-dap";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_tlmm_south
+				&slv_qhs_camera_cfg
+				&slv_qhs_sdc4
+				&slv_qhs_sdc2 &slv_qhs_mnoc_cfg
+				&slv_qhs_ufs_mem_cfg &slv_qhs_glm
+				&slv_qhs_pdm &slv_qhs_a2_noc_cfg
+				&slv_qhs_qdss_cfg &slv_qhs_display_cfg
+				&slv_qhs_tcsr &slv_qhs_dcc_cfg
+				&slv_qhs_ddrss_cfg &slv_qns_cnoc_a2noc
+				&slv_qhs_snoc_cfg &slv_qhs_phy_refgen_south
+				&slv_qhs_gpuss_cfg &slv_qhs_venus_cfg
+				&slv_qhs_tsif &slv_qhs_compute_dsp_cfg
+				&slv_qhs_aop &slv_qhs_qupv3_north
+				&slv_srvc_cnoc &slv_qhs_usb3_0
+				&slv_qhs_ipa &slv_qhs_cpr_cx
+				&slv_qhs_a1_noc_cfg &slv_qhs_aoss
+				&slv_qhs_prng &slv_qhs_vsense_ctrl_cfg
+				&slv_qhs_qupv3_south &slv_qhs_spdm
+				&slv_qhs_crypto0_cfg &slv_qhs_pimem_cfg
+				&slv_qhs_tlmm_north &slv_qhs_clk_ctl
+				&slv_qhs_imem_cfg>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		mas_qhm_cnoc: mas-qhm-cnoc {
+			cell-id = <MSM_BUS_MASTER_CNOC_DC_NOC>;
+			label = "mas-qhm-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qhs_memnoc &slv_qhs_llcc>;
+			qcom,bus-dev = <&fab_dc_noc>;
+		};
+
+		mas_acm_l3: mas-acm-l3 {
+			cell-id = <MSM_BUS_MASTER_AMPSS_M0>;
+			label = "mas-acm-l3";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_gnoc
+				&slv_qns_gladiator_sodv &slv_qns_gnoc_memnoc>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		mas_pm_gnoc_cfg: mas-pm-gnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_GNOC_CFG>;
+			label = "mas-pm-gnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_gnoc>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		mas_ipa_core_master: mas-ipa-core-master {
+			cell-id = <MSM_BUS_MASTER_IPA_CORE>;
+			label = "mas-ipa-core-master";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_ipa_core_slave>;
+			qcom,bus-dev = <&fab_ipa_virt>;
+		};
+
+		mas_llcc_mc: mas-llcc-mc {
+			cell-id = <MSM_BUS_MASTER_LLCC>;
+			label = "mas-llcc-mc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <2>;
+			qcom,connections = <&slv_ebi>;
+			qcom,bus-dev = <&fab_mc_virt>;
+		};
+
+		mas_acm_tcu: mas-acm-tcu {
+			cell-id = <MSM_BUS_MASTER_TCU_0>;
+			label = "mas-acm-tcu";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns_apps_io &slv_qns_llcc
+				&slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh3>;
+			qcom,ap-owned;
+			qcom,prio = <6>;
+		};
+
+		mas_qhm_memnoc_cfg: mas-qhm-memnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_MEM_NOC_CFG>;
+			label = "mas-qhm-memnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_memnoc
+				&slv_qhs_mdsp_ms_mpu_cfg>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		mas_qnm_apps: mas-qnm-apps {
+			cell-id = <MSM_BUS_MASTER_GNOC_MEM_NOC>;
+			label = "mas-qnm-apps";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <2 3>;
+			qcom,connections = <&slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh5>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_qnm_mnoc_hf: mas-qnm-mnoc-hf {
+			cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC>;
+			label = "mas-qnm-mnoc-hf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <4 5>;
+			qcom,connections = <&slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qnm_mnoc_sf: mas-qnm-mnoc-sf {
+			cell-id = <MSM_BUS_MASTER_MNOC_SF_MEM_NOC>;
+			label = "mas-qnm-mnoc-sf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns_apps_io
+				 &slv_qns_llcc &slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qnm_snoc_gc: mas-qnm-snoc-gc {
+			cell-id = <MSM_BUS_MASTER_SNOC_GC_MEM_NOC>;
+			label = "mas-qnm-snoc-gc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,connections = <&slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qnm_snoc_sf: mas-qnm-snoc-sf {
+			cell-id = <MSM_BUS_MASTER_SNOC_SF_MEM_NOC>;
+			label = "mas-qnm-snoc-sf";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <9>;
+			qcom,connections = <&slv_qns_apps_io &slv_qns_llcc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_gpu: mas-qxm-gpu {
+			cell-id = <MSM_BUS_MASTER_GRAPHICS_3D>;
+			label = "mas-qxm-gpu";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <10 11>;
+			qcom,connections = <&slv_qns_apps_io
+				 &slv_qns_llcc &slv_qns_memnoc_snoc>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+		};
+
+		mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg {
+			cell-id = <MSM_BUS_MASTER_CNOC_MNOC_CFG>;
+			label = "mas-qhm-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_mnoc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+		};
+
+		mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0>;
+			label = "mas-qxm-camnoc-hf0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1>;
+			label = "mas-qxm-camnoc-hf1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_camnoc_sf: mas-qxm-camnoc-sf {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_SF>;
+			label = "mas-qxm-camnoc-sf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_mdp0: mas-qxm-mdp0 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0>;
+			label = "mas-qxm-mdp0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_mdp1: mas-qxm-mdp1 {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT1>;
+			label = "mas-qxm-mdp1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_rot: mas-qxm-rot {
+			cell-id = <MSM_BUS_MASTER_ROTATOR>;
+			label = "mas-qxm-rot";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_venus0: mas-qxm-venus0 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P0>;
+			label = "mas-qxm-venus0";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <6>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_venus1: mas-qxm-venus1 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_P1>;
+			label = "mas-qxm-venus1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qxm_venus_arm9: mas-qxm-venus-arm9 {
+			cell-id = <MSM_BUS_MASTER_VIDEO_PROC>;
+			label = "mas-qxm-venus-arm9";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <8>;
+			qcom,connections = <&slv_qns2_mem_noc>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm3>;
+			qcom,ap-owned;
+			qcom,prio = <0>;
+			qcom,forwarding;
+		};
+
+		mas_qhm_snoc_cfg: mas-qhm-snoc-cfg {
+			cell-id = <MSM_BUS_MASTER_SNOC_CFG>;
+			label = "mas-qhm-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_srvc_snoc>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		mas_qnm_aggre1_noc: mas-qnm-aggre1-noc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_MAS>;
+			label = "mas-qnm-aggre1-noc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				 &slv_qns_memnoc_sf &slv_qxs_imem
+				 &slv_qhs_apss &slv_qns_cnoc
+				 &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn8>;
+		};
+
+		mas_qnm_aggre2_noc: mas-qnm-aggre2-noc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_MAS>;
+			label = "mas-qnm-aggre2-noc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				&slv_qns_memnoc_sf &slv_qxs_imem
+				&slv_qhs_apss &slv_qns_cnoc
+				&slv_xs_sys_tcu_cfg &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn10>;
+		};
+
+		mas_qnm_gladiator_sodv: mas-qnm-gladiator-sodv {
+			cell-id = <MSM_BUS_MASTER_GNOC_SNOC>;
+			label = "mas-qnm-gladiator-sodv";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_pimem
+				&slv_qxs_imem &slv_qhs_apss
+				&slv_qns_cnoc &slv_xs_sys_tcu_cfg
+				&slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn11>;
+		};
+
+		mas_qnm_memnoc: mas-qnm-memnoc {
+			cell-id = <MSM_BUS_MASTER_MEM_NOC_SNOC>;
+			label = "mas-qnm-memnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qxs_imem
+				&slv_qhs_apss &slv_qxs_pimem
+				&slv_qns_cnoc &slv_xs_qdss_stm>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn13>;
+		};
+
+		mas_qxm_pimem: mas-qxm-pimem {
+			cell-id = <MSM_BUS_MASTER_PIMEM>;
+			label = "mas-qxm-pimem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn4>;
+			qcom,ap-owned;
+			qcom,prio = <2>;
+		};
+
+		mas_xm_gic: mas-xm-gic {
+			cell-id = <MSM_BUS_MASTER_GIC>;
+			label = "mas-xm-gic";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <0>;
+			qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn11>;
+			qcom,ap-owned;
+			qcom,prio = <1>;
+		};
+
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
+		mas_llcc_mc_display: mas-llcc-mc_display {
+			cell-id = <MSM_BUS_MASTER_LLCC_DISPLAY>;
+			label = "mas-llcc-mc_display";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <2>;
+			qcom,connections = <&slv_ebi_display>;
+			qcom,bus-dev = <&fab_mc_virt_display>;
+		};
+
+		mas_qnm_mnoc_hf_display: mas-qnm-mnoc-hf_display {
+			cell-id = <MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY>;
+			label = "mas-qnm-mnoc-hf_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,qport = <4 5>;
+			qcom,connections = <&slv_qns_llcc_display>;
+			qcom,bus-dev = <&fab_mem_noc_display>;
+		};
+
+		mas_qnm_mnoc_sf_display: mas-qnm-mnoc-sf_display {
+			cell-id = <MSM_BUS_MASTER_MNOC_SF_MEM_NOC_DISPLAY>;
+			label = "mas-qnm-mnoc-sf_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <7>;
+			qcom,connections = <&slv_qns_llcc_display>;
+			qcom,bus-dev = <&fab_mem_noc_display>;
+		};
+
+		mas_qxm_mdp0_display: mas-qxm-mdp0_display {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT0_DISPLAY>;
+			label = "mas-qxm-mdp0_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <3>;
+			qcom,connections = <&slv_qns_mem_noc_hf_display>;
+			qcom,bus-dev = <&fab_mmss_noc_display>;
+			qcom,bcms = <&bcm_mm1_display>;
+		};
+
+		mas_qxm_mdp1_display: mas-qxm-mdp1_display {
+			cell-id = <MSM_BUS_MASTER_MDP_PORT1_DISPLAY>;
+			label = "mas-qxm-mdp1_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <4>;
+			qcom,connections = <&slv_qns_mem_noc_hf_display>;
+			qcom,bus-dev = <&fab_mmss_noc_display>;
+			qcom,bcms = <&bcm_mm1_display>;
+		};
+
+		mas_qxm_rot_display: mas-qxm-rot_display {
+			cell-id = <MSM_BUS_MASTER_ROTATOR_DISPLAY>;
+			label = "mas-qxm-rot_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <5>;
+			qcom,connections = <&slv_qns2_mem_noc_display>;
+			qcom,bus-dev = <&fab_mmss_noc_display>;
+			qcom,bcms = <&bcm_mm3_display>;
+		};
+
+		/* Internal nodes */
+
+		/* Slaves */
+
+		slv_qns_a1noc_snoc:slv-qns-a1noc-snoc {
+			cell-id = <MSM_BUS_A1NOC_SNOC_SLV>;
+			label = "slv-qns-a1noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,connections = <&mas_qnm_aggre1_noc>;
+		};
+
+		slv_srvc_aggre1_noc:slv-srvc-aggre1-noc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_A1NOC>;
+			label = "slv-srvc-aggre1-noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre1_noc>;
+			qcom,bcms = <&bcm_sn8>;
+		};
+
+		slv_qns_a2noc_snoc:slv-qns-a2noc-snoc {
+			cell-id = <MSM_BUS_A2NOC_SNOC_SLV>;
+			label = "slv-qns-a2noc-snoc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,connections = <&mas_qnm_aggre2_noc>;
+		};
+
+		slv_srvc_aggre2_noc:slv-srvc-aggre2-noc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_A2NOC>;
+			label = "slv-srvc-aggre2-noc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_aggre2_noc>;
+			qcom,bcms = <&bcm_sn10>;
+		};
+
+		slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+			cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+			label = "slv-qns-camnoc-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
+		slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
+			label = "slv-qhs-a1-noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_a1noc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_a2_noc_cfg:slv-qhs-a2-noc-cfg {
+			cell-id = <MSM_BUS_SLAVE_A2NOC_CFG>;
+			label = "slv-qhs-a2-noc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_a2noc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_aop:slv-qhs-aop {
+			cell-id = <MSM_BUS_SLAVE_AOP>;
+			label = "slv-qhs-aop";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_aoss:slv-qhs-aoss {
+			cell-id = <MSM_BUS_SLAVE_AOSS>;
+			label = "slv-qhs-aoss";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_camera_cfg:slv-qhs-camera-cfg {
+			cell-id = <MSM_BUS_SLAVE_CAMERA_CFG>;
+			label = "slv-qhs-camera-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_clk_ctl:slv-qhs-clk-ctl {
+			cell-id = <MSM_BUS_SLAVE_CLK_CTL>;
+			label = "slv-qhs-clk-ctl";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_compute_dsp_cfg:slv-qhs-compute-dsp-cfg {
+			cell-id = <MSM_BUS_SLAVE_CDSP_CFG>;
+			label = "slv-qhs-compute-dsp-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_cpr_cx:slv-qhs-cpr-cx {
+			cell-id = <MSM_BUS_SLAVE_RBCPR_CX_CFG>;
+			label = "slv-qhs-cpr-cx";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_crypto0_cfg:slv-qhs-crypto0-cfg {
+			cell-id = <MSM_BUS_SLAVE_CRYPTO_0_CFG>;
+			label = "slv-qhs-crypto0-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_dcc_cfg:slv-qhs-dcc-cfg {
+			cell-id = <MSM_BUS_SLAVE_DCC_CFG>;
+			label = "slv-qhs-dcc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_cnoc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ddrss_cfg:slv-qhs-ddrss-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_DDRSS>;
+			label = "slv-qhs-ddrss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_display_cfg:slv-qhs-display-cfg {
+			cell-id = <MSM_BUS_SLAVE_DISPLAY_CFG>;
+			label = "slv-qhs-display-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_glm:slv-qhs-glm {
+			cell-id = <MSM_BUS_SLAVE_GLM>;
+			label = "slv-qhs-glm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_gpuss_cfg:slv-qhs-gpuss-cfg {
+			cell-id = <MSM_BUS_SLAVE_GRAPHICS_3D_CFG>;
+			label = "slv-qhs-gpuss-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_imem_cfg:slv-qhs-imem-cfg {
+			cell-id = <MSM_BUS_SLAVE_IMEM_CFG>;
+			label = "slv-qhs-imem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ipa:slv-qhs-ipa {
+			cell-id = <MSM_BUS_SLAVE_IPA_CFG>;
+			label = "slv-qhs-ipa";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_mnoc_cfg:slv-qhs-mnoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_CNOC_MNOC_CFG>;
+			label = "slv-qhs-mnoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_mnoc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pdm:slv-qhs-pdm {
+			cell-id = <MSM_BUS_SLAVE_PDM>;
+			label = "slv-qhs-pdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_phy_refgen_south:slv-qhs-phy-refgen-south {
+			cell-id = <MSM_BUS_SLAVE_SOUTH_PHY_CFG>;
+			label = "slv-qhs-phy-refgen-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_pimem_cfg:slv-qhs-pimem-cfg {
+			cell-id = <MSM_BUS_SLAVE_PIMEM_CFG>;
+			label = "slv-qhs-pimem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_prng:slv-qhs-prng {
+			cell-id = <MSM_BUS_SLAVE_PRNG>;
+			label = "slv-qhs-prng";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qdss_cfg:slv-qhs-qdss-cfg {
+			cell-id = <MSM_BUS_SLAVE_QDSS_CFG>;
+			label = "slv-qhs-qdss-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qupv3_north:slv-qhs-qupv3-north {
+			cell-id = <MSM_BUS_SLAVE_BLSP_2>;
+			label = "slv-qhs-qupv3-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_qupv3_south:slv-qhs-qupv3-south {
+			cell-id = <MSM_BUS_SLAVE_BLSP_1>;
+			label = "slv-qhs-qupv3-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_sdc2:slv-qhs-sdc2 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_2>;
+			label = "slv-qhs-sdc2";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_sdc4:slv-qhs-sdc4 {
+			cell-id = <MSM_BUS_SLAVE_SDCC_4>;
+			label = "slv-qhs-sdc4";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_snoc_cfg:slv-qhs-snoc-cfg {
+			cell-id = <MSM_BUS_SLAVE_SNOC_CFG>;
+			label = "slv-qhs-snoc-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qhm_snoc_cfg>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_spdm:slv-qhs-spdm {
+			cell-id = <MSM_BUS_SLAVE_SPDM_WRAPPER>;
+			label = "slv-qhs-spdm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tcsr:slv-qhs-tcsr {
+			cell-id = <MSM_BUS_SLAVE_TCSR>;
+			label = "slv-qhs-tcsr";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tlmm_north:slv-qhs-tlmm-north {
+			cell-id = <MSM_BUS_SLAVE_TLMM_NORTH>;
+			label = "slv-qhs-tlmm-north";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tlmm_south:slv-qhs-tlmm-south {
+			cell-id = <MSM_BUS_SLAVE_TLMM_SOUTH>;
+			label = "slv-qhs-tlmm-south";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_tsif:slv-qhs-tsif {
+			cell-id = <MSM_BUS_SLAVE_TSIF>;
+			label = "slv-qhs-tsif";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_ufs_mem_cfg:slv-qhs-ufs-mem-cfg {
+			cell-id = <MSM_BUS_SLAVE_UFS_MEM_CFG>;
+			label = "slv-qhs-ufs-mem-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_usb3_0:slv-qhs-usb3-0 {
+			cell-id = <MSM_BUS_SLAVE_USB3>;
+			label = "slv-qhs-usb3-0";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_venus_cfg:slv-qhs-venus-cfg {
+			cell-id = <MSM_BUS_SLAVE_VENUS_CFG>;
+			label = "slv-qhs-venus-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_vsense_ctrl_cfg:slv-qhs-vsense-ctrl-cfg {
+			cell-id = <MSM_BUS_SLAVE_VSENSE_CTRL_CFG>;
+			label = "slv-qhs-vsense-ctrl-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qns_cnoc_a2noc:slv-qns-cnoc-a2noc {
+			cell-id = <MSM_BUS_SLAVE_CNOC_A2NOC>;
+			label = "slv-qns-cnoc-a2noc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,connections = <&mas_qnm_cnoc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_srvc_cnoc:slv-srvc-cnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_CNOC>;
+			label = "slv-srvc-cnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_config_noc>;
+			qcom,bcms = <&bcm_cn0>;
+		};
+
+		slv_qhs_llcc:slv-qhs-llcc {
+			cell-id = <MSM_BUS_SLAVE_LLCC_CFG>;
+			label = "slv-qhs-llcc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_dc_noc>;
+		};
+
+		slv_qhs_memnoc:slv-qhs-memnoc {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_CFG>;
+			label = "slv-qhs-memnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_dc_noc>;
+			qcom,connections = <&mas_qhm_memnoc_cfg>;
+		};
+
+		slv_qns_gladiator_sodv:slv-qns-gladiator-sodv {
+			cell-id = <MSM_BUS_SLAVE_GNOC_SNOC>;
+			label = "slv-qns-gladiator-sodv";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+			qcom,connections = <&mas_qnm_gladiator_sodv>;
+		};
+
+		slv_qns_gnoc_memnoc:slv-qns-gnoc-memnoc {
+			cell-id = <MSM_BUS_SLAVE_GNOC_MEM_NOC>;
+			label = "slv-qns-gnoc-memnoc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+			qcom,connections = <&mas_qnm_apps>;
+		};
+
+		slv_srvc_gnoc:slv-srvc-gnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_GNOC>;
+			label = "slv-srvc-gnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_gladiator_noc>;
+		};
+
+		slv_ipa_core_slave:slv-ipa-core-slave {
+			cell-id = <MSM_BUS_SLAVE_IPA_CORE>;
+			label = "slv-ipa-core-slave";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_ipa_virt>;
+			qcom,bcms = <&bcm_ip0>;
+		};
+
+		slv_ebi:slv-ebi {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0>;
+			label = "slv-ebi";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_mc0>, <&bcm_acv>;
+		};
+
+		slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg {
+			cell-id = <MSM_BUS_SLAVE_MSS_PROC_MS_MPU_CFG>;
+			label = "slv-qhs-mdsp-ms-mpu-cfg";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		slv_qns_apps_io:slv-qns-apps-io {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_GNOC>;
+			label = "slv-qns-apps-io";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,bcms = <&bcm_sh1>;
+		};
+
+		slv_qns_llcc:slv-qns-llcc {
+			cell-id = <MSM_BUS_SLAVE_LLCC>;
+			label = "slv-qns-llcc";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,connections = <&mas_llcc_mc>;
+			qcom,bcms = <&bcm_sh0>;
+		};
+
+		slv_qns_memnoc_snoc:slv-qns-memnoc-snoc {
+			cell-id = <MSM_BUS_SLAVE_MEM_NOC_SNOC>;
+			label = "slv-qns-memnoc-snoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+			qcom,connections = <&mas_qnm_memnoc>;
+			qcom,bcms = <&bcm_sh2>;
+		};
+
+		slv_srvc_memnoc:slv-srvc-memnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MEM_NOC>;
+			label = "slv-srvc-memnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mem_noc>;
+		};
+
+		slv_qns2_mem_noc:slv-qns2-mem-noc {
+			cell-id = <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC>;
+			label = "slv-qns2-mem-noc";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,connections = <&mas_qnm_mnoc_sf>;
+			qcom,bcms = <&bcm_mm2>;
+		};
+
+		slv_qns_mem_noc_hf:slv-qns-mem-noc-hf {
+			cell-id = <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC>;
+			label = "slv-qns-mem-noc-hf";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,connections = <&mas_qnm_mnoc_hf>;
+			qcom,bcms = <&bcm_mm0>;
+		};
+
+		slv_srvc_mnoc:slv-srvc-mnoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_MNOC>;
+			label = "slv-srvc-mnoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+		};
+
+		slv_qhs_apss:slv-qhs-apss {
+			cell-id = <MSM_BUS_SLAVE_APPSS>;
+			label = "slv-qhs-apss";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		slv_qns_cnoc:slv-qns-cnoc {
+			cell-id = <MSM_BUS_SNOC_CNOC_SLV>;
+			label = "slv-qns-cnoc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc>;
+			qcom,bcms = <&bcm_sn3>;
+		};
+
+		slv_qns_memnoc_gc:slv-qns-memnoc-gc {
+			cell-id = <MSM_BUS_SLAVE_SNOC_MEM_NOC_GC>;
+			label = "slv-qns-memnoc-gc";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc_gc>;
+			qcom,bcms = <&bcm_sn2>;
+		};
+
+		slv_qns_memnoc_sf:slv-qns-memnoc-sf {
+			cell-id = <MSM_BUS_SLAVE_SNOC_MEM_NOC_SF>;
+			label = "slv-qns-memnoc-sf";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,connections = <&mas_qnm_snoc_sf>;
+			qcom,bcms = <&bcm_sn0>;
+		};
+
+		slv_qxs_imem:slv-qxs-imem {
+			cell-id = <MSM_BUS_SLAVE_OCIMEM>;
+			label = "slv-qxs-imem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn1>;
+		};
+
+		slv_qxs_pimem:slv-qxs-pimem {
+			cell-id = <MSM_BUS_SLAVE_PIMEM>;
+			label = "slv-qxs-pimem";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn4>;
+		};
+
+		slv_srvc_snoc:slv-srvc-snoc {
+			cell-id = <MSM_BUS_SLAVE_SERVICE_SNOC>;
+			label = "slv-srvc-snoc";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		slv_xs_qdss_stm:slv-xs-qdss-stm {
+			cell-id = <MSM_BUS_SLAVE_QDSS_STM>;
+			label = "slv-xs-qdss-stm";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+			qcom,bcms = <&bcm_sn5>;
+		};
+
+		slv_xs_sys_tcu_cfg:slv-xs-sys-tcu-cfg {
+			cell-id = <MSM_BUS_SLAVE_TCU>;
+			label = "slv-xs-sys-tcu-cfg";
+			qcom,buswidth = <8>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_system_noc>;
+		};
+
+		slv_ebi_display:slv-ebi_display {
+			cell-id = <MSM_BUS_SLAVE_EBI_CH0_DISPLAY>;
+			label = "slv-ebi_display";
+			qcom,buswidth = <4>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mc_virt_display>;
+			qcom,bcms = <&bcm_mc0_display>;
+		};
+
+		slv_qns_llcc_display:slv-qns-llcc_display {
+			cell-id = <MSM_BUS_SLAVE_LLCC_DISPLAY>;
+			label = "slv-qns-llcc_display";
+			qcom,buswidth = <16>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mem_noc_display>;
+			qcom,connections = <&mas_llcc_mc_display>;
+			qcom,bcms = <&bcm_sh0_display>;
+		};
+
+		slv_qns2_mem_noc_display:slv-qns2-mem-noc_display {
+			cell-id = <MSM_BUS_SLAVE_MNOC_SF_MEM_NOC_DISPLAY>;
+			label = "slv-qns2-mem-noc_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mmss_noc_display>;
+			qcom,connections = <&mas_qnm_mnoc_sf_display>;
+			qcom,bcms = <&bcm_mm2_display>;
+		};
+
+		slv_qns_mem_noc_hf_display:slv-qns-mem-noc-hf_display {
+			cell-id = <MSM_BUS_SLAVE_MNOC_HF_MEM_NOC_DISPLAY>;
+			label = "slv-qns-mem-noc-hf_display";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <2>;
+			qcom,bus-dev = <&fab_mmss_noc_display>;
+			qcom,connections = <&mas_qnm_mnoc_hf_display>;
+			qcom,bcms = <&bcm_mm0_display>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
index 9feb5b4..2b5ed1a 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
index 27882dd..5a1b945 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
@@ -15,6 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index d5d4847..fa06779 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -10,7 +10,9 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include "sdm670-pmic-overlay.dtsi"
+#include "sdm670-sde-display.dtsi"
 
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
@@ -27,6 +29,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
 	vccq2-supply = <&pm660_l8>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
@@ -89,8 +92,19 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
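+	/*
+	 * Card-detect wiring for the external SD slot: hc_irq and pwr_irq come
+	 * from the controller itself, while status_irq is the card-detect line
+	 * on TLMM GPIO 96 (also exposed through cd-gpios below).
+	 */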
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 0 204 0
+			1 &intc 0 0 222 0
+			2 &tlmm 96 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 96 0x1>;
 
 	status = "ok";
 };
@@ -98,3 +112,136 @@
 &pm660_charger {
 	qcom,batteryless-platform;
 };
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
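+		/* Keycodes: 115 = KEY_VOLUMEUP, 528 = KEY_CAMERA_FOCUS; 766 is a vendor-defined snapshot code. */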
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_cam_snapshot_default
+			     &key_cam_focus_default
+			     &key_vol_up_default>;
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&tlmm 91 0>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&tlmm 92 0>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm660l_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,dsi-display-active;
+};
+
+&pm660l_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
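+	/* Expose MDP as a thermal cooling device; the two cells in its cooling specifier are the min/max cooling states. */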
+	#cooling-cells = <2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
index 16efb4c..8b79d8b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-coresight.dtsi
@@ -400,15 +400,6 @@
 			};
 
 			port@4 {
-				reg = <5>;
-				tpda_in_funnel_lpass: endpoint {
-					slave-mode;
-					remote-endpoint =
-						<&funnel_lpass_out_tpda>;
-				};
-			};
-
-			port@5 {
 				reg = <6>;
 				tpda_in_funnel_turing: endpoint {
 					slave-mode;
@@ -417,7 +408,7 @@
 				};
 			};
 
-			port@6 {
+			port@5 {
 				reg = <7>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -426,7 +417,7 @@
 				};
 			};
 
-			port@7 {
+			port@6 {
 				reg = <10>;
 				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
@@ -435,7 +426,7 @@
 				};
 			};
 
-			port@8 {
+			port@7 {
 				reg = <11>;
 				tpda_in_tpdm_north: endpoint {
 					slave-mode;
@@ -444,7 +435,7 @@
 				};
 			};
 
-			port@9 {
+			port@8 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -545,67 +536,6 @@
 		};
 	};
 
-	funnel_lpass: funnel@6845000 {
-		compatible = "arm,primecell";
-		arm,primecell-periphid = <0x0003b908>;
-
-		reg = <0x6845000 0x1000>;
-		reg-names = "funnel-base";
-
-		coresight-name = "coresight-funnel-lpass";
-
-		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "apb_pclk";
-
-		ports {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			port@0 {
-				reg = <0>;
-				funnel_lpass_out_tpda: endpoint {
-					remote-endpoint =
-					    <&tpda_in_funnel_lpass>;
-				};
-			};
-
-			port@1 {
-				reg = <0>;
-				funnel_lpass_in_tpdm_lpass: endpoint {
-					slave-mode;
-					remote-endpoint =
-					    <&tpdm_lpass_out_funnel_lpass>;
-				};
-			};
-
-			port@2 {
-				reg = <1>;
-				funnel_lpass_in_audio_etm0: endpoint {
-					slave-mode;
-					remote-endpoint =
-					    <&audio_etm0_out_funnel_lpass>;
-				};
-			};
-		};
-	};
-
-	tpdm_lpass: tpdm@6844000 {
-		compatible = "arm,primecell";
-		reg = <0x6844000 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-lpass";
-
-		clocks = <&clock_aop QDSS_CLK>;
-		clock-names = "apb_pclk";
-
-		port {
-			tpdm_lpass_out_funnel_lpass: endpoint {
-				remote-endpoint = <&funnel_lpass_in_tpdm_lpass>;
-			};
-		};
-	};
-
 	tpdm_center: tpdm@6c28000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b968>;
@@ -816,6 +746,7 @@
 		arm,primecell-periphid = <0x0003b968>;
 		reg = <0x6860000 0x1000>;
 		reg-names = "tpdm-base";
+		status = "disabled";
 
 		coresight-name = "coresight-tpdm-turing";
 
@@ -1652,19 +1583,6 @@
 		};
 	};
 
-	audio_etm0 {
-		compatible = "qcom,coresight-remote-etm";
-
-		coresight-name = "coresight-audio-etm0";
-		qcom,inst-id = <5>;
-
-		port {
-			audio_etm0_out_funnel_lpass: endpoint {
-				remote-endpoint = <&funnel_lpass_in_audio_etm0>;
-			};
-		};
-	};
-
 	funnel_apss_merg: funnel@7810000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi
new file mode 100644
index 0000000..cd113b3
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ext-cdc-usbc-audio.dtsi
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&tavil_snd {
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
new file mode 100644
index 0000000..775cf48
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ext-codec-audio-overlay.dtsi
@@ -0,0 +1,96 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-audio-overlay.dtsi"
+
+&pmic_analog_codec {
+	status = "disabled";
+};
+
+&msm_sdw_codec {
+	status = "disabled";
+};
+
+&cdc_pdm_gpios {
+	status = "disabled";
+};
+
+&cdc_comp_gpios {
+	status = "disabled";
+};
+
+&cdc_dmic_gpios {
+	status = "disabled";
+};
+
+&cdc_sdw_gpios {
+	status = "disabled";
+};
+
+&wsa_spkr_en1 {
+	status = "disabled";
+};
+
+&wsa_spkr_en2 {
+	status = "disabled";
+};
+
+&qupv3_se8_spi {
+	status = "okay";
+};
+
+&soc {
+	wcd_buck_vreg_gpio: msm_cdc_pinctrl@94 {
+		status = "okay";
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_buck_vsel_default>;
+		pinctrl-1 = <&wcd_buck_vsel_default>;
+	};
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
+
+&wdsp_mgr {
+	status = "okay";
+};
+
+&wdsp_glink {
+	status = "okay";
+};
+
+&slim_aud {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
+
+&wcd934x_cdc {
+	status = "okay";
+	qcom,has-buck-vsel-gpio;
+	qcom,buck-vsel-gpio-node = <&wcd_buck_vreg_gpio>;
+};
+
+&clock_audio_lnbb {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-external-codec.dtsi b/arch/arm64/boot/dts/qcom/sdm670-external-codec.dtsi
index 6ea92ee..dbcc6bd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-external-codec.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-external-codec.dtsi
@@ -9,3 +9,21 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+#include "sdm670-ext-codec-audio-overlay.dtsi"
+
+&int_codec {
+	status = "disabled";
+};
+
+&tavil_snd {
+	status = "okay";
+};
+
+&slim_aud {
+	status = "okay";
+};
+
+&dai_slim {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
new file mode 100644
index 0000000..df10e7d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-int-cdc-usbc-audio-overlay.dtsi
@@ -0,0 +1,17 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-audio-overlay.dtsi"
+
+&int_codec {
+	qcom,msm-mbhc-usbc-audio-supported = <1>;
+	qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
index 65c16c1..ac254fd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
index 38a9fae..1241a20 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
@@ -15,6 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index 0f3a61f..466062b 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -10,7 +10,9 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include "sdm670-pmic-overlay.dtsi"
+#include "sdm670-sde-display.dtsi"
 
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
@@ -27,6 +29,7 @@
 	vdd-hba-supply = <&ufs_phy_gdsc>;
 	vdd-hba-fixed-regulator;
 	vcc-supply = <&pm660l_l4>;
+	vcc-voltage-level = <2960000 2960000>;
 	vccq2-supply = <&pm660_l8>;
 	vcc-max-microamp = <600000>;
 	vccq2-max-microamp = <600000>;
@@ -89,8 +92,19 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+	#address-cells = <0>;
+	interrupt-parent = <&sdhc_2>;
+	interrupts = <0 1 2>;
+	#interrupt-cells = <1>;
+	interrupt-map-mask = <0xffffffff>;
+	interrupt-map = <0 &intc 0 0 204 0
+			1 &intc 0 0 222 0
+			2 &tlmm 96 0>;
+	interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+	cd-gpios = <&tlmm 96 0x1>;
 
 	status = "ok";
 };
@@ -107,3 +121,136 @@
 &pm660_fg {
 	qcom,battery-data = <&mtp_batterydata>;
 };
+
+&soc {
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_cam_snapshot_default
+			     &key_cam_focus_default
+			     &key_vol_up_default>;
+
+		cam_snapshot {
+			label = "cam_snapshot";
+			gpios = <&tlmm 91 0>;
+			linux,input-type = <1>;
+			linux,code = <766>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		cam_focus {
+			label = "cam_focus";
+			gpios = <&tlmm 92 0>;
+			linux,input-type = <1>;
+			linux,code = <528>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm660l_gpios 7 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 76 0>;
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_vid {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,dsi-display-active;
+};
+
+&pm660l_wled {
+	status = "okay";
+	qcom,led-strings-list = [01 02];
+};
+
+&mdss_mdp {
+	#cooling-cells = <2>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index a080db4..177813f 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1258,6 +1258,32 @@
 			};
 		};
 
+		sdc2_cd_on: cd_on {
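+			/* SD card-detect line (GPIO 96): pulled up while the slot is in use; cd_off below drops the pull. */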
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+
+		sdc2_cd_off: cd_off {
+			mux {
+				pins = "gpio96";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio96";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+
 		/* USB C analog configuration */
 		wcd_usbc_analog_en1 {
 			wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
@@ -1406,6 +1432,22 @@
 			};
 		};
 
+		wcd_buck_vsel {
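+			/* Voltage-select line for the WCD codec buck (GPIO 94), held high by default. */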
+			wcd_buck_vsel_default: wcd_buck_vsel_default {
+				mux {
+					pins = "gpio94";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio94";
+					drive-strength = <8>; /* 8 mA */
+					bias-pull-down; /* pull down */
+					output-high;
+				};
+			};
+		};
+
 		wcd_gnd_mic_swap {
 			wcd_gnd_mic_swap_idle: wcd_gnd_mic_swap_idle {
 				mux {
@@ -1451,5 +1493,146 @@
 				};
 			};
 		};
+
+		/* Pinctrl setting for CAMERA GPIO key */
+		key_cam_snapshot {
+			key_cam_snapshot_default: key_cam_snapshot_default {
+				pins = "gpio91";
+				function = "normal";
+				input-enable;
+				bias-pull-up;
+				power-source = <0>;
+			};
+		};
+
+		key_cam_focus {
+			key_cam_focus_default: key_cam_focus_default {
+				pins = "gpio92";
+				function = "normal";
+				input-enable;
+				bias-pull-up;
+				power-source = <0>;
+			};
+		};
+
+		pmx_sde: pmx_sde {
+			sde_dsi_active: sde_dsi_active {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable = <0>;   /* no pull */
+				};
+			};
+			sde_dsi_suspend: sde_dsi_suspend {
+				mux {
+					pins = "gpio75", "gpio76";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio75", "gpio76";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
+		pmx_sde_te {
+			sde_te_active: sde_te_active {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+
+			sde_te_suspend: sde_te_suspend {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
+		sde_dp_aux_active: sde_dp_aux_active {
+			mux {
+				pins = "gpio40", "gpio50";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio40", "gpio50";
+				bias-disable = <0>; /* no pull */
+				drive-strength = <8>;
+			};
+		};
+
+		sde_dp_aux_suspend: sde_dp_aux_suspend {
+			mux {
+				pins = "gpio40", "gpio50";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio40", "gpio50";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+
+		sde_dp_usbplug_cc_active: sde_dp_usbplug_cc_active {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+
+		sde_dp_usbplug_cc_suspend: sde_dp_usbplug_cc_suspend {
+			mux {
+				pins = "gpio38";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio38";
+				bias-pull-down;
+				drive-strength = <2>;
+			};
+		};
+	};
+};
+
+&pm660l_gpios {
+	key_vol_up {
+		key_vol_up_default: key_vol_up_default {
+			pins = "gpio7";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+			power-source = <0>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
index b3d2357..5b67765 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
index 5cf3513..26f5e78 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-cdp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
index ff3270d..1550661 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
index febd5d9..14f48a0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-pm660a-mtp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
index 75922b0..aa6be24 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pmic-overlay.dtsi
@@ -156,6 +156,15 @@
 		};
 	};
 
+	pm660_rradc: rradc@4500 {
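+		/* PM660 round-robin ADC; its channels are typically read by the charger and fuel-gauge drivers. */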
+		compatible = "qcom,rradc";
+		reg = <0x4500 0x100>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		#io-channel-cells = <1>;
+		qcom,pmic-revid = <&pm660_revid>;
+	};
+
 	pm660_fg: qpnp,fg {
 		compatible = "qcom,fg-gen3";
 		#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
index 23792b1..24b8dd6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-regulator.dtsi
@@ -642,4 +642,14 @@
 			qcom,init-voltage = <3312000>;
 		};
 	};
+
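+	/*
+	 * Reference-voltage generator block; kept always-on, and the
+	 * self-referencing proxy vote holds the enable request from early boot.
+	 */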
+	refgen: refgen-regulator@ff1000 {
+		compatible = "qcom,refgen-regulator";
+		reg = <0xff1000 0x60>;
+		regulator-name = "refgen";
+		regulator-enable-ramp-delay = <5>;
+		proxy-supply = <&refgen>;
+		qcom,proxy-consumer-enable;
+		regulator-always-on;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index 6201488..e137705 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -16,6 +16,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-rumi.dtsi"
+#include "sdm670-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 RUMI";
 	compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
new file mode 100644
index 0000000..3e9d967
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -0,0 +1,597 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-dsc375-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-dsc375-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+&soc {
+	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "lab";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+
+		qcom,panel-supply-entry@2 {
+			reg = <2>;
+			qcom,supply-name = "ibb";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_vdd_no_labibb: dsi_panel_pwr_supply_vdd_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "vdd";
+			qcom,supply-min-voltage = <3000000>;
+			qcom,supply-max-voltage = <3000000>;
+			qcom,supply-enable-load = <857000>;
+			qcom,supply-disable-load = <0>;
+			qcom,supply-post-on-sleep = <0>;
+		};
+	};
+
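+	/*
+	 * Each qcom,dsi-display node below binds one panel to its DSI
+	 * controller(s), PHY(s), source clocks, pinctrl states and panel
+	 * power supplies.
+	 */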
+	dsi_dual_nt35597_truly_video_display: qcom,dsi-display@0 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_video_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+		qcom,panel-mode-gpio = <&tlmm 76 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_video>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
+	dsi_dual_nt35597_truly_cmd_display: qcom,dsi-display@1 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+		qcom,panel-mode-gpio = <&tlmm 76 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_cmd>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
+	dsi_nt35597_truly_dsc_cmd_display: qcom,dsi-display@2 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+		qcom,panel-mode-gpio = <&tlmm 76 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_cmd>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
+	dsi_nt35597_truly_dsc_video_display: qcom,dsi-display@3 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_video_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 75 0>;
+		qcom,panel-mode-gpio = <&tlmm 76 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_video>;
+		vddio-supply = <&pm660_l11>;
+		lab-supply = <&lcdb_ldo_vreg>;
+		ibb-supply = <&lcdb_ncp_vreg>;
+	};
+
+	dsi_sim_vid_display: qcom,dsi-display@4 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_vid>;
+	};
+
+	dsi_dual_sim_vid_display: qcom,dsi-display@5 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_vid_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_vid>;
+	};
+
+	dsi_sim_cmd_display: qcom,dsi-display@6 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_cmd>;
+	};
+
+	dsi_dual_sim_cmd_display: qcom,dsi-display@7 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_cmd>;
+	};
+
+	dsi_sim_dsc_375_cmd_display: qcom,dsi-display@8 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sim_dsc_375_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy0>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_sim_dsc_375_cmd>;
+	};
+
+	dsi_dual_sim_dsc_375_cmd_display: qcom,dsi-display@9 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sim_dsc_375_cmd_display";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
+			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+
+		qcom,dsi-panel = <&dsi_dual_sim_dsc_375_cmd>;
+	};
+
+	sde_wb: qcom,wb-display@0 {
+		compatible = "qcom,wb-display";
+		cell-index = <0>;
+		label = "wb_display";
+	};
+
+	ext_disp: qcom,msm-ext-disp {
+		compatible = "qcom,msm-ext-disp";
+		status = "disabled";
+
+		ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+			compatible = "qcom,msm-ext-disp-audio-codec-rx";
+		};
+	};
+
+	sde_dp: qcom,dp_display@0 {
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+		status = "disabled";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm660_l1>;
+		vdda-0p9-supply = <&pm660l_l1>;
+
+		reg =	<0xae90000 0xa84>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea030 0x10>,
+			<0x0aee1000 0x034>;
+		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"hdcp_physical";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk",
+			"ctrl_link_iface_clk", "ctrl_pixel_clk",
+			"crypto_clk", "pixel_clk_rcg", "pixel_parent";
+
+		qcom,dp-usbpd-detection = <&pm660_pdphy>;
+		qcom,ext-disp = <&ext_disp>;
+
+		qcom,aux-cfg0-settings = [20 00];
+		qcom,aux-cfg1-settings = [24 13 23 1d];
+		qcom,aux-cfg2-settings = [28 24];
+		qcom,aux-cfg3-settings = [2c 00];
+		qcom,aux-cfg4-settings = [30 0a];
+		qcom,aux-cfg5-settings = [34 26];
+		qcom,aux-cfg6-settings = [38 0a];
+		qcom,aux-cfg7-settings = [3c 03];
+		qcom,aux-cfg8-settings = [40 bb];
+		qcom,aux-cfg9-settings = [44 03];
+
+		qcom,max-pclk-frequency-khz = <675000>;
+
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
+};
+
+&sde_dp {
+	status = "disabled";
+	pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+	pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+	pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+	qcom,aux-en-gpio = <&tlmm 50 0>;
+	qcom,aux-sel-gpio = <&tlmm 40 0>;
+	qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
+&mdss_mdp {
+	connectors = <&sde_rscc &sde_wb>;
+};
+
+&dsi_dual_nt35597_truly_video {
+	qcom,mdss-dsi-t-clk-post = <0x0D>;
+	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_nt35597_truly_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0D>;
+	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0b>;
+	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
+				05 03 03 04 00];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>, /* dsc merge */
+						<2 1 1>; /* 3d mux */
+			qcom,default-topology-index = <1>;
+		};
+	};
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,mdss-dsi-t-clk-post = <0x0b>;
+	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
+				04 03 03 04 00];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>, /* dsc merge */
+						<2 1 1>; /* 3d mux */
+			qcom,default-topology-index = <1>;
+		};
+	};
+};
+
+&dsi_sim_vid {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <1 0 1>,
+						<2 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_vid {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sim_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <1 0 1>,
+						<2 0 1>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 {
+			qcom,mdss-dsi-panel-phy-timings = [00 24 09 09 26 24 09
+				09 06 03 04 00];
+			qcom,display-topology = <2 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+		timing@1 {
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <2 0 2>,
+						<1 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+		timing@2 {
+			qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06
+				06 04 03 04 00];
+			qcom,display-topology = <2 0 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_sim_dsc_375_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 { /* 1080p */
+			qcom,mdss-dsi-panel-phy-timings = [00 1A 06 06 22 20 07
+				07 04 03 04 00];
+			qcom,display-topology = <1 1 1>;
+			qcom,default-topology-index = <0>;
+		};
+		timing@1 { /* qhd */
+			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
+				05 03 03 04 00];
+			qcom,display-topology = <1 1 1>,
+						<2 2 1>, /* dsc merge */
+						<2 1 1>; /* 3d mux */
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+	qcom,mdss-dsi-t-clk-post = <0x0d>;
+	qcom,mdss-dsi-t-clk-pre = <0x2d>;
+	qcom,mdss-dsi-display-timings {
+		timing@0 { /* qhd */
+			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
+				07 05 03 04 00];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+		timing@1 { /* 4k */
+			qcom,mdss-dsi-panel-phy-timings = [00 18 06 06 21 20 06
+				06 04 03 04 00];
+			qcom,display-topology = <2 2 2>;
+			qcom,default-topology-index = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
new file mode 100644
index 0000000..78e5d94
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
@@ -0,0 +1,110 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94a00 {
+		compatible = "qcom,mdss_dsi_pll_10nm";
+		label = "MDSS DSI 0 PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+		reg = <0xae94a00 0x1e0>,
+		      <0xae94400 0x800>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96a00 {
+		compatible = "qcom,mdss_dsi_pll_10nm";
+		label = "MDSS DSI 1 PLL";
+		cell-index = <1>;
+		#clock-cells = <1>;
+		reg = <0xae96a00 0x1e0>,
+		      <0xae96400 0x800>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@88ea000 {
+		compatible = "qcom,mdss_dp_pll_10nm";
+		status = "disabled";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0x088ea000 0x200>,
+		      <0x088eaa00 0x200>,
+		      <0x088ea200 0x200>,
+		      <0x088ea600 0x200>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "ln_tx0_base",
+			"ln_tx1_base", "gdsc_base";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+		clock-names = "iface_clk", "ref_clk_src", "ref_clk",
+			"cfg_ahb_clk", "pipe_clk";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
new file mode 100644
index 0000000..bb30a20
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -0,0 +1,532 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	mdss_mdp: qcom,mdss_mdp@ae00000 {
+		compatible = "qcom,sde-kms";
+		reg = <0x0ae00000 0x81d40>,
+		      <0x0aeb0000 0x2008>,
+		      <0x0aeac000 0xf0>;
+		reg-names = "mdp_phys",
+			"vbif_phys",
+			"regdma_phys";
+
+		clocks =
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_DISP_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+		clock-names = "gcc_iface", "gcc_bus", "iface_clk",
+				"bus_clk", "core_clk", "vsync_clk";
+		clock-rate = <0 0 0 0 300000000 19200000 0>;
+		clock-max-rate = <0 0 0 0 412500000 19200000 0>;
+
+		sde-vdd-supply = <&mdss_core_gdsc>;
+
+		/* interrupt config */
+		interrupt-parent = <&pdc>;
+		interrupts = <0 83 0>;
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		iommus = <&apps_smmu 0x880 0x8>,
+			<&apps_smmu 0xc80 0x8>;
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		/* hw blocks */
+		qcom,sde-off = <0x1000>;
+		qcom,sde-len = <0x45C>;
+
+		qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+				     0x2600 0x2800>;
+		qcom,sde-ctl-size = <0xE4>;
+
+		qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+				      0x48000 0x49000 0x4a000>;
+		qcom,sde-mixer-size = <0x320>;
+
+		qcom,sde-dspp-top-off = <0x1300>;
+		qcom,sde-dspp-top-size = <0xc>;
+
+		qcom,sde-dspp-off = <0x55000 0x57000>;
+		qcom,sde-dspp-size = <0x17e0>;
+
+		qcom,sde-wb-off = <0x66000>;
+		qcom,sde-wb-size = <0x2c8>;
+		qcom,sde-wb-xin-id = <6>;
+		qcom,sde-wb-id = <2>;
+		qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+
+		qcom,sde-intf-off = <0x6b000 0x6b800
+					0x6c000 0x6c800>;
+		qcom,sde-intf-size = <0x280>;
+
+		qcom,sde-intf-type = "dp", "dsi", "dsi", "dp";
+		qcom,sde-pp-off = <0x71000 0x71800
+					  0x72000 0x72800 0x73000>;
+		qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x1>;
+		qcom,sde-pp-size = <0xd4>;
+
+		qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0>;
+		qcom,sde-cdm-off = <0x7a200>;
+		qcom,sde-cdm-size = <0x224>;
+
+		qcom,sde-dsc-off = <0x81000 0x81400>;
+		qcom,sde-dsc-size = <0x140>;
+
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0 0x0>;
+		qcom,sde-dither-version = <0x00010000>;
+		qcom,sde-dither-size = <0x20>;
+
+		qcom,sde-sspp-type = "vig", "vig",
+					"dma", "dma", "dma";
+
+		qcom,sde-sspp-off = <0x5000 0x7000 0x25000 0x27000 0x29000>;
+		qcom,sde-sspp-src-size = <0x1c8>;
+
+		qcom,sde-sspp-xin-id = <0 4 1 5 9>;
+		qcom,sde-sspp-excl-rect = <1 1 1 1 1>;
+		qcom,sde-sspp-smart-dma-priority = <4 5 1 2 3>;
+		qcom,sde-smart-dma-rev = "smart_dma_v2";
+
+		qcom,sde-mixer-pair-mask = <2 1 6 0 0 3>;
+
+		qcom,sde-mixer-blend-op-off = <0x20 0x38 0x50 0x68 0x80 0x98
+						0xb0 0xc8 0xe0 0xf8 0x110>;
+
+		/* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+		qcom,sde-sspp-clk-ctrl =
+				<0x2ac 0>, <0x2b4 0>,
+				 <0x2ac 8>, <0x2b4 8>, <0x2bc 8>;
+		qcom,sde-sspp-csc-off = <0x1a00>;
+		qcom,sde-csc-type = "csc-10bit";
+		qcom,sde-qseed-type = "qseedv3";
+		qcom,sde-sspp-qseed-off = <0xa00>;
+		qcom,sde-mixer-linewidth = <2560>;
+		qcom,sde-sspp-linewidth = <2560>;
+		qcom,sde-wb-linewidth = <4096>;
+		qcom,sde-mixer-blendstages = <0xb>;
+		qcom,sde-highest-bank-bit = <0x1>;
+		qcom,sde-ubwc-version = <0x200>;
+		qcom,sde-panic-per-pipe;
+		qcom,sde-has-cdp;
+		qcom,sde-has-src-split;
+		qcom,sde-has-dim-layer;
+		qcom,sde-has-idle-pc;
+		qcom,sde-max-bw-low-kbps = <9600000>;
+		qcom,sde-max-bw-high-kbps = <9600000>;
+		qcom,sde-dram-channels = <2>;
+		qcom,sde-num-nrt-paths = <0>;
+		qcom,sde-dspp-ad-version = <0x00040000>;
+		qcom,sde-dspp-ad-off = <0x28000 0x27000>;
+
+		qcom,sde-vbif-off = <0>;
+		qcom,sde-vbif-size = <0x1040>;
+		qcom,sde-vbif-id = <0>;
+		qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+		qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
+
+		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
+		qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+			0x00000000>;
+		qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+		qcom,sde-qos-lut-linear =
+			<4 0x00000000 0x00000357>,
+			<5 0x00000000 0x00003357>,
+			<6 0x00000000 0x00023357>,
+			<7 0x00000000 0x00223357>,
+			<8 0x00000000 0x02223357>,
+			<9 0x00000000 0x22223357>,
+			<10 0x00000002 0x22223357>,
+			<11 0x00000022 0x22223357>,
+			<12 0x00000222 0x22223357>,
+			<13 0x00002222 0x22223357>,
+			<14 0x00012222 0x22223357>,
+			<0 0x00112222 0x22223357>;
+		qcom,sde-qos-lut-macrotile =
+			<10 0x00000003 0x44556677>,
+			<11 0x00000033 0x44556677>,
+			<12 0x00000233 0x44556677>,
+			<13 0x00002233 0x44556677>,
+			<14 0x00012233 0x44556677>,
+			<0 0x00112233 0x44556677>;
+		qcom,sde-qos-lut-nrt =
+			<0 0x00000000 0x00000000>;
+		qcom,sde-qos-lut-cwb =
+			<0 0x75300000 0x00000000>;
+
+		qcom,sde-cdp-setting = <1 1>, <1 0>;
+
+		qcom,sde-inline-rotator = <&mdss_rotator 0>;
+		qcom,sde-inline-rot-xin = <10 11>;
+		qcom,sde-inline-rot-xin-type = "sspp", "wb";
+
+		/* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+		qcom,sde-inline-rot-clk-ctrl = <0x2bc 0x8>, <0x2bc 0xc>;
+
+		qcom,sde-reg-dma-off = <0>;
+		qcom,sde-reg-dma-version = <0x1>;
+		qcom,sde-reg-dma-trigger-off = <0x119c>;
+
+		qcom,sde-sspp-vig-blocks {
+			qcom,sde-vig-csc-off = <0x1a00>;
+			qcom,sde-vig-qseed-off = <0xa00>;
+			qcom,sde-vig-qseed-size = <0xa0>;
+		};
+
+		qcom,sde-dspp-blocks {
+			qcom,sde-dspp-igc = <0x0 0x00030001>;
+			qcom,sde-dspp-vlut = <0xa00 0x00010008>;
+			qcom,sde-dspp-gamut = <0x1000 0x00040000>;
+			qcom,sde-dspp-pcc = <0x1700 0x00040000>;
+			qcom,sde-dspp-gc = <0x17c0 0x00010008>;
+		};
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "sde-vdd";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		smmu_sde_sec: qcom,smmu_sde_sec_cb {
+			compatible = "qcom,smmu_sde_sec";
+			iommus = <&apps_smmu 0x881 0x8>,
+			       <&apps_smmu 0xc81 0x8>;
+		};
+
+		/* data and reg bus scale settings */
+		qcom,sde-data-bus {
+			qcom,msm-bus,name = "mdss_sde_mnoc";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
+			qcom,msm-bus,vectors-KBps =
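+			    /* Each entry: <master-id slave-id ab(KBps) ib(KBps)> */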
+			    <22 773 0 0>, <23 773 0 0>,
+			    <22 773 0 6400000>, <23 773 0 6400000>,
+			    <22 773 0 6400000>, <23 773 0 6400000>;
+		};
+
+		qcom,sde-llcc-bus {
+			qcom,msm-bus,name = "mdss_sde_llcc";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <132 770 0 0>,
+			    <132 770 0 6400000>,
+			    <132 770 0 6400000>;
+		};
+
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "mdss_sde_ebi";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <129 512 0 0>,
+			    <129 512 0 6400000>,
+			    <129 512 0 6400000>;
+		};
+
+		qcom,sde-reg-bus {
+			qcom,msm-bus,name = "mdss_reg";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,vectors-KBps =
+				<1 590 0 0>,
+				<1 590 0 76800>,
+				<1 590 0 150000>,
+				<1 590 0 300000>;
+		};
+	};
+
+	sde_rscc: qcom,sde_rscc@af20000 {
+		cell-index = <0>;
+		compatible = "qcom,sde-rsc";
+		reg = <0xaf20000 0x1c44>,
+			<0xaf30000 0x3fd4>;
+		reg-names = "drv", "wrapper";
+		qcom,sde-rsc-version = <1>;
+
+		vdd-supply = <&mdss_core_gdsc>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>;
+		clock-names = "vsync_clk", "iface_clk";
+		clock-rate = <0 0>;
+
+		qcom,sde-dram-channels = <2>;
+
+		mboxes = <&disp_rsc 0>;
+		mbox-names = "disp_rsc";
+
+		/* data and reg bus scale settings */
+		qcom,sde-data-bus {
+			qcom,msm-bus,name = "disp_rsc_mnoc";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
+			qcom,msm-bus,vectors-KBps =
+			    <20003 20515 0 0>, <20004 20515 0 0>,
+			    <20003 20515 0 6400000>, <20004 20515 0 6400000>,
+			    <20003 20515 0 6400000>, <20004 20515 0 6400000>;
+		};
+
+		qcom,sde-llcc-bus {
+			qcom,msm-bus,name = "disp_rsc_llcc";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20001 20513 0 0>,
+			    <20001 20513 0 6400000>,
+			    <20001 20513 0 6400000>;
+		};
+
+		qcom,sde-ebi-bus {
+			qcom,msm-bus,name = "disp_rsc_ebi";
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+			    <20000 20512 0 0>,
+			    <20000 20512 0 6400000>,
+			    <20000 20512 0 6400000>;
+		};
+	};
+
+	mdss_rotator: qcom,mdss_rotator@ae00000 {
+		compatible = "qcom,sde_rotator";
+		reg = <0x0ae00000 0xac000>,
+		      <0x0aeb8000 0x3000>;
+		reg-names = "mdp_phys",
+			"rot_vbif_phys";
+
+		#list-cells = <1>;
+
+		qcom,mdss-rot-mode = <1>;
+		qcom,mdss-highest-bank-bit = <0x1>;
+
+		/* Bus Scale Settings */
+		qcom,msm-bus,name = "mdss_rotator";
+		qcom,msm-bus,num-cases = <3>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<25 512 0 0>,
+			<25 512 0 6400000>,
+			<25 512 0 6400000>;
+
+		rot-vdd-supply = <&mdss_core_gdsc>;
+		qcom,supply-names = "rot-vdd";
+
+		clocks =
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_DISP_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_ROT_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>;
+		clock-names = "gcc_iface", "gcc_bus",
+			"iface_clk", "rot_clk", "axi_clk";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <2 0>;
+
+		/* Offline rotator QoS setting */
+		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
+		qcom,mdss-rot-vbif-memtype = <3 3>;
+		qcom,mdss-rot-cdp-setting = <1 1>;
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>;
+
+		/* Inline rotator QoS Setting */
+		/* setting default register values for RD - qos/danger/safe */
+		qcom,mdss-inline-rot-qos-lut = <0x44556677 0x00112233
+							0x44556677 0x00112233>;
+		qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
+
+		qcom,mdss-default-ot-rd-limit = <32>;
+		qcom,mdss-default-ot-wr-limit = <32>;
+
+		qcom,mdss-sbuf-headroom = <20>;
+
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 4>;
+
+		/* reg bus scale settings */
+		rot_reg: qcom,rot-reg-bus {
+			qcom,msm-bus,name = "mdss_rot_reg";
+			qcom,msm-bus,num-cases = <2>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,active-only;
+			qcom,msm-bus,vectors-KBps =
+				<1 590 0 0>,
+				<1 590 0 76800>;
+		};
+
+		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
+			compatible = "qcom,smmu_sde_rot_unsec";
+			iommus = <&apps_smmu 0x1090 0x0>;
+		};
+
+		smmu_rot_sec: qcom,smmu_rot_sec_cb {
+			compatible = "qcom,smmu_sde_rot_sec";
+			iommus = <&apps_smmu 0x1091 0x0>;
+		};
+	};
+
+	mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.2";
+		label = "dsi-ctrl-0";
+		cell-index = <0>;
+		reg =   <0xae94000 0x400>,
+			<0xaf08000 0x4>;
+		reg-names = "dsi_ctrl", "disp_cc_base";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <4 0>;
+		vdda-1p2-supply = <&pm660_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_ESC0_CLK>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+					"pixel_clk", "pixel_clk_rcg",
+					"esc_clk";
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+	};
+
+	mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.2";
+		label = "dsi-ctrl-1";
+		cell-index = <1>;
+		reg =   <0xae96000 0x400>,
+			<0xaf08000 0x4>;
+		reg-names = "dsi_ctrl", "disp_cc_base";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <5 0>;
+		vdda-1p2-supply = <&pm660_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE1_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE1_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK1_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK1_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_ESC1_CLK>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+				"pixel_clk", "pixel_clk_rcg", "esc_clk";
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+	};
+
+	mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		label = "dsi-phy-0";
+		cell-index = <0>;
+		reg = <0xae94400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-0p9-supply = <&pm660l_l1>;
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
+
+	mdss_dsi_phy1: qcom,mdss_dsi_phy1@ae96400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		label = "dsi-phy-1";
+		cell-index = <1>;
+		reg = <0xae96400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-0p9-supply = <&pm660l_l1>;
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
+
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
index 190b32f..e4e1db5 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-cdp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
index 45ff83f..80a8423 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-cdp.dts
@@ -15,7 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
-
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, CDP";
 	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
index 226c2ae..c5bab55 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660L, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
index 8928f80..2c53334 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-cdp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660L, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
index 78d1dfa..09ba184 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp-overlay.dts
@@ -21,7 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "sdm670-external-codec.dtsi"
-
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660L, USB-C Audio, Ext. Audio Codec MTP";
 	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
index 5628e92..7a19819 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-mtp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660L, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
index c1891a4..71db0f7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660A, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
index c059113..ff641e6 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-cdp.dts
@@ -17,6 +17,7 @@
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660A, USB-C Audio, Ext. Audio Codec CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
index c54e299..c2e6f58 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp-overlay.dts
@@ -22,6 +22,7 @@
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660+PM660A, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
index d09b5e5..2cd68f1 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-external-codec-pm660a-mtp.dts
@@ -17,6 +17,7 @@
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
 #include "sdm670-external-codec.dtsi"
+#include "sdm670-ext-cdc-usbc-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM 670 PM660+PM660A, USB-C Audio, Ext. Audio Codec MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
index d8c4102..3d5c04e 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp-overlay.dts
@@ -20,6 +20,7 @@
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 #include "sdm670-mtp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
index b9bb3ef..8449625 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-mtp.dts
@@ -15,6 +15,7 @@
 
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660L, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
index 95df620..6a26d95 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
index 5e33308..1871b45 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-cdp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-cdp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
index 226a46b..d565cdd 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp-overlay.dts
@@ -21,6 +21,7 @@
 
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
index ca99736..b288569 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-usbc-pm660a-mtp.dts
@@ -16,6 +16,7 @@
 #include "sdm670.dtsi"
 #include "sdm670-mtp.dtsi"
 #include "pm660a.dtsi"
+#include "sdm670-int-cdc-usbc-audio-overlay.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM670 PM660 + PM660A, USB-C Audio, MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index 97a79bd..cb920d7 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -23,6 +23,8 @@
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/clock/qcom,aop-qmp.h>
 
+#define MHZ_TO_MBPS(mhz, w) (((mhz) * 1000000 * (w)) / (1024 * 1024))
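+/* e.g. MHZ_TO_MBPS(150, 16) = (150 * 1000000 * 16) / (1024 * 1024) ~= 2288 MB/s */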
+
 / {
 	model = "Qualcomm Technologies, Inc. SDM670";
 	compatible = "qcom,sdm670";
@@ -536,24 +538,6 @@
 			reg = <0 0x93d00000 0 0x1e00000>;
 		};
 
-		pil_ipa_fw_mem: pil_ipa_fw_region@95b00000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b00000 0 0x10000>;
-		};
-
-		pil_ipa_gsi_mem: pil_ipa_gsi_region@95b10000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b10000 0 0x5000>;
-		};
-
-		pil_gpu_mem: pil_gpu_region@95b15000 {
-			compatible = "removed-dma-pool";
-			no-map;
-			reg = <0 0x95b15000 0 0x1000>;
-		};
-
 		adsp_mem: adsp_region {
 			compatible = "shared-dma-pool";
 			alloc-ranges = <0 0x00000000 0 0xffffffff>;
@@ -614,6 +598,10 @@
 
 #include "sdm670-vidc.dtsi"
 
+#include "sdm670-sde-pll.dtsi"
+
+#include "sdm670-sde.dtsi"
+
 &soc {
 	#address-cells = <1>;
 	#size-cells = <1>;
@@ -646,6 +634,14 @@
 		qcom,pipe-attr-ee;
 	};
 
+	qcom,qbt1000 {
+		compatible = "qcom,qbt1000";
+		clock-names = "core", "iface";
+		clock-frequency = <25000000>;
+		qcom,ipc-gpio = <&tlmm 121 0>;
+		qcom,finger-detect-gpio = <&tlmm 122 0>;
+	};
+
 	thermal_zones: thermal-zones {};
 
 	tsens0: tsens@c222000 {
@@ -793,6 +789,7 @@
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm660l_s3_level>;
 		vdd_mx-supply = <&pm660l_s1_level>;
+		qcom,gpu_cc_gmu_clk_src-opp-handle = <&gmu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -802,6 +799,7 @@
 		reg = <0x5090000 0x9000>;
 		reg-names = "cc_base";
 		vdd_gfx-supply = <&pm660l_s2_level>;
+		qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -833,7 +831,7 @@
 	};
 
 	clock_aop: qcom,aopclk {
-		compatible = "qcom,aop-qmp-clk-v2";
+		compatible = "qcom,aop-qmp-clk-v1";
 		#clock-cells = <1>;
 		mboxes = <&qmp_aop 0>;
 		mbox-names = "qdss_clk";
@@ -1391,6 +1389,15 @@
 		interrupts = <0 17 0>;
 	};
 
+	eud: qcom,msm-eud@88e0000 {
+		compatible = "qcom,msm-eud";
+		interrupt-names = "eud_irq";
+		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x88e0000 0x2000>;
+		reg-names = "eud_base";
+		status = "disabled";
+	};
+
 	qcom,llcc@1100000 {
 		compatible = "qcom,llcc-core", "syscon", "simple-mfd";
 		reg = <0x1100000 0x250000>;
@@ -1619,6 +1626,13 @@
 		status = "ok";
 	};
 
+	qcom,rmtfs_sharedmem@0 {
+		compatible = "qcom,sharedmem-uio";
+		reg = <0x0 0x200000>;
+		reg-names = "rmtfs";
+		qcom,client-id = <0x00000001>;
+	};
+
 	qcom,rmnet-ipa {
 		compatible = "qcom,rmnet-ipa3";
 		qcom,rmnet-ipa-ssr;
@@ -1659,17 +1673,17 @@
 			<90 512 80000 640000>,
 			<90 585 80000 640000>,
 			<1 676 80000 80000>,
-			<143 777 0 150000>,
+			<143 777 0 150>, /* IB defined for IPA clk in MHz */
 		/* NOMINAL */
 			<90 512 206000 960000>,
 			<90 585 206000 960000>,
 			<1 676 206000 160000>,
-			<143 777 0 300000>,
+			<143 777 0 300>, /* IB defined for IPA clk in MHz */
 		/* TURBO */
 			<90 512 206000 3600000>,
 			<90 585 206000 3600000>,
 			<1 676 206000 300000>,
-			<143 777 0 355333>;
+			<143 777 0 355>; /* IB defined for IPA clk in MHz */
 		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
 
 		/* IPA RAM mmap */
@@ -1919,6 +1933,46 @@
 
 		qcom,devfreq,freq-table = <50000000 200000000>;
 
+		qcom,msm-bus,name = "sdhc1";
+		qcom,msm-bus,num-cases = <9>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			/* No vote */
+			<78 512 0 0>, <1 606 0 0>,
+			/* 400 KB/s */
+			<78 512 1046 1600>,
+			<1 606 1600 1600>,
+			/* 20 MB/s */
+			<78 512 52286 80000>,
+			<1 606 80000 80000>,
+			/* 25 MB/s */
+			<78 512 65360 100000>,
+			<1 606 100000 100000>,
+			/* 50 MB/s */
+			<78 512 130718 200000>,
+			<1 606 133320 133320>,
+			/* 100 MB/s */
+			<78 512 130718 200000>,
+			<1 606 150000 150000>,
+			/* 200 MB/s */
+			<78 512 261438 400000>,
+			<1 606 300000 300000>,
+			/* 400 MB/s */
+			<78 512 261438 400000>,
+			<1 606 300000 300000>,
+			/* Max. bandwidth */
+			<78 512 1338562 4096000>,
+			<1 606 1338562 4096000>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 400000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <70 70>;
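+		/* CPU group masks: 0x3f = CPUs 0-5, 0xc0 = CPUs 6-7 */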
+		qcom,pm-qos-cpu-groups = <0x3f 0xc0>;
+		qcom,pm-qos-cmdq-latency-us = <70 70>, <70 70>;
+		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
+
 		clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
 			<&clock_gcc GCC_SDCC1_APPS_CLK>;
 		clock-names = "iface_clk", "core_clk";
@@ -1946,6 +2000,43 @@
 				      "SDR104";
 
 		qcom,devfreq,freq-table = <50000000 201500000>;
+
+		qcom,msm-bus,name = "sdhc2";
+		qcom,msm-bus,num-cases = <8>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+			/* No vote */
+			<81 512 0 0>, <1 608 0 0>,
+			/* 400 KB/s */
+			<81 512 1046 1600>,
+			<1 608 1600 1600>,
+			/* 20 MB/s */
+			<81 512 52286 80000>,
+			<1 608 80000 80000>,
+			/* 25 MB/s */
+			<81 512 65360 100000>,
+			<1 608 100000 100000>,
+			/* 50 MB/s */
+			<81 512 130718 200000>,
+			<1 608 133320 133320>,
+			/* 100 MB/s */
+			<81 512 261438 200000>,
+			<1 608 150000 150000>,
+			/* 200 MB/s */
+			<81 512 261438 400000>,
+			<1 608 300000 300000>,
+			/* Max. bandwidth */
+			<81 512 1338562 4096000>,
+			<1 608 1338562 4096000>;
+		qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+			100000000 200000000 4294967295>;
+
+		/* PM QoS */
+		qcom,pm-qos-irq-type = "affine_irq";
+		qcom,pm-qos-irq-latency = <70 70>;
+		qcom,pm-qos-cpu-groups = <0x3f 0xc0>;
+		qcom,pm-qos-legacy-latency-us = <70 70>, <70 70>;
+
 		clocks = <&clock_gcc GCC_SDCC2_AHB_CLK>,
 			<&clock_gcc GCC_SDCC2_APPS_CLK>;
 		clock-names = "iface_clk", "core_clk";
@@ -2055,8 +2146,220 @@
 		qcom,wlan-msa-memory = <0x100000>;
 		qcom,smmu-s1-bypass;
 	};
+
+	cpubw: qcom,cpubw {
+		compatible = "qcom,devbw";
+		governor = "performance";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_LLCC>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS(150, 16) >, /*  2288 MB/s */
+			< MHZ_TO_MBPS(300, 16) >, /*  4577 MB/s */
+			< MHZ_TO_MBPS(466, 16) >, /*  7110 MB/s */
+			< MHZ_TO_MBPS(600, 16) >, /*  9155 MB/s */
+			< MHZ_TO_MBPS(806, 16) >, /* 12298 MB/s */
+			< MHZ_TO_MBPS(933, 16) >; /* 14236 MB/s */
+	};
+
+	bwmon: qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon4";
+		reg = <0x1436400 0x300>, <0x1436300 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 581 4>;
+		qcom,mport = <0>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&cpubw>;
+	};
+
+	llccbw: qcom,llccbw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports =
+			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+	};
+
+	llcc_bwmon: qcom,llcc-bwmon {
+		compatible = "qcom,bimc-bwmon5";
+		reg = <0x0114a000 0x1000>;
+		reg-names = "base";
+		interrupts = <GIC_SPI 580 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hw-timer-hz = <19200000>;
+		qcom,target-dev = <&llccbw>;
+		qcom,count-unit = <0x200000>;
+		qcom,byte-mid-mask = <0xe000>;
+		qcom,byte-mid-match = <0xe000>;
+	};
+
+	memlat_cpu0: qcom,memlat-cpu0 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+	};
+
+	memlat_cpu4: qcom,memlat-cpu4 {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+	};
+
+	devfreq_memlat_0: qcom,cpu0-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&memlat_cpu0>;
+		qcom,cachemiss-ev = <0x24>;
+		qcom,core-dev-table =
+			<  748800 MHZ_TO_MBPS( 300, 4) >,
+			<  998400 MHZ_TO_MBPS( 451, 4) >,
+			< 1209600 MHZ_TO_MBPS( 547, 4) >,
+			< 1497600 MHZ_TO_MBPS( 768, 4) >,
+			< 1728000 MHZ_TO_MBPS(1017, 4) >;
+	};
+
+	devfreq_memlat_4: qcom,cpu4-memlat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&memlat_cpu4>;
+		qcom,cachemiss-ev = <0x24>;
+		qcom,core-dev-table =
+			<  787200 MHZ_TO_MBPS( 300, 4) >,
+			< 1113600 MHZ_TO_MBPS( 547, 4) >,
+			< 1344000 MHZ_TO_MBPS(1017, 4) >,
+			< 1900800 MHZ_TO_MBPS(1555, 4) >,
+			< 2438400 MHZ_TO_MBPS(1804, 4) >;
+	};
+
+	l3_cpu0: qcom,l3-cpu0 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER0_VOTE_CLK>;
+		governor = "performance";
+	};
+
+	l3_cpu4: qcom,l3-cpu4 {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_cpucc L3_CLUSTER1_VOTE_CLK>;
+		governor = "performance";
+	};
+
+	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
+		qcom,target-dev = <&l3_cpu0>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			<  748800  566400000 >,
+			<  998400  787200000 >,
+			< 1209600  940800000 >,
+			< 1497600 1190400000 >,
+			< 1612800 1382400000 >,
+			< 1728000 1440000000 >;
+	};
+
+	devfreq_l3lat_4: qcom,cpu4-l3lat-mon {
+		compatible = "qcom,arm-memlat-mon";
+		qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
+		qcom,target-dev = <&l3_cpu4>;
+		qcom,cachemiss-ev = <0x17>;
+		qcom,core-dev-table =
+			< 1113600  566400000 >,
+			< 1344000  787200000 >,
+			< 1728000  940800000 >,
+			< 1900800 1190400000 >,
+			< 2438400 1440000000 >;
+	};
+
+	mincpubw: qcom,mincpubw {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <1 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			< MHZ_TO_MBPS( 100, 4) >, /* 381 MB/s */
+			< MHZ_TO_MBPS( 200, 4) >, /* 762 MB/s */
+			< MHZ_TO_MBPS( 300, 4) >, /* 1144 MB/s */
+			< MHZ_TO_MBPS( 451, 4) >, /* 1720 MB/s */
+			< MHZ_TO_MBPS( 547, 4) >, /* 2086 MB/s */
+			< MHZ_TO_MBPS( 681, 4) >, /* 2597 MB/s */
+			< MHZ_TO_MBPS( 768, 4) >, /* 2929 MB/s */
+			< MHZ_TO_MBPS(1017, 4) >, /* 3879 MB/s */
+			< MHZ_TO_MBPS(1353, 4) >, /* 5161 MB/s */
+			< MHZ_TO_MBPS(1555, 4) >, /* 5931 MB/s */
+			< MHZ_TO_MBPS(1804, 4) >; /* 6881 MB/s */
+	};
+
+	devfreq-cpufreq {
+		mincpubw-cpufreq {
+			target-dev = <&mincpubw>;
+			cpu-to-dev-map-0 =
+				<  748800 MHZ_TO_MBPS( 300, 4) >,
+				< 1209600 MHZ_TO_MBPS( 451, 4) >,
+				< 1612800 MHZ_TO_MBPS( 547, 4) >,
+				< 1728000 MHZ_TO_MBPS( 768, 4) >;
+			cpu-to-dev-map-4 =
+				< 1113600 MHZ_TO_MBPS( 300, 4) >,
+				< 1344000 MHZ_TO_MBPS( 547, 4) >,
+				< 1728000 MHZ_TO_MBPS( 768, 4) >,
+				< 1900800 MHZ_TO_MBPS(1017, 4) >,
+				< 2438400 MHZ_TO_MBPS(1804, 4) >;
+		};
+	};
+
+	gpu_gx_domain_addr: syscon@5091508 {
+		compatible = "syscon";
+		reg = <0x5091508 0x4>;
+	};
+
+	gpu_gx_sw_reset: syscon@5091008 {
+		compatible = "syscon";
+		reg = <0x5091008 0x4>;
+	};
 };
 
+#include "pm660.dtsi"
+#include "pm660l.dtsi"
+#include "sdm670-regulator.dtsi"
 #include "sdm670-pinctrl.dtsi"
 #include "msm-arm-smmu-sdm670.dtsi"
 #include "msm-gdsc-sdm845.dtsi"
@@ -2131,6 +2434,9 @@
 	clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
 	qcom,force-enable-root-clk;
 	parent-supply = <&pm660l_s2_level>;
+	domain-addr = <&gpu_gx_domain_addr>;
+	sw-reset = <&gpu_gx_sw_reset>;
+	qcom,reset-aon-logic;
 	status = "ok";
 };
 
@@ -2148,10 +2454,8 @@
 	status = "ok";
 };
 
-#include "pm660.dtsi"
-#include "pm660l.dtsi"
-#include "sdm670-regulator.dtsi"
 #include "sdm670-audio.dtsi"
 #include "sdm670-usb.dtsi"
 #include "sdm670-gpu.dtsi"
 #include "sdm670-thermal.dtsi"
+#include "sdm670-bus.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
index cb11a9e..9d485b5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi
@@ -47,6 +47,8 @@
 
 	qcom,msm-mbhc-hphl-swh = <1>;
 	qcom,msm-mbhc-gnd-swh = <1>;
+	qcom,msm-mbhc-hs-mic-max-threshold-mv = <1700>;
+	qcom,msm-mbhc-hs-mic-min-threshold-mv = <50>;
 	qcom,hph-en0-gpio = <&tavil_hph_en0>;
 	qcom,hph-en1-gpio = <&tavil_hph_en1>;
 	qcom,tavil-mclk-clk-freq = <9600000>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 646dbad..cf7ccae 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -903,7 +903,7 @@
 			qcom,bus-dev = <&fab_mem_noc>;
 			qcom,bcms = <&bcm_sh3>;
 			qcom,ap-owned;
-			qcom,prio = <6>;
+			qcom,prio = <7>;
 		};
 
 		mas_qhm_memnoc_cfg: mas-qhm-memnoc-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 9d799cb..9a1f055 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -111,7 +111,7 @@
 		rgltr-min-voltage = <2800000>;
 		rgltr-max-voltage = <2800000>;
 		rgltr-load-current = <0>;
-		status = "disabled";
+		status = "ok";
 	};
 
 	eeprom_rear: qcom,eeprom@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index f18137c..2702ca1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -111,7 +111,7 @@
 		rgltr-min-voltage = <2800000>;
 		rgltr-max-voltage = <2800000>;
 		rgltr-load-current = <0>;
-		status = "disabled";
+		status = "ok";
 	};
 
 	eeprom_rear: qcom,eeprom@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 8df879a..e4f768f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -325,18 +325,8 @@
 
 		msm_cam_smmu_secure {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1001 0x0>;
 			label = "cam-secure";
-			cam_secure_iova_mem_map: iova-mem-map {
-				/* Secure IO region is approximately 3.4 GB */
-				iova-mem-region-io {
-					iova-region-name = "io";
-					iova-region-start = <0x7400000>;
-					iova-region-len = <0xd8c00000>;
-					iova-region-id = <0x3>;
-					status = "ok";
-				};
-			};
+			qcom,secure-cb;
 		};
 
 		msm_cam_smmu_fd {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index fe19658..81ce1e5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -121,45 +121,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -170,10 +131,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 99ea326..a61d96e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -12,6 +12,16 @@
 
 &soc {
 
+	csr: csr@6001000 {
+		compatible = "qcom,coresight-csr";
+		reg = <0x6001000 0x1000>;
+		reg-names = "csr-base";
+
+		coresight-name = "coresight-csr";
+
+		qcom,blk-size = <1>;
+	};
+
 	replicator_qdss: replicator@6046000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b909>;
@@ -271,6 +281,9 @@
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
 
+		interrupts = <GIC_SPI 270 IRQ_TYPE_EDGE_RISING>;
+		interrupt-names = "byte-cntr-irq";
+
 		port {
 			tmc_etr_in_replicator: endpoint {
 				slave-mode;
@@ -397,16 +410,6 @@
 		clock-names = "apb_pclk";
 	};
 
-	csr: csr@6001000 {
-		compatible = "qcom,coresight-csr";
-		reg = <0x6001000 0x1000>;
-		reg-names = "csr-base";
-
-		coresight-name = "coresight-csr";
-
-		qcom,blk-size = <1>;
-	};
-
 	funnel_in0: funnel@0x6041000 {
 		compatible = "arm,primecell";
 		arm,primecell-periphid = <0x0003b908>;
@@ -1553,7 +1556,7 @@
 		reg = <0x69e1000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-ddr0";
+		coresight-name = "coresight-cti-DDR_DL_0_CTI";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1565,7 +1568,7 @@
 		reg = <0x69e4000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-ddr1";
+		coresight-name = "coresight-cti-DDR_DL_1_CTI0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1577,7 +1580,7 @@
 		reg = <0x69e5000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti1-ddr1";
+		coresight-name = "coresight-cti-DDR_DL_1_CTI1";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1589,7 +1592,7 @@
 		reg = <0x6c09000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-dlmm";
+		coresight-name = "coresight-cti-DLMM_CTI0";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1601,7 +1604,43 @@
 		reg = <0x6c0a000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti1-dlmm";
+		coresight-name = "coresight-cti-DLMM_CTI1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti0_apss: cti@78e0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x78e0000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti1_apss: cti@78f0000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x78f0000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI1";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	cti2_apss: cti@7900000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b966>;
+		reg = <0x7900000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti-APSS_CTI2";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
@@ -1929,7 +1968,23 @@
 		reg = <0x6b04000 0x1000>;
 		reg-names = "cti-base";
 
-		coresight-name = "coresight-cti0-swao";
+		coresight-name = "coresight-cti-SWAO_CTI0";
+
+		clocks = <&clock_aop QDSS_CLK>;
+		clock-names = "apb_pclk";
+	};
+
+	ipcb_tgu: tgu@6b0c000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b999>;
+		reg = <0x06B0C000 0x1000>;
+		reg-names = "tgu-base";
+		tgu-steps = <3>;
+		tgu-conditions = <4>;
+		tgu-regs = <4>;
+		tgu-timer-counters = <8>;
+
+		coresight-name = "coresight-tgu-ipcb";
 
 		clocks = <&clock_aop QDSS_CLK>;
 		clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
index e36a759e..73cd794 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-pm660.dtsi
@@ -84,17 +84,6 @@
 	/delete-property/ qcom,vddp-ref-clk-supply;
 };
 
-&ufsphy_card {
-	/delete-property/ vdda-phy-supply;
-	/delete-property/ vdda-pll-supply;
-};
-
-&ufshc_card {
-	/delete-property/ vcc-supply;
-	/delete-property/ vccq2-supply;
-	/delete-property/ qcom,vddp-ref-clk-supply;
-};
-
 &sdhc_2 {
 	/delete-property/ vdd-supply;
 	/delete-property/ vdd-io-supply;
@@ -216,11 +205,11 @@
 
 &clock_gpucc {
 	/delete-property/ vdd_cx-supply;
+	/delete-property/ vdd_mx-supply;
 };
 
 &clock_gfx {
 	/delete-property/ vdd_gfx-supply;
-	/delete-property/ vdd_mx-supply;
 };
 
 &pil_modem {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio-overlay.dtsi
new file mode 100644
index 0000000..54bd5201
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio-overlay.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "sdm670-audio-overlay.dtsi"
+
+&pm660l_3 {
+	/delete-node/analog-codec;
+};
+
+&soc {
+	/delete-node/msm-sdw-codec@62ec1000;
+	/delete-node/cdc_pdm_pinctrl;
+	/delete-node/wsa_spkr_en1_pinctrl;
+	/delete-node/wsa_spkr_en2_pinctrl;
+	/delete-node/sdw_clk_data_pinctrl;
+};
+
+&qupv3_se8_spi {
+	status = "okay";
+};
+
+&wcd9xxx_intc {
+	status = "okay";
+	qcom,gpio-connect = <&tlmm 54 0>;
+};
+
+&wdsp_mgr {
+	status = "okay";
+};
+
+&wdsp_glink {
+	status = "okay";
+};
+
+&wcd934x_cdc {
+	status = "okay";
+};
+
+&clock_audio_lnbb {
+	status = "okay";
+};
+
+&wcd_rst_gpio {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi
index f861ca3..88892d7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-audio.dtsi
@@ -10,44 +10,17 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm670-audio.dtsi"
 
 &msm_audio_ion {
 	iommus = <&apps_smmu 0x1821 0x0>;
 	qcom,smmu-sid-mask = /bits/ 64 <0xf>;
 };
 
-&qupv3_se8_spi {
-	status = "okay";
-};
-
-&pm660l_3 {
-	/delete-node/analog-codec;
-};
-
 &soc {
-	/delete-node/msm-sdw-codec@62ec1000;
 	/delete-node/sound;
-	/delete-node/cdc_pdm_pinctrl;
-	/delete-node/wsa_spkr_en1_pinctrl;
-	/delete-node/wsa_spkr_en2_pinctrl;
-	/delete-node/sdw_clk_data_pinctrl;
 };
 
-&msm_audio_ion {
-	iommus = <&apps_smmu 0x1821 0x0>;
-};
-
-&wcd9xxx_intc {
-	status = "okay";
-	qcom,gpio-connect = <&tlmm 54 0>;
-};
-
-&wdsp_mgr {
-	status = "okay";
-};
-
-&wdsp_glink {
+&tavil_snd {
 	status = "okay";
 };
 
@@ -58,24 +31,3 @@
 &dai_slim {
 	status = "okay";
 };
-
-&wcd934x_cdc {
-	status = "okay";
-};
-
-&clock_audio_lnbb {
-	status = "okay";
-};
-
-&wcd_rst_gpio {
-	status = "okay";
-};
-
-&wcd9xxx_intc {
-	status = "okay";
-};
-
-&tavil_snd {
-	status = "okay";
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts
index da59bcf..9d61324 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp-overlay.dts
@@ -21,7 +21,7 @@
 
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-interposer-sdm670-cdp.dtsi"
-
+#include "sdm845-interposer-sdm670-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 Interposer SDM670 CDP";
 	compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts
index ebb5e8f..597773e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dts
@@ -16,6 +16,8 @@
 #include "sdm845-interposer-sdm670.dtsi"
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-interposer-sdm670-cdp.dtsi"
+#include "sdm670-audio.dtsi"
+#include "sdm845-interposer-sdm670-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 Interposer SDM670 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
index e435cdd..1265d2a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-cdp.dtsi
@@ -12,7 +12,6 @@
 
 #include "sdm845-cdp.dtsi"
 #include "sdm845-interposer-pm660.dtsi"
-#include "sdm845-interposer-sdm670-audio.dtsi"
 
 &soc {
 	/delete-node/ ssusb@a800000;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts
index 3ca15b9..1690174 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp-overlay.dts
@@ -21,7 +21,7 @@
 
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-interposer-sdm670-mtp.dtsi"
-
+#include "sdm845-interposer-sdm670-audio-overlay.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 Interposer SDM670 MTP";
 	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts
index 39664f1..52869da 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dts
@@ -16,6 +16,8 @@
 #include "sdm845-interposer-sdm670.dtsi"
 #include "sdm845-sde-display.dtsi"
 #include "sdm845-interposer-sdm670-mtp.dtsi"
+#include "sdm670-audio.dtsi"
+#include "sdm845-interposer-sdm670-audio.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. MSM sdm845 v1 Interposer SDM670 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
index 4b24b0d..9d722df 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-interposer-sdm670-mtp.dtsi
@@ -12,7 +12,6 @@
 
 #include "sdm845-mtp.dtsi"
 #include "sdm845-interposer-pm660.dtsi"
-#include "sdm845-interposer-sdm670-audio.dtsi"
 
 &qupv3_se10_i2c {
 	/delete-node/ qcom,smb1355@8;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 3756197..f0d16ec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -209,45 +209,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -258,10 +219,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
index 000d65e7..2ac313d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pmic-overlay.dtsi
@@ -32,6 +32,9 @@
 	smb2_vconn: qcom,smb2-vconn {
 		regulator-name = "smb2-vconn";
 	};
+	smb2_vbus: qcom,smb2-vbus {
+		regulator-name = "smb2-vbus";
+	};
 };
 
 &usb0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 02f30fd..6dae069 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -126,45 +126,6 @@
 	status = "ok";
 };
 
-&extcon_storage_cd {
-	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
-	debounce-ms = <200>;
-	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
-
-	pinctrl-names = "default";
-	pinctrl-0 = <&storage_cd>;
-
-	status = "ok";
-};
-
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qmp-v3";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	extcon = <&extcon_storage_cd>;
-
-	status = "ok";
-};
-
 &sdhc_2 {
 	vdd-supply = <&pm8998_l21>;
 	qcom,vdd-voltage-level = <2950000 2960000>;
@@ -175,10 +136,10 @@
 	qcom,vdd-io-current-level = <200 22000>;
 
 	pinctrl-names = "active", "sleep";
-	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
-	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &storage_cd>;
 
-	extcon = <&extcon_storage_cd>;
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_LOW>;
 
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 1fa6e26..a805e2e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -221,6 +221,9 @@
 		interrupts = <GIC_SPI 601 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 0 1 64 0>,
+			<&gpi_dma0 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -240,6 +243,9 @@
 		interrupts = <GIC_SPI 602 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 1 1 64 0>,
+			<&gpi_dma0 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -259,6 +265,9 @@
 		interrupts = <GIC_SPI 603 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 2 1 64 0>,
+			<&gpi_dma0 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -278,6 +287,9 @@
 		interrupts = <GIC_SPI 604 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 3 1 64 0>,
+			<&gpi_dma0 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -297,6 +309,9 @@
 		interrupts = <GIC_SPI 605 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 4 1 64 0>,
+			<&gpi_dma0 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -316,6 +331,9 @@
 		interrupts = <GIC_SPI 606 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 5 1 64 0>,
+			<&gpi_dma0 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -335,6 +353,9 @@
 		interrupts = <GIC_SPI 607 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 6 1 64 0>,
+			<&gpi_dma0 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -354,6 +375,9 @@
 		interrupts = <GIC_SPI 608 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_0>;
+		dmas = <&gpi_dma0 0 7 1 64 0>,
+			<&gpi_dma0 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -561,6 +585,9 @@
 		interrupts = <GIC_SPI 353 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 0 1 64 0>,
+			<&gpi_dma1 1 0 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -580,6 +607,9 @@
 		interrupts = <GIC_SPI 354 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 1 1 64 0>,
+			<&gpi_dma1 1 1 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -599,6 +629,9 @@
 		interrupts = <GIC_SPI 355 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 2 1 64 0>,
+			<&gpi_dma1 1 2 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -618,6 +651,9 @@
 		interrupts = <GIC_SPI 356 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 3 1 64 0>,
+			<&gpi_dma1 1 3 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -637,6 +673,9 @@
 		interrupts = <GIC_SPI 357 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 4 1 64 0>,
+			<&gpi_dma1 1 4 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -656,6 +695,9 @@
 		interrupts = <GIC_SPI 358 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 5 1 64 0>,
+			<&gpi_dma1 1 5 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -675,6 +717,9 @@
 		interrupts = <GIC_SPI 359 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 6 1 64 0>,
+			<&gpi_dma1 1 6 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 
@@ -694,6 +739,9 @@
 		interrupts = <GIC_SPI 360 0>;
 		spi-max-frequency = <50000000>;
 		qcom,wrapper-core = <&qupv3_1>;
+		dmas = <&gpi_dma1 0 7 1 64 0>,
+			<&gpi_dma1 1 7 1 64 0>;
+		dma-names = "tx", "rx";
 		status = "disabled";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
index 0da0e67..d89722f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qvr.dtsi
@@ -10,6 +10,71 @@
  * GNU General Public License for more details.
  */
 
+#include "sdm845-pmic-overlay.dtsi"
+#include "sdm845-pinctrl-overlay.dtsi"
+#include "smb1355.dtsi"
+
+&pmi8998_pdphy {
+	vbus-supply = <&smb2_vbus>;
+};
+
+&pmi8998_fg {
+	qcom,fg-bmd-en-delay-ms = <300>;
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&smb1355_charger_0 {
+	status = "ok";
+};
+
+&smb1355_charger_1 {
+	status = "ok";
+};
+
+&soc {
+	qcom,qbt1000 {
+		status = "disabled";
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		label = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&key_vol_up_default
+			     &key_home_default>;
+
+		vol_up {
+			label = "volume_up";
+			gpios = <&pm8998_gpios 6 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <115>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+
+		home {
+			label = "home";
+			gpios = <&pm8998_gpios 5 GPIO_ACTIVE_LOW>;
+			linux,input-type = <1>;
+			linux,code = <102>;
+			gpio-key,wakeup;
+			debounce-interval = <15>;
+			linux,can-disable;
+		};
+	};
+};
+
+&pmi8998_haptics {
+	qcom,vmax-mv = <1800>;
+	qcom,wave-play-rate-us = <4255>;
+	qcom,lra-auto-mode;
+	status = "okay";
+};
+
 &ufsphy_mem {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -34,3 +99,21 @@
 
 	status = "ok";
 };
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &storage_cd>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &storage_cd>;
+
+	cd-gpios = <&tlmm 126 GPIO_ACTIVE_HIGH>;
+
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 99004bf..d607f75 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -140,38 +140,6 @@
 	qcom,cpr-ignore-invalid-fuses;
 };
 
-&ufsphy_card {
-	compatible = "qcom,ufs-phy-qrbtc-sdm845";
-
-	vdda-phy-supply = <&pm8998_l1>; /* 0.88v */
-	vdda-pll-supply = <&pm8998_l26>; /* 1.2v */
-	vdda-phy-max-microamp = <62900>;
-	vdda-pll-max-microamp = <18300>;
-
-	status = "ok";
-};
-
-&ufshc_card {
-	limit-tx-hs-gear = <1>;
-	limit-rx-hs-gear = <1>;
-
-	vdd-hba-supply = <&ufs_card_gdsc>;
-	vdd-hba-fixed-regulator;
-	vcc-supply = <&pm8998_l21>;
-	vcc-voltage-level = <2950000 2960000>;
-	vccq2-supply = <&pm8998_s4>;
-	vcc-max-microamp = <300000>;
-	vccq2-max-microamp = <300000>;
-
-	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
-	qcom,vddp-ref-clk-max-microamp = <100>;
-
-	qcom,disable-lpm;
-	rpm-level = <0>;
-	spm-level = <0>;
-	status = "ok";
-};
-
 &pmi8998_charger {
 	qcom,suspend-input;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 03b40b3..4337da7 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -414,109 +414,12 @@
 			compatible = "qcom,msm-ext-disp-audio-codec-rx";
 		};
 	};
-
-	sde_dp: qcom,dp_display@0{
-		cell-index = <0>;
-		compatible = "qcom,dp-display";
-
-		gdsc-supply = <&mdss_core_gdsc>;
-		vdda-1p2-supply = <&pm8998_l26>;
-		vdda-0p9-supply = <&pm8998_l1>;
-
-		reg =	<0xae90000 0xa84>,
-			<0x88eaa00 0x200>,
-			<0x88ea200 0x200>,
-			<0x88ea600 0x200>,
-			<0xaf02000 0x1a0>,
-			<0x780000 0x621c>,
-			<0x88ea030 0x10>,
-			<0x88e8000 0x20>,
-			<0x0aee1000 0x034>;
-		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
-			"dp_mmss_cc", "qfprom_physical", "dp_pll",
-			"usb3_dp_com", "hdcp_physical";
-
-		interrupt-parent = <&mdss_mdp>;
-		interrupts = <12 0>;
-
-		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
-			 <&clock_rpmh RPMH_CXO_CLK>,
-			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
-			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
-			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
-			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
-			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
-		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
-			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
-			"core_usb_pipe_clk", "ctrl_link_clk",
-			"ctrl_link_iface_clk", "ctrl_pixel_clk",
-			"crypto_clk", "pixel_clk_rcg", "pixel_parent";
-
-		qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
-		qcom,ext-disp = <&ext_disp>;
-
-		qcom,aux-cfg0-settings = [20 00];
-		qcom,aux-cfg1-settings = [24 13 23 1d];
-		qcom,aux-cfg2-settings = [28 24];
-		qcom,aux-cfg3-settings = [2c 00];
-		qcom,aux-cfg4-settings = [30 0a];
-		qcom,aux-cfg5-settings = [34 26];
-		qcom,aux-cfg6-settings = [38 0a];
-		qcom,aux-cfg7-settings = [3c 03];
-		qcom,aux-cfg8-settings = [40 bb];
-		qcom,aux-cfg9-settings = [44 03];
-
-		qcom,max-pclk-frequency-khz = <576000>;
-
-		qcom,core-supply-entries {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			qcom,core-supply-entry@0 {
-				reg = <0>;
-				qcom,supply-name = "gdsc";
-				qcom,supply-min-voltage = <0>;
-				qcom,supply-max-voltage = <0>;
-				qcom,supply-enable-load = <0>;
-				qcom,supply-disable-load = <0>;
-			};
-		};
-
-		qcom,ctrl-supply-entries {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			qcom,ctrl-supply-entry@0 {
-				reg = <0>;
-				qcom,supply-name = "vdda-1p2";
-				qcom,supply-min-voltage = <1200000>;
-				qcom,supply-max-voltage = <1200000>;
-				qcom,supply-enable-load = <21800>;
-				qcom,supply-disable-load = <4>;
-			};
-		};
-
-		qcom,phy-supply-entries {
-			#address-cells = <1>;
-			#size-cells = <0>;
-
-			qcom,phy-supply-entry@0 {
-				reg = <0>;
-				qcom,supply-name = "vdda-0p9";
-				qcom,supply-min-voltage = <880000>;
-				qcom,supply-max-voltage = <880000>;
-				qcom,supply-enable-load = <36000>;
-				qcom,supply-disable-load = <32>;
-			};
-		};
-	};
 };
 
 &sde_dp {
+	qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+	qcom,ext-disp = <&ext_disp>;
+
 	pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
 	pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
 	pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
@@ -532,6 +435,11 @@
 &dsi_dual_nt35597_truly_video {
 	qcom,mdss-dsi-t-clk-post = <0x0D>;
 	qcom,mdss-dsi-t-clk-pre = <0x2D>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
@@ -575,6 +483,11 @@
 &dsi_nt35597_truly_dsc_video {
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
+	qcom,mdss-dsi-min-refresh-rate = <53>;
+	qcom,mdss-dsi-max-refresh-rate = <60>;
+	qcom,mdss-dsi-pan-enable-dynamic-fps;
+	qcom,mdss-dsi-pan-fps-update =
+		"dfps_immediate_porch_mode_vfp";
 	qcom,mdss-dsi-display-timings {
 		timing@0{
 			qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index 6051106..0494495 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -9,6 +9,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 &soc {
 	mdss_mdp: qcom,mdss_mdp@ae00000 {
@@ -45,6 +46,8 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
+		#power-domain-cells = <0>;
+
 		/* hw blocks */
 		qcom,sde-off = <0x1000>;
 		qcom,sde-len = <0x45C>;
@@ -63,6 +66,11 @@
 		qcom,sde-dspp-off = <0x55000 0x57000 0x59000 0x5b000>;
 		qcom,sde-dspp-size = <0x17e0>;
 
+		qcom,sde-dest-scaler-top-off = <0x00061000>;
+		qcom,sde-dest-scaler-top-size = <0xc>;
+		qcom,sde-dest-scaler-off = <0x800 0x1000>;
+		qcom,sde-dest-scaler-size = <0x800>;
+
 		qcom,sde-wb-off = <0x66000>;
 		qcom,sde-wb-size = <0x2c8>;
 		qcom,sde-wb-xin-id = <6>;
@@ -128,8 +136,11 @@
 		qcom,sde-has-src-split;
 		qcom,sde-has-dim-layer;
 		qcom,sde-has-idle-pc;
-		qcom,sde-max-bw-low-kbps = <9600000>;
-		qcom,sde-max-bw-high-kbps = <9600000>;
+		qcom,sde-has-dest-scaler;
+		qcom,sde-max-dest-scaler-input-linewidth = <2048>;
+		qcom,sde-max-dest-scaler-output-linewidth = <2560>;
+		qcom,sde-max-bw-low-kbps = <6800000>;
+		qcom,sde-max-bw-high-kbps = <6800000>;
 		qcom,sde-min-core-ib-kbps = <2400000>;
 		qcom,sde-min-llcc-ib-kbps = <800000>;
 		qcom,sde-min-dram-ib-kbps = <800000>;
@@ -356,6 +367,8 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
+		power-domains = <&mdss_mdp>;
+
 		/* Offline rotator QoS setting */
 		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
 		qcom,mdss-rot-vbif-memtype = <3 3>;
@@ -535,4 +548,86 @@
 		};
 	};
 
+	sde_dp: qcom,dp_display@0{
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+
+		reg =	<0xae90000 0xa84>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea030 0x10>,
+			<0x88e8000 0x20>,
+			<0x0aee1000 0x034>;
+		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"usb3_dp_com", "hdcp_physical";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk",
+			"ctrl_link_iface_clk", "ctrl_pixel_clk",
+			"crypto_clk", "pixel_clk_rcg", "pixel_parent";
+
+		qcom,aux-cfg0-settings = [20 00];
+		qcom,aux-cfg1-settings = [24 13 23 1d];
+		qcom,aux-cfg2-settings = [28 24];
+		qcom,aux-cfg3-settings = [2c 00];
+		qcom,aux-cfg4-settings = [30 0a];
+		qcom,aux-cfg5-settings = [34 26];
+		qcom,aux-cfg6-settings = [38 0a];
+		qcom,aux-cfg7-settings = [3c 03];
+		qcom,aux-cfg8-settings = [40 bb];
+		qcom,aux-cfg9-settings = [44 03];
+
+		qcom,max-pclk-frequency-khz = <675000>;
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 16d0988..09f4efa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -78,6 +78,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			usb-core-id = <0>;
 		};
 
 		qcom,usbbam@a704000 {
@@ -116,8 +117,9 @@
 	qusb_phy0: qusb@88e2000 {
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>,
-			<0x007801e8 0x4>;
-		reg-names = "qusb_phy_base", "efuse_addr";
+			<0x007801e8 0x4>,
+			<0x088e0000 0x2000>;
+		reg-names = "qusb_phy_base", "efuse_addr", "eud_base";
 
 		qcom,efuse-bit-pos = <25>;
 		qcom,efuse-num-bits = <3>;
@@ -385,6 +387,7 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			usb-core-id = <1>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
index c42a7be..c070ed6 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-camera.dtsi
@@ -243,18 +243,8 @@
 
 		msm_cam_smmu_secure {
 			compatible = "qcom,msm-cam-smmu-cb";
-			iommus = <&apps_smmu 0x1001 0x0>;
 			label = "cam-secure";
-			cam_secure_iova_mem_map: iova-mem-map {
-				/* Secure IO region is approximately 3.4 GB */
-				iova-mem-region-io {
-					iova-region-name = "io";
-					iova-region-start = <0x7400000>;
-					iova-region-len = <0xd8c00000>;
-					iova-region-id = <0x3>;
-					status = "ok";
-				};
-			};
+			qcom,secure-cb;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
index 0b55736..138b750 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -57,7 +57,7 @@
 		qcom,cpr-saw-use-unit-mV;
 
 		qcom,saw-avs-ctrl = <0x101C031>;
-		qcom,saw-avs-limit = <0x3B803B8>;
+		qcom,saw-avs-limit = <0x3E803E8>;
 
 		qcom,cpr-enable;
 		qcom,cpr-hw-closed-loop;
@@ -85,9 +85,9 @@
 				regulator-max-microvolt = <18>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <18 18>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <18 18 18>;
 				qcom,cpr-corners = <18>;
 
 				qcom,cpr-corner-fmax-map = <6 12 15 18>;
@@ -133,10 +133,62 @@
 					 1950 2632>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<     0      0  12000  12000>;
+					/* Speed bin 0 */
+					<      0        0    12000    12000>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					/* Speed bin 1 */
+					<      0        0    12000    12000>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					/* Speed bin 2 */
+					<      0        0    12000    12000>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>,
+					<(-15000) (-15000)  (-3000)  (-3000)>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<     0      0  12000  10000>;
+					/* Speed bin 0 */
+					<      0        0    12000    10000>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					/* Speed bin 1 */
+					<      0        0    12000    10000>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					/* Speed bin 2 */
+					<      0        0    12000    10000>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>,
+					<(-15000) (-15000)  (-3000)  (-5000)>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
@@ -149,6 +201,8 @@
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -168,19 +222,23 @@
 				regulator-max-microvolt = <15>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <14 15>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <14 15 15>;
 				qcom,cpr-corners =
 					/* Speed bin 0 */
 					<14 14 14 14 14 14 14 14>,
 					/* Speed bin 1 */
+					<15 15 15 15 15 15 15 15>,
+					/* Speed bin 2 */
 					<15 15 15 15 15 15 15 15>;
 
 				qcom,cpr-corner-fmax-map =
 					/* Speed bin 0 */
 					<4 8 11 14>,
 					/* Speed bin 1 */
+					<4 8 11 15>,
+					/* Speed bin 2 */
 					<4 8 11 15>;
 
 				qcom,cpr-voltage-ceiling =
@@ -192,6 +250,11 @@
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  932000  932000 1000000
+					1000000>,
+					/* Speed bin 2 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  932000  932000 1000000
 					1000000>;
 
 				qcom,cpr-voltage-floor =
@@ -203,6 +266,11 @@
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
 					 568000  568000  568000  568000
+					 568000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000
 					 568000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
@@ -213,6 +281,10 @@
 					/* Speed bin 1 */
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
+					 32000  32000  32000  40000  40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
 					 32000  32000  32000  40000  40000>;
 
 				qcom,corner-frequencies =
@@ -227,6 +299,12 @@
 					 576000000  652800000  748800000
 					 844800000  940800000 1036800000
 					1132800000 1209600000 1305600000
+					1401600000 1497600000 1593600000>,
+					/* Speed bin 2 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 844800000  940800000 1036800000
+					1132800000 1209600000 1305600000
 					1401600000 1497600000 1593600000>;
 
 				qcom,cpr-ro-scaling-factor =
@@ -244,22 +322,76 @@
 					 2501 2095>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					<  8000  16000  16000  12000>;
+					/* Speed bin 0 */
+					<   8000    16000    16000    12000>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					/* Speed bin 1 */
+					<   8000    16000    16000    12000>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					/* Speed bin 2 */
+					<   8000    16000    16000    12000>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>,
+					< (-7000)    1000     1000   (-3000)>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					<  6000  14000  16000  12000>;
+					/* Speed bin 0 */
+					<   6000    14000    16000    12000>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					/* Speed bin 1 */
+					<   6000    14000    16000    12000>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					/* Speed bin 2 */
+					<   6000    14000    16000    12000>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>,
+					< (-9000)  (-1000)    1000   (-3000)>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <14 15>;
+				qcom,cpr-aging-ref-corner = <14 15 15>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -302,7 +434,7 @@
 		qcom,cpr-saw-use-unit-mV;
 
 		qcom,saw-avs-ctrl = <0x101C031>;
-		qcom,saw-avs-limit = <0x4700470>;
+		qcom,saw-avs-limit = <0x4880488>;
 
 		qcom,cpr-enable;
 		qcom,cpr-hw-closed-loop;
@@ -312,7 +444,7 @@
 		qcom,cpr-panic-reg-name-list =
 			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
 
-		qcom,cpr-aging-ref-voltage = <1136000>;
+		qcom,cpr-aging-ref-voltage = <1160000>;
 		vdd-supply = <&pm8998_s12>;
 
 		thread@0 {
@@ -325,23 +457,27 @@
 			apc1_perfcl_vreg: regulator {
 				regulator-name = "apc1_perfcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <33>;
+				regulator-max-microvolt = <35>;
 
 				qcom,cpr-fuse-corners = <5>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <28 31>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <28 31 33>;
 				qcom,cpr-corners =
 					/* Speed bin 0 */
 					<28 28 28 28 28 28 28 28>,
 					/* Speed bin 1 */
-					<31 31 31 31 31 31 31 31>;
+					<31 31 31 31 31 31 31 31>,
+					/* Speed bin 2 */
+					<33 33 33 33 33 33 33 33>;
 
 				qcom,cpr-corner-fmax-map =
 					/* Speed bin 0 */
 					<7 14 22 27 28>,
 					/* Speed bin 1 */
-					<7 14 22 27 31>;
+					<7 14 22 27 31>,
+					/* Speed bin 2 */
+					<7 14 22 30 33>;
 
 				qcom,cpr-voltage-ceiling =
 					/* Speed bin 0 */
@@ -350,15 +486,23 @@
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  932000  932000
 					 932000  932000 1104000 1104000 1104000
-					1104000 1136000 1136000>,
+					1104000 1160000 1160000>,
 					/* Speed bin 1 */
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  932000  932000
 					 932000  932000 1104000 1104000 1104000
-					1104000 1136000 1136000 1136000 1136000
-					1136000>;
+					1104000 1160000 1160000 1160000 1160000
+					1160000>,
+					/* Speed bin 2 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  932000  932000
+					 932000  932000 1104000 1104000 1104000
+					1104000 1160000 1160000 1160000 1160000
+					1160000 1160000 1160000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -375,7 +519,15 @@
 					 568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  568000
-					 568000>;
+					 568000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 568000  568000  568000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -392,7 +544,15 @@
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  40000  40000  40000
-					 40000>;
+					 40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000  40000>;
 
 				qcom,corner-frequencies =
 					/* Speed bin 0 */
@@ -417,7 +577,19 @@
 					1996800000 2092800000 2169600000
 					2246400000 2323200000 2400000000
 					2476800000 2553600000 2649600000
-					2707200000>;
+					2707200000>,
+					/* Speed bin 2 */
+					<300000000  403200000  480000000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1459200000
+					1536000000 1612800000 1689600000
+					1766400000 1843200000 1920000000
+					1996800000 2092800000 2169600000
+					2246400000 2323200000 2400000000
+					2476800000 2553600000 2649600000
+					2707200000 2726400000 2745600000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -437,28 +609,76 @@
 					 2003 1675>;
 
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
-					/* Speed bin 0 */
-					<  8000   8000   8000      0      0>,
-					/* Speed bin 1 */
-					<  8000   8000   8000      0  16000>;
+				 /* Speed bin 0 */
+				 <   8000     8000     8000        0        0>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 < (-7000)  (-7000)  (-7000) (-15000) (-15000)>,
+				 /* Speed bin 1 */
+				 <   8000     8000     8000        0    16000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 /* Speed bin 2 */
+				 <   8000     8000     8000        0    16000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>,
+				 < (-7000)  (-7000)  (-7000) (-15000)    1000>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
-					/* Speed bin 0 */
-					<  6000   6000   8000      0      0>,
-					/* Speed bin 1 */
-					<  6000   6000   8000      0  16000>;
+				 /* Speed bin 0 */
+				 <   6000     6000     8000        0        0>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 < (-9000)  (-9000)  (-7000) (-15000) (-15000)>,
+				 /* Speed bin 1 */
+				 <   6000     6000     8000        0    16000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 /* Speed bin 2 */
+				 <   6000     6000     8000        0    16000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>,
+				 < (-9000)  (-9000)  (-7000) (-15000)    1000>;
 
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <27 31>;
+				qcom,cpr-aging-ref-corner = <27 31 33>;
 				qcom,cpr-aging-ro-scaling-factor = <1700>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -490,7 +710,7 @@
 /* VDD_APC1 */
 &pm8998_s12 {
 	regulator-min-microvolt = <568000>;
-	regulator-max-microvolt = <1136000>;
+	regulator-max-microvolt = <1160000>;
 };
 
 &clock_cpucc {
@@ -533,6 +753,23 @@
 		<  1497600000 0x403c0d4e 0x00003e3e 0x2 14 >,
 		<  1593600000 0x403c0e53 0x00004242 0x2 15 >;
 
+	qcom,l3-speedbin2-v0 =
+		<   300000000 0x000c000f 0x00002020 0x1 1 >,
+		<   403200000 0x500c0115 0x00002020 0x1 2 >,
+		<   480000000 0x50140219 0x00002020 0x1 3 >,
+		<   576000000 0x5014031e 0x00002020 0x1 4 >,
+		<   652800000 0x401c0422 0x00002020 0x1 5 >,
+		<   748800000 0x401c0527 0x00002020 0x1 6 >,
+		<   844800000 0x4024062c 0x00002323 0x2 7 >,
+		<   940800000 0x40240731 0x00002727 0x2 8 >,
+		<  1036800000 0x40240836 0x00002b2b 0x2 9 >,
+		<  1132800000 0x402c093b 0x00002f2f 0x2 10 >,
+		<  1209600000 0x402c0a3f 0x00003232 0x2 11 >,
+		<  1305600000 0x40340b44 0x00003636 0x2 12 >,
+		<  1401600000 0x40340c49 0x00003a3a 0x2 13 >,
+		<  1497600000 0x403c0d4e 0x00003e3e 0x2 14 >,
+		<  1593600000 0x403c0e53 0x00004242 0x2 15 >;
+
 	qcom,pwrcl-speedbin0-v0 =
 		<   300000000 0x000c000f 0x00002020 0x1 1 >,
 		<   403200000 0x500c0115 0x00002020 0x1 2 >,
@@ -573,6 +810,26 @@
 		<  1689600000 0x40441058 0x00004646 0x2 17 >,
 		<  1766400000 0x4044115c 0x00004a4a 0x2 18 >;
 
+	qcom,pwrcl-speedbin2-v0 =
+		<   300000000 0x000c000f 0x00002020 0x1 1 >,
+		<   403200000 0x500c0115 0x00002020 0x1 2 >,
+		<   480000000 0x50140219 0x00002020 0x1 3 >,
+		<   576000000 0x5014031e 0x00002020 0x1 4 >,
+		<   652800000 0x401c0422 0x00002020 0x1 5 >,
+		<   748800000 0x401c0527 0x00002020 0x1 6 >,
+		<   825600000 0x401c062b 0x00002222 0x1 7 >,
+		<   902400000 0x4024072f 0x00002626 0x1 8 >,
+		<   979200000 0x40240833 0x00002929 0x1 9 >,
+		<  1056000000 0x402c0937 0x00002c2c 0x2 10 >,
+		<  1132800000 0x402c0a3b 0x00002f2f 0x2 11 >,
+		<  1228800000 0x402c0b40 0x00003333 0x2 12 >,
+		<  1324800000 0x40340c45 0x00003737 0x2 13 >,
+		<  1420800000 0x40340d4a 0x00003b3b 0x2 14 >,
+		<  1516800000 0x403c0e4f 0x00003f3f 0x2 15 >,
+		<  1612800000 0x403c0f54 0x00004343 0x2 16 >,
+		<  1689600000 0x40441058 0x00004646 0x2 17 >,
+		<  1766400000 0x4044115c 0x00004a4a 0x2 18 >;
+
 	qcom,perfcl-speedbin0-v0 =
 		<   300000000 0x000c000f 0x00002020 0x1 1 >,
 		<   403200000 0x500c0115 0x00002020 0x1 2 >,
@@ -635,14 +892,53 @@
 		<  2649600000 0x40541d8a 0x00006e6e 0x2 30 >,
 		<  2745600000 0x40511e8f 0x00007272 0x2 31 >;
 
+	qcom,perfcl-speedbin2-v0 =
+		<   300000000 0x000c000f 0x00002020 0x1 1 >,
+		<   403200000 0x500c0115 0x00002020 0x1 2 >,
+		<   480000000 0x50140219 0x00002020 0x1 3 >,
+		<   576000000 0x5014031e 0x00002020 0x1 4 >,
+		<   652800000 0x401c0422 0x00002020 0x1 5 >,
+		<   748800000 0x401c0527 0x00002020 0x1 6 >,
+		<   825600000 0x401c062b 0x00002222 0x1 7 >,
+		<   902400000 0x4024072f 0x00002626 0x1 8 >,
+		<   979200000 0x40240833 0x00002929 0x1 9 >,
+		<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+		<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+		<  1209600000 0x402c0b3f 0x00003232 0x2 12 >,
+		<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+		<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+		<  1459200000 0x403c0e4c 0x00003d3d 0x2 15 >,
+		<  1536000000 0x403c0f50 0x00004040 0x2 16 >,
+		<  1612800000 0x403c1054 0x00004343 0x2 17 >,
+		<  1689600000 0x40441158 0x00004646 0x2 18 >,
+		<  1766400000 0x4044125c 0x00004a4a 0x2 19 >,
+		<  1843200000 0x40441360 0x00004d4d 0x2 20 >,
+		<  1920000000 0x404c1464 0x00005050 0x2 21 >,
+		<  1996800000 0x404c1568 0x00005353 0x2 22 >,
+		<  2092800000 0x4054166d 0x00005757 0x2 23 >,
+		<  2169600000 0x40541771 0x00005a5a 0x2 24 >,
+		<  2246400000 0x40541875 0x00005e5e 0x2 25 >,
+		<  2323200000 0x40541979 0x00006161 0x2 26 >,
+		<  2400000000 0x40541a7d 0x00006464 0x2 27 >,
+		<  2476800000 0x40541b81 0x00006767 0x2 28 >,
+		<  2553600000 0x40541c85 0x00006a6a 0x2 29 >,
+		<  2649600000 0x40541d8a 0x00006e6e 0x2 30 >,
+		<  2707200000 0x40511e8d 0x00007171 0x2 30 >,
+		<  2764800000 0x40511f90 0x00007373 0x2 31 >,
+		<  2784000000 0x40512091 0x00007474 0x2 32 >,
+		<  2803200000 0x40512192 0x00007575 0x2 33 >;
+
 	qcom,l3-memacc-level-vc-bin0 = <8 13>;
 	qcom,l3-memacc-level-vc-bin1 = <8 13>;
+	qcom,l3-memacc-level-vc-bin2 = <8 13>;
 
 	qcom,pwrcl-memacc-level-vc-bin0 = <12 16>;
 	qcom,pwrcl-memacc-level-vc-bin1 = <12 16>;
+	qcom,pwrcl-memacc-level-vc-bin2 = <12 16>;
 
 	qcom,perfcl-memacc-level-vc-bin0 = <14 22>;
 	qcom,perfcl-memacc-level-vc-bin1 = <14 22>;
+	qcom,perfcl-memacc-level-vc-bin2 = <14 22>;
 };
 
 &pcie1 {
@@ -911,6 +1207,54 @@
 	qcom,sde-min-core-ib-kbps = <4800000>;
 };
 
+&mdss_dsi0 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
+&mdss_dsi1 {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
+&sde_dp {
+	qcom,core-supply-entries {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,core-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "refgen";
+			qcom,supply-min-voltage = <0>;
+			qcom,supply-max-voltage = <0>;
+			qcom,supply-enable-load = <0>;
+			qcom,supply-disable-load = <0>;
+		};
+	};
+};
+
 &energy_costs {
 	CPU_COST_0: core-cost0 {
 		busy-cost-data = <
@@ -1139,3 +1483,26 @@
 		};
 	};
 };
+
+&qusb_phy0 {
+		qcom,qusb-phy-init-seq =
+			/* <value reg_offset> */
+			   <0x23 0x210 /* PWR_CTRL1 */
+			    0x03 0x04  /* PLL_ANALOG_CONTROLS_TWO */
+			    0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+			    0x80 0x2c  /* PLL_CMODE */
+			    0x0a 0x184 /* PLL_LOCK_DELAY */
+			    0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+			    0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+			    0x20 0x198 /* PLL_BIAS_CONTROL_2 */
+			    0x21 0x214 /* PWR_CTRL2 */
+			    0x00 0x220 /* IMP_CTRL1 */
+			    0x58 0x224 /* IMP_CTRL2 */
+			    0x45 0x240 /* TUNE1 */
+			    0x29 0x244 /* TUNE2 */
+			    0xca 0x248 /* TUNE3 */
+			    0x04 0x24c /* TUNE4 */
+			    0x03 0x250 /* TUNE5 */
+			    0x00 0x23c /* CHG_CTRL2 */
+			    0x22 0x210>; /* PWR_CTRL1 */
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index b4c1fb3..077770d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -36,7 +36,6 @@
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
-		ufshc2 = &ufshc_card; /* Removable UFS slot */
 		pci-domain0 = &pcie0;
 		pci-domain1 = &pcie1;
 		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
@@ -847,7 +846,7 @@
 
 	llccbw: qcom,llccbw {
 		compatible = "qcom,devbw";
-		governor = "powersave";
+		governor = "performance";
 		qcom,src-dst-ports =
 			<MSM_BUS_MASTER_LLCC MSM_BUS_SLAVE_EBI_CH0>;
 		qcom,active-only;
@@ -1094,6 +1093,7 @@
 		reg = <0x5090000 0x9000>;
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
 		qcom,gpu_cc_gmu_clk_src-opp-handle = <&gmu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
@@ -1104,7 +1104,6 @@
 		reg = <0x5090000 0x9000>;
 		reg-names = "cc_base";
 		vdd_gfx-supply = <&pm8005_s1_level>;
-		vdd_mx-supply = <&pm8998_s6_level>;
 		qcom,gpu_cc_gx_gfx3d_clk_src-opp-handle = <&msm_gpu>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
@@ -1546,102 +1545,6 @@
 		status = "disabled";
 	};
 
-	extcon_storage_cd: extcon_storage_cd {
-		compatible = "extcon-gpio";
-		extcon-id = <62>; /* EXTCON_MECHANICAL */
-		status = "disabled";
-	};
-
-	ufsphy_card: ufsphy_card@1da7000 {
-		reg = <0x1da7000 0xda8>; /* PHY regs */
-		reg-names = "phy_mem";
-		#phy-cells = <0>;
-
-		lanes-per-direction = <1>;
-
-		clock-names = "ref_clk_src",
-			"ref_clk",
-			"ref_aux_clk";
-		clocks = <&clock_rpmh RPMH_CXO_CLK>,
-			<&clock_gcc GCC_UFS_CARD_CLKREF_CLK>,
-			<&clock_gcc GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK>;
-
-		status = "disabled";
-	};
-
-	ufshc_card: ufshc_card@1da4000 {
-		compatible = "qcom,ufshc";
-		reg = <0x1da4000 0x2500>;
-		interrupts = <0 125 0>;
-		phys = <&ufsphy_card>;
-		phy-names = "ufsphy";
-
-		lanes-per-direction = <1>;
-		dev-ref-clk-freq = <0>; /* 19.2 MHz */
-
-		clock-names =
-			"core_clk",
-			"bus_aggr_clk",
-			"iface_clk",
-			"core_clk_unipro",
-			"core_clk_ice",
-			"ref_clk",
-			"tx_lane0_sync_clk",
-			"rx_lane0_sync_clk";
-		clocks =
-			<&clock_gcc GCC_UFS_CARD_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK>,
-			<&clock_gcc GCC_UFS_CARD_AHB_CLK>,
-			<&clock_gcc GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK>,
-			<&clock_gcc GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK>,
-			<&clock_rpmh RPMH_CXO_CLK>,
-			<&clock_gcc GCC_UFS_CARD_TX_SYMBOL_0_CLK>,
-			<&clock_gcc GCC_UFS_CARD_RX_SYMBOL_0_CLK>;
-		freq-table-hz =
-			<50000000 200000000>,
-			<0 0>,
-			<0 0>,
-			<37500000 150000000>,
-			<75000000 300000000>,
-			<0 0>,
-			<0 0>,
-			<0 0>;
-
-		qcom,msm-bus,name = "ufshc_card";
-		qcom,msm-bus,num-cases = <9>;
-		qcom,msm-bus,num-paths = <2>;
-		qcom,msm-bus,vectors-KBps =
-		<122 512 0 0>, <1 756 0 0>,          /* No vote */
-		<122 512 922 0>, <1 756 1000 0>,     /* PWM G1 */
-		<122 512 127796 0>, <1 756 1000 0>,  /* HS G1 RA */
-		<122 512 255591 0>, <1 756 1000 0>,  /* HS G2 RA */
-		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RA */
-		<122 512 149422 0>, <1 756 1000 0>,  /* HS G1 RB */
-		<122 512 298189 0>, <1 756 1000 0>,  /* HS G2 RB */
-		<122 512 2097152 0>, <1 756 102400 0>,  /* HS G3 RB */
-		<122 512 7643136 0>, <1 756 307200 0>; /* Max. bandwidth */
-		qcom,bus-vector-names = "MIN",
-		"PWM_G1_L1",
-		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
-		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
-		"MAX";
-
-		/* PM QoS */
-		qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
-		qcom,pm-qos-cpu-group-latency-us = <70 70>;
-		qcom,pm-qos-default-cpu = <0>;
-
-		/*
-		 * Note: this instance doesn't have control over UFS device
-		 * reset
-		 */
-
-		resets = <&clock_gcc GCC_UFS_CARD_BCR>;
-		reset-names = "core_reset";
-
-		status = "disabled";
-	};
-
 	sdhc_2: sdhci@8804000 {
 		compatible = "qcom,sdhci-msm-v5";
 		reg = <0x8804000 0x1000>;
@@ -1740,6 +1643,7 @@
 		qcom,sysmon-id = <0>;
 		qcom,ssctl-instance-id = <0x12>;
 		qcom,override-acc;
+		qcom,signal-aop;
 		qcom,qdsp6v65-1-0;
 		qcom,mss_pdc_offset = <8>;
 		status = "ok";
@@ -1755,6 +1659,9 @@
 
 		/* GPIO output to mss */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "mss-pil";
 		qcom,mba-mem@0 {
 			compatible = "qcom,pil-mba-mem";
 			memory-region = <&pil_mba_mem>;
@@ -1781,6 +1688,7 @@
 		status = "ok";
 		qcom,ssctl-instance-id = <0x14>;
 		qcom,firmware-name = "adsp";
+		qcom,signal-aop;
 		memory-region = <&pil_adsp_mem>;
 
 		/* GPIO inputs from lpass */
@@ -1791,6 +1699,9 @@
 
 		/* GPIO output to lpass */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "adsp-pil";
 	};
 
 	qcom,ssc@5c00000 {
@@ -1812,6 +1723,7 @@
 		qcom,smem-id = <424>;
 		qcom,sysmon-id = <3>;
 		qcom,ssctl-instance-id = <0x16>;
+		qcom,signal-aop;
 		qcom,firmware-name = "slpi";
 		status = "ok";
 		memory-region = <&pil_slpi_mem>;
@@ -1824,6 +1736,9 @@
 
 		/* GPIO output to ssc */
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_3_out 0 0>;
+
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "slpi-pil";
 	};
 
 	slim_aud: slim@171c0000 {
@@ -1906,9 +1821,13 @@
 
 		qcom,pas-id = <14>;
 		qcom,proxy-timeout-ms = <10000>;
+		qcom,signal-aop;
 		qcom,firmware-name = "spss";
 		memory-region = <&pil_spss_mem>;
 		qcom,spss-scsr-bits = <24 25>;
+
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "spss-pil";
 	};
 
 	wdog: qcom,wdt@17980000{
@@ -1941,6 +1860,7 @@
 		qcom,sysmon-id = <7>;
 		qcom,ssctl-instance-id = <0x17>;
 		qcom,firmware-name = "cdsp";
+		qcom,signal-aop;
 		memory-region = <&pil_cdsp_mem>;
 
 		/* GPIO inputs from turing */
@@ -1952,6 +1872,9 @@
 		/* GPIO output to turing*/
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
 		status = "ok";
+
+		mboxes = <&qmp_aop 0>;
+		mbox-names = "cdsp-pil";
 	};
 
 	qcom,msm-rtb {
@@ -2278,19 +2201,19 @@
 		};
 
 		LLCC_1: llcc_1_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_2: llcc_2_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_3: llcc_3_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 
 		LLCC_4: llcc_4_dcache {
-			qcom,dump-size = <0x114100>;
+			qcom,dump-size = <0x1141c0>;
 		};
 	};
 
@@ -2543,7 +2466,7 @@
 	qcom,qsee_ipc_irq_bridge {
 		compatible = "qcom,qsee-ipc-irq-bridge";
 
-		qcom,qsee-ipq-irq-spss {
+		qcom,qsee-ipc-irq-spss {
 			qcom,rx-irq-clr = <0x1888008 0x4>;
 			qcom,rx-irq-clr-mask = <0x1>;
 			qcom,dev-name = "qsee_ipc_irq_spss";
@@ -2949,6 +2872,7 @@
 		compatible = "qcom,pil-tz-generic";
 		qcom,pas-id = <0xf>;
 		qcom,firmware-name = "ipa_fws";
+		qcom,pil-force-shutdown;
 	};
 
 	qcom,chd_sliver {
@@ -3704,6 +3628,10 @@
 			 <&clock_rpmh RPMH_RF_CLK3_A>;
 		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
 		qcom,smmu-support;
+		qcom,smmu-mapping = <0x20000000 0xe0000000>;
+		qcom,smmu-s1-en;
+		qcom,smmu-fast-map;
+		qcom,smmu-coherent;
 		qcom,keep-radio-on-during-sleep;
 		status = "disabled";
 	};
diff --git a/arch/arm64/configs/msm8953-perf_defconfig b/arch/arm64/configs/msm8953-perf_defconfig
index 98def01..4e7fe31 100644
--- a/arch/arm64/configs/msm8953-perf_defconfig
+++ b/arch/arm64/configs/msm8953-perf_defconfig
@@ -288,10 +288,8 @@
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
-CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
@@ -393,6 +391,7 @@
 CONFIG_USB_BAM=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_QCOM_EUD=y
diff --git a/arch/arm64/configs/msm8953_defconfig b/arch/arm64/configs/msm8953_defconfig
index 8b5c2ea..6951086 100644
--- a/arch/arm64/configs/msm8953_defconfig
+++ b/arch/arm64/configs/msm8953_defconfig
@@ -298,10 +298,8 @@
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_QCOM_GENI=y
 CONFIG_SPI=y
 CONFIG_SPI_QUP=y
-CONFIG_SPI_QCOM_GENI=y
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
@@ -404,6 +402,7 @@
 CONFIG_USB_BAM=y
 CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_MAILBOX=y
 CONFIG_ARM_SMMU=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_DEBUG_TRACKING=y
diff --git a/arch/arm64/configs/sdm670-perf_defconfig b/arch/arm64/configs/sdm670-perf_defconfig
index dc09f04..cc7f169 100644
--- a/arch/arm64/configs/sdm670-perf_defconfig
+++ b/arch/arm64/configs/sdm670-perf_defconfig
@@ -335,6 +335,7 @@
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP_OLEDB=y
 CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_REFGEN=y
 CONFIG_REGULATOR_RPMH=y
 CONFIG_REGULATOR_STUB=y
 CONFIG_MEDIA_SUPPORT=y
@@ -485,6 +486,8 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QPNP_PBS=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig
index 79bb308..b47850c 100644
--- a/arch/arm64/configs/sdm670_defconfig
+++ b/arch/arm64/configs/sdm670_defconfig
@@ -23,6 +23,7 @@
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_SCHEDTUNE=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
@@ -35,12 +36,13 @@
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
 CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
 # CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -133,6 +135,7 @@
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
 CONFIG_NETFILTER_XT_TARGET_LOG=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -160,6 +163,7 @@
 CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
@@ -270,9 +274,15 @@
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
 CONFIG_PPPOLAC=y
 CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
 CONFIG_USB_USBNET=y
 CONFIG_WIL6210=m
 CONFIG_WCNSS_MEM_PRE_ALLOC=y
@@ -342,6 +352,7 @@
 CONFIG_REGULATOR_QPNP_LCDB=y
 CONFIG_REGULATOR_QPNP_OLEDB=y
 CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_REFGEN=y
 CONFIG_REGULATOR_RPMH=y
 CONFIG_REGULATOR_STUB=y
 CONFIG_MEDIA_SUPPORT=y
@@ -494,6 +505,8 @@
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QPNP_PBS=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_BUS_CONFIG_RPMH=y
 CONFIG_QCOM_SECURE_BUFFER=y
 CONFIG_QCOM_EARLY_RANDOM=y
 CONFIG_MSM_SMEM=y
@@ -512,6 +525,7 @@
 CONFIG_MSM_GLINK_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
 CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_GLINK_COMM=y
 CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_MSM_PIL_MSS_QDSP6V5=y
 CONFIG_ICNSS=y
@@ -519,13 +533,16 @@
 CONFIG_QCOM_COMMAND_DB=y
 CONFIG_MSM_PERFORMANCE=y
 CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
 CONFIG_MSM_EVENT_TIMER=y
 CONFIG_MSM_PM=y
 CONFIG_MSM_QBT1000=y
+CONFIG_APSS_CORE_EA=y
 CONFIG_QCOM_DCC_V2=y
 CONFIG_QTI_RPM_STATS_LOG=y
 CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
 CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_MSM_REMOTEQDSS=y
 CONFIG_QCOM_BIMC_BWMON=y
 CONFIG_ARM_MEMLAT_MON=y
 CONFIG_QCOMCCI_HWMON=y
@@ -623,13 +640,16 @@
 CONFIG_CORESIGHT_TPDA=y
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_EVENT=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
 CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_CTR=y
 CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index a04d0f4..fd96708 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -7,6 +7,9 @@
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IRQ_TIME_ACCOUNTING=y
 CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_RCU_NOCB_CPU=y
@@ -234,6 +237,7 @@
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_MEMORY_STATE_TIME=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -296,6 +300,7 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 4933500..0d89dc7 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -304,6 +304,7 @@
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
@@ -455,9 +456,6 @@
 CONFIG_EDAC_KRYO3XX_ARM64=y
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE=y
 CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y
-CONFIG_EDAC_QCOM_LLCC=y
-CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
-CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_QPNP=y
 CONFIG_DMADEVICES=y
@@ -653,6 +651,7 @@
 CONFIG_CORESIGHT_TPDM=y
 CONFIG_CORESIGHT_CTI=y
 CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_TGU=y
 CONFIG_CORESIGHT_HWEVENT=y
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 6ebd2c3..f1ace59 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
 #include <asm/sysreg.h>
 
 #define ICC_EOIR1_EL1			sys_reg(3, 0, 12, 12, 1)
+#define ICC_HPPIR1_EL1			sys_reg(3, 0, 12, 12, 2)
 #define ICC_DIR_EL1			sys_reg(3, 0, 12, 11, 1)
 #define ICC_IAR1_EL1			sys_reg(3, 0, 12, 12, 0)
 #define ICC_SGI1R_EL1			sys_reg(3, 0, 12, 11, 5)
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index ab0e5b2..110f750 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -14,14 +14,16 @@
 struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
+	bool			init;
+	struct kref		kref;
+	const struct dma_map_ops *ops;
 
+	/* Protects bitmap */
+	spinlock_t		lock;
 	void			*bitmap;
 	size_t			bits;
 	dma_addr_t		base;
 
-	spinlock_t		lock;
-	struct kref		kref;
-
 	struct dma_fast_smmu_mapping *fast;
 };
 
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index afa23b0..1fb0230 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,10 +114,10 @@
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE		0x100000000UL
+#define ELF_ET_DYN_BASE		(2 * TASK_SIZE_64 / 3)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 5edb6ed..f3a142e 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -165,6 +165,11 @@
 /* the offset between the kernel virtual and physical mappings */
 extern u64			kimage_voffset;
 
+static inline unsigned long kaslr_offset(void)
+{
+	return kimage_vaddr - KIMAGE_VADDR;
+}
+
 /*
  * Allow all memory at the discovery stage. We will clip it later.
  */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 394c61d..1d5890f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -157,9 +157,11 @@
 
 void fpsimd_flush_thread(void)
 {
+	preempt_disable();
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
 	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	preempt_enable();
 }
 
 /*
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index ba29095..a58fb92 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -356,11 +356,11 @@
 static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
 			      void *p)
 {
-	u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
+	const unsigned long offset = kaslr_offset();
 
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
-		pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
-			 kaslr_offset, KIMAGE_VADDR);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
+		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
+			 offset, KIMAGE_VADDR);
 	} else {
 		pr_emerg("Kernel Offset: disabled\n");
 	}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 7f90b7e..5229b33 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1906,23 +1906,42 @@
  * IO address ranges, which is required to perform memory allocation and
  * mapping with IOMMU aware functions.
  *
- * The client device need to be attached to the mapping with
- * arm_iommu_attach_device function.
+ * Clients may use iommu_domain_set_attr() to set additional flags prior
+ * to calling arm_iommu_attach_device() to complete initialization.
  */
 struct dma_iommu_mapping *
 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
 	unsigned int bits = size >> PAGE_SHIFT;
-	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
-	int err = -ENOMEM;
 
-	if (!bitmap_size)
+	if (!bits)
 		return ERR_PTR(-EINVAL);
 
 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
 	if (!mapping)
-		goto err;
+		return ERR_PTR(-ENOMEM);
+
+	mapping->base = base;
+	mapping->bits = bits;
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err_domain_alloc;
+
+	mapping->init = false;
+	return mapping;
+
+err_domain_alloc:
+	kfree(mapping);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static int
+bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
+{
+	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
 
 	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
 							__GFP_NORETRY);
@@ -1930,67 +1949,124 @@
 		mapping->bitmap = vzalloc(bitmap_size);
 
 	if (!mapping->bitmap)
-		goto err2;
+		return -ENOMEM;
 
-	mapping->base = base;
-	mapping->bits = bits;
 	spin_lock_init(&mapping->lock);
-
-	mapping->domain = iommu_domain_alloc(bus);
-	if (!mapping->domain)
-		goto err3;
-
-	kref_init(&mapping->kref);
-	return mapping;
-err3:
-	kvfree(mapping->bitmap);
-err2:
-	kfree(mapping);
-err:
-	return ERR_PTR(err);
+	mapping->ops = &iommu_ops;
+	return 0;
 }
-EXPORT_SYMBOL(arm_iommu_create_mapping);
 
-static void release_iommu_mapping(struct kref *kref)
+static void bitmap_iommu_release_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	kfree(mapping->bitmap);
+	iommu_domain_free(mapping->domain);
+	kfree(mapping);
+}
+
+static void bypass_iommu_release_mapping(struct kref *kref)
 {
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kvfree(mapping->bitmap);
 	kfree(mapping);
 }
 
+static int upstream_iommu_init_mapping(struct device *dev,
+					struct dma_iommu_mapping *mapping)
+{
+	struct iommu_domain *domain = mapping->domain;
+	struct iommu_group *group = dev->iommu_group;
+	dma_addr_t base = mapping->base;
+	u64 size = mapping->bits << PAGE_SHIFT;
+
+	if (iommu_get_dma_cookie(domain))
+		return -EINVAL;
+
+	/* Need to attach to get geometry */
+	if (iommu_attach_group(domain, group))
+		goto out_put_cookie;
+
+	if (iommu_dma_init_domain(domain, base, size, dev))
+		goto out_detach_group;
+
+	mapping->ops = &iommu_dma_ops;
+	iommu_detach_group(domain, group);
+	return 0;
+
+out_detach_group:
+	iommu_detach_group(domain, group);
+out_put_cookie:
+	iommu_put_dma_cookie(domain);
+	return -EINVAL;
+}
+
+static void upstream_iommu_release_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	iommu_put_dma_cookie(mapping->domain);
+	iommu_domain_free(mapping->domain);
+	kfree(mapping);
+}
+
+/*
+ * arm_iommu_release_mapping
+ * @mapping: allocated via arm_iommu_create_mapping()
+ *
+ * Frees all resources associated with the iommu mapping.
+ * The device associated with this mapping must be in the 'detached' state
+ */
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
-	if (mapping)
-		kref_put(&mapping->kref, release_iommu_mapping);
+	int s1_bypass = 0, is_fast = 0, is_upstream = 0;
+	void (*release)(struct kref *kref);
+
+	if (!mapping)
+		return;
+
+	if (!mapping->init) {
+		iommu_domain_free(mapping->domain);
+		kfree(mapping);
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	iommu_domain_get_attr(mapping->domain,
+				DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
+				&is_upstream);
+
+	if (s1_bypass)
+		release = bypass_iommu_release_mapping;
+	else if (is_fast)
+		release = fast_smmu_release_mapping;
+	else if (is_upstream)
+		release = upstream_iommu_release_mapping;
+	else
+		release = bitmap_iommu_release_mapping;
+
+	kref_put(&mapping->kref, release);
 }
 EXPORT_SYMBOL(arm_iommu_release_mapping);
 
-/**
- * arm_iommu_attach_device
- * @dev: valid struct device pointer
- * @mapping: io address space mapping structure (returned from
- *	arm_iommu_create_mapping)
- *
- * Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. Only one device in an iommu_group may use this
- * function.
- */
-int arm_iommu_attach_device(struct device *dev,
+static int arm_iommu_init_mapping(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
-	int err;
-	int s1_bypass = 0, is_fast = 0;
+	int err = -EINVAL;
+	int s1_bypass = 0, is_fast = 0, is_upstream = 0;
 	struct iommu_group *group;
 	dma_addr_t iova_end;
 
 	group = dev->iommu_group;
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
-		return -ENODEV;
+		return -EINVAL;
 	}
 
 	if (iommu_get_domain_for_dev(dev)) {
@@ -1998,6 +2074,11 @@
 		return -EINVAL;
 	}
 
+	if (mapping->init) {
+		kref_get(&mapping->kref);
+		return 0;
+	}
+
 	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
 	if (iova_end > dma_get_mask(dev)) {
 		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
@@ -2005,21 +2086,59 @@
 		return -EINVAL;
 	}
 
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-	if (is_fast)
-		return fast_smmu_attach_device(dev, mapping);
+	iommu_domain_get_attr(mapping->domain,
+				DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
+				&is_upstream);
 
-	err = iommu_attach_group(mapping->domain, group);
+	if (s1_bypass) {
+		mapping->ops = &swiotlb_dma_ops;
+		err = 0;
+	} else if (is_fast) {
+		err = fast_smmu_init_mapping(dev, mapping);
+	} else if (is_upstream) {
+		err = upstream_iommu_init_mapping(dev, mapping);
+	} else {
+		err = bitmap_iommu_init_mapping(dev, mapping);
+	}
+	if (!err) {
+		kref_init(&mapping->kref);
+		mapping->init = true;
+	}
+	return err;
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
+ * Clients are expected to call arm_iommu_attach_device() prior to sharing
+ * the dma_iommu_mapping structure with another device. This ensures
+ * initialization is complete.
+ */
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = arm_iommu_init_mapping(dev, mapping);
 	if (err)
 		return err;
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
+	err = iommu_attach_group(mapping->domain, dev->iommu_group);
+	if (err)
+		return err;
 
-	kref_get(&mapping->kref);
 	dev->archdata.mapping = mapping;
-	if (!s1_bypass)
-		set_dma_ops(dev, &iommu_ops);
+	set_dma_ops(dev, mapping->ops);
 
 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
@@ -2036,8 +2155,7 @@
 void arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
-	int is_fast, s1_bypass = 0;
-	struct iommu_group *group;
+	int s1_bypass = 0;
 
 	mapping = to_dma_iommu_mapping(dev);
 	if (!mapping) {
@@ -2045,26 +2163,22 @@
 		return;
 	}
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-	if (is_fast) {
-		fast_smmu_detach_device(dev, mapping);
+	if (!dev->iommu_group) {
+		dev_err(dev, "No iommu associated with device\n");
 		return;
 	}
 
 	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
 					&s1_bypass);
 
+	/*
+	 * ION defers dma_unmap calls. Ensure they have all completed prior to
+	 * setting dma_ops to NULL.
+	 */
 	if (msm_dma_unmap_all_for_dev(dev))
 		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
 
-	group = dev->iommu_group;
-	if (!group) {
-		dev_err(dev, "No iommu associated with device\n");
-		return;
-	}
-
-	iommu_detach_group(mapping->domain, group);
-	kref_put(&mapping->kref, release_iommu_mapping);
+	iommu_detach_group(mapping->domain, dev->iommu_group);
 	dev->archdata.mapping = NULL;
 	if (!s1_bypass)
 		set_dma_ops(dev, NULL);
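A minimal driver-side sketch, under stated assumptions, of the client flow the reworked comments describe: create the mapping, set any domain attributes, then attach. The function name example_iommu_setup(), the DOMAIN_ATTR_FAST choice and the 1 GB window at 0x10000000 are illustrative only, not part of the patch; the sketch assumes <asm/dma-iommu.h>, <linux/iommu.h> and <linux/sizes.h>.

static int example_iommu_setup(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int fast = 1;
	int ret;

	/* Creation now only allocates the domain; init happens at attach. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Attributes must be set before the first attach, which picks the
	 * bitmap, fast-smmu, upstream or s1-bypass initialization path.
	 */
	ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast);
	if (!ret)
		ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);
	return ret;
}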
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7f9501a..792dac8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -384,8 +384,11 @@
 	 * signal first. We do not need to release the mmap_sem because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		if (!user_mode(regs))
+			goto no_context;
 		return 0;
+	}
 
 	/*
 	 * Major/minor page fault accounting is only done on the initial
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 1910223..cea2bb1 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -147,23 +147,12 @@
 		 * Find irq with highest priority
 		 */
 		# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 		# open coded la t1, cpu_mask_nr_tbl
 		lui	t1, %hi(cpu_mask_nr_tbl)
 		addiu	t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-		# open coded dla t1, cpu_mask_nr_tbl
-		.set	push
-		.set	noat
-		lui	t1, %highest(cpu_mask_nr_tbl)
-		lui	AT, %hi(cpu_mask_nr_tbl)
-		daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
-		daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
-		dsll	t1, 32
-		daddu	t1, t1, AT
-		.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 1:		lw	t2,(t1)
 		nop
@@ -214,23 +203,12 @@
 		 * Find irq with highest priority
 		 */
 		# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
 		# open coded la t1, asic_mask_nr_tbl
 		lui	t1, %hi(asic_mask_nr_tbl)
 		addiu	t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
-		# open coded dla t1, asic_mask_nr_tbl
-		.set	push
-		.set	noat
-		lui	t1, %highest(asic_mask_nr_tbl)
-		lui	AT, %hi(asic_mask_nr_tbl)
-		daddiu	t1, t1, %higher(asic_mask_nr_tbl)
-		daddiu	AT, AT, %lo(asic_mask_nr_tbl)
-		dsll	t1, 32
-		daddu	t1, t1, AT
-		.set	pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
 #endif
 2:		lw	t2,(t1)
 		nop
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c721ea2..df757c9 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -604,13 +604,12 @@
 	if (parisc_requires_coherency())
 		flush_tlb_range(vma, start, end);
 
-	if ((end - start) >= parisc_cache_flush_threshold) {
+	if ((end - start) >= parisc_cache_flush_threshold
+	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
 
-	BUG_ON(vma->vm_mm->context != mfsp(3));
-
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0012f03..fe208b7 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -75,9 +75,27 @@
 				      struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
-	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 
+		/*
+		 * This full barrier orders the store to the cpumask above vs
+		 * a subsequent operation which allows this CPU to begin loading
+		 * translations for next.
+		 *
+		 * When using the radix MMU that operation is the load of the
+		 * MMU context id, which is then moved to SPRN_PID.
+		 *
+		 * For the hash MMU it is either the first load from slb_cache
+		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
+		 * copy_mm_to_paca().
+		 *
+		 * On the read side the barrier is in pte_xchg(), which orders
+		 * the store to the PTE vs the load of mm_cpumask.
+		 */
+		smp_mb();
+	}
+
 	/* 32-bit keeps track of the current PGDIR in the thread struct */
 #ifdef CONFIG_PPC32
 	tsk->thread.pgdir = next->pgd;
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a..68e087e 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -87,6 +87,7 @@
 	unsigned long *p = (unsigned long *)ptep;
 	__be64 prev;
 
+	/* See comment in switch_mm_irqs_off() */
 	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
 					     (__force unsigned long)pte_raw(new));
 
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e..41e9d0a 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -62,6 +62,7 @@
 {
 	unsigned long *p = (unsigned long *)ptep;
 
+	/* See comment in switch_mm_irqs_off() */
 	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 3c05c31..028a22b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -146,6 +146,19 @@
 
 	/* Clear bit 0 which we wouldn't clear otherwise */
 	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	if (happened & PACA_IRQ_HARD_DIS) {
+		/*
+		 * We may have missed a decrementer interrupt if hard disabled.
+		 * Check the decrementer register in case we had a rollover
+		 * while hard disabled.
+		 */
+		if (!(happened & PACA_IRQ_DEC)) {
+			if (decrementer_check_overflow()) {
+				local_paca->irq_happened |= PACA_IRQ_DEC;
+				happened |= PACA_IRQ_DEC;
+			}
+		}
+	}
 
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
@@ -171,7 +184,7 @@
 	 * in case we also had a rollover while hard disabled
 	 */
 	local_paca->irq_happened &= ~PACA_IRQ_DEC;
-	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
+	if (happened & PACA_IRQ_DEC)
 		return 0x900;
 
 	/* Finally check if an external interrupt happened */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b249c2f..1c141d5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -359,7 +359,8 @@
 
 	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+	if (current->thread.regs &&
+	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
 		check_if_tm_restore_required(current);
 		/*
 		 * If a thread has already been reclaimed then the
@@ -383,7 +384,7 @@
 {
 	if (tsk->thread.regs) {
 		preempt_disable();
-		if (tsk->thread.regs->msr & MSR_VSX) {
+		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
 			BUG_ON(tsk != current);
 			giveup_vsx(tsk);
 		}
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 5c8f12f..dcbb914 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -127,12 +127,19 @@
 	 * If task is not current, it will have been flushed already to
 	 * it's thread_struct during __switch_to().
 	 *
-	 * A reclaim flushes ALL the state.
+	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
+	 * in the appropriate thread structures from live.
 	 */
 
-	if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
-		tm_reclaim_current(TM_CAUSE_SIGNAL);
+	if (tsk != current)
+		return;
 
+	if (MSR_TM_SUSPENDED(mfmsr())) {
+		tm_reclaim_current(TM_CAUSE_SIGNAL);
+	} else {
+		tm_enable();
+		tm_save_sprs(&(tsk->thread));
+	}
 }
 #else
 static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 0cea702..d33f245 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -480,7 +480,7 @@
  * In the case that a guest uses storage keys
  * faults should no longer be backed by zero pages
  */
-#define mm_forbids_zeropage mm_use_skey
+#define mm_forbids_zeropage mm_has_pgste
 static inline int mm_use_skey(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 05c98bb..2f04ad1 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -394,7 +394,7 @@
 		"srl     %[cc],28\n"
 		: [cc] "=d" (cc)
 		: [code] "d" (code), [addr] "a" (addr)
-		: "memory", "cc");
+		: "3", "memory", "cc");
 	return cc;
 }
 
@@ -422,7 +422,7 @@
 	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
 	trace_kvm_s390_handle_sthyi(vcpu, code, addr);
 
-	if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	if (code & 0xffff) {
@@ -430,6 +430,9 @@
 		goto out;
 	}
 
+	if (addr & ~PAGE_MASK)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
 	/*
 	 * If the page has not yet been faulted in, we want to do that
 	 * now and not after all the expensive calculations.
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 3ba6227..cb2cd04 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2125,6 +2125,37 @@
 }
 
 /*
+ * Remove all empty zero pages from the mapping for lazy refaulting
+ * - This must be called after mm->context.has_pgste is set, to avoid
+ *   future creation of zero pages
+ * - This must be called after THP was enabled
+ */
+static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
+			   unsigned long end, struct mm_walk *walk)
+{
+	unsigned long addr;
+
+	for (addr = start; addr != end; addr += PAGE_SIZE) {
+		pte_t *ptep;
+		spinlock_t *ptl;
+
+		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+		if (is_zero_pfn(pte_pfn(*ptep)))
+			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
+		pte_unmap_unlock(ptep, ptl);
+	}
+	return 0;
+}
+
+static inline void zap_zero_pages(struct mm_struct *mm)
+{
+	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
+
+	walk.mm = mm;
+	walk_page_range(0, TASK_SIZE, &walk);
+}
+
+/*
  * switch on pgstes for its userspace process (for kvm)
  */
 int s390_enable_sie(void)
@@ -2141,6 +2172,7 @@
 	mm->context.has_pgste = 1;
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
+	zap_zero_pages(mm);
 	up_write(&mm->mmap_sem);
 	return 0;
 }
@@ -2153,13 +2185,6 @@
 static int __s390_enable_skey(pte_t *pte, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	/*
-	 * Remove all zero page mappings,
-	 * after establishing a policy to forbid zero page mappings
-	 * following faults for that page will get fresh anonymous pages
-	 */
-	if (is_zero_pfn(pte_pfn(*pte)))
-		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
 	/* Clear storage key */
 	ptep_zap_key(walk->mm, addr, pte);
 	return 0;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bee281f..e8dee62 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1252,7 +1252,8 @@
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
 			return -1;
-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+		/* Next instruction address */
+		jit->addrs[i + insn_count] = jit->prg;
 	}
 	bpf_jit_epilogue(jit);
 
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 349dd23..0cdeb2b 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -25,9 +25,11 @@
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[0],
@@ -38,9 +40,12 @@
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[0]));
+			     , __pa(&mm->context.tsb_descr[0]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -110,8 +115,7 @@
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04d..ff05992 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);
 
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 06981cc..d04111a 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -1240,8 +1240,6 @@
 			 * ATU group, but ATU hcalls won't be available.
 			 */
 			hv_atu = false;
-			pr_err(PFX "Could not register hvapi ATU err=%d\n",
-			       err);
 		} else {
 			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
 				vatu_major, vatu_minor);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index d5807d2..2deb89e 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -621,22 +621,48 @@
 	}
 }
 
-/* Multi-cpu list version.  */
+#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
+#define	MONDO_USEC_WAIT_MIN		2
+#define	MONDO_USEC_WAIT_MAX		100
+#define	MONDO_RETRY_LIMIT		500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to the 'cnt' cpus listed in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received it, or some cpus are truly
+ * stuck and unable to receive the mondo, in which case we time out.
+ * Occasionally a target cpu strand is borrowed briefly by the hypervisor
+ * to perform a guest service, such as PCIe error handling. Considering
+ * that service time, an overall wait of 1 second per cpu is reasonable.
+ * Two in-between mondo check wait times are defined: 2 usec for a
+ * single-cpu quick turnaround and up to 100 usec for a large cpu count.
+ * Delivering the mondo to a large number of cpus can take longer, so we
+ * adjust the retry count as long as target cpus are making forward progress.
+ */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
+	int this_cpu, tot_cpus, prev_sent, i, rem;
+	int usec_wait, retries, tot_retries;
+	u16 first_cpu = 0xffff;
+	unsigned long xc_rcvd = 0;
 	unsigned long status;
+	int ecpuerror_id = 0;
+	int enocpu_id = 0;
 	u16 *cpu_list;
+	u16 cpu;
 
 	this_cpu = smp_processor_id();
-
 	cpu_list = __va(tb->cpu_list_pa);
-
-	saw_cpu_error = 0;
-	retries = 0;
+	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+	if (usec_wait > MONDO_USEC_WAIT_MAX)
+		usec_wait = MONDO_USEC_WAIT_MAX;
+	retries = tot_retries = 0;
+	tot_cpus = cnt;
 	prev_sent = 0;
+
 	do {
-		int forward_progress, n_sent;
+		int n_sent, mondo_delivered, target_cpu_busy;
 
 		status = sun4v_cpu_mondo_send(cnt,
 					      tb->cpu_list_pa,
@@ -644,94 +670,113 @@
 
 		/* HV_EOK means all cpus received the xcall, we're done.  */
 		if (likely(status == HV_EOK))
-			break;
+			goto xcall_done;
+
+		/* Anything other than these non-fatal errors is fatal; panic */
+		if (unlikely((status != HV_EWOULDBLOCK) &&
+			(status != HV_ECPUERROR) &&
+			(status != HV_ENOCPU)))
+			goto fatal_errors;
 
 		/* First, see if we made any forward progress.
 		 *
+		 * Go through the cpu_list, count the target cpus that have
+		 * received our mondo (n_sent), and those that did not (rem).
+		 * Re-pack cpu_list with the cpus that remain to be retried at the
+		 * front - this simplifies tracking the truly stalled cpus.
+		 *
 		 * The hypervisor indicates successful sends by setting
 		 * cpu list entries to the value 0xffff.
+		 *
+		 * EWOULDBLOCK means some target cpus did not receive the
+		 * mondo and retry usually helps.
+		 *
+		 * ECPUERROR means at least one target cpu is in error state;
+		 * it's usually safe to skip the faulty cpu and retry.
+		 *
+		 * ENOCPU means one of the target cpus doesn't belong to the
+		 * domain, perhaps offlined, which is unexpected but not
+		 * fatal; it's okay to skip the offlined cpu.
 		 */
+		rem = 0;
 		n_sent = 0;
 		for (i = 0; i < cnt; i++) {
-			if (likely(cpu_list[i] == 0xffff))
+			cpu = cpu_list[i];
+			if (likely(cpu == 0xffff)) {
 				n_sent++;
+			} else if ((status == HV_ECPUERROR) &&
+				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+				ecpuerror_id = cpu + 1;
+			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+				enocpu_id = cpu + 1;
+			} else {
+				cpu_list[rem++] = cpu;
+			}
 		}
 
-		forward_progress = 0;
-		if (n_sent > prev_sent)
-			forward_progress = 1;
+		/* No cpu remained, we're done. */
+		if (rem == 0)
+			break;
 
+		/* Otherwise, update the cpu count for retry. */
+		cnt = rem;
+
+		/* Record the overall number of mondos received by the
+		 * first of the remaining cpus.
+		 */
+		if (first_cpu != cpu_list[0]) {
+			first_cpu = cpu_list[0];
+			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+		}
+
+		/* Was any mondo delivered successfully? */
+		mondo_delivered = (n_sent > prev_sent);
 		prev_sent = n_sent;
 
-		/* If we get a HV_ECPUERROR, then one or more of the cpus
-		 * in the list are in error state.  Use the cpu_state()
-		 * hypervisor call to find out which cpus are in error state.
+		/* or, was any target cpu busy processing other mondos? */
+		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+		/* Retry count is for no progress. If we're making progress,
+		 * reset the retry count.
 		 */
-		if (unlikely(status == HV_ECPUERROR)) {
-			for (i = 0; i < cnt; i++) {
-				long err;
-				u16 cpu;
-
-				cpu = cpu_list[i];
-				if (cpu == 0xffff)
-					continue;
-
-				err = sun4v_cpu_state(cpu);
-				if (err == HV_CPU_STATE_ERROR) {
-					saw_cpu_error = (cpu + 1);
-					cpu_list[i] = 0xffff;
-				}
-			}
-		} else if (unlikely(status != HV_EWOULDBLOCK))
-			goto fatal_mondo_error;
-
-		/* Don't bother rewriting the CPU list, just leave the
-		 * 0xffff and non-0xffff entries in there and the
-		 * hypervisor will do the right thing.
-		 *
-		 * Only advance timeout state if we didn't make any
-		 * forward progress.
-		 */
-		if (unlikely(!forward_progress)) {
-			if (unlikely(++retries > 10000))
-				goto fatal_mondo_timeout;
-
-			/* Delay a little bit to let other cpus catch up
-			 * on their cpu mondo queue work.
-			 */
-			udelay(2 * cnt);
+		if (likely(mondo_delivered || target_cpu_busy)) {
+			tot_retries += retries;
+			retries = 0;
+		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+			goto fatal_mondo_timeout;
 		}
+
+		/* Delay a little bit to let other cpus catch up on
+		 * their cpu mondo queue work.
+		 */
+		if (!mondo_delivered)
+			udelay(usec_wait);
+
+		retries++;
 	} while (1);
 
-	if (unlikely(saw_cpu_error))
-		goto fatal_mondo_cpu_error;
-
+xcall_done:
+	if (unlikely(ecpuerror_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+		       this_cpu, ecpuerror_id - 1);
+	} else if (unlikely(enocpu_id > 0)) {
+		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+		       this_cpu, enocpu_id - 1);
+	}
 	return;
 
-fatal_mondo_cpu_error:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "(including %d) were in error state\n",
-	       this_cpu, saw_cpu_error - 1);
-	return;
+fatal_errors:
+	/* fatal errors include bad alignment, etc */
+	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+	panic("Unexpected SUN4V mondo error %lu\n", status);
 
 fatal_mondo_timeout:
-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
-	       " progress after %d retries.\n",
-	       this_cpu, retries);
-	goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
-	       this_cpu, status);
-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
-	       "mondo_block_pa(%lx)\n",
-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
-	for (i = 0; i < cnt; i++)
-		printk("%u ", cpu_list[i]);
-	printk("]\n");
+	/* some cpus were non-responsive to the cpu mondo */
+	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+	panic("SUN4V mondo timeout panic\n");
 }
 
 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e..3463199 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@
 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
 
+	/* Get smp_processor_id() into %g3 */
+	sethi	%hi(trap_block), %g5
+	or	%g5, %lo(trap_block), %g5
+	sub	%g4, %g5, %g3
+	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+	/* Increment cpu_mondo_counter[smp_processor_id()] */
+	sethi	%hi(cpu_mondo_counter), %g5
+	or	%g5, %lo(cpu_mondo_counter), %g5
+	sllx	%g3, 3, %g3
+	add	%g5, %g3, %g5
+	ldx	[%g5], %g3
+	add	%g3, 1, %g3
+	stx	%g3, [%g5]
+
 	/* Get CPU mondo queue base phys address into %g7.  */
 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
 
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d44fb80..32dafb92 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2732,6 +2732,7 @@
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);
 
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 395ec18..7d961f6 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -375,6 +375,7 @@
 	 * %o1:	TSB base config pointer
 	 * %o2:	TSB huge config pointer, or NULL if none
 	 * %o3:	Hypervisor TSB descriptor physical address
+	 * %o4: Secondary context to load, if non-zero
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@
 	rdpr	%pstate, %g1
 	wrpr	%g1, PSTATE_IE, %pstate
 
+	brz,pn	%o4, 1f
+	 mov	SECONDARY_CONTEXT, %o5
+
+661:	stxa	%o4, [%o5] ASI_DMMU
+	.section .sun4v_1insn_patch, "ax"
+	.word	661b
+	stxa	%o4, [%o5] ASI_MMU
+	.previous
+	flush	%g6
+
+1:
 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 54f9870..5a8cb37 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -145,13 +145,13 @@
 ENTRY(U3_retl_o2_and_7_plus_GS)
 	and	%o2, 7, %o2
 	retl
-	 add	%o2, GLOBAL_SPARE, %o2
+	 add	%o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS)
 ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
 	add	GLOBAL_SPARE, 8, GLOBAL_SPARE
 	and	%o2, 7, %o2
 	retl
-	 add	%o2, GLOBAL_SPARE, %o2
+	 add	%o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 #endif
 
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e1..df707a8 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@
 {
 	struct mm_struct *mm = current->active_mm;
 
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 }
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index cc3bd58..9e240fc 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820..113588d 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					  unsigned int base);
+
 #endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1cd792d..1eab79c 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -117,11 +117,10 @@
 	.set T1, REG_T1
 .endm
 
-#define K_BASE		%r8
 #define HASH_PTR	%r9
+#define BLOCKS_CTR	%r8
 #define BUFFER_PTR	%r10
 #define BUFFER_PTR2	%r13
-#define BUFFER_END	%r11
 
 #define PRECALC_BUF	%r14
 #define WK_BUF		%r15
@@ -205,14 +204,14 @@
 		 * blended AVX2 and ALU instruction scheduling
 		 * 1 vector iteration per 8 rounds
 		 */
-		vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+		vmovdqu (i * 2)(BUFFER_PTR), W_TMP
 	.elseif ((i & 7) == 1)
-		vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+		vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
 			 WY_TMP, WY_TMP
 	.elseif ((i & 7) == 2)
 		vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
 	.elseif ((i & 7) == 4)
-		vpaddd  K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 	.elseif ((i & 7) == 7)
 		vmovdqu  WY_TMP, PRECALC_WK(i&~7)
 
@@ -255,7 +254,7 @@
 		vpxor	WY, WY_TMP, WY_TMP
 	.elseif ((i & 7) == 7)
 		vpxor	WY_TMP2, WY_TMP, WY
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
 		vpsrld	$30, WY, WY
 		vpor	WY, WY_TMP, WY
 	.elseif ((i & 7) == 7)
-		vpaddd	K_XMM(K_BASE), WY, WY_TMP
+		vpaddd  K_XMM + K_XMM_AR(%rip), WY, WY_TMP
 		vmovdqu	WY_TMP, PRECALC_WK(i&~7)
 
 		PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
 
 .endm
 
+/* Add a constant only if the (b >= c) condition is met (uses RTA as temp):
+ * a += (b >= c) ? d : 0
+ */
+.macro ADD_IF_GE a, b, c, d
+	mov     \a, RTA
+	add     $\d, RTA
+	cmp     $\c, \b
+	cmovge  RTA, \a
+.endm
+
 /*
  * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
  */
@@ -463,13 +472,16 @@
 	lea	(2*4*80+32)(%rsp), WK_BUF
 
 	# Precalc WK for first 2 blocks
-	PRECALC_OFFSET = 0
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
 	.set i, 0
 	.rept    160
 		PRECALC i
 		.set i, i + 1
 	.endr
-	PRECALC_OFFSET = 128
+
+	/* Go to next block if needed */
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 	xchg	WK_BUF, PRECALC_BUF
 
 	.align 32
@@ -479,8 +491,8 @@
 	 * we use K_BASE value as a signal of a last block,
 	 * it is set below by: cmovae BUFFER_PTR, K_BASE
 	 */
-	cmp	K_BASE, BUFFER_PTR
-	jne	_begin
+	test BLOCKS_CTR, BLOCKS_CTR
+	jnz _begin
 	.align 32
 	jmp	_end
 	.align 32
@@ -512,10 +524,10 @@
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR       /* move to next odd-64-byte block */
-	cmp	BUFFER_END, BUFFER_PTR    /* is current block the last one? */
-	cmovae	K_BASE, BUFFER_PTR	/* signal the last iteration smartly */
-
+	/* Update Counter */
+	sub $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
 	/*
 	 * rounds
 	 * 60,62,64,66,68
@@ -532,8 +544,8 @@
 	UPDATE_HASH	12(HASH_PTR), D
 	UPDATE_HASH	16(HASH_PTR), E
 
-	cmp	K_BASE, BUFFER_PTR	/* is current block the last one? */
-	je	_loop
+	test	BLOCKS_CTR, BLOCKS_CTR
+	jz	_loop
 
 	mov	TB, B
 
@@ -575,10 +587,10 @@
 		.set j, j+2
 	.endr
 
-	add	$(2*64), BUFFER_PTR2      /* move to next even-64-byte block */
-
-	cmp	BUFFER_END, BUFFER_PTR2   /* is current block the last one */
-	cmovae	K_BASE, BUFFER_PTR       /* signal the last iteration smartly */
+	/* update counter */
+	sub     $1, BLOCKS_CTR
+	/* Move to the next block only if needed*/
+	ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
 
 	jmp	_loop3
 _loop3:
@@ -641,19 +653,12 @@
 
 	avx2_zeroupper
 
-	lea	K_XMM_AR(%rip), K_BASE
-
+	/* Setup initial values */
 	mov	CTX, HASH_PTR
 	mov	BUF, BUFFER_PTR
-	lea	64(BUF), BUFFER_PTR2
 
-	shl	$6, CNT			/* mul by 64 */
-	add	BUF, CNT
-	add	$64, CNT
-	mov	CNT, BUFFER_END
-
-	cmp	BUFFER_END, BUFFER_PTR2
-	cmovae	K_BASE, BUFFER_PTR2
+	mov	BUF, BUFFER_PTR2
+	mov	CNT, BLOCKS_CTR
 
 	xmm_mov	BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
 
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index f960a04..fc61739 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@
 
 static bool avx2_usable(void)
 {
-	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 		&& boot_cpu_has(X86_FEATURE_BMI1)
 		&& boot_cpu_has(X86_FEATURE_BMI2))
 		return true;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ef766a3..e7b0e7f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1215,6 +1215,8 @@
 	 * other IST entries.
 	 */
 
+	ASM_CLAC
+
 	/* Use %rdx as our temp variable throughout */
 	pushq	%rdx
 
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 970c1de..4c1b7ea 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-	return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@
 
 	/* must be done before validate_group */
 	pmu = cpu_to_rapl_pmu(event->cpu);
+	if (!pmu)
+		return -EINVAL;
 	event->cpu = pmu->cpu;
 	event->pmu_private = pmu;
 	event->hw.event_base = msr;
@@ -585,6 +593,19 @@
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
+	if (!pmu) {
+		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+		if (!pmu)
+			return -ENOMEM;
+
+		raw_spin_lock_init(&pmu->lock);
+		INIT_LIST_HEAD(&pmu->active_list);
+		pmu->pmu = &rapl_pmus->pmu;
+		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+		rapl_hrtimer_init(pmu);
+
+		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+	}
 	/*
 	 * Check if there is an online cpu in the package which collects rapl
 	 * events already.
@@ -598,27 +619,6 @@
 	return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-	if (pmu)
-		return 0;
-
-	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-	if (!pmu)
-		return -ENOMEM;
-
-	raw_spin_lock_init(&pmu->lock);
-	INIT_LIST_HEAD(&pmu->active_list);
-	pmu->pmu = &rapl_pmus->pmu;
-	pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-	pmu->cpu = -1;
-	rapl_hrtimer_init(pmu);
-	rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-	return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -804,28 +804,21 @@
 	 * Install callbacks. Core will call them for each online cpu.
 	 */
 
-	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
-				rapl_cpu_prepare, NULL);
-	if (ret)
-		goto out;
-
 	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
 				"AP_PERF_X86_RAPL_ONLINE",
 				rapl_cpu_online, rapl_cpu_offline);
 	if (ret)
-		goto out1;
+		goto out;
 
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out2;
+		goto out1;
 
 	rapl_advertise();
 	return 0;
 
-out2:
-	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
@@ -836,7 +829,6 @@
 static void __exit intel_rapl_exit(void)
 {
 	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
 }
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index c152db2..7bcd138 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -204,6 +204,7 @@
 
 #define ELF_CORE_COPY_REGS(pr_reg, regs)			\
 do {								\
+	unsigned long base;					\
 	unsigned v;						\
 	(pr_reg)[0] = (regs)->r15;				\
 	(pr_reg)[1] = (regs)->r14;				\
@@ -226,8 +227,8 @@
 	(pr_reg)[18] = (regs)->flags;				\
 	(pr_reg)[19] = (regs)->sp;				\
 	(pr_reg)[20] = (regs)->ss;				\
-	(pr_reg)[21] = current->thread.fsbase;			\
-	(pr_reg)[22] = current->thread.gsbase;			\
+	rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base;		\
+	rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base;	\
 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
@@ -247,11 +248,11 @@
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
  * space open for things that want to use the area for 32-bit pointers.
  */
 #define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
-						  0x100000000UL)
+						  (TASK_SIZE / 3 * 2))
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
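
For scale: with the usual 47-bit x86-64 user address space, TASK_SIZE is just under 128 TiB, so TASK_SIZE / 3 * 2 puts ELF_ET_DYN_BASE at roughly 85 TiB. That is still far above the 32-bit range the comment wants to keep clear, but the base now scales with the size of the address space instead of being pinned at the old 4 GiB constant (0x100000000UL); the 32-bit case stays at 4 MiB (0x000400000UL).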
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d34bd37..6c50201 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@
 static inline void outs##bwl(int port, const void *addr, unsigned long count) \
 {									\
 	asm volatile("rep; outs" #bwl					\
-		     : "+S"(addr), "+c"(count) : "d"(port));		\
+		     : "+S"(addr), "+c"(count) : "d"(port) : "memory");	\
 }									\
 									\
 static inline void ins##bwl(int port, void *addr, unsigned long count)	\
 {									\
 	asm volatile("rep; ins" #bwl					\
-		     : "+D"(addr), "+c"(count) : "d"(port));		\
+		     : "+D"(addr), "+c"(count) : "d"(port) : "memory");	\
 }
 
 BUILDIO(b, b, char)
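
The io.h change adds a "memory" clobber because rep ins/outs move data through a pointer that is only an address operand of the asm, not a C-level output, so without the clobber the compiler may keep buffer contents cached in registers or reorder accesses around the asm. A user-space flavoured sketch of the same construct (x86 only, needs I/O privilege such as ioperm() to actually execute; the helper name is illustrative):

static inline void port_read_bytes(unsigned short port, void *buf,
				   unsigned long count)
{
	/*
	 * "rep insb" stores into ES:RDI behind the compiler's back; the
	 * "memory" clobber tells it that *buf changed, so later reads of
	 * the buffer are not served from stale registers.
	 */
	asm volatile("rep; insb"
		     : "+D"(buf), "+c"(count)
		     : "d"(port)
		     : "memory");
}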
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8e0a9fe..f9dd224 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -116,9 +116,7 @@
 		mm->context.execute_only_pkey = -1;
 	}
 	#endif
-	init_new_context_ldt(tsk, mm);
-
-	return 0;
+	return init_new_context_ldt(tsk, mm);
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9cf697c..55ffd9d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -152,6 +152,8 @@
 		if (hlist_unhashed(&n.link))
 			break;
 
+		rcu_irq_exit();
+
 		if (!n.halted) {
 			local_irq_enable();
 			schedule();
@@ -160,11 +162,11 @@
 			/*
 			 * We cannot reschedule. So halt.
 			 */
-			rcu_irq_exit();
 			native_safe_halt();
 			local_irq_disable();
-			rcu_irq_enter();
 		}
+
+		rcu_irq_enter();
 	}
 	if (!n.halted)
 		finish_swait(&n.wq, &wait);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b3760b3..0887d2a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -136,6 +136,123 @@
 	}
 }
 
+enum which_selector {
+	FS,
+	GS
+};
+
+/*
+ * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
+ * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
+ * It's forcibly inlined because it'll generate better code and this function
+ * is hot.
+ */
+static __always_inline void save_base_legacy(struct task_struct *prev_p,
+					     unsigned short selector,
+					     enum which_selector which)
+{
+	if (likely(selector == 0)) {
+		/*
+		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
+		 * be the pre-existing saved base or it could be zero.  On AMD
+		 * (with X86_BUG_NULL_SEG), the segment base could be almost
+		 * anything.
+		 *
+		 * This branch is very hot (it's hit twice on almost every
+		 * context switch between 64-bit programs), and avoiding
+		 * the RDMSR helps a lot, so we just assume that whatever
+		 * value is already saved is correct.  This matches historical
+		 * Linux behavior, so it won't break existing applications.
+		 *
+		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
+		 * report that the base is zero, it needs to actually be zero:
+		 * see the corresponding logic in load_seg_legacy.
+		 */
+	} else {
+		/*
+		 * If the selector is 1, 2, or 3, then the base is zero on
+		 * !X86_BUG_NULL_SEG CPUs and could be anything on
+		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
+		 * has never attempted to preserve the base across context
+		 * switches.
+		 *
+		 * If selector > 3, then it refers to a real segment, and
+		 * saving the base isn't necessary.
+		 */
+		if (which == FS)
+			prev_p->thread.fsbase = 0;
+		else
+			prev_p->thread.gsbase = 0;
+	}
+}
+
+static __always_inline void save_fsgs(struct task_struct *task)
+{
+	savesegment(fs, task->thread.fsindex);
+	savesegment(gs, task->thread.gsindex);
+	save_base_legacy(task, task->thread.fsindex, FS);
+	save_base_legacy(task, task->thread.gsindex, GS);
+}
+
+static __always_inline void loadseg(enum which_selector which,
+				    unsigned short sel)
+{
+	if (which == FS)
+		loadsegment(fs, sel);
+	else
+		load_gs_index(sel);
+}
+
+static __always_inline void load_seg_legacy(unsigned short prev_index,
+					    unsigned long prev_base,
+					    unsigned short next_index,
+					    unsigned long next_base,
+					    enum which_selector which)
+{
+	if (likely(next_index <= 3)) {
+		/*
+		 * The next task is using 64-bit TLS, is not using this
+		 * segment at all, or is having fun with arcane CPU features.
+		 */
+		if (next_base == 0) {
+			/*
+			 * Nasty case: on AMD CPUs, we need to forcibly zero
+			 * the base.
+			 */
+			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+				loadseg(which, __USER_DS);
+				loadseg(which, next_index);
+			} else {
+				/*
+				 * We could try to exhaustively detect cases
+				 * under which we can skip the segment load,
+				 * but there's really only one case that matters
+				 * for performance: if both the previous and
+				 * next states are fully zeroed, we can skip
+				 * the load.
+				 *
+				 * (This assumes that prev_base == 0 has no
+				 * false positives.  This is the case on
+				 * Intel-style CPUs.)
+				 */
+				if (likely(prev_index | next_index | prev_base))
+					loadseg(which, next_index);
+			}
+		} else {
+			if (prev_index != next_index)
+				loadseg(which, next_index);
+			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
+			       next_base);
+		}
+	} else {
+		/*
+		 * The next task is using a real segment.  Loading the selector
+		 * is sufficient.
+		 */
+		loadseg(which, next_index);
+	}
+}
+
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p, unsigned long tls)
 {
@@ -216,10 +333,19 @@
 		    unsigned long new_sp,
 		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
 {
+	WARN_ON_ONCE(regs != current_pt_regs());
+
+	if (static_cpu_has(X86_BUG_NULL_SEG)) {
+		/* Loading zero below won't clear the base. */
+		loadsegment(fs, __USER_DS);
+		load_gs_index(__USER_DS);
+	}
+
 	loadsegment(fs, 0);
 	loadsegment(es, _ds);
 	loadsegment(ds, _ds);
 	load_gs_index(0);
+
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
 	regs->cs		= _cs;
@@ -264,7 +390,6 @@
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-	unsigned prev_fsindex, prev_gsindex;
 	fpu_switch_t fpu_switch;
 
 	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
@@ -274,8 +399,7 @@
 	 *
 	 * (e.g. xen_load_tls())
 	 */
-	savesegment(fs, prev_fsindex);
-	savesegment(gs, prev_gsindex);
+	save_fsgs(prev_p);
 
 	/*
 	 * Load TLS before restoring any segments so that segment loads
@@ -314,108 +438,10 @@
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
-	/*
-	 * Switch FS and GS.
-	 *
-	 * These are even more complicated than DS and ES: they have
-	 * 64-bit bases are that controlled by arch_prctl.  The bases
-	 * don't necessarily match the selectors, as user code can do
-	 * any number of things to cause them to be inconsistent.
-	 *
-	 * We don't promise to preserve the bases if the selectors are
-	 * nonzero.  We also don't promise to preserve the base if the
-	 * selector is zero and the base doesn't match whatever was
-	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
-	 * FSGSBASE instructions are enabled, we'll need to offer
-	 * stronger guarantees.)
-	 *
-	 * As an invariant,
-	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
-	 * impossible.
-	 */
-	if (next->fsindex) {
-		/* Loading a nonzero value into FS sets the index and base. */
-		loadsegment(fs, next->fsindex);
-	} else {
-		if (next->fsbase) {
-			/* Next index is zero but next base is nonzero. */
-			if (prev_fsindex)
-				loadsegment(fs, 0);
-			wrmsrl(MSR_FS_BASE, next->fsbase);
-		} else {
-			/* Next base and index are both zero. */
-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
-				/*
-				 * We don't know the previous base and can't
-				 * find out without RDMSR.  Forcibly clear it.
-				 */
-				loadsegment(fs, __USER_DS);
-				loadsegment(fs, 0);
-			} else {
-				/*
-				 * If the previous index is zero and ARCH_SET_FS
-				 * didn't change the base, then the base is
-				 * also zero and we don't need to do anything.
-				 */
-				if (prev->fsbase || prev_fsindex)
-					loadsegment(fs, 0);
-			}
-		}
-	}
-	/*
-	 * Save the old state and preserve the invariant.
-	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
-	 * without RDMSR because Intel user code can zero it without telling
-	 * us and AMD user code can program any 32-bit value without telling
-	 * us.
-	 */
-	if (prev_fsindex)
-		prev->fsbase = 0;
-	prev->fsindex = prev_fsindex;
-
-	if (next->gsindex) {
-		/* Loading a nonzero value into GS sets the index and base. */
-		load_gs_index(next->gsindex);
-	} else {
-		if (next->gsbase) {
-			/* Next index is zero but next base is nonzero. */
-			if (prev_gsindex)
-				load_gs_index(0);
-			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
-		} else {
-			/* Next base and index are both zero. */
-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
-				/*
-				 * We don't know the previous base and can't
-				 * find out without RDMSR.  Forcibly clear it.
-				 *
-				 * This contains a pointless SWAPGS pair.
-				 * Fixing it would involve an explicit check
-				 * for Xen or a new pvop.
-				 */
-				load_gs_index(__USER_DS);
-				load_gs_index(0);
-			} else {
-				/*
-				 * If the previous index is zero and ARCH_SET_GS
-				 * didn't change the base, then the base is
-				 * also zero and we don't need to do anything.
-				 */
-				if (prev->gsbase || prev_gsindex)
-					load_gs_index(0);
-			}
-		}
-	}
-	/*
-	 * Save the old state and preserve the invariant.
-	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
-	 * without RDMSR because Intel user code can zero it without telling
-	 * us and AMD user code can program any 32-bit value without telling
-	 * us.
-	 */
-	if (prev_gsindex)
-		prev->gsbase = 0;
-	prev->gsindex = prev_gsindex;
+	load_seg_legacy(prev->fsindex, prev->fsbase,
+			next->fsindex, next->fsbase, FS);
+	load_seg_legacy(prev->gsindex, prev->gsbase,
+			next->gsindex, next->gsbase, GS);
 
 	switch_fpu_finish(next_fpu, fpu_switch);
 
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 649d8f2..91af75e 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -456,7 +456,7 @@
 			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
 			cpuid_mask(&entry->ecx, CPUID_7_ECX);
 			/* PKU is not yet implemented for shadow paging. */
-			if (!tdp_enabled)
+			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
 				entry->ecx &= ~F(PKU);
 		} else {
 			entry->ebx = 0;
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 4d2872f..a71d273 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -94,13 +94,11 @@
 }
 EXPORT_SYMBOL(__sync_fetch_and_or_4);
 
-#ifdef CONFIG_NET
 /*
  * Networking support
  */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif /* CONFIG_NET */
 
 /*
  * Architecture-specific symbols
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 1a804a2..3c75c4e 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -103,6 +103,7 @@
 	clear_page_alias(kvaddr, paddr);
 	preempt_enable();
 }
+EXPORT_SYMBOL(clear_user_highpage);
 
 void copy_user_highpage(struct page *dst, struct page *src,
 			unsigned long vaddr, struct vm_area_struct *vma)
@@ -119,10 +120,7 @@
 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
 	preempt_enable();
 }
-
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(copy_user_highpage);
 
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
@@ -176,7 +174,7 @@
 
 	/* There shouldn't be an entry in the cache for this page anymore. */
 }
-
+EXPORT_SYMBOL(flush_dcache_page);
 
 /*
  * For now, flush the whole cache. FIXME??
@@ -188,6 +186,7 @@
 	__flush_invalidate_dcache_all();
 	__invalidate_icache_all();
 }
+EXPORT_SYMBOL(local_flush_cache_range);
 
 /* 
  * Remove any entry in the cache for this page. 
@@ -207,8 +206,9 @@
 	__flush_invalidate_dcache_page_alias(virt, phys);
 	__invalidate_icache_page_alias(virt, phys);
 }
+EXPORT_SYMBOL(local_flush_cache_page);
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +225,7 @@
 
 	flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
@@ -256,7 +256,7 @@
  * flush_dcache_page() on the page.
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 966c216..ee9d3d9 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -36,12 +36,18 @@
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
 		mask = pci_irq_get_affinity(pdev, queue);
 		if (!mask)
-			return -EINVAL;
+			goto fallback;
 
 		for_each_cpu(cpu, mask)
 			set->mq_map[cpu] = queue;
 	}
 
 	return 0;
+
+fallback:
+	WARN_ON_ONCE(set->nr_hw_queues > 1);
+	for_each_possible_cpu(cpu)
+		set->mq_map[cpu] = 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 28556fc..45af0fe 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -86,8 +86,13 @@
 	}
 	sgl = sreq->tsg;
 	n = sg_nents(sgl);
-	for_each_sg(sgl, sg, n, i)
-		put_page(sg_page(sg));
+	for_each_sg(sgl, sg, n, i) {
+		struct page *page = sg_page(sg);
+
+		/* some SGs may not have a page mapped */
+		if (page && page_ref_count(page))
+			put_page(page);
+	}
 
 	kfree(sreq->tsg);
 }
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index e53bef6..0375c60 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1072,6 +1072,7 @@
 		if (list_empty(&ghes_sci))
 			unregister_acpi_hed_notifier(&ghes_notifier_sci);
 		mutex_unlock(&ghes_list_mutex);
+		synchronize_rcu();
 		break;
 	case ACPI_HEST_NOTIFY_NMI:
 		ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 79152db..5187469 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1728,7 +1728,7 @@
  * functioning ECDT EC first in order to handle the events.
  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
  */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
 {
 	acpi_handle handle;
 
@@ -1959,20 +1959,17 @@
 int __init acpi_ec_init(void)
 {
 	int result;
+	int ecdt_fail, dsdt_fail;
 
 	/* register workqueue for _Qxx evaluations */
 	result = acpi_ec_query_init();
 	if (result)
-		goto err_exit;
-	/* Now register the driver for the EC */
-	result = acpi_bus_register_driver(&acpi_ec_driver);
-	if (result)
-		goto err_exit;
+		return result;
 
-err_exit:
-	if (result)
-		acpi_ec_query_exit();
-	return result;
+	/* Drivers must be started after acpi_ec_query_init() */
+	ecdt_fail = acpi_ec_ecdt_start();
+	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
 /* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 219b90b..08b3ca0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 6d7ce6e..5e18ccf 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@
 	struct resource *res = data;
 	struct resource_win win;
 
+	/*
+	 * We might assign this to 'res' later; make sure all pointers are
+	 * cleared before the resource is added to the global list.
+	 */
+	memset(&win, 0, sizeof(win));
+
 	res->flags = 0;
 	if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
 		return AE_OK;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dd3786a..cf725d5 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2051,7 +2051,6 @@
 
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
-	acpi_ec_ecdt_start();
 
 	acpi_scan_initialized = true;
 
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1..01de42c 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@
 
 	  Note that enabling this will break newer Android user-space.
 
+config ANDROID_BINDER_IPC_SELFTEST
+	bool "Android Binder IPC Driver Selftest"
+	depends on ANDROID_BINDER_IPC
+	---help---
+	  This feature allows the binder selftest to run.
+
+	  The binder selftest exhaustively checks the allocation and freeing
+	  of binder buffers with combinations of various buffer sizes and
+	  alignments.
+
 endif # if ANDROID
 
 endmenu
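
Since the selftest is compile-time gated, a minimal config fragment to enable it (assuming the usual Kconfig/defconfig workflow) would be:

CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_ANDROID_BINDER_IPC_SELFTEST=y

Once built in, the test is triggered from binder_ioctl() (see the binder.c and binder_alloc_selftest.c hunks below), runs only once per boot after the binder mmap has been set up, and reports PASSED or the number of failed tests via pr_info().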
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726..a01254c 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
 ccflags-y += -I$(src)			# needed for trace events
 
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9f1a1bb..1ac8008 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2480,7 +2480,6 @@
 			     (u64)node->ptr);
 		binder_node_unlock(node);
 	} else {
-		int ret;
 		struct binder_ref_data dest_rdata;
 
 		binder_node_unlock(node);
@@ -3522,11 +3521,13 @@
 				BUG_ON(buf_node->proc != proc);
 				w = binder_dequeue_work_head_ilocked(
 						&buf_node->async_todo);
-				if (!w)
+				if (!w) {
 					buf_node->has_async_transaction = 0;
-				else
+				} else {
 					binder_enqueue_work_ilocked(
-							w, &thread->todo);
+							w, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+				}
 				binder_node_inner_unlock(buf_node);
 			}
 			trace_binder_transaction_buffer_release(buffer);
@@ -3670,22 +3671,12 @@
 				ref->death = death;
 				if (ref->node->proc == NULL) {
 					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-					if (thread->looper &
-					    (BINDER_LOOPER_STATE_REGISTERED |
-					     BINDER_LOOPER_STATE_ENTERED))
-						binder_enqueue_work(
-							proc,
-							&ref->death->work,
-							&thread->todo);
-					else {
-						binder_inner_proc_lock(proc);
-						binder_enqueue_work_ilocked(
-							&ref->death->work,
-							&proc->todo);
-						binder_wakeup_proc_ilocked(
-							proc);
-						binder_inner_proc_unlock(proc);
-					}
+
+					binder_inner_proc_lock(proc);
+					binder_enqueue_work_ilocked(
+						&ref->death->work, &proc->todo);
+					binder_wakeup_proc_ilocked(proc);
+					binder_inner_proc_unlock(proc);
 				}
 			} else {
 				if (ref->death == NULL) {
@@ -3802,12 +3793,6 @@
 	}
 }
 
-static int binder_has_thread_work(struct binder_thread *thread)
-{
-	return !binder_worklist_empty(thread->proc, &thread->todo) ||
-		thread->looper_need_return;
-}
-
 static int binder_put_node_cmd(struct binder_proc *proc,
 			       struct binder_thread *thread,
 			       void __user **ptrp,
@@ -4438,12 +4423,9 @@
 
 	binder_inner_proc_unlock(thread->proc);
 
-	if (binder_has_work(thread, wait_for_proc_work))
-		return POLLIN;
-
 	poll_wait(filp, &thread->wait, wait);
 
-	if (binder_has_thread_work(thread))
+	if (binder_has_work(thread, wait_for_proc_work))
 		return POLLIN;
 
 	return 0;
@@ -4597,6 +4579,8 @@
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 			proc->pid, current->pid, cmd, arg);*/
 
+	binder_selftest_alloc(&proc->alloc);
+
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5442,6 +5426,8 @@
 	count = binder_alloc_get_allocated_count(&proc->alloc);
 	seq_printf(m, "  buffers: %d\n", count);
 
+	binder_alloc_print_pages(m, &proc->alloc);
+
 	count = 0;
 	binder_inner_proc_lock(proc);
 	list_for_each_entry(w, &proc->todo, entry) {
@@ -5638,6 +5624,8 @@
 	struct binder_device *device;
 	struct hlist_node *tmp;
 
+	binder_alloc_shrinker_init();
+
 	atomic_set(&binder_transaction_log.cur, ~0U);
 	atomic_set(&binder_transaction_log_failed.cur, ~0U);
 
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index b90222a..e026894 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/list_lru.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
+struct list_lru binder_alloc_lru;
+
 static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 enum {
@@ -48,14 +51,23 @@
 			pr_info(x); \
 	} while (0)
 
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+	return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
 static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
 				       struct binder_buffer *buffer)
 {
 	if (list_is_last(&buffer->entry, &alloc->buffers))
-		return alloc->buffer +
-		       alloc->buffer_size - (void *)buffer->data;
-	return (size_t)list_entry(buffer->entry.next,
-			  struct binder_buffer, entry) - (size_t)buffer->data;
+		return (u8 *)alloc->buffer +
+			alloc->buffer_size - (u8 *)buffer->data;
+	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -105,9 +117,9 @@
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (new_buffer < buffer)
+		if (new_buffer->data < buffer->data)
 			p = &parent->rb_left;
-		else if (new_buffer > buffer)
+		else if (new_buffer->data > buffer->data)
 			p = &parent->rb_right;
 		else
 			BUG();
@@ -122,18 +134,17 @@
 {
 	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
-	struct binder_buffer *kern_ptr;
+	void *kern_ptr;
 
-	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
-		- offsetof(struct binder_buffer, data));
+	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (kern_ptr < buffer)
+		if (kern_ptr < buffer->data)
 			n = n->rb_left;
-		else if (kern_ptr > buffer)
+		else if (kern_ptr > buffer->data)
 			n = n->rb_right;
 		else {
 			/*
@@ -180,8 +191,9 @@
 {
 	void *page_addr;
 	unsigned long user_page_addr;
-	struct page **page;
-	struct mm_struct *mm;
+	struct binder_lru_page *page;
+	struct mm_struct *mm = NULL;
+	bool need_mm = false;
 
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -192,9 +204,18 @@
 
 	trace_binder_update_page_range(alloc, allocate, start, end);
 
-	if (vma)
-		mm = NULL;
-	else
+	if (allocate == 0)
+		goto free_range;
+
+	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		if (!page->page_ptr) {
+			need_mm = true;
+			break;
+		}
+	}
+
+	if (!vma && need_mm)
 		mm = get_task_mm(alloc->tsk);
 
 	if (mm) {
@@ -207,10 +228,7 @@
 		}
 	}
 
-	if (allocate == 0)
-		goto free_range;
-
-	if (vma == NULL) {
+	if (!vma && need_mm) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 			alloc->pid);
 		goto err_no_vma;
@@ -218,18 +236,40 @@
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
+		bool on_lru;
+		size_t index;
 
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
 
-		BUG_ON(*page);
-		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-		if (*page == NULL) {
+		if (page->page_ptr) {
+			trace_binder_alloc_lru_start(alloc, index);
+
+			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+			WARN_ON(!on_lru);
+
+			trace_binder_alloc_lru_end(alloc, index);
+			continue;
+		}
+
+		if (WARN_ON(!vma))
+			goto err_page_ptr_cleared;
+
+		trace_binder_alloc_page_start(alloc, index);
+		page->page_ptr = alloc_page(GFP_KERNEL |
+					    __GFP_HIGHMEM |
+					    __GFP_ZERO);
+		if (!page->page_ptr) {
 			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 				alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
+		page->alloc = alloc;
+		INIT_LIST_HEAD(&page->lru);
+
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
-					PAGE_SIZE, PAGE_KERNEL, page);
+					       PAGE_SIZE, PAGE_KERNEL,
+					       &page->page_ptr);
 		flush_cache_vmap((unsigned long)page_addr,
 				(unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {
@@ -239,12 +279,14 @@
 		}
 		user_page_addr =
 			(uintptr_t)page_addr + alloc->user_buffer_offset;
-		ret = vm_insert_page(vma, user_page_addr, page[0]);
+		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
 			       alloc->pid, user_page_addr);
 			goto err_vm_insert_page_failed;
 		}
+
+		trace_binder_alloc_page_end(alloc, index);
 		/* vm_insert_page does not seem to increment the refcount */
 	}
 	if (mm) {
@@ -256,16 +298,27 @@
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
-		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
-		if (vma)
-			zap_page_range(vma, (uintptr_t)page_addr +
-				alloc->user_buffer_offset, PAGE_SIZE, NULL);
+		bool ret;
+		size_t index;
+
+		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page = &alloc->pages[index];
+
+		trace_binder_free_lru_start(alloc, index);
+
+		ret = list_lru_add(&binder_alloc_lru, &page->lru);
+		WARN_ON(!ret);
+
+		trace_binder_free_lru_end(alloc, index);
+		continue;
+
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
-		__free_page(*page);
-		*page = NULL;
+		__free_page(page->page_ptr);
+		page->page_ptr = NULL;
 err_alloc_page_failed:
+err_page_ptr_cleared:
 		;
 	}
 err_no_vma:
@@ -321,6 +374,9 @@
 		return ERR_PTR(-ENOSPC);
 	}
 
+	/* Pad 0-size buffers so they get assigned unique addresses */
+	size = max(size, sizeof(void *));
+
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
@@ -380,14 +436,9 @@
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-	if (n == NULL) {
-		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-			buffer_size = size; /* no room for other buffers */
-		else
-			buffer_size = size + sizeof(struct binder_buffer);
-	}
+	WARN_ON(n && buffer_size != size);
 	end_page_addr =
-		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
 	ret = binder_update_page_range(alloc, 1,
@@ -395,17 +446,25 @@
 	if (ret)
 		return ERR_PTR(ret);
 
-	rb_erase(best_fit, &alloc->free_buffers);
-	buffer->free = 0;
-	buffer->free_in_progress = 0;
-	binder_insert_allocated_buffer_locked(alloc, buffer);
 	if (buffer_size != size) {
-		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+		struct binder_buffer *new_buffer;
 
+		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+		if (!new_buffer) {
+			pr_err("%s: %d failed to alloc new buffer struct\n",
+			       __func__, alloc->pid);
+			goto err_alloc_buf_struct_failed;
+		}
+		new_buffer->data = (u8 *)buffer->data + size;
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
 		binder_insert_free_buffer(alloc, new_buffer);
 	}
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
 	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      alloc->pid, size, buffer);
@@ -420,6 +479,12 @@
 			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
+
+err_alloc_buf_struct_failed:
+	binder_update_page_range(alloc, 0,
+				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+				 end_page_addr, NULL);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -454,57 +519,59 @@
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-	return (void *)((uintptr_t)buffer & PAGE_MASK);
+	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
 }
 
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
-	int free_page_end = 1;
-	int free_page_start = 1;
-
+	bool to_free = true;
 	BUG_ON(alloc->buffers.next == &buffer->entry);
-	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	prev = binder_buffer_prev(buffer);
 	BUG_ON(!prev->free);
-	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-		free_page_start = 0;
-		if (buffer_end_page(prev) == buffer_end_page(buffer))
-			free_page_end = 0;
+	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+		to_free = false;
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK share page with %pK\n",
-			      alloc->pid, buffer, prev);
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer->data, prev->data);
 	}
 
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
-		next = list_entry(buffer->entry.next,
-				  struct binder_buffer, entry);
-		if (buffer_start_page(next) == buffer_end_page(buffer)) {
-			free_page_end = 0;
-			if (buffer_start_page(next) ==
-			    buffer_start_page(buffer))
-				free_page_start = 0;
+		next = binder_buffer_next(buffer);
+		if (buffer_start_page(next) == buffer_start_page(buffer)) {
+			to_free = false;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %pK share page with %pK\n",
-				      alloc->pid, buffer, prev);
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid,
+					   buffer->data,
+					   next->data);
 		}
 	}
-	list_del(&buffer->entry);
-	if (free_page_start || free_page_end) {
+
+	if (PAGE_ALIGNED(buffer->data)) {
 		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
-			     alloc->pid, buffer, free_page_start ? "" : " end",
-			     free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(alloc, 0, free_page_start ?
-			buffer_start_page(buffer) : buffer_end_page(buffer),
-			(free_page_end ? buffer_end_page(buffer) :
-			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+				   "%d: merge free, buffer start %pK is page aligned\n",
+				   alloc->pid, buffer->data);
+		to_free = false;
 	}
+
+	if (to_free) {
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+				   alloc->pid, buffer->data,
+				   prev->data, next->data);
+		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+					 buffer_start_page(buffer) + PAGE_SIZE,
+					 NULL);
+	}
+	list_del(&buffer->entry);
+	kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +592,8 @@
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < alloc->buffer);
-	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+	BUG_ON(buffer->data < alloc->buffer);
+	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
 		alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -544,8 +611,7 @@
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
 	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
-		struct binder_buffer *next = list_entry(buffer->entry.next,
-						struct binder_buffer, entry);
+		struct binder_buffer *next = binder_buffer_next(buffer);
 
 		if (next->free) {
 			rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -553,8 +619,7 @@
 		}
 	}
 	if (alloc->buffers.next != &buffer->entry) {
-		struct binder_buffer *prev = list_entry(buffer->entry.prev,
-						struct binder_buffer, entry);
+		struct binder_buffer *prev = binder_buffer_prev(buffer);
 
 		if (prev->free) {
 			binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +705,14 @@
 	}
 	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	if (binder_update_page_range(alloc, 1, alloc->buffer,
-				     alloc->buffer + PAGE_SIZE, vma)) {
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
 		ret = -ENOMEM;
-		failure_string = "alloc small buf";
-		goto err_alloc_small_buf_failed;
+		failure_string = "alloc buffer struct";
+		goto err_alloc_buf_struct_failed;
 	}
-	buffer = alloc->buffer;
-	INIT_LIST_HEAD(&alloc->buffers);
+
+	buffer->data = alloc->buffer;
 	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
@@ -658,7 +723,7 @@
 
 	return 0;
 
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
 	kfree(alloc->pages);
 	alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -678,14 +743,13 @@
 {
 	struct rb_node *n;
 	int buffers, page_count;
+	struct binder_buffer *buffer;
 
 	BUG_ON(alloc->vma);
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
 	while ((n = rb_first(&alloc->allocated_buffers))) {
-		struct binder_buffer *buffer;
-
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 
 		/* Transaction should already have been freed */
@@ -695,22 +759,36 @@
 		buffers++;
 	}
 
+	while (!list_empty(&alloc->buffers)) {
+		buffer = list_first_entry(&alloc->buffers,
+					  struct binder_buffer, entry);
+		WARN_ON(!buffer->free);
+
+		list_del(&buffer->entry);
+		WARN_ON_ONCE(!list_empty(&alloc->buffers));
+		kfree(buffer);
+	}
+
 	page_count = 0;
 	if (alloc->pages) {
 		int i;
 
 		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
 			void *page_addr;
+			bool on_lru;
 
-			if (!alloc->pages[i])
+			if (!alloc->pages[i].page_ptr)
 				continue;
 
+			on_lru = list_lru_del(&binder_alloc_lru,
+					      &alloc->pages[i].lru);
 			page_addr = alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %pK not freed\n",
-				     __func__, alloc->pid, i, page_addr);
+				     "%s: %d: page %d at %pK %s\n",
+				     __func__, alloc->pid, i, page_addr,
+				     on_lru ? "on lru" : "active");
 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(alloc->pages[i]);
+			__free_page(alloc->pages[i].page_ptr);
 			page_count++;
 		}
 		kfree(alloc->pages);
@@ -754,6 +832,34 @@
 }
 
 /**
+ * binder_alloc_print_pages() - print page usage
+ * @m:     seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc)
+{
+	struct binder_lru_page *page;
+	int i;
+	int active = 0;
+	int lru = 0;
+	int free = 0;
+
+	mutex_lock(&alloc->mutex);
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
+	}
+	mutex_unlock(&alloc->mutex);
+	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
  * binder_alloc_get_allocated_count() - return count of buffers
  * @alloc: binder_alloc for this proc
  *
@@ -787,6 +893,108 @@
 }
 
 /**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item:   item to free
+ * @lru:    list_lru_one that the item is on
+ * @lock:   lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock,
+				       void *cb_arg)
+{
+	struct mm_struct *mm = NULL;
+	struct binder_lru_page *page = container_of(item,
+						    struct binder_lru_page,
+						    lru);
+	struct binder_alloc *alloc;
+	uintptr_t page_addr;
+	size_t index;
+	struct vm_area_struct *vma;
+
+	alloc = page->alloc;
+	if (!mutex_trylock(&alloc->mutex))
+		goto err_get_alloc_mutex_failed;
+
+	if (!page->page_ptr)
+		goto err_page_already_freed;
+
+	index = page - alloc->pages;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+	vma = alloc->vma;
+	if (vma) {
+		mm = get_task_mm(alloc->tsk);
+		if (!mm)
+			goto err_get_task_mm_failed;
+		if (!down_write_trylock(&mm->mmap_sem))
+			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
+
+	if (vma) {
+		trace_binder_unmap_user_start(alloc, index);
+
+		zap_page_range(vma,
+			       page_addr +
+			       alloc->user_buffer_offset,
+			       PAGE_SIZE, NULL);
+
+		trace_binder_unmap_user_end(alloc, index);
+
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+
+	trace_binder_unmap_kernel_start(alloc, index);
+
+	unmap_kernel_range(page_addr, PAGE_SIZE);
+	__free_page(page->page_ptr);
+	page->page_ptr = NULL;
+
+	trace_binder_unmap_kernel_end(alloc, index);
+
+	spin_lock(lock);
+	mutex_unlock(&alloc->mutex);
+	return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+	mmput_async(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+	mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+	return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret = list_lru_count(&binder_alloc_lru);
+	return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			    NULL, sc->nr_to_scan);
+	return ret;
+}
+
+struct shrinker binder_shrinker = {
+	.count_objects = binder_shrink_count,
+	.scan_objects = binder_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
+
+/**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
  * @alloc: binder_alloc for this proc
  *
@@ -798,5 +1006,11 @@
 	alloc->tsk = current->group_leader;
 	alloc->pid = current->group_leader->pid;
 	mutex_init(&alloc->mutex);
+	INIT_LIST_HEAD(&alloc->buffers);
 }
 
+void binder_alloc_shrinker_init(void)
+{
+	list_lru_init(&binder_alloc_lru);
+	register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 088e4ff..a3a3602 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
 #include <linux/rtmutex.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/list_lru.h>
 
+extern struct list_lru binder_alloc_lru;
 struct binder_transaction;
 
 /**
@@ -57,7 +59,19 @@
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	uint8_t data[0];
+	void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+	struct list_head lru;
+	struct page *page_ptr;
+	struct binder_alloc *alloc;
 };
 
 /**
@@ -75,8 +89,7 @@
  * @allocated_buffers:  rb tree of allocated buffers sorted by address
  * @free_async_space:   VA space available for async buffers. This is
  *                      initialized at mmap time to 1/2 the full VA space
- * @pages:              array of physical page addresses for each
- *                      page of mmap'd space
+ * @pages:              array of binder_lru_page
  * @buffer_size:        size of address space specified via mmap
  * @pid:                pid for associated binder_proc (invariant after init)
  *
@@ -96,18 +109,27 @@
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
 	size_t free_async_space;
-	struct page **pages;
+	struct binder_lru_page *pages;
 	size_t buffer_size;
 	uint32_t buffer_free;
 	int pid;
 };
 
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       struct list_lru_one *lru,
+				       spinlock_t *lock, void *cb_arg);
 extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 						  size_t data_size,
 						  size_t offsets_size,
 						  size_t extra_buffers_size,
 						  int is_async);
 extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
 extern void binder_alloc_vma_close(struct binder_alloc *alloc);
 extern struct binder_buffer *
 binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -120,6 +142,8 @@
 extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
 extern void binder_alloc_print_allocated(struct seq_file *m,
 					 struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc);
 
 /**
  * binder_alloc_get_free_async_space() - get free space available for async
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 0000000..8bd7bce
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning; buf1 refers to the previous buffer by address.
+ * The symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means a page boundary.
+ */
+enum buf_end_align_type {
+	/**
+	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 ][ ...
+	 * buf1 ]|[ buf2 ][ ...
+	 */
+	SAME_PAGE_UNALIGNED = 0,
+	/**
+	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+	 * is not page aligned, the end of this buffer is on the
+	 * same page as the end of the previous buffer and is page
+	 * aligned. When the previous buffer is page aligned, the
+	 * end of this buffer is aligned to the next page boundary.
+	 * Examples:
+	 * buf1 ][ buf2 ]| ...
+	 * buf1 ]|[ buf2 ]| ...
+	 */
+	SAME_PAGE_ALIGNED,
+	/**
+	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 ][ ...
+	 */
+	NEXT_PAGE_UNALIGNED,
+	/**
+	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ]| ...
+	 * buf1 ]|[ buf2 | buf2 ]| ...
+	 */
+	NEXT_PAGE_ALIGNED,
+	/**
+	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+	 * the page that follows the page after the end of the
+	 * previous buffer and is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+	 */
+	NEXT_NEXT_UNALIGNED,
+	LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+	int i;
+
+	pr_err("alloc sizes: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%zu]", sizes[i]);
+	pr_cont("\n");
+	pr_err("free seq: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%d]", seq[i]);
+	pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 size_t size)
+{
+	void *page_addr, *end;
+	int page_index;
+
+	end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+	page_addr = buffer->data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
+		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		if (!alloc->pages[page_index].page_ptr ||
+		    !list_empty(&alloc->pages[page_index].lru)) {
+			pr_err("expect alloc but is %s at page index %d\n",
+			       alloc->pages[page_index].page_ptr ?
+			       "lru" : "free", page_index);
+			return false;
+		}
+	}
+	return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+				      struct binder_buffer *buffers[],
+				      size_t *sizes, int *seq)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+		if (IS_ERR(buffers[i]) ||
+		    !check_buffer_pages_allocated(alloc, buffers[i],
+						  sizes[i])) {
+			pr_err_size_seq(sizes, seq);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+				     struct binder_buffer *buffers[],
+				     size_t *sizes, int *seq, size_t end)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+	for (i = 0; i < end / PAGE_SIZE; i++) {
+		/*
+		 * An error message on a free page can be a false positive
+		 * if the binder shrinker ran during the binder_alloc_free_buf
+		 * calls above.
+		 */
+		if (list_empty(&alloc->pages[i].lru)) {
+			pr_err_size_seq(sizes, seq);
+			pr_err("expect lru but is %s at page index %d\n",
+			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+	int i;
+	unsigned long count;
+
+	while ((count = list_lru_count(&binder_alloc_lru))) {
+		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			      NULL, count);
+	}
+
+	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+		if (alloc->pages[i].page_ptr) {
+			pr_err("expect free but is %s at page index %d\n",
+			       list_empty(&alloc->pages[i].lru) ?
+			       "alloc" : "lru", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+				       size_t *sizes, int *seq, size_t end)
+{
+	struct binder_buffer *buffers[BUFFER_NUM];
+
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+	/* Allocate from lru. */
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	if (list_lru_count(&binder_alloc_lru))
+		pr_err("lru list should be empty but is not\n");
+
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+	binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+	int i;
+
+	for (i = 0; i < index; i++) {
+		if (seq[i] == val)
+			return true;
+	}
+	return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+				     size_t *sizes, int *seq,
+				     int index, size_t end)
+{
+	int i;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_free(alloc, sizes, seq, end);
+		return;
+	}
+	for (i = 0; i < BUFFER_NUM; i++) {
+		if (is_dup(seq, index, i))
+			continue;
+		seq[index] = i;
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+	}
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+				       size_t *end_offset)
+{
+	int i;
+	int seq[BUFFER_NUM] = {0};
+	size_t front_sizes[BUFFER_NUM];
+	size_t back_sizes[BUFFER_NUM];
+	size_t last_offset, offset = 0;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		last_offset = offset;
+		offset = end_offset[i];
+		front_sizes[i] = offset - last_offset;
+		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+	}
+	/*
+	 * Buffers share the first or last few pages.
+	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+	 * we need one giant buffer before getting to the last page.
+	 */
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+				 end_offset[BUFFER_NUM - 1]);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+					 size_t *end_offset, int index)
+{
+	int align;
+	size_t end, prev;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_size(alloc, end_offset);
+		return;
+	}
+	prev = index == 0 ? 0 : end_offset[index - 1];
+	end = prev;
+
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+		if (align % 2)
+			end = ALIGN(end, PAGE_SIZE);
+		else
+			end += BUFFER_MIN_SIZE;
+		end_offset[index] = end;
+		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+	}
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+	size_t end_offset[BUFFER_NUM];
+
+	if (!binder_selftest_run)
+		return;
+	mutex_lock(&binder_selftest_lock);
+	if (!binder_selftest_run || !alloc->vma)
+		goto done;
+	pr_info("STARTED\n");
+	binder_selftest_alloc_offset(alloc, end_offset, 0);
+	binder_selftest_run = false;
+	if (binder_selftest_failures > 0)
+		pr_info("%d tests FAILED\n", binder_selftest_failures);
+	else
+		pr_info("PASSED\n");
+
+done:
+	mutex_unlock(&binder_selftest_lock);
+}
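
To put a number on the exhaustiveness claimed above: binder_selftest_alloc_offset() tries 5 alignment cases for each of the BUFFER_NUM = 5 buffer ends, i.e. 5^5 = 3125 end-offset layouts; every layout is exercised with two size vectors (front_sizes and back_sizes) and all 5! = 120 free orders, so one run performs 3125 * 2 * 120 = 750,000 calls to binder_selftest_alloc_free(), each of which does two allocate/free passes (once from freshly allocated pages and once reusing pages parked on the lru).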
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7967db1..76e3b9c 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -291,6 +291,61 @@
 		  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index),
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(size_t, page_index)
+	),
+	TP_fast_assign(
+		__entry->proc = alloc->pid;
+		__entry->page_index = page_index;
+	),
+	TP_printk("proc=%d page_index=%zu",
+		  __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
 TRACE_EVENT(binder_command,
 	TP_PROTO(uint32_t cmd),
 	TP_ARGS(cmd),
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 8e575fb..e3e10e8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2971,10 +2971,12 @@
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 	if (!sata_pmp_attached(ap)) {
-		if (likely(devno < ata_link_max_devices(&ap->link)))
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
-		if (likely(devno < ap->nr_pmp_links))
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
 			return &ap->pmp_link[devno].device[0];
 	}
 
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959..8706533 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE),	9 },
 
 	{ },
 };
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a55..dc12552 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@
 
 static const struct pci_device_id cs5536[] = {
 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE), },
+	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
 	{ },
 };
 
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 6470eb8..e32a74e 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -736,7 +736,7 @@
 
 out_unregister:
 	kobject_put(&priv->kobj);
-	kfree(drv->p);
+	/* drv->p is freed in driver_release()  */
 	drv->p = NULL;
 out_put_bus:
 	bus_put(bus);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b0beb52..914433f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -958,7 +958,7 @@
 		timeout = MAX_JIFFY_OFFSET;
 	}
 
-	timeout = wait_for_completion_interruptible_timeout(&buf->completion,
+	timeout = wait_for_completion_killable_timeout(&buf->completion,
 			timeout);
 	if (timeout == -ERESTARTSYS || !timeout) {
 		retval = timeout;
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 43a36d6..06f6668 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -182,11 +182,12 @@
 	return 0;
 }
 
-static inline struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(struct device *dev)
 {
 	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
 		&dev->of_node->fwnode : dev->fwnode;
 }
+EXPORT_SYMBOL_GPL(dev_fwnode);
 
 /**
  * device_property_present - check if a property of a device is present
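
With dev_fwnode() un-inlined and exported above, modular drivers can look up the firmware node backing a struct device whether it came from devicetree or was assigned directly. A hedged usage sketch; the property name is made up for illustration:

	struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
	u32 val;

	/* the same call works for OF- and non-OF-backed devices */
	if (fwnode && !fwnode_property_read_u32(fwnode, "example-prop", &val))
		dev_info(&pdev->dev, "example-prop = %u\n", val);
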
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c9441f9..98b767d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -929,6 +929,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < nbds_max; i++) {
+		struct request_queue *q;
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
 			goto out;
@@ -954,12 +955,13 @@
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-		if (!disk->queue) {
+		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+		if (IS_ERR(q)) {
 			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
 			put_disk(disk);
 			goto out;
 		}
+		disk->queue = q;
 
 		/*
 		 * Tell the block layer that we are not a rotational device
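
The nbd hunk above (and the virtio_blk change just below) correct the same mistake: blk_mq_init_queue() reports failure with an ERR_PTR(), never NULL, so its result has to go through IS_ERR() before being published in disk->queue. A reduced sketch of the corrected pattern; the labels and tag_set name are placeholders:

	struct request_queue *q;

	q = blk_mq_init_queue(&tag_set);
	if (IS_ERR(q)) {			/* a NULL check would miss this */
		err = PTR_ERR(q);
		blk_mq_free_tag_set(&tag_set);
		goto out_put_disk;		/* placeholder error path */
	}
	disk->queue = q;			/* assign only a valid queue */
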
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 3c3b8f6..10332c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -630,11 +630,12 @@
 	if (err)
 		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+	q = blk_mq_init_queue(&vblk->tag_set);
 	if (IS_ERR(q)) {
 		err = -ENOMEM;
 		goto out_free_tags;
 	}
+	vblk->disk->queue = q;
 
 	q->queuedata = vblk;
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9908597..f11d62d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2112,9 +2112,9 @@
 			/*
 			 * Get the bios in the request so we can re-queue them.
 			 */
-			if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-			    req_op(shadow[i].request) == REQ_OP_DISCARD ||
-			    req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+			if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+			    req_op(shadow[j].request) == REQ_OP_DISCARD ||
+			    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
 			    shadow[j].request->cmd_flags & REQ_FUA) {
 				/*
 				 * Flush operations don't contain bios, so
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index dd220fa..74e677a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -342,6 +342,7 @@
 	{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
 
 	/* Additional Realtek 8821AE Bluetooth devices */
 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 874639f..89bf7c6 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1822,18 +1822,62 @@
 {
 }
 
+static int fastrpc_search_ctx(uint64_t rctx)
+{
+	struct fastrpc_apps *me = &gfa;
+	struct fastrpc_file *fl;
+	struct hlist_node *n, *m;
+	struct smq_invoke_ctx *ictx = NULL;
+	struct smq_invoke_ctx *ctx;
+	int bfound = 0;
+
+	ctx = (struct smq_invoke_ctx *)(uint64_to_ptr(rctx));
+	if (!ctx)
+		return bfound;
+
+	spin_lock(&me->hlock);
+	hlist_for_each_entry_safe(fl, n, &me->drivers, hn) {
+		if (ctx->fl != fl)
+			continue;
+		spin_lock(&fl->hlock);
+		hlist_for_each_entry_safe(ictx, m, &fl->clst.pending, hn) {
+			if (ptr_to_uint64(ictx) == rctx) {
+				bfound = 1;
+				break;
+			}
+		}
+		hlist_for_each_entry_safe(ictx, m, &fl->clst.interrupted, hn) {
+			if (ptr_to_uint64(ictx) == rctx) {
+				bfound = 1;
+				break;
+			}
+		}
+		spin_unlock(&fl->hlock);
+		if (bfound)
+			break;
+	}
+	spin_unlock(&me->hlock);
+	return bfound;
+}
+
 void fastrpc_glink_notify_rx(void *handle, const void *priv,
 	const void *pkt_priv, const void *ptr, size_t size)
 {
 	struct smq_invoke_rsp *rsp = (struct smq_invoke_rsp *)ptr;
-	int len = size;
+	int bfound = 0;
 
-	while (len >= sizeof(*rsp) && rsp) {
-		rsp->ctx = rsp->ctx & ~1;
-		context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
-		rsp++;
-		len = len - sizeof(*rsp);
+	if (!rsp || (size < sizeof(*rsp)))
+		goto bail;
+
+	bfound = fastrpc_search_ctx((uint64_t)(rsp->ctx & ~1));
+	if (!bfound) {
+		pr_err("adsprpc: invalid context %pK\n", (void *)rsp->ctx);
+		goto bail;
 	}
+
+	rsp->ctx = rsp->ctx & ~1;
+	context_notify_user(uint64_to_ptr(rsp->ctx), rsp->retval);
+bail:
 	glink_rx_done(handle, ptr, true);
 }
 
@@ -1899,6 +1943,8 @@
 		return 0;
 	cid = fl->cid;
 
+	(void)fastrpc_release_current_dsp_process(fl);
+
 	spin_lock(&fl->apps->hlock);
 	hlist_del_init(&fl->hn);
 	spin_unlock(&fl->apps->hlock);
@@ -1907,7 +1953,6 @@
 		kfree(fl);
 		return 0;
 	}
-	(void)fastrpc_release_current_dsp_process(fl);
 	spin_lock(&fl->hlock);
 	fl->file_close = 1;
 	spin_unlock(&fl->hlock);
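
The adsprpc changes above stop trusting the context pointer echoed back over glink: it is dereferenced only after it has been matched against the pending and interrupted lists this driver owns, and the DSP process is now released before the file state is torn down. A reduced model of that validation with hypothetical type, list and lock names:

	static bool ctx_is_ours(struct list_head *pending, u64 rctx)
	{
		struct example_ctx *c;		/* hypothetical context type */
		bool found = false;

		spin_lock(&example_lock);	/* hypothetical lock */
		list_for_each_entry(c, pending, node) {
			if ((u64)(uintptr_t)c == rctx) {
				found = true;	/* a context we queued ourselves */
				break;
			}
		}
		spin_unlock(&example_lock);
		return found;			/* caller drops unknown handles */
	}
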
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index d734e29..ee76d39 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -1982,7 +1982,8 @@
 
 void diag_send_updates_peripheral(uint8_t peripheral)
 {
-	diag_send_feature_mask_update(peripheral);
+	if (!driver->feature[peripheral].sent_feature_mask)
+		diag_send_feature_mask_update(peripheral);
 	/*
 	 * Masks (F3, logs and events) will be sent to
 	 * peripheral immediately following feature mask update only
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 354c6a0..710271e 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -765,21 +765,17 @@
 			fwd_info_data->diagid_root = ctrl_pkt.diag_id;
 		} else {
 			i = fwd_info_cmd->num_pd - 2;
-			if (i >= 0)
+			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
 				fwd_info_cmd->diagid_user[i] =
 				ctrl_pkt.diag_id;
 
 			i = fwd_info_data->num_pd - 2;
-			if (i >= 0)
+			if (i >= 0 && i < MAX_PERIPHERAL_UPD)
 				fwd_info_data->diagid_user[i] =
 				ctrl_pkt.diag_id;
 		}
 	}
 
-	if (root_str)
-		driver->diag_id_sent[peripheral] = 0;
-
-
 	DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"diag: peripheral = %d: diag_id string = %s,diag_id = %d\n",
 		peripheral, process_name, ctrl_pkt.diag_id);
@@ -796,22 +792,24 @@
 		pr_err("diag: Unable to send diag id ctrl packet to peripheral %d, err: %d\n",
 		       peripheral, err);
 	} else {
-	/*
-	 * Masks (F3, logs and events) will be sent to
-	 * peripheral immediately following feature mask update only
-	 * if diag_id support is not present or
-	 * diag_id support is present and diag_id has been sent to
-	 * peripheral.
-	 * With diag_id being sent now, mask will be updated
-	 * to peripherals.
-	 */
-		driver->diag_id_sent[peripheral] = 1;
+		/*
+		 * Masks (F3, logs and events) will be sent to
+		 * peripheral immediately following feature mask update only
+		 * if diag_id support is not present or
+		 * diag_id support is present and diag_id has been sent to
+		 * peripheral.
+		 * With diag_id being sent now, mask will be updated
+		 * to peripherals.
+		 */
+		if (root_str) {
+			driver->diag_id_sent[peripheral] = 1;
+			diag_send_updates_peripheral(peripheral);
+		}
+		diagfwd_buffers_init(fwd_info_data);
 		DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
 		"diag: diag_id sent = %d to peripheral = %d with diag_id = %d for %s :\n",
 			driver->diag_id_sent[peripheral], peripheral,
 			ctrl_pkt.diag_id, process_name);
-		diag_send_updates_peripheral(peripheral);
-		diagfwd_buffers_init(fwd_info_data);
 	}
 }
 
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 9f7f699..e00a61d 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -497,6 +497,19 @@
 					fwd_info->upd_len[i][1] = 0;
 			}
 		}
+
+		if (flag_buf_1) {
+			fwd_info->cpd_len_1 = len_cpd;
+			for (i = 0; i <= (fwd_info->num_pd - 2); i++)
+				if (fwd_info->type == TYPE_DATA)
+					fwd_info->upd_len[i][0] = len_upd[i];
+		} else if (flag_buf_2) {
+			fwd_info->cpd_len_2 = len_cpd;
+			for (i = 0; i <= (fwd_info->num_pd - 2); i++)
+				if (fwd_info->type == TYPE_DATA)
+					fwd_info->upd_len[i][1] = len_upd[i];
+		}
+
 		if (len_cpd) {
 			if (flag_buf_1)
 				fwd_info->cpd_len_1 = len_cpd;
@@ -1281,7 +1294,7 @@
 
 void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
 {
-	int i = 0;
+	int i = 0, upd_valid_len = 0;
 	struct diagfwd_info *fwd_info = NULL;
 
 	if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
@@ -1293,12 +1306,26 @@
 
 	if (ctxt == 1 && fwd_info->buf_1) {
 		/* Buffer 1 for core PD is freed */
-		atomic_set(&fwd_info->buf_1->in_busy, 0);
 		fwd_info->cpd_len_1 = 0;
+		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+			if (fwd_info->upd_len[i][0]) {
+				upd_valid_len = 1;
+				break;
+			}
+		}
+		if (!upd_valid_len)
+			atomic_set(&fwd_info->buf_1->in_busy, 0);
 	} else if (ctxt == 2 && fwd_info->buf_2) {
 		/* Buffer 2 for core PD is freed */
-		atomic_set(&fwd_info->buf_2->in_busy, 0);
 		fwd_info->cpd_len_2 = 0;
+		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
+			if (fwd_info->upd_len[i][1]) {
+				upd_valid_len = 1;
+				break;
+			}
+		}
+		if (!upd_valid_len)
+			atomic_set(&fwd_info->buf_2->in_busy, 0);
 	} else if (ctxt >= 3 && (ctxt % 2)) {
 		for (i = 0; i <= (fwd_info->num_pd - 2); i++) {
 			if (fwd_info->buf_upd[i][0]) {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5638333..4f2fb77 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2712,9 +2712,10 @@
 	struct clk_core *core;
 	int cnt = 0;
 
-	clock_debug_output(s, 0, "Enabled clocks:\n");
+	if (!mutex_trylock(&clk_debug_lock))
+		return;
 
-	mutex_lock(&clk_debug_lock);
+	clock_debug_output(s, 0, "Enabled clocks:\n");
 
 	hlist_for_each_entry(core, &clk_debug_list, debug_node)
 		cnt += clock_debug_print_clock(core, s);
@@ -3037,14 +3038,19 @@
 
 /*
  * Print the names of all enabled clocks and their parents if
- * debug_suspend is set from debugfs.
+ * debug_suspend is set from debugfs and print_parent is set to true.
+ * Otherwise, when print_parent is false, only the enabled clocks are
+ * printed, without their parents.
  */
-void clock_debug_print_enabled(void)
+void clock_debug_print_enabled(bool print_parent)
 {
 	if (likely(!debug_suspend))
 		return;
 
-	clock_debug_print_enabled_debug_suspend(NULL);
+	if (print_parent)
+		clock_debug_print_enabled_clocks(NULL);
+	else
+		clock_debug_print_enabled_debug_suspend(NULL);
 }
 EXPORT_SYMBOL_GPL(clock_debug_print_enabled);
 
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index b52aa25..a7d0981 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -23,7 +23,7 @@
 void __clk_free_clk(struct clk *clk);
 
 /* Debugfs API to print the enabled clocks */
-void clock_debug_print_enabled(void);
+void clock_debug_print_enabled(bool print_parent);
 void clk_debug_print_hw(struct clk_core *clk, struct seq_file *f);
 
 #else
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index 3819959..62ae8c5 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -442,7 +442,7 @@
 	.mnd_width = 0,
 	.hid_width = 5,
 	.parent_map = cam_cc_parent_map_0,
-	.freq_tbl = NULL,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_csi3phytimer_clk_src",
 		.parent_names = cam_cc_parent_names_0,
diff --git a/drivers/clk/qcom/clk-aop-qmp.c b/drivers/clk/qcom/clk-aop-qmp.c
index ff229fb..e2bfe42 100644
--- a/drivers/clk/qcom/clk-aop-qmp.c
+++ b/drivers/clk/qcom/clk-aop-qmp.c
@@ -139,6 +139,12 @@
 	struct clk_aop_qmp *clk = to_aop_qmp_clk(hw);
 
 	mutex_lock(&clk_aop_lock);
+	/*
+	 * Return early if the clock has been enabled already. This
+	 * is to avoid issues with sending duplicate enable requests.
+	 */
+	if (clk->enabled)
+		goto err;
 
 	if (clk->level)
 		rate = clk->level;
@@ -179,6 +185,9 @@
 
 	mutex_lock(&clk_aop_lock);
 
+	if (!clk->enabled)
+		goto err;
+
 	rate = clk->disable_state;
 
 	snprintf(mbox_msg, MAX_LEN, "{class: %s, res: %s, val: %ld}",
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 7f56fb6..60758b4 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -23,8 +23,6 @@
 	u8 pre_div;
 	u16 m;
 	u16 n;
-	unsigned long src_freq;
-#define FIXED_FREQ_SRC   0
 };
 
 /**
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 7382cfa..8d5e527 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -281,10 +281,9 @@
 		const struct freq_tbl *f, struct clk_rate_request *req)
 {
 	unsigned long clk_flags, rate = req->rate;
-	struct clk_rate_request parent_req = { };
 	struct clk_hw *p;
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-	int index, ret = 0;
+	int index;
 
 	f = qcom_find_freq(f, rate);
 	if (!f)
@@ -315,21 +314,6 @@
 	req->best_parent_rate = rate;
 	req->rate = f->freq;
 
-	if (f->src_freq != FIXED_FREQ_SRC) {
-		rate = parent_req.rate = f->src_freq;
-		parent_req.best_parent_hw = p;
-		ret = __clk_determine_rate(p, &parent_req);
-		if (ret)
-			return ret;
-
-		ret = clk_set_rate(p->clk, parent_req.rate);
-		if (ret) {
-			pr_err("Failed set rate(%lu) on parent for non-fixed source\n",
-							parent_req.rate);
-			return ret;
-		}
-	}
-
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index ef1da5c..be84b46 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -117,6 +117,7 @@
 	"gcc_aggre_ufs_phy_axi_clk",
 	"gcc_aggre_usb3_prim_axi_clk",
 	"gcc_aggre_usb3_sec_axi_clk",
+	"gcc_apc_vs_clk",
 	"gcc_boot_rom_ahb_clk",
 	"gcc_camera_ahb_clk",
 	"gcc_camera_axi_clk",
@@ -144,12 +145,14 @@
 	"gcc_gpu_gpll0_div_clk_src",
 	"gcc_gpu_memnoc_gfx_clk",
 	"gcc_gpu_snoc_dvm_gfx_clk",
+	"gcc_gpu_vs_clk",
 	"gcc_mss_axis2_clk",
 	"gcc_mss_cfg_ahb_clk",
 	"gcc_mss_gpll0_div_clk_src",
 	"gcc_mss_mfab_axis_clk",
 	"gcc_mss_q6_memnoc_axi_clk",
 	"gcc_mss_snoc_axi_clk",
+	"gcc_mss_vs_clk",
 	"gcc_pcie_0_aux_clk",
 	"gcc_pcie_0_cfg_ahb_clk",
 	"gcc_pcie_0_mstr_axi_clk",
@@ -232,9 +235,14 @@
 	"gcc_usb3_sec_phy_com_aux_clk",
 	"gcc_usb3_sec_phy_pipe_clk",
 	"gcc_usb_phy_cfg_ahb2phy_clk",
+	"gcc_vdda_vs_clk",
+	"gcc_vddcx_vs_clk",
+	"gcc_vddmx_vs_clk",
 	"gcc_video_ahb_clk",
 	"gcc_video_axi_clk",
 	"gcc_video_xo_clk",
+	"gcc_vs_ctrl_ahb_clk",
+	"gcc_vs_ctrl_clk",
 	"gcc_sdcc1_ahb_clk",
 	"gcc_sdcc1_apps_clk",
 	"gcc_sdcc1_ice_core_clk",
@@ -452,6 +460,8 @@
 			0x11B, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_usb3_sec_axi_clk", 0x11C, 4, GCC,
 			0x11C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_apc_vs_clk", 0x113, 4, GCC,
+			0x113, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_boot_rom_ahb_clk", 0x94, 4, GCC,
 			0x94, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_camera_ahb_clk", 0x3A, 4, GCC,
@@ -506,6 +516,8 @@
 			0x145, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_gpu_snoc_dvm_gfx_clk", 0x147, 4, GCC,
 			0x147, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_gpu_vs_clk", 0x112, 4, GCC,
+			0x112, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_axis2_clk", 0x12F, 4, GCC,
 			0x12F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_cfg_ahb_clk", 0x12D, 4, GCC,
@@ -518,6 +530,8 @@
 			0x135, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_mss_snoc_axi_clk", 0x134, 4, GCC,
 			0x134, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_mss_vs_clk", 0x111, 4, GCC,
+			0x111, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_pcie_0_aux_clk", 0xE5, 4, GCC,
 			0xE5, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_pcie_0_cfg_ahb_clk", 0xE4, 4, GCC,
@@ -682,12 +696,22 @@
 			0x6A, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_usb_phy_cfg_ahb2phy_clk", 0x6F, 4, GCC,
 			0x6F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vdda_vs_clk", 0x10E, 4, GCC,
+			0x10E, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vddcx_vs_clk", 0x10C, 4, GCC,
+			0x10C, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vddmx_vs_clk", 0x10D, 4, GCC,
+			0x10D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_ahb_clk", 0x39, 4, GCC,
 			0x39, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_axi_clk", 0x3F, 4, GCC,
 			0x3F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_video_xo_clk", 0x42, 4, GCC,
 			0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vs_ctrl_ahb_clk", 0x110, 4, GCC,
+			0x110, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "gcc_vs_ctrl_clk", 0x10F, 4, GCC,
+			0x10F, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_sdcc1_ahb_clk", 0x15C, 4, GCC,
 			0x42, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_sdcc1_apps_clk", 0x15B, 4, GCC,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index d57bf5f..b256157 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -40,8 +40,6 @@
 #define DISP_CC_MISC_CMD	0x8000
 
 #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-#define F_SLEW(f, s, h, m, n, src_freq) { (f), (s), (2 * (h) - 1), (m), (n), \
-					(src_freq) }
 
 static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
 
@@ -385,6 +383,20 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src_sdm670[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(85714286, P_GPLL0_OUT_MAIN, 7, 0, 0),
+	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+	F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+	F(171428571, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+	F(286670000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+	F(344000000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+	F(430000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
 	.cmd_rcgr = 0x2088,
 	.mnd_width = 0,
@@ -1036,9 +1048,9 @@
 	disp_cc_mdss_byte1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
 		358000000;
 	disp_cc_mdss_dp_pixel1_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
-		337500000;
+		337500;
 	disp_cc_mdss_dp_pixel_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] =
-		337500000;
+		337500;
 	disp_cc_mdss_mdp_clk_src.freq_tbl =
 		ftbl_disp_cc_mdss_mdp_clk_src_sdm845_v2;
 	disp_cc_mdss_mdp_clk_src.clkr.hw.init->rate_max[VDD_CX_LOWER] =
@@ -1068,6 +1080,9 @@
 static void disp_cc_sdm845_fixup_sdm670(struct regmap *regmap)
 {
 	disp_cc_sdm845_fixup_sdm845v2(regmap);
+
+	disp_cc_mdss_mdp_clk_src.freq_tbl =
+		ftbl_disp_cc_mdss_mdp_clk_src_sdm670;
 }
 
 static int disp_cc_sdm845_fixup(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index eb6c658..1cd6c9c 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -665,7 +665,6 @@
 
 static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
 {
-	dsi_pll_disable_global_clk(rsc);
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
 	dsi_pll_disable_pll_bias(rsc);
 }
@@ -684,11 +683,20 @@
 
 	pr_debug("stop PLL (%d)\n", rsc->index);
 
+	/*
+	 * To avoid any stray glitches while
+	 * abruptly powering down the PLL,
+	 * make sure to gate the clock using
+	 * the clock enable bit before the
+	 * PLL itself is powered down.
+	 */
+	dsi_pll_disable_global_clk(rsc);
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
 	dsi_pll_disable_sub(rsc);
-	if (rsc->slave)
+	if (rsc->slave) {
+		dsi_pll_disable_global_clk(rsc->slave);
 		dsi_pll_disable_sub(rsc->slave);
-
+	}
 	/* flush, ensure all register writes are done*/
 	wmb();
 	rsc->pll_on = false;
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 3311e9f..f8fdf3f 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -114,6 +114,17 @@
 	{ }
 };
 
+static const struct freq_tbl ftbl_video_cc_venus_clk_src_sdm670[] = {
+	F(100000000, P_VIDEO_PLL0_OUT_MAIN, 4, 0, 0),
+	F(200000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(330000000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(364800000, P_VIDEO_PLL0_OUT_MAIN, 2, 0, 0),
+	F(404000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(444000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	F(533000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 video_cc_venus_clk_src = {
 	.cmd_rcgr = 0x7f0,
 	.mnd_width = 0,
@@ -343,8 +354,10 @@
 
 static void video_cc_sdm845_fixup_sdm670(void)
 {
-	video_cc_sdm845_fixup_sdm845v2();
-
+	video_cc_venus_clk_src.freq_tbl = ftbl_video_cc_venus_clk_src_sdm670;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW] = 330000000;
+	video_cc_venus_clk_src.clkr.hw.init->rate_max[VDD_CX_LOW_L1] =
+		404000000;
 }
 
 static int video_cc_sdm845_fixup(struct platform_device *pdev)
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495..cdc092a 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@
 	GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
 				GATE_BUS_TOP, 24, 0, 0),
 	GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-				GATE_BUS_TOP, 27, 0, 0),
+				GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@
 	GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
 	GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-			GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
 			GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
 	GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
 			GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-			GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
 			GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
 			GATE_BUS_TOP, 5, 0, 0),
 	GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-			GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
 			GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
 	GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@
 	GATE(0, "aclk166", "mout_user_aclk166",
 			GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
 	GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-			GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+			GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
 	GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
 			GATE_BUS_TOP, 16, 0, 0),
 	GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
 			GATE_BUS_TOP, 17, 0, 0),
 	GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-			GATE_BUS_TOP, 18, 0, 0),
+			GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
 	GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
 			GATE_BUS_TOP, 28, 0, 0),
 	GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
 			GATE_BUS_TOP, 29, 0, 0),
 
 	GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-			SRC_MASK_TOP2, 24, 0, 0),
+			SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
 	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
 			SRC_MASK_TOP7, 20, 0, 0),
diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c
index 0ab4c21..fb11acd 100644
--- a/drivers/cpuidle/lpm-levels-of.c
+++ b/drivers/cpuidle/lpm-levels-of.c
@@ -779,6 +779,7 @@
 	if (ret)
 		goto failed_parse_params;
 
+	INIT_LIST_HEAD(&c->list);
 	INIT_LIST_HEAD(&c->child);
 	INIT_LIST_HEAD(&c->cpu);
 	c->parent = parent;
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 5d2e918..746cdc0 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -1606,7 +1606,7 @@
 	 * clocks that are enabled and preventing the system level
 	 * LPMs(XO and Vmin).
 	 */
-	clock_debug_print_enabled();
+	clock_debug_print_enabled(true);
 
 	psci_enter_sleep(lpm_cpu, idx, false);
 
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7868765..b54af97 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1074,7 +1074,7 @@
 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
-			goto free_buf_src;
+			goto free_buf_dst;
 		if (!encrypt) {
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
@@ -1089,10 +1089,10 @@
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
 
-free_buf_src:
-	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dst:
 	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 6ed82ef..6fa91ae 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -888,7 +888,7 @@
 static int qcom_ice_init_clocks(struct ice_device *ice)
 {
 	int ret = -EINVAL;
-	struct ice_clk_info *clki;
+	struct ice_clk_info *clki = NULL;
 	struct device *dev = ice->pdev;
 	struct list_head *head = &ice->clk_list_head;
 
@@ -932,7 +932,7 @@
 static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
 {
 	int ret = 0;
-	struct ice_clk_info *clki;
+	struct ice_clk_info *clki = NULL;
 	struct device *dev = ice->pdev;
 	struct list_head *head = &ice->clk_list_head;
 
@@ -1608,12 +1608,14 @@
 		if (ice_dev->pdev->of_node == node) {
 			pr_info("%s: found ice device %pK\n", __func__,
 			ice_dev);
+			ice_pdev = to_platform_device(ice_dev->pdev);
 			break;
 		}
 	}
 
-	ice_pdev = to_platform_device(ice_dev->pdev);
-	pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
+	if (ice_pdev)
+		pr_info("%s: matching platform device %pK\n", __func__,
+			ice_pdev);
 out:
 	return ice_pdev;
 }
@@ -1632,11 +1634,11 @@
 	list_for_each_entry(ice_dev, &ice_devices, list) {
 		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
 			pr_info("%s: found ice device %p\n", __func__, ice_dev);
-			break;
+			return ice_dev;
 		}
 	}
 out:
-	return ice_dev;
+	return NULL;
 }
 
 static int enable_ice_setup(struct ice_device *ice_dev)
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 2cf303e..94b3c17 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -340,8 +340,6 @@
 	if (authdata) {
 		handle->sha_ctxt.auth_data[0] = auth32[0];
 		handle->sha_ctxt.auth_data[1] = auth32[1];
-		handle->sha_ctxt.auth_data[2] = auth32[2];
-		handle->sha_ctxt.auth_data[3] = auth32[3];
 	}
 
 	tasklet_schedule(&pdev->done_tasklet);
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
index 5802c21..f4f503e 100644
--- a/drivers/devfreq/arm-memlat-mon.c
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -55,8 +55,7 @@
 struct cpu_grp_info {
 	cpumask_t cpus;
 	cpumask_t inited_cpus;
-	unsigned long cache_miss_event;
-	unsigned long inst_event;
+	unsigned int event_ids[NUM_EVENTS];
 	struct cpu_pmu_stats *cpustats;
 	struct memlat_hwmon hw;
 	struct notifier_block arm_memlat_cpu_notif;
@@ -97,13 +96,8 @@
 	u64 total, enabled, running;
 
 	total = perf_event_read_value(event->pevent, &enabled, &running);
-	if (total >= event->prev_count)
-		ev_count = total - event->prev_count;
-	else
-		ev_count = (MAX_COUNT_LIM - event->prev_count) + total;
-
+	ev_count = total - event->prev_count;
 	event->prev_count = total;
-
 	return ev_count;
 }
 
@@ -134,7 +128,7 @@
 {
 	int i;
 
-	for (i = 0; i < NUM_EVENTS; i++) {
+	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
 		cpustats->events[i].prev_count = 0;
 		perf_event_release_kernel(cpustats->events[i].pevent);
 	}
@@ -187,7 +181,7 @@
 {
 	struct perf_event *pevent;
 	struct perf_event_attr *attr;
-	int err;
+	int err, i;
 	struct cpu_pmu_stats *cpustats = to_cpustats(cpu_grp, cpu);
 
 	/* Allocate an attribute for event initialization */
@@ -195,26 +189,15 @@
 	if (!attr)
 		return -ENOMEM;
 
-	attr->config = cpu_grp->inst_event;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[INST_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[INST_IDX].pevent);
-
-	attr->config = cpu_grp->cache_miss_event;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[CM_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[CM_IDX].pevent);
-
-	attr->config = CYC_EV;
-	pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
-	if (IS_ERR(pevent))
-		goto err_out;
-	cpustats->events[CYC_IDX].pevent = pevent;
-	perf_event_enable(cpustats->events[CYC_IDX].pevent);
+	for (i = 0; i < ARRAY_SIZE(cpustats->events); i++) {
+		attr->config = cpu_grp->event_ids[i];
+		pevent = perf_event_create_kernel_counter(attr, cpu, NULL,
+							  NULL, NULL);
+		if (IS_ERR(pevent))
+			goto err_out;
+		cpustats->events[i].pevent = pevent;
+		perf_event_enable(pevent);
+	}
 
 	kfree(attr);
 	return 0;
@@ -315,7 +298,7 @@
 	struct memlat_hwmon *hw;
 	struct cpu_grp_info *cpu_grp;
 	int cpu, ret;
-	u32 cachemiss_ev, inst_ev;
+	u32 event_id;
 
 	cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
 	if (!cpu_grp)
@@ -346,22 +329,24 @@
 	if (!cpu_grp->cpustats)
 		return -ENOMEM;
 
+	cpu_grp->event_ids[CYC_IDX] = CYC_EV;
+
 	ret = of_property_read_u32(dev->of_node, "qcom,cachemiss-ev",
-			&cachemiss_ev);
+				   &event_id);
 	if (ret) {
 		dev_dbg(dev, "Cache Miss event not specified. Using def:0x%x\n",
-				L2DM_EV);
-		cachemiss_ev = L2DM_EV;
+			L2DM_EV);
+		event_id = L2DM_EV;
 	}
-	cpu_grp->cache_miss_event = cachemiss_ev;
+	cpu_grp->event_ids[CM_IDX] = event_id;
 
-	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &inst_ev);
+	ret = of_property_read_u32(dev->of_node, "qcom,inst-ev", &event_id);
 	if (ret) {
 		dev_dbg(dev, "Inst event not specified. Using def:0x%x\n",
-				INST_EV);
-		inst_ev = INST_EV;
+			INST_EV);
+		event_id = INST_EV;
 	}
-	cpu_grp->inst_event = inst_ev;
+	cpu_grp->event_ids[INST_IDX] = event_id;
 
 	for_each_cpu(cpu, &cpu_grp->cpus)
 		to_devstats(cpu_grp, cpu)->id = cpu;
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 81d98d1..1a8ef1f 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -36,6 +36,7 @@
 struct memlat_node {
 	unsigned int ratio_ceil;
 	bool mon_started;
+	bool already_zero;
 	struct list_head list;
 	void *orig_data;
 	struct memlat_hwmon *hw;
@@ -224,7 +225,7 @@
 static int devfreq_memlat_get_freq(struct devfreq *df,
 					unsigned long *freq)
 {
-	int i, lat_dev;
+	int i, lat_dev = 0;
 	struct memlat_node *node = df->data;
 	struct memlat_hwmon *hw = node->hw;
 	unsigned long max_freq = 0;
@@ -238,16 +239,16 @@
 		if (hw->core_stats[i].mem_count)
 			ratio /= hw->core_stats[i].mem_count;
 
+		if (!hw->core_stats[i].inst_count
+		    || !hw->core_stats[i].freq)
+			continue;
+
 		trace_memlat_dev_meas(dev_name(df->dev.parent),
 					hw->core_stats[i].id,
 					hw->core_stats[i].inst_count,
 					hw->core_stats[i].mem_count,
 					hw->core_stats[i].freq, ratio);
 
-		if (!hw->core_stats[i].inst_count
-		    || !hw->core_stats[i].freq)
-			continue;
-
 		if (ratio <= node->ratio_ceil
 		    && hw->core_stats[i].freq > max_freq) {
 			lat_dev = i;
@@ -255,8 +256,10 @@
 		}
 	}
 
-	if (max_freq) {
+	if (max_freq)
 		max_freq = core_to_dev_freq(node, max_freq);
+
+	if (max_freq || !node->already_zero) {
 		trace_memlat_dev_update(dev_name(df->dev.parent),
 					hw->core_stats[lat_dev].id,
 					hw->core_stats[lat_dev].inst_count,
@@ -265,6 +268,8 @@
 					max_freq);
 	}
 
+	node->already_zero = !max_freq;
+
 	*freq = max_freq;
 	return 0;
 }
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 5a9166a..2f34a01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -285,7 +285,7 @@
 	struct sync_file *sync_file = container_of(kref, struct sync_file,
 						     kref);
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		fence_remove_callback(sync_file->fence, &sync_file->cb);
 	fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -305,7 +305,7 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f2bb512..063d176 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -703,24 +703,23 @@
 {
 	struct lineevent_state *le = p;
 	struct gpioevent_data ge;
-	int ret;
+	int ret, level;
 
 	ge.timestamp = ktime_get_real_ns();
+	level = gpiod_get_value_cansleep(le->desc);
 
 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
 	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
-		int level = gpiod_get_value_cansleep(le->desc);
-
 		if (level)
 			/* Emit low-to-high event */
 			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
 		else
 			/* Emit high-to-low event */
 			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
 		/* Emit low-to-high event */
 		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
 		/* Emit high-to-low event */
 		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
 	} else {
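
The gpiolib hunk above samples the line level once, right after the interrupt, and only reports an event when that level agrees with the edge the caller asked for. A reduced model of the decision (not the driver's function); the constants are the uapi event codes:

	static int lineevent_pick_id(bool want_rise, bool want_fall, int level)
	{
		if (want_rise && want_fall)	/* both edges requested */
			return level ? GPIOEVENT_EVENT_RISING_EDGE
				     : GPIOEVENT_EVENT_FALLING_EDGE;
		if (want_rise && level)
			return GPIOEVENT_EVENT_RISING_EDGE;
		if (want_fall && !level)
			return GPIOEVENT_EVENT_FALLING_EDGE;
		return 0;	/* level does not match the request: drop it */
	}
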
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index dc9511c..327bdf1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1301,6 +1301,7 @@
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
 	case CHIP_VERDE:
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_registers,
@@ -1325,6 +1326,7 @@
 		amdgpu_program_register_sequence(adev,
 						 oland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
 	case CHIP_HAINAN:
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 161c923..3e74e1a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -315,6 +315,8 @@
 	bool edid_read;
 
 	wait_queue_head_t wq;
+	struct work_struct hpd_work;
+
 	struct drm_bridge bridge;
 	struct drm_connector connector;
 
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8ed3906..213d892 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -402,6 +402,27 @@
 	return false;
 }
 
+static void adv7511_hpd_work(struct work_struct *work)
+{
+	struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+	enum drm_connector_status status;
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+	if (ret < 0)
+		status = connector_status_disconnected;
+	else if (val & ADV7511_STATUS_HPD)
+		status = connector_status_connected;
+	else
+		status = connector_status_disconnected;
+
+	if (adv7511->connector.status != status) {
+		adv7511->connector.status = status;
+		drm_kms_helper_hotplug_event(adv7511->connector.dev);
+	}
+}
+
 static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
 {
 	unsigned int irq0, irq1;
@@ -419,7 +440,7 @@
 	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
 	if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
-		drm_helper_hpd_irq_event(adv7511->connector.dev);
+		schedule_work(&adv7511->hpd_work);
 
 	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
 		adv7511->edid_read = true;
@@ -1006,6 +1027,8 @@
 			goto err_i2c_unregister_edid;
 	}
 
+	INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
 	if (i2c->irq) {
 		init_waitqueue_head(&adv7511->wq);
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 99011621..4e16dff 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1459,6 +1459,9 @@
 	if (config->funcs->atomic_check)
 		ret = config->funcs->atomic_check(state->dev, state);
 
+	if (ret)
+		return ret;
+
 	if (!state->allow_modeset) {
 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
 			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1469,7 +1472,7 @@
 		}
 	}
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_check_only);
 
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index ac5437e..6394109 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -358,20 +358,13 @@
  */
 int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
 {
-	u8 value;
+	u8 value = DP_SET_POWER_D0;
 	int err;
 
 	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
 	if (link->revision < 0x11)
 		return 0;
 
-	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
-	if (err < 0)
-		return err;
-
-	value &= ~DP_SET_POWER_MASK;
-	value |= DP_SET_POWER_D0;
-
 	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
 	if (err < 0)
 		return err;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 465bacd..48e99ab 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -255,13 +255,13 @@
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_gem_remove_prime_handles(obj, file_priv);
 	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, file_priv);
-
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index afdd55d..1ac9a95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -264,8 +264,8 @@
 		if (ret)
 			return ret;
 
-		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-			DRM_ERROR("relocation %u outside object", i);
+		if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+			DRM_ERROR("relocation %u outside object\n", i);
 			return -EINVAL;
 		}
 
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 95a7277..89a7774 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -394,6 +394,7 @@
 		}
 
 		/* Program the max register to clamp values > 1.0. */
+		i = lut_size - 1;
 		I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 			   drm_color_lut_extract(lut[i].red, 16));
 		I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4015832..c81832a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -161,7 +161,8 @@
 	sde/sde_hw_color_processing_v1_7.o \
 	sde/sde_reg_dma.o \
 	sde/sde_hw_reg_dma_v1.o \
-	sde/sde_hw_dsc.o
+	sde/sde_hw_dsc.o \
+	sde/sde_hw_ds.o
 
 msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
 	sde/sde_encoder_phys_wb.o
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 67b6072..c4a60dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -771,8 +771,10 @@
 	dp_audio->off = dp_audio_off;
 
 	rc = dp_audio_init_ext_disp(audio);
-	if (rc)
+	if (rc) {
+		devm_kfree(&pdev->dev, audio);
 		goto error;
+	}
 
 	catalog->init(catalog);
 
@@ -790,5 +792,5 @@
 
 	audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
 
-	kzfree(audio);
+	devm_kfree(&audio->pdev->dev, audio);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 9106027..acbaec4 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -38,6 +38,10 @@
 	bool cmd_busy;
 	bool native;
 	bool read;
+	bool no_send_addr;
+	bool no_send_stop;
+	u32 offset;
+	u32 segment;
 
 	struct drm_dp_aux drm_aux;
 };
@@ -102,9 +106,19 @@
 		aux->catalog->write_data(aux->catalog);
 	}
 
+	aux->catalog->clear_trans(aux->catalog, false);
+	aux->catalog->clear_hw_interrupts(aux->catalog);
+
 	reg = 0; /* Transaction number == 1 */
-	if (!aux->native) /* i2c */
-		reg |= (BIT(8) | BIT(10) | BIT(11));
+	if (!aux->native) { /* i2c */
+		reg |= BIT(8);
+
+		if (aux->no_send_addr)
+			reg |= BIT(10);
+
+		if (aux->no_send_stop)
+			reg |= BIT(11);
+	}
 
 	reg |= BIT(9);
 	aux->catalog->data = reg;
@@ -150,9 +164,11 @@
 {
 	u32 data;
 	u8 *dp;
-	u32 i;
+	u32 i, actual_i;
 	u32 len = msg->size;
 
+	aux->catalog->clear_trans(aux->catalog, true);
+
 	data = 0;
 	data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
 	data |= BIT(0);  /* read */
@@ -168,6 +184,11 @@
 	for (i = 0; i < len; i++) {
 		data = aux->catalog->read_data(aux->catalog);
 		*dp++ = (u8)((data >> 8) & 0xff);
+
+		actual_i = (data >> 16) & 0xFF;
+		if (i != actual_i)
+			pr_warn("Index mismatch: expected %d, found %d\n",
+				i, actual_i);
 	}
 }
 
@@ -183,6 +204,10 @@
 		aux->aux_error_num = DP_AUX_ERR_TOUT;
 	if (isr & DP_INTR_NACK_DEFER)
 		aux->aux_error_num = DP_AUX_ERR_NACK;
+	if (isr & DP_INTR_AUX_ERROR) {
+		aux->aux_error_num = DP_AUX_ERR_PHY;
+		aux->catalog->clear_hw_interrupts(aux->catalog);
+	}
 
 	complete(&aux->comp);
 }
@@ -207,6 +232,10 @@
 			aux->aux_error_num = DP_AUX_ERR_NACK;
 		if (isr & DP_INTR_I2C_DEFER)
 			aux->aux_error_num = DP_AUX_ERR_DEFER;
+		if (isr & DP_INTR_AUX_ERROR) {
+			aux->aux_error_num = DP_AUX_ERR_PHY;
+			aux->catalog->clear_hw_interrupts(aux->catalog);
+		}
 	}
 
 	complete(&aux->comp);
@@ -250,6 +279,87 @@
 	aux->catalog->reset(aux->catalog);
 }
 
+static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *input_msg)
+{
+	u32 const edid_address = 0x50;
+	u32 const segment_address = 0x30;
+	bool i2c_read = input_msg->request &
+		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+	u8 *data = NULL;
+
+	if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
+		(input_msg->address != segment_address)))
+		return;
+
+
+	data = input_msg->buffer;
+	if (input_msg->address == segment_address)
+		aux->segment = *data;
+	else
+		aux->offset = *data;
+}
+
+/**
+ * dp_aux_transfer_helper() - helper function for EDID read transactions
+ *
+ * @aux: DP AUX private structure
+ * @input_msg: input message from DRM upstream APIs
+ *
+ * return: void
+ *
+ * This helper function is used to fix EDID reads for non-compliant
+ * sinks that do not handle the i2c middle-of-transaction flag correctly.
+ */
+static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *input_msg)
+{
+	struct drm_dp_aux_msg helper_msg;
+	u32 const message_size = 0x10;
+	u32 const segment_address = 0x30;
+	bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
+	bool i2c_read = input_msg->request &
+		(DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+	if (!i2c_mot || !i2c_read || (input_msg->size == 0))
+		return;
+
+	aux->read = false;
+	aux->cmd_busy = true;
+	aux->no_send_addr = true;
+	aux->no_send_stop = true;
+
+	/*
+	 * Send the segment address for every i2c read in which the
+	 * middle-of-transaction flag is set. This is required to support EDID
+	 * reads of more than 2 blocks as the segment address is reset to 0
+	 * since we are overriding the middle-of-transaction flag for read
+	 * transactions.
+	 */
+	memset(&helper_msg, 0, sizeof(helper_msg));
+	helper_msg.address = segment_address;
+	helper_msg.buffer = &aux->segment;
+	helper_msg.size = 1;
+	dp_aux_cmd_fifo_tx(aux, &helper_msg);
+
+	/*
+	 * Send the offset address for every i2c read in which the
+	 * middle-of-transaction flag is set. This will ensure that the sink
+	 * will update its read pointer and return the correct portion of the
+	 * EDID buffer in the subsequent i2c read transaction triggered in the
+	 * native AUX transfer function.
+	 */
+	memset(&helper_msg, 0, sizeof(helper_msg));
+	helper_msg.address = input_msg->address;
+	helper_msg.buffer = &aux->offset;
+	helper_msg.size = 1;
+	dp_aux_cmd_fifo_tx(aux, &helper_msg);
+	aux->offset += message_size;
+
+	if (aux->offset == 0x80 || aux->offset == 0x100)
+		aux->segment = 0x0; /* reset segment at end of block */
+}
+
 /*
  * This function does the real job to process an AUX transaction.
  * It will call aux_reset() function to reset the AUX channel,
@@ -268,8 +378,6 @@
 	mutex_lock(&aux->mutex);
 
 	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
-	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-	aux->cmd_busy = true;
 
 	/* Ignore address only message */
 	if ((msg->size == 0) || (msg->buffer == NULL)) {
@@ -288,6 +396,20 @@
 		goto unlock_exit;
 	}
 
+	dp_aux_update_offset_and_segment(aux, msg);
+	dp_aux_transfer_helper(aux, msg);
+
+	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+	aux->cmd_busy = true;
+
+	if (aux->read) {
+		aux->no_send_addr = true;
+		aux->no_send_stop = false;
+	} else {
+		aux->no_send_addr = true;
+		aux->no_send_stop = true;
+	}
+
 	ret = dp_aux_cmd_fifo_tx(aux, msg);
 	if ((ret < 0) && aux->native) {
 		aux->retry_cnt++;
@@ -341,11 +463,11 @@
 
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+	dp_aux_reset_phy_config_indices(aux_cfg);
+	aux->catalog->setup(aux->catalog, aux_cfg);
 	aux->catalog->reset(aux->catalog);
 	aux->catalog->enable(aux->catalog, true);
 	aux->retry_cnt = 0;
-	dp_aux_reset_phy_config_indices(aux_cfg);
-	aux->catalog->setup(aux->catalog, aux_cfg);
 }
 
 static void dp_aux_deinit(struct dp_aux *dp_aux)
@@ -451,5 +573,7 @@
 
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+	mutex_destroy(&aux->mutex);
+
 	devm_kfree(aux->dev, aux);
 }
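
Because dp_aux_transfer_helper() above overrides the middle-of-transaction flag for reads, it has to re-issue the EDID segment pointer (i2c address 0x30) and the word offset before every 16-byte read. Its bookkeeping boils down to the sketch below; i2c_over_aux_write() is a hypothetical stand-in for the raw AUX writes it performs through dp_aux_cmd_fifo_tx():

	/* before each 16-byte EDID read (message_size == 0x10): */
	i2c_over_aux_write(0x30, &segment, 1);	/* select the 256-byte segment */
	i2c_over_aux_write(0x50, &offset, 1);	/* move the sink's read pointer */
	/* ...the actual read is then issued by the normal transfer path... */

	offset += 0x10;
	if (offset == 0x80 || offset == 0x100)
		segment = 0;			/* segment resets at a block boundary */
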
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 5d96fd9..85761ce 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -25,6 +25,7 @@
 	DP_AUX_ERR_NACK	= -3,
 	DP_AUX_ERR_DEFER	= -4,
 	DP_AUX_ERR_NACK_DEFER	= -5,
+	DP_AUX_ERR_PHY	= -6,
 };
 
 struct dp_aux {
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 3a33ce4..2894e82 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -20,6 +20,9 @@
 #include "dp_catalog.h"
 #include "dp_reg.h"
 
+#define DP_GET_MSB(x)	(x >> 8)
+#define DP_GET_LSB(x)	(x & 0xff)
+
 #define dp_read(offset) readl_relaxed((offset))
 #define dp_write(offset, data) writel_relaxed((data), (offset))
 
@@ -128,6 +131,58 @@
 	return rc;
 }
 
+static int dp_catalog_aux_clear_trans(struct dp_catalog_aux *aux, bool read)
+{
+	int rc = 0;
+	u32 data = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	if (read) {
+		data = dp_read(base + DP_AUX_TRANS_CTRL);
+		data &= ~BIT(9);
+		dp_write(base + DP_AUX_TRANS_CTRL, data);
+	} else {
+		dp_write(base + DP_AUX_TRANS_CTRL, 0);
+	}
+end:
+	return rc;
+}
+
+static void dp_catalog_aux_clear_hw_interrupts(struct dp_catalog_aux *aux)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *phy_base;
+	u32 data = 0;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	phy_base = catalog->io->phy_io.base;
+
+	data = dp_read(phy_base + DP_PHY_AUX_INTERRUPT_STATUS);
+	pr_debug("PHY_AUX_INTERRUPT_STATUS=0x%08x\n", data);
+
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+	wmb(); /* make sure 0x1f is written before next write */
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+	wmb(); /* make sure 0x9f is written before next write */
+	dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+	wmb(); /* make sure register is cleared */
+}
+
 static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
 {
 	u32 aux_ctrl;
@@ -150,6 +205,7 @@
 
 	aux_ctrl &= ~BIT(1);
 	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+	wmb(); /* make sure AUX reset is done here */
 }
 
 static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
@@ -169,14 +225,15 @@
 	aux_ctrl = dp_read(base + DP_AUX_CTRL);
 
 	if (enable) {
+		aux_ctrl |= BIT(0);
+		dp_write(base + DP_AUX_CTRL, aux_ctrl);
+		wmb(); /* make sure AUX module is enabled */
 		dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
 		dp_write(base + DP_AUX_LIMITS, 0xffff);
-		aux_ctrl |= BIT(0);
 	} else {
 		aux_ctrl &= ~BIT(0);
+		dp_write(base + DP_AUX_CTRL, aux_ctrl);
 	}
-
-	dp_write(base + DP_AUX_CTRL, aux_ctrl);
 }
 
 static void dp_catalog_aux_update_cfg(struct dp_catalog_aux *aux,
@@ -216,13 +273,12 @@
 
 	dp_catalog_get_priv(aux);
 
-	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x02);
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x65);
 	wmb(); /* make sure PD programming happened */
-	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x7d);
 
 	/* Turn on BIAS current for PHY/PLL */
 	dp_write(catalog->io->dp_pll_io.base +
-		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1b);
 
 	/* DP AUX CFG register programming */
 	for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
@@ -234,6 +290,7 @@
 	}
 
 	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+	wmb(); /* make sure AUX configuration is done before enabling it */
 }
 
 static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
@@ -250,9 +307,6 @@
 	dp_catalog_get_priv(aux);
 	base = catalog->io->ctrl_io.base;
 
-	if (cmd_busy)
-		dp_write(base + DP_AUX_TRANS_CTRL, 0x0);
-
 	aux->isr = dp_read(base + DP_INTR_STATUS);
 	aux->isr &= ~DP_INTR_MASK1;
 	ack = aux->isr & DP_INTERRUPT_STATUS1;
@@ -278,6 +332,174 @@
 	return dp_read(base + DP_HDCP_STATUS);
 }
 
+static void dp_catalog_ctrl_setup_infoframe_sdp(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+	u32 header, data;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	header = dp_read(base + MMSS_DP_VSCEXT_0);
+	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	dp_write(base + MMSS_DP_VSCEXT_0, header);
+
+	header = dp_read(base + MMSS_DP_VSCEXT_1);
+	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	dp_write(base + MMSS_DP_VSCEXT_1, header);
+
+	header = dp_read(base + MMSS_DP_VSCEXT_1);
+	header |= ctrl->hdr_data.vsc_hdr_byte1;
+	dp_write(base + MMSS_DP_VSCEXT_1, header);
+
+	header =  ctrl->hdr_data.version;
+	header |=  ctrl->hdr_data.length << 8;
+	header |= ctrl->hdr_data.eotf << 16;
+	header |= (ctrl->hdr_data.descriptor_id << 24);
+	dp_write(base + MMSS_DP_VSCEXT_2, header);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[0]) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[0]) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_3, data);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[1]) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_4, data);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_x[2]) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_5, data);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.white_point_x) |
+		(DP_GET_MSB(ctrl->hdr_data.white_point_x) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.white_point_y) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.white_point_y) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_6, data);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.max_luminance) |
+		(DP_GET_MSB(ctrl->hdr_data.max_luminance) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.min_luminance) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.min_luminance) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_7, data);
+
+	data = (DP_GET_LSB(ctrl->hdr_data.max_content_light_level) |
+		(DP_GET_MSB(ctrl->hdr_data.max_content_light_level) << 8) |
+		(DP_GET_LSB(ctrl->hdr_data.max_average_light_level) << 16) |
+		(DP_GET_MSB(ctrl->hdr_data.max_average_light_level) << 24));
+	dp_write(base + MMSS_DP_VSCEXT_8, data);
+
+	dp_write(base + MMSS_DP_VSCEXT_9, 0x00);
+}
+
+static void dp_catalog_ctrl_setup_vsc_sdp(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+	u32 value;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	value = dp_read(base + MMSS_DP_GENERIC0_0);
+	value |= ctrl->hdr_data.vsc_hdr_byte1;
+	dp_write(base + MMSS_DP_GENERIC0_0, value);
+
+	value = dp_read(base + MMSS_DP_GENERIC0_1);
+	value |= ctrl->hdr_data.vsc_hdr_byte2;
+	dp_write(base + MMSS_DP_GENERIC0_1, value);
+
+	value = dp_read(base + MMSS_DP_GENERIC0_1);
+	value |= ctrl->hdr_data.vsc_hdr_byte3;
+	dp_write(base + MMSS_DP_GENERIC0_1, value);
+
+	dp_write(base + MMSS_DP_GENERIC0_2, 0x00);
+	dp_write(base + MMSS_DP_GENERIC0_3, 0x00);
+	dp_write(base + MMSS_DP_GENERIC0_4, 0x00);
+	dp_write(base + MMSS_DP_GENERIC0_5, 0x00);
+
+	dp_write(base + MMSS_DP_GENERIC0_6, ctrl->hdr_data.pkt_payload);
+	dp_write(base + MMSS_DP_GENERIC0_7, 0x00);
+	dp_write(base + MMSS_DP_GENERIC0_8, 0x00);
+	dp_write(base + MMSS_DP_GENERIC0_9, 0x00);
+}
+
+static void dp_catalog_ctrl_config_hdr(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+	u32 cfg, cfg2;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	cfg = dp_read(base + MMSS_DP_SDP_CFG);
+	/* VSCEXT_SDP_EN */
+	cfg |= BIT(16);
+
+	/* GEN0_SDP_EN */
+	cfg |= BIT(17);
+
+	dp_write(base + MMSS_DP_SDP_CFG, cfg);
+
+	cfg2 = dp_read(base + MMSS_DP_SDP_CFG2);
+	/* Generic0 SDP Payload is 19 bytes which is > 16, so Bit16 is 1 */
+	cfg2 |= BIT(16);
+	dp_write(base + MMSS_DP_SDP_CFG2, cfg2);
+
+	dp_catalog_ctrl_setup_vsc_sdp(ctrl);
+	dp_catalog_ctrl_setup_infoframe_sdp(ctrl);
+
+	cfg = dp_read(base + DP_MISC1_MISC0);
+	/* Indicates presence of VSC */
+	cfg |= BIT(6) << 8;
+
+	dp_write(base + DP_MISC1_MISC0, cfg);
+
+	cfg = dp_read(base + DP_CONFIGURATION_CTRL);
+	/* Send VSC */
+	cfg |= BIT(7);
+
+	switch (ctrl->hdr_data.bpc) {
+	default:
+	case 10:
+		cfg |= BIT(9);
+		break;
+	case 8:
+		cfg |= BIT(8);
+		break;
+	}
+
+	dp_write(base + DP_CONFIGURATION_CTRL, cfg);
+
+	cfg = dp_read(base + DP_COMPRESSION_MODE_CTRL);
+
+	/* Trigger SDP values in registers */
+	cfg |= BIT(8);
+	dp_write(base + DP_COMPRESSION_MODE_CTRL, cfg);
+}
+
 static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
 {
 	struct dp_catalog_private *catalog;
@@ -382,7 +604,7 @@
 static void dp_catalog_ctrl_config_misc(struct dp_catalog_ctrl *ctrl,
 					u32 cc, u32 tb)
 {
-	u32 misc_val = cc;
+	u32 misc_val;
 	struct dp_catalog_private *catalog;
 	void __iomem *base;
 
@@ -394,6 +616,8 @@
 	dp_catalog_get_priv(ctrl);
 	base = catalog->io->ctrl_io.base;
 
+	misc_val = dp_read(base + DP_MISC1_MISC0);
+	misc_val |= cc;
 	misc_val |= (tb << 5);
 	misc_val |= BIT(0); /* Configure clock to synchronous mode */
 
@@ -1040,11 +1264,13 @@
 		.read_data     = dp_catalog_aux_read_data,
 		.write_data    = dp_catalog_aux_write_data,
 		.write_trans   = dp_catalog_aux_write_trans,
+		.clear_trans   = dp_catalog_aux_clear_trans,
 		.reset         = dp_catalog_aux_reset,
 		.update_aux_cfg = dp_catalog_aux_update_cfg,
 		.enable        = dp_catalog_aux_enable,
 		.setup         = dp_catalog_aux_setup,
 		.get_irq       = dp_catalog_aux_get_irq,
+		.clear_hw_interrupts = dp_catalog_aux_clear_hw_interrupts,
 	};
 	struct dp_catalog_ctrl ctrl = {
 		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
@@ -1063,6 +1289,7 @@
 		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
 		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
 		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
+		.config_hdr     = dp_catalog_ctrl_config_hdr,
 		.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
 		.read_hdcp_status     = dp_catalog_ctrl_read_hdcp_status,
 		.send_phy_pattern    = dp_catalog_ctrl_send_phy_pattern,
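
The HDR SDP programming added above packs two 16-bit values into each 32-bit VSCEXT register with the DP_GET_LSB()/DP_GET_MSB() macros. For values that fit in 16 bits the byte shuffling collapses to a pair of shifts, as this illustrative helper shows (the function name is not part of the driver):

	static inline u32 pack_two_u16(u16 lo, u16 hi)
	{
		/*
		 * Same as (LSB(lo) | MSB(lo) << 8 | LSB(hi) << 16 | MSB(hi) << 24)
		 * when lo and hi are 16-bit quantities.
		 */
		return (u32)lo | ((u32)hi << 16);
	}

	/* e.g. display_primaries_x[0] and display_primaries_y[0] share one register */
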
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 4523b4f..aca2f18 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -34,6 +34,32 @@
 #define DP_INTR_FRAME_END		BIT(6)
 #define DP_INTR_CRC_UPDATED		BIT(9)
 
+#define HDR_PRIMARIES_COUNT   3
+
+struct dp_catalog_hdr_data {
+	u32 vsc_hdr_byte0;
+	u32 vsc_hdr_byte1;
+	u32 vsc_hdr_byte2;
+	u32 vsc_hdr_byte3;
+	u32 pkt_payload;
+
+	u32 bpc;
+
+	u32 version;
+	u32 length;
+	u32 eotf;
+	u32 descriptor_id;
+
+	u32 display_primaries_x[HDR_PRIMARIES_COUNT];
+	u32 display_primaries_y[HDR_PRIMARIES_COUNT];
+	u32 white_point_x;
+	u32 white_point_y;
+	u32 max_luminance;
+	u32 min_luminance;
+	u32 max_content_light_level;
+	u32 max_average_light_level;
+};
+
 struct dp_catalog_aux {
 	u32 data;
 	u32 isr;
@@ -41,6 +67,7 @@
 	u32 (*read_data)(struct dp_catalog_aux *aux);
 	int (*write_data)(struct dp_catalog_aux *aux);
 	int (*write_trans)(struct dp_catalog_aux *aux);
+	int (*clear_trans)(struct dp_catalog_aux *aux, bool read);
 	void (*reset)(struct dp_catalog_aux *aux);
 	void (*enable)(struct dp_catalog_aux *aux, bool enable);
 	void (*update_aux_cfg)(struct dp_catalog_aux *aux,
@@ -48,6 +75,7 @@
 	void (*setup)(struct dp_catalog_aux *aux,
 			struct dp_aux_cfg *aux_cfg);
 	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
+	void (*clear_hw_interrupts)(struct dp_catalog_aux *aux);
 };
 
 struct dp_catalog_ctrl {
@@ -55,6 +83,7 @@
 	u32 valid_boundary;
 	u32 valid_boundary2;
 	u32 isr;
+	struct dp_catalog_hdr_data hdr_data;
 
 	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
 	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
@@ -75,6 +104,7 @@
 	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
 				u8 p_level);
 	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
+	void (*config_hdr)(struct dp_catalog_ctrl *ctrl);
 	void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
 	u32 (*read_hdcp_status)(struct dp_catalog_ctrl *ctrl);
 	void (*send_phy_pattern)(struct dp_catalog_ctrl *ctrl,
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 9ce23b1..13ca6b2 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -69,7 +69,6 @@
 	struct completion idle_comp;
 	struct completion video_comp;
 
-	bool psm_enabled;
 	bool orientation;
 	atomic_t aborted;
 
@@ -992,7 +991,10 @@
 
 	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
 
-	drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->link_info);
+	ret = ctrl->link->psm_config(ctrl->link,
+		&ctrl->panel->link_info, false);
+	if (ret)
+		goto end;
 
 	if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
 		goto end;
@@ -1086,7 +1088,6 @@
 	catalog = ctrl->catalog;
 
 	catalog->usb_reset(ctrl->catalog, flip);
-	catalog->reset(ctrl->catalog);
 	catalog->phy_reset(ctrl->catalog);
 	catalog->enable_irq(ctrl->catalog, true);
 
@@ -1112,12 +1113,6 @@
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
 	ctrl->catalog->enable_irq(ctrl->catalog, false);
-	ctrl->catalog->reset(ctrl->catalog);
-
-	/* Make sure DP is disabled before clk disable */
-	wmb();
-
-	dp_ctrl_disable_mainlink_clocks(ctrl);
 
 	pr_debug("Host deinitialized successfully\n");
 }
@@ -1390,8 +1385,14 @@
 	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
 
 	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
-	pr_debug("DP off done\n");
+	ctrl->catalog->reset(ctrl->catalog);
 
+	/* Make sure DP is disabled before clk disable */
+	wmb();
+
+	dp_ctrl_disable_mainlink_clocks(ctrl);
+
+	pr_debug("DP off done\n");
 }
 
 static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
index 69cf6b6..d6d10ed 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.h
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -23,7 +23,6 @@
 #include "dp_catalog.h"
 
 struct dp_ctrl {
-
 	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
 	void (*deinit)(struct dp_ctrl *dp_ctrl);
 	int (*on)(struct dp_ctrl *dp_ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index cc9e623..25407c4 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -107,6 +107,40 @@
 	return len;
 }
 
+static ssize_t dp_debug_bw_code_write(struct file *file,
+		const char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char buf[SZ_8];
+	size_t len = 0;
+	u32 max_bw_code = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	/* Leave room for termination char */
+	len = min_t(size_t, count, SZ_8 - 1);
+	if (copy_from_user(buf, user_buff, len))
+		return 0;
+
+	buf[len] = '\0';
+
+	if (kstrtoint(buf, 10, &max_bw_code) != 0)
+		return 0;
+
+	if (!is_link_rate_valid(max_bw_code)) {
+		pr_err("Unsupported bw code %d\n", max_bw_code);
+		return len;
+	}
+	debug->panel->max_bw_code = max_bw_code;
+	pr_debug("max_bw_code: %d\n", max_bw_code);
+
+	return len;
+}
+
 static ssize_t dp_debug_read_connected(struct file *file,
 		char __user *user_buff, size_t count, loff_t *ppos)
 {
@@ -349,6 +383,36 @@
 	return -EINVAL;
 }
 
+static ssize_t dp_debug_bw_code_read(struct file *file,
+	char __user *user_buff, size_t count, loff_t *ppos)
+{
+	struct dp_debug_private *debug = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!debug)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len),
+			"max_bw_code = %d\n", debug->panel->max_bw_code);
+
+	if (copy_to_user(user_buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
+
 static const struct file_operations dp_debug_fops = {
 	.open = simple_open,
 	.read = dp_debug_read_info,
@@ -370,6 +434,12 @@
 	.read = dp_debug_read_connected,
 };
 
+static const struct file_operations bw_code_fops = {
+	.open = simple_open,
+	.read = dp_debug_bw_code_read,
+	.write = dp_debug_bw_code_write,
+};
+
 static int dp_debug_init(struct dp_debug *dp_debug)
 {
 	int rc = 0;
@@ -377,6 +447,7 @@
 		struct dp_debug_private, dp_debug);
 	struct dentry *dir, *file, *edid_modes;
 	struct dentry *hpd, *connected;
+	struct dentry *max_bw_code;
 	struct dentry *root = debug->root;
 
 	dir = debugfs_create_dir(DEBUG_NAME, NULL);
@@ -423,6 +494,15 @@
 		goto error_remove_dir;
 	}
 
+	max_bw_code = debugfs_create_file("max_bw_code", 0644, dir,
+			debug, &bw_code_fops);
+	if (IS_ERR_OR_NULL(max_bw_code)) {
+		rc = PTR_ERR(max_bw_code);
+		pr_err("[%s] debugfs max_bw_code failed, rc=%d\n",
+		       DEBUG_NAME, rc);
+		goto error_remove_dir;
+	}
+
 	root = dir;
 	return rc;
 error_remove_dir:
@@ -463,7 +543,11 @@
 	dp_debug->hdisplay = 0;
 	dp_debug->vrefresh = 0;
 
-	dp_debug_init(dp_debug);
+	rc = dp_debug_init(dp_debug);
+	if (rc) {
+		devm_kfree(dev, debug);
+		goto error;
+	}
 
 	return dp_debug;
 error:
@@ -495,5 +579,5 @@
 
 	dp_debug_deinit(dp_debug);
 
-	kzfree(debug);
+	devm_kfree(debug->dev, debug);
 }
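
The new max_bw_code node takes a decimal DP bandwidth code and rejects anything is_link_rate_valid() (added to dp_panel.h later in this series) does not recognize. A short reference sketch using the standard DRM bandwidth-code defines; the exact debugfs path depends on DEBUG_NAME, which is defined outside this hunk:

	#include <linux/types.h>
	#include <drm/drm_dp_helper.h>

	/* Decimal values accepted by a write to <debugfs>/<DEBUG_NAME>/max_bw_code:
	 *   6  (0x06) -> DP_LINK_BW_1_62, RBR  1.62 Gbps per lane
	 *   10 (0x0a) -> DP_LINK_BW_2_7,  HBR  2.7  Gbps per lane
	 *   20 (0x14) -> DP_LINK_BW_5_4,  HBR2 5.4  Gbps per lane
	 *   30 (0x1e) -> DP_LINK_BW_8_1,  HBR3 8.1  Gbps per lane
	 * Any other value is rejected by dp_debug_bw_code_write().
	 */
	static bool dp_example_bw_code_ok(u32 code)
	{
		return code == DP_LINK_BW_1_62 || code == DP_LINK_BW_2_7 ||
		       code == DP_LINK_BW_5_4  || code == DP_LINK_BW_8_1;
	}
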
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 3fa376b..ea07d15 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -37,6 +37,7 @@
 #include "dp_debug.h"
 
 static struct dp_display *g_dp_display;
+#define HPD_STRING_SIZE 30
 
 struct dp_hdcp {
 	void *data;
@@ -434,21 +435,70 @@
 		(dp->link->sink_count.count == 0);
 }
 
+static void dp_display_send_hpd_event(struct dp_display *dp_display)
+{
+	struct drm_device *dev = NULL;
+	struct dp_display_private *dp;
+	struct drm_connector *connector;
+	char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE],
+		bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE];
+	char *envp[5];
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+	if (!dp) {
+		pr_err("invalid params\n");
+		return;
+	}
+	connector = dp->dp_display.connector;
+	dev = dp_display->connector->dev;
+
+	connector->status = connector->funcs->detect(connector, false);
+	pr_debug("[%s] status updated to %s\n",
+			      connector->name,
+			      drm_get_connector_status_name(connector->status));
+	snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name);
+	snprintf(status, HPD_STRING_SIZE, "status=%s",
+		drm_get_connector_status_name(connector->status));
+	snprintf(bpp, HPD_STRING_SIZE, "bpp=%d",
+		dp_link_bit_depth_to_bpp(
+		dp->link->test_video.test_bit_depth));
+	snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
+		dp->link->test_video.test_video_pattern);
+
+	pr_debug("generating hotplug event [%s]:[%s] [%s] [%s]\n",
+		name, status, bpp, pattern);
+	envp[0] = name;
+	envp[1] = status;
+	envp[2] = bpp;
+	envp[3] = pattern;
+	envp[4] = NULL;
+	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+			envp);
+}
+
 static int dp_display_send_hpd_notification(struct dp_display_private *dp,
 		bool hpd)
 {
-
 	if ((hpd && dp->dp_display.is_connected) ||
 			(!hpd && !dp->dp_display.is_connected)) {
 		pr_info("HPD already %s\n", (hpd ? "on" : "off"));
 		return 0;
 	}
 
+	/* reset video pattern flag on disconnect */
+	if (!hpd)
+		dp->panel->video_test = false;
+
 	dp->dp_display.is_connected = hpd;
 	reinit_completion(&dp->notification_comp);
-	drm_helper_hpd_irq_event(dp->dp_display.connector->dev);
+	dp_display_send_hpd_event(&dp->dp_display);
 
-	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 2)) {
+	if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
 		pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
 		return -EINVAL;
 	}
@@ -456,25 +506,20 @@
 	return 0;
 }
 
-static void dp_display_handle_test_edid(struct dp_display_private *dp)
-{
-	if (dp->link->sink_request & DP_TEST_LINK_EDID_READ) {
-		drm_dp_dpcd_write(dp->aux->drm_aux, DP_TEST_EDID_CHECKSUM,
-			&dp->panel->edid_ctrl->edid->checksum, 1);
-		drm_dp_dpcd_write(dp->aux->drm_aux, DP_TEST_RESPONSE,
-			&dp->link->test_response, 1);
-	}
-}
-
 static int dp_display_process_hpd_high(struct dp_display_private *dp)
 {
 	int rc = 0;
 	u32 max_pclk_from_edid = 0;
 	struct edid *edid;
 
+	dp->aux->init(dp->aux, dp->parser->aux_cfg);
+
+	if (dp->link->psm_enabled)
+		goto notify;
+
 	rc = dp->panel->read_sink_caps(dp->panel, dp->dp_display.connector);
 	if (rc)
-		return rc;
+		goto notify;
 
 	dp->link->process_request(dp->link);
 
@@ -488,13 +533,14 @@
 
 	dp->audio_supported = drm_detect_monitor_audio(edid);
 
-	dp_display_handle_test_edid(dp);
+	dp->panel->handle_sink_request(dp->panel);
 
 	max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel);
 
 	dp->dp_display.max_pclk_khz = min(max_pclk_from_edid,
 		dp->parser->max_pclk_khz);
 
+notify:
 	dp_display_send_hpd_notification(dp, true);
 
 end:
@@ -515,7 +561,6 @@
 
 	dp->power->init(dp->power, flip);
 	dp->ctrl->init(dp->ctrl, flip);
-	dp->aux->init(dp->aux, dp->parser->aux_cfg);
 	enable_irq(dp->irq);
 	dp->core_initialized = true;
 }
@@ -527,7 +572,6 @@
 		return;
 	}
 
-	dp->aux->deinit(dp->aux);
 	dp->ctrl->deinit(dp->ctrl);
 	dp->power->deinit(dp->power);
 	disable_irq(dp->irq);
@@ -548,6 +592,8 @@
 		dp->audio->off(dp->audio);
 
 	dp_display_send_hpd_notification(dp, false);
+
+	dp->aux->deinit(dp->aux);
 }
 
 static int dp_display_usbpd_configure_cb(struct device *dev)
@@ -616,6 +662,10 @@
 
 	rc = dp_display_send_hpd_notification(dp, false);
 
+	/* if cable is disconnected, reset psm_enabled flag */
+	if (!dp->usbpd->alt_mode_cfg_done)
+		dp->link->psm_enabled = false;
+
 	if ((rc < 0) && dp->power_on)
 		dp_display_clean(dp);
 
@@ -624,6 +674,17 @@
 	return rc;
 }
 
+static void dp_display_handle_video_request(struct dp_display_private *dp)
+{
+	if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+		/* force disconnect followed by connect */
+		dp->usbpd->connect(dp->usbpd, false);
+		dp->panel->video_test = true;
+		dp->usbpd->connect(dp->usbpd, true);
+		dp->link->send_test_response(dp->link);
+	}
+}
+
 static int dp_display_handle_hpd_irq(struct dp_display_private *dp)
 {
 	if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
@@ -639,6 +700,8 @@
 
 	dp->ctrl->handle_sink_request(dp->ctrl);
 
+	dp_display_handle_video_request(dp);
+
 	return 0;
 }
 
@@ -686,6 +749,20 @@
 	return rc;
 }
 
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+	dp_audio_put(dp->audio);
+	dp_ctrl_put(dp->ctrl);
+	dp_link_put(dp->link);
+	dp_panel_put(dp->panel);
+	dp_aux_put(dp->aux);
+	dp_power_put(dp->power);
+	dp_catalog_put(dp->catalog);
+	dp_parser_put(dp->parser);
+	dp_usbpd_put(dp->usbpd);
+	dp_debug_put(dp->debug);
+}
+
 static int dp_init_sub_modules(struct dp_display_private *dp)
 {
 	int rc = 0;
@@ -694,6 +771,9 @@
 	struct dp_ctrl_in ctrl_in = {
 		.dev = dev,
 	};
+	struct dp_panel_in panel_in = {
+		.dev = dev,
+	};
 
 	cb->configure  = dp_display_usbpd_configure_cb;
 	cb->disconnect = dp_display_usbpd_disconnect_cb;
@@ -703,49 +783,60 @@
 	if (IS_ERR(dp->usbpd)) {
 		rc = PTR_ERR(dp->usbpd);
 		pr_err("failed to initialize usbpd, rc = %d\n", rc);
-		goto err;
+		dp->usbpd = NULL;
+		goto error;
 	}
 
 	dp->parser = dp_parser_get(dp->pdev);
 	if (IS_ERR(dp->parser)) {
 		rc = PTR_ERR(dp->parser);
 		pr_err("failed to initialize parser, rc = %d\n", rc);
-		goto err;
+		dp->parser = NULL;
+		goto error_parser;
 	}
 
 	dp->catalog = dp_catalog_get(dev, &dp->parser->io);
 	if (IS_ERR(dp->catalog)) {
 		rc = PTR_ERR(dp->catalog);
 		pr_err("failed to initialize catalog, rc = %d\n", rc);
-		goto err;
+		dp->catalog = NULL;
+		goto error_catalog;
 	}
 
 	dp->power = dp_power_get(dp->parser);
 	if (IS_ERR(dp->power)) {
 		rc = PTR_ERR(dp->power);
 		pr_err("failed to initialize power, rc = %d\n", rc);
-		goto err;
+		dp->power = NULL;
+		goto error_power;
 	}
 
 	dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser->aux_cfg);
 	if (IS_ERR(dp->aux)) {
 		rc = PTR_ERR(dp->aux);
 		pr_err("failed to initialize aux, rc = %d\n", rc);
-		goto err;
-	}
-
-	dp->panel = dp_panel_get(dev, dp->aux, &dp->catalog->panel);
-	if (IS_ERR(dp->panel)) {
-		rc = PTR_ERR(dp->panel);
-		pr_err("failed to initialize panel, rc = %d\n", rc);
-		goto err;
+		dp->aux = NULL;
+		goto error_aux;
 	}
 
 	dp->link = dp_link_get(dev, dp->aux);
 	if (IS_ERR(dp->link)) {
 		rc = PTR_ERR(dp->link);
 		pr_err("failed to initialize link, rc = %d\n", rc);
-		goto err;
+		dp->link = NULL;
+		goto error_link;
+	}
+
+	panel_in.aux = dp->aux;
+	panel_in.catalog = &dp->catalog->panel;
+	panel_in.link = dp->link;
+
+	dp->panel = dp_panel_get(&panel_in);
+	if (IS_ERR(dp->panel)) {
+		rc = PTR_ERR(dp->panel);
+		pr_err("failed to initialize panel, rc = %d\n", rc);
+		dp->panel = NULL;
+		goto error_panel;
 	}
 
 	ctrl_in.link = dp->link;
@@ -759,13 +850,16 @@
 	if (IS_ERR(dp->ctrl)) {
 		rc = PTR_ERR(dp->ctrl);
 		pr_err("failed to initialize ctrl, rc = %d\n", rc);
-		goto err;
+		dp->ctrl = NULL;
+		goto error_ctrl;
 	}
 
 	dp->audio = dp_audio_get(dp->pdev, dp->panel, &dp->catalog->audio);
 	if (IS_ERR(dp->audio)) {
 		rc = PTR_ERR(dp->audio);
 		pr_err("failed to initialize audio, rc = %d\n", rc);
+		dp->audio = NULL;
+		goto error_audio;
 	}
 
 	dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
@@ -773,9 +867,30 @@
 	if (IS_ERR(dp->debug)) {
 		rc = PTR_ERR(dp->debug);
 		pr_err("failed to initialize debug, rc = %d\n", rc);
-		goto err;
+		dp->debug = NULL;
+		goto error_debug;
 	}
-err:
+
+	return rc;
+error_debug:
+	dp_audio_put(dp->audio);
+error_audio:
+	dp_ctrl_put(dp->ctrl);
+error_ctrl:
+	dp_panel_put(dp->panel);
+error_panel:
+	dp_link_put(dp->link);
+error_link:
+	dp_aux_put(dp->aux);
+error_aux:
+	dp_power_put(dp->power);
+error_power:
+	dp_catalog_put(dp->catalog);
+error_catalog:
+	dp_parser_put(dp->parser);
+error_parser:
+	dp_usbpd_put(dp->usbpd);
+error:
 	return rc;
 }
 
@@ -883,6 +998,10 @@
 			dp->hdcp.ops->off(dp->hdcp.data);
 	}
 
+	if (dp->usbpd->alt_mode_cfg_done && (dp->usbpd->hpd_high ||
+		dp->usbpd->forced_disconnect))
+		dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+
 	dp->ctrl->push_idle(dp->ctrl);
 error:
 	return rc;
@@ -969,19 +1088,58 @@
 	return 0;
 }
 
-static int dp_display_get_modes(struct dp_display *dp)
+static int dp_display_get_modes(struct dp_display *dp,
+	struct dp_display_mode *dp_mode)
 {
-	int ret = 0;
 	struct dp_display_private *dp_display;
+	int ret = 0;
+
+	if (!dp) {
+		pr_err("invalid params\n");
+		return 0;
+	}
 
 	dp_display = container_of(dp, struct dp_display_private, dp_display);
 
-	ret = _sde_edid_update_modes(dp->connector,
-		dp_display->panel->edid_ctrl);
-
+	ret = dp_display->panel->get_modes(dp_display->panel,
+		dp->connector, dp_mode);
+	if (dp_mode->timing.pixel_clk_khz)
+		dp->max_pclk_khz = dp_mode->timing.pixel_clk_khz;
 	return ret;
 }
 
+static bool dp_display_check_video_test(struct dp_display *dp)
+{
+	struct dp_display_private *dp_display;
+
+	if (!dp) {
+		pr_err("invalid params\n");
+		return false;
+	}
+
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+	if (dp_display->panel->video_test)
+		return true;
+
+	return false;
+}
+
+static int dp_display_get_test_bpp(struct dp_display *dp)
+{
+	struct dp_display_private *dp_display;
+
+	if (!dp) {
+		pr_err("invalid params\n");
+		return 0;
+	}
+
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+	return dp_link_bit_depth_to_bpp(
+		dp_display->link->test_video.test_bit_depth);
+}
+
 static int dp_display_probe(struct platform_device *pdev)
 {
 	int rc = 0;
@@ -1022,10 +1180,16 @@
 	g_dp_display->unprepare     = dp_display_unprepare;
 	g_dp_display->request_irq   = dp_request_irq;
 	g_dp_display->get_debug     = dp_get_debug;
+	g_dp_display->send_hpd_event    = dp_display_send_hpd_event;
+	g_dp_display->is_video_test = dp_display_check_video_test;
+	g_dp_display->get_test_bpp = dp_display_get_test_bpp;
 
 	rc = component_add(&pdev->dev, &dp_display_comp_ops);
-	if (rc)
+	if (rc) {
 		pr_err("component add failed, rc=%d\n", rc);
+		dp_display_deinit_sub_modules(dp);
+		devm_kfree(&pdev->dev, dp);
+	}
 
 	return rc;
 }
@@ -1051,20 +1215,6 @@
 	return 1;
 }
 
-static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
-{
-	dp_audio_put(dp->audio);
-	dp_ctrl_put(dp->ctrl);
-	dp_link_put(dp->link);
-	dp_panel_put(dp->panel);
-	dp_aux_put(dp->aux);
-	dp_power_put(dp->power);
-	dp_catalog_put(dp->catalog);
-	dp_parser_put(dp->parser);
-	dp_usbpd_put(dp->usbpd);
-	dp_debug_put(dp->debug);
-}
-
 static int dp_display_remove(struct platform_device *pdev)
 {
 	struct dp_display_private *dp;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 5943629..5539d61 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -19,11 +19,6 @@
 
 #include "dp_panel.h"
 
-struct dp_display_mode {
-	struct dp_panel_info timing;
-	u32 capabilities;
-};
-
 struct dp_display {
 	struct drm_device *drm_dev;
 	struct dp_bridge *bridge;
@@ -41,11 +36,15 @@
 			struct dp_display_mode *mode);
 	int (*validate_mode)(struct dp_display *dp_display,
 			struct dp_display_mode *mode);
-	int (*get_modes)(struct dp_display *dp_display);
+	int (*get_modes)(struct dp_display *dp_display,
+		struct dp_display_mode *dp_mode);
 	int (*prepare)(struct dp_display *dp_display);
 	int (*unprepare)(struct dp_display *dp_display);
 	int (*request_irq)(struct dp_display *dp_display);
 	struct dp_debug *(*get_debug)(struct dp_display *dp_display);
+	void (*send_hpd_event)(struct dp_display *dp_display);
+	bool (*is_video_test)(struct dp_display *dp_display);
+	int (*get_test_bpp)(struct dp_display *dp_display);
 };
 
 int dp_display_get_num_of_displays(void);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 802f295..06f8558 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -48,7 +48,15 @@
 
 	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
 					 drm_mode->vdisplay;
-	dp_mode->timing.bpp = dp->connector->display_info.bpc * num_components;
+
+	if (dp->is_video_test(dp))
+		dp_mode->timing.bpp = dp->get_test_bpp(dp);
+	else
+		dp_mode->timing.bpp = dp->connector->display_info.bpc *
+		num_components;
+
+	if (!dp_mode->timing.bpp)
+		dp_mode->timing.bpp = 24;
 
 	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
 
@@ -375,26 +383,65 @@
 	return status;
 }
 
+void dp_connector_send_hpd_event(void *display)
+{
+	struct dp_display *dp;
+
+	if (!display) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp = display;
+
+	if (dp->send_hpd_event)
+		dp->send_hpd_event(dp);
+}
+
 int dp_connector_get_modes(struct drm_connector *connector,
 		void *display)
 {
 	int rc = 0;
 	struct dp_display *dp;
+	struct dp_display_mode *dp_mode = NULL;
+	struct drm_display_mode *m, drm_mode;
 
 	if (!connector || !display)
-		return -EINVAL;
+		return 0;
 
 	dp = display;
+
+	dp_mode = kzalloc(sizeof(*dp_mode), GFP_KERNEL);
+	if (!dp_mode)
+		return 0;
+
 	/* pluggable case assumes EDID is read when HPD */
 	if (dp->is_connected) {
-		rc = dp->get_modes(dp);
+		rc = dp->get_modes(dp, dp_mode);
 		if (!rc)
 			pr_err("failed to get DP sink modes, rc=%d\n", rc);
+
+		if (dp_mode->timing.pixel_clk_khz) { /* valid DP mode */
+			memset(&drm_mode, 0x0, sizeof(drm_mode));
+			convert_to_drm_mode(dp_mode, &drm_mode);
+			m = drm_mode_duplicate(connector->dev, &drm_mode);
+			if (!m) {
+				pr_err("failed to add mode %ux%u\n",
+				       drm_mode.hdisplay,
+				       drm_mode.vdisplay);
+				kfree(dp_mode);
+				return 0;
+			}
+			m->width_mm = connector->display_info.width_mm;
+			m->height_mm = connector->display_info.height_mm;
+			drm_mode_probed_add(connector, m);
+		}
 	} else {
 		pr_err("No sink connected\n");
 	}
+	kfree(dp_mode);
 
-	return 0;
+	return rc;
 }
 
 int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
@@ -476,6 +523,13 @@
 		else
 			return MODE_ERROR;
 	} else {
+		if (mode->vrefresh == 0) {
+			int vrefresh = (mode->clock * 1000) /
+				(mode->vtotal * mode->htotal);
+			if (vrefresh > 60)
+				return MODE_BAD;
+		}
+
 		if (mode->clock > dp_disp->max_pclk_khz)
 			return MODE_BAD;
 		else
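
The vrefresh == 0 fallback above recomputes the refresh rate from the pixel clock (in kHz) and the total raster size before comparing against the 60 Hz cap. A worked example with the standard CEA-861 1080p60 timing (clock = 148500 kHz, htotal = 2200, vtotal = 1125):

	/*
	 *   vrefresh = (148500 * 1000) / (1125 * 2200)
	 *            = 148500000 / 2475000
	 *            = 60 Hz  -> not greater than 60, so the mode is kept
	 */
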
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 5918df1..53570f5 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -86,6 +86,8 @@
 
 int dp_connector_get_info(struct msm_display_info *info, void *display);
 
+void dp_connector_send_hpd_event(void *display);
+
 int dp_drm_bridge_init(void *display,
 	struct drm_encoder *encoder);
 
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index b62c1e9..0cf488d 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -17,12 +17,6 @@
 #include "dp_link.h"
 #include "dp_panel.h"
 
-enum dp_lane_count {
-	DP_LANE_COUNT_1	= 1,
-	DP_LANE_COUNT_2	= 2,
-	DP_LANE_COUNT_4	= 4,
-};
-
 enum dynamic_range {
 	DP_DYNAMIC_RANGE_RGB_VESA = 0x00,
 	DP_DYNAMIC_RANGE_RGB_CEA = 0x01,
@@ -60,42 +54,6 @@
 	u8 link_status[DP_LINK_STATUS_SIZE];
 };
 
-/**
- * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp
- * @tbd: test bit depth
- *
- * Returns the bits per pixel (bpp) to be used corresponding to the
- * git bit depth value. This function assumes that bit depth has
- * already been validated.
- */
-static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
-{
-	u32 bpp;
-
-	/*
-	 * Few simplistic rules and assumptions made here:
-	 *    1. Bit depth is per color component
-	 *    2. If bit depth is unknown return 0
-	 *    3. Assume 3 color components
-	 */
-	switch (tbd) {
-	case DP_TEST_BIT_DEPTH_6:
-		bpp = 18;
-		break;
-	case DP_TEST_BIT_DEPTH_8:
-		bpp = 24;
-		break;
-	case DP_TEST_BIT_DEPTH_10:
-		bpp = 30;
-		break;
-	case DP_TEST_BIT_DEPTH_UNKNOWN:
-	default:
-		bpp = 0;
-	}
-
-	return bpp;
-}
-
 static char *dp_link_get_audio_test_pattern(u32 pattern)
 {
 	switch (pattern) {
@@ -659,33 +617,6 @@
 }
 
 /**
- * dp_link_is_link_rate_valid() - validates the link rate
- * @lane_rate: link rate requested by the sink
- *
- * Returns true if the requested link rate is supported.
- */
-static bool dp_link_is_link_rate_valid(u32 bw_code)
-{
-	return ((bw_code == DP_LINK_BW_1_62) ||
-		(bw_code == DP_LINK_BW_2_7) ||
-		(bw_code == DP_LINK_BW_5_4) ||
-		(bw_code == DP_LINK_BW_8_1));
-}
-
-/**
- * dp_link_is_lane_count_valid() - validates the lane count
- * @lane_count: lane count requested by the sink
- *
- * Returns true if the requested lane count is supported.
- */
-static bool dp_link_is_lane_count_valid(u32 lane_count)
-{
-	return (lane_count == DP_LANE_COUNT_1) ||
-		(lane_count == DP_LANE_COUNT_2) ||
-		(lane_count == DP_LANE_COUNT_4);
-}
-
-/**
  * dp_link_parse_link_training_params() - parses link training parameters from
  * DPCD
  * @link: Display Port Driver data
@@ -710,7 +641,7 @@
 	}
 	data = bp;
 
-	if (!dp_link_is_link_rate_valid(data)) {
+	if (!is_link_rate_valid(data)) {
 		pr_err("invalid link rate = 0x%x\n", data);
 		ret = -EINVAL;
 		goto exit;
@@ -729,7 +660,7 @@
 	data = bp;
 	data &= 0x1F;
 
-	if (!dp_link_is_lane_count_valid(data)) {
+	if (!is_lane_count_valid(data)) {
 		pr_err("invalid lane count = 0x%x\n", data);
 		ret = -EINVAL;
 		goto exit;
@@ -1021,7 +952,6 @@
 static void dp_link_send_test_response(struct dp_link *dp_link)
 {
 	struct dp_link_private *link = NULL;
-	u32 const test_response_addr = 0x260;
 	u32 const response_len = 0x1;
 
 	if (!dp_link) {
@@ -1031,10 +961,52 @@
 
 	link = container_of(dp_link, struct dp_link_private, dp_link);
 
-	drm_dp_dpcd_write(link->aux->drm_aux, test_response_addr,
+	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_RESPONSE,
 			&dp_link->test_response, response_len);
 }
 
+static int dp_link_psm_config(struct dp_link *dp_link,
+	struct drm_dp_link *link_info, bool enable)
+{
+	struct dp_link_private *link = NULL;
+	int ret = 0;
+
+	if (!dp_link) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	if (enable)
+		ret = drm_dp_link_power_down(link->aux->drm_aux, link_info);
+	else
+		ret = drm_dp_link_power_up(link->aux->drm_aux, link_info);
+
+	if (ret)
+		pr_err("Failed to %s low power mode\n",
+			(enable ? "enter" : "exit"));
+	else
+		dp_link->psm_enabled = enable;
+
+	return ret;
+}
+
+static void dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+	struct dp_link_private *link = NULL;
+	u32 const response_len = 0x1;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_EDID_CHECKSUM,
+			&checksum, response_len);
+}
 
 static int dp_link_parse_vx_px(struct dp_link_private *link)
 {
@@ -1131,8 +1103,8 @@
 	test_link_rate = link->request.test_link_rate;
 	test_lane_count = link->request.test_lane_count;
 
-	if (!dp_link_is_link_rate_valid(test_link_rate) ||
-		!dp_link_is_lane_count_valid(test_lane_count)) {
+	if (!is_link_rate_valid(test_link_rate) ||
+		!is_lane_count_valid(test_lane_count)) {
 		pr_err("Invalid params: link rate = 0x%x, lane count = 0x%x\n",
 				test_link_rate, test_lane_count);
 		return -EINVAL;
@@ -1555,6 +1527,8 @@
 	dp_link->adjust_levels          = dp_link_adjust_levels;
 	dp_link->send_psm_request       = dp_link_send_psm_request;
 	dp_link->send_test_response     = dp_link_send_test_response;
+	dp_link->psm_config             = dp_link_psm_config;
+	dp_link->send_edid_checksum     = dp_link_send_edid_checksum;
 
 	return dp_link;
 error:
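
The new psm_config() op replaces the open-coded drm_dp_link_power_up()/power_down() calls so that psm_enabled always tracks the sink power state: dp_ctrl calls it with enable=false before link training (see the dp_ctrl.c hunk above) and dp_display calls it with enable=true on disconnect. A minimal sketch of the wake-up side, assuming a valid drm_dp_link from the panel:

	static int dp_example_wake_sink(struct dp_link *link,
					struct drm_dp_link *link_info)
	{
		/* drives the sink back to D0 and clears link->psm_enabled */
		return link->psm_config(link, link_info, false);
	}
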
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 9a5b75a..b1d9249 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -86,6 +86,7 @@
 struct dp_link {
 	u32 sink_request;
 	u32 test_response;
+	bool psm_enabled;
 
 	struct dp_link_sink_count sink_count;
 	struct dp_link_test_video test_video;
@@ -99,6 +100,9 @@
 	int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
 	int (*send_psm_request)(struct dp_link *dp_link, bool req);
 	void (*send_test_response)(struct dp_link *dp_link);
+	int (*psm_config)(struct dp_link *dp_link,
+		struct drm_dp_link *link_info, bool enable);
+	void (*send_edid_checksum)(struct dp_link *dp_link, u8 checksum);
 };
 
 static inline char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
@@ -126,6 +130,42 @@
 }
 
 /**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+	u32 bpp;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		bpp = 18;
+		break;
+	case DP_TEST_BIT_DEPTH_8:
+		bpp = 24;
+		break;
+	case DP_TEST_BIT_DEPTH_10:
+		bpp = 30;
+		break;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		bpp = 0;
+	}
+
+	return bpp;
+}
+
+/**
  * dp_link_get() - get the functionalities of dp test module
  *
  *
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 56b2d37..fc3fb56 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -27,10 +27,28 @@
 	struct device *dev;
 	struct dp_panel dp_panel;
 	struct dp_aux *aux;
+	struct dp_link *link;
 	struct dp_catalog_panel *catalog;
 	bool aux_cfg_update_done;
 };
 
+static const struct dp_panel_info fail_safe = {
+	.h_active = 640,
+	.v_active = 480,
+	.h_back_porch = 48,
+	.h_front_porch = 16,
+	.h_sync_width = 96,
+	.h_active_low = 0,
+	.v_back_porch = 33,
+	.v_front_porch = 10,
+	.v_sync_width = 2,
+	.v_active_low = 0,
+	.h_skew = 0,
+	.refresh_rate = 60,
+	.pixel_clk_khz = 25200,
+	.bpp = 24,
+};
+
 static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
 {
 	int rlen, rc = 0;
@@ -100,6 +118,23 @@
 	return rc;
 }
 
+static int dp_panel_set_default_link_params(struct dp_panel *dp_panel)
+{
+	struct drm_dp_link *link_info;
+	const int default_bw_code = 162000;
+	const int default_num_lanes = 1;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+	link_info = &dp_panel->link_info;
+	link_info->rate = default_bw_code;
+	link_info->num_lanes = default_num_lanes;
+	pr_debug("link_rate=%d num_lanes=%d\n",
+		link_info->rate, link_info->num_lanes);
+	return 0;
+}
 
 static int dp_panel_read_edid(struct dp_panel *dp_panel,
 	struct drm_connector *connector)
@@ -145,14 +180,18 @@
 	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
 	rc = dp_panel_read_dpcd(dp_panel);
-	if (rc) {
-		pr_err("panel dpcd read failed\n");
-		return rc;
+	if (rc || !is_link_rate_valid(drm_dp_link_rate_to_bw_code(
+		dp_panel->link_info.rate)) || !is_lane_count_valid(
+		dp_panel->link_info.num_lanes) ||
+		((drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate)) >
+		dp_panel->max_bw_code)) {
+		pr_err("panel dpcd read failed/incorrect, set default params\n");
+		dp_panel_set_default_link_params(dp_panel);
 	}
 
 	rc = dp_panel_read_edid(dp_panel, connector);
 	if (rc) {
-		pr_err("panel edid read failed\n");
+		pr_err("panel edid read failed, set failsafe mode\n");
 		return rc;
 	}
 
@@ -193,6 +232,94 @@
 	return max_pclk_rate_khz;
 }
 
+static void dp_panel_set_test_mode(struct dp_panel_private *panel,
+		struct dp_display_mode *mode)
+{
+	struct dp_panel_info *pinfo = NULL;
+	struct dp_link_test_video *test_info = NULL;
+
+	if (!panel) {
+		pr_err("invalid params\n");
+		return;
+	}
+
+	pinfo = &mode->timing;
+	test_info = &panel->link->test_video;
+
+	pinfo->h_active = test_info->test_h_width;
+	pinfo->h_sync_width = test_info->test_hsync_width;
+	pinfo->h_back_porch = test_info->test_h_start -
+		test_info->test_hsync_width;
+	pinfo->h_front_porch = test_info->test_h_total -
+		(test_info->test_h_start + test_info->test_h_width);
+
+	pinfo->v_active = test_info->test_v_height;
+	pinfo->v_sync_width = test_info->test_vsync_width;
+	pinfo->v_back_porch = test_info->test_v_start -
+		test_info->test_vsync_width;
+	pinfo->v_front_porch = test_info->test_v_total -
+		(test_info->test_v_start + test_info->test_v_height);
+
+	pinfo->bpp = dp_link_bit_depth_to_bpp(test_info->test_bit_depth);
+	pinfo->h_active_low = test_info->test_hsync_pol;
+	pinfo->v_active_low = test_info->test_vsync_pol;
+
+	pinfo->refresh_rate = test_info->test_rr_n;
+	pinfo->pixel_clk_khz = test_info->test_h_total *
+		test_info->test_v_total * pinfo->refresh_rate;
+
+	if (test_info->test_rr_d == 0)
+		pinfo->pixel_clk_khz /= 1000;
+	else
+		pinfo->pixel_clk_khz /= 1001;
+
+	if (test_info->test_h_width == 640)
+		pinfo->pixel_clk_khz = 25170;
+}
+
+static int dp_panel_get_modes(struct dp_panel *dp_panel,
+	struct drm_connector *connector, struct dp_display_mode *mode)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (dp_panel->video_test) {
+		dp_panel_set_test_mode(panel, mode);
+		return 1;
+	} else if (dp_panel->edid_ctrl->edid) {
+		return _sde_edid_update_modes(connector, dp_panel->edid_ctrl);
+	} else { /* fail-safe mode */
+		memcpy(&mode->timing, &fail_safe,
+			sizeof(fail_safe));
+		return 1;
+	}
+}
+
+static void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+		u8 checksum = sde_get_edid_checksum(dp_panel->edid_ctrl);
+
+		panel->link->send_edid_checksum(panel->link, checksum);
+		panel->link->send_test_response(panel->link);
+	}
+}
+
 static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
 {
 	int rc = 0;
@@ -351,31 +478,32 @@
 	return min_link_rate_khz;
 }
 
-struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
-				struct dp_catalog_panel *catalog)
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
 {
 	int rc = 0;
 	struct dp_panel_private *panel;
 	struct dp_panel *dp_panel;
 
-	if (!dev || !aux || !catalog) {
+	if (!in->dev || !in->catalog || !in->aux || !in->link) {
 		pr_err("invalid input\n");
 		rc = -EINVAL;
 		goto error;
 	}
 
-	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+	panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
 	if (!panel) {
 		rc = -ENOMEM;
 		goto error;
 	}
 
-	panel->dev = dev;
-	panel->aux = aux;
-	panel->catalog = catalog;
+	panel->dev = in->dev;
+	panel->aux = in->aux;
+	panel->catalog = in->catalog;
+	panel->link = in->link;
 
 	dp_panel = &panel->dp_panel;
 	panel->aux_cfg_update_done = false;
+	dp_panel->max_bw_code = DP_LINK_BW_8_1;
 
 	dp_panel->sde_edid_register = dp_panel_edid_register;
 	dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
@@ -384,6 +512,8 @@
 	dp_panel->read_sink_caps = dp_panel_read_sink_caps;
 	dp_panel->get_min_req_link_rate = dp_panel_get_min_req_link_rate;
 	dp_panel->get_max_pclk = dp_panel_get_max_pclk;
+	dp_panel->get_modes = dp_panel_get_modes;
+	dp_panel->handle_sink_request = dp_panel_handle_sink_request;
 
 	return dp_panel;
 error:
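
As a sanity check on dp_panel_set_test_mode() above, plugging the fail_safe geometry into the same formula reproduces its 25200 kHz pixel clock; the 1001 divisor covers fractional 1000/1001 refresh rates, and 640-wide test patterns are then pinned to 25170 kHz as the code shows:

	/* test_rr_d == 0, 640x480@60 (fail_safe values):
	 *   h_total = 640 + 16 + 48 + 96 = 800
	 *   v_total = 480 + 10 + 33 + 2  = 525
	 *   pixel_clk_khz = 800 * 525 * 60 / 1000 = 25200
	 */
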
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index a94f4b8..01a978a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -16,8 +16,16 @@
 #define _DP_PANEL_H_
 
 #include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_usbpd.h"
 #include "sde_edid_parser.h"
 
+enum dp_lane_count {
+	DP_LANE_COUNT_1	= 1,
+	DP_LANE_COUNT_2	= 2,
+	DP_LANE_COUNT_4	= 4,
+};
+
 #define DP_MAX_DOWNSTREAM_PORTS 0x10
 
 struct dp_panel_info {
@@ -37,6 +45,18 @@
 	u32 bpp;
 };
 
+struct dp_display_mode {
+	struct dp_panel_info timing;
+	u32 capabilities;
+};
+
+struct dp_panel_in {
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_link *link;
+	struct dp_catalog_panel *catalog;
+};
+
 struct dp_panel {
 	/* dpcd raw data */
 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -46,10 +66,14 @@
 	struct sde_edid_ctrl *edid_ctrl;
 	struct drm_connector *connector;
 	struct dp_panel_info pinfo;
+	bool video_test;
 
 	u32 vic;
 	u32 max_pclk_khz;
 
+	/* debug */
+	u32 max_bw_code;
+
 	int (*sde_edid_register)(struct dp_panel *dp_panel);
 	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
 	int (*init_info)(struct dp_panel *dp_panel);
@@ -58,9 +82,38 @@
 		struct drm_connector *connector);
 	u32 (*get_min_req_link_rate)(struct dp_panel *dp_panel);
 	u32 (*get_max_pclk)(struct dp_panel *dp_panel);
+	int (*get_modes)(struct dp_panel *dp_panel,
+		struct drm_connector *connector, struct dp_display_mode *mode);
+	void (*handle_sink_request)(struct dp_panel *dp_panel);
 };
 
-struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
-				struct dp_catalog_panel *catalog);
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link bandwidth code requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+	return ((bw_code == DP_LINK_BW_1_62) ||
+		(bw_code == DP_LINK_BW_2_7) ||
+		(bw_code == DP_LINK_BW_5_4) ||
+		(bw_code == DP_LINK_BW_8_1));
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == DP_LANE_COUNT_1) ||
+		(lane_count == DP_LANE_COUNT_2) ||
+		(lane_count == DP_LANE_COUNT_4);
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
 void dp_panel_put(struct dp_panel *dp_panel);
 #endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 60c8966..eb787fa 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -612,8 +612,12 @@
 
 void dp_power_put(struct dp_power *dp_power)
 {
-	struct dp_power_private *power = container_of(dp_power,
-			struct dp_power_private, dp_power);
+	struct dp_power_private *power = NULL;
+
+	if (!dp_power)
+		return;
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
 
 	devm_kfree(&power->pdev->dev, power);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 27cf147..25d035d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -79,6 +79,8 @@
 #define MMSS_DP_PSR_CRC_RG			(0x00000554)
 #define MMSS_DP_PSR_CRC_B			(0x00000558)
 
+#define DP_COMPRESSION_MODE_CTRL		(0x00000580)
+
 #define MMSS_DP_AUDIO_CFG			(0x00000600)
 #define MMSS_DP_AUDIO_STATUS			(0x00000604)
 #define MMSS_DP_AUDIO_PKT_CTRL			(0x00000608)
@@ -141,6 +143,17 @@
 #define MMSS_DP_GENERIC1_8			(0x00000748)
 #define MMSS_DP_GENERIC1_9			(0x0000074C)
 
+#define MMSS_DP_VSCEXT_0			(0x000006D0)
+#define MMSS_DP_VSCEXT_1			(0x000006D4)
+#define MMSS_DP_VSCEXT_2			(0x000006D8)
+#define MMSS_DP_VSCEXT_3			(0x000006DC)
+#define MMSS_DP_VSCEXT_4			(0x000006E0)
+#define MMSS_DP_VSCEXT_5			(0x000006E4)
+#define MMSS_DP_VSCEXT_6			(0x000006E8)
+#define MMSS_DP_VSCEXT_7			(0x000006EC)
+#define MMSS_DP_VSCEXT_8			(0x000006F0)
+#define MMSS_DP_VSCEXT_9			(0x000006F4)
+
 #define MMSS_DP_TIMING_ENGINE_EN		(0x00000A10)
 #define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000A88)
 
@@ -166,6 +179,7 @@
 #define DP_PHY_AUX_CFG9                         (0x00000044)
 #define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
 #define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
+#define DP_PHY_AUX_INTERRUPT_STATUS             (0x000000BC)
 
 #define DP_PHY_SPARE0				(0x00AC)
 
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index c0bcdda..98781abb 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -73,8 +73,6 @@
 	struct dp_usbpd dp_usbpd;
 	enum dp_usbpd_alt_mode alt_mode;
 	u32 dp_usbpd_config;
-
-	bool forced_disconnect;
 };
 
 static const char *dp_usbpd_pin_name(u8 pin)
@@ -347,7 +345,7 @@
 		dp_usbpd_send_event(pd, DP_USBPD_EVT_STATUS);
 		break;
 	case USBPD_SVDM_ATTENTION:
-		if (pd->forced_disconnect)
+		if (pd->dp_usbpd.forced_disconnect)
 			break;
 
 		pd->vdo = *vdos;
@@ -412,7 +410,7 @@
 	pd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
 
 	dp_usbpd->hpd_high = hpd;
-	pd->forced_disconnect = !hpd;
+	dp_usbpd->forced_disconnect = !hpd;
 
 	if (hpd)
 		pd->dp_cb->configure(pd->dev);
@@ -466,7 +464,7 @@
 	if (rc) {
 		pr_err("pd registration failed\n");
 		rc = -ENODEV;
-		kfree(usbpd);
+		devm_kfree(dev, usbpd);
 		goto error;
 	}
 
@@ -487,5 +485,7 @@
 
 	usbpd = container_of(dp_usbpd, struct dp_usbpd_private, dp_usbpd);
 
-	kfree(usbpd);
+	usbpd_unregister_svid(usbpd->pd, &usbpd->svid_handler);
+
+	devm_kfree(usbpd->dev, usbpd);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index 2682e98..5b392f5 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -63,6 +63,7 @@
 	bool hpd_irq;
 	bool alt_mode_cfg_done;
 	bool debug_en;
+	bool forced_disconnect;
 
 	int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
 };
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
index e9dabb1..5d9d21f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -28,6 +28,7 @@
 	ctrl->ops.video_engine_en        = dsi_ctrl_hw_cmn_video_engine_en;
 	ctrl->ops.video_engine_setup     = dsi_ctrl_hw_cmn_video_engine_setup;
 	ctrl->ops.set_video_timing       = dsi_ctrl_hw_cmn_set_video_timing;
+	ctrl->ops.set_timing_db          = dsi_ctrl_hw_cmn_set_timing_db;
 	ctrl->ops.cmd_engine_setup       = dsi_ctrl_hw_cmn_cmd_engine_setup;
 	ctrl->ops.setup_cmd_stream       = dsi_ctrl_hw_cmn_setup_cmd_stream;
 	ctrl->ops.ctrl_en                = dsi_ctrl_hw_cmn_ctrl_en;
@@ -58,6 +59,9 @@
 	ctrl->ops.phy_reset_config = dsi_ctrl_hw_cmn_phy_reset_config;
 	ctrl->ops.setup_misr = dsi_ctrl_hw_cmn_setup_misr;
 	ctrl->ops.collect_misr = dsi_ctrl_hw_cmn_collect_misr;
+	ctrl->ops.debug_bus = dsi_ctrl_hw_cmn_debug_bus;
+	ctrl->ops.get_cmd_read_data = dsi_ctrl_hw_cmn_get_cmd_read_data;
+	ctrl->ops.clear_rdbk_register = dsi_ctrl_hw_cmn_clear_rdbk_reg;
 
 	switch (version) {
 	case DSI_CTRL_VERSION_1_4:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index e8d594c..186a5b5 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -104,6 +104,7 @@
 
 /* DSI controller common ops */
 u32 dsi_ctrl_hw_cmn_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
 void dsi_ctrl_hw_cmn_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
 					     u32 ints);
@@ -132,7 +133,8 @@
 				       struct dsi_video_engine_cfg *cfg);
 void dsi_ctrl_hw_cmn_set_video_timing(struct dsi_ctrl_hw *ctrl,
 			 struct dsi_mode_info *mode);
-
+void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl,
+				     bool enable);
 void dsi_ctrl_hw_cmn_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_host_common_cfg *common_cfg,
 				     struct dsi_cmd_engine_cfg *cfg);
@@ -168,6 +170,12 @@
 			bool enable);
 void dsi_ctrl_hw_22_phy_reset_config(struct dsi_ctrl_hw *ctrl,
 			bool enable);
+u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+				     u8 *rd_buf,
+				     u32 read_offset,
+				     u32 rx_byte,
+				     u32 pkt_size, u32 *hw_read_cnt);
+void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl);
 
 /* Definitions specific to 1.4 DSI controller hardware */
 int dsi_ctrl_hw_14_wait_for_lane_idle(struct dsi_ctrl_hw *ctrl, u32 lanes);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index dcde566..790ee22 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -38,6 +38,8 @@
 #define DSI_CTRL_TX_TO_MS     200
 
 #define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
+
+#define CEIL(x, y)              (((x) + ((y)-1)) / (y))
 /**
  * enum dsi_ctrl_driver_ops - controller driver ops
  */
@@ -884,12 +886,49 @@
 		buf[3] |= BIT(6);
 
 	buf[3] |= BIT(7);
+
+	/* send embedded BTA for read commands */
+	if ((buf[2] & 0x3f) == MIPI_DSI_DCS_READ)
+		buf[3] |= BIT(5);
+
 	*buffer = buf;
 	*size = len;
 
 	return rc;
 }
 
+static void dsi_ctrl_wait_for_video_done(struct dsi_ctrl *dsi_ctrl)
+{
+	u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0, ret;
+	struct dsi_mode_info *timing;
+
+	if (dsi_ctrl->host_config.panel_mode != DSI_OP_VIDEO_MODE)
+		return;
+
+	dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+				DSI_VIDEO_MODE_FRAME_DONE);
+
+	dsi_ctrl_enable_status_interrupt(dsi_ctrl,
+				DSI_SINT_VIDEO_MODE_FRAME_DONE, NULL);
+	reinit_completion(&dsi_ctrl->irq_info.vid_frame_done);
+	ret = wait_for_completion_timeout(
+			&dsi_ctrl->irq_info.vid_frame_done,
+			msecs_to_jiffies(DSI_CTRL_TX_TO_MS));
+	if (ret <= 0)
+		pr_debug("wait for video done failed\n");
+	dsi_ctrl_disable_status_interrupt(dsi_ctrl,
+				DSI_SINT_VIDEO_MODE_FRAME_DONE);
+
+	timing = &(dsi_ctrl->host_config.video_timing);
+	v_total = timing->v_sync_width + timing->v_back_porch +
+			timing->v_front_porch + timing->v_active;
+	v_blank = timing->v_sync_width + timing->v_back_porch;
+	fps = timing->refresh_rate;
+
+	sleep_ms = CEIL((v_blank * 1000), (v_total * fps)) + 1;
+	udelay(sleep_ms * 1000);
+}
+
 static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
@@ -971,6 +1010,7 @@
 	}
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
+		dsi_ctrl_wait_for_video_done(dsi_ctrl);
 		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
 					DSI_SINT_CMD_MODE_DMA_DONE, NULL);
 		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);
@@ -1026,6 +1066,7 @@
 {
 	int rc = 0;
 	u8 tx[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
+	u32 flags = DSI_CTRL_CMD_FETCH_MEMORY;
 	struct mipi_dsi_msg msg = {
 		.channel = rx_msg->channel,
 		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
@@ -1033,24 +1074,77 @@
 		.tx_buf = tx,
 	};
 
-	rc = dsi_message_tx(dsi_ctrl, &msg, 0x0);
+	rc = dsi_message_tx(dsi_ctrl, &msg, flags);
 	if (rc)
 		pr_err("failed to send max return size packet, rc=%d\n", rc);
 
 	return rc;
 }
 
+/* Helper functions to support DCS read operation */
+static int dsi_parse_short_read1_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	u8 *data = msg->rx_buf;
+	int read_len = 1;
+
+	if (!data)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_len >= 1)
+		data[0] = buff[1];
+	else
+		read_len = 0;
+
+	return read_len;
+}
+
+static int dsi_parse_short_read2_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	u8 *data = msg->rx_buf;
+	int read_len = 2;
+
+	if (!data)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_len >= 2) {
+		data[0] = buff[1];
+		data[1] = buff[2];
+	} else {
+		read_len = 0;
+	}
+
+	return read_len;
+}
+
+static int dsi_parse_long_read_resp(const struct mipi_dsi_msg *msg,
+		unsigned char *buff)
+{
+	if (!msg->rx_buf)
+		return 0;
+
+	/* remove dcs type */
+	if (msg->rx_buf && msg->rx_len)
+		memcpy(msg->rx_buf, buff + 4, msg->rx_len);
+
+	return msg->rx_len;
+}
+
 static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
 			  const struct mipi_dsi_msg *msg,
 			  u32 flags)
 {
 	int rc = 0;
-	u32 rd_pkt_size;
-	u32 total_read_len;
-	u32 bytes_read = 0, tot_bytes_read = 0;
-	u32 current_read_len;
+	u32 rd_pkt_size, total_read_len, hw_read_cnt;
+	u32 current_read_len = 0, total_bytes_read = 0;
 	bool short_resp = false;
 	bool read_done = false;
+	u32 dlen, diff, rlen = msg->rx_len;
+	unsigned char *buff;
+	char cmd;
 
 	if (msg->rx_len <= 2) {
 		short_resp = true;
@@ -1066,6 +1160,7 @@
 
 		total_read_len = current_read_len + 6;
 	}
+	buff = msg->rx_buf;
 
 	while (!read_done) {
 		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
@@ -1075,24 +1170,79 @@
 			goto error;
 		}
 
+		/* clear RDBK_DATA registers before proceeding */
+		dsi_ctrl->hw.ops.clear_rdbk_register(&dsi_ctrl->hw);
+
 		rc = dsi_message_tx(dsi_ctrl, msg, flags);
 		if (rc) {
 			pr_err("Message transmission failed, rc=%d\n", rc);
 			goto error;
 		}
 
+		dlen = dsi_ctrl->hw.ops.get_cmd_read_data(&dsi_ctrl->hw,
+					buff, total_bytes_read,
+					total_read_len, rd_pkt_size,
+					&hw_read_cnt);
+		if (!dlen)
+			goto error;
 
-		tot_bytes_read += bytes_read;
 		if (short_resp)
+			break;
+
+		if (rlen <= current_read_len) {
+			diff = current_read_len - rlen;
 			read_done = true;
-		else if (msg->rx_len <= tot_bytes_read)
-			read_done = true;
+		} else {
+			diff = 0;
+			rlen -= current_read_len;
+		}
+
+		dlen -= 2; /* 2 bytes of CRC */
+		dlen -= diff;
+		buff += dlen;
+		total_bytes_read += dlen;
+		if (!read_done) {
+			current_read_len = 14; /* Not first read */
+			if (rlen < current_read_len)
+				rd_pkt_size += rlen;
+			else
+				rd_pkt_size += current_read_len;
+		}
 	}
+
+	if (hw_read_cnt < 16 && !short_resp)
+		buff = msg->rx_buf + (16 - hw_read_cnt);
+	else
+		buff = msg->rx_buf;
+
+	/* parse the data read from panel */
+	cmd = buff[0];
+	switch (cmd) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		pr_err("Rx ACK_ERROR\n");
+		rc = 0;
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		rc = dsi_parse_short_read1_resp(msg, buff);
+		break;
+	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		rc = dsi_parse_short_read2_resp(msg, buff);
+		break;
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		rc = dsi_parse_long_read_resp(msg, buff);
+		break;
+	default:
+		pr_warn("Invalid response\n");
+		rc = 0;
+	}
+
 error:
 	return rc;
 }
 
-
 static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
@@ -1193,7 +1343,9 @@
 
 		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace);
 
+		mutex_lock(&dsi_ctrl->drm_dev->struct_mutex);
 		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
+		mutex_unlock(&dsi_ctrl->drm_dev->struct_mutex);
 		dsi_ctrl->tx_cmd_buf = NULL;
 	}
 
@@ -1429,6 +1581,26 @@
 	},
 };
 
+#if defined(CONFIG_DEBUG_FS)
+
+void dsi_ctrl_debug_dump(void)
+{
+	struct list_head *pos, *tmp;
+	struct dsi_ctrl *ctrl = NULL;
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+		struct dsi_ctrl_list_item *n;
+
+		n = list_entry(pos, struct dsi_ctrl_list_item, list);
+		ctrl = n->ctrl;
+		pr_err("dsi ctrl:%d\n", ctrl->cell_index);
+		ctrl->hw.ops.debug_bus(&ctrl->hw);
+	}
+	mutex_unlock(&dsi_ctrl_list_lock);
+}
+
+#endif
 /**
  * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
  * @of_node:    of_node of the DSI controller.
@@ -1645,7 +1817,7 @@
 
 	host_mode = &dsi_ctrl->host_config.video_timing;
 	memcpy(host_mode, timing, sizeof(*host_mode));
-
+	dsi_ctrl->hw.ops.set_timing_db(&dsi_ctrl->hw, true);
 	dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, host_mode);
 
 exit:
@@ -1653,6 +1825,53 @@
 	return rc;
 }
 
+/**
+ * dsi_ctrl_timing_db_update() - update only controller Timing DB
+ * @dsi_ctrl:          DSI controller handle.
+ * @enable:            Enable/disable Timing DB register
+ *
+ * Update timing db register value during dfps use cases
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
+		bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid dsi_ctrl\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
+			DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->cell_index, rc);
+		goto exit;
+	}
+
+	/*
+	 * Add HW recommended delay for dfps feature.
+	 * When prefetch is enabled, MDSS HW works on 2 vsync
+	 * boundaries i.e. mdp_vsync and panel_vsync.
+	 * In the current implementation we are only waiting
+	 * for mdp_vsync. We need to make sure that interface
+	 * flush is after panel_vsync. So, added the recommended
+	 * delays after dfps update.
+	 */
+	usleep_range(2000, 2010);
+
+	dsi_ctrl->hw.ops.set_timing_db(&dsi_ctrl->hw, enable);
+
+exit:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 {
 	int rc = 0;
@@ -2146,7 +2365,7 @@
 		goto error;
 	}
 
-	if (!(flags & DSI_MODE_FLAG_SEAMLESS)) {
+	if (!(flags & (DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR))) {
 		rc = dsi_ctrl_update_link_freqs(ctrl, config, clk_handle);
 		if (rc) {
 			pr_err("[%s] failed to update link frequencies, rc=%d\n",
@@ -2231,8 +2450,8 @@
 
 	if (flags & DSI_CTRL_CMD_READ) {
 		rc = dsi_message_rx(dsi_ctrl, msg, flags);
-		if (rc)
-			pr_err("read message failed, rc=%d\n", rc);
+		if (rc <= 0)
+			pr_err("read message failed, read length = %d\n", rc);
 	} else {
 		rc = dsi_message_tx(dsi_ctrl, msg, flags);
 		if (rc)
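
For the new dsi_ctrl_wait_for_video_done() helper, the extra delay after the frame-done completion is roughly one blanking interval (sync width plus back porch) rounded up, plus 1 ms, apparently so the command DMA that follows is triggered away from the active video region. A numeric example with an assumed 1080p@60 mode (the porch values are illustrative, not from this patch):

	/* v_active = 1080, v_front_porch = 8, v_back_porch = 16, v_sync_width = 4:
	 *   v_total  = 1080 + 8 + 16 + 4 = 1108
	 *   v_blank  = v_sync_width + v_back_porch = 4 + 16 = 20
	 *   sleep_ms = CEIL(20 * 1000, 1108 * 60) + 1
	 *            = CEIL(20000, 66480) + 1 = 1 + 1 = 2
	 * i.e. udelay(2000) after the vid_frame_done completion signals.
	 */
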
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index f10c7a6..4781299 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -311,6 +311,18 @@
 				int flags, void *clk_handle);
 
 /**
+ * dsi_ctrl_timing_db_update() - update only controller Timing DB
+ * @dsi_ctrl:          DSI controller handle.
+ * @enable:            Enable/disable Timing DB register
+ *
+ * Update timing db register value during dfps use cases
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_timing_db_update(struct dsi_ctrl *dsi_ctrl,
+		bool enable);
+
+/**
  * dsi_ctrl_async_timing_update() - update only controller timing
  * @dsi_ctrl:          DSI controller handle.
  * @timing:            New DSI timing info
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 57f9bcd..714a450 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -445,6 +445,12 @@
 	void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
 
 	/**
+	 * debug_bus() - get dsi debug bus status.
+	 * @ctrl:        Pointer to the controller host hardware.
+	 */
+	void (*debug_bus)(struct dsi_ctrl_hw *ctrl);
+
+	/**
 	 * soft_reset() - perform a soft reset on DSI controller
 	 * @ctrl:          Pointer to the controller host hardware.
 	 *
@@ -514,11 +520,17 @@
 	 * get_cmd_read_data() - get data read from the peripheral
 	 * @ctrl:           Pointer to the controller host hardware.
 	 * @rd_buf:         Buffer where data will be read into.
-	 * @total_read_len: Number of bytes to read.
+	 * @read_offset:    Offset from where to read.
+	 * @rx_byte:        Number of bytes to be read.
+	 * @pkt_size:        Size of response expected.
+	 * @hw_read_cnt:    Actual number of bytes read by HW.
 	 */
 	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
 				 u8 *rd_buf,
-				 u32 total_read_len);
+				 u32 read_offset,
+				 u32 rx_byte,
+				 u32 pkt_size,
+				 u32 *hw_read_cnt);
 
 	/**
 	 * wait_for_lane_idle() - wait for DSI lanes to go to idle state
@@ -695,6 +707,20 @@
 	u32 (*collect_misr)(struct dsi_ctrl_hw *ctrl,
 			    enum dsi_op_mode panel_mode);
 
+	/**
+	 * set_timing_db() - enable/disable Timing DB register
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @enable:        Enable/Disable flag.
+	 *
+	 * Enable or disable the Timing DB register.
+	 */
+	void (*set_timing_db)(struct dsi_ctrl_hw *ctrl,
+				 bool enable);
+	/**
+	 * clear_rdbk_register() - Clear and reset read back register
+	 * @ctrl:         Pointer to the controller host hardware.
+	 */
+	void (*clear_rdbk_register)(struct dsi_ctrl_hw *ctrl);
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index cd21413..2959e94 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -16,11 +16,13 @@
 #include <linux/delay.h>
 #include <linux/iopoll.h>
 
+#include "dsi_catalog.h"
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
 #include "dsi_panel.h"
 #include "dsi_catalog.h"
+#include "sde_dbg.h"
 
 #define MMSS_MISC_CLAMP_REG_OFF           0x0014
 #define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
@@ -218,6 +220,27 @@
 }
 
 /**
+* set_timing_db() - enable/disable Timing DB register
+* @ctrl:          Pointer to controller host hardware.
+* @enable:        Enable/Disable flag.
+*
+* Enable or Disable the Timing DB register.
+*/
+void dsi_ctrl_hw_cmn_set_timing_db(struct dsi_ctrl_hw *ctrl,
+				     bool enable)
+{
+	if (enable)
+		DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+	else
+		DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x0);
+
+	wmb(); /* make sure timing db registers are set */
+	pr_debug("[DSI_%d] ctrl timing DB set:%d\n", ctrl->index,
+				enable);
+	SDE_EVT32(ctrl->index, enable);
+}
+
+/**
  * set_video_timing() - set up the timing for video frame
  * @ctrl:          Pointer to controller host hardware.
  * @mode:          Video mode information.
@@ -290,6 +313,7 @@
 	DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
 	DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
 	pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+	SDE_EVT32(v_total, h_total);
 }
 
 /**
@@ -424,6 +448,18 @@
 	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
 }
 
+void dsi_ctrl_hw_cmn_debug_bus(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+
+	DSI_W32(ctrl, DSI_DEBUG_BUS_CTL, 0x181);
+
+	/* make sure that debug test point is enabled */
+	wmb();
+	reg = DSI_R32(ctrl, DSI_DEBUG_BUS_STATUS);
+
+	pr_err("[DSI_%d] debug bus status:0x%x\n", ctrl->index, reg);
+}
 /**
  * cmd_engine_setup() - setup dsi host controller for command mode
  * @ctrl:          Pointer to the controller host hardware.
@@ -674,6 +710,22 @@
 }
 
 /**
+ * clear_rdbk_reg() - clear previously read panel data.
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * This function is called before sending a DSI Rx command to the
+ * panel in order to clear any stale data remaining from a
+ * previous read operation.
+ */
+void dsi_ctrl_hw_cmn_clear_rdbk_reg(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x1);
+	wmb(); /* ensure read back register is reset */
+	DSI_W32(ctrl, DSI_RDBK_DATA_CTRL, 0x0);
+	wmb(); /* ensure read back register is cleared */
+}
+
+/**
  * get_cmd_read_data() - get data read from the peripheral
  * @ctrl:           Pointer to the controller host hardware.
  * @rd_buf:         Buffer where data will be read into.
@@ -684,16 +736,16 @@
 u32 dsi_ctrl_hw_cmn_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
 				     u8 *rd_buf,
 				     u32 read_offset,
-				     u32 total_read_len)
+				     u32 rx_byte,
+				     u32 pkt_size,
+				     u32 *hw_read_cnt)
 {
 	u32 *lp, *temp, data;
-	int i, j = 0, cnt;
+	int i, j = 0, cnt, off;
 	u32 read_cnt;
-	u32 rx_byte = 0;
 	u32 repeated_bytes = 0;
 	u8 reg[16] = {0};
-	u32 pkt_size = 0;
-	int buf_offset = read_offset;
+	bool ack_err = false;
 
 	lp = (u32 *)rd_buf;
 	temp = (u32 *)reg;
@@ -702,28 +754,49 @@
 	if (cnt > 4)
 		cnt = 4;
 
-	if (rx_byte == 4)
-		read_cnt = 4;
-	else
-		read_cnt = pkt_size + 6;
+	read_cnt = (DSI_R32(ctrl, DSI_RDBK_DATA_CTRL) >> 16);
+	ack_err = (rx_byte == 4) ? (read_cnt == 8) :
+			((read_cnt - 4) == (pkt_size + 6));
+
+	if (ack_err)
+		read_cnt -= 4;
+	if (!read_cnt) {
+		pr_err("Panel detected error, no data read\n");
+		return 0;
+	}
 
 	if (read_cnt > 16) {
-		int bytes_shifted;
+		int bytes_shifted, data_lost = 0, rem_header = 0;
 
-		bytes_shifted = read_cnt - 16;
-		repeated_bytes = buf_offset - bytes_shifted;
+		bytes_shifted = read_cnt - rx_byte;
+		if (bytes_shifted >= 4)
+			data_lost = bytes_shifted - 4; /* remove DCS header */
+		else
+			rem_header = 4 - bytes_shifted; /* remaining header */
+
+		repeated_bytes = (read_offset - 4) - data_lost + rem_header;
 	}
 
-	for (i = cnt - 1; i >= 0; i--) {
-		data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
-		*temp++ = ntohl(data);
+	off = DSI_RDBK_DATA0;
+	off += ((cnt - 1) * 4);
+
+	for (i = 0; i < cnt; i++) {
+		data = DSI_R32(ctrl, off);
+		if (!repeated_bytes)
+			*lp++ = ntohl(data);
+		else
+			*temp++ = ntohl(data);
+		off -= 4;
 	}
 
-	for (i = repeated_bytes; i < 16; i++)
-		rd_buf[j++] = reg[i];
+	if (repeated_bytes) {
+		for (i = repeated_bytes; i < 16; i++)
+			rd_buf[j++] = reg[i];
+	}
 
-	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
-	return j;
+	*hw_read_cnt = read_cnt;
+	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, rx_byte);
+	return rx_byte;
 }
 
 /**
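
The reworked read path above no longer trusts a caller-supplied length: the byte count actually latched by hardware is taken from the upper half of DSI_RDBK_DATA_CTRL, an ACK/error trailer is stripped when present, and for long reads split into chunks the bytes that the 16-byte read-back FIFO repeats from the previous chunk are skipped. A small userspace sketch of just that overlap accounting, with made-up chunk sizes:

#include <stdio.h>

/* Mirrors the repeated-bytes accounting in get_cmd_read_data(): when the
 * hardware has clocked in more bytes than the 16-byte FIFO holds, part of
 * the FIFO duplicates data a previous chunk already delivered, adjusted
 * for the 4-byte DCS read header.
 */
static unsigned int calc_repeated_bytes(unsigned int read_cnt,
					unsigned int rx_byte,
					unsigned int read_offset)
{
	unsigned int data_lost = 0, rem_header = 0, bytes_shifted;

	if (read_cnt <= 16)
		return 0;

	bytes_shifted = read_cnt - rx_byte;
	if (bytes_shifted >= 4)
		data_lost = bytes_shifted - 4;	/* DCS header already consumed */
	else
		rem_header = 4 - bytes_shifted;	/* part of the header remains */

	return (read_offset - 4) - data_lost + rem_header;
}

int main(void)
{
	/* e.g. second 16-byte chunk (read_offset = 16) while HW reports
	 * 20 bytes latched: the first 12 bytes of the FIFO are repeats.
	 */
	printf("repeated bytes: %u\n", calc_repeated_bytes(20, 16, 16));
	return 0;
}
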
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
index f8f7e13..39ac021 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg.h
@@ -82,6 +82,8 @@
 #define DSI_SOFT_RESET                             (0x0118)
 #define DSI_CLK_CTRL                               (0x011C)
 #define DSI_CLK_STATUS                             (0x0120)
+#define DSI_DEBUG_BUS_CTL                          (0x0124)
+#define DSI_DEBUG_BUS_STATUS                       (0x0128)
 #define DSI_PHY_SW_RESET                           (0x012C)
 #define DSI_AXI2AHB_CTRL                           (0x0130)
 #define DSI_MISR_CMD_MDP0_32BIT                    (0x0134)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index fcc59ef..96ed47e 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -77,12 +77,15 @@
  * @DSI_MODE_FLAG_DFPS:		Seamless transition is DynamicFPS
  * @DSI_MODE_FLAG_VBLANK_PRE_MODESET:	Transition needs VBLANK before Modeset
  * @DSI_MODE_FLAG_DMS: Seamless transition is dynamic mode switch
+ * @DSI_MODE_FLAG_VRR: Seamless transition is DynamicFPS.
+ *                     New timing values are sent from DAL.
  */
 enum dsi_mode_flags {
 	DSI_MODE_FLAG_SEAMLESS			= BIT(0),
 	DSI_MODE_FLAG_DFPS			= BIT(1),
 	DSI_MODE_FLAG_VBLANK_PRE_MODESET	= BIT(2),
 	DSI_MODE_FLAG_DMS			= BIT(3),
+	DSI_MODE_FLAG_VRR			= BIT(4),
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index aa81d55..d71a5f21 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -28,6 +28,7 @@
 #include "dsi_drm.h"
 #include "dsi_clk.h"
 #include "dsi_pwr.h"
+#include "sde_dbg.h"
 
 #define to_dsi_display(x) container_of(x, struct dsi_display, host)
 #define INT_BASE_10 10
@@ -84,6 +85,10 @@
 		return -EINVAL;
 
 	panel = dsi_display->panel;
+
+	if (!dsi_panel_initialized(panel))
+		return -EINVAL;
+
 	panel->bl_config.bl_level = bl_lvl;
 
 	/* scale backlight */
@@ -120,6 +125,282 @@
 	return rc;
 }
 
+static int dsi_display_cmd_engine_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount > 0) {
+		display->cmd_engine_refcount++;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_ON);
+		if (rc) {
+			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_master;
+		}
+	}
+
+	display->cmd_engine_refcount++;
+	return rc;
+error_disable_master:
+	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+	return rc;
+}
+
+static int dsi_display_cmd_engine_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount == 0) {
+		pr_err("[%s] Invalid refcount\n", display->name);
+		return 0;
+	} else if (display->cmd_engine_refcount > 1) {
+		display->cmd_engine_refcount--;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	display->cmd_engine_refcount = 0;
+	return rc;
+}
+
+static bool dsi_display_validate_reg_read(struct dsi_panel *panel)
+{
+	int i, j = 0;
+	int len = 0, *lenp;
+	int group = 0, count = 0;
+	struct dsi_display_mode *mode;
+	struct drm_panel_esd_config *config;
+
+	if (!panel)
+		return false;
+
+	config = &(panel->esd_config);
+
+	lenp = config->status_valid_params ?: config->status_cmds_rlen;
+	mode = panel->cur_mode;
+	count = mode->priv_info->cmd_sets[DSI_CMD_SET_PANEL_STATUS].count;
+
+	for (i = 0; i < count; i++)
+		len += lenp[i];
+
+	for (i = 0; i < len; i++)
+		j += len;
+
+	for (j = 0; j < config->groups; ++j) {
+		for (i = 0; i < len; ++i) {
+			if (config->return_buf[i] !=
+				config->status_value[group + i])
+				break;
+		}
+
+		if (i == len)
+			return true;
+		group += len;
+	}
+
+	return false;
+}
+
+static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
+		struct dsi_panel *panel)
+{
+	int i, rc = 0, count = 0, start = 0, *lenp;
+	struct drm_panel_esd_config *config;
+	struct dsi_cmd_desc *cmds;
+	u32 flags = 0;
+
+	if (!panel)
+		return -EINVAL;
+
+	config = &(panel->esd_config);
+	lenp = config->status_valid_params ?: config->status_cmds_rlen;
+	count = config->status_cmd.count;
+	cmds = config->status_cmd.cmds;
+	flags = (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ);
+
+	for (i = 0; i < count; ++i) {
+		memset(config->status_buf, 0x0, SZ_4K);
+		cmds[i].msg.rx_buf = config->status_buf;
+		cmds[i].msg.rx_len = config->status_cmds_rlen[i];
+		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i].msg, flags);
+		if (rc <= 0) {
+			pr_err("rx cmd transfer failed rc=%d\n", rc);
+			goto error;
+		}
+
+		memcpy(config->return_buf + start,
+			config->status_buf, lenp[i]);
+		start += lenp[i];
+	}
+
+error:
+	return rc;
+}
+
+static int dsi_display_validate_status(struct dsi_display_ctrl *ctrl,
+		struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	rc = dsi_display_read_status(ctrl, panel);
+	if (rc <= 0) {
+		goto exit;
+	} else {
+		/*
+		 * panel status read successfully.
+		 * check for validity of the data read back.
+		 */
+		rc = dsi_display_validate_reg_read(panel);
+		if (!rc) {
+			rc = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	return rc;
+}
+
+static int dsi_display_status_reg_read(struct dsi_display *display)
+{
+	int rc = 0, i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	pr_debug(" ++\n");
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+	rc = dsi_display_cmd_engine_enable(display);
+	if (rc) {
+		pr_err("cmd engine enable failed\n");
+		return -EPERM;
+	}
+
+	rc = dsi_display_validate_status(m_ctrl, display->panel);
+	if (rc <= 0) {
+		pr_err("[%s] read status failed on master,rc=%d\n",
+		       display->name, rc);
+		goto exit;
+	}
+
+	if (!display->panel->sync_broadcast_en)
+		goto exit;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (ctrl == m_ctrl)
+			continue;
+
+		rc = dsi_display_validate_status(ctrl, display->panel);
+		if (rc <= 0) {
+			pr_err("[%s] read status failed on master,rc=%d\n",
+			       display->name, rc);
+			goto exit;
+		}
+	}
+
+exit:
+	dsi_display_cmd_engine_disable(display);
+	return rc;
+}
+
+static int dsi_display_status_bta_request(struct dsi_display *display)
+{
+	int rc = 0;
+
+	pr_debug(" ++\n");
+	/* TODO: trigger SW BTA and wait for acknowledgment */
+
+	return rc;
+}
+
+static int dsi_display_status_check_te(struct dsi_display *display)
+{
+	int rc = 0;
+
+	pr_debug(" ++\n");
+	/* TODO: wait for TE interrupt from panel */
+
+	return rc;
+}
+
+int dsi_display_check_status(void *display)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	u32 status_mode;
+	int rc = 0;
+
+	if (dsi_display == NULL)
+		return -EINVAL;
+
+	panel = dsi_display->panel;
+
+	status_mode = panel->esd_config.status_mode;
+
+	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
+		DSI_ALL_CLKS, DSI_CLK_ON);
+
+	if (status_mode == ESD_MODE_REG_READ) {
+		rc = dsi_display_status_reg_read(dsi_display);
+	} else if (status_mode == ESD_MODE_SW_BTA) {
+		rc = dsi_display_status_bta_request(dsi_display);
+	} else if (status_mode == ESD_MODE_PANEL_TE) {
+		rc = dsi_display_status_check_te(dsi_display);
+	} else {
+		pr_warn("unsupported check status mode\n");
+		panel->esd_config.esd_enabled = false;
+	}
+
+	dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
+		DSI_ALL_CLKS, DSI_CLK_OFF);
+
+	return rc;
+}
+
 int dsi_display_soft_reset(void *display)
 {
 	struct dsi_display *dsi_display;
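
For the register-read ESD mode added above, dsi_display_read_status() concatenates the per-command read-backs into return_buf, and dsi_display_validate_reg_read() then declares the panel healthy if the buffer matches any one of the value groups supplied through qcom,mdss-dsi-panel-status-value. A standalone sketch of that group comparison, with hypothetical status values:

#include <stdbool.h>
#include <stdio.h>

/* Simplified model of dsi_display_validate_reg_read(): the panel is
 * considered alive if the read-back bytes match ANY one configured
 * group of expected values.
 */
static bool esd_values_match(const unsigned char *read,
			     const unsigned char *expected,
			     int len, int groups)
{
	int g, i;

	for (g = 0; g < groups; g++) {
		for (i = 0; i < len; i++)
			if (read[i] != expected[g * len + i])
				break;
		if (i == len)
			return true;	/* one full group matched */
	}
	return false;
}

int main(void)
{
	/* hypothetical panel accepting either 0x9c or 0x1c as its status */
	unsigned char expected[] = { 0x9c, 0x1c };
	unsigned char good[] = { 0x1c };
	unsigned char bad[]  = { 0x00 };

	printf("alive: %d\n", esd_values_match(good, expected, 1, 2));
	printf("alive: %d\n", esd_values_match(bad, expected, 1, 2));
	return 0;
}
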
@@ -1206,87 +1487,6 @@
 	return rc;
 }
 
-static int dsi_display_cmd_engine_enable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	if (display->cmd_engine_refcount > 0) {
-		display->cmd_engine_refcount++;
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-	for (i = 0; i < display->ctrl_count; i++) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_ON);
-		if (rc) {
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-			goto error_disable_master;
-		}
-	}
-
-	display->cmd_engine_refcount++;
-	return rc;
-error_disable_master:
-	(void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-error:
-	return rc;
-}
-
-static int dsi_display_cmd_engine_disable(struct dsi_display *display)
-{
-	int rc = 0;
-	int i;
-	struct dsi_display_ctrl *m_ctrl, *ctrl;
-
-	if (display->cmd_engine_refcount == 0) {
-		pr_err("[%s] Invalid refcount\n", display->name);
-		return 0;
-	} else if (display->cmd_engine_refcount > 1) {
-		display->cmd_engine_refcount--;
-		return 0;
-	}
-
-	m_ctrl = &display->ctrl[display->cmd_master_idx];
-	for (i = 0; i < display->ctrl_count; i++) {
-		ctrl = &display->ctrl[i];
-		if (!ctrl->ctrl || (ctrl == m_ctrl))
-			continue;
-
-		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
-						   DSI_CTRL_ENGINE_OFF);
-		if (rc)
-			pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-			       display->name, rc);
-	}
-
-	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
-	if (rc) {
-		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
-		       display->name, rc);
-		goto error;
-	}
-
-error:
-	display->cmd_engine_refcount = 0;
-	return rc;
-}
-
 static int dsi_display_ctrl_host_enable(struct dsi_display *display)
 {
 	int rc = 0;
@@ -1708,7 +1908,9 @@
 put_iova:
 	msm_gem_put_iova(display->tx_cmd_buf, aspace);
 free_gem:
+	mutex_lock(&display->drm_dev->struct_mutex);
 	msm_gem_free_object(display->tx_cmd_buf);
+	mutex_unlock(&display->drm_dev->struct_mutex);
 error:
 	return rc;
 }
@@ -2551,7 +2753,7 @@
 }
 
 static int dsi_display_dfps_calc_front_porch(
-		u64 clk_hz,
+		u32 old_fps,
 		u32 new_fps,
 		u32 a_total,
 		u32 b_total,
@@ -2559,6 +2761,7 @@
 		u32 *b_fp_out)
 {
 	s32 b_fp_new;
+	int add_porches, diff;
 
 	if (!b_fp_out) {
 		pr_err("Invalid params");
@@ -2570,15 +2773,22 @@
 		return -EINVAL;
 	}
 
-	/**
+	/*
 	 * Keep clock, other porches constant, use new fps, calc front porch
-	 * clk = (hor * ver * fps)
-	 * hfront = clk / (vtotal * fps)) - hactive - hback - hsync
+	 * new_vtotal = old_vtotal * (old_fps / new_fps )
+	 * new_vfp - old_vfp = new_vtotal - old_vtotal
+	 * new_vfp = old_vfp + old_vtotal * ((old_fps - new_fps)/ new_fps)
 	 */
-	b_fp_new = (clk_hz / (a_total * new_fps)) - (b_total - b_fp);
+	diff = abs(old_fps - new_fps);
+	add_porches = mult_frac(b_total, diff, new_fps);
 
-	pr_debug("clk %llu fps %u a %u b %u b_fp %u new_fp %d\n",
-			clk_hz, new_fps, a_total, b_total, b_fp, b_fp_new);
+	if (old_fps > new_fps)
+		b_fp_new = b_fp + add_porches;
+	else
+		b_fp_new = b_fp - add_porches;
+
+	pr_debug("fps %u a %u b %u b_fp %u new_fp %d\n",
+			new_fps, a_total, b_total, b_fp, b_fp_new);
 
 	if (b_fp_new < 0) {
 		pr_err("Invalid new_hfp calcluated%d\n", b_fp_new);
@@ -2594,14 +2804,25 @@
 	return 0;
 }
 
+/**
+ * dsi_display_get_dfps_timing() - Get the new dfps values.
+ * @display:         DSI display handle.
+ * @adj_mode:        Mode value structure to be changed.
+ *                   It contains the old timing values and the latest fps value.
+ *                   New timing values are updated based on the new fps.
+ * @curr_refresh_rate:  Current fps rate.
+ *                      If zero, the current fps is taken from
+ *                      display->panel->cur_mode.
+ * Return: error code.
+ */
 static int dsi_display_get_dfps_timing(struct dsi_display *display,
-				       struct dsi_display_mode *adj_mode)
+			struct dsi_display_mode *adj_mode,
+				u32 curr_refresh_rate)
 {
 	struct dsi_dfps_capabilities dfps_caps;
 	struct dsi_display_mode per_ctrl_mode;
 	struct dsi_mode_info *timing;
 	struct dsi_ctrl *m_ctrl;
-	u64 clk_hz;
 
 	int rc = 0;
 
@@ -2620,20 +2841,27 @@
 	per_ctrl_mode = *adj_mode;
 	adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
 
-	if (!dsi_display_is_seamless_dfps_possible(display,
-			&per_ctrl_mode, dfps_caps.type)) {
-		pr_err("seamless dynamic fps not supported for mode\n");
-		return -EINVAL;
+	if (!curr_refresh_rate) {
+		if (!dsi_display_is_seamless_dfps_possible(display,
+				&per_ctrl_mode, dfps_caps.type)) {
+			pr_err("seamless dynamic fps not supported for mode\n");
+			return -EINVAL;
+		}
+		if (display->panel->cur_mode) {
+			curr_refresh_rate =
+				display->panel->cur_mode->timing.refresh_rate;
+		} else {
+			pr_err("cur_mode is not initialized\n");
+			return -EINVAL;
+		}
 	}
-
 	/* TODO: Remove this direct reference to the dsi_ctrl */
-	clk_hz = m_ctrl->clk_freq.pix_clk_rate;
 	timing = &per_ctrl_mode.timing;
 
 	switch (dfps_caps.type) {
 	case DSI_DFPS_IMMEDIATE_VFP:
 		rc = dsi_display_dfps_calc_front_porch(
-				clk_hz,
+				curr_refresh_rate,
 				timing->refresh_rate,
 				DSI_H_TOTAL(timing),
 				DSI_V_TOTAL(timing),
@@ -2643,7 +2871,7 @@
 
 	case DSI_DFPS_IMMEDIATE_HFP:
 		rc = dsi_display_dfps_calc_front_porch(
-				clk_hz,
+				curr_refresh_rate,
 				timing->refresh_rate,
 				DSI_V_TOTAL(timing),
 				DSI_H_TOTAL(timing),
@@ -2672,7 +2900,7 @@
 	}
 
 	/* Currently the only seamless transition is dynamic fps */
-	rc = dsi_display_get_dfps_timing(display, adj_mode);
+	rc = dsi_display_get_dfps_timing(display, adj_mode, 0);
 	if (rc) {
 		pr_debug("Dynamic FPS not supported for seamless\n");
 	} else {
@@ -2712,7 +2940,8 @@
 	memcpy(&display->config.lane_map, &display->lane_map,
 	       sizeof(display->lane_map));
 
-	if (mode->dsi_mode_flags & DSI_MODE_FLAG_DFPS) {
+	if (mode->dsi_mode_flags &
+			(DSI_MODE_FLAG_DFPS | DSI_MODE_FLAG_VRR)) {
 		rc = dsi_display_dfps_update(display, mode);
 		if (rc) {
 			pr_err("[%s]DSI dfps update failed, rc=%d\n",
@@ -3384,6 +3613,9 @@
 		break;
 	}
 
+	if (display->panel->esd_config.esd_enabled)
+		info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
+
 	memcpy(&info->roi_caps, &display->panel->roi_caps,
 			sizeof(info->roi_caps));
 
@@ -3490,6 +3722,7 @@
 
 		for (i = 0; i < num_dfps_rates; i++) {
 			struct dsi_display_mode *sub_mode = &modes[array_idx];
+			u32 curr_refresh_rate;
 
 			if (!sub_mode) {
 				pr_err("invalid mode data\n");
@@ -3499,9 +3732,15 @@
 			memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
 
 			if (dfps_caps.dfps_support) {
+				curr_refresh_rate =
+					sub_mode->timing.refresh_rate;
 				sub_mode->timing.refresh_rate =
 					dfps_caps.min_refresh_rate +
 					(i % num_dfps_rates);
+
+				dsi_display_get_dfps_timing(display,
+					sub_mode, curr_refresh_rate);
+
 				sub_mode->pixel_clk_khz =
 					(DSI_H_TOTAL(&sub_mode->timing) *
 					DSI_V_TOTAL(&sub_mode->timing) *
@@ -3516,6 +3755,102 @@
 	return rc;
 }
 
+/**
+ * dsi_display_validate_mode_vrr() - Validate the variable refresh rate case.
+ * @display:     DSI display handle.
+ * @cur_dsi_mode:   Current DSI mode.
+ * @mode:        Mode value structure to be validated.
+ *               The MSM_MODE_FLAG_SEAMLESS_VRR flag is set if there
+ *               is a change in fps but vactive and hactive are unchanged.
+ * Return: error code.
+ */
+int dsi_display_validate_mode_vrr(struct dsi_display *display,
+			struct dsi_display_mode *cur_dsi_mode,
+			struct dsi_display_mode *mode)
+{
+	int rc = 0;
+	struct dsi_display_mode adj_mode, cur_mode;
+	struct dsi_dfps_capabilities dfps_caps;
+	u32 curr_refresh_rate;
+
+	if (!display || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!display->panel || !display->panel->cur_mode) {
+		pr_debug("Current panel mode not set\n");
+		return rc;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	adj_mode = *mode;
+	cur_mode = *cur_dsi_mode;
+
+	if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
+		(cur_mode.timing.v_active == adj_mode.timing.v_active) &&
+		(cur_mode.timing.h_active == adj_mode.timing.h_active)) {
+
+		curr_refresh_rate = cur_mode.timing.refresh_rate;
+		rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+		if (rc) {
+			pr_err("[%s] failed to get dfps caps from panel\n",
+					display->name);
+			goto error;
+		}
+
+		cur_mode.timing.refresh_rate =
+			adj_mode.timing.refresh_rate;
+
+		rc = dsi_display_get_dfps_timing(display,
+			&cur_mode, curr_refresh_rate);
+		if (rc) {
+			pr_err("[%s] seamless vrr not possible rc=%d\n",
+			display->name, rc);
+			goto error;
+		}
+		switch (dfps_caps.type) {
+		/*
+		 * Ignore any round-off error in the porch calculation.
+		 * Worst case is set to 5.
+		 */
+		case DSI_DFPS_IMMEDIATE_VFP:
+			if (abs(DSI_V_TOTAL(&cur_mode.timing) -
+				DSI_V_TOTAL(&adj_mode.timing)) > 5)
+				pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
+				adj_mode.timing.refresh_rate,
+				cur_mode.timing.v_front_porch,
+				adj_mode.timing.v_front_porch);
+			break;
+
+		case DSI_DFPS_IMMEDIATE_HFP:
+			if (abs(DSI_H_TOTAL(&cur_mode.timing) -
+				DSI_H_TOTAL(&adj_mode.timing)) > 5)
+				pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
+				adj_mode.timing.refresh_rate,
+				cur_mode.timing.h_front_porch,
+				adj_mode.timing.h_front_porch);
+			break;
+
+		default:
+			pr_err("Unsupported DFPS mode %d\n",
+				dfps_caps.type);
+			rc = -ENOTSUPP;
+		}
+
+		pr_debug("Mode switch is seamless variable refresh\n");
+		mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
+		SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
+				cur_mode.timing.h_front_porch,
+				adj_mode.timing.h_front_porch);
+	}
+
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
 int dsi_display_validate_mode(struct dsi_display *display,
 			      struct dsi_display_mode *mode,
 			      u32 flags)
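
dsi_display_validate_mode_vrr() above only treats a switch as seamless VRR when the fps changes while hactive/vactive stay the same, and after recomputing the front porch it tolerates up to 5 lines/pixels of difference between the recalculated total and the total userspace requested (rounding slack in the porch math). A toy model of that gate, with made-up timings:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Toy model of the seamless-VRR check; all numbers are illustrative. */
struct toy_timing {
	int h_active, v_active, fps, v_total;
};

static bool is_vrr_candidate(const struct toy_timing *cur,
			     const struct toy_timing *adj)
{
	return cur->fps != adj->fps &&
	       cur->h_active == adj->h_active &&
	       cur->v_active == adj->v_active;
}

int main(void)
{
	struct toy_timing cur = { 1080, 1920, 60, 2000 };
	struct toy_timing adj = { 1080, 1920, 50, 2400 };
	int recalc_total = 2398;	/* stand-in for the porch recalculation */

	printf("vrr candidate: %d\n", is_vrr_candidate(&cur, &adj));
	printf("within 5-line tolerance: %d\n",
	       abs(recalc_total - adj.v_total) <= 5);
	return 0;
}
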
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 1c30b9c..da4f5eb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -324,6 +324,17 @@
 			      u32 flags);
 
 /**
+ * dsi_display_validate_mode_vrr() - validates a mode for the variable refresh case
+ * @display:             Handle to display.
+ * @cur_dsi_mode:        Current DSI mode.
+ * @mode:                Mode to be validated.
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int dsi_display_validate_mode_vrr(struct dsi_display *display,
+			struct dsi_display_mode *cur_dsi_mode,
+			struct dsi_display_mode *mode);
+
+/**
  * dsi_display_set_mode() - Set mode on the display.
  * @display:           Handle to display.
  * @mode:              mode to be set.
@@ -478,6 +489,12 @@
 int dsi_display_set_backlight(void *display, u32 bl_lvl);
 
 /**
+ * dsi_display_check_status() - check if panel is dead or alive
+ * @display:            Handle to display.
+ */
+int dsi_display_check_status(void *display);
+
+/**
  * dsi_display_soft_reset() - perform a soft reset on DSI controller
  * @display:         Handle to display
  *
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 30e5f02..af721eb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -61,6 +61,8 @@
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
 	if (msm_is_mode_seamless_dms(drm_mode))
 		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+	if (msm_is_mode_seamless_vrr(drm_mode))
+		dsi_mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
 }
 
 static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
@@ -96,6 +98,8 @@
 		drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
 	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_DMS)
 		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DMS;
+	if (dsi_mode->dsi_mode_flags & DSI_MODE_FLAG_VRR)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_VRR;
 
 	drm_mode_set_name(drm_mode);
 }
@@ -134,7 +138,8 @@
 		return;
 	}
 
-	if (c_bridge->dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS) {
+	if (c_bridge->dsi_mode.dsi_mode_flags &
+		(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
 		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
 		return;
 	}
@@ -169,7 +174,8 @@
 		return;
 	}
 
-	if (c_bridge->dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS) {
+	if (c_bridge->dsi_mode.dsi_mode_flags &
+			(DSI_MODE_FLAG_SEAMLESS | DSI_MODE_FLAG_VRR)) {
 		pr_debug("[%d] seamless enable\n", c_bridge->id);
 		return;
 	}
@@ -249,7 +255,7 @@
 {
 	int rc = 0;
 	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
-	struct dsi_display_mode dsi_mode;
+	struct dsi_display_mode dsi_mode, cur_dsi_mode;
 	struct drm_display_mode cur_mode;
 
 	if (!bridge || !mode || !adjusted_mode) {
@@ -267,9 +273,20 @@
 	}
 
 	if (bridge->encoder && bridge->encoder->crtc) {
+
+		convert_to_dsi_mode(&bridge->encoder->crtc->mode,
+							&cur_dsi_mode);
+		rc = dsi_display_validate_mode_vrr(c_bridge->display,
+					&cur_dsi_mode, &dsi_mode);
+		if (rc)
+			pr_debug("[%s] vrr mode mismatch failure rc=%d\n",
+				c_bridge->display->name, rc);
+
 		cur_mode = bridge->encoder->crtc->mode;
 
-		if (!drm_mode_equal(&cur_mode, adjusted_mode))
+		if (!drm_mode_equal(&cur_mode, adjusted_mode) &&
+			(!(dsi_mode.dsi_mode_flags &
+				DSI_MODE_FLAG_VRR)))
 			dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
 	}
 
@@ -383,6 +400,13 @@
 	sde_kms_info_add_keystr(info, "dfps support",
 			panel->dfps_caps.dfps_support ? "true" : "false");
 
+	if (panel->dfps_caps.dfps_support) {
+		sde_kms_info_add_keyint(info, "min_fps",
+			panel->dfps_caps.min_refresh_rate);
+		sde_kms_info_add_keyint(info, "max_fps",
+			panel->dfps_caps.max_refresh_rate);
+	}
+
 	switch (panel->phy_props.rotation) {
 	case DSI_PANEL_ROTATE_NONE:
 		sde_kms_info_add_keystr(info, "panel orientation", "none");
@@ -598,6 +622,52 @@
 	dsi_display_enable_event(display, event_idx, &event_info, enable);
 }
 
+int dsi_conn_post_kickoff(struct drm_connector *connector)
+{
+	struct drm_encoder *encoder;
+	struct dsi_bridge *c_bridge;
+	struct dsi_display_mode adj_mode;
+	struct dsi_display *display;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	int i, rc = 0;
+
+	if (!connector || !connector->state->best_encoder)
+		return -EINVAL;
+
+	encoder = connector->state->best_encoder;
+	c_bridge = to_dsi_bridge(encoder->bridge);
+	adj_mode = c_bridge->dsi_mode;
+	display = c_bridge->display;
+
+	if (adj_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) {
+		m_ctrl = &display->ctrl[display->clk_master_idx];
+		rc = dsi_ctrl_timing_db_update(m_ctrl->ctrl, false);
+		if (rc) {
+			pr_err("[%s] failed to dfps update  rc=%d\n",
+				display->name, rc);
+			return -EINVAL;
+		}
+
+		/* Update the rest of the controllers */
+		for (i = 0; i < display->ctrl_count; i++) {
+			ctrl = &display->ctrl[i];
+			if (!ctrl->ctrl || (ctrl == m_ctrl))
+				continue;
+
+			rc = dsi_ctrl_timing_db_update(ctrl->ctrl, false);
+			if (rc) {
+				pr_err("[%s] failed to dfps update rc=%d\n",
+					display->name,  rc);
+				return -EINVAL;
+			}
+		}
+
+		c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_VRR;
+	}
+
+	return 0;
+}
+
 struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
 				       struct drm_device *dev,
 				       struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 793f8f1..9700e68 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -119,4 +119,11 @@
 		void *display,
 		struct msm_display_kickoff_params *params);
 
+/**
+ * dsi_conn_post_kickoff - program post kickoff-time features
+ * @connector: Pointer to drm connector structure
+ * Returns: Zero on success
+ */
+int dsi_conn_post_kickoff(struct drm_connector *connector);
+
 #endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 693295f..6718156 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2495,6 +2495,7 @@
 
 static void dsi_panel_esd_config_deinit(struct drm_panel_esd_config *esd_config)
 {
+	kfree(esd_config->status_buf);
 	kfree(esd_config->return_buf);
 	kfree(esd_config->status_value);
 	kfree(esd_config->status_valid_params);
@@ -2621,6 +2622,10 @@
 		goto error3;
 	}
 
+	esd_config->status_buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!esd_config->status_buf)
+		goto error4;
+
 	rc = of_property_read_u32_array(of_node,
 		"qcom,mdss-dsi-panel-status-value",
 		esd_config->status_value, esd_config->groups * status_len);
@@ -2632,6 +2637,8 @@
 
 	return 0;
 
+error4:
+	kfree(esd_config->return_buf);
 error3:
 	kfree(esd_config->status_value);
 error2:
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index f80dea2..f63fd27 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -135,7 +135,8 @@
 	u32 *status_cmds_rlen;
 	u32 *status_valid_params;
 	u32 *status_value;
-	unsigned char *return_buf;
+	u8 *return_buf;
+	u8 *status_buf;
 	u32 groups;
 };
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e0617ee..cbcf580 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -130,11 +130,13 @@
 			continue;
 
 		if (msm_is_mode_seamless(
-				&connector->encoder->crtc->state->mode))
+			&connector->encoder->crtc->state->mode) ||
+			msm_is_mode_seamless_vrr(
+			&connector->encoder->crtc->state->adjusted_mode))
 			continue;
 
 		if (msm_is_mode_seamless_dms(
-			       &connector->encoder->crtc->state->adjusted_mode))
+			&connector->encoder->crtc->state->adjusted_mode))
 			continue;
 
 		funcs = encoder->helper_private;
@@ -169,7 +171,8 @@
 		if (!old_crtc_state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode))
+		if (msm_is_mode_seamless(&crtc->state->mode) ||
+			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
 			continue;
 
 		if (msm_is_mode_seamless_dms(&crtc->state->adjusted_mode))
@@ -309,7 +312,8 @@
 		if (!crtc->state->active)
 			continue;
 
-		if (msm_is_mode_seamless(&crtc->state->mode))
+		if (msm_is_mode_seamless(&crtc->state->mode) ||
+			msm_is_mode_seamless_vrr(&crtc->state->adjusted_mode))
 			continue;
 
 		funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5e47daf..0f48db6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -58,51 +58,6 @@
 #define MSM_VERSION_PATCHLEVEL	0
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5
-#define HPD_STRING_SIZE 30
-
-static void msm_drm_helper_hotplug_event(struct drm_device *dev)
-{
-	struct drm_connector *connector;
-	char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE];
-	char const *connector_name;
-	char *envp[3];
-
-	if (!dev) {
-		DRM_ERROR("hotplug_event failed, invalid input\n");
-		return;
-	}
-
-	if (!dev->mode_config.poll_enabled)
-		return;
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_for_each_connector(connector, dev) {
-		/* Only handle HPD capable connectors. */
-		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
-			continue;
-
-		connector->status = connector->funcs->detect(connector, false);
-
-		if (connector->name)
-			connector_name = connector->name;
-		else
-			connector_name = "unknown";
-
-		snprintf(name, HPD_STRING_SIZE, "name=%s", connector_name);
-
-		snprintf(status, HPD_STRING_SIZE, "status=%s",
-			drm_get_connector_status_name(connector->status));
-
-		DRM_DEBUG("generating hotplug event [%s]: [%s]\n",
-			name, status);
-		envp[0] = name;
-		envp[1] = status;
-		envp[2] = NULL;
-		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-				envp);
-	}
-	mutex_unlock(&dev->mode_config.mutex);
-}
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
@@ -117,8 +72,6 @@
 
 	if (priv->fbdev)
 		drm_fb_helper_hotplug_event(priv->fbdev);
-	else
-		msm_drm_helper_hotplug_event(dev);
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 12d2d15..e19d1db 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -127,6 +127,9 @@
 
 enum msm_mdp_crtc_property {
 	CRTC_PROP_INFO,
+	CRTC_PROP_DEST_SCALER_LUT_ED,
+	CRTC_PROP_DEST_SCALER_LUT_CIR,
+	CRTC_PROP_DEST_SCALER_LUT_SEP,
 
 	/* # of blob properties */
 	CRTC_PROP_BLOBCOUNT,
@@ -148,6 +151,7 @@
 	CRTC_PROP_ROI_V1,
 	CRTC_PROP_SECURITY_LEVEL,
 	CRTC_PROP_IDLE_TIMEOUT,
+	CRTC_PROP_DEST_SCALER,
 
 	/* total # of properties */
 	CRTC_PROP_COUNT
@@ -208,12 +212,14 @@
  * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
  * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
  * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ * @MSM_DISPLAY_ESD_ENABLED:            ESD feature enabled
  */
 enum msm_display_caps {
 	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
 	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
 	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
 	MSM_DISPLAY_CAP_EDID		= BIT(3),
+	MSM_DISPLAY_ESD_ENABLED		= BIT(4),
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b829460..f8c3df5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -311,8 +311,7 @@
 	}
 }
 
-static void
-put_iova(struct drm_gem_object *obj)
+static void put_iova(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -324,12 +323,14 @@
 		if (iommu_present(&platform_bus_type)) {
 			msm_gem_unmap_vma(domain->aspace, domain,
 				msm_obj->sgt, get_dmabuf_ptr(obj));
-
-			msm_gem_remove_obj_from_aspace_active_list(
-					domain->aspace,
-					obj);
 		}
 
+		/*
+		 * put_iova removes the domain connected to the obj which makes
+		 * the aspace inaccessible. Store the aspace, as it is used to
+		 * update the active_list during gem_free_obj and gem_purge.
+		 */
+		msm_obj->aspace = domain->aspace;
 		obj_remove_domain(domain);
 	}
 }
@@ -409,7 +410,8 @@
 
 	if (!ret && domain) {
 		*iova = domain->iova;
-		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
+		if (aspace && aspace->domain_attached)
+			msm_gem_add_obj_to_aspace_active_list(aspace, obj);
 	} else {
 		obj_remove_domain(domain);
 	}
@@ -485,24 +487,21 @@
 		/**
 		 * Unmap active buffers,
 		 * typically clients should do this when the callback is called,
-		 * but this needs to be done for the framebuffers which are not
-		 * attached to any planes. (background apps)
+		 * but this needs to be done for the buffers which are not
+		 * attached to any planes.
 		 */
 		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
 			obj = &msm_obj->base;
-			if (obj->import_attach) {
+			if (obj->import_attach)
 				put_iova(obj);
-				put_pages(obj);
-			}
 		}
 	} else {
 		/* map active buffers */
-		list_for_each_entry(msm_obj, &aspace->active_list,
-				iova_list) {
+		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
 			obj = &msm_obj->base;
 			ret = msm_gem_get_iova_locked(obj, aspace, &iova);
 			if (ret) {
-				mutex_unlock(&obj->dev->struct_mutex);
+				mutex_unlock(&aspace->dev->struct_mutex);
 				return;
 			}
 		}
@@ -613,6 +612,7 @@
 	WARN_ON(obj->import_attach);
 
 	put_iova(obj);
+	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);
 
 	msm_gem_vunmap(obj);
 
@@ -841,6 +841,7 @@
 	list_del(&msm_obj->mm_list);
 
 	put_iova(obj);
+	msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace, obj);
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
@@ -913,6 +914,8 @@
 		return -EINVAL;
 	}
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	if (!iommu_present(&platform_bus_type))
 		use_vram = true;
 	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
@@ -946,6 +949,7 @@
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->domains);
 	INIT_LIST_HEAD(&msm_obj->iova_list);
+	msm_obj->aspace = NULL;
 
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
@@ -987,7 +991,7 @@
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	uint32_t size;
 	int ret, npages;
 
@@ -999,7 +1003,12 @@
 
 	size = PAGE_ALIGN(dmabuf->size);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ERR_PTR(ret);
+
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c50c453..8521bea 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -122,6 +122,8 @@
 	 */
 	struct drm_mm_node *vram_node;
 	struct list_head iova_list;
+
+	struct msm_gem_address_space *aspace;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 76523c7..304faa6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -36,6 +36,8 @@
 #define MSM_MODE_FLAG_VBLANK_PRE_MODESET	(1<<1)
 /* Request to switch the connector mode */
 #define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
+/* Request to switch the fps */
+#define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
 
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
@@ -163,6 +165,12 @@
 		(mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
 }
 
+static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_VRR)
+		: false;
+}
+
 static inline bool msm_needs_vblank_pre_modeset(
 		const struct drm_display_mode *mode)
 {
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index d1991a4..033be6e 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -269,9 +269,16 @@
 		info->property_data[property_idx].default_value = 0;
 		info->property_data[property_idx].force_dirty = false;
 
+		/* select first defined value for enums */
+		if (!is_bitmask)
+			info->property_data[property_idx].default_value =
+				values->type;
+
 		/* always attach property, if created */
 		if (*prop) {
-			drm_object_attach_property(info->base, *prop, 0);
+			drm_object_attach_property(info->base, *prop,
+					info->property_data
+					[property_idx].default_value);
 			++info->install_count;
 		}
 	}
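
The msm_prop change above makes enum properties come up with a sane initial value: instead of attaching with 0 (which may not even be a defined enum entry), the first value in the enum list becomes both the stored default and the value passed to drm_object_attach_property(). A kernel-style sketch of the resulting behaviour, with placeholder names and values:

#include <drm/drmP.h>

/* Illustrative only: attach an enum property with its first defined value
 * as the initial value, mirroring the enum-default behaviour introduced
 * above.  All example_* names are placeholders.
 */
static const struct drm_prop_enum_list example_enum[] = {
	{ 3, "mode_a" },	/* first entry becomes the default */
	{ 7, "mode_b" },
};

static void example_attach_enum(struct drm_device *dev,
				struct drm_mode_object *obj)
{
	struct drm_property *prop;

	prop = drm_property_create_enum(dev, 0, "example_mode",
					example_enum,
					ARRAY_SIZE(example_enum));
	if (!prop)
		return;

	/* initial value is the first defined entry (3 here), not 0 */
	drm_object_attach_property(obj, prop, example_enum[0].type);
}
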
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 69ee2be..e4dcea7 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -366,6 +366,29 @@
 	return c_conn->ops.get_info(info, c_conn->display);
 }
 
+void sde_connector_schedule_status_work(struct drm_connector *connector,
+		bool en)
+{
+	struct sde_connector *c_conn;
+	struct msm_display_info info;
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn)
+		return;
+
+	sde_connector_get_info(connector, &info);
+	if (c_conn->ops.check_status &&
+		(info.capabilities & MSM_DISPLAY_ESD_ENABLED)) {
+		if (en)
+			/* Schedule ESD status check */
+			schedule_delayed_work(&c_conn->status_work,
+				msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS));
+		else
+			/* Cancel any pending ESD status check */
+			cancel_delayed_work_sync(&c_conn->status_work);
+	}
+}
+
 static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
 {
 	struct drm_connector *connector;
@@ -411,6 +434,9 @@
 	}
 	c_conn->last_panel_power_mode = mode;
 
+	if (mode != SDE_MODE_DPMS_ON)
+		sde_connector_schedule_status_work(connector, false);
+
 	return rc;
 }
 
@@ -492,6 +518,9 @@
 
 	c_conn = to_sde_connector(connector);
 
+	/* cancel if any pending esd work */
+	sde_connector_schedule_status_work(connector, false);
+
 	if (c_conn->ops.put_modes)
 		c_conn->ops.put_modes(connector, c_conn->display);
 
@@ -559,6 +588,8 @@
 	if (c_state->out_fb)
 		_sde_connector_destroy_fb(c_conn, c_state);
 
+	__drm_atomic_helper_connector_destroy_state(&c_state->base);
+
 	if (!c_conn) {
 		kfree(c_state);
 	} else {
@@ -596,8 +627,7 @@
 			&c_state->property_state,
 			c_state->property_values);
 
-	c_state->base.connector = connector;
-	connector->state = &c_state->base;
+	__drm_atomic_helper_connector_reset(connector, &c_state->base);
 }
 
 static struct drm_connector_state *
@@ -624,6 +654,9 @@
 			c_oldstate, c_state,
 			&c_state->property_state, c_state->property_values);
 
+	__drm_atomic_helper_connector_duplicate_state(connector,
+			&c_state->base);
+
 	/* additional handling for drm framebuffer objects */
 	if (c_state->out_fb)
 		drm_framebuffer_reference(c_state->out_fb);
@@ -1056,6 +1089,7 @@
 static int sde_connector_init_debugfs(struct drm_connector *connector)
 {
 	struct sde_connector *sde_connector;
+	struct msm_display_info info;
 
 	if (!connector || !connector->debugfs_entry) {
 		SDE_ERROR("invalid connector\n");
@@ -1064,6 +1098,13 @@
 
 	sde_connector = to_sde_connector(connector);
 
+	sde_connector_get_info(connector, &info);
+	if (sde_connector->ops.check_status &&
+		(info.capabilities & MSM_DISPLAY_ESD_ENABLED))
+		debugfs_create_u32("force_panel_dead", 0600,
+				connector->debugfs_entry,
+				&sde_connector->force_panel_dead);
+
 	if (!debugfs_create_bool("fb_kmap", 0600, connector->debugfs_entry,
 			&sde_connector->fb_kmap)) {
 		SDE_ERROR("failed to create connector fb_kmap\n");
@@ -1160,6 +1201,56 @@
 	return c_conn->encoder;
 }
 
+static void sde_connector_check_status_work(struct work_struct *work)
+{
+	struct sde_connector *conn;
+	struct drm_event event;
+	int rc = 0;
+	bool panel_dead = false;
+
+	conn = container_of(to_delayed_work(work),
+			struct sde_connector, status_work);
+	if (!conn) {
+		SDE_ERROR("not able to get connector object\n");
+		return;
+	}
+
+	mutex_lock(&conn->lock);
+	if (!conn->ops.check_status ||
+			(conn->dpms_mode != DRM_MODE_DPMS_ON)) {
+		SDE_DEBUG("dpms mode: %d\n", conn->dpms_mode);
+		mutex_unlock(&conn->lock);
+		return;
+	}
+
+	rc = conn->ops.check_status(conn->display);
+	mutex_unlock(&conn->lock);
+
+	if (conn->force_panel_dead) {
+		conn->force_panel_dead--;
+		if (!conn->force_panel_dead)
+			goto status_dead;
+	}
+
+	if (rc > 0) {
+		SDE_DEBUG("esd check status success conn_id: %d enc_id: %d\n",
+				conn->base.base.id, conn->encoder->base.id);
+		schedule_delayed_work(&conn->status_work,
+			msecs_to_jiffies(STATUS_CHECK_INTERVAL_MS));
+		return;
+	}
+
+status_dead:
+	SDE_EVT32(rc, SDE_EVTLOG_ERROR);
+	SDE_ERROR("esd check failed report PANEL_DEAD conn_id: %d enc_id: %d\n",
+			conn->base.base.id, conn->encoder->base.id);
+	panel_dead = true;
+	event.type = DRM_EVENT_PANEL_DEAD;
+	event.length = sizeof(u32);
+	msm_mode_object_event_notify(&conn->base.base,
+		conn->base.dev, &event, (u8 *)&panel_dead);
+}
+
 static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
 	.get_modes =    sde_connector_get_modes,
 	.mode_valid =   sde_connector_mode_valid,
@@ -1368,6 +1459,9 @@
 
 	priv->connectors[priv->num_connectors++] = &c_conn->base;
 
+	INIT_DELAYED_WORK(&c_conn->status_work,
+			sde_connector_check_status_work);
+
 	return &c_conn->base;
 
 error_destroy_property:
@@ -1399,6 +1493,9 @@
 	case DRM_EVENT_SYS_BACKLIGHT:
 		ret = 0;
 		break;
+	case DRM_EVENT_PANEL_DEAD:
+		ret = 0;
+		break;
 	default:
 		break;
 	}
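
The ESD plumbing above boils down to a self-rearming delayed work item: it is initialized at connector creation, scheduled whenever the panel is on and ESD is advertised, re-armed after every successful check, and cancelled synchronously on DPMS-off or when modes are torn down. A kernel-style sketch of that pattern (not driver code; all example_* names are placeholders):

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_CHECK_INTERVAL_MS	5000

struct example_conn {
	struct delayed_work status_work;
	bool panel_on;
};

/* stand-in for the real register-read/BTA/TE check */
static bool example_check_panel_alive(struct example_conn *c)
{
	return c->panel_on;
}

static void example_status_work_fn(struct work_struct *work)
{
	struct example_conn *c = container_of(to_delayed_work(work),
					struct example_conn, status_work);

	if (example_check_panel_alive(c))
		schedule_delayed_work(&c->status_work,
			msecs_to_jiffies(EXAMPLE_CHECK_INTERVAL_MS));
	/* else: notify userspace, e.g. a panel-dead DRM event */
}

static void example_esd_init(struct example_conn *c)
{
	INIT_DELAYED_WORK(&c->status_work, example_status_work_fn);
}

static void example_esd_start_stop(struct example_conn *c, bool en)
{
	if (en)
		schedule_delayed_work(&c->status_work,
			msecs_to_jiffies(EXAMPLE_CHECK_INTERVAL_MS));
	else
		cancel_delayed_work_sync(&c->status_work);
}
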
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index ceeafc2..4018441 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -199,6 +199,26 @@
 	 * Returns: dst_format of display
 	 */
 	enum dsi_pixel_format (*get_dst_format)(void *display);
+
+	/**
+	 * post_kickoff - program post kickoff-time display features
+	 * @connector: Pointer to drm connector structure
+	 * Returns: Zero on success
+	 */
+	int (*post_kickoff)(struct drm_connector *connector);
+
+	/**
+	 * send_hpd_event - send HPD uevent notification to userspace
+	 * @display: Pointer to private display structure
+	 */
+	void (*send_hpd_event)(void *display);
+
+	/**
+	 * check_status - check status of connected display panel
+	 * @display: Pointer to private display handle
+	 * Returns: positive value for success, negative or zero for failure
+	 */
+	int (*check_status)(void *display);
 };
 
 /**
@@ -248,6 +268,8 @@
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
  * @bl_device: backlight device node
+ * @status_work: work object to perform status checks
+ * @force_panel_dead: variable to trigger forced ESD recovery
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -280,6 +302,8 @@
 	spinlock_t event_lock;
 
 	struct backlight_device *bl_device;
+	struct delayed_work status_work;
+	u32 force_panel_dead;
 };
 
 /**
@@ -567,4 +591,11 @@
 int sde_connector_get_dither_cfg(struct drm_connector *conn,
 		struct drm_connector_state *state, void **cfg, size_t *len);
 
+/**
+ * sde_connector_schedule_status_work - manage ESD thread
+ * conn: Pointer to drm_connector struct
+ * @en: flag to start/stop ESD thread
+ */
+void sde_connector_schedule_status_work(struct drm_connector *conn, bool en);
+
 #endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 1402fdd..a0846ff 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -31,14 +31,17 @@
 	struct sde_irq *irq_obj = &sde_kms->irq_obj;
 	struct sde_irq_callback *cb;
 	unsigned long irq_flags;
+	bool cb_tbl_error = false;
+	int enable_counts = 0;
 
 	pr_debug("irq_idx=%d\n", irq_idx);
 
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
 	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
-		SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
-		SDE_EVT32_IRQ(irq_idx, atomic_read(
-				&sde_kms->irq_obj.enable_counts[irq_idx]),
-				SDE_EVTLOG_ERROR);
+		/* print error outside lock */
+		cb_tbl_error = true;
+		enable_counts = atomic_read(
+				&sde_kms->irq_obj.enable_counts[irq_idx]);
 	}
 
 	atomic_inc(&irq_obj->irq_counts[irq_idx]);
@@ -46,12 +49,17 @@
 	/*
 	 * Perform registered function callback
 	 */
-	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
 	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
 		if (cb->func)
 			cb->func(cb->arg, irq_idx);
 	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
 
+	if (cb_tbl_error) {
+		SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
+				irq_idx, enable_counts);
+		SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+	}
+
 	/*
 	 * Clear pending interrupt status in HW.
 	 * NOTE: sde_core_irq_callback_handler is protected by top-level
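
The locking change above follows a common pattern: keep the emptiness check and the callback walk under a single cb_lock acquisition, but defer the error print and event log until the lock is dropped, because console output from hard-IRQ context with a spinlock held can stall other CPUs contending on the same lock. A kernel-style sketch of the pattern (not driver code; example_* names are placeholders):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/printk.h>

struct example_cb {
	struct list_head list;
	void (*func)(void *arg, int idx);
	void *arg;
};

struct example_irq_obj {
	spinlock_t cb_lock;
	struct list_head cb_tbl[8];
};

static void example_irq_dispatch(struct example_irq_obj *obj, int idx)
{
	struct example_cb *cb;
	unsigned long flags;
	bool no_cb;

	spin_lock_irqsave(&obj->cb_lock, flags);
	no_cb = list_empty(&obj->cb_tbl[idx]);
	list_for_each_entry(cb, &obj->cb_tbl[idx], list)
		if (cb->func)
			cb->func(cb->arg, idx);
	spin_unlock_irqrestore(&obj->cb_lock, flags);

	/* log outside the lock */
	if (no_cb)
		pr_err("irq %d fired with no registered callback\n", idx);
}
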
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 30cf3df..593e236 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -611,6 +611,18 @@
 		return;
 }
 
+/**
+ * _sde_crtc_destroy_dest_scaler - free memory allocated for the scaler LUT
+ * @sde_crtc: Pointer to sde crtc
+ */
+static void _sde_crtc_destroy_dest_scaler(struct sde_crtc *sde_crtc)
+{
+	if (!sde_crtc)
+		return;
+
+	kfree(sde_crtc->scl3_lut_cfg);
+}
+
 static void sde_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -624,6 +636,7 @@
 		drm_property_unreference_blob(sde_crtc->blob_info);
 	msm_property_destroy(&sde_crtc->property_info);
 	sde_cp_crtc_destroy_properties(crtc);
+	_sde_crtc_destroy_dest_scaler(sde_crtc);
 
 	sde_fence_deinit(&sde_crtc->output_fence);
 	_sde_crtc_deinit_events(sde_crtc);
@@ -639,8 +652,9 @@
 {
 	SDE_DEBUG("\n");
 
-	if (msm_is_mode_seamless(adjusted_mode) &&
-		(!crtc->enabled || crtc->state->active_changed)) {
+	if ((msm_is_mode_seamless(adjusted_mode) ||
+			msm_is_mode_seamless_vrr(adjusted_mode)) &&
+		(!crtc->enabled)) {
 		SDE_ERROR("crtc state prevents seamless transition\n");
 		return false;
 	}
@@ -790,7 +804,7 @@
 }
 
 static int _sde_crtc_set_roi_v1(struct drm_crtc_state *state,
-		void *usr_ptr)
+		void __user *usr_ptr)
 {
 	struct drm_crtc *crtc;
 	struct sde_crtc_state *cstate;
@@ -912,23 +926,6 @@
 
 	sde_kms_rect_merge_rectangles(&crtc_state->user_roi_list, crtc_roi);
 
-	/*
-	 * for 3dmux dsc, make sure is full ROI, since current driver doesn't
-	 * support partial update for this configuration.
-	 */
-	if (!sde_kms_rect_is_null(crtc_roi) &&
-		_sde_crtc_setup_is_3dmux_dsc(state)) {
-		struct drm_display_mode *adj_mode = &state->adjusted_mode;
-
-		if (crtc_roi->w != adj_mode->hdisplay ||
-			crtc_roi->h != adj_mode->vdisplay) {
-			SDE_ERROR("%s: unsupported top roi[%d %d] wxh[%d %d]\n",
-				sde_crtc->name, crtc_roi->w, crtc_roi->h,
-				adj_mode->hdisplay, adj_mode->vdisplay);
-			return -EINVAL;
-		}
-	}
-
 	SDE_DEBUG("%s: crtc roi (%d,%d,%d,%d)\n", sde_crtc->name,
 			crtc_roi->x, crtc_roi->y, crtc_roi->w, crtc_roi->h);
 
@@ -999,6 +996,18 @@
 	SDE_DEBUG("%s: lm%d roi (%d,%d,%d,%d)\n", sde_crtc->name, lm_idx,
 			lm_roi->x, lm_roi->y, lm_roi->w, lm_roi->h);
 
+	/*
+	 * partial update is not supported with 3dmux dsc or dest scaler.
+	 * hence, crtc roi must match the mixer dimensions.
+	 */
+	if (crtc_state->num_ds_enabled ||
+		_sde_crtc_setup_is_3dmux_dsc(state)) {
+		if (memcmp(lm_roi, lm_bounds, sizeof(struct sde_rect))) {
+			SDE_ERROR("Unsupported: Dest scaler/3d mux DSC + PU\n");
+			return -EINVAL;
+		}
+	}
+
 	/* if any dimension is zero, clear all dimensions for clarity */
 	if (sde_kms_rect_is_null(lm_roi))
 		memset(lm_roi, 0, sizeof(*lm_roi));
@@ -1263,7 +1272,7 @@
 	struct sde_hw_stage_cfg *stage_cfg;
 	struct sde_rect plane_crtc_roi;
 
-	u32 flush_mask, flush_sbuf, flush_tmp;
+	u32 flush_mask, flush_sbuf;
 	uint32_t stage_idx, lm_idx;
 	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
 	int i;
@@ -1279,10 +1288,9 @@
 	lm = mixer->hw_lm;
 	stage_cfg = &sde_crtc->stage_cfg;
 	cstate = to_sde_crtc_state(crtc->state);
-	flush_sbuf = 0x0;
 
-	cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
 	cstate->sbuf_prefill_line = 0;
+	sde_crtc->sbuf_flush_mask = 0x0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		state = plane->state;
@@ -1297,17 +1305,14 @@
 		pstate = to_sde_plane_state(state);
 		fb = state->fb;
 
-		if (sde_plane_is_sbuf_mode(plane, &prefill))
-			cstate->sbuf_cfg.rot_op_mode =
-					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+		prefill = sde_plane_rot_calc_prefill(plane);
 		if (prefill > cstate->sbuf_prefill_line)
 			cstate->sbuf_prefill_line = prefill;
 
-		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
+		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_sbuf);
 
-		/* persist rotator flush bit(s) for one more commit */
-		flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
-		flush_sbuf |= flush_tmp;
+		/* save sbuf flush value for later */
+		sde_crtc->sbuf_flush_mask |= flush_sbuf;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
@@ -1331,8 +1336,7 @@
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_x, state->crtc_y,
 				state->crtc_w, state->crtc_h,
-				flush_tmp ? cstate->sbuf_cfg.rot_op_mode :
-				SDE_CTL_ROT_OP_MODE_OFFLINE);
+				flush_sbuf != 0);
 
 		stage_idx = zpos_cnt[pstate->stage]++;
 		stage_cfg->stage[pstate->stage][stage_idx] =
@@ -1359,8 +1363,6 @@
 		}
 	}
 
-	cstate->sbuf_flush_mask = flush_sbuf;
-
 	if (lm && lm->ops.setup_dim_layer) {
 		cstate = to_sde_crtc_state(crtc->state);
 		for (i = 0; i < cstate->num_dim_layers; i++)
@@ -1502,6 +1504,7 @@
 
 		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
 
+		mixer[i].pipe_mask = mixer[i].flush_mask;
 		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
 			mixer[i].hw_lm->idx);
 
@@ -1665,8 +1668,6 @@
 			smmu_state->transition_type = post_commit ?
 				POST_COMMIT : PRE_COMMIT;
 			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE;
-			if (translation_mode == SDE_DRM_FB_SEC)
-				ops |= SDE_KMS_OPS_PREPARE_PLANE_FB;
 			if (old_valid_fb)
 				ops |= SDE_KMS_OPS_WAIT_FOR_TX_DONE;
 		} else if ((smmu_state->state == DETACHED) ||
@@ -1674,8 +1675,7 @@
 			smmu_state->state = ATTACH_ALL_REQ;
 			smmu_state->transition_type = post_commit ?
 				POST_COMMIT : PRE_COMMIT;
-			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE |
-				SDE_KMS_OPS_PREPARE_PLANE_FB;
+			ops |= SDE_KMS_OPS_CRTC_SECURE_STATE_CHANGE;
 			if (old_valid_fb)
 				ops |= (SDE_KMS_OPS_WAIT_FOR_TX_DONE |
 				 SDE_KMS_OPS_CLEANUP_PLANE_FB);
@@ -1738,6 +1738,64 @@
 }
 
 /**
+ * _sde_crtc_set_dest_scaler_lut - set up the destination scaler LUT
+ * @sde_crtc: Pointer to sde crtc
+ * @cstate: Pointer to sde crtc state
+ * @lut_idx: LUT blob property index
+ *
+ * LUTs are configured only once, during boot.
+ */
+static int _sde_crtc_set_dest_scaler_lut(struct sde_crtc *sde_crtc,
+		struct sde_crtc_state *cstate, uint32_t lut_idx)
+{
+	struct sde_hw_scaler3_lut_cfg *cfg;
+	u32 *lut_data = NULL;
+	size_t len = 0;
+	int ret = 0;
+
+	if (!sde_crtc || !cstate || !sde_crtc->scl3_lut_cfg) {
+		SDE_ERROR("invalid args\n");
+		return -EINVAL;
+	}
+
+	if (sde_crtc->scl3_lut_cfg->is_configured) {
+		SDE_DEBUG("lut already configured\n");
+		return 0;
+	}
+
+	lut_data = msm_property_get_blob(&sde_crtc->property_info,
+			&cstate->property_state, &len, lut_idx);
+	if (!lut_data || !len) {
+		SDE_ERROR("lut(%d): no data, len(%zu)\n", lut_idx, len);
+		return -ENODATA;
+	}
+
+	cfg = sde_crtc->scl3_lut_cfg;
+
+	switch (lut_idx) {
+	case CRTC_PROP_DEST_SCALER_LUT_ED:
+		cfg->dir_lut = lut_data;
+		cfg->dir_len = len;
+		break;
+	case CRTC_PROP_DEST_SCALER_LUT_CIR:
+		cfg->cir_lut = lut_data;
+		cfg->cir_len = len;
+		break;
+	case CRTC_PROP_DEST_SCALER_LUT_SEP:
+		cfg->sep_lut = lut_data;
+		cfg->sep_len = len;
+		break;
+	default:
+		ret = -EINVAL;
+		SDE_ERROR("invalid LUT index = %d", lut_idx);
+		break;
+	}
+
+	if (cfg->dir_lut && cfg->cir_lut && cfg->sep_lut)
+		cfg->is_configured = true;
+
+	return ret;
+}
+
+/**
  * sde_crtc_secure_ctrl - Initiates the operations to swtich  between secure
  *                       and non-secure mode
  * @crtc: Pointer to crtc
@@ -1855,6 +1913,105 @@
 	return ret;
 }
 
+/**
+ * _sde_crtc_dest_scaler_setup - Set up dest scaler block
+ * @crtc: Pointer to drm crtc
+ */
+static void _sde_crtc_dest_scaler_setup(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_ds *hw_ds;
+	struct sde_hw_ds_cfg *cfg;
+	struct sde_kms *kms;
+	u32 flush_mask = 0, op_mode = 0;
+	u32 lm_idx = 0, num_mixers = 0;
+	int i, count = 0;
+
+	if (!crtc)
+		return;
+
+	sde_crtc   = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	kms    = _sde_crtc_get_kms(crtc);
+	num_mixers = sde_crtc->num_mixers;
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	if (!cstate->ds_dirty) {
+		SDE_DEBUG("no change in settings, skip commit\n");
+	} else if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid parameters\n");
+	} else if (!kms->catalog->mdp[0].has_dest_scaler) {
+		SDE_DEBUG("dest scaler feature not supported\n");
+	} else if (num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("invalid number mixers: %d\n", num_mixers);
+	} else if (!sde_crtc->scl3_lut_cfg->is_configured) {
+		SDE_DEBUG("no LUT data available\n");
+	} else {
+		count = cstate->num_ds_enabled ? cstate->num_ds : num_mixers;
+
+		for (i = 0; i < count; i++) {
+			cfg = &cstate->ds_cfg[i];
+
+			if (!cfg->flags)
+				continue;
+
+			lm_idx = cfg->ndx;
+			hw_lm  = sde_crtc->mixers[lm_idx].hw_lm;
+			hw_ctl = sde_crtc->mixers[lm_idx].hw_ctl;
+			hw_ds  = sde_crtc->mixers[lm_idx].hw_ds;
+
+			/* Setup op mode - Dual/single */
+			if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
+				op_mode |= BIT(hw_ds->idx - DS_0);
+
+			if ((i == count-1) && hw_ds->ops.setup_opmode) {
+				op_mode |= (cstate->num_ds_enabled ==
+					CRTC_DUAL_MIXERS) ?
+					SDE_DS_OP_MODE_DUAL : 0;
+				hw_ds->ops.setup_opmode(hw_ds, op_mode);
+				SDE_EVT32(DRMID(crtc), op_mode);
+			}
+
+			/* Setup scaler */
+			if ((cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE) ||
+				(cfg->flags &
+					SDE_DRM_DESTSCALER_ENHANCER_UPDATE)) {
+				if (hw_ds->ops.setup_scaler)
+					hw_ds->ops.setup_scaler(hw_ds,
+							cfg->scl3_cfg,
+							sde_crtc->scl3_lut_cfg);
+
+				/*
+				 * Clear the flags as the block doesn't have to
+				 * be programmed in each commit if there are no
+				 * updates.
+				 */
+				cfg->flags &= ~SDE_DRM_DESTSCALER_SCALE_UPDATE;
+				cfg->flags &=
+					~SDE_DRM_DESTSCALER_ENHANCER_UPDATE;
+			}
+
+			/* Dest scaler shares the LM flush bit in the CTL path */
+			if (cfg->set_lm_flush && hw_lm && hw_ctl &&
+				hw_ctl->ops.get_bitmask_mixer) {
+				flush_mask = hw_ctl->ops.get_bitmask_mixer(
+						hw_ctl, hw_lm->idx);
+				SDE_DEBUG("Set lm[%d] flush = %d\n",
+					hw_lm->idx, flush_mask);
+				hw_ctl->ops.update_pending_flush(hw_ctl,
+								flush_mask);
+			}
+			cfg->set_lm_flush = false;
+		}
+		cstate->ds_dirty = false;
+	}
+}
+
 void sde_crtc_prepare_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {
@@ -2261,7 +2418,7 @@
  * @user_ptr:    User ptr for sde_drm_dim_layer_v1 struct
  */
 static void _sde_crtc_set_dim_layer_v1(struct sde_crtc_state *cstate,
-		void *usr_ptr)
+		void __user *usr_ptr)
 {
 	struct sde_drm_dim_layer_v1 dim_layer_v1;
 	struct sde_drm_dim_layer_cfg *user_cfg;
@@ -2323,6 +2480,363 @@
 }
 
 /**
+ * _sde_crtc_dest_scaler_init - allocate memory for scaler lut
+ * @sde_crtc    :  Pointer to sde crtc
+ * @catalog :  Pointer to mdss catalog info
+ */
+static void _sde_crtc_dest_scaler_init(struct sde_crtc *sde_crtc,
+				struct sde_mdss_cfg *catalog)
+{
+	if (!sde_crtc || !catalog)
+		return;
+
+	if (!catalog->mdp[0].has_dest_scaler) {
+		SDE_DEBUG("dest scaler feature not supported\n");
+		return;
+	}
+
+	sde_crtc->scl3_lut_cfg = kzalloc(sizeof(struct sde_hw_scaler3_lut_cfg),
+				GFP_KERNEL);
+	if (!sde_crtc->scl3_lut_cfg)
+		SDE_ERROR("failed to create scale LUT cfg for dest scaler\n");
+}
+
+/**
+ * _sde_crtc_set_dest_scaler - copy dest scaler settings from userspace
+ * @sde_crtc   :  Pointer to sde crtc
+ * @cstate :  Pointer to sde crtc state
+ * @usr_ptr:  User ptr for sde_drm_dest_scaler_data struct
+ */
+static int _sde_crtc_set_dest_scaler(struct sde_crtc *sde_crtc,
+				struct sde_crtc_state *cstate,
+				void __user *usr_ptr)
+{
+	struct sde_drm_dest_scaler_data ds_data;
+	struct sde_drm_dest_scaler_cfg *ds_cfg_usr;
+	struct sde_drm_scaler_v2 scaler_v2;
+	void __user *scaler_v2_usr;
+	int i, count, ret = 0;
+
+	if (!sde_crtc || !cstate) {
+		SDE_ERROR("invalid sde_crtc/state\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("crtc %s\n", sde_crtc->name);
+
+	cstate->num_ds = 0;
+	cstate->ds_dirty = false;
+	if (!usr_ptr) {
+		SDE_DEBUG("ds data removed\n");
+		return 0;
+	}
+
+	if (copy_from_user(&ds_data, usr_ptr, sizeof(ds_data))) {
+		SDE_ERROR("failed to copy dest scaler data from user\n");
+		return -EINVAL;
+	}
+
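+	/*
+	 * The requested dest scaler count must not exceed the mixer count
+	 * and must match it exactly unless partial update is enabled.
+	 */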
+	count = ds_data.num_dest_scaler;
+	if (!sde_crtc->num_mixers || count > sde_crtc->num_mixers ||
+		(count && (count != sde_crtc->num_mixers) &&
+		!(ds_data.ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
+		SDE_ERROR("invalid config:num ds(%d), mixers(%d),flags(%d)\n",
+			count, sde_crtc->num_mixers, ds_data.ds_cfg[0].flags);
+		return -EINVAL;
+	}
+
+	/* Populate from user space */
+	for (i = 0; i < count; i++) {
+		ds_cfg_usr = &ds_data.ds_cfg[i];
+
+		cstate->ds_cfg[i].ndx = ds_cfg_usr->index;
+		cstate->ds_cfg[i].flags = ds_cfg_usr->flags;
+		cstate->ds_cfg[i].lm_width = ds_cfg_usr->lm_width;
+		cstate->ds_cfg[i].lm_height = ds_cfg_usr->lm_height;
+		cstate->ds_cfg[i].scl3_cfg = NULL;
+
+		if (ds_cfg_usr->scaler_cfg) {
+			scaler_v2_usr =
+			(void __user *)((uintptr_t)ds_cfg_usr->scaler_cfg);
+
+			memset(&scaler_v2, 0, sizeof(scaler_v2));
+
+			cstate->ds_cfg[i].scl3_cfg =
+				kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+					GFP_KERNEL);
+
+			if (!cstate->ds_cfg[i].scl3_cfg) {
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			if (copy_from_user(&scaler_v2, scaler_v2_usr,
+					sizeof(scaler_v2))) {
+				SDE_ERROR("scale data:copy from user failed\n");
+				ret = -EINVAL;
+				goto err;
+			}
+
+			sde_set_scaler_v2(cstate->ds_cfg[i].scl3_cfg,
+					&scaler_v2);
+
+			SDE_DEBUG("en(%d)dir(%d)de(%d) src(%dx%d) dst(%dx%d)\n",
+				scaler_v2.enable, scaler_v2.dir_en,
+				scaler_v2.de.enable, scaler_v2.src_width[0],
+				scaler_v2.src_height[0], scaler_v2.dst_width,
+				scaler_v2.dst_height);
+			SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base),
+				scaler_v2.enable, scaler_v2.dir_en,
+				scaler_v2.de.enable, scaler_v2.src_width[0],
+				scaler_v2.src_height[0], scaler_v2.dst_width,
+				scaler_v2.dst_height);
+		}
+
+		SDE_DEBUG("ds cfg[%d]-ndx(%d) flags(%d) lm(%dx%d)\n",
+			i, ds_cfg_usr->index, ds_cfg_usr->flags,
+			ds_cfg_usr->lm_width, ds_cfg_usr->lm_height);
+		SDE_EVT32_VERBOSE(DRMID(&sde_crtc->base), i, ds_cfg_usr->index,
+			ds_cfg_usr->flags, ds_cfg_usr->lm_width,
+			ds_cfg_usr->lm_height);
+	}
+
+	cstate->num_ds = count;
+	cstate->ds_dirty = true;
+	return 0;
+
+err:
+	for (; i >= 0; i--)
+		kfree(cstate->ds_cfg[i].scl3_cfg);
+
+	return ret;
+}
+
+/**
+ * _sde_crtc_check_dest_scaler_data - validate the dest scaler data
+ * @crtc  :  Pointer to drm crtc
+ * @state :  Pointer to drm crtc state
+ */
+static int _sde_crtc_check_dest_scaler_data(struct drm_crtc *crtc,
+				struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_display_mode *mode;
+	struct sde_kms *kms;
+	struct sde_hw_ds *hw_ds;
+	struct sde_hw_ds_cfg *cfg;
+	u32 i, ret = 0, lm_idx;
+	u32 num_ds_enable = 0;
+	u32 max_in_width = 0, max_out_width = 0;
+	u32 prev_lm_width = 0, prev_lm_height = 0;
+
+	if (!crtc || !state)
+		return -EINVAL;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+	kms = _sde_crtc_get_kms(crtc);
+	mode = &state->adjusted_mode;
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	if (!cstate->ds_dirty && !cstate->num_ds_enabled) {
+		SDE_DEBUG("dest scaler property not set, skip validation\n");
+		return 0;
+	}
+
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	if (!kms->catalog->mdp[0].has_dest_scaler) {
+		SDE_DEBUG("dest scaler feature not supported\n");
+		return 0;
+	}
+
+	if (!sde_crtc->num_mixers) {
+		SDE_ERROR("mixers not allocated\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if sufficient HW resources are available
+	 * as per target caps & topology
+	 */
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("invalid config: mixers(%d) max(%d)\n",
+			sde_crtc->num_mixers, CRTC_DUAL_MIXERS);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!sde_crtc->mixers[i].hw_lm || !sde_crtc->mixers[i].hw_ds) {
+			SDE_ERROR("insufficient HW resources allocated\n");
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	/*
+	 * Check if DS needs to be enabled or disabled.
+	 * In case of enable, validate the data.
+	 */
+	if (!cstate->ds_dirty || !cstate->num_ds ||
+		!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_ENABLE)) {
+		SDE_DEBUG("disable dest scaler,dirty(%d)num(%d)flags(%d)\n",
+			cstate->ds_dirty, cstate->num_ds,
+			cstate->ds_cfg[0].flags);
+		goto disable;
+	}
+
+	/*
+	 * The number of dest scalers must not exceed the HW DS block count
+	 * and must match the number of mixers, unless this is a partial
+	 * update left-only/right-only use case - currently PU + DS is not
+	 * supported.
+	 */
+	if (cstate->num_ds > kms->catalog->ds_count ||
+		((cstate->num_ds != sde_crtc->num_mixers) &&
+		!(cstate->ds_cfg[0].flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
+		SDE_ERROR("invalid cfg: num_ds(%d), hw_ds_cnt(%d) flags(%d)\n",
+			cstate->num_ds, kms->catalog->ds_count,
+			cstate->ds_cfg[0].flags);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* Validate the DS data */
+	for (i = 0; i < cstate->num_ds; i++) {
+		cfg = &cstate->ds_cfg[i];
+		lm_idx = cfg->ndx;
+
+		/*
+		 * Validate against topology:
+		 * the number of dest scalers should match the number of
+		 * mixers unless it is a partial update left-only/right-only
+		 * use case.
+		 */
+		if (lm_idx >= sde_crtc->num_mixers || (i != lm_idx &&
+			!(cfg->flags & SDE_DRM_DESTSCALER_PU_ENABLE))) {
+			SDE_ERROR("invalid user data(%d):idx(%d), flags(%d)\n",
+				i, lm_idx, cfg->flags);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		hw_ds = sde_crtc->mixers[lm_idx].hw_ds;
+
+		if (!max_in_width && !max_out_width) {
+			max_in_width = hw_ds->scl->top->maxinputwidth;
+			max_out_width = hw_ds->scl->top->maxoutputwidth;
+
+			if (cstate->num_ds == CRTC_DUAL_MIXERS)
+				max_in_width -= SDE_DS_OVERFETCH_SIZE;
+
+			SDE_DEBUG("max DS width [%d,%d] for num_ds = %d\n",
+				max_in_width, max_out_width, cstate->num_ds);
+		}
+
+		/* Check LM width and height */
+		if (cfg->lm_width > (mode->hdisplay/sde_crtc->num_mixers) ||
+			cfg->lm_height > mode->vdisplay ||
+			!cfg->lm_width || !cfg->lm_height) {
+			SDE_ERROR("invalid lm size[%d,%d] display [%d,%d]\n",
+				cfg->lm_width,
+				cfg->lm_height,
+				mode->hdisplay/sde_crtc->num_mixers,
+				mode->vdisplay);
+			ret = -E2BIG;
+			goto err;
+		}
+
+		if (!prev_lm_width && !prev_lm_height) {
+			prev_lm_width = cfg->lm_width;
+			prev_lm_height = cfg->lm_height;
+		} else {
+			if (cfg->lm_width != prev_lm_width ||
+				cfg->lm_height != prev_lm_height) {
+				SDE_ERROR("lm size:left[%d,%d], right[%d %d]\n",
+					cfg->lm_width, cfg->lm_height,
+					prev_lm_width, prev_lm_height);
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+
+		/* Check scaler data */
+		if (cfg->flags & SDE_DRM_DESTSCALER_SCALE_UPDATE ||
+			cfg->flags & SDE_DRM_DESTSCALER_ENHANCER_UPDATE) {
+			if (!cfg->scl3_cfg) {
+				ret = -EINVAL;
+				SDE_ERROR("null scale data\n");
+				goto err;
+			}
+			if (cfg->scl3_cfg->src_width[0] > max_in_width ||
+				cfg->scl3_cfg->dst_width > max_out_width ||
+				!cfg->scl3_cfg->src_width[0] ||
+				!cfg->scl3_cfg->dst_width) {
+				SDE_ERROR("scale width(%d %d) for ds-%d:\n",
+					cfg->scl3_cfg->src_width[0],
+					cfg->scl3_cfg->dst_width,
+					hw_ds->idx - DS_0);
+				SDE_ERROR("scale_en = %d, DE_en =%d\n",
+					cfg->scl3_cfg->enable,
+					cfg->scl3_cfg->de.enable);
+
+				cfg->flags &=
+					~SDE_DRM_DESTSCALER_SCALE_UPDATE;
+				cfg->flags &=
+					~SDE_DRM_DESTSCALER_ENHANCER_UPDATE;
+
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+
+		if (cfg->flags & SDE_DRM_DESTSCALER_ENABLE)
+			num_ds_enable++;
+
+		/* Validation successful; mark the LM flush to be issued */
+		cfg->set_lm_flush = true;
+
+		SDE_DEBUG("ds[%d]: flags = 0x%X\n",
+			hw_ds->idx - DS_0, cfg->flags);
+	}
+
+disable:
+	SDE_DEBUG("dest scaler enable status, old = %d, new = %d\n",
+		cstate->num_ds_enabled, num_ds_enable);
+	SDE_EVT32(DRMID(crtc), cstate->num_ds_enabled, num_ds_enable,
+		cstate->ds_dirty);
+
+	if (cstate->num_ds_enabled != num_ds_enable) {
+		/* Disabling destination scaler */
+		if (!num_ds_enable) {
+			for (i = 0; i < sde_crtc->num_mixers; i++) {
+				cfg = &cstate->ds_cfg[i];
+				cfg->ndx = i;
+				/* Update scaler settings in disable case */
+				cfg->flags = SDE_DRM_DESTSCALER_SCALE_UPDATE;
+				cfg->scl3_cfg->enable = 0;
+				cfg->scl3_cfg->de.enable = 0;
+				cfg->set_lm_flush = true;
+			}
+		}
+		cstate->num_ds_enabled = num_ds_enable;
+		cstate->ds_dirty = true;
+	}
+
+	return 0;
+
+err:
+	cstate->ds_dirty = false;
+	return ret;
+}
+
+/**
  * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
  * @crtc: Pointer to CRTC object
  */
@@ -2379,11 +2893,12 @@
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_ctl *last_valid_ctl = NULL;
 	int i;
-	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
+	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter, ds_iter;
 
 	sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
 	sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
 	sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
+	sde_rm_init_hw_iter(&ds_iter, enc->base.id, SDE_HW_BLK_DS);
 
 	/* Set up all the mixers and ctls reserved by this encoder */
 	for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
@@ -2414,6 +2929,10 @@
 		(void) sde_rm_get_hw(rm, &dspp_iter);
 		mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
 
+		/* DS may be null */
+		(void) sde_rm_get_hw(rm, &ds_iter);
+		mixer->hw_ds = (struct sde_hw_ds *)ds_iter.hw;
+
 		mixer->encoder = enc;
 
 		sde_crtc->num_mixers++;
@@ -2421,6 +2940,9 @@
 				i, mixer->hw_lm->idx - LM_0);
 		SDE_DEBUG("setup mixer %d: ctl %d\n",
 				i, mixer->hw_ctl->idx - CTL_0);
+		if (mixer->hw_ds)
+			SDE_DEBUG("setup mixer %d: ds %d\n",
+				i, mixer->hw_ds->idx - DS_0);
 	}
 }
 
@@ -2430,6 +2952,7 @@
 	struct drm_encoder *enc;
 
 	sde_crtc->num_mixers = 0;
+	sde_crtc->mixers_swapped = false;
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 
 	mutex_lock(&sde_crtc->crtc_lock);
@@ -2479,13 +3002,14 @@
 	cstate = to_sde_crtc_state(state);
 
 	adj_mode = &state->adjusted_mode;
-	crtc_split_width = sde_crtc_mixer_width(sde_crtc, adj_mode);
+	crtc_split_width = sde_crtc_get_mixer_width(sde_crtc, cstate, adj_mode);
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		cstate->lm_bounds[i].x = crtc_split_width * i;
 		cstate->lm_bounds[i].y = 0;
 		cstate->lm_bounds[i].w = crtc_split_width;
-		cstate->lm_bounds[i].h = adj_mode->vdisplay;
+		cstate->lm_bounds[i].h =
+			sde_crtc_get_mixer_height(sde_crtc, cstate, adj_mode);
 		memcpy(&cstate->lm_roi[i], &cstate->lm_bounds[i],
 				sizeof(cstate->lm_roi[i]));
 		SDE_EVT32_VERBOSE(DRMID(crtc), i,
@@ -2556,6 +3080,7 @@
 		return;
 
 	_sde_crtc_blend_setup(crtc);
+	_sde_crtc_dest_scaler_setup(crtc);
 
 	/*
 	 * Since CP properties use AXI buffer to program the
@@ -2731,17 +3256,120 @@
 	return rc;
 }
 
-void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
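+/**
+ * _sde_crtc_commit_kickoff_rot - update sbuf config and kick off inline rotation
+ * @crtc: Pointer to drm crtc
+ * @cstate: Pointer to sde crtc state
+ * Returns: 0 on success, error code if the inline rotator kickoff fails
+ */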
+static int _sde_crtc_commit_kickoff_rot(struct drm_crtc *crtc,
+		struct sde_crtc_state *cstate)
 {
 	struct drm_plane *plane;
+	struct sde_crtc *sde_crtc;
+	struct sde_hw_ctl *ctl, *master_ctl;
+	u32 flush_mask;
+	int i, rc = 0;
+
+	if (!crtc || !cstate)
+		return -EINVAL;
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	/*
+	 * Update sbuf configuration and flush bits if a flush
+	 * mask has been defined for either the current or
+	 * previous commit.
+	 *
+	 * Updates are also required for the first commit after
+	 * sbuf_flush_mask becomes 0x0, to properly transition
+	 * the hardware out of sbuf mode.
+	 */
+	if (!sde_crtc->sbuf_flush_mask_old && !sde_crtc->sbuf_flush_mask)
+		return 0;
+
+	flush_mask = sde_crtc->sbuf_flush_mask_old | sde_crtc->sbuf_flush_mask;
+	sde_crtc->sbuf_flush_mask_old = sde_crtc->sbuf_flush_mask;
+
+	SDE_ATRACE_BEGIN("crtc_kickoff_rot");
+
+	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
+		drm_atomic_crtc_for_each_plane(plane, crtc) {
+			rc = sde_plane_kickoff_rot(plane);
+			if (rc) {
+				SDE_ERROR("crtc%d cancelling inline rotation\n",
+						crtc->base.id);
+				SDE_EVT32(DRMID(crtc), SDE_EVTLOG_ERROR);
+
+				/* revert to offline on errors */
+				cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_OFFLINE;
+				break;
+			}
+		}
+	}
+
+	master_ctl = NULL;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (!ctl)
+			continue;
+
+		if (!master_ctl || master_ctl->idx > ctl->idx)
+			master_ctl = ctl;
+	}
+
+	/* only update sbuf_cfg and flush for master ctl */
+	if (master_ctl && master_ctl->ops.setup_sbuf_cfg &&
+			master_ctl->ops.update_pending_flush) {
+		master_ctl->ops.setup_sbuf_cfg(master_ctl, &cstate->sbuf_cfg);
+		master_ctl->ops.update_pending_flush(master_ctl, flush_mask);
+
+		/* explicitly trigger rotator for async modes */
+		if (cstate->sbuf_cfg.rot_op_mode ==
+				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
+				master_ctl->ops.trigger_rot_start) {
+			master_ctl->ops.trigger_rot_start(master_ctl);
+			SDE_EVT32(DRMID(crtc), master_ctl->idx - CTL_0);
+		}
+	}
+
+	SDE_ATRACE_END("crtc_kickoff_rot");
+	return rc;
+}
+
+/**
+ * _sde_crtc_remove_pipe_flush - remove staged pipes from flush mask
+ * @sde_crtc: Pointer to sde crtc structure
+ */
+static void _sde_crtc_remove_pipe_flush(struct sde_crtc *sde_crtc)
+{
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_ctl *ctl;
+	u32 i, flush_mask;
+
+	if (!sde_crtc)
+		return;
+
+	mixer = sde_crtc->mixers;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = mixer[i].hw_ctl;
+		if (!ctl || !ctl->ops.get_pending_flush ||
+				!ctl->ops.clear_pending_flush ||
+				!ctl->ops.update_pending_flush)
+			continue;
+
+		flush_mask = ctl->ops.get_pending_flush(ctl);
+		flush_mask &= ~mixer[i].pipe_mask;
+		ctl->ops.clear_pending_flush(ctl);
+		ctl->ops.update_pending_flush(ctl, flush_mask);
+	}
+}
+
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
 	struct drm_encoder *encoder;
 	struct drm_device *dev;
 	struct sde_crtc *sde_crtc;
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	struct sde_crtc_state *cstate;
-	struct sde_hw_ctl *ctl;
-	int ret, i;
+	bool is_error;
+	int ret;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
@@ -2750,6 +3378,7 @@
 	dev = crtc->dev;
 	sde_crtc = to_sde_crtc(crtc);
 	sde_kms = _sde_crtc_get_kms(crtc);
+	is_error = false;
 
 	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
 		SDE_ERROR("invalid argument\n");
@@ -2768,6 +3397,11 @@
 		return;
 
 	SDE_ATRACE_BEGIN("crtc_commit");
+
+	/* default to ASYNC mode for inline rotation */
+	cstate->sbuf_cfg.rot_op_mode = sde_crtc->sbuf_flush_mask ?
+		SDE_CTL_ROT_OP_MODE_INLINE_ASYNC : SDE_CTL_ROT_OP_MODE_OFFLINE;
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct sde_encoder_kickoff_params params = { 0 };
 
@@ -2782,8 +3416,28 @@
 		params.affected_displays = _sde_crtc_get_displays_affected(crtc,
 				crtc->state);
 		sde_encoder_prepare_for_kickoff(encoder, &params);
+
+		/*
+		 * For inline ASYNC modes, the flush bits are not written
+		 * to hardware atomically, so avoid ASYNC mode if a video
+		 * mode encoder is active on this CRTC.
+		 */
+		if (cstate->sbuf_cfg.rot_op_mode ==
+				SDE_CTL_ROT_OP_MODE_INLINE_ASYNC &&
+				sde_encoder_get_intf_mode(encoder) ==
+				INTF_MODE_VIDEO)
+			cstate->sbuf_cfg.rot_op_mode =
+				SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
 	}
 
+	/*
+	 * For ASYNC inline modes, kick off the rotator now so that the H/W
+	 * can start as soon as it's ready.
+	 */
+	if (cstate->sbuf_cfg.rot_op_mode == SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
+		if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+			is_error = true;
+
 	/* wait for frame_event_done completion */
 	SDE_ATRACE_BEGIN("wait_for_frame_done_event");
 	ret = _sde_crtc_wait_for_frame_done(crtc);
@@ -2792,41 +3446,48 @@
 		SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
 				crtc->base.id,
 				atomic_read(&sde_crtc->frame_pending));
-		goto end;
+
+		is_error = true;
+
+		/* force offline rotation mode since the commit has no pipes */
+		cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
 	}
 
 	if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
 		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1);
+		SDE_EVT32(DRMID(crtc), cstate->sbuf_cfg.rot_op_mode,
+				SDE_EVTLOG_FUNC_CASE1);
 	} else {
 		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
-		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2);
+		SDE_EVT32(DRMID(crtc), cstate->sbuf_cfg.rot_op_mode,
+				SDE_EVTLOG_FUNC_CASE2);
 	}
 	sde_crtc->play_count++;
 
-	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE) {
-		drm_atomic_crtc_for_each_plane(plane, crtc) {
-			sde_plane_kickoff(plane);
-		}
-	}
-
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		ctl = sde_crtc->mixers[i].hw_ctl;
-		if (ctl && ctl->ops.setup_sbuf_cfg)
-			ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
-	}
+	/*
+	 * For SYNC inline modes, delay the kick off until after the
+	 * wait for frame done in case the wait times out.
+	 *
+	 * Also perform a final kickoff when transitioning back to
+	 * offline mode.
+	 */
+	if (cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_INLINE_ASYNC)
+		if (_sde_crtc_commit_kickoff_rot(crtc, cstate))
+			is_error = true;
 
 	sde_vbif_clear_errors(sde_kms);
 
+	if (is_error)
+		_sde_crtc_remove_pipe_flush(sde_crtc);
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)
 			continue;
 
-		sde_encoder_kickoff(encoder);
+		sde_encoder_kickoff(encoder, is_error);
 	}
 
-end:
 	reinit_completion(&sde_crtc->frame_done_comp);
 	SDE_ATRACE_END("crtc_commit");
 	return;
@@ -3152,6 +3813,10 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	for (i = 0; i < cstate->num_connectors; i++)
+		sde_connector_schedule_status_work(cstate->connectors[i],
+							false);
+
 	if (sde_kms_is_suspend_state(crtc->dev))
 		_sde_crtc_set_suspend(crtc, true);
 
@@ -3226,6 +3891,7 @@
 
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
+	sde_crtc->mixers_swapped = false;
 
 	/* disable clk & bw control until clk & bw properties are set */
 	cstate->bw_control = false;
@@ -3243,13 +3909,15 @@
 	struct sde_crtc_irq_info *node = NULL;
 	struct drm_event event;
 	u32 power_on;
-	int ret;
+	int ret, i;
+	struct sde_crtc_state *cstate;
 
 	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	priv = crtc->dev->dev_private;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 	SDE_EVT32_VERBOSE(DRMID(crtc));
@@ -3300,6 +3968,9 @@
 		SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE |
 		SDE_POWER_EVENT_PRE_DISABLE,
 		sde_crtc_handle_power_event, crtc, sde_crtc->name);
+
+	for (i = 0; i < cstate->num_connectors; i++)
+		sde_connector_schedule_status_work(cstate->connectors[i], true);
 }
 
 struct plane_state {
@@ -3508,7 +4179,14 @@
 
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
-	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+	rc = _sde_crtc_check_dest_scaler_data(crtc, state);
+	if (rc) {
+		SDE_ERROR("crtc%d failed dest scaler check %d\n",
+			crtc->base.id, rc);
+		goto end;
+	}
+
+	mixer_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
 
 	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
@@ -3861,7 +4539,7 @@
 			CRTC_PROP_ROT_CLK);
 
 	msm_property_install_range(&sde_crtc->property_info,
-		"idle_timeout", IDLE_TIMEOUT, 0, U64_MAX, 0,
+		"idle_time", IDLE_TIMEOUT, 0, U64_MAX, 0,
 		CRTC_PROP_IDLE_TIMEOUT);
 
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
@@ -3903,6 +4581,39 @@
 					"smart_dma_rev", "smart_dma_v2");
 	}
 
+	if (catalog->mdp[0].has_dest_scaler) {
+		sde_kms_info_add_keyint(info, "has_dest_scaler",
+				catalog->mdp[0].has_dest_scaler);
+		sde_kms_info_add_keyint(info, "dest_scaler_count",
+					catalog->ds_count);
+
+		if (catalog->ds[0].top) {
+			sde_kms_info_add_keyint(info,
+					"max_dest_scaler_input_width",
+					catalog->ds[0].top->maxinputwidth);
+			sde_kms_info_add_keyint(info,
+					"max_dest_scaler_output_width",
+					catalog->ds[0].top->maxoutputwidth);
+			sde_kms_info_add_keyint(info, "max_dest_scale_up",
+					catalog->ds[0].top->maxupscale);
+		}
+
+		if (catalog->ds[0].features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+			msm_property_install_volatile_range(
+					&sde_crtc->property_info, "dest_scaler",
+					0x0, 0, ~0, 0, CRTC_PROP_DEST_SCALER);
+			msm_property_install_blob(&sde_crtc->property_info,
+					"ds_lut_ed", 0,
+					CRTC_PROP_DEST_SCALER_LUT_ED);
+			msm_property_install_blob(&sde_crtc->property_info,
+					"ds_lut_cir", 0,
+					CRTC_PROP_DEST_SCALER_LUT_CIR);
+			msm_property_install_blob(&sde_crtc->property_info,
+					"ds_lut_sep", 0,
+					CRTC_PROP_DEST_SCALER_LUT_SEP);
+		}
+	}
+
 	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
 	if (catalog->perf.max_bw_low)
 		sde_kms_info_add_keyint(info, "max_bandwidth_low",
@@ -3987,10 +4698,22 @@
 				_sde_crtc_set_input_fence_timeout(cstate);
 				break;
 			case CRTC_PROP_DIM_LAYER_V1:
-				_sde_crtc_set_dim_layer_v1(cstate, (void *)val);
+				_sde_crtc_set_dim_layer_v1(cstate,
+							(void __user *)val);
 				break;
 			case CRTC_PROP_ROI_V1:
-				ret = _sde_crtc_set_roi_v1(state, (void *)val);
+				ret = _sde_crtc_set_roi_v1(state,
+							(void __user *)val);
+				break;
+			case CRTC_PROP_DEST_SCALER:
+				ret = _sde_crtc_set_dest_scaler(sde_crtc,
+						cstate, (void __user *)val);
+				break;
+			case CRTC_PROP_DEST_SCALER_LUT_ED:
+			case CRTC_PROP_DEST_SCALER_LUT_CIR:
+			case CRTC_PROP_DEST_SCALER_LUT_SEP:
+				ret = _sde_crtc_set_dest_scaler_lut(sde_crtc,
+								cstate, idx);
 				break;
 			case CRTC_PROP_CORE_CLK:
 			case CRTC_PROP_CORE_AB:
@@ -4138,7 +4861,7 @@
 
 	mutex_lock(&sde_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
-	out_width = sde_crtc_mixer_width(sde_crtc, mode);
+	out_width = sde_crtc_get_mixer_width(sde_crtc, cstate, mode);
 
 	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
 				mode->hdisplay, mode->vdisplay);
@@ -4673,6 +5396,9 @@
 
 	sde_crtc_install_properties(crtc, kms->catalog);
 
+	/* Init dest scaler */
+	_sde_crtc_dest_scaler_init(sde_crtc, kms->catalog);
+
 	/* Install color processing properties */
 	sde_cp_crtc_init(crtc);
 	sde_cp_crtc_install_properties(crtc);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index ea606b3..a46feeb 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -26,6 +26,7 @@
 #include "sde_kms.h"
 #include "sde_core_perf.h"
 #include "sde_hw_blk.h"
+#include "sde_hw_ds.h"
 
 #define SDE_CRTC_NAME_SIZE	12
 
@@ -106,17 +107,21 @@
  * @hw_lm:	LM HW Driver context
  * @hw_ctl:	CTL Path HW driver context
  * @hw_dspp:	DSPP HW driver context
+ * @hw_ds:	DS HW driver context
  * @encoder:	Encoder attached to this lm & ctl
- * @mixer_op_mode: mixer blending operation mode
+ * @mixer_op_mode:	mixer blending operation mode
  * @flush_mask:	mixer flush mask for ctl, mixer and pipe
+ * @pipe_mask:	mixer flush mask for pipe
  */
 struct sde_crtc_mixer {
 	struct sde_hw_mixer *hw_lm;
 	struct sde_hw_ctl *hw_ctl;
-	struct sde_hw_dspp  *hw_dspp;
+	struct sde_hw_dspp *hw_dspp;
+	struct sde_hw_ds *hw_ds;
 	struct drm_encoder *encoder;
 	u32 mixer_op_mode;
 	u32 flush_mask;
+	u32 pipe_mask;
 };
 
 /**
@@ -204,10 +209,13 @@
  * @misr_enable   : boolean entry indicates misr enable/disable status.
  * @misr_frame_count  : misr frame count provided by client
  * @misr_data     : store misr data before turning off the clocks.
+ * @sbuf_flush_mask: flush mask for inline rotator
+ * @sbuf_flush_mask_old: inline rotator flush mask for previous commit
  * @power_event   : registered power event handle
  * @cur_perf      : current performance committed to clock/bandwidth driver
  * @rp_lock       : serialization lock for resource pool
  * @rp_head       : list of active resource pool
+ * @scl3_lut_cfg  : qseed3 lut config
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -218,6 +226,7 @@
 	u32 num_mixers;
 	bool mixers_swapped;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg;
 
 	struct drm_pending_vblank_event *event;
 	u32 vsync_count;
@@ -264,6 +273,9 @@
 	u32 misr_frame_count;
 	u32 misr_data[CRTC_DUAL_MIXERS];
 
+	u32 sbuf_flush_mask;
+	u32 sbuf_flush_mask_old;
+
 	struct sde_power_event *power_event;
 
 	struct sde_core_perf_params cur_perf;
@@ -337,7 +349,6 @@
  * @base: Base drm crtc state structure
  * @connectors    : Currently associated drm connectors
  * @num_connectors: Number of associated drm connectors
- * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
  * @is_ppsplit    : Whether current topology requires PPSplit special handling
  * @bw_control    : true if bw/clk controlled by core bw/clk properties
@@ -354,17 +365,19 @@
  * @input_fence_timeout_ns : Cached input fence timeout, in ns
  * @num_dim_layers: Number of dim layers
  * @dim_layer: Dim layer configs
+ * @num_ds: Number of destination scalers to be configured
+ * @num_ds_enabled: Number of destination scalers enabled
+ * @ds_dirty: Boolean to indicate if the destination scaler config is dirty
+ * @ds_cfg: Destination scaler config
  * @new_perf: new performance state being requested
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
- * @sbuf_flush_mask: flush mask for inline rotator
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
 
 	struct drm_connector *connectors[MAX_CONNECTORS];
 	int num_connectors;
-	enum sde_intf_mode intf_mode;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
 	bool bw_control;
@@ -381,11 +394,14 @@
 	uint64_t input_fence_timeout_ns;
 	uint32_t num_dim_layers;
 	struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
+	uint32_t num_ds;
+	uint32_t num_ds_enabled;
+	bool ds_dirty;
+	struct sde_hw_ds_cfg ds_cfg[SDE_MAX_DS_COUNT];
 
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
 	u32 sbuf_prefill_line;
-	u32 sbuf_flush_mask;
 
 	struct sde_crtc_respool rp;
 };
@@ -402,14 +418,41 @@
 #define sde_crtc_get_property(S, X) \
 	((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)].value) : 0)
 
-static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
-	struct drm_display_mode *mode)
+/**
+ * sde_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split)
+ * unless the destination scaler feature is enabled
+ */
+static inline int sde_crtc_get_mixer_width(struct sde_crtc *sde_crtc,
+	struct sde_crtc_state *cstate, struct drm_display_mode *mode)
 {
-	if (!sde_crtc || !mode)
+	u32 mixer_width;
+
+	if (!sde_crtc || !cstate || !mode)
 		return 0;
 
-	return  sde_crtc->num_mixers == CRTC_DUAL_MIXERS ?
-		mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay;
+	if (cstate->num_ds_enabled)
+		mixer_width = cstate->ds_cfg[0].lm_width;
+	else
+		mixer_width = (sde_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+			mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+	return mixer_width;
+}
+
+/**
+ * sde_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be same as panel height unless
+ * destination scaler feature is enabled
+ */
+static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
+		struct sde_crtc_state *cstate, struct drm_display_mode *mode)
+{
+	if (!sde_crtc || !cstate || !mode)
+		return 0;
+
+	return (cstate->num_ds_enabled ?
+			cstate->ds_cfg[0].lm_height : mode->vdisplay);
 }
 
 /**
@@ -500,8 +543,8 @@
 	if (!cstate)
 		return NRT_CLIENT;
 
-	return cstate->rsc_client ? RT_RSC_CLIENT :
-	    (cstate->intf_mode == INTF_MODE_WB_LINE ? NRT_CLIENT : RT_CLIENT);
+	return sde_crtc_get_intf_mode(crtc) == INTF_MODE_WB_LINE ? NRT_CLIENT :
+			(cstate->rsc_client ? RT_RSC_CLIENT : RT_CLIENT);
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 270e677..7c600ca 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -179,6 +179,7 @@
  * @crtc_frame_event_cb_data:	callback handler private data
  * @frame_done_timeout:		frame done timeout in Hz
  * @frame_done_timer:		watchdog timer for frame done event
+ * @vsync_event_timer:		vsync timer
  * @rsc_client:			rsc client pointer
  * @rsc_state_init:		boolean to indicate rsc config init
  * @disp_info:			local copy of msm_display_info struct
@@ -191,6 +192,7 @@
  * @rc_state:			resource controller state
  * @delayed_off_work:		delayed worker to schedule disabling of
  *				clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work:		worker to handle vsync event for autorefresh
  * @topology:                   topology of the display
  * @mode_set_complete:          flag to indicate modeset completion
  * @rsc_config:			rsc configuration for display vtotal, fps, etc.
@@ -224,6 +226,7 @@
 
 	atomic_t frame_done_timeout;
 	struct timer_list frame_done_timer;
+	struct timer_list vsync_event_timer;
 
 	struct sde_rsc_client *rsc_client;
 	bool rsc_state_init;
@@ -236,6 +239,7 @@
 	struct mutex rc_lock;
 	enum sde_enc_rc_states rc_state;
 	struct kthread_delayed_work delayed_off_work;
+	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
 	bool mode_set_complete;
 
@@ -438,7 +442,7 @@
 	irq = &phys_enc->irq[intr_idx];
 
 	if (irq->irq_idx >= 0) {
-		SDE_ERROR_PHYS(phys_enc,
+		SDE_DEBUG_PHYS(phys_enc,
 				"skipping already registered irq %s type %d\n",
 				irq->name, irq->intr_type);
 		return 0;
@@ -1291,7 +1295,7 @@
 		}
 		break;
 	default:
-		SDE_ERROR_ENC(sde_enc, "Unexpected topology:%d\n", topology);
+		SDE_DEBUG_ENC(sde_enc, "Unexpected topology:%d\n", topology);
 		return -EINVAL;
 	};
 
@@ -1502,29 +1506,31 @@
 {
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	struct sde_encoder_rsc_config rsc_cfg = { 0 };
+	int i;
 
 	if (enable) {
 		rsc_cfg.inline_rotate_prefill =
 				sde_crtc_get_inline_prefill(drm_enc->crtc);
 
-		/* connect the TE source to actual TE GPIO to drive RSC */
-		_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
-				false);
-
 		_sde_encoder_update_rsc_client(drm_enc, &rsc_cfg, true);
 	} else {
 		_sde_encoder_update_rsc_client(drm_enc, NULL, false);
 
 		/**
-		 * disconnect the TE source from the actual TE GPIO for RSC
-		 *
-		 * this call is for hardware workaround on sdm845 and should
-		 * not be removed without considering the design changes for
-		 * sde rsc + command mode concurrency. It may lead to pp
-		 * timeout due to vsync from panel for command mode panel.
+		 * Disable the vsync source after updating the RSC state. The
+		 * RSC state update may wait for a vsync, so the vsync source
+		 * must be disabled only after that wait completes. This avoids
+		 * generating any vsync from this point until mode-2 entry. It
+		 * is a SW workaround for a HW limitation and should not be
+		 * removed without checking the updated design.
 		 */
-		_sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info,
-				true);
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+			if (phys && phys->ops.prepare_idle_pc)
+				phys->ops.prepare_idle_pc(phys);
+		}
+
 	}
 }
 
@@ -2842,6 +2848,165 @@
 		phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
 }
 
+static u32 _sde_encoder_calculate_linetime(struct sde_encoder_virt *sde_enc,
+		struct drm_display_mode *mode)
+{
+	u64 pclk_rate;
+	u32 pclk_period;
+	u32 line_time;
+
+	/*
+	 * For linetime calculation, only operate on master encoder.
+	 */
+	if (!sde_enc->cur_master)
+		return 0;
+
+	if (!sde_enc->cur_master->ops.get_line_count) {
+		SDE_ERROR("get_line_count function not defined\n");
+		return 0;
+	}
+
+	pclk_rate = mode->clock; /* pixel clock in kHz */
+	if (pclk_rate == 0) {
+		SDE_ERROR("pclk is 0, cannot calculate line time\n");
+		return 0;
+	}
+
+	pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+	if (pclk_period == 0) {
+		SDE_ERROR("pclk period is 0\n");
+		return 0;
+	}
+
+	/*
+	 * Line time calculation based on Pixel clock and HTOTAL.
+	 * Final unit is in ns.
+	 */
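+	/*
+	 * For illustration, assuming a hypothetical panel with a 148500 kHz
+	 * pixel clock and htotal of 2200: pclk_period = ceil(10^9 / 148500)
+	 * = 6735 ps and line_time = 6735 * 2200 / 1000 = 14817 ns (~14.8 us).
+	 */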
+	line_time = (pclk_period * mode->htotal) / 1000;
+	if (line_time == 0) {
+		SDE_ERROR("line time calculation is 0\n");
+		return 0;
+	}
+
+	SDE_DEBUG_ENC(sde_enc,
+			"clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+			pclk_rate, pclk_period, line_time);
+
+	return line_time;
+}
+
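+/**
+ * _sde_encoder_wakeup_time - estimate the time of the next vsync
+ * @drm_enc: Pointer to drm encoder
+ * @wakeup_time: Output parameter for the estimated next vsync time
+ * Returns: 0 on success, negative error code otherwise
+ */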
+static int _sde_encoder_wakeup_time(struct drm_encoder *drm_enc,
+		ktime_t *wakeup_time)
+{
+	struct drm_display_mode *mode;
+	struct sde_encoder_virt *sde_enc;
+	u32 cur_line;
+	u32 line_time;
+	u32 vtotal, time_to_vsync;
+	ktime_t cur_time;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!drm_enc->crtc || !drm_enc->crtc->state) {
+		SDE_ERROR("crtc/crtc state object is NULL\n");
+		return -EINVAL;
+	}
+	mode = &drm_enc->crtc->state->adjusted_mode;
+
+	line_time = _sde_encoder_calculate_linetime(sde_enc, mode);
+	if (!line_time)
+		return -EINVAL;
+
+	cur_line = sde_enc->cur_master->ops.get_line_count(sde_enc->cur_master);
+
+	vtotal = mode->vtotal;
+	if (cur_line >= vtotal)
+		time_to_vsync = line_time * vtotal;
+	else
+		time_to_vsync = line_time * (vtotal - cur_line);
+
+	if (time_to_vsync == 0) {
+		SDE_ERROR("time to vsync should not be zero, vtotal=%d\n",
+				vtotal);
+		return -EINVAL;
+	}
+
+	cur_time = ktime_get();
+	*wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+	SDE_DEBUG_ENC(sde_enc,
+			"cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+			cur_line, vtotal, time_to_vsync,
+			ktime_to_ms(cur_time),
+			ktime_to_ms(*wakeup_time));
+	return 0;
+}
+
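+/*
+ * Timer callback fired around the estimated vsync time: queue the vsync
+ * event work to re-arm the timer while autorefresh is enabled, otherwise
+ * delete the timer.
+ */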
+static void sde_encoder_vsync_event_handler(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+	struct msm_drm_thread *event_thread;
+	bool autorefresh_enabled = false;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+			!drm_enc->crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+
+	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+		SDE_ERROR("invalid crtc index\n");
+		return;
+	}
+	event_thread = &priv->event_thread[drm_enc->crtc->index];
+	if (!event_thread) {
+		SDE_ERROR("event_thread not found for crtc:%d\n",
+				drm_enc->crtc->index);
+		return;
+	}
+
+	if (sde_enc->cur_master &&
+		sde_enc->cur_master->ops.is_autorefresh_enabled)
+		autorefresh_enabled =
+			sde_enc->cur_master->ops.is_autorefresh_enabled(
+						sde_enc->cur_master);
+
+	/*
+	 * Queue work to update the vsync event timer
+	 * if autorefresh is enabled.
+	 */
+	SDE_EVT32_VERBOSE(autorefresh_enabled);
+	if (autorefresh_enabled)
+		kthread_queue_work(&event_thread->worker,
+				&sde_enc->vsync_event_work);
+	else
+		del_timer(&sde_enc->vsync_event_timer);
+}
+
+static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+	struct sde_encoder_virt *sde_enc = container_of(work,
+			struct sde_encoder_virt, vsync_event_work);
+	ktime_t wakeup_time;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid sde encoder\n");
+		return;
+	}
+
+	if (_sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time))
+		return;
+
+	SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+	mod_timer(&sde_enc->vsync_event_timer,
+			nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
 void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -2905,11 +3070,51 @@
 	}
 }
 
-void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+/**
+ * _sde_encoder_reset_ctl_hw - reset the h/w configuration of all CTLs
+ *	associated with the specified encoder, and unstage all pipes from them
+ * @drm_enc:	encoder pointer
+ * Returns: 0 on success
+ */
+static int _sde_encoder_reset_ctl_hw(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;
 	unsigned int i;
+	int rc = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_ATRACE_BEGIN("encoder_release_lm");
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (!phys)
+			continue;
+
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0);
+
+		rc = sde_encoder_helper_reset_mixers(phys, NULL);
+		if (rc)
+			SDE_EVT32(DRMID(drm_enc), rc, SDE_EVTLOG_ERROR);
+	}
+
+	SDE_ATRACE_END("encoder_release_lm");
+	return rc;
+}
+
+void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	ktime_t wakeup_time;
+	unsigned int i;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -2926,6 +3131,10 @@
 	mod_timer(&sde_enc->frame_done_timer, jiffies +
 		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
 
+	/* create a 'no pipes' commit to release buffers on errors */
+	if (is_error)
+		_sde_encoder_reset_ctl_hw(drm_enc);
+
 	/* All phys encs are ready to go, trigger the kickoff */
 	_sde_encoder_kickoff_phys(sde_enc);
 
@@ -2935,10 +3144,18 @@
 		if (phys && phys->ops.handle_post_kickoff)
 			phys->ops.handle_post_kickoff(phys);
 	}
+
+	if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+			!_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+		SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
+		mod_timer(&sde_enc->vsync_event_timer,
+				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+	}
+
 	SDE_ATRACE_END("encoder_kickoff");
 }
 
-int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb)
 {
 	struct drm_encoder *drm_enc;
@@ -2955,8 +3172,6 @@
 	memset(&mixer, 0, sizeof(mixer));
 
 	/* reset associated CTL/LMs */
-	if (phys_enc->hw_ctl->ops.clear_pending_flush)
-		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
 	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
 		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
 
@@ -2996,7 +3211,7 @@
 	}
 
 	if (!lm_valid) {
-		SDE_DEBUG_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
+		SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -3553,6 +3768,12 @@
 	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
 			(unsigned long) sde_enc);
 
+	if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
+			disp_info->is_primary)
+		setup_timer(&sde_enc->vsync_event_timer,
+				sde_encoder_vsync_event_handler,
+				(unsigned long)sde_enc);
+
 	snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
 	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
 					disp_info->is_primary);
@@ -3566,6 +3787,10 @@
 	kthread_init_delayed_work(&sde_enc->delayed_off_work,
 			sde_encoder_off_work);
 	sde_enc->idle_timeout = IDLE_TIMEOUT;
+
+	kthread_init_work(&sde_enc->vsync_event_work,
+			sde_encoder_vsync_event_work_handler);
+
 	memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index bb7f31d..baf59b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -128,8 +128,10 @@
  * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
  *	(i.e. ctl flush and start) immediately.
  * @encoder:	encoder pointer
+ * @is_error:	whether the current commit needs to be aborted and replaced
+ *		with a 'safe' commit
  */
-void sde_encoder_kickoff(struct drm_encoder *encoder);
+void sde_encoder_kickoff(struct drm_encoder *encoder, bool is_error);
 
 /**
  * sde_encoder_wait_for_event - Waits for encoder events
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 5a78e4d..8813fd2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -125,9 +125,12 @@
  *				SDE_ENC_ERR_NEEDS_HW_RESET state
  * @irq_control:		Handler to enable/disable all the encoder IRQs
  * @update_split_role:		Update the split role of the phys enc
+ * @prepare_idle_pc:		Let the phys encoder update its vsync source
+ *                              state while preparing for idle power collapse
  * @restore:			Restore all the encoder configs.
  * @is_autorefresh_enabled:	provides the autorefresh current
  *                              enable/disable state.
+ * @get_line_count:		Obtain current vertical line count
  */
 
 struct sde_encoder_phys_ops {
@@ -167,8 +170,10 @@
 	void (*irq_control)(struct sde_encoder_phys *phys, bool enable);
 	void (*update_split_role)(struct sde_encoder_phys *phys_enc,
 			enum sde_enc_split_role role);
+	void (*prepare_idle_pc)(struct sde_encoder_phys *phys_enc);
 	void (*restore)(struct sde_encoder_phys *phys);
 	bool (*is_autorefresh_enabled)(struct sde_encoder_phys *phys);
+	int (*get_line_count)(struct sde_encoder_phys *phys);
 };
 
 /**
@@ -517,12 +522,12 @@
 		enum sde_intf interface);
 
 /**
- * sde_encoder_helper_hw_release - prepare for h/w reset during disable
+ * sde_encoder_helper_reset_mixers - reset mixers associated with phys enc
  * @phys_enc: Pointer to physical encoder structure
  * @fb: Optional fb for specifying new mixer output resolution, may be NULL
  * Return: Zero on success
  */
-int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
 		struct drm_framebuffer *fb);
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 70a25dd..4291098 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -34,9 +34,6 @@
 
 #define PP_TIMEOUT_MAX_TRIALS	10
 
-/* wait for 2 vyncs only */
-#define CTL_START_TIMEOUT_MS	32
-
 /*
  * Tearcheck sync start and continue thresholds are empirically found
  * based on common panels. In the future, may want to allow panels to override
@@ -897,6 +894,30 @@
 	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
 }
 
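+/*
+ * Disconnect the TE source as part of idle power collapse preparation so
+ * that no vsync is generated before mode-2 entry.
+ */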
+static void sde_encoder_phys_cmd_prepare_idle_pc(
+		struct sde_encoder_phys *phys_enc)
+{
+	_sde_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int sde_encoder_phys_cmd_get_line_count(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_pingpong *hw_pp;
+
+	if (!phys_enc || !phys_enc->hw_pp)
+		return -EINVAL;
+
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		return -EINVAL;
+
+	hw_pp = phys_enc->hw_pp;
+	if (!hw_pp->ops.get_line_count)
+		return -EINVAL;
+
+	return hw_pp->ops.get_line_count(hw_pp);
+}
+
 static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_cmd *cmd_enc =
@@ -1007,7 +1028,7 @@
 
 	wait_info.wq = &phys_enc->pending_kickoff_wq;
 	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
-	wait_info.timeout_ms = CTL_START_TIMEOUT_MS;
+	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
 
 	/* slave encoder doesn't enable for ppsplit */
 	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
@@ -1239,9 +1260,11 @@
 	ops->irq_control = sde_encoder_phys_cmd_irq_control;
 	ops->update_split_role = sde_encoder_phys_cmd_update_split_role;
 	ops->restore = sde_encoder_phys_cmd_enable_helper;
+	ops->prepare_idle_pc = sde_encoder_phys_cmd_prepare_idle_pc;
 	ops->is_autorefresh_enabled =
 			sde_encoder_phys_cmd_is_autorefresh_enabled;
 	ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff;
+	ops->get_line_count = sde_encoder_phys_cmd_get_line_count;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_cmd_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index b80ed1f..6a4348ba 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -279,8 +279,9 @@
 		return;
 
 	SDE_DEBUG_VIDENC(vid_enc,
-		"rot_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
-		rot_fetch_lines, rot_fetch_start_vsync_counter);
+		"rot_fetch_lines %u vfp_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
+		rot_fetch_lines, vfp_fetch_lines,
+		rot_fetch_start_vsync_counter);
 
 	phys_enc->hw_ctl->ops.get_bitmask_intf(
 			phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
@@ -482,13 +483,19 @@
 {
 	struct sde_encoder_irq *irq;
 
+	/*
+	 * Initialize irq->hw_idx only when the irq is not registered.
+	 * This prevents invalidating irq->irq_idx, since modeset may be
+	 * called many times during DFPS.
+	 */
+
 	irq = &phys_enc->irq[INTR_IDX_VSYNC];
-	irq->hw_idx = phys_enc->intf_idx;
-	irq->irq_idx = -EINVAL;
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
 
 	irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
-	irq->hw_idx = phys_enc->intf_idx;
-	irq->irq_idx = -EINVAL;
+	if (irq->irq_idx < 0)
+		irq->hw_idx = phys_enc->intf_idx;
 }
 
 static void sde_encoder_phys_vid_mode_set(
@@ -898,6 +905,24 @@
 		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
 }
 
+static int sde_encoder_phys_vid_get_line_count(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc)
+		return -EINVAL;
+
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		return -EINVAL;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+		return -EINVAL;
+
+	return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
 static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
 {
 	ops->is_master = sde_encoder_phys_vid_is_master;
@@ -918,6 +943,7 @@
 	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
 	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
 	ops->hw_reset = sde_encoder_helper_hw_reset;
+	ops->get_line_count = sde_encoder_phys_vid_get_line_count;
 }
 
 struct sde_encoder_phys *sde_encoder_phys_vid_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 240e521..12115756 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -1139,7 +1139,9 @@
 	}
 
 	/* reset h/w before final flush */
-	if (sde_encoder_helper_hw_release(phys_enc, wb_enc->fb_disable))
+	if (phys_enc->hw_ctl->ops.clear_pending_flush)
+		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+	if (sde_encoder_helper_reset_mixers(phys_enc, wb_enc->fb_disable))
 		goto exit;
 
 	phys_enc->enable_state = SDE_ENC_DISABLING;
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 5732aae6..816339b 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -206,9 +206,9 @@
 	/* create fd */
 	fd = get_unused_fd_flags(0);
 	if (fd < 0) {
-		fence_put(&sde_fence->base);
 		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
 							sde_fence->name);
+		fence_put(&sde_fence->base);
 		goto exit;
 	}
 
@@ -217,8 +217,8 @@
 	if (sync_file == NULL) {
 		put_unused_fd(fd);
 		fd = -EINVAL;
-		fence_put(&sde_fence->base);
 		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
+		fence_put(&sde_fence->base);
 		goto exit;
 	}
 
@@ -305,6 +305,7 @@
 		spin_unlock_irqrestore(&ctx->lock, flags);
 
 		if (is_signaled) {
+			list_del_init(&fc->fence_list);
 			fence_put(&fc->base);
 			kref_put(&ctx->kref, sde_fence_destroy);
 		} else {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index f35e999..aea2c6b 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -36,9 +36,12 @@
 /* each entry will have register address and bit offset in that register */
 #define MAX_BIT_OFFSET 2
 
-/* default line width for sspp */
+/* default line width for sspp, mixer, ds (input), wb */
 #define DEFAULT_SDE_LINE_WIDTH 2048
 
+/* default output line width for ds */
+#define DEFAULT_SDE_OUTPUT_LINE_WIDTH 2560
+
 /* max mixer blend stages */
 #define DEFAULT_SDE_MIXER_BLENDSTAGES 7
 
@@ -60,8 +63,8 @@
 /* total number of intf - dp, dsi, hdmi */
 #define INTF_COUNT			3
 
-#define MAX_SSPP_UPSCALE		20
-#define MAX_SSPP_DOWNSCALE		4
+#define MAX_UPSCALE_RATIO		20
+#define MAX_DOWNSCALE_RATIO		4
 #define SSPP_UNITY_SCALE		1
 
 #define MAX_HORZ_DECIMATION		4
@@ -140,6 +143,7 @@
 	DIM_LAYER,
 	SMART_DMA_REV,
 	IDLE_PC,
+	DEST_SCALER,
 	SDE_PROP_MAX,
 };
 
@@ -233,6 +237,20 @@
 };
 
 enum {
+	DS_TOP_OFF,
+	DS_TOP_LEN,
+	DS_TOP_INPUT_LINEWIDTH,
+	DS_TOP_OUTPUT_LINEWIDTH,
+	DS_TOP_PROP_MAX,
+};
+
+enum {
+	DS_OFF,
+	DS_LEN,
+	DS_PROP_MAX,
+};
+
+enum {
 	DSPP_TOP_OFF,
 	DSPP_TOP_SIZE,
 	DSPP_TOP_PROP_MAX,
@@ -374,6 +392,7 @@
 	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
 	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
 	{IDLE_PC, "qcom,sde-has-idle-pc", false, PROP_TYPE_BOOL},
+	{DEST_SCALER, "qcom,sde-has-dest-scaler", false, PROP_TYPE_BOOL},
 };
 
 static struct sde_prop_type sde_perf_prop[] = {
@@ -506,6 +525,20 @@
 	{AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
 };
 
+static struct sde_prop_type ds_top_prop[] = {
+	{DS_TOP_OFF, "qcom,sde-dest-scaler-top-off", false, PROP_TYPE_U32},
+	{DS_TOP_LEN, "qcom,sde-dest-scaler-top-size", false, PROP_TYPE_U32},
+	{DS_TOP_INPUT_LINEWIDTH, "qcom,sde-max-dest-scaler-input-linewidth",
+		false, PROP_TYPE_U32},
+	{DS_TOP_OUTPUT_LINEWIDTH, "qcom,sde-max-dest-scaler-output-linewidth",
+		false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type ds_prop[] = {
+	{DS_OFF, "qcom,sde-dest-scaler-off", false, PROP_TYPE_U32_ARRAY},
+	{DS_LEN, "qcom,sde-dest-scaler-size", false, PROP_TYPE_U32},
+};
+
 static struct sde_prop_type pp_prop[] = {
 	{PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
 	{PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
@@ -863,8 +896,8 @@
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
 	bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
 {
-	sblk->maxupscale = MAX_SSPP_UPSCALE;
-	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sblk->maxupscale = MAX_UPSCALE_RATIO;
+	sblk->maxdwnscale = MAX_DOWNSCALE_RATIO;
 	sspp->id = SSPP_VIG0 + *vig_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
 			sspp->id - SSPP_VIG0);
@@ -958,8 +991,8 @@
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
 	bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
 {
-	sblk->maxupscale = MAX_SSPP_UPSCALE;
-	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+	sblk->maxupscale = MAX_UPSCALE_RATIO;
+	sblk->maxdwnscale = MAX_DOWNSCALE_RATIO;
 	sspp->id = SSPP_RGB0 + *rgb_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
 			sspp->id - SSPP_VIG0);
@@ -1293,8 +1326,8 @@
 	u32 off_count, blend_off_count, max_blendstages, lm_pair_mask;
 	struct sde_lm_cfg *mixer;
 	struct sde_lm_sub_blks *sblk;
-	int pp_count, dspp_count;
-	u32 pp_idx, dspp_idx;
+	int pp_count, dspp_count, ds_count;
+	u32 pp_idx, dspp_idx, ds_idx;
 	struct device_node *snp = NULL;
 
 	if (!sde_cfg) {
@@ -1325,6 +1358,7 @@
 
 	pp_count = sde_cfg->pingpong_count;
 	dspp_count = sde_cfg->dspp_count;
+	ds_count = sde_cfg->ds_count;
 
 	/* get mixer feature dt properties if they exist */
 	snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
@@ -1364,7 +1398,7 @@
 	if (rc)
 		goto end;
 
-	for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
+	for (i = 0, pp_idx = 0, dspp_idx = 0, ds_idx = 0; i < off_count; i++) {
 		mixer = sde_cfg->mixer + i;
 		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
 		if (!sblk) {
@@ -1406,13 +1440,17 @@
 								: PINGPONG_MAX;
 			mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
 								: DSPP_MAX;
+			mixer->ds = ds_count > 0 ? ds_idx + DS_0 : DS_MAX;
 			pp_count--;
 			dspp_count--;
+			ds_count--;
 			pp_idx++;
 			dspp_idx++;
+			ds_idx++;
 		} else {
 			mixer->pingpong = PINGPONG_MAX;
 			mixer->dspp = DSPP_MAX;
+			mixer->ds = DS_MAX;
 		}
 
 		sblk->gc.id = SDE_MIXER_GC;
@@ -2044,6 +2082,116 @@
 	return rc;
 }
 
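+/*
+ * Parse the destination scaler top capabilities and per-block offsets
+ * from the device tree into the catalog when the dest scaler feature
+ * is supported.
+ */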
+static int sde_ds_parse_dt(struct device_node *np,
+			struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[DS_PROP_MAX], top_prop_count[DS_TOP_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL, *top_prop_value = NULL;
+	bool prop_exists[DS_PROP_MAX], top_prop_exists[DS_TOP_PROP_MAX];
+	u32 off_count = 0, top_off_count = 0;
+	struct sde_ds_cfg *ds;
+	struct sde_ds_top_cfg *ds_top = NULL;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!sde_cfg->mdp[0].has_dest_scaler) {
+		SDE_DEBUG("dest scaler feature not supported\n");
+		rc = 0;
+		goto end;
+	}
+
+	/* Parse the dest scaler top register offset and capabilities */
+	top_prop_value = kzalloc(DS_TOP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!top_prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, ds_top_prop,
+				ARRAY_SIZE(ds_top_prop),
+				top_prop_count, &top_off_count);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, ds_top_prop,
+			ARRAY_SIZE(ds_top_prop), top_prop_count,
+			top_prop_exists, top_prop_value);
+	if (rc)
+		goto end;
+
+	/* Parse the offset of each dest scaler block */
+	prop_value = kzalloc(DS_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, ds_prop, ARRAY_SIZE(ds_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->ds_count = off_count;
+
+	rc = _read_dt_entry(np, ds_prop, ARRAY_SIZE(ds_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	if (!off_count)
+		goto end;
+
+	ds_top = kzalloc(sizeof(struct sde_ds_top_cfg), GFP_KERNEL);
+	if (!ds_top) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	ds_top->id = DS_TOP;
+	snprintf(ds_top->name, SDE_HW_BLK_NAME_LEN, "ds_top_%u",
+		ds_top->id - DS_TOP);
+	ds_top->base = PROP_VALUE_ACCESS(top_prop_value, DS_TOP_OFF, 0);
+	ds_top->len = PROP_VALUE_ACCESS(top_prop_value, DS_TOP_LEN, 0);
+	ds_top->maxupscale = MAX_UPSCALE_RATIO;
+
+	ds_top->maxinputwidth = PROP_VALUE_ACCESS(top_prop_value,
+			DS_TOP_INPUT_LINEWIDTH, 0);
+	if (!top_prop_exists[DS_TOP_INPUT_LINEWIDTH])
+		ds_top->maxinputwidth = DEFAULT_SDE_LINE_WIDTH;
+
+	ds_top->maxoutputwidth = PROP_VALUE_ACCESS(top_prop_value,
+			DS_TOP_OUTPUT_LINEWIDTH, 0);
+	if (!top_prop_exists[DS_TOP_OUTPUT_LINEWIDTH])
+		ds_top->maxoutputwidth = DEFAULT_SDE_OUTPUT_LINE_WIDTH;
+
+	for (i = 0; i < off_count; i++) {
+		ds = sde_cfg->ds + i;
+		ds->top = ds_top;
+		ds->base = PROP_VALUE_ACCESS(prop_value, DS_OFF, i);
+		ds->id = DS_0 + i;
+		ds->len = PROP_VALUE_ACCESS(prop_value, DS_LEN, 0);
+		snprintf(ds->name, SDE_HW_BLK_NAME_LEN, "ds_%u",
+			ds->id - DS_0);
+
+		if (!prop_exists[DS_LEN])
+			ds->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+		if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3)
+			set_bit(SDE_SSPP_SCALER_QSEED3, &ds->features);
+	}
+
+end:
+	kfree(top_prop_value);
+	kfree(prop_value);
+	return rc;
+}
+
 static int sde_dsc_parse_dt(struct device_node *np,
 			struct sde_mdss_cfg *sde_cfg)
 {
@@ -2548,6 +2696,9 @@
 	if (!prop_exists[UBWC_SWIZZLE])
 		cfg->mdp[0].ubwc_swizzle = DEFAULT_SDE_UBWC_SWIZZLE;
 
+	cfg->mdp[0].has_dest_scaler =
+		PROP_VALUE_ACCESS(prop_value, DEST_SCALER, 0);
+
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
 	if (!rc && !strcmp(type, "qseedv3")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
@@ -3007,6 +3158,9 @@
 	for (i = 0; i < sde_cfg->dspp_count; i++)
 		kfree(sde_cfg->dspp[i].sblk);
 
+	if (sde_cfg->ds_count)
+		kfree(sde_cfg->ds[0].top);
+
 	for (i = 0; i < sde_cfg->pingpong_count; i++)
 		kfree(sde_cfg->pingpong[i].sblk);
 
@@ -3075,6 +3229,10 @@
 	if (rc)
 		goto end;
 
+	rc = sde_ds_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = sde_dsc_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -3083,7 +3241,9 @@
 	if (rc)
 		goto end;
 
-	/* mixer parsing should be done after dspp and pp for mapping setup */
+	/* mixer parsing should be done after dspp,
+	 * ds and pp for mapping setup
+	 */
 	rc = sde_mixer_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
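
The mixer parse pass above binds each layer mixer to its pingpong, DSPP and,
when present, destination scaler index, with DS_MAX marking mixers that have
no DS attached. A minimal sketch of walking that mapping once parsing is done
(the helper name is illustrative, and mixer_count is assumed to be populated
alongside the mixer[] array like the other *_count fields):

	static void example_dump_lm_ds_map(struct sde_mdss_cfg *cfg)
	{
		int i;

		for (i = 0; i < cfg->mixer_count; i++) {
			struct sde_lm_cfg *lm = &cfg->mixer[i];

			/* DS_MAX means this mixer has no destination scaler */
			if (lm->ds == DS_MAX)
				continue;

			SDE_DEBUG("lm %d: pp %d dspp %d ds %d\n",
					i, lm->pingpong, lm->dspp, lm->ds);
		}
	}
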
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 7c43988..e60b5ca 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -519,6 +519,7 @@
  * @highest_bank_bit:  UBWC parameter
  * @ubwc_static:       ubwc static configuration
  * @ubwc_swizzle:      ubwc default swizzle setting
+ * @has_dest_scaler:   indicates support of destination scaler
  * @clk_ctrls          clock control register definition
  */
 struct sde_mdp_cfg {
@@ -526,6 +527,7 @@
 	u32 highest_bank_bit;
 	u32 ubwc_static;
 	u32 ubwc_swizzle;
+	bool has_dest_scaler;
 	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
 };
 
@@ -564,6 +566,7 @@
  * @sblk:              LM Sub-blocks information
  * @dspp:              ID of connected DSPP, DSPP_MAX if unsupported
  * @pingpong:          ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds:                ID of connected DS, DS_MAX if unsupported
  * @lm_pair_mask:      Bitmask of LMs that can be controlled by same CTL
  */
 struct sde_lm_cfg {
@@ -571,6 +574,7 @@
 	const struct sde_lm_sub_blks *sblk;
 	u32 dspp;
 	u32 pingpong;
+	u32 ds;
 	unsigned long lm_pair_mask;
 };
 
@@ -599,6 +603,38 @@
 };
 
 /**
+ * struct sde_ds_top_cfg - information of dest scaler top
+ * @id               enum identifying this block
+ * @base             register offset of this block
+ * @features         bit mask identifying features
+ * @version          hw version of dest scaler
+ * @maxinputwidth    maximum input line width
+ * @maxoutputwidth   maximum output line width
+ * @maxupscale       maximum upscale ratio
+ */
+struct sde_ds_top_cfg {
+	SDE_HW_BLK_INFO;
+	u32 version;
+	u32 maxinputwidth;
+	u32 maxoutputwidth;
+	u32 maxupscale;
+};
+
+/**
+ * struct sde_ds_cfg - information of dest scaler blocks
+ * @id          enum identifying this block
+ * @base        register offset wrt DS top offset
+ * @features    bit mask identifying features
+ * @version     hw version of the qseed block
+ * @top         DS top information
+ */
+struct sde_ds_cfg {
+	SDE_HW_BLK_INFO;
+	u32 version;
+	const struct sde_ds_top_cfg *top;
+};
+
+/**
  * struct sde_pingpong_cfg - information of PING-PONG blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -921,6 +957,9 @@
 	u32 dspp_count;
 	struct sde_dspp_cfg dspp[MAX_BLOCKS];
 
+	u32 ds_count;
+	struct sde_ds_cfg ds[MAX_BLOCKS];
+
 	u32 pingpong_count;
 	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
 
@@ -973,6 +1012,7 @@
 #define BLK_CURSOR(s) ((s)->cursor)
 #define BLK_MIXER(s) ((s)->mixer)
 #define BLK_DSPP(s) ((s)->dspp)
+#define BLK_DS(s) ((s)->ds)
 #define BLK_PINGPONG(s) ((s)->pingpong)
 #define BLK_CDM(s) ((s)->cdm)
 #define BLK_INTF(s) ((s)->intf)
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
index 4191367..c8e732a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -12,6 +12,7 @@
 
 #include <drm/msm_drm_pp.h>
 #include "sde_hw_color_processing_v1_7.h"
+#include "sde_hw_ctl.h"
 
 #define PA_HUE_VIG_OFF		0x110
 #define PA_SAT_VIG_OFF		0x114
@@ -26,6 +27,9 @@
 #define PA_LUTV_DSPP_OFF	0x1400
 #define PA_LUT_SWAP_OFF		0x234
 
+#define PA_LUTV_DSPP_CTRL_OFF	0x4c
+#define PA_LUTV_DSPP_SWAP_OFF	0x18
+
 #define PA_HUE_MASK		0xFFF
 #define PA_SAT_MASK		0xFFFF
 #define PA_VAL_MASK		0xFF
@@ -458,6 +462,62 @@
 	SDE_REG_WRITE(&ctx->hw, base, op_mode);
 }
 
+void sde_setup_dspp_pa_vlut_v1_8(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pa_vlut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct sde_hw_ctl *ctl = NULL;
+	u32 vlut_base, pa_hist_base;
+	u32 ctrl_off, swap_off;
+	u32 tmp = 0;
+	int i = 0, j = 0;
+	u32 flush_mask = 0;
+
+	if (!ctx) {
+		DRM_ERROR("invalid input parameter NULL ctx\n");
+		return;
+	}
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pa_vlut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pa_vlut));
+		return;
+	}
+
+	ctl = hw_cfg->ctl;
+	vlut_base = ctx->cap->sblk->vlut.base;
+	pa_hist_base = ctx->cap->sblk->hist.base;
+	ctrl_off = pa_hist_base + PA_LUTV_DSPP_CTRL_OFF;
+	swap_off = pa_hist_base + PA_LUTV_DSPP_SWAP_OFF;
+
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable vlut feature\n");
+		SDE_REG_WRITE(&ctx->hw, ctrl_off, 0);
+		goto exit;
+	}
+
+	payload = hw_cfg->payload;
+	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+		tmp = (payload->val[i] & REG_MASK(10)) |
+			((payload->val[i + 1] & REG_MASK(10)) << 16);
+		SDE_REG_WRITE(&ctx->hw, (vlut_base + j), tmp);
+	}
+	SDE_REG_WRITE(&ctx->hw, ctrl_off, 1);
+	SDE_REG_WRITE(&ctx->hw, swap_off, 1);
+
+exit:
+	/* update flush bit */
+	if (ctl && ctl->ops.get_bitmask_dspp_pavlut) {
+		ctl->ops.get_bitmask_dspp_pavlut(ctl, &flush_mask, ctx->idx);
+		if (ctl->ops.update_pending_flush)
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+	}
+}
+
 void sde_setup_dspp_gc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
 {
 	struct drm_msm_pgc_lut *payload = NULL;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
index 25e446b7..4cd2e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -76,6 +76,13 @@
 void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
 
 /**
+ * sde_setup_dspp_pa_vlut_v1_8 - setup DSPP PA vLUT feature in v1.8 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to vLUT data
+ */
+void sde_setup_dspp_pa_vlut_v1_8(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
  * sde_setup_dspp_gc_v1_7 - setup DSPP gc feature in v1.7 hardware
  * @ctx: Pointer to DSPP context
  * @cfg: Pointer to gc data
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 621a172..95b7a6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -89,6 +89,11 @@
 
 static inline void sde_hw_ctl_trigger_rot_start(struct sde_hw_ctl *ctx)
 {
+	/* ROT flush bit is latched during ROT start, so set it first */
+	if (CTL_FLUSH_MASK_ROT & ctx->pending_flush_mask) {
+		ctx->pending_flush_mask &= ~CTL_FLUSH_MASK_ROT;
+		SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, CTL_FLUSH_MASK_ROT);
+	}
 	SDE_REG_WRITE(&ctx->hw, CTL_ROT_START, BIT(0));
 }
 
@@ -235,6 +240,22 @@
 	return 0;
 }
 
+static inline int sde_hw_ctl_get_bitmask_dspp_pavlut(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_dspp dspp)
+{
+	switch (dspp) {
+	case DSPP_0:
+		*flushbits |= BIT(3);
+		break;
+	case DSPP_1:
+		*flushbits |= BIT(4);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
 		u32 *flushbits, enum sde_intf intf)
 {
@@ -568,6 +589,7 @@
 	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
 	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
 	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
+	ops->get_bitmask_dspp_pavlut = sde_hw_ctl_get_bitmask_dspp_pavlut;
 	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
 	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
 	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 5d3ced3..97bc1c1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -173,6 +173,10 @@
 		u32 *flushbits,
 		enum sde_dspp blk);
 
+	int (*get_bitmask_dspp_pavlut)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_dspp blk);
+
 	int (*get_bitmask_intf)(struct sde_hw_ctl *ctx,
 		u32 *flushbits,
 		enum sde_intf blk);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.c b/drivers/gpu/drm/msm/sde/sde_hw_ds.c
new file mode 100644
index 0000000..e37a7d6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ds.c
@@ -0,0 +1,149 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_ds.h"
+#include "sde_formats.h"
+#include "sde_dbg.h"
+#include "sde_kms.h"
+
+/* Destination scaler TOP registers */
+#define DEST_SCALER_OP_MODE     0x00
+#define DEST_SCALER_HW_VERSION  0x10
+
+static void sde_hw_ds_setup_opmode(struct sde_hw_ds *hw_ds,
+				u32 op_mode)
+{
+	struct sde_hw_blk_reg_map *hw = &hw_ds->hw;
+
+	SDE_REG_WRITE(hw, DEST_SCALER_OP_MODE, op_mode);
+}
+
+static void sde_hw_ds_setup_scaler3(struct sde_hw_ds *hw_ds,
+			void *scaler_cfg, void *scaler_lut_cfg)
+{
+	struct sde_hw_scaler3_cfg *scl3_cfg = scaler_cfg;
+	struct sde_hw_scaler3_lut_cfg *scl3_lut_cfg = scaler_lut_cfg;
+
+	if (!hw_ds || !hw_ds->scl || !scl3_cfg || !scl3_lut_cfg)
+		return;
+
+	/*
+	 * copy LUT values to scaler structure
+	 */
+	if (scl3_lut_cfg->is_configured) {
+		scl3_cfg->dir_lut = scl3_lut_cfg->dir_lut;
+		scl3_cfg->dir_len = scl3_lut_cfg->dir_len;
+		scl3_cfg->cir_lut = scl3_lut_cfg->cir_lut;
+		scl3_cfg->cir_len = scl3_lut_cfg->cir_len;
+		scl3_cfg->sep_lut = scl3_lut_cfg->sep_lut;
+		scl3_cfg->sep_len = scl3_lut_cfg->sep_len;
+	}
+
+	sde_hw_setup_scaler3(&hw_ds->hw, scl3_cfg,
+			 hw_ds->scl->base,
+			 hw_ds->scl->version,
+			 sde_get_sde_format(DRM_FORMAT_XBGR2101010));
+}
+
+static void _setup_ds_ops(struct sde_hw_ds_ops *ops, unsigned long features)
+{
+	ops->setup_opmode = sde_hw_ds_setup_opmode;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+		ops->setup_scaler = sde_hw_ds_setup_scaler3;
+}
+
+static struct sde_ds_cfg *_ds_offset(enum sde_ds ds,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	if (!m || !addr || !b)
+		return ERR_PTR(-EINVAL);
+
+	for (i = 0; i < m->ds_count; i++) {
+		if ((ds == m->ds[i].id) &&
+			 (m->ds[i].top)) {
+			b->base_off = addr;
+			b->blk_off = m->ds[i].top->base;
+			b->length = m->ds[i].top->len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_DS;
+			return &m->ds[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct sde_hw_blk_ops sde_hw_ops = {
+	.start = NULL,
+	.stop = NULL,
+};
+
+struct sde_hw_ds *sde_hw_ds_init(enum sde_ds idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m)
+{
+	struct sde_hw_ds *hw_ds;
+	struct sde_ds_cfg *cfg;
+	int rc;
+
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
+	hw_ds = kzalloc(sizeof(*hw_ds), GFP_KERNEL);
+	if (!hw_ds)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ds_offset(idx, m, addr, &hw_ds->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		SDE_ERROR("failed to get ds cfg\n");
+		kfree(hw_ds);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_ds->idx = idx;
+	hw_ds->scl = cfg;
+	_setup_ds_ops(&hw_ds->ops, hw_ds->scl->features);
+
+	rc = sde_hw_blk_init(&hw_ds->base, SDE_HW_BLK_DS, idx, &sde_hw_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	if (cfg->len) {
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+				hw_ds->hw.blk_off + cfg->base,
+				hw_ds->hw.blk_off + cfg->base + cfg->len,
+				hw_ds->hw.xin_id);
+	}
+
+	return hw_ds;
+
+blk_init_error:
+	kzfree(hw_ds);
+
+	return ERR_PTR(rc);
+
+}
+
+void sde_hw_ds_destroy(struct sde_hw_ds *hw_ds)
+{
+	if (hw_ds)
+		sde_hw_blk_destroy(&hw_ds->base);
+	kfree(hw_ds);
+}
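
The new DS block driver follows the same init/ops/destroy pattern as the other
SDE hardware blocks. A minimal usage sketch, assuming the caller already holds
the mapped MDP register base and the parsed catalog, and fills in the scaler
and LUT structures elsewhere (the function name is illustrative):

	static int example_program_ds(void __iomem *mmio,
			struct sde_mdss_cfg *sde_cfg,
			struct sde_hw_scaler3_cfg *scl_cfg,
			struct sde_hw_scaler3_lut_cfg *lut_cfg)
	{
		struct sde_hw_ds *hw_ds;

		hw_ds = sde_hw_ds_init(DS_0, mmio, sde_cfg);
		if (IS_ERR_OR_NULL(hw_ds))
			return -EINVAL;

		/* single-LM case: leave the DUAL mode bit clear */
		hw_ds->ops.setup_opmode(hw_ds, 0);

		/* setup_scaler is only populated when QSEED3 is advertised */
		if (hw_ds->ops.setup_scaler)
			hw_ds->ops.setup_scaler(hw_ds, scl_cfg, lut_cfg);

		sde_hw_ds_destroy(hw_ds);
		return 0;
	}
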
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ds.h b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
new file mode 100644
index 0000000..d81cfaf
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ds.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DS_H
+#define _SDE_HW_DS_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_blk.h"
+
+struct sde_hw_ds;
+
+/* Destination Scaler DUAL mode overfetch pixel count */
+#define SDE_DS_OVERFETCH_SIZE 5
+
+/* Destination scaler DUAL mode operation bit */
+#define SDE_DS_OP_MODE_DUAL BIT(16)
+
+/**
+ * struct sde_hw_ds_cfg - destination scaler config
+ * @ndx          : DS selection index
+ * @flags        : Flag to switch between modes for DS
+ * @lm_width     : Layer mixer width configuration
+ * @lm_height    : Layer mixer height configuration
+ * @set_lm_flush : LM flush bit
+ * @scl3_cfg     : Pointer to sde_hw_scaler3_cfg.
+ */
+struct sde_hw_ds_cfg {
+	u32 ndx;
+	int flags;
+	u32 lm_width;
+	u32 lm_height;
+	bool set_lm_flush;
+	struct sde_hw_scaler3_cfg *scl3_cfg;
+};
+
+/**
+ * struct sde_hw_ds_ops - interface to the destination scaler
+ * hardware driver functions
+ * Caller must call the init function to get the ds context for each ds
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_ds_ops {
+	/**
+	 * setup_opmode - destination scaler op mode setup
+	 * @hw_ds   : Pointer to ds context
+	 * @op_mode : Op mode configuration
+	 */
+	void (*setup_opmode)(struct sde_hw_ds *hw_ds,
+				u32 op_mode);
+
+	/**
+	 * setup_scaler - destination scaler block setup
+	 * @hw_ds          : Pointer to ds context
+	 * @scaler_cfg     : Pointer to scaler data
+	 * @scaler_lut_cfg : Pointer to scaler lut
+	 */
+	void (*setup_scaler)(struct sde_hw_ds *hw_ds,
+				void *scaler_cfg,
+				void *scaler_lut_cfg);
+
+};
+
+/**
+ * struct sde_hw_ds - destination scaler description
+ * @base : Hardware block base structure
+ * @hw   : Block hardware details
+ * @idx  : Destination scaler index
+ * @scl  : Pointer to
+ *          - scaler offset relative to top offset
+ *          - capabilities
+ * @ops  : Pointer to operations for this DS
+ */
+struct sde_hw_ds {
+	struct sde_hw_blk base;
+	struct sde_hw_blk_reg_map hw;
+	enum sde_ds idx;
+	const struct sde_ds_cfg *scl;
+	struct sde_hw_ds_ops ops;
+};
+
+/**
+ * sde_hw_ds_init - initializes the destination scaler
+ * hw driver object and should be called once before
+ * accessing every destination scaler
+ * @idx : DS index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m   : MDSS catalog information
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct sde_hw_ds *sde_hw_ds_init(enum sde_ds idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_ds_destroy - destroys destination scaler
+ * driver context
+ * @hw_ds:   Pointer to DS context
+ */
+void sde_hw_ds_destroy(struct sde_hw_ds *hw_ds);
+
+#endif /*_SDE_HW_DS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 5b3f51e..d30c0ae 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -82,9 +82,12 @@
 			} else if (c->cap->sblk->vlut.version ==
 					(SDE_COLOR_PROCESS_VER(0x1, 0x8))) {
 				ret = reg_dmav1_init_dspp_op_v4(i, c->idx);
-				if (ret)
+				if (!ret)
 					c->ops.setup_vlut =
 					reg_dmav1_setup_dspp_vlutv18;
+				else
+					c->ops.setup_vlut =
+					sde_setup_dspp_pa_vlut_v1_8;
 			}
 			break;
 		case SDE_DSPP_GAMUT:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 35f1800..fd06c12 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -289,6 +289,18 @@
 	return SDE_REG_READ(c, INTF_MISR_SIGNATURE);
 }
 
+static u32 sde_hw_intf_get_line_count(struct sde_hw_intf *intf)
+{
+	struct sde_hw_blk_reg_map *c;
+
+	if (!intf)
+		return 0;
+
+	c = &intf->hw;
+
+	return SDE_REG_READ(c, INTF_LINE_COUNT);
+}
+
 static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
 		unsigned long cap)
 {
@@ -298,6 +310,7 @@
 	ops->enable_timing = sde_hw_intf_enable_timing_engine;
 	ops->setup_misr = sde_hw_intf_setup_misr;
 	ops->collect_misr = sde_hw_intf_collect_misr;
+	ops->get_line_count = sde_hw_intf_get_line_count;
 	if (cap & BIT(SDE_INTF_ROT_START))
 		ops->setup_rot_start = sde_hw_intf_setup_rot_start;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
index 83e206d..89068bc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -62,6 +62,7 @@
  * @ get_status: returns if timing engine is enabled or not
  * @ setup_misr: enables/disables MISR in HW register
  * @ collect_misr: reads and stores MISR data from HW register
+ * @ get_line_count: reads current vertical line counter
  */
 struct sde_hw_intf_ops {
 	void (*setup_timing_gen)(struct sde_hw_intf *intf,
@@ -84,6 +85,8 @@
 			bool enable, u32 frame_count);
 
 	u32 (*collect_misr)(struct sde_hw_intf *intf);
+
+	u32 (*get_line_count)(struct sde_hw_intf *intf);
 };
 
 struct sde_hw_intf {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index f07f5ed..952ee8f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -100,6 +100,7 @@
 	SDE_HW_BLK_SSPP,
 	SDE_HW_BLK_LM,
 	SDE_HW_BLK_DSPP,
+	SDE_HW_BLK_DS,
 	SDE_HW_BLK_CTL,
 	SDE_HW_BLK_CDM,
 	SDE_HW_BLK_PINGPONG,
@@ -176,6 +177,13 @@
 	DSPP_MAX
 };
 
+enum sde_ds {
+	DS_TOP,
+	DS_0,
+	DS_1,
+	DS_MAX
+};
+
 enum sde_ctl {
 	CTL_0 = 1,
 	CTL_1,
@@ -489,6 +497,7 @@
 #define SDE_DBG_MASK_VBIF     (1 << 10)
 #define SDE_DBG_MASK_DSC      (1 << 11)
 #define SDE_DBG_MASK_ROT      (1 << 12)
+#define SDE_DBG_MASK_DS       (1 << 13)
 
 /**
  * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index d65e8d0..d8f79f1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -302,6 +302,33 @@
 	return 0;
 }
 
+static u32 sde_hw_pp_get_line_count(struct sde_hw_pingpong *pp)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 height, init;
+	u32 line = 0xFFFF;
+
+	if (!pp)
+		return 0;
+	c = &pp->hw;
+
+	init = SDE_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+	height = SDE_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+	if (height < init)
+		goto line_count_exit;
+
+	line = SDE_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+	if (line < init)
+		line += (0xFFFF - init);
+	else
+		line -= init;
+
+line_count_exit:
+	return line;
+}
+
 static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
 	const struct sde_pingpong_cfg *hw_cap)
 {
@@ -317,6 +344,7 @@
 	ops->disable_dsc = sde_hw_pp_dsc_disable;
 	ops->get_autorefresh = sde_hw_pp_get_autorefresh_config;
 	ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr;
+	ops->get_line_count = sde_hw_pp_get_line_count;
 
 	version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version);
 	switch (version) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index f0a2054..389b2d2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -64,6 +64,7 @@
  *  @enable_dsc : enables DSC encoder
  *  @disable_dsc : disables DSC encoder
  *  @setup_dither : function to program the dither hw block
+ *  @get_line_count: obtain current vertical line counter
  */
 struct sde_hw_pingpong_ops {
 	/**
@@ -130,6 +131,11 @@
 	 * Program the dither hw block
 	 */
 	int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len);
+
+	/**
+	 * Obtain current vertical line counter
+	 */
+	u32 (*get_line_count)(struct sde_hw_pingpong *pp);
 };
 
 struct sde_hw_pingpong {
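
Both the INTF and PINGPONG blocks now expose a get_line_count op reporting the
current vertical line position. One plausible use is checking whether the panel
is inside the vertical blanking region before kicking off an update; a sketch
under that assumption (the helper name and active_lines argument are
illustrative, not part of this change):

	static bool example_in_blanking(struct sde_hw_pingpong *pp,
			u32 active_lines)
	{
		u32 line;

		if (!pp || !pp->ops.get_line_count)
			return false;

		line = pp->ops.get_line_count(pp);

		/* the op returns 0xFFFF when no sane reading is available */
		if (line == 0xFFFF)
			return false;

		/* lines past the active region fall in vertical blanking */
		return line >= active_lines;
	}
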
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
index 4487d78..d7b7625 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c
@@ -582,7 +582,7 @@
 	return 0;
 }
 
-static void sde_reg_dma_aspace_cb(void *cb_data, bool attach)
+static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
 {
 	struct sde_reg_dma_buffer *dma_buf = NULL;
 	struct msm_gem_address_space *aspace = NULL;
@@ -597,14 +597,23 @@
 	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
 	aspace = dma_buf->aspace;
 
-	if (attach) {
-		rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
+	if (is_detach) {
+		/* invalidate the stored iova */
+		dma_buf->iova = 0;
+
+		/* return the virtual address mapping */
+		msm_gem_put_vaddr_locked(dma_buf->buf);
+		msm_gem_vunmap(dma_buf->buf);
+
+	} else {
+		rc = msm_gem_get_iova_locked(dma_buf->buf, aspace,
+				&dma_buf->iova);
 		if (rc) {
 			DRM_ERROR("failed to get the iova rc %d\n", rc);
 			return;
 		}
 
-		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
+		dma_buf->vaddr = msm_gem_get_vaddr_locked(dma_buf->buf);
 		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
 			DRM_ERROR("failed to get va rc %d\n", rc);
 			return;
@@ -615,13 +624,6 @@
 		dma_buf->iova = dma_buf->iova + offset;
 		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
 		dma_buf->next_op_allowed = DECODE_SEL_OP;
-	} else {
-		/* invalidate the stored iova */
-		dma_buf->iova = 0;
-
-		/* return the virtual address mapping */
-		msm_gem_put_vaddr(dma_buf->buf);
-		msm_gem_vunmap(dma_buf->buf);
 	}
 }
 
@@ -661,7 +663,7 @@
 
 	/* register to aspace */
 	rc = msm_gem_address_space_register_cb(aspace,
-			sde_reg_dma_aspace_cb,
+			sde_reg_dma_aspace_cb_locked,
 			(void *)dma_buf);
 	if (rc) {
 		DRM_ERROR("failed to register callback %d", rc);
@@ -694,10 +696,12 @@
 put_iova:
 	msm_gem_put_iova(dma_buf->buf, aspace);
 free_aspace_cb:
-	msm_gem_address_space_unregister_cb(aspace, sde_reg_dma_aspace_cb,
-			dma_buf);
+	msm_gem_address_space_unregister_cb(aspace,
+			sde_reg_dma_aspace_cb_locked, dma_buf);
 free_gem:
+	mutex_lock(&reg_dma->drm_dev->struct_mutex);
 	msm_gem_free_object(dma_buf->buf);
+	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
 fail:
 	kfree(dma_buf);
 	return ERR_PTR(rc);
@@ -713,7 +717,7 @@
 	if (dma_buf->buf) {
 		msm_gem_put_iova(dma_buf->buf, 0);
 		msm_gem_address_space_unregister_cb(dma_buf->aspace,
-				sde_reg_dma_aspace_cb, dma_buf);
+				sde_reg_dma_aspace_cb_locked, dma_buf);
 		mutex_lock(&reg_dma->drm_dev->struct_mutex);
 		msm_gem_free_object(dma_buf->buf);
 		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
@@ -774,6 +778,11 @@
 		return -EINVAL;
 	}
 
+	if (!last_cmd_buf->iova) {
+		DRM_DEBUG("iova not set, possible secure session\n");
+		return 0;
+	}
+
 	cfg.dma_buf = last_cmd_buf;
 	reset_reg_dma_buffer_v1(last_cmd_buf);
 	if (validate_last_cmd(&cfg)) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
index 55cb260..b59dd16 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c
@@ -44,7 +44,7 @@
 static struct sde_reg_dma_buffer *dspp_buf[REG_DMA_FEATURES_MAX][DSPP_MAX];
 
 static u32 feature_map[SDE_DSPP_MAX] = {
-	[SDE_DSPP_VLUT] = VLUT,
+	[SDE_DSPP_VLUT] = REG_DMA_FEATURES_MAX,
 	[SDE_DSPP_GAMUT] = GAMUT,
 	[SDE_DSPP_IGC] = IGC,
 	[SDE_DSPP_PCC] = PCC,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index 1a00517..c5af3a9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -656,7 +656,7 @@
 			SDE_ERROR("failed to get dst format\n");
 			return -EINVAL;
 		}
-	} else if (hw_cmd == SDE_HW_ROT_CMD_COMMIT) {
+	} else {
 		rc = sde_hw_rot_to_v4l2_pixfmt(data->dst_pixel_format,
 				data->dst_modifier, &rot_cmd.dst_pixfmt);
 		if (rc) {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index e7e39d3..acecf1a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -85,6 +85,7 @@
 #define SSPP_TRAFFIC_SHAPER                0x130
 #define SSPP_CDP_CNTL                      0x134
 #define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_CDP_CNTL_REC1                 0x13c
 #define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
 #define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
 #define SSPP_TRAFFIC_SHAPER_REC1           0x158
@@ -119,57 +120,6 @@
 #define COMP1_2_INIT_PHASE_Y               0x2C
 #define VIG_0_QSEED2_SHARP                 0x30
 
-/* SDE_SSPP_SCALER_QSEED3 */
-#define QSEED3_HW_VERSION                  0x00
-#define QSEED3_OP_MODE                     0x04
-#define QSEED3_RGB2Y_COEFF                 0x08
-#define QSEED3_PHASE_INIT                  0x0C
-#define QSEED3_PHASE_STEP_Y_H              0x10
-#define QSEED3_PHASE_STEP_Y_V              0x14
-#define QSEED3_PHASE_STEP_UV_H             0x18
-#define QSEED3_PHASE_STEP_UV_V             0x1C
-#define QSEED3_PRELOAD                     0x20
-#define QSEED3_DE_SHARPEN                  0x24
-#define QSEED3_DE_SHARPEN_CTL              0x28
-#define QSEED3_DE_SHAPE_CTL                0x2C
-#define QSEED3_DE_THRESHOLD                0x30
-#define QSEED3_DE_ADJUST_DATA_0            0x34
-#define QSEED3_DE_ADJUST_DATA_1            0x38
-#define QSEED3_DE_ADJUST_DATA_2            0x3C
-#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
-#define QSEED3_SRC_SIZE_UV                 0x44
-#define QSEED3_DST_SIZE                    0x48
-#define QSEED3_COEF_LUT_CTRL               0x4C
-#define QSEED3_COEF_LUT_SWAP_BIT           0
-#define QSEED3_COEF_LUT_DIR_BIT            1
-#define QSEED3_COEF_LUT_Y_CIR_BIT          2
-#define QSEED3_COEF_LUT_UV_CIR_BIT         3
-#define QSEED3_COEF_LUT_Y_SEP_BIT          4
-#define QSEED3_COEF_LUT_UV_SEP_BIT         5
-#define QSEED3_BUFFER_CTRL                 0x50
-#define QSEED3_CLK_CTRL0                   0x54
-#define QSEED3_CLK_CTRL1                   0x58
-#define QSEED3_CLK_STATUS                  0x5C
-#define QSEED3_MISR_CTRL                   0x70
-#define QSEED3_MISR_SIGNATURE_0            0x74
-#define QSEED3_MISR_SIGNATURE_1            0x78
-#define QSEED3_PHASE_INIT_Y_H              0x90
-#define QSEED3_PHASE_INIT_Y_V              0x94
-#define QSEED3_PHASE_INIT_UV_H             0x98
-#define QSEED3_PHASE_INIT_UV_V             0x9C
-#define QSEED3_COEF_LUT                    0x100
-#define QSEED3_FILTERS                     5
-#define QSEED3_LUT_REGIONS                 4
-#define QSEED3_CIRCULAR_LUTS               9
-#define QSEED3_SEPARABLE_LUTS              10
-#define QSEED3_LUT_SIZE                    60
-#define QSEED3_ENABLE                      2
-#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
-#define QSEED3_CIR_LUT_SIZE \
-	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
-#define QSEED3_SEP_LUT_SIZE \
-	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
-
 /*
  * Definitions for ViG op modes
  */
@@ -556,144 +506,12 @@
 		pe->phase_step_y[SDE_SSPP_COMP_1_2]);
 }
 
-static void _sde_hw_sspp_setup_scaler3_lut(struct sde_hw_pipe *ctx,
-		struct sde_hw_scaler3_cfg *scaler3_cfg)
-{
-	u32 idx;
-	int i, j, filter;
-	int config_lut = 0x0;
-	unsigned long lut_flags;
-	u32 lut_addr, lut_offset, lut_len;
-	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
-	static const uint32_t offset[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
-		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
-		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
-		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
-		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
-		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
-	};
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) ||
-		!scaler3_cfg)
-		return;
-
-	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
-	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
-		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
-		lut[0] = scaler3_cfg->dir_lut;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[1] = scaler3_cfg->cir_lut +
-			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
-		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
-		lut[2] = scaler3_cfg->cir_lut +
-			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[3] = scaler3_cfg->sep_lut +
-			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
-		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
-		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
-		lut[4] = scaler3_cfg->sep_lut +
-			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
-		config_lut = 1;
-	}
-
-	if (config_lut) {
-		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
-			if (!lut[filter])
-				continue;
-			lut_offset = 0;
-			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
-				lut_addr = QSEED3_COEF_LUT + idx
-					+ offset[filter][i][1];
-				lut_len = offset[filter][i][0] << 2;
-				for (j = 0; j < lut_len; j++) {
-					SDE_REG_WRITE(&ctx->hw,
-						lut_addr,
-						(lut[filter])[lut_offset++]);
-					lut_addr += 4;
-				}
-			}
-		}
-	}
-
-	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
-		SDE_REG_WRITE(&ctx->hw, QSEED3_COEF_LUT_CTRL + idx, BIT(0));
-
-}
-
-static void _sde_hw_sspp_setup_scaler3_de(struct sde_hw_pipe *ctx,
-		struct sde_hw_scaler3_de_cfg *de_cfg)
-{
-	u32 idx;
-	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
-	u32 adjust_a, adjust_b, adjust_c;
-	struct sde_hw_blk_reg_map *hw;
-
-	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !de_cfg)
-		return;
-
-	if (!de_cfg->enable)
-		return;
-
-	hw = &ctx->hw;
-	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
-		((de_cfg->sharpen_level2 & 0x1FF) << 16);
-
-	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
-		((de_cfg->prec_shift & 0x7) << 13) |
-		((de_cfg->clip & 0x7) << 16);
-
-	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
-		((de_cfg->thr_dieout & 0x3FF) << 16);
-
-	de_thr = (de_cfg->thr_low & 0x3FF) |
-		((de_cfg->thr_high & 0x3FF) << 16);
-
-	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
-		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_a[2] & 0x3FF) << 20);
-
-	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
-		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_b[2] & 0x3FF) << 20);
-
-	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
-		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
-		((de_cfg->adjust_c[2] & 0x3FF) << 20);
-
-	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN + idx, sharp_lvl);
-	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN_CTL + idx, sharp_ctl);
-	SDE_REG_WRITE(hw, QSEED3_DE_SHAPE_CTL + idx, shape_ctl);
-	SDE_REG_WRITE(hw, QSEED3_DE_THRESHOLD + idx, de_thr);
-	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_0 + idx, adjust_a);
-	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_1 + idx, adjust_b);
-	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_2 + idx, adjust_c);
-
-}
-
 static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
 		struct sde_hw_pipe_cfg *sspp,
 		struct sde_hw_pixel_ext *pe,
 		void *scaler_cfg)
 {
 	u32 idx;
-	u32 op_mode = 0;
-	u32 phase_init, preload, src_y_rgb, src_uv, dst;
 	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
 
 	(void)pe;
@@ -701,93 +519,9 @@
 		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
 		return;
 
-	if (!scaler3_cfg->enable)
-		goto end;
-
-	op_mode |= BIT(0);
-	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
-
-	if (SDE_FORMAT_IS_YUV(sspp->layout.format)) {
-		op_mode |= BIT(12);
-		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
-	}
-
-	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
-	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
-
-	preload =
-		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
-		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
-		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
-		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
-
-	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
-		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
-
-	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
-		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
-
-	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
-		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
-
-	if (scaler3_cfg->de.enable) {
-		_sde_hw_sspp_setup_scaler3_de(ctx, &scaler3_cfg->de);
-		op_mode |= BIT(8);
-	}
-
-	if (scaler3_cfg->lut_flag)
-		_sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg);
-
-	if (ctx->cap->sblk->scaler_blk.version == 0x1002) {
-		phase_init =
-			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
-			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
-			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
-			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
-		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init);
-	} else {
-		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx,
-			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
-		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx,
-			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
-		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_H + idx,
-			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
-		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_V + idx,
-			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
-	}
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_H + idx,
-		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_V + idx,
-		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_H + idx,
-		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_V + idx,
-		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_PRELOAD + idx, preload);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_Y_RGB_A + idx, src_y_rgb);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_UV + idx, src_uv);
-
-	SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst);
-
-end:
-	if (!SDE_FORMAT_IS_DX(sspp->layout.format))
-		op_mode |= BIT(14);
-
-	if (sspp->layout.format->alpha_enable) {
-		op_mode |= BIT(10);
-		if (ctx->cap->sblk->scaler_blk.version == 0x1002)
-			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
-		else
-			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
-	}
-	SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
+	sde_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+			ctx->cap->sblk->scaler_blk.version,
+			sspp->layout.format);
 }
 
 static u32 _sde_hw_sspp_get_scaler3_ver(struct sde_hw_pipe *ctx)
@@ -797,7 +531,7 @@
 	if (!ctx || _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx))
 		return 0;
 
-	return SDE_REG_READ(&ctx->hw, QSEED3_HW_VERSION + idx);
+	return sde_hw_get_scaler3_ver(&ctx->hw, idx);
 }
 
 /**
@@ -1140,10 +874,12 @@
 }
 
 static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
-		struct sde_hw_pipe_cdp_cfg *cfg)
+		struct sde_hw_pipe_cdp_cfg *cfg,
+		enum sde_sspp_multirect_index index)
 {
 	u32 idx;
 	u32 cdp_cntl = 0;
+	u32 cdp_cntl_offset = 0;
 
 	if (!ctx || !cfg)
 		return;
@@ -1151,6 +887,13 @@
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
+	if (index == SDE_SSPP_RECT_0)
+		cdp_cntl_offset = SSPP_CDP_CNTL;
+	else if (index == SDE_SSPP_RECT_1)
+		cdp_cntl_offset = SSPP_CDP_CNTL_REC1;
+	else
+		return;
+
 	if (cfg->enable)
 		cdp_cntl |= BIT(0);
 	if (cfg->ubwc_meta_enable)
@@ -1160,7 +903,7 @@
 	if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
 		cdp_cntl |= BIT(3);
 
-	SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+	SDE_REG_WRITE(&ctx->hw, cdp_cntl_offset, cdp_cntl);
 }
 
 static void _setup_layer_ops(struct sde_hw_pipe *c,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 8700627..d32c9d8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -156,114 +156,6 @@
 };
 
 /**
- * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
- * @enable:         detail enhancer enable/disable
- * @sharpen_level1: sharpening strength for noise
- * @sharpen_level2: sharpening strength for signal
- * @ clip:          clip shift
- * @ limit:         limit value
- * @ thr_quiet:     quiet threshold
- * @ thr_dieout:    dieout threshold
- * @ thr_high:      low threshold
- * @ thr_high:      high threshold
- * @ prec_shift:    precision shift
- * @ adjust_a:      A-coefficients for mapping curve
- * @ adjust_b:      B-coefficients for mapping curve
- * @ adjust_c:      C-coefficients for mapping curve
- */
-struct sde_hw_scaler3_de_cfg {
-	u32 enable;
-	int16_t sharpen_level1;
-	int16_t sharpen_level2;
-	uint16_t clip;
-	uint16_t limit;
-	uint16_t thr_quiet;
-	uint16_t thr_dieout;
-	uint16_t thr_low;
-	uint16_t thr_high;
-	uint16_t prec_shift;
-	int16_t adjust_a[SDE_MAX_DE_CURVES];
-	int16_t adjust_b[SDE_MAX_DE_CURVES];
-	int16_t adjust_c[SDE_MAX_DE_CURVES];
-};
-
-/**
- * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
- * @enable:        scaler enable
- * @dir_en:        direction detection block enable
- * @ init_phase_x: horizontal initial phase
- * @ phase_step_x: horizontal phase step
- * @ init_phase_y: vertical initial phase
- * @ phase_step_y: vertical phase step
- * @ preload_x:    horizontal preload value
- * @ preload_y:    vertical preload value
- * @ src_width:    source width
- * @ src_height:   source height
- * @ dst_width:    destination width
- * @ dst_height:   destination height
- * @ y_rgb_filter_cfg: y/rgb plane filter configuration
- * @ uv_filter_cfg: uv plane filter configuration
- * @ alpha_filter_cfg: alpha filter configuration
- * @ blend_cfg:    blend coefficients configuration
- * @ lut_flag:     scaler LUT update flags
- *                 0x1 swap LUT bank
- *                 0x2 update 2D filter LUT
- *                 0x4 update y circular filter LUT
- *                 0x8 update uv circular filter LUT
- *                 0x10 update y separable filter LUT
- *                 0x20 update uv separable filter LUT
- * @ dir_lut_idx:  2D filter LUT index
- * @ y_rgb_cir_lut_idx: y circular filter LUT index
- * @ uv_cir_lut_idx: uv circular filter LUT index
- * @ y_rgb_sep_lut_idx: y circular filter LUT index
- * @ uv_sep_lut_idx: uv separable filter LUT index
- * @ dir_lut:      pointer to 2D LUT
- * @ cir_lut:      pointer to circular filter LUT
- * @ sep_lut:      pointer to separable filter LUT
- * @ de: detail enhancer configuration
- */
-struct sde_hw_scaler3_cfg {
-	u32 enable;
-	u32 dir_en;
-	int32_t init_phase_x[SDE_MAX_PLANES];
-	int32_t phase_step_x[SDE_MAX_PLANES];
-	int32_t init_phase_y[SDE_MAX_PLANES];
-	int32_t phase_step_y[SDE_MAX_PLANES];
-
-	u32 preload_x[SDE_MAX_PLANES];
-	u32 preload_y[SDE_MAX_PLANES];
-	u32 src_width[SDE_MAX_PLANES];
-	u32 src_height[SDE_MAX_PLANES];
-
-	u32 dst_width;
-	u32 dst_height;
-
-	u32 y_rgb_filter_cfg;
-	u32 uv_filter_cfg;
-	u32 alpha_filter_cfg;
-	u32 blend_cfg;
-
-	u32 lut_flag;
-	u32 dir_lut_idx;
-
-	u32 y_rgb_cir_lut_idx;
-	u32 uv_cir_lut_idx;
-	u32 y_rgb_sep_lut_idx;
-	u32 uv_sep_lut_idx;
-	u32 *dir_lut;
-	size_t dir_len;
-	u32 *cir_lut;
-	size_t cir_len;
-	u32 *sep_lut;
-	size_t sep_len;
-
-	/*
-	 * Detail enhancer settings
-	 */
-	struct sde_hw_scaler3_de_cfg de;
-};
-
-/**
  * struct sde_hw_pipe_cfg : Pipe description
  * @layout:    format layout information for programming buffer to hardware
  * @src_rect:  src ROI, caller takes into account the different operations
@@ -610,9 +502,11 @@
 	 * setup_cdp - setup client driven prefetch
 	 * @ctx: Pointer to pipe context
 	 * @cfg: Pointer to cdp configuration
+	 * @index: rectangle index in multirect
 	 */
 	void (*setup_cdp)(struct sde_hw_pipe *ctx,
-			struct sde_hw_pipe_cdp_cfg *cfg);
+			struct sde_hw_pipe_cdp_cfg *cfg,
+			enum sde_sspp_multirect_index index);
 
 	/**
 	 * setup_secure_address - setup security status of the source address
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
index 7df5736..08fe5e1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/sde_drm.h>
 #include "msm_drv.h"
 #include "sde_kms.h"
 #include "sde_hw_mdss.h"
@@ -18,6 +20,57 @@
 /* using a file static variables for debugfs access */
 static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
 
+/* SDE_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION                  0x00
+#define QSEED3_OP_MODE                     0x04
+#define QSEED3_RGB2Y_COEFF                 0x08
+#define QSEED3_PHASE_INIT                  0x0C
+#define QSEED3_PHASE_STEP_Y_H              0x10
+#define QSEED3_PHASE_STEP_Y_V              0x14
+#define QSEED3_PHASE_STEP_UV_H             0x18
+#define QSEED3_PHASE_STEP_UV_V             0x1C
+#define QSEED3_PRELOAD                     0x20
+#define QSEED3_DE_SHARPEN                  0x24
+#define QSEED3_DE_SHARPEN_CTL              0x28
+#define QSEED3_DE_SHAPE_CTL                0x2C
+#define QSEED3_DE_THRESHOLD                0x30
+#define QSEED3_DE_ADJUST_DATA_0            0x34
+#define QSEED3_DE_ADJUST_DATA_1            0x38
+#define QSEED3_DE_ADJUST_DATA_2            0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A            0x40
+#define QSEED3_SRC_SIZE_UV                 0x44
+#define QSEED3_DST_SIZE                    0x48
+#define QSEED3_COEF_LUT_CTRL               0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT           0
+#define QSEED3_COEF_LUT_DIR_BIT            1
+#define QSEED3_COEF_LUT_Y_CIR_BIT          2
+#define QSEED3_COEF_LUT_UV_CIR_BIT         3
+#define QSEED3_COEF_LUT_Y_SEP_BIT          4
+#define QSEED3_COEF_LUT_UV_SEP_BIT         5
+#define QSEED3_BUFFER_CTRL                 0x50
+#define QSEED3_CLK_CTRL0                   0x54
+#define QSEED3_CLK_CTRL1                   0x58
+#define QSEED3_CLK_STATUS                  0x5C
+#define QSEED3_MISR_CTRL                   0x70
+#define QSEED3_MISR_SIGNATURE_0            0x74
+#define QSEED3_MISR_SIGNATURE_1            0x78
+#define QSEED3_PHASE_INIT_Y_H              0x90
+#define QSEED3_PHASE_INIT_Y_V              0x94
+#define QSEED3_PHASE_INIT_UV_H             0x98
+#define QSEED3_PHASE_INIT_UV_V             0x9C
+#define QSEED3_COEF_LUT                    0x100
+#define QSEED3_FILTERS                     5
+#define QSEED3_LUT_REGIONS                 4
+#define QSEED3_CIRCULAR_LUTS               9
+#define QSEED3_SEPARABLE_LUTS              10
+#define QSEED3_LUT_SIZE                    60
+#define QSEED3_ENABLE                      2
+#define QSEED3_DIR_LUT_SIZE                (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+	(QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
 void sde_reg_write(struct sde_hw_blk_reg_map *c,
 		u32 reg_off,
 		u32 val,
@@ -40,6 +93,283 @@
 	return &sde_hw_util_log_mask;
 }
 
+void sde_set_scaler_v2(struct sde_hw_scaler3_cfg *cfg,
+		const struct sde_drm_scaler_v2 *scale_v2)
+{
+	int i;
+
+	cfg->enable = scale_v2->enable;
+	cfg->dir_en = scale_v2->dir_en;
+
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		cfg->init_phase_x[i] = scale_v2->init_phase_x[i];
+		cfg->phase_step_x[i] = scale_v2->phase_step_x[i];
+		cfg->init_phase_y[i] = scale_v2->init_phase_y[i];
+		cfg->phase_step_y[i] = scale_v2->phase_step_y[i];
+
+		cfg->preload_x[i] = scale_v2->preload_x[i];
+		cfg->preload_y[i] = scale_v2->preload_y[i];
+		cfg->src_width[i] = scale_v2->src_width[i];
+		cfg->src_height[i] = scale_v2->src_height[i];
+	}
+
+	cfg->dst_width = scale_v2->dst_width;
+	cfg->dst_height = scale_v2->dst_height;
+
+	cfg->y_rgb_filter_cfg = scale_v2->y_rgb_filter_cfg;
+	cfg->uv_filter_cfg = scale_v2->uv_filter_cfg;
+	cfg->alpha_filter_cfg = scale_v2->alpha_filter_cfg;
+	cfg->blend_cfg = scale_v2->blend_cfg;
+
+	cfg->lut_flag = scale_v2->lut_flag;
+	cfg->dir_lut_idx = scale_v2->dir_lut_idx;
+	cfg->y_rgb_cir_lut_idx = scale_v2->y_rgb_cir_lut_idx;
+	cfg->uv_cir_lut_idx = scale_v2->uv_cir_lut_idx;
+	cfg->y_rgb_sep_lut_idx = scale_v2->y_rgb_sep_lut_idx;
+	cfg->uv_sep_lut_idx = scale_v2->uv_sep_lut_idx;
+
+	cfg->de.enable = scale_v2->de.enable;
+	cfg->de.sharpen_level1 = scale_v2->de.sharpen_level1;
+	cfg->de.sharpen_level2 = scale_v2->de.sharpen_level2;
+	cfg->de.clip = scale_v2->de.clip;
+	cfg->de.limit = scale_v2->de.limit;
+	cfg->de.thr_quiet = scale_v2->de.thr_quiet;
+	cfg->de.thr_dieout = scale_v2->de.thr_dieout;
+	cfg->de.thr_low = scale_v2->de.thr_low;
+	cfg->de.thr_high = scale_v2->de.thr_high;
+	cfg->de.prec_shift = scale_v2->de.prec_shift;
+
+	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
+		cfg->de.adjust_a[i] = scale_v2->de.adjust_a[i];
+		cfg->de.adjust_b[i] = scale_v2->de.adjust_b[i];
+		cfg->de.adjust_c[i] = scale_v2->de.adjust_c[i];
+	}
+}
+
+static void _sde_hw_setup_scaler3_lut(struct sde_hw_blk_reg_map *c,
+		struct sde_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+	int i, j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset, lut_len;
+	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+	static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->dir_lut;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->cir_lut +
+			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[2] = scaler3_cfg->cir_lut +
+			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[3] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[4] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = QSEED3_COEF_LUT + offset
+					+ off_tbl[filter][i][1];
+				lut_len = off_tbl[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					SDE_REG_WRITE(c,
+						lut_addr,
+						(lut[filter])[lut_offset++]);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		SDE_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
+static void _sde_hw_setup_scaler3_de(struct sde_hw_blk_reg_map *c,
+		struct sde_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+	u32 adjust_a, adjust_b, adjust_c;
+
+	if (!de_cfg->enable)
+		return;
+
+	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+		((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+		((de_cfg->prec_shift & 0x7) << 13) |
+		((de_cfg->clip & 0x7) << 16);
+
+	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+		((de_cfg->thr_dieout & 0x3FF) << 16);
+
+	de_thr = (de_cfg->thr_low & 0x3FF) |
+		((de_cfg->thr_high & 0x3FF) << 16);
+
+	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+	SDE_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+	SDE_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+	SDE_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+	SDE_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+	SDE_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+
+}
+
+void sde_hw_setup_scaler3(struct sde_hw_blk_reg_map *c,
+		struct sde_hw_scaler3_cfg *scaler3_cfg,
+		u32 scaler_offset, u32 scaler_version,
+		const struct sde_format *format)
+{
+	u32 op_mode = 0;
+	u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+	if (!scaler3_cfg->enable)
+		goto end;
+
+	op_mode |= BIT(0);
+	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+	if (format && SDE_FORMAT_IS_YUV(format)) {
+		op_mode |= BIT(12);
+		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+	}
+
+	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+	preload =
+		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+	if (scaler3_cfg->de.enable) {
+		_sde_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+		op_mode |= BIT(8);
+	}
+
+	if (scaler3_cfg->lut_flag)
+		_sde_hw_setup_scaler3_lut(c, scaler3_cfg,
+								scaler_offset);
+
+	if (scaler_version == 0x1002) {
+		phase_init =
+			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+		SDE_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+	} else {
+		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+		SDE_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+	}
+
+	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+	SDE_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+	SDE_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+	SDE_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+	if (format && !SDE_FORMAT_IS_DX(format))
+		op_mode |= BIT(14);
+
+	if (format && format->alpha_enable) {
+		op_mode |= BIT(10);
+		if (scaler_version == 0x1002)
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+		else
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+	}
+
+	SDE_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 sde_hw_get_scaler3_ver(struct sde_hw_blk_reg_map *c,
+			u32 scaler_offset)
+{
+	return SDE_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
 void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
 		u32 csc_reg_off,
 		struct sde_csc_cfg *data, bool csc10)
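
With the QSEED3 programming moved into sde_hw_util.c, the SSPP and DS paths
share one register-writing helper, and user-space scaler state arriving as a
struct sde_drm_scaler_v2 is converted first. A minimal sketch of that flow
(the wrapper name and its arguments are placeholders for whichever block is
being programmed):

	static void example_program_qseed3(struct sde_hw_blk_reg_map *hw,
			const struct sde_drm_scaler_v2 *user_cfg,
			u32 scaler_offset, u32 scaler_version,
			const struct sde_format *fmt)
	{
		struct sde_hw_scaler3_cfg cfg;

		memset(&cfg, 0, sizeof(cfg));

		/* copy the UAPI payload into the internal scaler config */
		sde_set_scaler_v2(&cfg, user_cfg);

		/*
		 * LUT pointers are not part of the v2 payload; they must be
		 * attached separately before lut_flag is honoured.
		 */
		sde_hw_setup_scaler3(hw, &cfg, scaler_offset,
				scaler_version, fmt);
	}
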
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
index aa3d5b9..720e113 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_util.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -39,6 +39,125 @@
 	u32 log_mask;
 };
 
+/**
+ * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip:          clip shift
+ * @ limit:         limit value
+ * @ thr_quiet:     quiet threshold
+ * @ thr_dieout:    dieout threshold
+ * @ thr_low:       low threshold
+ * @ thr_high:      high threshold
+ * @ prec_shift:    precision shift
+ * @ adjust_a:      A-coefficients for mapping curve
+ * @ adjust_b:      B-coefficients for mapping curve
+ * @ adjust_c:      C-coefficients for mapping curve
+ */
+struct sde_hw_scaler3_de_cfg {
+	u32 enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[SDE_MAX_DE_CURVES];
+	int16_t adjust_b[SDE_MAX_DE_CURVES];
+	int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable:        scaler enable
+ * @dir_en:        direction detection block enable
+ * @init_phase_x:  horizontal initial phase
+ * @phase_step_x:  horizontal phase step
+ * @init_phase_y:  vertical initial phase
+ * @phase_step_y:  vertical phase step
+ * @preload_x:     horizontal preload value
+ * @preload_y:     vertical preload value
+ * @src_width:     source width
+ * @src_height:    source height
+ * @dst_width:     destination width
+ * @dst_height:    destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg:     blend coefficients configuration
+ * @lut_flag:      scaler LUT update flags
+ *                 0x1 swap LUT bank
+ *                 0x2 update 2D filter LUT
+ *                 0x4 update y circular filter LUT
+ *                 0x8 update uv circular filter LUT
+ *                 0x10 update y separable filter LUT
+ *                 0x20 update uv separable filter LUT
+ * @dir_lut_idx:   2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut:       pointer to 2D LUT
+ * @dir_len:       length of 2D LUT
+ * @cir_lut:       pointer to circular filter LUT
+ * @cir_len:       length of circular filter LUT
+ * @sep_lut:       pointer to separable filter LUT
+ * @sep_len:       length of separable filter LUT
+ * @de:            detail enhancer configuration
+ */
+struct sde_hw_scaler3_cfg {
+	u32 enable;
+	u32 dir_en;
+	int32_t init_phase_x[SDE_MAX_PLANES];
+	int32_t phase_step_x[SDE_MAX_PLANES];
+	int32_t init_phase_y[SDE_MAX_PLANES];
+	int32_t phase_step_y[SDE_MAX_PLANES];
+
+	u32 preload_x[SDE_MAX_PLANES];
+	u32 preload_y[SDE_MAX_PLANES];
+	u32 src_width[SDE_MAX_PLANES];
+	u32 src_height[SDE_MAX_PLANES];
+
+	u32 dst_width;
+	u32 dst_height;
+
+	u32 y_rgb_filter_cfg;
+	u32 uv_filter_cfg;
+	u32 alpha_filter_cfg;
+	u32 blend_cfg;
+
+	u32 lut_flag;
+	u32 dir_lut_idx;
+
+	u32 y_rgb_cir_lut_idx;
+	u32 uv_cir_lut_idx;
+	u32 y_rgb_sep_lut_idx;
+	u32 uv_sep_lut_idx;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+
+	/*
+	 * Detail enhancer settings
+	 */
+	struct sde_hw_scaler3_de_cfg de;
+};
+
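+/**
+ * struct sde_hw_scaler3_lut_cfg : QSEEDv3 LUT configuration
+ * @is_configured: true once the LUT pointers below have been set up
+ * @dir_lut:       pointer to 2D LUT
+ * @dir_len:       length of 2D LUT
+ * @cir_lut:       pointer to circular filter LUT
+ * @cir_len:       length of circular filter LUT
+ * @sep_lut:       pointer to separable filter LUT
+ * @sep_len:       length of separable filter LUT
+ */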
+struct sde_hw_scaler3_lut_cfg {
+	bool is_configured;
+	u32 *dir_lut;
+	size_t dir_len;
+	u32 *cir_lut;
+	size_t cir_len;
+	u32 *sep_lut;
+	size_t sep_len;
+};
+
 u32 *sde_hw_util_get_log_mask_ptr(void);
 
 void sde_reg_write(struct sde_hw_blk_reg_map *c,
@@ -58,6 +177,17 @@
 
 void *sde_hw_util_get_dir(void);
 
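+/* populate a sde_hw_scaler3_cfg from the user-provided sde_drm_scaler_v2 */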
+void sde_set_scaler_v2(struct sde_hw_scaler3_cfg *cfg,
+		const struct sde_drm_scaler_v2 *scale_v2);
+
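+/* program the QSEEDv3 scaler block at scaler_offset for the given format */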
+void sde_hw_setup_scaler3(struct sde_hw_blk_reg_map *c,
+		struct sde_hw_scaler3_cfg *scaler3_cfg,
+		u32 scaler_offset, u32 scaler_version,
+		const struct sde_format *format);
+
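+/* read the QSEEDv3 scaler hardware version register */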
+u32 sde_hw_get_scaler3_ver(struct sde_hw_blk_reg_map *c,
+		u32 scaler_offset);
+
 void sde_hw_csc_setup(struct sde_hw_blk_reg_map  *c,
 		u32 csc_reg_off,
 		struct sde_csc_cfg *data, bool csc10);
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index eeb7a00..76f89f4 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -19,6 +19,30 @@
 #include "sde_irq.h"
 #include "sde_core_irq.h"
 
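+/* last top-level interrupt status, cached for irq-storm debugging */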
+static uint32_t g_sde_irq_status;
+
+void sde_irq_update(struct msm_kms *msm_kms, bool enable)
+{
+	int irq_num;
+	struct sde_kms *sde_kms = to_sde_kms(msm_kms);
+
+	if (!msm_kms || !sde_kms) {
+		SDE_ERROR("invalid kms arguments\n");
+		return;
+	}
+
+	irq_num = platform_get_irq(sde_kms->dev->platformdev, 0);
+	if (irq_num < 0) {
+		SDE_ERROR("invalid irq number\n");
+		return;
+	}
+
+	if (enable)
+		enable_irq(irq_num);
+	else
+		disable_irq(irq_num);
+}
+
 irqreturn_t sde_irq(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
@@ -27,6 +51,9 @@
 	sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
 			&interrupts);
 
+	/* store irq status in case of irq-storm debugging */
+	g_sde_irq_status = interrupts;
+
 	/*
 	 * Taking care of MDP interrupt
 	 */
@@ -40,13 +67,30 @@
 	 */
 	while (interrupts) {
 		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
 
-		generic_handle_irq(irq_find_mapping(
-				sde_kms->irq_controller.domain, hwirq));
+		mapping = irq_find_mapping(sde_kms->irq_controller.domain,
+				hwirq);
+		if (mapping == 0) {
+			SDE_EVT32(hwirq, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
 		interrupts &= ~(1 << hwirq);
 	}
 
 	return IRQ_HANDLED;
+
+error:
+	/* bad situation: inform the irq core; it may disable the overall MDSS irq */
+	return IRQ_NONE;
 }
 
 void sde_irq_preinstall(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.h b/drivers/gpu/drm/msm/sde/sde_irq.h
index e1090071..5bb299a 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,4 +56,11 @@
  */
 irqreturn_t sde_irq(struct msm_kms *kms);
 
+/**
+ * sde_irq_update - enable/disable IRQ line
+ * @kms:		pointer to kms context
+ * @enable:		enable:true, disable:false
+ */
+void sde_irq_update(struct msm_kms *kms, bool enable);
+
 #endif /* __SDE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 1a6585a..bf06dfb 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -545,7 +545,9 @@
 	struct msm_drm_private *priv;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state;
-	int i;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i, rc = 0;
 
 	if (!kms || !old_state)
 		return;
@@ -558,6 +560,19 @@
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
 		sde_crtc_complete_commit(crtc, old_crtc_state);
 
+	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+		struct sde_connector *c_conn;
+
+		c_conn = to_sde_connector(connector);
+		if (!c_conn->ops.post_kickoff)
+			continue;
+		rc = c_conn->ops.post_kickoff(connector);
+		if (rc)
+			pr_err("connector post kickoff failed rc=%d\n", rc);
+	}
+
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
 	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
@@ -758,6 +773,8 @@
 		.set_power = dsi_display_set_power,
 		.get_mode_info = dsi_conn_get_mode_info,
 		.get_dst_format = dsi_display_get_dst_format,
+		.post_kickoff = dsi_conn_post_kickoff,
+		.check_status = dsi_display_check_status,
 	};
 	static const struct sde_connector_ops wb_ops = {
 		.post_init =    sde_wb_connector_post_init,
@@ -767,7 +784,8 @@
 		.get_info =     sde_wb_get_info,
 		.soft_reset =   NULL,
 		.get_mode_info = sde_wb_get_mode_info,
-		.get_dst_format = NULL
+		.get_dst_format = NULL,
+		.check_status = NULL,
 	};
 	static const struct sde_connector_ops dp_ops = {
 		.post_init  = dp_connector_post_init,
@@ -776,6 +794,8 @@
 		.mode_valid = dp_connector_mode_valid,
 		.get_info   = dp_connector_get_info,
 		.get_mode_info  = dp_connector_get_mode_info,
+		.send_hpd_event = dp_connector_send_hpd_event,
+		.check_status = NULL,
 	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
@@ -1403,6 +1423,12 @@
 	if (!priv)
 		return;
 
+	if (sde_kms->genpd_init) {
+		sde_kms->genpd_init = false;
+		pm_genpd_remove(&sde_kms->genpd);
+		of_genpd_del_provider(pdev->dev.of_node);
+	}
+
 	if (sde_kms->hw_intr)
 		sde_hw_intr_destroy(sde_kms->hw_intr);
 	sde_kms->hw_intr = NULL;
@@ -1685,6 +1711,8 @@
 {
 	struct drm_device *dev = NULL;
 	struct sde_kms *sde_kms = NULL;
+	struct drm_connector *connector = NULL;
+	struct sde_connector *sde_conn = NULL;
 
 	if (!kms) {
 		SDE_ERROR("invalid kms\n");
@@ -1699,8 +1727,22 @@
 		return;
 	}
 
-	if (dev->mode_config.funcs->output_poll_changed)
-		dev->mode_config.funcs->output_poll_changed(dev);
+	if (!dev->mode_config.poll_enabled)
+		return;
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		sde_conn = to_sde_connector(connector);
+
+		if (sde_conn->ops.send_hpd_event)
+			sde_conn->ops.send_hpd_event(sde_conn->display);
+	}
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static int sde_kms_pm_suspend(struct device *dev)
@@ -1960,12 +2002,71 @@
 static void sde_kms_handle_power_event(u32 event_type, void *usr)
 {
 	struct sde_kms *sde_kms = usr;
+	struct msm_kms *msm_kms;
 
 	if (!sde_kms)
 		return;
+
+	msm_kms = &sde_kms->base;
 
-	if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+	SDE_DEBUG("event_type:%d\n", event_type);
+	SDE_EVT32_VERBOSE(event_type);
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
+		sde_irq_update(msm_kms, true);
 		sde_vbif_init_memtypes(sde_kms);
+	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
+		sde_irq_update(msm_kms, false);
+	}
+}
+
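+/* resolve the sde_kms that embeds the given generic_pm_domain */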
+#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)
+
+static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
+{
+	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int rc;
+
+	SDE_DEBUG("\n");
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return -EINVAL;
+
+	SDE_EVT32(genpd->device_count);
+
+	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, true);
+
+	return rc;
+}
+
+static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
+{
+	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int rc;
+
+	SDE_DEBUG("\n");
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return -EINVAL;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return -EINVAL;
+
+	SDE_EVT32(genpd->device_count);
+
+	rc = sde_power_resource_enable(&priv->phandle, priv->pclient, false);
+
+	return rc;
 }
 
 static int sde_kms_hw_init(struct msm_kms *kms)
@@ -2193,12 +2294,41 @@
 	 */
 	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
 	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
-			SDE_POWER_EVENT_POST_ENABLE,
+			SDE_POWER_EVENT_POST_ENABLE |
+			SDE_POWER_EVENT_PRE_DISABLE,
 			sde_kms_handle_power_event, sde_kms, "kms");
 
+	/* initialize power domain if defined */
+	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
+		sde_kms->genpd.name = dev->unique;
+		sde_kms->genpd.power_off = sde_kms_pd_disable;
+		sde_kms->genpd.power_on = sde_kms_pd_enable;
+
+		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
+		if (rc < 0) {
+			SDE_ERROR("failed to init genpd provider %s: %d\n",
+					sde_kms->genpd.name, rc);
+			goto genpd_err;
+		}
+
+		rc = of_genpd_add_provider_simple(dev->dev->of_node,
+				&sde_kms->genpd);
+		if (rc < 0) {
+			SDE_ERROR("failed to add genpd provider %s: %d\n",
+					sde_kms->genpd.name, rc);
+			pm_genpd_remove(&sde_kms->genpd);
+			goto genpd_err;
+		}
+
+		sde_kms->genpd_init = true;
+		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
+	}
+
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
 	return 0;
 
+genpd_err:
 drm_obj_init_err:
 	sde_core_perf_destroy(&sde_kms->perf);
 hw_intr_init_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 0ddfb30..aacff78 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -20,6 +20,7 @@
 #define __SDE_KMS_H__
 
 #include <linux/msm_ion.h>
+#include <linux/pm_domain.h>
 
 #include "msm_drv.h"
 #include "msm_kms.h"
@@ -107,6 +108,9 @@
 #define SDE_KMS_OPS_CLEANUP_PLANE_FB                       BIT(2)
 #define SDE_KMS_OPS_PREPARE_PLANE_FB                       BIT(3)
 
+/* ESD status check interval in milliseconds */
+#define STATUS_CHECK_INTERVAL_MS 5000
+
 /*
  * struct sde_irq_callback - IRQ callback handlers
  * @list: list to callback
@@ -173,6 +177,9 @@
 	int core_rev;
 	struct sde_mdss_cfg *catalog;
 
+	struct generic_pm_domain genpd;
+	bool genpd_init;
+
 	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
 	struct sde_power_client *core_client;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 10f796f..9c13ab9 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -916,6 +916,22 @@
 		return;
 	}
 
+	/*
+	 * framebuffer prepare is deferred for prepare_fb calls that
+	 * happen during the transition from secure to non-secure.
+	 * Handle the prepare at this point for such cases. This can be
+	 * expected for one or two frames during the transition.
+	 */
+	if (aspace && pstate->defer_prepare_fb) {
+		ret = msm_framebuffer_prepare(fb, pstate->aspace);
+		if (ret) {
+			SDE_ERROR_PLANE(psde,
+				"failed to prepare framebuffer %d\n", ret);
+			return;
+		}
+		pstate->defer_prepare_fb = false;
+	}
+
 	ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
 	if (ret == -EAGAIN)
 		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
@@ -1518,7 +1534,7 @@
  * @plane: Pointer to drm plane
  * return: prefill time in line
  */
-static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
+u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
 {
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
@@ -1547,33 +1563,19 @@
 				&blocksize, &blocksize);
 
 	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
-
-	SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
+	prefill_line = mult_frac(prefill_line, rstate->out_src_h >> 16,
+			state->crtc_h);
+	SDE_DEBUG(
+		"plane%d.%d blk:%u head:%u vdst/vsrc:%u/%u prefill:%u\n",
+			plane->base.id, rstate->sequence_id,
+			blocksize, sde_kms->catalog->sbuf_headroom,
+			state->crtc_h, rstate->out_src_h >> 16,
+			prefill_line);
 
 	return prefill_line;
 }
 
 /**
- * sde_plane_is_sbuf_mode - check if sspp of given plane is in streaming
- *	buffer mode
- * @plane: Pointer to drm plane
- * @prefill: Pointer to prefill line count
- * return: true if sspp is in stream buffer mode
- */
-bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill)
-{
-	struct sde_plane_state *pstate = plane && plane->state ?
-			to_sde_plane_state(plane->state) : NULL;
-	struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
-	bool sbuf_mode = rstate ? rstate->out_sbuf : false;
-
-	if (prefill)
-		*prefill = sde_plane_rot_calc_prefill(plane);
-
-	return sbuf_mode;
-}
-
-/**
  * sde_plane_rot_calc_cfg - calculate rotator/sspp configuration by
  *	enumerating over all planes attached to the same rotator
  * @plane: Pointer to drm plane
@@ -2131,6 +2133,13 @@
 		}
 	}
 
+	if (new_pstate->defer_prepare_fb) {
+		SDE_DEBUG(
+		    "plane%d, domain not attached, prepare fb handled later\n",
+		    plane->base.id);
+		return 0;
+	}
+
 	/* prepare rotator input buffer */
 	ret = msm_framebuffer_prepare(new_state->fb, new_pstate->aspace);
 	if (ret) {
@@ -2400,6 +2409,7 @@
 	struct drm_plane_state *state;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
+	int ret = 0;
 
 	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane/state\n");
@@ -2421,24 +2431,57 @@
 	if (!rstate->out_sbuf || !rstate->rot_hw)
 		return;
 
+	/*
+	 * framebuffer prepare is deferred for prepare_fb calls that
+	 * happen during the transition from secure to non-secure.
+	 * Handle the prepare at this point for rotator in such cases.
+	 * This can be expected for one or two frames during the transition.
+	 */
+	if (pstate->aspace && pstate->defer_prepare_fb) {
+		/* prepare rotator input buffer */
+		ret = msm_framebuffer_prepare(state->fb, pstate->aspace);
+		if (ret) {
+			SDE_ERROR("p%d failed to prepare input fb %d\n",
+							plane->base.id, ret);
+			return;
+		}
+
+		/* prepare rotator output buffer */
+		if (sde_plane_enabled(state) && rstate->out_fb) {
+			ret = msm_framebuffer_prepare(rstate->out_fb,
+						pstate->aspace);
+			if (ret) {
+				SDE_ERROR(
+				  "p%d failed to prepare inline fb %d\n",
+				  plane->base.id, ret);
+				goto error_prepare_output_buffer;
+			}
+		}
+	}
+
 	sde_plane_rot_submit_command(plane, state, SDE_HW_ROT_CMD_COMMIT);
+
+	return;
+
+error_prepare_output_buffer:
+	msm_framebuffer_cleanup(state->fb, pstate->aspace);
 }
 
-void sde_plane_kickoff(struct drm_plane *plane)
+int sde_plane_kickoff_rot(struct drm_plane *plane)
 {
 	struct sde_plane_state *pstate;
 
 	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane\n");
-		return;
+		return -EINVAL;
 	}
 
 	pstate = to_sde_plane_state(plane->state);
 
 	if (!pstate->rot.rot_hw || !pstate->rot.rot_hw->ops.commit)
-		return;
+		return 0;
 
-	pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
+	return pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
 			&pstate->rot.rot_cmd,
 			SDE_HW_ROT_CMD_START);
 }
@@ -2752,7 +2795,7 @@
 		return;
 
 	*flush_rot = 0x0;
-	if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
+	if (rstate && rstate->out_sbuf && rstate->rot_hw &&
 			ctl->ops.get_bitmask_rot)
 		ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
 }
@@ -2779,29 +2822,33 @@
 		return ret;
 	}
 
-	/*cache aspace */
+	/* cache aspace */
 	pstate->aspace = aspace;
+
+	/*
+	 * When transitioning from secure to non-secure,
+	 * plane->prepare_fb happens before the commit. In such a case,
+	 * defer the prepare_fb and handle it later, during the commit,
+	 * after the domains have been attached as part of the transition.
+	 */
+	pstate->defer_prepare_fb = (aspace && !aspace->domain_attached);
+
 	ret = sde_plane_rot_prepare_fb(plane, new_state);
 	if (ret) {
 		SDE_ERROR("failed to prepare rot framebuffer\n");
 		return ret;
 	}
 
+	if (pstate->defer_prepare_fb) {
+		SDE_DEBUG_PLANE(psde,
+		    "domain not attached, prepare_fb handled later\n");
+		return 0;
+	}
+
 	new_rstate = &to_sde_plane_state(new_state)->rot;
 
 	if (pstate->aspace) {
-		/*
-		 * when transitioning from secure to non-secure,
-		 * plane->prepare_fb happens before the commit. In such case,
-		 * return early, as prepare_fb would be handled as part
-		 * of the transition after attaching the domains,
-		 * during the commit
-		 */
-		if (!pstate->aspace->domain_attached) {
-			SDE_DEBUG_PLANE(psde,
-			    "domain not attached, prepare_fb handled later\n");
-			return 0;
-		}
 		ret = msm_framebuffer_prepare(new_rstate->out_fb,
 				pstate->aspace);
 		if (ret) {
@@ -3556,7 +3603,8 @@
 					SDE_FORMAT_IS_TILE(fmt);
 			cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
 
-			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg,
+					pstate->multirect_index);
 		}
 
 		if (psde->pipe_hw->ops.setup_sys_cache) {
@@ -4078,51 +4126,11 @@
 			&pstate->property_state, PLANE_PROP_SCALER_V2);
 
 	/* populate from user space */
+	sde_set_scaler_v2(cfg, &scale_v2);
+
 	pe = &pstate->pixel_ext;
 	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
-	cfg->enable = scale_v2.enable;
-	cfg->dir_en = scale_v2.dir_en;
-	for (i = 0; i < SDE_MAX_PLANES; i++) {
-		cfg->init_phase_x[i] = scale_v2.init_phase_x[i];
-		cfg->phase_step_x[i] = scale_v2.phase_step_x[i];
-		cfg->init_phase_y[i] = scale_v2.init_phase_y[i];
-		cfg->phase_step_y[i] = scale_v2.phase_step_y[i];
 
-		cfg->preload_x[i] = scale_v2.preload_x[i];
-		cfg->preload_y[i] = scale_v2.preload_y[i];
-		cfg->src_width[i] = scale_v2.src_width[i];
-		cfg->src_height[i] = scale_v2.src_height[i];
-	}
-	cfg->dst_width = scale_v2.dst_width;
-	cfg->dst_height = scale_v2.dst_height;
-
-	cfg->y_rgb_filter_cfg = scale_v2.y_rgb_filter_cfg;
-	cfg->uv_filter_cfg = scale_v2.uv_filter_cfg;
-	cfg->alpha_filter_cfg = scale_v2.alpha_filter_cfg;
-	cfg->blend_cfg = scale_v2.blend_cfg;
-
-	cfg->lut_flag = scale_v2.lut_flag;
-	cfg->dir_lut_idx = scale_v2.dir_lut_idx;
-	cfg->y_rgb_cir_lut_idx = scale_v2.y_rgb_cir_lut_idx;
-	cfg->uv_cir_lut_idx = scale_v2.uv_cir_lut_idx;
-	cfg->y_rgb_sep_lut_idx = scale_v2.y_rgb_sep_lut_idx;
-	cfg->uv_sep_lut_idx = scale_v2.uv_sep_lut_idx;
-
-	cfg->de.enable = scale_v2.de.enable;
-	cfg->de.sharpen_level1 = scale_v2.de.sharpen_level1;
-	cfg->de.sharpen_level2 = scale_v2.de.sharpen_level2;
-	cfg->de.clip = scale_v2.de.clip;
-	cfg->de.limit = scale_v2.de.limit;
-	cfg->de.thr_quiet = scale_v2.de.thr_quiet;
-	cfg->de.thr_dieout = scale_v2.de.thr_dieout;
-	cfg->de.thr_low = scale_v2.de.thr_low;
-	cfg->de.thr_high = scale_v2.de.thr_high;
-	cfg->de.prec_shift = scale_v2.de.prec_shift;
-	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
-		cfg->de.adjust_a[i] = scale_v2.de.adjust_a[i];
-		cfg->de.adjust_b[i] = scale_v2.de.adjust_b[i];
-		cfg->de.adjust_c[i] = scale_v2.de.adjust_c[i];
-	}
 	for (i = 0; i < SDE_MAX_PLANES; i++) {
 		pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
 		pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 913647f..d6c5876 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -129,6 +129,7 @@
  * @multirect_index: index of the rectangle of SSPP
  * @multirect_mode: parallel or time multiplex multirect mode
  * @pending:	whether the current update is still pending
+ * @defer_prepare_fb:	indicate if prepare_fb call was deferred
  * @scaler3_cfg: configuration data for scaler3
  * @pixel_ext: configuration data for pixel extensions
  * @scaler_check_state: indicates status of user provided pixel extension data
@@ -146,6 +147,7 @@
 	uint32_t multirect_index;
 	uint32_t multirect_mode;
 	bool pending;
+	bool defer_prepare_fb;
 
 	/* scaler configuration */
 	struct sde_hw_scaler3_cfg scaler3_cfg;
@@ -207,12 +209,11 @@
 		u32 *flush_sspp, u32 *flush_rot);
 
 /**
- * sde_plane_is_sbuf_mode - return status of stream buffer mode
- * @plane:   Pointer to DRM plane object
- * @prefill: Pointer to updated prefill in stream buffer mode (optional)
- * Returns: true if plane is in stream buffer mode
+ * sde_plane_rot_calc_prefill - calculate rotator start prefill
+ * @plane: Pointer to drm plane
+ * return: prefill time in lines
  */
-bool sde_plane_is_sbuf_mode(struct drm_plane *plane, u32 *prefill);
+u32 sde_plane_rot_calc_prefill(struct drm_plane *plane);
 
 /**
  * sde_plane_restore - restore hw state if previously power collapsed
@@ -227,10 +228,11 @@
 void sde_plane_flush(struct drm_plane *plane);
 
 /**
- * sde_plane_kickoff - final plane operations before commit kickoff
+ * sde_plane_kickoff_rot - final plane rotator operations before commit kickoff
  * @plane: Pointer to drm plane structure
+ * Returns: Zero on success
  */
-void sde_plane_kickoff(struct drm_plane *plane);
+int sde_plane_kickoff_rot(struct drm_plane *plane);
 
 /**
  * sde_plane_set_error: enable/disable error condition
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index be3a8af..ebcf9e0 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -18,6 +18,7 @@
 #include "sde_hw_ctl.h"
 #include "sde_hw_cdm.h"
 #include "sde_hw_dspp.h"
+#include "sde_hw_ds.h"
 #include "sde_hw_pingpong.h"
 #include "sde_hw_intf.h"
 #include "sde_hw_wb.h"
@@ -32,6 +33,7 @@
 #define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
 #define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
 #define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DS))
 #define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
 				(t).num_comp_enc == (r).num_enc && \
 				(t).num_intf == (r).num_intf)
@@ -234,6 +236,9 @@
 	case SDE_HW_BLK_DSPP:
 		sde_hw_dspp_destroy(hw);
 		break;
+	case SDE_HW_BLK_DS:
+		sde_hw_ds_destroy(hw);
+		break;
 	case SDE_HW_BLK_CTL:
 		sde_hw_ctl_destroy(hw);
 		break;
@@ -304,7 +309,7 @@
 static int _sde_rm_hw_blk_create(
 		struct sde_rm *rm,
 		struct sde_mdss_cfg *cat,
-		void *mmio,
+		void __iomem *mmio,
 		enum sde_hw_blk_type type,
 		uint32_t id,
 		void *hw_catalog_info)
@@ -322,6 +327,9 @@
 	case SDE_HW_BLK_DSPP:
 		hw = sde_hw_dspp_init(id, mmio, cat);
 		break;
+	case SDE_HW_BLK_DS:
+		hw = sde_hw_ds_init(id, mmio, cat);
+		break;
 	case SDE_HW_BLK_CTL:
 		hw = sde_hw_ctl_init(id, mmio, cat);
 		break;
@@ -375,7 +383,7 @@
 
 int sde_rm_init(struct sde_rm *rm,
 		struct sde_mdss_cfg *cat,
-		void *mmio,
+		void __iomem *mmio,
 		struct drm_device *dev)
 {
 	int rc, i;
@@ -444,6 +452,17 @@
 		}
 	}
 
+	if (cat->mdp[0].has_dest_scaler) {
+		for (i = 0; i < cat->ds_count; i++) {
+			rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DS,
+					cat->ds[i].id, &cat->ds[i]);
+			if (rc) {
+				SDE_ERROR("failed: ds hw not available\n");
+				goto fail;
+			}
+		}
+	}
+
 	for (i = 0; i < cat->pingpong_count; i++) {
 		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
 				cat->pingpong[i].id, &cat->pingpong[i]);
@@ -543,18 +562,22 @@
 		struct sde_rm_requirements *reqs,
 		struct sde_rm_hw_blk *lm,
 		struct sde_rm_hw_blk **dspp,
+		struct sde_rm_hw_blk **ds,
 		struct sde_rm_hw_blk **pp,
 		struct sde_rm_hw_blk *primary_lm)
 {
 	const struct sde_lm_cfg *lm_cfg = to_sde_hw_mixer(lm->hw)->cap;
 	const struct sde_pingpong_cfg *pp_cfg;
 	struct sde_rm_hw_iter iter;
+	bool is_valid_dspp, is_valid_ds, ret;
 
 	*dspp = NULL;
+	*ds = NULL;
 	*pp = NULL;
 
-	SDE_DEBUG("check lm %d: dspp %d pp %d\n", lm_cfg->id, lm_cfg->dspp,
-			lm_cfg->pingpong);
+	SDE_DEBUG("check lm %d: dspp %d ds %d pp %d\n",
+			   lm_cfg->id, lm_cfg->dspp,
+			   lm_cfg->ds, lm_cfg->pingpong);
 
 	/* Check if this layer mixer is a peer of the proposed primary LM */
 	if (primary_lm) {
@@ -568,13 +591,28 @@
 		}
 	}
 
-	/* Matches user requirements? */
-	if ((RM_RQ_DSPP(reqs) && lm_cfg->dspp == DSPP_MAX) ||
-			(!RM_RQ_DSPP(reqs) && lm_cfg->dspp != DSPP_MAX)) {
-		SDE_DEBUG("dspp req mismatch lm %d reqdspp %d, lm->dspp %d\n",
-				lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
-				lm_cfg->dspp);
-		return false;
+	is_valid_dspp = (lm_cfg->dspp != DSPP_MAX);
+	is_valid_ds = (lm_cfg->ds != DS_MAX);
+
+	/*
+	 * RM_RQ_X: user requirement for which LMs to choose
+	 * is_valid_X: whether this LM is tied to block X
+	 * ret: true if the given LM matches the user requirement, else false
+	 */
+	if (RM_RQ_DSPP(reqs) && RM_RQ_DS(reqs))
+		ret = (is_valid_dspp && is_valid_ds);
+	else if (RM_RQ_DSPP(reqs))
+		ret = is_valid_dspp;
+	else if (RM_RQ_DS(reqs))
+		ret = is_valid_ds;
+	else
+		ret = !(is_valid_dspp || is_valid_ds);
+
+	if (!ret) {
+		SDE_DEBUG("fail:lm(%d)req_dspp(%d)dspp(%d)req_ds(%d)ds(%d)\n",
+			lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)), lm_cfg->dspp,
+			(bool)(RM_RQ_DS(reqs)), lm_cfg->ds);
+		return ret;
 	}
 
 	/* Already reserved? */
@@ -605,6 +643,28 @@
 		}
 	}
 
+	if (lm_cfg->ds != DS_MAX) {
+		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DS);
+		while (_sde_rm_get_hw_locked(rm, &iter)) {
+			if (iter.blk->id == lm_cfg->ds) {
+				*ds = iter.blk;
+				break;
+			}
+		}
+
+		if (!*ds) {
+			SDE_DEBUG("lm %d failed to retrieve ds %d\n", lm->id,
+					lm_cfg->ds);
+			return false;
+		}
+
+		if (RESERVED_BY_OTHER(*ds, rsvp)) {
+			SDE_DEBUG("lm %d ds %d already reserved\n",
+					lm->id, (*ds)->id);
+			return false;
+		}
+	}
+
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
 	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (iter.blk->id == lm_cfg->pingpong) {
@@ -622,6 +682,7 @@
 		SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
 				(*pp)->id);
 		*dspp = NULL;
+		*ds = NULL;
 		return false;
 	}
 
@@ -630,6 +691,7 @@
 			!(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
 		SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
 		*dspp = NULL;
+		*ds = NULL;
 		return false;
 	}
 
@@ -644,6 +706,7 @@
 {
 	struct sde_rm_hw_blk *lm[MAX_BLOCKS];
 	struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
+	struct sde_rm_hw_blk *ds[MAX_BLOCKS];
 	struct sde_rm_hw_blk *pp[MAX_BLOCKS];
 	struct sde_rm_hw_iter iter_i, iter_j;
 	int lm_count = 0;
@@ -660,14 +723,16 @@
 			_sde_rm_get_hw_locked(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
+		memset(&ds, 0, sizeof(ds));
 		memset(&pp, 0, sizeof(pp));
 
 		lm_count = 0;
 		lm[lm_count] = iter_i.blk;
 
-		if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
-				lm[lm_count], &dspp[lm_count], &pp[lm_count],
-				NULL))
+		if (!_sde_rm_check_lm_and_get_connected_blks(
+				rm, rsvp, reqs, lm[lm_count],
+				&dspp[lm_count], &ds[lm_count],
+				&pp[lm_count], NULL))
 			continue;
 
 		++lm_count;
@@ -680,8 +745,9 @@
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
-			if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp,
-					reqs, iter_j.blk, &dspp[lm_count],
+			if (!_sde_rm_check_lm_and_get_connected_blks(
+					rm, rsvp, reqs, iter_j.blk,
+					&dspp[lm_count], &ds[lm_count],
 					&pp[lm_count], iter_i.blk))
 				continue;
 
@@ -704,8 +770,12 @@
 		if (dspp[i])
 			dspp[i]->rsvp_nxt = rsvp;
 
+		if (ds[i])
+			ds[i]->rsvp_nxt = rsvp;
+
 		SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
-				dspp[i] ? dspp[i]->id : 0);
+				dspp[i] ? dspp[i]->id : 0,
+				ds[i] ? ds[i]->id : 0);
 	}
 
 	if (reqs->topology->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
@@ -1018,6 +1088,15 @@
 	if (reqs->topology->num_comp_enc)
 		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
 
+	/*
+	 * Set the requirement based on caps if it was not set from user space.
+	 * This ensures that an LM tied to a DS block is selected; currently
+	 * the DS blocks are tied to LM 0 and LM 1 (primary display).
+	 */
+	if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+		conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DS);
+
 	SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
 			reqs->hw_res.display_num_of_h_tiles);
 	SDE_DEBUG("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index b4a801a..1e48855 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -56,11 +56,13 @@
  *                               Normal behavior would not impact the
  *                               reservation list during the AtomicTest phase.
  * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
+ * @SDE_RM_TOPCTL_DS  : Require layer mixers with DS capabilities
  */
 enum sde_rm_topology_control {
 	SDE_RM_TOPCTL_RESERVE_LOCK,
 	SDE_RM_TOPCTL_RESERVE_CLEAR,
 	SDE_RM_TOPCTL_DSPP,
+	SDE_RM_TOPCTL_DS,
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 7143a8b..dc16ab1 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -44,6 +44,7 @@
 
 /* offsets from sde top address for the debug buses */
 #define DBGBUS_SSPP0	0x188
+#define DBGBUS_AXI_INTF	0x194
 #define DBGBUS_SSPP1	0x298
 #define DBGBUS_DSPP	0x348
 #define DBGBUS_PERIPH	0x418
@@ -66,6 +67,7 @@
 /* print debug ranges in groups of 4 u32s */
 #define REG_DUMP_ALIGN		16
 
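+/* highest RSC debug bus mux select sampled on sdm845 */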
+#define RSC_DEBUG_MUX_SEL_SDM845 9
 /**
  * struct sde_dbg_reg_offset - tracking for start and end of region
  * @start: start offset
@@ -128,7 +130,8 @@
 	u32 wr_addr;
 	u32 block_id;
 	u32 test_id;
-	void (*analyzer)(struct sde_debug_bus_entry *entry, u32 val);
+	void (*analyzer)(void __iomem *mem_base,
+				struct sde_debug_bus_entry *entry, u32 val);
 };
 
 struct vbif_debug_bus_entry {
@@ -174,6 +177,7 @@
  * @dbgbus_sde: debug bus structure for the sde
  * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
  * @dump_all: dump all entries in register dump
+ * @dsi_dbg_bus: dump dsi debug bus register
  */
 static struct sde_dbg_base {
 	struct sde_dbg_evtlog *evtlog;
@@ -191,20 +195,21 @@
 	struct sde_dbg_sde_debug_bus dbgbus_sde;
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
 	bool dump_all;
+	bool dsi_dbg_bus;
 } sde_dbg_base;
 
 /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
 struct sde_dbg_evtlog *sde_dbg_base_evtlog;
 
-static void _sde_debug_bus_xbar_dump(struct sde_debug_bus_entry *entry,
-		u32 val)
+static void _sde_debug_bus_xbar_dump(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
 {
 	dev_err(sde_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
-static void _sde_debug_bus_lm_dump(struct sde_debug_bus_entry *entry,
-		u32 val)
+static void _sde_debug_bus_lm_dump(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
 {
 	if (!(val & 0xFFF000))
 		return;
@@ -213,8 +218,8 @@
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
-static void _sde_debug_bus_ppb0_dump(struct sde_debug_bus_entry *entry,
-		u32 val)
+static void _sde_debug_bus_ppb0_dump(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
 {
 	if (!(val & BIT(15)))
 		return;
@@ -223,8 +228,8 @@
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
-static void _sde_debug_bus_ppb1_dump(struct sde_debug_bus_entry *entry,
-		u32 val)
+static void _sde_debug_bus_ppb1_dump(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
 {
 	if (!(val & BIT(15)))
 		return;
@@ -233,6 +238,29 @@
 			entry->wr_addr, entry->block_id, entry->test_id, val);
 }
 
+static void _sde_debug_bus_axi_dump_sdm845(void __iomem *mem_base,
+		struct sde_debug_bus_entry *entry, u32 val)
+{
+	u32 status, i;
+
+	if (!mem_base || !entry)
+		return;
+
+	for (i = 0; i <= RSC_DEBUG_MUX_SEL_SDM845; i++) {
+		sde_rsc_debug_dump(i);
+
+		/* make sure that mux_sel is updated before reading the status */
+		wmb();
+
+		/* read status again after rsc routes the debug bus */
+		status = readl_relaxed(mem_base + DBGBUS_DSPP_STATUS);
+
+		dev_err(sde_dbg_base.dev, "rsc mux_sel:%d 0x%x %d %d 0x%x\n",
+			i, entry->wr_addr, entry->block_id,
+			entry->test_id, status);
+	}
+}
+
 static struct sde_debug_bus_entry dbg_bus_sde_8998[] = {
 
 	/* Unpack 0 sspp 0*/
@@ -1986,6 +2014,9 @@
 	{ DBGBUS_PERIPH, 71, 3},
 	{ DBGBUS_PERIPH, 71, 4},
 	{ DBGBUS_PERIPH, 71, 5},
+
+	/* axi - should be last entry */
+	{ DBGBUS_AXI_INTF, 62, 0, _sde_debug_bus_axi_dump_sdm845},
 };
 
 static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
@@ -2332,7 +2363,7 @@
 		}
 
 		if (head->analyzer)
-			head->analyzer(head, status);
+			head->analyzer(mem_base, head, status);
 
 		/* Disable debug bus once we are done */
 		writel_relaxed(0, mem_base + head->wr_addr);
@@ -2541,6 +2572,9 @@
 	if (dump_dbgbus_vbif_rt)
 		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
 
+	if (sde_dbg_base.dsi_dbg_bus || dump_all)
+		dsi_ctrl_debug_dump();
+
 	if (do_panic && sde_dbg_base.panic_on_err)
 		panic(name);
 }
@@ -2615,6 +2649,9 @@
 		if (!strcmp(blk_name, "vbif_dbg_bus"))
 			dump_dbgbus_vbif_rt = true;
 
+		if (!strcmp(blk_name, "dsi_dbg_bus"))
+			sde_dbg_base.dsi_dbg_bus = true;
+
 		if (!strcmp(blk_name, "panic"))
 			do_panic = true;
 	}
@@ -2627,6 +2664,7 @@
 				dump_dbgbus_sde;
 		sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
 				dump_dbgbus_vbif_rt;
+		sde_dbg_base.dump_all = dump_all;
 		schedule_work(&sde_dbg_base.dump_work);
 	} else {
 		_sde_dump_array(blk_arr, blk_len, do_panic, name,
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index a266574..786451c3 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -315,6 +315,17 @@
 int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
 		char *buf, size_t bufsz);
 
+/**
+ * sde_rsc_debug_dump - sde rsc debug dump status
+ * @mux_sel:	select mux on rsc debug bus
+ */
+void sde_rsc_debug_dump(u32 mux_sel);
+
+/**
+ * dsi_ctrl_debug_dump - dump dsi debug dump status
+ */
+void dsi_ctrl_debug_dump(void);
+
 #else
 static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
 {
@@ -396,6 +407,14 @@
 	return -EINVAL;
 }
 
+static inline void sde_rsc_debug_dump(u32 mux_sel)
+{
+}
+
+static inline void dsi_ctrl_debug_dump(void)
+{
+}
+
 #endif /* defined(CONFIG_DEBUG_FS) */
 
 
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 3d6c2ea..791a6ca 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -605,6 +605,30 @@
 	}
 }
 
+u8 sde_get_edid_checksum(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+	struct edid *edid = NULL, *last_block = NULL;
+	u8 *raw_edid = NULL;
+
+	if (!edid_ctrl || !edid_ctrl->edid) {
+		SDE_ERROR("invalid edid input\n");
+		return 0;
+	}
+
+	edid = edid_ctrl->edid;
+
+	raw_edid = (u8 *)edid;
+	raw_edid += (edid->extensions * EDID_LENGTH);
+	last_block = (struct edid *)raw_edid;
+
+	if (last_block)
+		return last_block->checksum;
+
+	SDE_ERROR("Invalid block, no checksum\n");
+	return 0;
+}
+
 bool sde_detect_hdmi_monitor(void *input)
 {
 	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index b58b322..07bdf50 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -146,6 +146,14 @@
 u32 sde_get_sink_bpc(void *edid_ctrl);
 
 /**
+ * sde_get_edid_checksum() - return the checksum of last block of EDID.
+ * @input:     Handle to the edid_ctrl structure.
+ *
+ * Return: checksum of the last EDID block.
+ */
+u8 sde_get_edid_checksum(void *input);
+
+/**
  * _sde_edid_update_modes() - populate EDID modes.
  * @edid_ctrl:     Handle to the edid_ctrl structure.
  *
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 7951c23..3673d125 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -363,12 +363,15 @@
 			if (bytes_read != read_size) {
 				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
 					offset, read_size, bytes_read);
+				rc = -EIO;
 				break;
 			}
 
 			buf += read_size;
-			offset += read_size;
 			size -= read_size;
+
+			if (!realign)
+				offset += read_size;
 		} while (size > 0);
 	}
 
@@ -393,6 +396,7 @@
 			if (bytes_written != write_size) {
 				pr_err("fail: offset(0x%x), size(0x%x), rc(0x%x)\n",
 					offset, write_size, bytes_written);
+				rc = -EIO;
 				break;
 			}
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index b654b26..82b1199 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -456,7 +456,7 @@
 	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
 		if (!rc)
-			rpmh_mode_solver_set(rsc->disp_rsc, false);
+			rpmh_mode_solver_set(rsc->disp_rsc, true);
 	}
 
 	return rc;
@@ -837,6 +837,21 @@
 }
 EXPORT_SYMBOL(sde_rsc_client_vote);
 
+#if defined(CONFIG_DEBUG_FS)
+void sde_rsc_debug_dump(u32 mux_sel)
+{
+	struct sde_rsc_priv *rsc;
+
+	rsc = rsc_prv_list[SDE_RSC_INDEX];
+	if (!rsc)
+		return;
+
+	/* this must be called with rsc clocks enabled */
+	if (rsc->hw_ops.debug_dump)
+		rsc->hw_ops.debug_dump(rsc, mux_sel);
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
 static int _sde_debugfs_status_show(struct seq_file *s, void *data)
 {
 	struct sde_rsc_priv *rsc;
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index b474d21..e957779 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -515,7 +515,7 @@
 		reg = dss_reg_r(&rsc->wrapper_io,
 			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
 		reg |= (BIT(0) | BIT(8));
-		reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7));
+		reg &= ~(BIT(1) | BIT(2) | BIT(3) | BIT(6) | BIT(7) | BIT(9));
 		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
 							reg, rsc->debug_mode);
 		/* make sure that solver is enabled */
@@ -746,6 +746,12 @@
 	return blen;
 }
 
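+/* program the mux select on the RSC wrapper debug bus */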
+static void rsc_hw_debug_dump(struct sde_rsc_priv *rsc, u32 mux_sel)
+{
+	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_DEBUG_BUS,
+		((mux_sel & 0xf) << 1) | BIT(0), rsc->debug_mode);
+}
+
 bool rsc_hw_is_amc_mode(struct sde_rsc_priv *rsc)
 {
 	return dss_reg_r(&rsc->drv_io, SDE_RSCC_TCS_DRV0_CONTROL,
@@ -806,6 +812,7 @@
 	rsc->hw_ops.state_update = sde_rsc_state_update;
 	rsc->hw_ops.debug_show = sde_rsc_debug_show;
 	rsc->hw_ops.mode_ctrl = rsc_hw_mode_ctrl;
+	rsc->hw_ops.debug_dump = rsc_hw_debug_dump;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index c96ce75..64b0216 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -73,6 +73,7 @@
  * @hw_vsync:			Enables the vsync on RSC block.
  * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
  * @is_amc_mode:		Check current amc mode status
+ * @debug_dump:			dump debug bus registers or enable debug bus
  * @state_update:		Enable/override the solver based on rsc state
  *                              status (command/video)
  * @mode_show:			shows current mode status, mode0/1/2
@@ -87,6 +88,7 @@
 		char *buffer, int buffer_size, u32 mode);
 	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
 	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
+	void (*debug_dump)(struct sde_rsc_priv *rsc, u32 mux_sel);
 	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
 	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
 	int (*mode_ctrl)(struct sde_rsc_priv *rsc, enum rsc_mode_req request,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index eb9b278..a4cb824 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -192,6 +192,10 @@
 		}
 	}
 
+#ifdef __BIG_ENDIAN
+	pci->msi = false;
+#endif
+
 	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
 	if (pci->msi && func->msi_rearm) {
 		pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7..a2ec6d8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@
 	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
 
 	/* Signal polarities */
-	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
-	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
 	      | DSMR_DIPM_DISP | DSMR_CSPM;
 	rcar_du_crtc_write(rcrtc, DSMR, value);
 
@@ -172,7 +172,7 @@
 					mode->crtc_vsync_start - 1);
 	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);
 
-	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start);
+	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start - 1);
 	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
 }
 
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index cfc302c..c58602b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -453,13 +453,13 @@
 	}
 
 	ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
-	of_node_put(encoder);
-	of_node_put(connector);
-
 	if (ret && ret != -EPROBE_DEFER)
 		dev_warn(rcdu->dev,
-			 "failed to initialize encoder %s (%d), skipping\n",
-			 encoder->full_name, ret);
+			 "failed to initialize encoder %s on output %u (%d), skipping\n",
+			 of_node_full_name(encoder), output, ret);
+
+	of_node_put(encoder);
+	of_node_put(connector);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index a37de5d..ddd6badd 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@
 		} else {
 			pr_err("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
-			list_for_each_entry(p, &pool->list, lru) {
+			list_for_each_entry(p, &new_pages, lru) {
 				++cpages;
 			}
 			list_splice(&new_pages, &pool->list);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 2242a80..dc2976c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -337,7 +337,7 @@
 	info->fbops = &virtio_gpufb_ops;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-	info->screen_base = obj->vmap;
+	info->screen_buffer = obj->vmap;
 	info->screen_size = obj->gem_base.size;
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e112fd1..c2eb9ea 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -764,6 +764,29 @@
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
 #define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
 
+/* GBIF registers */
+#define A6XX_GBIF_HALT                    0x3c45
+#define A6XX_GBIF_HALT_ACK                0x3c46
+#define A6XX_GBIF_HALT_MASK               0x1
+
+#define A6XX_GBIF_PERF_PWR_CNT_EN         0x3cc0
+#define A6XX_GBIF_PERF_CNT_SEL            0x3cc2
+#define A6XX_GBIF_PERF_CNT_LOW0           0x3cc4
+#define A6XX_GBIF_PERF_CNT_LOW1           0x3cc5
+#define A6XX_GBIF_PERF_CNT_LOW2           0x3cc6
+#define A6XX_GBIF_PERF_CNT_LOW3           0x3cc7
+#define A6XX_GBIF_PERF_CNT_HIGH0          0x3cc8
+#define A6XX_GBIF_PERF_CNT_HIGH1          0x3cc9
+#define A6XX_GBIF_PERF_CNT_HIGH2          0x3cca
+#define A6XX_GBIF_PERF_CNT_HIGH3          0x3ccb
+#define A6XX_GBIF_PWR_CNT_LOW0            0x3ccc
+#define A6XX_GBIF_PWR_CNT_LOW1            0x3ccd
+#define A6XX_GBIF_PWR_CNT_LOW2            0x3cce
+#define A6XX_GBIF_PWR_CNT_HIGH0           0x3ccf
+#define A6XX_GBIF_PWR_CNT_HIGH1           0x3cd0
+#define A6XX_GBIF_PWR_CNT_HIGH2           0x3cd1
+
 /* CX_DBGC_CFG registers */
 #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A                   0x18400
 #define A6XX_CX_DBGC_CFG_DBGBUS_SEL_B                   0x18401
@@ -935,6 +958,7 @@
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
 
 /* GMU RSC control registers */
+#define A6XX_GPU_RSCC_RSC_STATUS0_DRV0		0x23404
 #define A6XX_GMU_RSCC_CONTROL_REQ		0x23B07
 #define A6XX_GMU_RSCC_CONTROL_ACK		0x23B08
 
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 780cefe..7943745 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2870,7 +2870,7 @@
 			gpu_busy += adj;
 		}
 
-		if (kgsl_gmu_isenabled(device)) {
+		if (adreno_is_a6xx(adreno_dev)) {
 			/* clock sourced from XO */
 			stats->busy_time = gpu_busy * 10 / 192;
 		} else {
@@ -3091,6 +3091,7 @@
 	.irq_handler = adreno_irq_handler,
 	.drain = adreno_drain,
 	/* Optional functions */
+	.snapshot_gmu = adreno_snapshot_gmu,
 	.drawctxt_create = adreno_drawctxt_create,
 	.drawctxt_detach = adreno_drawctxt_detach,
 	.drawctxt_destroy = adreno_drawctxt_destroy,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 49dc5ed..342a85a 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -621,6 +621,8 @@
 	ADRENO_REG_RBBM_RBBM_CTL,
 	ADRENO_REG_UCHE_INVALIDATE0,
 	ADRENO_REG_UCHE_INVALIDATE1,
+	ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+	ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
 	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
 	ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
 	ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
@@ -634,6 +636,8 @@
 	ADRENO_REG_VBIF_XIN_HALT_CTRL0,
 	ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 	ADRENO_REG_VBIF_VERSION,
+	ADRENO_REG_GBIF_HALT,
+	ADRENO_REG_GBIF_HALT_ACK,
 	ADRENO_REG_GMU_AO_INTERRUPT_EN,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR,
 	ADRENO_REG_GMU_AO_HOST_INTERRUPT_STATUS,
@@ -832,6 +836,7 @@
 	/* GPU specific function hooks */
 	void (*irq_trace)(struct adreno_device *, unsigned int status);
 	void (*snapshot)(struct adreno_device *, struct kgsl_snapshot *);
+	void (*snapshot_gmu)(struct adreno_device *, struct kgsl_snapshot *);
 	void (*platform_setup)(struct adreno_device *);
 	void (*init)(struct adreno_device *);
 	void (*remove)(struct adreno_device *);
@@ -1006,6 +1011,9 @@
 		struct kgsl_snapshot *snapshot,
 		struct kgsl_context *context);
 
+void adreno_snapshot_gmu(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot);
+
 int adreno_reset(struct kgsl_device *device, int fault);
 
 void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
@@ -1166,6 +1174,12 @@
 		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0);
 }
 
+static inline int adreno_is_a630v2(struct adreno_device *adreno_dev)
+{
+	return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A630) &&
+		(ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1);
+}
+
 /*
  * adreno_checkreg_off() - Checks the validity of a register enum
  * @adreno_dev:		Pointer to adreno device
@@ -1697,21 +1711,60 @@
 	spin_unlock_irqrestore(&rb->preempt_lock, flags);
 }
 
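+/*
+ * Detect a wrap of the RBBM power counter by comparing the cached high
+ * word against the current RBBM_PERFCTR_RBBM_0_HI value.
+ */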
+static inline bool is_power_counter_overflow(struct adreno_device *adreno_dev,
+	unsigned int reg, unsigned int prev_val, unsigned int *perfctr_pwr_hi)
+{
+	unsigned int val;
+	bool ret = false;
+
+	/*
+	 * If prev_val is zero, this is the first read after a perf counter
+	 * reset, so clear the cached perfctr_pwr_hi value as well.
+	 */
+	if (prev_val == 0) {
+		*perfctr_pwr_hi = 0;
+		return ret;
+	}
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val);
+	if (val != *perfctr_pwr_hi) {
+		*perfctr_pwr_hi = val;
+		ret = true;
+	}
+	return ret;
+}
+
 static inline unsigned int counter_delta(struct kgsl_device *device,
 			unsigned int reg, unsigned int *counter)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int val;
 	unsigned int ret = 0;
+	bool overflow = true;
+	static unsigned int perfctr_pwr_hi;
 
 	/* Read the value */
 	kgsl_regread(device, reg, &val);
 
+	if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg
+		(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO))
+		overflow = is_power_counter_overflow(adreno_dev, reg,
+				*counter, &perfctr_pwr_hi);
+
 	/* Return 0 for the first read */
 	if (*counter != 0) {
-		if (val < *counter)
-			ret = (0xFFFFFFFF - *counter) + val;
-		else
+		if (val >= *counter) {
 			ret = val - *counter;
+		} else if (overflow) {
+			ret = (0xFFFFFFFF - *counter) + val;
+		} else {
+			/*
+			 * KGSL read an abnormal value from the counter,
+			 * so drop it instead of accumulating it.
+			 */
+			pr_warn_once("KGSL: abnormal value 0x%x (0x%x) from perf counter 0x%x\n",
+					val, *counter, reg);
+			return 0;
+		}
 	}
 
 	*counter = val;
@@ -1750,6 +1803,47 @@
 	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
 }
 
+static inline bool adreno_has_gbif(struct adreno_device *adreno_dev)
+{
+	return adreno_is_a615(adreno_dev);
+}
+
+/**
+ * adreno_wait_for_vbif_halt_ack() - wait for the VBIF/GBIF to acknowledge
+ * a previously issued halt request.
+ * @device: Pointer to the KGSL device
+ * @ack_reg: register offset to poll for the acknowledgment
+ */
+static inline int adreno_wait_for_vbif_halt_ack(struct kgsl_device *device,
+	int ack_reg)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned long wait_for_vbif;
+	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+	unsigned int val;
+	int ret = 0;
+
+	/* wait for the transactions to clear */
+	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	while (1) {
+		adreno_readreg(adreno_dev, ack_reg,
+			&val);
+		if ((val & mask) == mask)
+			break;
+		if (time_after(jiffies, wait_for_vbif)) {
+			KGSL_DRV_ERR(device,
+				"Wait limit reached for VBIF XIN Halt\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /**
  * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
  * @device: Pointer to the device whose VBIF pipe is to be cleared
@@ -1760,26 +1854,20 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	unsigned int val;
-	unsigned long wait_for_vbif;
 	int ret = 0;
 
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
-	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
-	while (1) {
-		adreno_readreg(adreno_dev,
-			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
-		if ((val & mask) == mask)
-			break;
-		if (time_after(jiffies, wait_for_vbif)) {
-			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
-			ret = -ETIMEDOUT;
-			break;
-		}
+	if (adreno_has_gbif(adreno_dev)) {
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, mask);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_GBIF_HALT_ACK);
+		adreno_writereg(adreno_dev, ADRENO_REG_GBIF_HALT, 0);
+	} else {
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0,
+			mask);
+		ret = adreno_wait_for_vbif_halt_ack(device,
+				ADRENO_REG_VBIF_XIN_HALT_CTRL1);
+		adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	}
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
 	return ret;
 }
 
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 356d7c2..b2cdf56 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1533,6 +1533,10 @@
 			A3XX_UCHE_CACHE_INVALIDATE0_REG),
 	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1,
 			A3XX_UCHE_CACHE_INVALIDATE1_REG),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+			A3XX_RBBM_PERFCTR_RBBM_0_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+			A3XX_RBBM_PERFCTR_RBBM_0_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
 				A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 0cf909e..80ceabd 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -808,6 +808,10 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD),
 	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0),
 	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+				A4XX_RBBM_PERFCTR_RBBM_0_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+				A4XX_RBBM_PERFCTR_RBBM_0_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
 				A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 6cddd08..2d078ba 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2998,6 +2998,10 @@
 		ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
 					  A5XX_RBBM_BLOCK_SW_RESET_CMD2),
 	ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+				A5XX_RBBM_PERFCTR_RBBM_0_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+				A5XX_RBBM_PERFCTR_RBBM_0_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
 				A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 78b56bc..6dc62866 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -767,6 +767,8 @@
 
 	crash_dump_valid = false;
 
+	if (!device->snapshot_crashdumper)
+		return;
 	if (capturescript.gpuaddr == 0 || registers.gpuaddr == 0)
 		return;
 
@@ -872,8 +874,7 @@
 		ARRAY_SIZE(a5xx_vbif_snapshot_registers));
 
 	/* Try to run the crash dumper */
-	if (device->snapshot_crashdumper)
-		_a5xx_do_crashdump(device);
+	_a5xx_do_crashdump(device);
 
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
 		snapshot, a5xx_snapshot_registers, NULL);
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index b9df4ec..301a4f8 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -50,10 +50,15 @@
 	{0, 0},
 };
 
-static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
-	{ adreno_is_a630, a630_vbif },
+static const struct adreno_vbif_data a615_gbif[] = {
+	{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
+	{0, 0},
 };
 
+static const struct adreno_vbif_platform a6xx_vbif_platforms[] = {
+	{ adreno_is_a630, a630_vbif },
+	{ adreno_is_a615, a615_gbif },
+};
 
 struct kgsl_hwcg_reg {
 	unsigned int off;
@@ -244,18 +249,6 @@
 	{ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
 };
 
-static void a6xx_platform_setup(struct adreno_device *adreno_dev)
-{
-	uint64_t addr;
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
-	/* Calculate SP local and private mem addresses */
-	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
-	adreno_dev->sp_local_gpuaddr = addr;
-	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
-	gpudev->vbif_xin_halt_ctrl0_mask = A6XX_VBIF_XIN_HALT_CTRL0_MASK;
-}
-
 static void _update_always_on_regs(struct adreno_device *adreno_dev)
 {
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
@@ -369,14 +362,33 @@
 	kgsl_regwrite(device, A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
 }
 
+#define RBBM_CLOCK_CNTL_ON 0x8AA8AA02
 
 static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	const struct kgsl_hwcg_reg *regs;
+	unsigned int value;
 	int i, j;
 
 	if (!test_bit(ADRENO_HWCG_CTRL, &adreno_dev->pwrctrl_flag))
+		on = false;
+
+	if (kgsl_gmu_isenabled(device)) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+			on ? 0x00020222 : 0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+			on ? 0x00010111 : 0);
+		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+			on ? 0x00050555 : 0);
+	}
+
+	kgsl_regread(device, A6XX_RBBM_CLOCK_CNTL, &value);
+
+	if (value == RBBM_CLOCK_CNTL_ON && on)
+		return;
+
+	if (value == 0 && !on)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(a6xx_hwcg_registers); i++) {
@@ -395,19 +407,12 @@
 	for (j = 0; j < a6xx_hwcg_registers[i].count; j++)
 		kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0);
 
-	if (kgsl_gmu_isenabled(device)) {
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
-			0x00020222);
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
-			0x00010111);
-		kgsl_gmu_regwrite(device, A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
-			0x00050555);
-	}
 	/* Enable SP clock */
 	kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
 
 	/* enable top level HWCG */
-	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL, on ? 0x8AA8AA02 : 0);
+	kgsl_regwrite(device, A6XX_RBBM_CLOCK_CNTL,
+		on ? RBBM_CLOCK_CNTL_ON : 0);
 }
 
 #define LM_DEFAULT_LIMIT	6000
@@ -888,8 +893,12 @@
  */
 static void _load_gmu_rpmh_ucode(struct kgsl_device *device)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 
+	/* Disable SDE clock gating */
+	kgsl_gmu_regwrite(device, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
 	/* Setup RSC PDC handshake for sleep and wakeup */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
 	kgsl_gmu_regwrite(device, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
@@ -909,8 +918,9 @@
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
 	kgsl_gmu_regwrite(device, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
 
-	/* Enable timestamp event */
-	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+	/* Enable timestamp event for v1 only */
+	if (adreno_is_a630v1(adreno_dev))
+		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
 
 	/* Load RSC sequencer uCode for sleep and wakeup */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
@@ -920,11 +930,11 @@
 	kgsl_gmu_regwrite(device, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
 
 	/* Load PDC sequencer uCode for power up and power down sequence */
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFFBFA1E1);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xE0A4A3A2);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0xE2848382);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xFDBDE4E3);
-	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x00002081);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 1, 0xA5A4A3A2);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 2, 0x8382A6E0);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 3, 0xBCE3E284);
+	_regwrite(gmu->pdc_reg_virt, PDC_GPU_SEQ_MEM_0 + 4, 0x002081FC);
 
 	/* Set TCS commands used by PDC sequence for low power modes */
 	_regwrite(gmu->pdc_reg_virt, PDC_GPU_TCS0_CMD_ENABLE_BANK, 7);
@@ -1007,6 +1017,11 @@
 		cond_resched();
 	} while (!time_after(jiffies, t));
 
+	/* Double check one last time */
+	kgsl_gmu_regread(device, offset, &value);
+	if ((value & mask) == expected_ret)
+		return 0;
+
 	return -EINVAL;
 }
 
@@ -1430,36 +1445,50 @@
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
 {
 	struct gmu_device *gmu = &device->gmu;
-	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int val;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int ret;
 
-	/* RSC sleep sequence */
-	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+	/* RSC sleep sequence is different on v1 */
+	if (adreno_is_a630v1(adreno_dev))
+		kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
 	wmb();
 
-	if (timed_poll_check(device,
-			A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
-			BIT(0),
-			GPU_START_TIMEOUT,
-			BIT(0))) {
+	if (adreno_is_a630v1(adreno_dev))
+		ret = timed_poll_check(device,
+				A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0,
+				BIT(0),
+				GPU_START_TIMEOUT,
+				BIT(0));
+	else
+		ret = timed_poll_check(device,
+				A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+				BIT(16),
+				GPU_START_TIMEOUT,
+				BIT(16));
+
+	if (ret) {
 		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
-		return -EINVAL;
+		return -ETIMEDOUT;
 	}
 
-	/* Read to clear the timestamp */
-	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
-			&val);
-	kgsl_gmu_regread(device, A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
-			&val);
+	/* Read to clear the timestamp valid signal. Don't care what we read. */
+	if (adreno_is_a630v1(adreno_dev)) {
+		kgsl_gmu_regread(device,
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0,
+				&ret);
+		kgsl_gmu_regread(device,
+				A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0,
+				&ret);
+	}
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
-		test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
+			test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
 		kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);
 
-	/* FIXME: v2 has different procedure to trigger sequence */
-
 	return 0;
 }
 
@@ -2685,6 +2714,27 @@
 		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
 };
 
+static struct adreno_perfcount_register a6xx_perfcounters_gbif[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW0,
+		A6XX_GBIF_PERF_CNT_HIGH0, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW1,
+		A6XX_GBIF_PERF_CNT_HIGH1, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW2,
+		A6XX_GBIF_PERF_CNT_HIGH2, -1, A6XX_GBIF_PERF_CNT_SEL },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PERF_CNT_LOW3,
+		A6XX_GBIF_PERF_CNT_HIGH3, -1, A6XX_GBIF_PERF_CNT_SEL },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW0,
+		A6XX_GBIF_PWR_CNT_HIGH0, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW1,
+		A6XX_GBIF_PWR_CNT_HIGH1, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_GBIF_PWR_CNT_LOW2,
+		A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
+};
+
 static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
 	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
@@ -2795,6 +2845,35 @@
 	return 0;
 }
 
+static void a6xx_platform_setup(struct adreno_device *adreno_dev)
+{
+	uint64_t addr;
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* Calculate SP local and private mem addresses */
+	addr = ALIGN(ADRENO_UCHE_GMEM_BASE + adreno_dev->gmem_size, SZ_64K);
+	adreno_dev->sp_local_gpuaddr = addr;
+	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
+
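+	/*
+	 * Targets with a GBIF instead of a VBIF expose their bus counters
+	 * through different registers: point the bus perfcounter groups at
+	 * the GBIF register sets and use the GBIF halt mask.
+	 */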
+	if (adreno_has_gbif(adreno_dev)) {
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs =
+				a6xx_perfcounters_gbif;
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF].reg_count
+				= ARRAY_SIZE(a6xx_perfcounters_gbif);
+
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
+				a6xx_perfcounters_gbif_pwr;
+		a6xx_perfcounter_groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].reg_count
+				= ARRAY_SIZE(a6xx_perfcounters_gbif_pwr);
+
+		gpudev->vbif_xin_halt_ctrl0_mask =
+				A6XX_GBIF_HALT_MASK;
+	} else
+		gpudev->vbif_xin_halt_ctrl0_mask =
+				A6XX_VBIF_XIN_HALT_CTRL0_MASK;
+}
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -2854,6 +2933,8 @@
 				A6XX_VBIF_XIN_HALT_CTRL0),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_XIN_HALT_CTRL1,
 				A6XX_VBIF_XIN_HALT_CTRL1),
+	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT, A6XX_GBIF_HALT),
+	ADRENO_REG_DEFINE(ADRENO_REG_GBIF_HALT_ACK, A6XX_GBIF_HALT_ACK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_GMU_ALWAYS_ON_COUNTER_L),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
@@ -2915,6 +2996,7 @@
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
 	.snapshot = a6xx_snapshot,
+	.snapshot_gmu = a6xx_snapshot_gmu,
 	.irq = &a6xx_irq,
 	.snapshot_data = &a6xx_snapshot_data,
 	.irq_trace = trace_kgsl_a5xx_irq_status,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index ee2fd71..dd8af80 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -110,6 +110,8 @@
 
 void a6xx_snapshot(struct adreno_device *adreno_dev,
 		struct kgsl_snapshot *snapshot);
+void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
+		struct kgsl_snapshot *snapshot);
 
 void a6xx_crashdump_init(struct adreno_device *adreno_dev);
 #endif
diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c
index 1d5f4a5..ca011e4 100644
--- a/drivers/gpu/msm/adreno_a6xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a6xx_preempt.c
@@ -635,6 +635,9 @@
 		return;
 
 	gpumem_free_entry(context->user_ctxt_record);
+
+	/* Put the extra ref from gpumem_alloc_entry() */
+	kgsl_mem_entry_put(context->user_ctxt_record);
 }
 
 int a6xx_preemption_context_init(struct kgsl_context *context)
@@ -645,6 +648,10 @@
 	if (!adreno_is_preemption_setup_enabled(adreno_dev))
 		return 0;
 
+	/*
+	 * gpumem_alloc_entry takes an extra refcount. Put it only when
+	 * destroying the context to keep the context record valid
+	 */
 	context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv,
 			A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0);
 	if (IS_ERR(context->user_ctxt_record)) {
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 80c9a61..755fc7f 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -569,38 +569,33 @@
 	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
 
 static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
-		u8 *buf, size_t remain)
+		u8 *buf, size_t remain, struct reg_list *regs)
 {
-	unsigned int i;
-	size_t used = 0;
+	struct kgsl_snapshot_registers snapshot_regs = {
+		.regs = regs->regs,
+		.count = regs->count,
+	};
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
-		struct reg_list *regs = &a6xx_reg_list[i];
-		struct kgsl_snapshot_registers snapshot_regs = {
-			.regs = regs->regs,
-			.count = regs->count,
-		};
+	if (regs->sel)
+		kgsl_regwrite(device, regs->sel->host_reg, regs->sel->val);
 
-		if (regs->sel)
-			kgsl_regwrite(device, regs->sel->host_reg,
-				regs->sel->val);
-		used += kgsl_snapshot_dump_registers(device, buf + used,
-						remain - used, &snapshot_regs);
-	}
-	return used;
+	return kgsl_snapshot_dump_registers(device, buf, remain,
+		&snapshot_regs);
 }
 
 static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
 		size_t remain, void *priv)
 {
 	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct reg_list *regs = (struct reg_list *)priv;
 	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
 	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
-	unsigned int i, j, k;
+	unsigned int j, k;
 	unsigned int count = 0;
 
 	if (crash_dump_valid == false)
-		return a6xx_legacy_snapshot_registers(device, buf, remain);
+		return a6xx_legacy_snapshot_registers(device, buf, remain,
+			regs);
 
 	if (remain < sizeof(*header)) {
 		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
@@ -609,24 +604,20 @@
 
 	remain -= sizeof(*header);
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
-		struct reg_list *regs = &a6xx_reg_list[i];
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[2 * j];
+		unsigned int end = regs->regs[(2 * j) + 1];
 
-		for (j = 0; j < regs->count; j++) {
-			unsigned int start = regs->regs[2 * j];
-			unsigned int end = regs->regs[(2 * j) + 1];
+		if (remain < ((end - start) + 1) * 8) {
+			SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+			goto out;
+		}
 
-			if (remain < ((end - start) + 1) * 8) {
-				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-				goto out;
-			}
+		remain -= ((end - start) + 1) * 8;
 
-			remain -= ((end - start) + 1) * 8;
-
-			for (k = start; k <= end; k++, count++) {
-				*data++ = k;
-				*data++ = *src++;
-			}
+		for (k = start; k <= end; k++, count++) {
+			*data++ = k;
+			*data++ = *src++;
 		}
 	}
 
@@ -1366,6 +1357,7 @@
 		struct kgsl_snapshot *snapshot)
 {
 	int i;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
 	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
 		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
@@ -1456,9 +1448,12 @@
 			(void *) &a6xx_dbgc_debugbus_blocks[i]);
 	}
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUGBUS,
-			snapshot, a6xx_snapshot_vbif_debugbus_block,
-			(void *) &a6xx_vbif_debugbus_blocks);
+	/* Skip if GPU has GBIF */
+	if (!adreno_has_gbif(adreno_dev))
+		kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+				snapshot, a6xx_snapshot_vbif_debugbus_block,
+				(void *) &a6xx_vbif_debugbus_blocks);
 
 	if (a6xx_cx_dbgc) {
 		for (i = 0; i < ARRAY_SIZE(a6xx_cx_dbgc_debugbus_blocks); i++) {
@@ -1471,10 +1466,18 @@
 	}
 }
 
-static void a6xx_snapshot_gmu(struct kgsl_device *device,
+/*
+ * a6xx_snapshot_gmu() - A6XX GMU snapshot function
+ * @adreno_dev: Device being snapshotted
+ * @snapshot: Pointer to the snapshot instance
+ *
+ * This is where all of the A6XX GMU specific bits and pieces are grabbed
+ * into the snapshot memory
+ */
+void a6xx_snapshot_gmu(struct adreno_device *adreno_dev,
 		struct kgsl_snapshot *snapshot)
 {
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (!kgsl_gmu_isenabled(device))
@@ -1519,6 +1522,8 @@
 
 	crash_dump_valid = false;
 
+	if (!device->snapshot_crashdumper)
+		return;
 	if (a6xx_capturescript.gpuaddr == 0 ||
 		a6xx_crashdump_registers.gpuaddr == 0)
 		return;
@@ -1570,33 +1575,32 @@
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
 	bool sptprac_on;
-
-	/* GMU TCM data dumped through AHB */
-	a6xx_snapshot_gmu(device, snapshot);
+	unsigned int i;
 
 	sptprac_on = gpudev->sptprac_is_on(adreno_dev);
 
 	/* Return if the GX is off */
-	if (!gpudev->gx_is_on(adreno_dev)) {
-		pr_err("GX is off. Only dumping GMU data in snapshot\n");
+	if (!gpudev->gx_is_on(adreno_dev))
 		return;
-	}
 
 	/* Dump the registers which get affected by crash dumper trigger */
 	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
 		snapshot, a6xx_snapshot_pre_crashdump_regs, NULL);
 
 	/* Dump vbif registers as well which get affected by crash dumper */
-	adreno_snapshot_vbif_registers(device, snapshot,
-		a6xx_vbif_snapshot_registers,
-		ARRAY_SIZE(a6xx_vbif_snapshot_registers));
+	if (!adreno_has_gbif(adreno_dev))
+		adreno_snapshot_vbif_registers(device, snapshot,
+			a6xx_vbif_snapshot_registers,
+			ARRAY_SIZE(a6xx_vbif_snapshot_registers));
 
 	/* Try to run the crash dumper */
 	if (sptprac_on)
 		_a6xx_do_crashdump(device);
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-		snapshot, a6xx_snapshot_registers, NULL);
+	for (i = 0; i < ARRAY_SIZE(a6xx_reg_list); i++) {
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
+	}
 
 	/* CP_SQE indexed registers */
 	kgsl_snapshot_indexed_registers(device, snapshot,
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 21ecaa1..cb42a70 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2066,7 +2066,8 @@
 	adreno_fault_header(device, rb, cmdobj);
 
 	if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
-		kgsl_device_snapshot(device, NULL, fault & ADRENO_GMU_FAULT);
+		kgsl_device_snapshot(device, drawobj->context,
+					fault & ADRENO_GMU_FAULT);
 }
 
 static int dispatcher_do_fault(struct adreno_device *adreno_dev)
@@ -2078,7 +2079,7 @@
 	struct adreno_ringbuffer *rb;
 	struct adreno_ringbuffer *hung_rb = NULL;
 	unsigned int reg;
-	uint64_t base;
+	uint64_t base = 0;
 	struct kgsl_drawobj_cmd *cmdobj = NULL;
 	int ret, i;
 	int fault;
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 0da4da9..dfce3eb 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -28,6 +28,20 @@
 /* offset of enable register from select register */
 #define VBIF2_PERF_EN_REG_SEL_OFF 16
 
+/* offset of clear register from select register for GBIF */
+#define GBIF_PERF_CLR_REG_SEL_OFF 1
+
+/* offset of enable register from select register for GBIF */
+#define GBIF_PERF_EN_REG_SEL_OFF  2
+
+/* offset of clear register from the power enable register for GBIF */
+#define GBIF_PWR_CLR_REG_EN_OFF    1
+
+/* mask for the 8-bit countable select field of each GBIF perf counter */
+#define GBIF_PERF_RMW_MASK   0xFF
+
+/* mask for the per-counter enable/clear bit of the GBIF power counters */
+#define GBIF_PWR_RMW_MASK    0x10000
+
 /* offset of clear register from the enable register */
 #define VBIF2_PERF_PWR_CLR_REG_EN_OFF 8
 
@@ -612,14 +626,41 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
+	unsigned int shift = counter << 3;
 
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs[counter];
-	/* Write 1, followed by 0 to CLR register for clearing the counter */
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 1);
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 0);
-	kgsl_regwrite(device, reg->select, countable & VBIF2_PERF_CNT_SEL_MASK);
-	/* enable reg is 8 DWORDS before select reg */
-	kgsl_regwrite(device, reg->select - VBIF2_PERF_EN_REG_SEL_OFF, 1);
+
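+	/*
+	 * The GBIF perf counters share a single select register with an
+	 * 8-bit countable field per counter, so program them with RMW
+	 * accesses instead of whole-register writes.
+	 */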
+	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
+			1 << counter, 1);
+		kgsl_regrmw(device, reg->select - GBIF_PERF_CLR_REG_SEL_OFF,
+			1 << counter, 0);
+		/* select the desired countable */
+		kgsl_regrmw(device, reg->select,
+			GBIF_PERF_RMW_MASK << shift, countable << shift);
+		/* enable counter */
+		kgsl_regrmw(device, reg->select - GBIF_PERF_EN_REG_SEL_OFF,
+			1 << counter, 1);
+
+	} else {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 1);
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_CLR_REG_SEL_OFF, 0);
+		kgsl_regwrite(device,
+			reg->select, countable & VBIF2_PERF_CNT_SEL_MASK);
+		/* enable reg is 8 DWORDS before select reg */
+		kgsl_regwrite(device,
+			reg->select - VBIF2_PERF_EN_REG_SEL_OFF, 1);
+	}
 	reg->value = 0;
 }
 
@@ -630,10 +671,30 @@
 	struct adreno_perfcount_register *reg;
 
 	reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
-	/* Write 1, followed by 0 to CLR register for clearing the counter */
-	kgsl_regwrite(device, reg->select + VBIF2_PERF_PWR_CLR_REG_EN_OFF, 1);
-	kgsl_regwrite(device, reg->select + VBIF2_PERF_PWR_CLR_REG_EN_OFF, 0);
-	kgsl_regwrite(device, reg->select, 1);
+
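+	/*
+	 * The GBIF power counters share one enable register with a single
+	 * control bit per counter, so use RMW accesses here as well.
+	 */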
+	if (adreno_has_gbif(adreno_dev)) {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
+			GBIF_PWR_RMW_MASK << counter, 1);
+		kgsl_regrmw(device, reg->select + GBIF_PWR_CLR_REG_EN_OFF,
+			GBIF_PWR_RMW_MASK << counter, 0);
+		/* Enable the counter */
+		kgsl_regrmw(device, reg->select,
+			GBIF_PWR_RMW_MASK << counter, 1);
+	} else {
+		/*
+		 * Write 1, followed by 0 to CLR register for
+		 * clearing the counter
+		 */
+		kgsl_regwrite(device, reg->select +
+			VBIF2_PERF_PWR_CLR_REG_EN_OFF, 1);
+		kgsl_regwrite(device, reg->select +
+			VBIF2_PERF_PWR_CLR_REG_EN_OFF, 0);
+		kgsl_regwrite(device, reg->select, 1);
+	}
 	reg->value = 0;
 }
 
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index f608927..b5999e6 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -837,8 +837,7 @@
 
 	snapshot_frozen_objsize = 0;
 
-	if (!IS_ERR(context))
-		setup_fault_process(device, snapshot,
+	setup_fault_process(device, snapshot,
 			context ? context->proc_priv : NULL);
 
 	/* Add GPU specific sections - registers mainly, but other stuff too */
@@ -946,6 +945,24 @@
 
 }
 
+/* adreno_snapshot_gmu - Snapshot the Adreno GMU state
+ * @device - KGSL device to snapshot
+ * @snapshot - Pointer to the snapshot instance
+ * This is a hook function called by kgsl_snapshot to snapshot the
+ * Adreno specific information for the GMU snapshot.  In turn, this function
+ * calls the GMU specific snapshot function to get core specific information.
+ */
+void adreno_snapshot_gmu(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* Add GMU specific sections */
+	if (gpudev->snapshot_gmu)
+		gpudev->snapshot_gmu(adreno_dev, snapshot);
+}
+
 /*
  * adreno_snapshot_cp_roq - Dump CP merciu data in snapshot
  * @device: Device being snapshotted
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 7dff251..0a7d165 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2280,7 +2280,7 @@
 		struct kgsl_mem_entry *entry,
 		struct kgsl_gpuobj_import *param)
 {
-	struct kgsl_gpuobj_import_useraddr useraddr;
+	struct kgsl_gpuobj_import_useraddr useraddr = {0};
 	int ret;
 
 	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index f8d5f5b..0ab775a 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -151,6 +151,8 @@
 	unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
 	void (*snapshot)(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot, struct kgsl_context *context);
+	void (*snapshot_gmu)(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot);
 	irqreturn_t (*irq_handler)(struct kgsl_device *device);
 	int (*drain)(struct kgsl_device *device);
 	/*
@@ -706,7 +708,6 @@
 void kgsl_device_snapshot(struct kgsl_device *device,
 			struct kgsl_context *context, bool gmu_fault);
 void kgsl_device_snapshot_close(struct kgsl_device *device);
-void kgsl_snapshot_save_frozen_objs(struct work_struct *work);
 
 void kgsl_events_init(void);
 void kgsl_events_exit(void);
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 3bd7cf3..82f0c11 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1149,7 +1149,6 @@
 		goto error;
 
 	gmu->num_gpupwrlevels = pwr->num_pwrlevels;
-	gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
 
 	for (i = 0; i < gmu->num_gpupwrlevels; i++) {
 		int j = gmu->num_gpupwrlevels - 1 - i;
@@ -1366,7 +1365,7 @@
 		/* Wait for the NMI to be handled */
 		wmb();
 		udelay(100);
-		kgsl_device_snapshot(device, ERR_PTR(-EINVAL), true);
+		kgsl_device_snapshot(device, NULL, true);
 
 		adreno_write_gmureg(adreno_dev,
 				ADRENO_REG_GMU_GMU2HOST_INTR_CLR, 0xFFFFFFFF);
@@ -1411,11 +1410,8 @@
 		if (ret)
 			goto error_gmu;
 
-		/* Send default DCVS level */
-		ret = gmu_dcvs_set(gmu, pwr->default_pwrlevel,
-				pwr->pwrlevels[pwr->default_pwrlevel].bus_freq);
-		if (ret)
-			goto error_gmu;
+		/* Request default DCVS level */
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
 
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 		break;
@@ -1435,12 +1431,7 @@
 		if (ret)
 			goto error_gmu;
 
-		ret = gmu_dcvs_set(gmu, gmu->wakeup_pwrlevel,
-				pwr->pwrlevels[gmu->wakeup_pwrlevel].bus_freq);
-		if (ret)
-			goto error_gmu;
-
-		gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
 		break;
 
 	case KGSL_STATE_RESET:
@@ -1462,11 +1453,8 @@
 				goto error_gmu;
 
 			/* Send DCVS level prior to reset*/
-			ret = gmu_dcvs_set(gmu, pwr->active_pwrlevel,
-					pwr->pwrlevels[pwr->active_pwrlevel]
-					.bus_freq);
-			if (ret)
-				goto error_gmu;
+			kgsl_pwrctrl_pwrlevel_change(device,
+				pwr->default_pwrlevel);
 
 			ret = gpudev->oob_set(adreno_dev,
 				OOB_CPINIT_SET_MASK,
@@ -1519,6 +1507,14 @@
 		cond_resched();
 	}
 
+	/* Double check one last time */
+	if (idle == false) {
+		adreno_read_gmureg(ADRENO_DEVICE(device),
+			ADRENO_REG_GMU_RPMH_POWER_STATE, &reg);
+		if (reg == device->gmu.idle_level)
+			idle = true;
+	}
+
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
 
 	if (!idle || (gpudev->wait_for_gmu_idle &&
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index ff65f66..a5dc692 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -82,7 +82,8 @@
 	GMU_BOOT_INIT_DONE = 0,
 	GMU_CLK_ON = 1,
 	GMU_HFI_ON = 2,
-	GMU_FAULT = 3
+	GMU_FAULT = 3,
+	GMU_DCVS_REPLAY = 4,
 };
 
 /**
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 007121c..97e5c22 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -244,12 +244,9 @@
 	/* GMU scales GPU freq */
 	if (kgsl_gmu_isenabled(device)) {
 		/* If GMU has not been started, save it */
-		if (!(gmu->flags & GMU_HFI_ON)) {
-			/* In slumber the clock is off so we are done */
-			if (pwrlevel == (gmu->num_gpupwrlevels - 1))
-				return 0;
-
-			gmu->wakeup_pwrlevel = pwrlevel;
+		if (!test_bit(GMU_HFI_ON, &gmu->flags)) {
+			/* store clock change request */
+			set_bit(GMU_DCVS_REPLAY, &gmu->flags);
 			return 0;
 		}
 
@@ -259,6 +256,8 @@
 			return -EINVAL;
 		}
 		ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
+		/* indicate actual clock change */
+		clear_bit(GMU_DCVS_REPLAY, &gmu->flags);
 	} else
 		/* Linux clock driver scales GPU freq */
 		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
@@ -412,7 +411,8 @@
 	 */
 	kgsl_pwrctrl_set_thermal_cycle(pwr, new_level);
 
-	if (new_level == old_level)
+	if (new_level == old_level &&
+		!test_bit(GMU_DCVS_REPLAY, &device->gmu.flags))
 		return;
 
 	kgsl_pwrscale_update_stats(device);
@@ -2485,6 +2485,9 @@
 static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 {
 	if (kgsl_gmu_isenabled(device)) {
+		struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+		pwr->active_pwrlevel = pwr->num_pwrlevels - 1;
 		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
 		return gmu_stop(device);
 	}
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index f9494a4..f710d8f 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -24,6 +24,8 @@
 #include "kgsl_snapshot.h"
 #include "adreno_cp_parser.h"
 
+static void kgsl_snapshot_save_frozen_objs(struct work_struct *work);
+
 /* Placeholder for list of ib objects that contain all objects in that IB */
 
 struct kgsl_snapshot_cp_obj {
@@ -182,8 +184,7 @@
 	context = kgsl_context_get(device, header->current_context);
 
 	/* Get the current PT base */
-	if (!IS_ERR(priv))
-		header->ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);
+	header->ptbase = kgsl_mmu_get_current_ttbr0(&device->mmu);
 
 	/* And the PID for the task leader */
 	if (context) {
@@ -207,6 +208,44 @@
 	return size;
 }
 
+/* Snapshot the Linux specific information for a GMU-only (no context) dump */
+static size_t snapshot_os_no_ctxt(struct kgsl_device *device,
+	u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_linux_v2 *header =
+		(struct kgsl_snapshot_linux_v2 *)buf;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	size_t size = sizeof(*header);
+
+	/* Make sure there is enough room for the data */
+	if (remain < size) {
+		SNAPSHOT_ERR_NOMEM(device, "OS");
+		return 0;
+	}
+
+	memset(header, 0, sizeof(*header));
+
+	header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;
+
+	/* Get the kernel build information */
+	strlcpy(header->release, init_utsname()->release,
+			sizeof(header->release));
+	strlcpy(header->version, init_utsname()->version,
+			sizeof(header->version));
+
+	/* Get the Unix time for the timestamp */
+	header->seconds = get_seconds();
+
+	/* Remember the power information */
+	header->power_flags = pwr->power_flags;
+	header->power_level = pwr->active_pwrlevel;
+	header->power_interval_timeout = pwr->interval_timeout;
+	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
+
+	/* Return the size of the data segment */
+	return size;
+}
+
 static void kgsl_snapshot_put_object(struct kgsl_snapshot_object *obj)
 {
 	list_del(&obj->node);
@@ -663,14 +702,24 @@
 	snapshot->size += sizeof(*header);
 
 	/* Build the Linux specific header */
-	/* Context err is implied a GMU fault, so limit dump */
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
-			snapshot, snapshot_os,
-			IS_ERR(context) ? context : NULL);
+	/* We either want to only dump GMU, or we want to dump GPU and GMU */
+	if (gmu_fault) {
+		/* Dump only the GMU */
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
+				snapshot, snapshot_os_no_ctxt, NULL);
 
-	/* Get the device specific sections */
-	if (device->ftbl->snapshot)
-		device->ftbl->snapshot(device, snapshot, context);
+		if (device->ftbl->snapshot_gmu)
+			device->ftbl->snapshot_gmu(device, snapshot);
+	} else {
+		/* Dump GPU and GMU */
+		kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
+				snapshot, snapshot_os, NULL);
+
+		if (device->ftbl->snapshot)
+			device->ftbl->snapshot(device, snapshot, context);
+		if (device->ftbl->snapshot_gmu)
+			device->ftbl->snapshot_gmu(device, snapshot);
+	}
 
 	/*
 	 * The timestamp is the seconds since boot so it is easier to match to
@@ -1193,7 +1242,7 @@
  * is taken
  * @work: The work item that scheduled this work
  */
-void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
+static void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
 {
 	struct kgsl_snapshot *snapshot = container_of(work,
 				struct kgsl_snapshot, work);
@@ -1205,6 +1254,9 @@
 	if (IS_ERR_OR_NULL(device))
 		return;
 
+	if (snapshot->gmu_fault)
+		goto gmu_only;
+
 	kgsl_snapshot_process_ib_obj_list(snapshot);
 
 	list_for_each_entry(obj, &snapshot->obj_list, node) {
@@ -1251,6 +1303,7 @@
 			       "snapshot: Active IB2:%016llx not dumped\n",
 				snapshot->ib2base);
 
+gmu_only:
 	complete_all(&snapshot->dump_gate);
 	BUG_ON(device->force_panic);
 }
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ae494d7..8d242f8 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -156,6 +156,17 @@
           occurrence. These events can be controlled by using module
           parameters.
 
+config CORESIGHT_TGU
+	bool "CoreSight Trigger Generation Unit driver"
+	help
+	  This driver provides support for the Trigger Generation Unit (TGU),
+	  which is used to detect patterns or sequences on a given set of
+	  signals. A TGU can monitor a particular bus within a given region
+	  to detect illegal transaction sequences or slave responses. It can
+	  also monitor a data stream to detect protocol violations and
+	  provide a trigger point for centering data around a specific event
+	  within the trace data buffer.
+
 config CORESIGHT_CSR
 	bool "CoreSight Slave Register driver"
 	help
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 0b5e434..cb47ecd 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -5,7 +5,8 @@
 obj-$(CONFIG_OF) += of_coresight.o
 obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
 					     coresight-tmc-etf.o \
-					     coresight-tmc-etr.o
+					     coresight-tmc-etr.o \
+					     coresight-byte-cntr.o
 obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
 obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
 obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
@@ -21,6 +22,7 @@
 obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
 obj-$(CONFIG_CORESIGHT_EVENT) += coresight-event.o
 obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_TGU) += coresight-tgu.o
 obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
 obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
 obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
new file mode 100644
index 0000000..496738c
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/of_irq.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+
+#include "coresight-byte-cntr.h"
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+static struct tmc_drvdata *tmcdrvdata;
+
+static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, loff_t *ppos,
+			       size_t bytes, size_t *len)
+{
+	if (*len >= bytes) {
+		atomic_dec(&byte_cntr_data->irq_cnt);
+		*len = bytes;
+	} else {
+		if (((uint32_t)*ppos % bytes) + *len > bytes)
+			*len = bytes - ((uint32_t)*ppos % bytes);
+		if ((*len + (uint32_t)*ppos) % bytes == 0)
+			atomic_dec(&byte_cntr_data->irq_cnt);
+	}
+}
+
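+/*
+ * Translate a linear read offset (*ppos) into a kernel virtual address
+ * inside the ETR scatter-gather buffer: walk the SG page tables to the
+ * block holding the offset and clamp *len to what can be read from that
+ * block (and, for the no-IRQ flush case, up to the current write pointer).
+ */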
+static void tmc_etr_sg_read_pos(loff_t *ppos,
+				size_t bytes, bool noirq, size_t *len,
+				char **bufpp)
+{
+	uint32_t rwp, i = 0;
+	uint32_t blk_num, sg_tbl_num, blk_num_loc, read_off;
+	uint32_t *virt_pte, *virt_st_tbl;
+	void *virt_blk;
+	phys_addr_t phys_pte;
+	int total_ents = DIV_ROUND_UP(tmcdrvdata->size, PAGE_SIZE);
+	int ents_per_pg = PAGE_SIZE/sizeof(uint32_t);
+
+	if (*len == 0)
+		return;
+
+	blk_num = *ppos / PAGE_SIZE;
+	read_off = *ppos % PAGE_SIZE;
+
+	virt_st_tbl = (uint32_t *)tmcdrvdata->vaddr;
+
+	/* Compute table index and block entry index within that table */
+	if (blk_num && (blk_num == (total_ents - 1)) &&
+	    !(blk_num % (ents_per_pg - 1))) {
+		sg_tbl_num = blk_num / ents_per_pg;
+		blk_num_loc = ents_per_pg - 1;
+	} else {
+		sg_tbl_num = blk_num / (ents_per_pg - 1);
+		blk_num_loc = blk_num % (ents_per_pg - 1);
+	}
+
+	for (i = 0; i < sg_tbl_num; i++) {
+		virt_pte = virt_st_tbl + (ents_per_pg - 1);
+		phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+		virt_st_tbl = (uint32_t *)phys_to_virt(phys_pte);
+	}
+
+	virt_pte = virt_st_tbl + blk_num_loc;
+	phys_pte = TMC_ETR_SG_ENT_TO_BLK(*virt_pte);
+	virt_blk = phys_to_virt(phys_pte);
+
+	*bufpp = (char *)(virt_blk + read_off);
+
+	if (noirq) {
+		rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
+		tmc_etr_sg_rwp_pos(tmcdrvdata, rwp);
+		if (tmcdrvdata->sg_blk_num == blk_num &&
+		    rwp >= (phys_pte + read_off))
+			*len = rwp - phys_pte - read_off;
+		else if (tmcdrvdata->sg_blk_num > blk_num)
+			*len = PAGE_SIZE - read_off;
+		else
+			*len = bytes;
+	} else {
+
+		if (*len > (PAGE_SIZE - read_off))
+			*len = PAGE_SIZE - read_off;
+
+		if (*len >= (bytes - ((uint32_t)*ppos % bytes)))
+			*len = bytes - ((uint32_t)*ppos % bytes);
+
+		if ((*len + (uint32_t)*ppos) % bytes == 0)
+			atomic_dec(&tmcdrvdata->byte_cntr->irq_cnt);
+	}
+
+	/*
+	 * Invalidate cache range before reading. This will make sure that CPU
+	 * reads latest contents from DDR
+	 */
+	dmac_inv_range((void *)(*bufpp), (void *)(*bufpp) + *len);
+}
+
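+/* Byte counter IRQ: another block_size worth of data is in the ETR buffer */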
+static irqreturn_t etr_handler(int irq, void *data)
+{
+	struct byte_cntr *byte_cntr_data = data;
+
+	atomic_inc(&byte_cntr_data->irq_cnt);
+
+	wake_up(&byte_cntr_data->wq);
+
+	return IRQ_HANDLED;
+}
+
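+/*
+ * No byte counter interrupt is pending: limit the read length to the data
+ * actually written so far, i.e. up to the current ETR write pointer (RWP).
+ */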
+static void tmc_etr_flush_bytes(loff_t *ppos, size_t bytes, size_t *len)
+{
+	uint32_t rwp = 0;
+
+	rwp = readl_relaxed(tmcdrvdata->base + TMC_RWP);
+
+	if (rwp >= (tmcdrvdata->paddr + *ppos)) {
+		if (bytes > (rwp - tmcdrvdata->paddr - *ppos))
+			*len = rwp - tmcdrvdata->paddr - *ppos;
+	}
+}
+
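+/*
+ * Stream trace data from the ETR buffer to userspace in block_size chunks.
+ * While the ETR is enabled, block until the byte counter signals that a
+ * full block is available; once tracing has stopped, drain whatever data
+ * remains in the buffer.
+ */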
+static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
+			       size_t len, loff_t *ppos)
+{
+	struct byte_cntr *byte_cntr_data = fp->private_data;
+	char *bufp;
+
+	if (!data)
+		return -EINVAL;
+
+	mutex_lock(&byte_cntr_data->byte_cntr_lock);
+	if (!byte_cntr_data->read_active)
+		goto err0;
+
+	if (byte_cntr_data->enable) {
+		if (!atomic_read(&byte_cntr_data->irq_cnt)) {
+			mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+			if (wait_event_interruptible(byte_cntr_data->wq,
+				atomic_read(&byte_cntr_data->irq_cnt) > 0))
+				return -ERESTARTSYS;
+			mutex_lock(&byte_cntr_data->byte_cntr_lock);
+			if (!byte_cntr_data->read_active)
+				goto err0;
+		}
+		bufp = (char *)(tmcdrvdata->vaddr + *ppos);
+
+		if (tmcdrvdata->mem_type == TMC_ETR_MEM_TYPE_CONTIG)
+			tmc_etr_read_bytes(byte_cntr_data, ppos,
+					   byte_cntr_data->block_size, &len);
+		else
+			tmc_etr_sg_read_pos(ppos, byte_cntr_data->block_size, 0,
+					    &len, &bufp);
+
+	} else {
+		bufp = (char *)(tmcdrvdata->vaddr + *ppos);
+
+		if (!atomic_read(&byte_cntr_data->irq_cnt)) {
+			if (tmcdrvdata->mem_type == TMC_ETR_MEM_TYPE_CONTIG)
+				tmc_etr_flush_bytes(ppos,
+						    byte_cntr_data->block_size,
+						    &len);
+			else
+				tmc_etr_sg_read_pos(ppos,
+						    byte_cntr_data->block_size,
+						    1,
+						    &len, &bufp);
+			if (!len)
+				goto err0;
+		} else {
+			if (tmcdrvdata->mem_type == TMC_ETR_MEM_TYPE_CONTIG)
+				tmc_etr_read_bytes(byte_cntr_data, ppos,
+						   byte_cntr_data->block_size,
+						   &len);
+			else
+				tmc_etr_sg_read_pos(ppos,
+						    byte_cntr_data->block_size,
+						    1,
+						    &len, &bufp);
+		}
+	}
+
+	if (copy_to_user(data, bufp, len)) {
+		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+		dev_dbg(tmcdrvdata->dev, "%s: copy_to_user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	if (*ppos + len >= tmcdrvdata->size)
+		*ppos = 0;
+	else
+		*ppos += len;
+err0:
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+	return len;
+}
+
+void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data)
+{
+	if (!byte_cntr_data)
+		return;
+
+	mutex_lock(&byte_cntr_data->byte_cntr_lock);
+
+	if (byte_cntr_data->block_size == 0) {
+		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+		return;
+	}
+
+	atomic_set(&byte_cntr_data->irq_cnt, 0);
+	byte_cntr_data->enable = true;
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+}
+EXPORT_SYMBOL(tmc_etr_byte_cntr_start);
+
+void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
+{
+	if (!byte_cntr_data)
+		return;
+
+	mutex_lock(&byte_cntr_data->byte_cntr_lock);
+	byte_cntr_data->enable = false;
+	coresight_csr_set_byte_cntr(0);
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+}
+EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);
+
+static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
+{
+	struct byte_cntr *byte_cntr_data = fp->private_data;
+
+	mutex_lock(&byte_cntr_data->byte_cntr_lock);
+	byte_cntr_data->read_active = false;
+
+	coresight_csr_set_byte_cntr(0);
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+	return 0;
+}
+
+static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
+{
+	struct byte_cntr *byte_cntr_data =
+			container_of(in->i_cdev, struct byte_cntr, dev);
+
+	mutex_lock(&byte_cntr_data->byte_cntr_lock);
+
+	if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
+		mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+		return -EINVAL;
+	}
+
+	coresight_csr_set_byte_cntr(byte_cntr_data->block_size);
+	fp->private_data = byte_cntr_data;
+	nonseekable_open(in, fp);
+	byte_cntr_data->enable = true;
+	byte_cntr_data->read_active = true;
+	mutex_unlock(&byte_cntr_data->byte_cntr_lock);
+
+	return 0;
+}
+
+static const struct file_operations byte_cntr_fops = {
+	.owner		= THIS_MODULE,
+	.open		= tmc_etr_byte_cntr_open,
+	.read		= tmc_etr_byte_cntr_read,
+	.release	= tmc_etr_byte_cntr_release,
+	.llseek		= no_llseek,
+};
+
+static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
+{
+	int ret;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	struct device *device;
+	dev_t dev;
+
+	ret = alloc_chrdev_region(&dev, baseminor, count, "byte-cntr");
+	if (ret < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", ret);
+		return ret;
+	}
+	cdev_init(&byte_cntr_data->dev, &byte_cntr_fops);
+
+	byte_cntr_data->dev.owner = THIS_MODULE;
+	byte_cntr_data->dev.ops = &byte_cntr_fops;
+
+	ret = cdev_add(&byte_cntr_data->dev, dev, 1);
+	if (ret)
+		goto exit_unreg_chrdev_region;
+
+	byte_cntr_data->driver_class = class_create(THIS_MODULE,
+						   "coresight-tmc-etr-stream");
+	if (IS_ERR(byte_cntr_data->driver_class)) {
+		ret = -ENOMEM;
+		pr_err("class_create failed %d\n", ret);
+		goto exit_unreg_chrdev_region;
+	}
+
+	device = device_create(byte_cntr_data->driver_class, NULL,
+			       byte_cntr_data->dev.dev, byte_cntr_data,
+			       "byte-cntr");
+
+	if (IS_ERR(device)) {
+		pr_err("class_device_create failed %d\n", ret);
+		ret = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	return 0;
+
+exit_destroy_class:
+	class_destroy(byte_cntr_data->driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(byte_cntr_data->dev.dev, 1);
+	return ret;
+}
+
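+/*
+ * Set up byte counter support for an ETR: request the "byte-cntr-irq"
+ * interrupt and register the "byte-cntr" character device used to stream
+ * ETR data to userspace. Returns NULL if the optional IRQ is not present
+ * in the device tree.
+ */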
+struct byte_cntr *byte_cntr_init(struct amba_device *adev,
+				 struct tmc_drvdata *drvdata)
+{
+	struct device *dev = &adev->dev;
+	struct device_node *np = adev->dev.of_node;
+	int byte_cntr_irq;
+	int ret;
+	struct byte_cntr *byte_cntr_data;
+
+	byte_cntr_irq = of_irq_get_byname(np, "byte-cntr-irq");
+	if (byte_cntr_irq < 0)
+		return NULL;
+
+	byte_cntr_data = devm_kzalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
+	if (!byte_cntr_data)
+		return NULL;
+
+	ret = devm_request_irq(dev, byte_cntr_irq, etr_handler,
+			       IRQF_TRIGGER_RISING | IRQF_SHARED,
+			       "tmc-etr", byte_cntr_data);
+	if (ret) {
+		devm_kfree(dev, byte_cntr_data);
+		dev_err(dev, "Byte_cntr interrupt registration failed\n");
+		return NULL;
+	}
+
+	ret = byte_cntr_register_chardev(byte_cntr_data);
+	if (ret) {
+		devm_free_irq(dev, byte_cntr_irq, byte_cntr_data);
+		devm_kfree(dev, byte_cntr_data);
+		dev_err(dev, "Byte_cntr char dev registration failed\n");
+		return NULL;
+	}
+
+	tmcdrvdata = drvdata;
+	byte_cntr_data->block_size = 0;
+	byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
+	atomic_set(&byte_cntr_data->irq_cnt, 0);
+	init_waitqueue_head(&byte_cntr_data->wq);
+	mutex_init(&byte_cntr_data->byte_cntr_lock);
+
+	return byte_cntr_data;
+}
+EXPORT_SYMBOL(byte_cntr_init);
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.h b/drivers/hwtracing/coresight/coresight-byte-cntr.h
new file mode 100644
index 0000000..94e9089
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.h
@@ -0,0 +1,24 @@
+#ifndef _CORESIGHT_BYTE_CNTR_H
+#define _CORESIGHT_BYTE_CNTR_H
+#include <linux/cdev.h>
+#include <linux/amba/bus.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+struct byte_cntr {
+	struct cdev		dev;
+	struct class		*driver_class;
+	bool			enable;
+	bool			read_active;
+	uint32_t		byte_cntr_value;
+	uint32_t		block_size;
+	int			byte_cntr_irq;
+	atomic_t		irq_cnt;
+	wait_queue_head_t	wq;
+	struct mutex		byte_cntr_lock;
+};
+
+extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
+extern void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data);
+
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 9caac56..afe9f3d 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -153,12 +153,14 @@
 extern void msm_qdss_csr_disable_bam_to_usb(void);
 extern void msm_qdss_csr_disable_flush(void);
 extern int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val);
+extern void coresight_csr_set_byte_cntr(uint32_t count);
 #else
 static inline void msm_qdss_csr_enable_bam_to_usb(void) {}
 static inline void msm_qdss_csr_disable_bam_to_usb(void) {}
 static inline void msm_qdss_csr_disable_flush(void) {}
 static inline int coresight_csr_hwctrl_set(uint64_t addr,
 					   uint32_t val) { return -EINVAL; }
+static inline void coresight_csr_set_byte_cntr(uint32_t count) {}
 #endif
 
 #endif
diff --git a/drivers/hwtracing/coresight/coresight-tgu.c b/drivers/hwtracing/coresight/coresight-tgu.c
new file mode 100644
index 0000000..e919f47
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tgu.c
@@ -0,0 +1,534 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/amba/bus.h>
+#include <linux/topology.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define tgu_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define tgu_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define TGU_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration take effect before we lock it */	\
+	tgu_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define TGU_UNLOCK(drvdata)						\
+do {									\
+	tgu_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock take effect before we configure */	\
+} while (0)
+
+#define TGU_CONTROL			0x0000
+#define TIMER0_STATUS			0x0004
+#define COUNTER0_STATUS			0x000C
+#define TGU_STATUS			0x0014
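+
+/*
+ * Each trigger step occupies a 0x1D8-byte register window; the macros below
+ * compute the offset of a timer/counter compare, group or condition register
+ * within that window for a given step.
+ */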
+#define TIMER0_COMPARE_STEP(n)		(0x0040 + 0x1D8 * (n))
+#define COUNTER0_COMPARE_STEP(n)	(0x0048 + 0x1D8 * (n))
+#define GROUP_REG_STEP(grp, reg, step)	(0x0074 + 0x60 * (grp) + 0x4 * (reg) + \
+								 0x1D8 * (step))
+#define CONDITION_DECODE_STEP(m, n)	(0x0050 + 0x4 * (m) + 0x1D8 * (n))
+#define CONDITION_SELECT_STEP(m, n)	(0x0060 + 0x4 * (m) + 0x1D8 * (n))
+#define GROUP0				0x0074
+#define GROUP1				0x00D4
+#define GROUP2				0x0134
+#define GROUP3				0x0194
+#define TGU_LAR				0x0FB0
+
+#define MAX_GROUP_SETS			256
+#define MAX_GROUPS			4
+#define MAX_CONDITION_SETS		64
+#define MAX_TIMER_COUNTER_SETS		8
+
+#define to_tgu_drvdata(c)		container_of(c, struct tgu_drvdata, tgu)
+
+struct Trigger_group_data {
+	unsigned long grpaddr;
+	unsigned long value;
+};
+
+struct Trigger_condition_data {
+	unsigned long condaddr;
+	unsigned long value;
+};
+
+struct Trigger_select_data {
+	unsigned long selectaddr;
+	unsigned long value;
+};
+
+struct Trigger_timer_data {
+	unsigned long timeraddr;
+	unsigned long value;
+};
+
+struct Trigger_counter_data {
+	unsigned long counteraddr;
+	unsigned long value;
+};
+
+struct tgu_drvdata {
+	void __iomem			*base;
+	struct device			*dev;
+	struct coresight_device		*csdev;
+	struct clk			*clk;
+	spinlock_t			spinlock;
+	int				max_steps;
+	int				max_conditions;
+	int				max_regs;
+	int				max_timer_counter;
+	struct Trigger_group_data	*grp_data;
+	struct Trigger_condition_data	*condition_data;
+	struct Trigger_select_data	*select_data;
+	struct Trigger_timer_data	*timer_data;
+	struct Trigger_counter_data	*counter_data;
+	int				grp_refcnt;
+	int				cond_refcnt;
+	int				select_refcnt;
+	int				timer_refcnt;
+	int				counter_refcnt;
+	bool				enable;
+};
+
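+/*
+ * Writing a non-zero value to the enable_tgu sysfs file programs all of the
+ * queued group/condition/select/timer/counter settings into the hardware and
+ * turns the TGU on; writing zero turns it back off.
+ */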
+static ssize_t enable_tgu(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	unsigned long value;
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	int ret, i, j;
+
+	if (kstrtoul(buf, 16, &value))
+		return -EINVAL;
+
+	/* Enable clock */
+	ret = pm_runtime_get_sync(drvdata->dev);
+	if (ret < 0)
+		return ret;
+
+	spin_lock(&drvdata->spinlock);
+	/* Unlock the TGU LAR */
+	TGU_UNLOCK(drvdata);
+
+	if (value) {
+
+		/* Disable TGU to program the triggers */
+		tgu_writel(drvdata, 0, TGU_CONTROL);
+
+		/* Program the TGU group data for the desired use case */
+		for (i = 0; i < drvdata->grp_refcnt; i++)
+			tgu_writel(drvdata, drvdata->grp_data[i].value,
+						drvdata->grp_data[i].grpaddr);
+
+		/*
+		 * Default every condition decode register to its NOT bit;
+		 * the configured ones are overwritten below.
+		 */
+		for (i = 0; i <= drvdata->max_conditions; i++) {
+			for (j = 0; j <= drvdata->max_steps; j++)
+				tgu_writel(drvdata, 0x1000000,
+						CONDITION_DECODE_STEP(i, j));
+		}
+
+		/* Program the TGU condition decode for the desired use case */
+		for (i = 0; i < drvdata->cond_refcnt; i++)
+			tgu_writel(drvdata, drvdata->condition_data[i].value,
+					drvdata->condition_data[i].condaddr);
+
+		/* Program the TGU condition select for the desired use case */
+		for (i = 0; i < drvdata->select_refcnt; i++)
+			tgu_writel(drvdata, drvdata->select_data[i].value,
+					drvdata->select_data[i].selectaddr);
+
+		/* Program the timer and counter compare values */
+		for (i = 0; i < drvdata->timer_refcnt; i++)
+			tgu_writel(drvdata, drvdata->timer_data[i].value,
+					drvdata->timer_data[i].timeraddr);
+
+		for (i = 0; i < drvdata->counter_refcnt; i++)
+			tgu_writel(drvdata, drvdata->counter_data[i].value,
+					drvdata->counter_data[i].counteraddr);
+
+		/* Programming done, turn the TGU back on */
+		tgu_writel(drvdata, 1, TGU_CONTROL);
+
+		drvdata->enable = true;
+		dev_dbg(dev, "Coresight-TGU enabled\n");
+
+	} else {
+		/* Disable TGU to program the triggers */
+		tgu_writel(drvdata, 0, TGU_CONTROL);
+		TGU_LOCK(drvdata);
+		spin_unlock(&drvdata->spinlock);
+
+		pm_runtime_put(drvdata->dev);
+		dev_dbg(dev, "Coresight-TGU disabled\n");
+	}
+
+	TGU_LOCK(drvdata);
+	spin_unlock(&drvdata->spinlock);
+	return size;
+}
+static DEVICE_ATTR(enable_tgu, 0200, NULL, enable_tgu);
+
+static ssize_t reset_tgu(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	unsigned long value;
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	int ret;
+
+	if (kstrtoul(buf, 16, &value))
+		return -EINVAL;
+
+	if (!drvdata->enable) {
+		/* Enable clock */
+		ret = pm_runtime_get_sync(drvdata->dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	spin_lock(&drvdata->spinlock);
+	/* Unlock the TGU LAR */
+	TGU_UNLOCK(drvdata);
+
+	if (value) {
+		/* Disable TGU to program the triggers */
+		tgu_writel(drvdata, 0, TGU_CONTROL);
+
+		/* Reset the Reference counters*/
+		drvdata->grp_refcnt = 0;
+		drvdata->cond_refcnt = 0;
+		drvdata->select_refcnt = 0;
+		drvdata->timer_refcnt = 0;
+		drvdata->counter_refcnt = 0;
+
+		dev_dbg(dev, "Coresight-TGU disabled\n");
+	} else
+		dev_dbg(dev, "Invalid input to reset the TGU\n");
+
+	TGU_LOCK(drvdata);
+	spin_unlock(&drvdata->spinlock);
+
+	if (!drvdata->enable)
+		pm_runtime_put(drvdata->dev);
+
+	return size;
+}
+static DEVICE_ATTR(reset_tgu, 0200, NULL, reset_tgu);
+
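+/*
+ * Queue one group register value to be programmed on the next enable.
+ * Expected input is "<group> <reg> <step> <value>", for example
+ * "echo 0 1 0 0xF > set_group".
+ */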
+static ssize_t set_group(struct device *dev, struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	int grp, reg, step;
+	unsigned long value;
+
+	if (drvdata->grp_refcnt >= MAX_GROUP_SETS) {
+		dev_err(drvdata->dev, "Too many groups are being configured\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d %d %d %lx", &grp, &reg, &step, &value) != 4)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if ((grp < MAX_GROUPS) && (reg <= drvdata->max_regs)) {
+		drvdata->grp_data[drvdata->grp_refcnt].grpaddr =
+						GROUP_REG_STEP(grp, reg, step);
+		drvdata->grp_data[drvdata->grp_refcnt].value = value;
+		drvdata->grp_refcnt++;
+	} else
+		dev_err(drvdata->dev, "Invalid group data\n");
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR(set_group, 0200, NULL, set_group);
+
+static ssize_t tgu_set_condition(struct device *dev, struct device_attribute
+					*attr, const char *buf, size_t size)
+{
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long value;
+	int cond, step;
+
+	if (drvdata->cond_refcnt >= MAX_CONDITION_SETS) {
+		dev_err(drvdata->dev, "Too many conditions are being configured\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d %d %lx", &cond, &step, &value) != 3)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if ((cond <= drvdata->max_conditions) && (step <=
+						drvdata->max_steps)) {
+		drvdata->condition_data[drvdata->cond_refcnt].condaddr =
+					CONDITION_DECODE_STEP(cond, step);
+		drvdata->condition_data[drvdata->cond_refcnt].value = value;
+		drvdata->cond_refcnt++;
+	} else
+		dev_err(drvdata->dev, "Invalid condition decode data\n");
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR(set_condition, 0200, NULL, tgu_set_condition);
+
+static ssize_t tgu_set_select(struct device *dev, struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long value;
+	int select, step;
+
+	if (drvdata->select_refcnt >= MAX_CONDITION_SETS) {
+		dev_err(drvdata->dev, "Too many selects are being configured\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d %d %lx", &select, &step, &value) != 3)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+
+	if ((select <= drvdata->max_conditions) && (step <=
+					drvdata->max_steps)) {
+		drvdata->select_data[drvdata->select_refcnt].selectaddr =
+					CONDITION_SELECT_STEP(select, step);
+		drvdata->select_data[drvdata->select_refcnt].value = value;
+		drvdata->select_refcnt++;
+	} else
+		dev_err(drvdata->dev, "Invalid select decode data\n");
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR(set_select, 0200, NULL, tgu_set_select);
+
+static ssize_t tgu_set_timers(struct device *dev, struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long value;
+	int step;
+
+	if (drvdata->timer_refcnt >= MAX_TIMER_COUNTER_SETS) {
+		dev_err(drvdata->dev, "Too many timers are being configured\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d %lx", &step, &value) != 2)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if (step <= drvdata->max_timer_counter) {
+		drvdata->timer_data[drvdata->timer_refcnt].timeraddr =
+						TIMER0_COMPARE_STEP(step);
+		drvdata->timer_data[drvdata->timer_refcnt].value = value;
+		drvdata->timer_refcnt++;
+	} else
+		dev_err(drvdata->dev, "Invalid TGU timer data\n");
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR(set_timer, 0200, NULL, tgu_set_timers);
+
+static ssize_t tgu_set_counters(struct device *dev, struct device_attribute
+					*attr, const char *buf, size_t size)
+{
+	struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long value;
+	int step;
+
+	if (drvdata->counter_refcnt >= MAX_TIMER_COUNTER_SETS) {
+		dev_err(drvdata->dev, "Too many counters are being configured\n");
+		return -EINVAL;
+	}
+
+	if (sscanf(buf, "%d %lx", &step, &value) != 2)
+		return -EINVAL;
+
+	spin_lock(&drvdata->spinlock);
+	if (step <= drvdata->max_timer_counter) {
+		drvdata->counter_data[drvdata->counter_refcnt].counteraddr =
+						COUNTER0_COMPARE_STEP(step);
+		drvdata->counter_data[drvdata->counter_refcnt].value = value;
+		drvdata->counter_refcnt++;
+	} else
+		dev_err(drvdata->dev, "Invalid TGU counter data\n");
+
+	spin_unlock(&drvdata->spinlock);
+
+	return size;
+}
+static DEVICE_ATTR(set_counter, 0200, NULL, tgu_set_counters);
+
+static struct attribute *tgu_attrs[] = {
+	&dev_attr_enable_tgu.attr,
+	&dev_attr_reset_tgu.attr,
+	&dev_attr_set_group.attr,
+	&dev_attr_set_condition.attr,
+	&dev_attr_set_select.attr,
+	&dev_attr_set_timer.attr,
+	&dev_attr_set_counter.attr,
+	NULL,
+};
+
+static struct attribute_group tgu_attr_grp = {
+	.attrs = tgu_attrs,
+};
+
+static const struct attribute_group *tgu_attr_grps[] = {
+	&tgu_attr_grp,
+	NULL,
+};
+
+static int tgu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	int ret = 0;
+	struct device *dev = &adev->dev;
+	struct coresight_platform_data *pdata;
+	struct tgu_drvdata *drvdata;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, adev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	adev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &adev->dev;
+
+	dev_set_drvdata(dev, drvdata);
+
+	drvdata->base = devm_ioremap_resource(dev, &adev->res);
+	if (IS_ERR(drvdata->base))
+		return PTR_ERR(drvdata->base);
+
+	spin_lock_init(&drvdata->spinlock);
+
+	ret = of_property_read_u32(adev->dev.of_node, "tgu-steps",
+						&drvdata->max_steps);
+	if (ret)
+		return -EINVAL;
+
+	ret = of_property_read_u32(adev->dev.of_node, "tgu-conditions",
+						&drvdata->max_conditions);
+	if (ret)
+		return -EINVAL;
+
+	ret = of_property_read_u32(adev->dev.of_node, "tgu-regs",
+							&drvdata->max_regs);
+	if (ret)
+		return -EINVAL;
+
+	ret = of_property_read_u32(adev->dev.of_node, "tgu-timer-counters",
+						&drvdata->max_timer_counter);
+	if (ret)
+		return -EINVAL;
+
+	/*
+	 * Allocate memory for the group, condition, select, timer and
+	 * counter configuration data.
+	 */
+	drvdata->grp_data = devm_kzalloc(dev, MAX_GROUP_SETS *
+				       sizeof(*drvdata->grp_data),
+				       GFP_KERNEL);
+	if (!drvdata->grp_data)
+		return -ENOMEM;
+
+	drvdata->condition_data = devm_kzalloc(dev, MAX_CONDITION_SETS *
+				       sizeof(*drvdata->condition_data),
+				       GFP_KERNEL);
+
+	if (!drvdata->condition_data)
+		return -ENOMEM;
+
+	drvdata->select_data = devm_kzalloc(dev, MAX_CONDITION_SETS *
+				       sizeof(*drvdata->select_data),
+				       GFP_KERNEL);
+	if (!drvdata->select_data)
+		return -ENOMEM;
+
+	drvdata->timer_data = devm_kzalloc(dev, MAX_TIMER_COUNTER_SETS *
+				       sizeof(*drvdata->timer_data),
+				       GFP_KERNEL);
+	if (!drvdata->timer_data)
+		return -ENOMEM;
+
+	drvdata->counter_data = devm_kzalloc(dev, MAX_TIMER_COUNTER_SETS *
+				       sizeof(*drvdata->counter_data),
+				       GFP_KERNEL);
+	if (!drvdata->counter_data)
+		return -ENOMEM;
+
+	drvdata->enable = false;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = adev->dev.platform_data;
+	desc->dev = &adev->dev;
+	desc->groups = tgu_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		ret = PTR_ERR(drvdata->csdev);
+		goto err;
+	}
+
+	pm_runtime_put(&adev->dev);
+	dev_dbg(dev, "TGU initialized\n");
+	return 0;
+err:
+	pm_runtime_put(&adev->dev);
+	return ret;
+}
+
+static struct amba_id tgu_ids[] = {
+	{
+		.id	=	0x0003b999,
+		.mask	=	0x0003ffff,
+		.data	=	"TGU",
+	},
+	{ 0, 0},
+};
+
+static struct amba_driver tgu_driver = {
+	.drv = {
+		.name			=	"coresight-tgu",
+		.owner			=	THIS_MODULE,
+		.suppress_bind_attrs	=	true,
+	},
+	.probe		=	tgu_probe,
+	.id_table	=	tgu_ids,
+};
+
+builtin_amba_driver(tgu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight TGU driver");
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2a5a1bd..9e024ce 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -351,7 +351,7 @@
 	tmc_etr_sg_tbl_flush(vaddr, size);
 }
 
-static void tmc_etr_sg_rwp_pos(struct tmc_drvdata *drvdata, uint32_t rwp)
+void tmc_etr_sg_rwp_pos(struct tmc_drvdata *drvdata, uint32_t rwp)
 {
 	uint32_t i = 0, pte_n = 0, last_pte;
 	uint32_t *virt_st_tbl, *virt_pte;
@@ -403,6 +403,7 @@
 			break;
 	}
 }
+EXPORT_SYMBOL(tmc_etr_sg_rwp_pos);
 
 static void tmc_etr_mem_reset(struct tmc_drvdata *drvdata)
 {
@@ -831,6 +832,8 @@
 	drvdata->sticky_enable = true;
 out:
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
+		tmc_etr_byte_cntr_start(drvdata->byte_cntr);
 	mutex_unlock(&drvdata->mem_lock);
 
 	if (!ret)
@@ -919,6 +922,7 @@
 	if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
 		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
 		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
 	}
 out:
 	mutex_unlock(&drvdata->mem_lock);
@@ -990,6 +994,11 @@
 		goto out;
 	}
 
+	if (!drvdata->byte_cntr || drvdata->byte_cntr->enable) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	/* Disable the TMC if need be */
 	if (val == CS_MODE_SYSFS)
 		tmc_etr_disable_hw(drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index b97ebb8..6f13eb3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -459,6 +459,42 @@
 }
 static DEVICE_ATTR_RW(mem_type);
 
+static ssize_t block_size_show(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	uint32_t val = 0;
+
+	if (drvdata->byte_cntr)
+		val = drvdata->byte_cntr->block_size;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n",
+			val);
+}
+
+static ssize_t block_size_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t size)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (!drvdata->byte_cntr)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->byte_cntr->byte_cntr_lock);
+	drvdata->byte_cntr->block_size = val * 8;
+	mutex_unlock(&drvdata->byte_cntr->byte_cntr_lock);
+
+	return size;
+}
+static DEVICE_ATTR_RW(block_size);
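+
+/*
+ * Note on block_size_store() above: the value written from user space is
+ * scaled by 8 before being stored, so e.g. "echo 2 > block_size" would
+ * configure a block size of 16 (units as defined by the byte-counter
+ * implementation, which is not part of this hunk).
+ */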
+
 static struct attribute *coresight_tmc_etf_attrs[] = {
 	&dev_attr_trigger_cntr.attr,
 	NULL,
@@ -470,6 +506,7 @@
 	&dev_attr_trigger_cntr.attr,
 	&dev_attr_out_mode.attr,
 	&dev_attr_available_out_modes.attr,
+	&dev_attr_block_size.attr,
 	NULL,
 };
 
@@ -587,6 +624,8 @@
 		desc.groups = coresight_tmc_etr_groups;
 		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
 
+		drvdata->byte_cntr = byte_cntr_init(adev, drvdata);
+
 		ret = tmc_etr_bam_init(adev, drvdata);
 		if (ret)
 			goto out;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6643adc..fe6bc76 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -27,6 +27,8 @@
 #include <linux/usb/usb_qdss.h>
 #include <linux/coresight-cti.h>
 
+#include "coresight-byte-cntr.h"
+
 #define TMC_RSZ			0x004
 #define TMC_STS			0x00c
 #define TMC_RRD			0x010
@@ -167,7 +169,7 @@
 	bool			enable;
 	char			*buf;
 	dma_addr_t		paddr;
-	void __iomem		*vaddr;
+	void			*vaddr;
 	u32			size;
 	u32			len;
 	local_t			mode;
@@ -187,6 +189,7 @@
 	bool			sticky_enable;
 	struct coresight_cti	*cti_flush;
 	struct coresight_cti	*cti_reset;
+	struct byte_cntr	*byte_cntr;
 };
 
 /* Generic functions */
@@ -214,5 +217,9 @@
 		  struct usb_qdss_ch *ch);
 int tmc_etr_bam_init(struct amba_device *adev,
 		     struct tmc_drvdata *drvdata);
+extern struct byte_cntr *byte_cntr_init(struct amba_device *adev,
+					struct tmc_drvdata *drvdata);
+extern void tmc_etr_sg_rwp_pos(struct tmc_drvdata *drvdata, uint32_t rwp);
+
 extern const struct coresight_ops tmc_etr_cs_ops;
 #endif
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 0bba384..63b5db4 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -85,6 +85,16 @@
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
 		.driver_data = (kernel_ulong_t)0,
 	},
+	{
+		/* Cannon Lake H */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
+		/* Cannon Lake LP */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+		.driver_data = (kernel_ulong_t)0,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0b42a12..b42d95f 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -319,7 +319,7 @@
 #endif
 
 #ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -343,11 +343,21 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+	pm_runtime_resume(dev);
+	return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
 static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
 	.prepare = dw_i2c_plat_prepare,
 	.complete = dw_i2c_plat_complete,
 	SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
-	SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+			   dw_i2c_plat_resume,
+			   NULL)
 };
 
 #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index f573448..8477292 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -341,8 +341,10 @@
 			break;
 		case I2C_SMBUS_BLOCK_DATA:
 		case I2C_SMBUS_I2C_BLOCK_DATA:
-			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
-			data->block[0] = desc->rxbytes;
+			if (desc->rxbytes != dma_buffer[0] + 1)
+				return -EMSGSIZE;
+
+			memcpy(data->block, dma_buffer, desc->rxbytes);
 			break;
 		}
 		return 0;
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 3de0e25..4daed7f 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -2164,7 +2164,6 @@
 
 			++cur_buf->msg_idx;
 			++cur_msg;
-			}
 		} else {
 			cur_buf->is_init = true;
 		}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index f70dbf2..7e9999b 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -191,13 +191,6 @@
 
 static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
 {
-	u32 m_cmd = readl_relaxed(gi2c->base + SE_GENI_M_CMD0);
-	u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
-	u32 geni_s = readl_relaxed(gi2c->base + SE_GENI_STATUS);
-	u32 geni_ios = readl_relaxed(gi2c->base + SE_GENI_IOS);
-	u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
-	u32 rx_st, tx_st;
-
 	if (gi2c->cur)
 		GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
 			    "len:%d, slv-addr:0x%x, RD/WR:%d\n", gi2c->cur->len,
@@ -211,23 +204,9 @@
 		GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, "%s\n",
 			     gi2c_log[err].msg);
 	}
-	if (gi2c->se_mode == GSI_ONLY)
-		goto err_out;
-
-	if (dma) {
-		rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
-		tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
-	} else {
-		rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
-		tx_st = readl_relaxed(gi2c->base + SE_GENI_TX_FIFO_STATUS);
-	}
-	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
-		     "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
-		     dma, tx_st, rx_st, m_stat);
-err_out:
-	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
-			     "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
-			     m_cmd, geni_s, geni_ios);
+	GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s: se-mode:%d\n", __func__,
+							gi2c->se_mode);
+	geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
 err_ret:
 	gi2c->err = gi2c_log[err].err;
 }
@@ -469,8 +448,8 @@
 		int stretch = (i < (num - 1));
 		dma_cookie_t tx_cookie, rx_cookie;
 		struct msm_gpi_tre *go_t = &gi2c->go_t;
-		struct device *rx_dev = gi2c->dev;
-		struct device *tx_dev = gi2c->dev;
+		struct device *rx_dev = gi2c->wrapper_dev;
+		struct device *tx_dev = gi2c->wrapper_dev;
 
 		gi2c->cur = &msgs[i];
 		if (!gi2c->cfg_sent) {
@@ -501,9 +480,8 @@
 
 		if (msgs[i].flags & I2C_M_RD) {
 			sg_init_table(&gi2c->rx_sg, 1);
-			gi2c->rx_ph = dma_map_single(rx_dev, msgs[i].buf,
-						     msgs[i].len,
-						     DMA_FROM_DEVICE);
+			geni_se_iommu_map_buf(rx_dev, &gi2c->rx_ph, msgs[i].buf,
+						msgs[i].len, DMA_FROM_DEVICE);
 			gi2c->rx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->rx_ph);
 			gi2c->rx_t.dword[1] =
@@ -533,9 +511,8 @@
 			rx_cookie = dmaengine_submit(gi2c->rx_desc);
 			dma_async_issue_pending(gi2c->rx_c);
 		} else {
-			gi2c->tx_ph = dma_map_single(tx_dev, msgs[i].buf,
-						     msgs[i].len,
-						     DMA_TO_DEVICE);
+			geni_se_iommu_map_buf(tx_dev, &gi2c->tx_ph, msgs[i].buf,
+						msgs[i].len, DMA_TO_DEVICE);
 			gi2c->tx_t.dword[0] =
 				MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->tx_ph);
 			gi2c->tx_t.dword[1] =
@@ -568,11 +545,11 @@
 
 		timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
 		if (msgs[i].flags & I2C_M_RD)
-			dma_unmap_single(rx_dev, gi2c->rx_ph, msgs[i].len,
-					 DMA_FROM_DEVICE);
+			geni_se_iommu_unmap_buf(rx_dev, &gi2c->rx_ph,
+				msgs[i].len, DMA_FROM_DEVICE);
 		else
-			dma_unmap_single(tx_dev, gi2c->tx_ph, msgs[i].len,
-					 DMA_TO_DEVICE);
+			geni_se_iommu_unmap_buf(tx_dev, &gi2c->tx_ph,
+				msgs[i].len, DMA_TO_DEVICE);
 
 		if (!timeout) {
 			GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 59b380d..c388882 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@
 	struct regmap *regmap;
 	int irq;
 	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
-	atomic_t active_intr;
 	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
 	struct mutex mutex;
 	u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@
 		goto out_fix_power_state;
 	}
 
-	if (state)
-		atomic_inc(&data->active_intr);
-	else
-		atomic_dec(&data->active_intr);
-
 	return 0;
 
 out_fix_power_state:
@@ -1709,8 +1703,7 @@
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 
 	mutex_lock(&data->mutex);
-	if (atomic_read(&data->active_intr))
-		bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
 	bmc150_accel_fifo_set_mode(data);
 	mutex_unlock(&data->mutex);
 
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index cde6f13..472641f 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -80,18 +80,12 @@
 	8, 16, 32, 64, 128, 250, 475, 860
 };
 
-static const struct {
-	int scale;
-	int uscale;
-} ads1015_scale[] = {
-	{3, 0},
-	{2, 0},
-	{1, 0},
-	{0, 500000},
-	{0, 250000},
-	{0, 125000},
-	{0, 125000},
-	{0, 125000},
+/*
+ * Translation from PGA bits to full-scale positive and negative input voltage
+ * range in mV
+ */
+static int ads1015_fullscale_range[] = {
+	6144, 4096, 2048, 1024, 512, 256, 256, 256
 };
 
 #define ADS1015_V_CHAN(_chan, _addr) {				\
@@ -182,6 +176,12 @@
 	struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
 
 	unsigned int *data_rate;
+	/*
+	 * Set to true when the ADC is switched to the continuous-conversion
+	 * mode and exits from a power-down state.  This flag is used to avoid
+	 * getting the stale result from the conversion register.
+	 */
+	bool conv_invalid;
 };
 
 static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
@@ -234,33 +234,43 @@
 		ret = pm_runtime_put_autosuspend(dev);
 	}
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static
 int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
 {
 	int ret, pga, dr, conv_time;
-	bool change;
+	unsigned int old, mask, cfg;
 
 	if (chan < 0 || chan >= ADS1015_CHANNELS)
 		return -EINVAL;
 
-	pga = data->channel_data[chan].pga;
-	dr = data->channel_data[chan].data_rate;
-
-	ret = regmap_update_bits_check(data->regmap, ADS1015_CFG_REG,
-				       ADS1015_CFG_MUX_MASK |
-				       ADS1015_CFG_PGA_MASK,
-				       chan << ADS1015_CFG_MUX_SHIFT |
-				       pga << ADS1015_CFG_PGA_SHIFT,
-				       &change);
-	if (ret < 0)
+	ret = regmap_read(data->regmap, ADS1015_CFG_REG, &old);
+	if (ret)
 		return ret;
 
-	if (change) {
-		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
+	pga = data->channel_data[chan].pga;
+	dr = data->channel_data[chan].data_rate;
+	mask = ADS1015_CFG_MUX_MASK | ADS1015_CFG_PGA_MASK |
+		ADS1015_CFG_DR_MASK;
+	cfg = chan << ADS1015_CFG_MUX_SHIFT | pga << ADS1015_CFG_PGA_SHIFT |
+		dr << ADS1015_CFG_DR_SHIFT;
+
+	cfg = (old & ~mask) | (cfg & mask);
+
+	ret = regmap_write(data->regmap, ADS1015_CFG_REG, cfg);
+	if (ret)
+		return ret;
+
+	if (old != cfg || data->conv_invalid) {
+		int dr_old = (old & ADS1015_CFG_DR_MASK) >>
+				ADS1015_CFG_DR_SHIFT;
+
+		conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]);
+		conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
 		usleep_range(conv_time, conv_time + 1);
+		data->conv_invalid = false;
 	}
 
 	return regmap_read(data->regmap, ADS1015_CONV_REG, val);
@@ -297,17 +307,20 @@
 	return IRQ_HANDLED;
 }
 
-static int ads1015_set_scale(struct ads1015_data *data, int chan,
+static int ads1015_set_scale(struct ads1015_data *data,
+			     struct iio_chan_spec const *chan,
 			     int scale, int uscale)
 {
 	int i, ret, rindex = -1;
+	int fullscale = div_s64((scale * 1000000LL + uscale) <<
+				(chan->scan_type.realbits - 1), 1000000);
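+	/*
+	 * Worked example (for illustration): on the 16-bit ADS1115, a scale
+	 * of 0.1875 (scale = 0, uscale = 187500) gives
+	 * (187500 << 15) / 1000000 = 6144, i.e. ads1015_fullscale_range[0].
+	 */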
 
-	for (i = 0; i < ARRAY_SIZE(ads1015_scale); i++)
-		if (ads1015_scale[i].scale == scale &&
-		    ads1015_scale[i].uscale == uscale) {
+	for (i = 0; i < ARRAY_SIZE(ads1015_fullscale_range); i++) {
+		if (ads1015_fullscale_range[i] == fullscale) {
 			rindex = i;
 			break;
 		}
+	}
 	if (rindex < 0)
 		return -EINVAL;
 
@@ -317,32 +330,23 @@
 	if (ret < 0)
 		return ret;
 
-	data->channel_data[chan].pga = rindex;
+	data->channel_data[chan->address].pga = rindex;
 
 	return 0;
 }
 
 static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
 {
-	int i, ret, rindex = -1;
+	int i;
 
-	for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
+	for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++) {
 		if (data->data_rate[i] == rate) {
-			rindex = i;
-			break;
+			data->channel_data[chan].data_rate = i;
+			return 0;
 		}
-	if (rindex < 0)
-		return -EINVAL;
+	}
 
-	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
-				 ADS1015_CFG_DR_MASK,
-				 rindex << ADS1015_CFG_DR_SHIFT);
-	if (ret < 0)
-		return ret;
-
-	data->channel_data[chan].data_rate = rindex;
-
-	return 0;
+	return -EINVAL;
 }
 
 static int ads1015_read_raw(struct iio_dev *indio_dev,
@@ -384,9 +388,9 @@
 	}
 	case IIO_CHAN_INFO_SCALE:
 		idx = data->channel_data[chan->address].pga;
-		*val = ads1015_scale[idx].scale;
-		*val2 = ads1015_scale[idx].uscale;
-		ret = IIO_VAL_INT_PLUS_MICRO;
+		*val = ads1015_fullscale_range[idx];
+		*val2 = chan->scan_type.realbits - 1;
+		ret = IIO_VAL_FRACTIONAL_LOG2;
 		break;
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		idx = data->channel_data[chan->address].data_rate;
@@ -413,7 +417,7 @@
 	mutex_lock(&data->lock);
 	switch (mask) {
 	case IIO_CHAN_INFO_SCALE:
-		ret = ads1015_set_scale(data, chan->address, val, val2);
+		ret = ads1015_set_scale(data, chan, val, val2);
 		break;
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		ret = ads1015_set_data_rate(data, chan->address, val);
@@ -445,7 +449,10 @@
 	.validate_scan_mask = &iio_validate_scan_mask_onehot,
 };
 
-static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR_NAMED(ads1015_scale_available, scale_available,
+	"3 2 1 0.5 0.25 0.125");
+static IIO_CONST_ATTR_NAMED(ads1115_scale_available, scale_available,
+	"0.1875 0.125 0.0625 0.03125 0.015625 0.007813");
 
 static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
 	sampling_frequency_available, "128 250 490 920 1600 2400 3300");
@@ -453,7 +460,7 @@
 	sampling_frequency_available, "8 16 32 64 128 250 475 860");
 
 static struct attribute *ads1015_attributes[] = {
-	&iio_const_attr_scale_available.dev_attr.attr,
+	&iio_const_attr_ads1015_scale_available.dev_attr.attr,
 	&iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
 	NULL,
 };
@@ -463,7 +470,7 @@
 };
 
 static struct attribute *ads1115_attributes[] = {
-	&iio_const_attr_scale_available.dev_attr.attr,
+	&iio_const_attr_ads1115_scale_available.dev_attr.attr,
 	&iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
 	NULL,
 };
@@ -624,6 +631,15 @@
 		dev_err(&client->dev, "iio triggered buffer setup failed\n");
 		return ret;
 	}
+
+	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+				ADS1015_CFG_MOD_MASK,
+				ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+	if (ret)
+		return ret;
+
+	data->conv_invalid = true;
+
 	ret = pm_runtime_set_active(&client->dev);
 	if (ret)
 		goto err_buffer_cleanup;
@@ -679,10 +695,15 @@
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
 	struct ads1015_data *data = iio_priv(indio_dev);
+	int ret;
 
-	return regmap_update_bits(data->regmap, ADS1015_CFG_REG,
+	ret = regmap_update_bits(data->regmap, ADS1015_CFG_REG,
 				  ADS1015_CFG_MOD_MASK,
 				  ADS1015_CONTINUOUS << ADS1015_CFG_MOD_SHIFT);
+	if (!ret)
+		data->conv_invalid = true;
+
+	return ret;
 }
 #endif
 
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 228a003..d1bde6d 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
 #define VF610_ADC_ADSTS_MASK		0x300
 #define VF610_ADC_ADLPC_EN		0x80
 #define VF610_ADC_ADHSC_EN		0x400
-#define VF610_ADC_REFSEL_VALT		0x100
+#define VF610_ADC_REFSEL_VALT		0x800
 #define VF610_ADC_REFSEL_VBG		0x1000
 #define VF610_ADC_ADTRG_HARD		0x2000
 #define VF610_ADC_AVGS_8		0x4000
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 6082934..b60e5d8 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,8 +36,6 @@
 	s32 poll_value = 0;
 
 	if (state) {
-		if (!atomic_read(&st->user_requested_state))
-			return 0;
 		if (sensor_hub_device_open(st->hsdev))
 			return -EIO;
 
@@ -86,6 +84,9 @@
 				       &report_val);
 	}
 
+	pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+		 st->pdev->name, state_val, report_val);
+
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
@@ -107,6 +108,7 @@
 		ret = pm_runtime_get_sync(&st->pdev->dev);
 	else {
 		pm_runtime_mark_last_busy(&st->pdev->dev);
+		pm_runtime_use_autosuspend(&st->pdev->dev);
 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
 	}
 	if (ret < 0) {
@@ -201,8 +203,6 @@
 	/* Default to 3 seconds, but can be changed from sysfs */
 	pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
 					 3000);
-	pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
 	return ret;
 error_unreg_trigger:
 	iio_trigger_unregister(trig);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3..1289842 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@
 		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
 		.gyro_max_scale = 450,
 		.accel_max_val = IIO_M_S_2_TO_G(12500),
-		.accel_max_scale = 5,
+		.accel_max_scale = 10,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 04598ae..f0d3f74 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@
 	struct tsl2563_chip *chip = iio_priv(dev_info);
 
 	iio_push_event(dev_info,
-		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+		       IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_EITHER),
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc..9398143 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@
 	skb_trim(skb, dlen);
 	mutex_lock(&ep->com.mutex);
 
-	/* update RX credits */
-	update_rx_credits(ep, dlen);
-
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_request(ep, skb);
 		break;
 	case FPDU_MODE: {
 		struct c4iw_qp_attributes attrs;
+
+		update_rx_credits(ep, dlen);
 		BUG_ON(!ep->com.qp);
 		if (status)
 			pr_err("%s Unexpected streaming data." \
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
index c5ab3dd..fdcc146 100644
--- a/drivers/input/misc/keychord.c
+++ b/drivers/input/misc/keychord.c
@@ -60,6 +60,10 @@
 	unsigned char		head;
 	unsigned char		tail;
 	__u16			buff[BUFFER_SIZE];
+	/* Bit to serialize writes to this device */
+#define KEYCHORD_BUSY			0x01
+	unsigned long		flags;
+	wait_queue_head_t	write_waitq;
 };
 
 static int check_keychord(struct keychord_device *kdev,
@@ -172,7 +176,6 @@
 		goto err_input_open_device;
 
 	pr_info("keychord: using input dev %s for fevent\n", dev->name);
-
 	return 0;
 
 err_input_open_device:
@@ -225,6 +228,41 @@
 }
 
 /*
+ * Serializes writes on a device. A mutex_lock_interruptible() could be used
+ * for this particular use case as well - a matter of preference.
+ */
+static int
+keychord_write_lock(struct keychord_device *kdev)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	while (kdev->flags & KEYCHORD_BUSY) {
+		spin_unlock_irqrestore(&kdev->lock, flags);
+		ret = wait_event_interruptible(kdev->write_waitq,
+			       ((kdev->flags & KEYCHORD_BUSY) == 0));
+		if (ret)
+			return ret;
+		spin_lock_irqsave(&kdev->lock, flags);
+	}
+	kdev->flags |= KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	return 0;
+}
+
+static void
+keychord_write_unlock(struct keychord_device *kdev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	kdev->flags &= ~KEYCHORD_BUSY;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	wake_up_interruptible(&kdev->write_waitq);
+}
+
+/*
  * keychord_write is used to configure the driver
  */
 static ssize_t keychord_write(struct file *file, const char __user *buffer,
@@ -250,6 +288,22 @@
 		return -EFAULT;
 	}
 
+	/*
+	 * Serialize writes to this device to prevent various races.
+	 * 1) writers racing here could do duplicate input_unregister_handler()
+	 *    calls, resulting in attempting to unlink a node from a list that
+	 *    does not exist.
+	 * 2) writers racing here could do duplicate input_register_handler() calls
+	 *    below, resulting in a duplicate insertion of a node into the list.
+	 * 3) a double kfree of keychords can occur (in the event that
+	 *    input_register_handler() fails below).
+	 */
+	ret = keychord_write_lock(kdev);
+	if (ret) {
+		kfree(keychords);
+		return ret;
+	}
+
 	/* unregister handler before changing configuration */
 	if (kdev->registered) {
 		input_unregister_handler(&kdev->input_handler);
@@ -318,15 +372,19 @@
 	if (ret) {
 		kfree(keychords);
 		kdev->keychords = 0;
+		keychord_write_unlock(kdev);
 		return ret;
 	}
 	kdev->registered = 1;
 
+	keychord_write_unlock(kdev);
+
 	return count;
 
 err_unlock_return:
 	spin_unlock_irqrestore(&kdev->lock, flags);
 	kfree(keychords);
+	keychord_write_unlock(kdev);
 	return -EINVAL;
 }
 
@@ -352,6 +410,7 @@
 
 	spin_lock_init(&kdev->lock);
 	init_waitqueue_head(&kdev->waitq);
+	init_waitqueue_head(&kdev->write_waitq);
 
 	kdev->input_handler.event = keychord_event;
 	kdev->input_handler.connect = keychord_connect;
@@ -373,6 +432,7 @@
 
 	if (kdev->registered)
 		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev->keychords);
 	kfree(kdev);
 
 	return 0;
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 518e8a7..f26807c 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1212,14 +1212,24 @@
 
 	case SS4_PACKET_ID_TWO:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
 		} else {
-			f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
 		}
 		f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1236,16 +1246,27 @@
 
 	case SS4_PACKET_ID_MULTI:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+			}
+
 			f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX_BL;
 			no_data_y = SS4_MFPACKET_NO_AY_BL;
 		} else {
-			f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX;
 			no_data_y = SS4_MFPACKET_NO_AY;
@@ -2535,8 +2556,8 @@
 
 	memset(otp, 0, sizeof(otp));
 
-	if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
-	    alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+	if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+	    alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
 		return -1;
 
 	alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index dbfd260..7931237 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -91,6 +91,10 @@
 				 ((_b[1 + _i * 3]  << 5) & 0x1F00)	\
 				)
 
+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+				 ((_b[1 + (_i) * 3]  << 4) & 0x0F80)	\
+				)
+
 #define SS4_STD_MF_Y_V2(_b, _i)	(((_b[1 + (_i) * 3] << 3) & 0x0010) |	\
 				 ((_b[2 + (_i) * 3] << 5) & 0x01E0) |	\
 				 ((_b[2 + (_i) * 3] << 4) & 0x0E00)	\
@@ -100,6 +104,10 @@
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0010)	\
 				)
 
+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) |	\
+				 ((_b[0 + (_i) * 3] >> 4) & 0x0008)	\
+				)
+
 #define SS4_BTL_MF_Y_V2(_b, _i)	(SS4_STD_MF_Y_V2(_b, _i) | \
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0008)	\
 				)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index da5458d..681dce1 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1234,7 +1234,12 @@
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
+	{ "ELAN0608", 0 },
+	{ "ELAN0605", 0 },
+	{ "ELAN0609", 0 },
+	{ "ELAN060B", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 354d47e..7e2dc5e 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@
 	if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
 		return -1;
 
-	if (param[0] != TP_MAGIC_IDENT)
+	/* add new TP ID. */
+	if (!(param[0] & TP_MAGIC_IDENT))
 		return -1;
 
 	if (firmware_id)
@@ -380,8 +381,8 @@
 		return 0;
 
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
-		psmouse_warn(psmouse, "failed to get extended button data\n");
-		button_info = 0;
+		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+		button_info = 0x33;
 	}
 
 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3..8805575 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
 #define TP_COMMAND		0xE2	/* Commands start with this */
 
 #define TP_READ_ID		0xE1	/* Sent for device identification */
-#define TP_MAGIC_IDENT		0x01	/* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT		0x03	/* Sent after a TP_READ_ID followed */
 					/* by the firmware ID */
+					/* Firmware ID includes 0x1, 0x2, 0x3 */
 
 
 /*
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 41800b6..c380b7e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4294,6 +4294,7 @@
 		/* Setting */
 		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
 		irte->hi.fields.vector = vcpu_pi_info->vector;
+		irte->lo.fields_vapic.ga_log_intr = 1;
 		irte->lo.fields_vapic.guest_mode = 1;
 		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
 
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9c0dde4..a0e1f59 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -51,6 +51,9 @@
 #include <linux/of_platform.h>
 #include <linux/msm-bus.h>
 #include <dt-bindings/msm/msm-bus-ids.h>
+#include <linux/remote_spinlock.h>
+#include <linux/ktime.h>
+#include <trace/events/iommu.h>
 
 #include <linux/amba/bus.h>
 
@@ -428,6 +431,7 @@
 #define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
 #define ARM_SMMU_OPT_NO_ASID_RETENTION	(1 << 5)
 #define ARM_SMMU_OPT_DISABLE_ATOS	(1 << 6)
+#define ARM_SMMU_OPT_QCOM_MMU500_ERRATA1	(1 << 7)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -527,6 +531,9 @@
 	struct mutex			assign_lock;
 	struct list_head		secure_pool_list;
 	struct iommu_domain		domain;
+
+	bool				qsmmuv500_errata1_init;
+	bool				qsmmuv500_errata1_client;
 };
 
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -549,6 +556,7 @@
 	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
 	{ ARM_SMMU_OPT_NO_ASID_RETENTION, "qcom,no-asid-retention" },
 	{ ARM_SMMU_OPT_DISABLE_ATOS, "qcom,disable-atos" },
+	{ ARM_SMMU_OPT_QCOM_MMU500_ERRATA1, "qcom,mmu500-errata-1" },
 	{ 0, NULL},
 };
 
@@ -574,6 +582,7 @@
 static int arm_smmu_alloc_cb(struct iommu_domain *domain,
 				struct arm_smmu_device *smmu,
 				struct device *dev);
+static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops;
 
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
@@ -1228,11 +1237,12 @@
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
 	phys_addr_t phys;
 	phys_addr_t phys_post_tlbiall;
 
 	phys = arm_smmu_iova_to_phys_hard(domain, iova);
-	arm_smmu_tlb_inv_context(smmu_domain);
+	tlb->tlb_flush_all(smmu_domain);
 	phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
 
 	if (phys != phys_post_tlbiall) {
@@ -1588,6 +1598,7 @@
 	bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
 	unsigned long quirks = 0;
 	bool dynamic;
+	const struct iommu_gather_ops *tlb;
 
 	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
@@ -1702,6 +1713,13 @@
 		quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
 	if (is_iommu_pt_coherent(smmu_domain))
 		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
+	if ((quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
+		(smmu->model == QCOM_SMMUV500))
+		quirks |= IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE;
+
+	tlb = &arm_smmu_gather_ops;
+	if (smmu->options & ARM_SMMU_OPT_QCOM_MMU500_ERRATA1)
+		tlb = &qsmmuv500_errata1_smmu_gather_ops;
 
 	ret = arm_smmu_alloc_cb(domain, smmu, dev);
 	if (ret < 0)
@@ -1720,7 +1738,7 @@
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
-		.tlb		= &arm_smmu_gather_ops,
+		.tlb		= tlb,
 		.iommu_dev	= smmu->dev,
 	};
 
@@ -2378,6 +2396,8 @@
 	if (ret)
 		return ret;
 
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	__saved_iova_start = iova;
 	idx_start = idx_end = 0;
 	sg_start = sg_end = sg;
@@ -2415,6 +2435,7 @@
 		arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
 		iova = __saved_iova_start;
 	}
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
 	return iova - __saved_iova_start;
 }
@@ -2766,6 +2787,11 @@
 					& (1 << DOMAIN_ATTR_FAST));
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
+		*((int *)data) = !!(smmu_domain->attributes
+			& (1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR));
+		ret = 0;
+		break;
 	case DOMAIN_ATTR_USE_UPSTREAM_HINT:
 		*((int *)data) = !!(smmu_domain->attributes &
 				   (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
@@ -2918,6 +2944,12 @@
 		}
 		smmu_domain->secure_vmid = *((int *)data);
 		break;
+	case DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR:
+		if (*((int *)data))
+			smmu_domain->attributes |=
+				1 << DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR;
+		ret = 0;
+		break;
 	case DOMAIN_ATTR_FAST:
 		if (*((int *)data))
 			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
@@ -3127,7 +3159,10 @@
 
 static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
 {
-	arm_smmu_tlb_inv_context(to_smmu_domain(domain));
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	const struct iommu_gather_ops *tlb = smmu_domain->pgtbl_cfg.tlb;
+
+	tlb->tlb_flush_all(smmu_domain);
 }
 
 static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
@@ -4289,6 +4324,14 @@
 	struct list_head		tbus;
 	void __iomem			*tcu_base;
 	u32				version;
+
+	struct actlr_setting		*actlrs;
+	u32				actlr_tbl_size;
+
+	struct arm_smmu_smr		*errata1_clients;
+	u32				num_errata1_clients;
+	remote_spinlock_t		errata1_lock;
+	ktime_t				last_tlbi_ktime;
 };
 #define get_qsmmuv500_archdata(smmu)				\
 	((struct qsmmuv500_archdata *)(smmu->archdata))
@@ -4309,6 +4352,118 @@
 	u32				halt_count;
 };
 
+static bool arm_smmu_domain_match_smr(struct arm_smmu_domain *smmu_domain,
+				      struct arm_smmu_smr *smr)
+{
+	struct arm_smmu_smr *smr2;
+	int i, idx;
+
+	for_each_cfg_sme(smmu_domain->dev->iommu_fwspec, i, idx) {
+		smr2 = &smmu_domain->smmu->smrs[idx];
+		/* Continue if table entry does not match */
+		if ((smr->id ^ smr2->id) & ~(smr->mask | smr2->mask))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+#define ERRATA1_REMOTE_SPINLOCK       "S:6"
+#define ERRATA1_TLBI_INTERVAL_US		10
+static bool
+qsmmuv500_errata1_required(struct arm_smmu_domain *smmu_domain,
+				 struct qsmmuv500_archdata *data)
+{
+	bool ret = false;
+	int j;
+	struct arm_smmu_smr *smr;
+
+	if (smmu_domain->qsmmuv500_errata1_init)
+		return smmu_domain->qsmmuv500_errata1_client;
+
+	for (j = 0; j < data->num_errata1_clients; j++) {
+		smr = &data->errata1_clients[j];
+		if (arm_smmu_domain_match_smr(smmu_domain, smr)) {
+			ret = true;
+			break;
+		}
+	}
+
+	smmu_domain->qsmmuv500_errata1_init = true;
+	smmu_domain->qsmmuv500_errata1_client = ret;
+	return ret;
+}
+
+static void __qsmmuv500_errata1_tlbiall(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct device *dev = smmu_domain->dev;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *base;
+	ktime_t cur;
+	u32 val;
+
+	base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
+	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
+	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+				      !(val & TLBSTATUS_SACTIVE), 0, 100)) {
+		cur = ktime_get();
+		trace_errata_throttle_start(dev, 0);
+
+		msm_bus_noc_throttle_wa(true);
+		if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+				      !(val & TLBSTATUS_SACTIVE), 0, 10000)) {
+			dev_err(smmu->dev, "ERRATA1 TLBSYNC timeout");
+			trace_errata_failed(dev, 0);
+		}
+
+		msm_bus_noc_throttle_wa(false);
+
+		trace_errata_throttle_end(
+				dev, ktime_us_delta(ktime_get(), cur));
+	}
+}
+
+/* Must be called with clocks/regulators enabled */
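+/*
+ * Invalidations are serialized under a remote spinlock and, for
+ * errata-affected clients, consecutive invalidations are spaced at least
+ * ERRATA1_TLBI_INTERVAL_US apart (see the delta/udelay handling below).
+ */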
+static void qsmmuv500_errata1_tlb_inv_context(void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct device *dev = smmu_domain->dev;
+	struct qsmmuv500_archdata *data =
+			get_qsmmuv500_archdata(smmu_domain->smmu);
+	ktime_t cur;
+	bool errata;
+
+	cur = ktime_get();
+	trace_errata_tlbi_start(dev, 0);
+
+	errata = qsmmuv500_errata1_required(smmu_domain, data);
+	remote_spin_lock(&data->errata1_lock);
+	if (errata) {
+		s64 delta;
+
+		delta = ktime_us_delta(ktime_get(), data->last_tlbi_ktime);
+		if (delta < ERRATA1_TLBI_INTERVAL_US)
+			udelay(ERRATA1_TLBI_INTERVAL_US - delta);
+
+		__qsmmuv500_errata1_tlbiall(smmu_domain);
+
+		data->last_tlbi_ktime = ktime_get();
+	} else {
+		__qsmmuv500_errata1_tlbiall(smmu_domain);
+	}
+	remote_spin_unlock(&data->errata1_lock);
+
+	trace_errata_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+}
+
+static struct iommu_gather_ops qsmmuv500_errata1_smmu_gather_ops = {
+	.tlb_flush_all	= qsmmuv500_errata1_tlb_inv_context,
+	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
+	.free_pages_exact = arm_smmu_free_pages_exact,
+};
+
 static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
 {
 	unsigned long flags;
@@ -4581,6 +4736,38 @@
 	return 0;
 }
 
+static int qsmmuv500_parse_errata1(struct arm_smmu_device *smmu)
+{
+	int len, i;
+	struct device *dev = smmu->dev;
+	struct qsmmuv500_archdata *data = get_qsmmuv500_archdata(smmu);
+	struct arm_smmu_smr *smrs;
+	const __be32 *cell;
+
+	cell = of_get_property(dev->of_node, "qcom,mmu500-errata-1", NULL);
+	if (!cell)
+		return 0;
+
+	remote_spin_lock_init(&data->errata1_lock, ERRATA1_REMOTE_SPINLOCK);
+	len = of_property_count_elems_of_size(
+			dev->of_node, "qcom,mmu500-errata-1", sizeof(u32) * 2);
+	if (len < 0)
+		return 0;
+
+	smrs = devm_kzalloc(dev, sizeof(*smrs) * len, GFP_KERNEL);
+	if (!smrs)
+		return -ENOMEM;
+
+	for (i = 0; i < len; i++) {
+		smrs[i].id = of_read_number(cell++, 1);
+		smrs[i].mask = of_read_number(cell++, 1);
+	}
+
+	data->errata1_clients = smrs;
+	data->num_errata1_clients = len;
+	return 0;
+}
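+
+/*
+ * Illustrative (hypothetical) DT usage for the "qcom,mmu500-errata-1"
+ * property parsed above: the value is read as a list of <sid mask> pairs,
+ * for example:
+ *
+ *	qcom,mmu500-errata-1 = <0x800 0x7f>, <0xc00 0x3f>;
+ */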
+
 static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
 {
 	struct resource *res;
@@ -4604,6 +4791,10 @@
 	data->version = readl_relaxed(data->tcu_base + TCU_HW_VERSION_HLOS1);
 	smmu->archdata = data;
 
+	ret = qsmmuv500_parse_errata1(smmu);
+	if (ret)
+		return ret;
+
 	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 	if (ret)
 		return ret;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index b5e817b..04a5c09 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -855,29 +855,28 @@
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-
 /**
- * fast_smmu_attach_device
+ * fast_smmu_init_mapping
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
- *	fast_smmu_create_mapping)
+ *	arm_iommu_create_mapping)
  *
- * Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version. More than one client might be attached to
- * the same io address space mapping.
+ * Called the first time a device is attached to this mapping.
+ * Not for dma client use.
  */
-int fast_smmu_attach_device(struct device *dev,
+int fast_smmu_init_mapping(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
-	int atomic_domain = 1;
+	int err, atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_group *group;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
-	if (mapping->base + size > (SZ_1G * 4ULL))
+	if (mapping->base + size > (SZ_1G * 4ULL)) {
+		dev_err(dev, "Iova end address too large\n");
 		return -EINVAL;
+	}
 
 	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
 				  &atomic_domain))
@@ -894,54 +893,67 @@
 	group = dev->iommu_group;
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto release_mapping;
 	}
 
 	if (iommu_get_domain_for_dev(dev)) {
 		dev_err(dev, "Device already attached to other iommu_domain\n");
-		return -EINVAL;
+		err = -EINVAL;
+		goto release_mapping;
 	}
 
-	if (iommu_attach_group(mapping->domain, group))
-		return -EINVAL;
+	/*
+	 * Need to attach prior to calling DOMAIN_ATTR_PGTBL_INFO and then
+	 * detach to be in the expected state. It's a bit messy.
+	 */
+	if (iommu_attach_group(mapping->domain, group)) {
+		err = -EINVAL;
+		goto release_mapping;
+	}
 
 	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
 				  &info)) {
 		dev_err(dev, "Couldn't get page table info\n");
-		fast_smmu_detach_device(dev, mapping);
-		return -EINVAL;
+		err = -EINVAL;
+		goto detach_group;
 	}
 	mapping->fast->pgtbl_pmds = info.pmds;
 
 	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
-				  &mapping->fast->is_smmu_pt_coherent))
-		return -EINVAL;
+				  &mapping->fast->is_smmu_pt_coherent)) {
+		err = -EINVAL;
+		goto detach_group;
+	}
 
 	mapping->fast->notifier.notifier_call = fast_smmu_notify;
 	av8l_register_notify(&mapping->fast->notifier);
 
-	dev->archdata.mapping = mapping;
-	set_dma_ops(dev, &fast_smmu_dma_ops);
-
+	iommu_detach_group(mapping->domain, group);
+	mapping->ops = &fast_smmu_dma_ops;
 	return 0;
+
+detach_group:
+	iommu_detach_group(mapping->domain, group);
+release_mapping:
+	kfree(mapping->fast->bitmap);
+	kfree(mapping->fast);
+	return err;
 }
-EXPORT_SYMBOL(fast_smmu_attach_device);
 
 /**
- * fast_smmu_detach_device
- * @dev: valid struct device pointer
+ * fast_smmu_release_mapping
+ * @kref: dma_iommu_mapping->kref
  *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * Cleans up the given iommu mapping.
  */
-void fast_smmu_detach_device(struct device *dev,
-			     struct dma_iommu_mapping *mapping)
+void fast_smmu_release_mapping(struct kref *kref)
 {
-	iommu_detach_group(mapping->domain, dev->iommu_group);
-	dev->archdata.mapping = NULL;
-	set_dma_ops(dev, NULL);
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
 
 	kvfree(mapping->fast->bitmap);
 	kfree(mapping->fast);
+	iommu_domain_free(mapping->domain);
+	kfree(mapping);
 }
-EXPORT_SYMBOL(fast_smmu_detach_device);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dde2876..a3594d2 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -1011,6 +1011,11 @@
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	else if ((cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT) &&
+		(cfg->quirks & IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE))
+		reg = (ARM_LPAE_TCR_SH_NS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
 	else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a686ad0..b35016e 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -81,6 +81,12 @@
 	 *
 	 * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
 	 *	coherent.
+	 *
+	 * IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE:
+	 *	Page tables which are non-coherent but cached in a
+	 *	system cache require SH=Non-Shareable. This applies to the
+	 *	qsmmuv500 model. For data buffers SH=Non-Shareable is not
+	 *	required.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
@@ -88,6 +94,7 @@
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
 	#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(4)
 	#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT BIT(5)
+	#define IO_PGTABLE_QUIRK_QSMMUV500_NON_SHAREABLE BIT(6)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 28b26c8..0565070 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -142,9 +142,9 @@
 	struct device_node *np;
 	void __iomem *regs;
 
-	np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
 	if (!np)
-		np = of_find_compatible_node(root, NULL,
+		np = of_find_compatible_node(NULL, NULL,
 					     "atmel,at91sam9x5-rtc");
 
 	if (!np)
@@ -196,7 +196,6 @@
 		return;
 
 	match = of_match_node(matches, root);
-	of_node_put(root);
 
 	if (match) {
 		void (*fixup)(struct device_node *) = match->data;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 779001e..01f9435 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -132,6 +132,45 @@
 }
 #endif
 
+/*
+ * gic_show_pending_irqs - Shows the pending interrupts
+ * Note: Interrupts should be disabled on the CPU from which
+ * this is called to get an accurate list of pending interrupts.
+ */
+void gic_show_pending_irqs(void)
+{
+	void __iomem *base;
+	u32 pending[32], enabled;
+	unsigned int j;
+
+	base = gic_data.dist_base;
+	for (j = 0; j * 32 < gic_data.irq_nr; j++) {
+		enabled = readl_relaxed(base +
+					GICD_ISENABLER + j * 4);
+		pending[j] = readl_relaxed(base +
+					GICD_ISPENDR + j * 4);
+		pending[j] &= enabled;
+		pr_err("Pending irqs[%d] %x\n", j, pending[j]);
+	}
+}
+
+/*
+ * get_gic_highpri_irq - Returns next high priority interrupt on current CPU
+ */
+unsigned int get_gic_highpri_irq(void)
+{
+	unsigned long flags;
+	unsigned int val = 0;
+
+	local_irq_save(flags);
+	val = read_gicreg(ICC_HPPIR1_EL1);
+	local_irq_restore(flags);
+
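+	/*
+	 * INTIDs 1020-1023 are special (e.g. the spurious INTID), so report
+	 * that no interrupt is pending.
+	 */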
+	if (val >= 1020)
+		return 0;
+	return val;
+}
+
 static void gic_enable_redist(bool enable)
 {
 	void __iomem *rbase;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c0178a1..d74374f 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -1115,8 +1115,11 @@
 		gic_len = resource_size(&res);
 	}
 
-	if (mips_cm_present())
+	if (mips_cm_present()) {
 		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+		/* Ensure GIC region is enabled before trying to access it */
+		__sync();
+	}
 	gic_present = true;
 
 	__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
index 817dfa3..e3cfffb 100644
--- a/drivers/leds/leds-qpnp.c
+++ b/drivers/leds/leds-qpnp.c
@@ -875,6 +875,14 @@
 		}
 		if (led->mpp_cfg->pwm_mode == PWM_MODE) {
 			/*config pwm for brightness scaling*/
+			rc = pwm_change_mode(led->mpp_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
 			period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
 			if (period_us > INT_MAX / NSEC_PER_USEC) {
 				duty_us = (period_us * led->cdev.brightness) /
@@ -1213,7 +1221,7 @@
 
 static int qpnp_flash_set(struct qpnp_led_data *led)
 {
-	int rc, error;
+	int rc = 0, error;
 	int val = led->cdev.brightness;
 
 	if (led->flash_cfg->torch_enable)
@@ -1251,7 +1259,8 @@
 				}
 			}
 
-			qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base),
+			rc = qpnp_led_masked_write(led,
+				FLASH_MAX_CURR(led->base),
 				FLASH_CURRENT_MASK,
 				TORCH_MAX_LEVEL);
 			if (rc) {
@@ -1261,7 +1270,7 @@
 				goto error_reg_write;
 			}
 
-			qpnp_led_masked_write(led,
+			rc = qpnp_led_masked_write(led,
 				FLASH_LED_TMR_CTRL(led->base),
 				FLASH_TMR_MASK,
 				FLASH_TMR_WATCHDOG);
@@ -1293,7 +1302,7 @@
 				goto error_reg_write;
 			}
 
-			qpnp_led_masked_write(led,
+			rc = qpnp_led_masked_write(led,
 				FLASH_WATCHDOG_TMR(led->base),
 				FLASH_WATCHDOG_MASK,
 				led->flash_cfg->duration);
@@ -1341,7 +1350,7 @@
 				goto error_flash_set;
 			}
 
-			qpnp_led_masked_write(led,
+			rc = qpnp_led_masked_write(led,
 				FLASH_LED_TMR_CTRL(led->base),
 				FLASH_TMR_MASK,
 				FLASH_TMR_SAFETY);
@@ -1580,6 +1589,14 @@
 		}
 
 		if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+			rc = pwm_change_mode(led->kpdbl_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
 			period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
 			if (period_us > INT_MAX / NSEC_PER_USEC) {
 				duty_us = (period_us * led->cdev.brightness) /
@@ -1701,6 +1718,14 @@
 			led->rgb_cfg->pwm_cfg->mode =
 				led->rgb_cfg->pwm_cfg->default_mode;
 		if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+			rc = pwm_change_mode(led->rgb_cfg->pwm_cfg->pwm_dev,
+					PM_PWM_MODE_PWM);
+			if (rc < 0) {
+				dev_err(&led->pdev->dev,
+					"Failed to set PWM mode, rc = %d\n",
+					rc);
+				return rc;
+			}
 			period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
 			if (period_us > INT_MAX / NSEC_PER_USEC) {
 				duty_us = (period_us * led->cdev.brightness) /
@@ -2135,6 +2160,11 @@
 				dev_err(&pdev->dev, "Failed to configure pwm LUT\n");
 				return rc;
 			}
+			rc = pwm_change_mode(pwm_cfg->pwm_dev, PM_PWM_MODE_LPG);
+			if (rc < 0) {
+				dev_err(&pdev->dev, "Failed to set LPG mode\n");
+				return rc;
+			}
 		}
 	} else {
 		dev_err(&pdev->dev, "Invalid PWM device\n");
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index c9f3862..410c39c 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,7 +19,6 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
-#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -155,30 +154,6 @@
 	.deactivate = heartbeat_trig_deactivate,
 };
 
-static int heartbeat_pm_notifier(struct notifier_block *nb,
-				 unsigned long pm_event, void *unused)
-{
-	int rc;
-
-	switch (pm_event) {
-	case PM_SUSPEND_PREPARE:
-	case PM_HIBERNATION_PREPARE:
-	case PM_RESTORE_PREPARE:
-		led_trigger_unregister(&heartbeat_led_trigger);
-		break;
-	case PM_POST_SUSPEND:
-	case PM_POST_HIBERNATION:
-	case PM_POST_RESTORE:
-		rc = led_trigger_register(&heartbeat_led_trigger);
-		if (rc)
-			pr_err("could not re-register heartbeat trigger\n");
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_DONE;
-}
-
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
 				     unsigned long code, void *unused)
 {
@@ -193,10 +168,6 @@
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block heartbeat_pm_nb = {
-	.notifier_call = heartbeat_pm_notifier,
-};
-
 static struct notifier_block heartbeat_reboot_nb = {
 	.notifier_call = heartbeat_reboot_notifier,
 };
@@ -213,14 +184,12 @@
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &heartbeat_panic_nb);
 		register_reboot_notifier(&heartbeat_reboot_nb);
-		register_pm_notifier(&heartbeat_pm_nb);
 	}
 	return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
-	unregister_pm_notifier(&heartbeat_pm_nb);
 	unregister_reboot_notifier(&heartbeat_reboot_nb);
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &heartbeat_panic_nb);
diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c
index f0bb0bc..3b07c47 100644
--- a/drivers/mailbox/msm_qmp.c
+++ b/drivers/mailbox/msm_qmp.c
@@ -27,7 +27,7 @@
 #define QMP_VERSION	0x1
 #define QMP_FEATURES	0x0
 #define QMP_TOUT_MS	5000
-#define QMP_TX_TOUT_MS	2000
+#define QMP_TX_TOUT_MS	1000
 
 #define QMP_MBOX_LINK_DOWN		0xFFFF0000
 #define QMP_MBOX_LINK_UP		0x0000FFFF
@@ -229,7 +229,7 @@
 }
 
 /**
- * qmp_notify_timeout() - Notify client of tx timeout with -EIO
+ * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
  * @work:	Structure for work that was scheduled.
  */
 static void qmp_notify_timeout(struct work_struct *work)
@@ -237,7 +237,7 @@
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct qmp_mbox *mbox = container_of(dwork, struct qmp_mbox, dwork);
 	struct mbox_chan *chan = &mbox->ctrl.chans[mbox->idx_in_flight];
-	int err = -EIO;
+	int err = -ETIME;
 	unsigned long flags;
 
 	spin_lock_irqsave(&mbox->tx_lock, flags);
@@ -246,6 +246,7 @@
 		return;
 	}
 	pr_err("%s: qmp tx timeout for %d\n", __func__, mbox->idx_in_flight);
+	iowrite32(0, mbox->mdev->msgram + mbox->mcore_mbox_offset);
 	mbox->tx_sent = false;
 	spin_unlock_irqrestore(&mbox->tx_lock, flags);
 	mbox_chan_txdone(chan, err);
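
A minimal sketch (not part of the patch) of the delayed-work tx-timeout pattern the msm_qmp hunk above relies on: a work item armed per transmit clears the stale message slot and completes the transfer with -ETIME. struct demo_mbox and demo_tx_timeout() are placeholder names, not driver API.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/workqueue.h>

struct demo_mbox {
	struct delayed_work dwork;	/* armed when a message is written */
	void __iomem *msg_slot;		/* mapped msgram slot for this channel */
	struct mbox_chan *chan;
};

static void demo_tx_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct demo_mbox *mbox = container_of(dwork, struct demo_mbox, dwork);

	iowrite32(0, mbox->msg_slot);		/* drop the unacked message */
	mbox_chan_txdone(mbox->chan, -ETIME);	/* report the timeout upward */
}

On a normal ack path the driver would cancel the delayed work instead of letting the worker run.
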
diff --git a/drivers/mcb/mcb-lpc.c b/drivers/mcb/mcb-lpc.c
index d072c08..945091a 100644
--- a/drivers/mcb/mcb-lpc.c
+++ b/drivers/mcb/mcb-lpc.c
@@ -114,6 +114,12 @@
 	.flags = IORESOURCE_MEM,
 };
 
+static struct resource sc31_fpga_resource = {
+	.start = 0xf000e000,
+	.end = 0xf000e000 + CHAM_HEADER_SIZE,
+	.flags = IORESOURCE_MEM,
+};
+
 static struct platform_driver mcb_lpc_driver = {
 	.driver		= {
 		.name = "mcb-lpc",
@@ -132,6 +138,15 @@
 		.driver_data = (void *)&sc24_fpga_resource,
 		.callback = mcb_lpc_create_platform_device,
 	},
+	{
+		.ident = "SC31",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
+		},
+		.driver_data = (void *)&sc31_fpga_resource,
+		.callback = mcb_lpc_create_platform_device,
+	},
 	{}
 };
 MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
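
A minimal sketch (not part of the patch) of how a DMI table entry pairs a board match with an MMIO region through driver_data, mirroring the SC31 entry added above; my_fpga_resource and the 0x200 size are placeholders.

#include <linux/dmi.h>
#include <linux/ioport.h>

static struct resource my_fpga_resource = {
	.start	= 0xf000e000,
	.end	= 0xf000e000 + 0x200 - 1,	/* resource ends are inclusive */
	.flags	= IORESOURCE_MEM,
};

static const struct dmi_system_id my_dmi_table[] = {
	{
		.ident = "SC31",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
		},
		.driver_data = (void *)&my_fpga_resource,
		/* .callback runs from dmi_check_system() once all matches hit */
	},
	{ /* sentinel */ }
};
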
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index bca4c0e..e289aae 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -848,8 +848,12 @@
 		tio = tio_from_request(rq);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
-		kthread_queue_work(&md->kworker, &tio->work);
+		spin_unlock(q->queue_lock);
+		if (map_request(tio) == DM_MAPIO_REQUEUE)
+			dm_requeue_original_request(tio, false);
+
 		BUG_ON(!irqs_disabled());
+		spin_lock(q->queue_lock);
 	}
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 383f19c..549b4af 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5844,6 +5844,8 @@
 
 	spin_unlock_irq(&conf->device_lock);
 
+	r5l_flush_stripe_to_raid(conf->log);
+
 	async_tx_issue_pending_all();
 	blk_finish_plug(&plug);
 
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a18fe5d..b4857cd 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -393,11 +393,11 @@
 	msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
 	msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
 	msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
+	memcpy(msg, &msg_tmp, sizeof(*msg));
 
 	/* No need to update the read positions, because this was a peek */
 	/* If the caller specifically want to peek, return */
 	if (peekonly) {
-		memcpy(msg, &msg_tmp, sizeof(*msg));
 		goto peekout;
 	}
 
@@ -442,21 +442,15 @@
 		space_rem = bus->m_dwSizeGetRing - curr_grp;
 
 		if (space_rem < sizeof(*msg)) {
-			/* msg wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
-			memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
-				sizeof(*msg) - space_rem);
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
 					space_rem, buf_size);
 
 		} else if (space_rem == sizeof(*msg)) {
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf)
 				memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
 		} else {
 			/* Additional data wraps around the ring */
-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 			if (buf) {
 				memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
 					sizeof(*msg), space_rem - sizeof(*msg));
@@ -469,15 +463,10 @@
 
 	} else {
 		/* No wrapping */
-		memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
 		if (buf)
 			memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
 				buf_size);
 	}
-	/* Convert from little endian to CPU */
-	msg->size = le16_to_cpu((__force __le16)msg->size);
-	msg->command = le32_to_cpu((__force __le32)msg->command);
-	msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
 
 	/* Update the read positions, adjusting the ring */
 	saa7164_writel(bus->m_dwGetReadPos, new_grp);
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 6efb2f1..bdb7a0a 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1725,27 +1725,9 @@
 
 	switch (cmd) {
 	case VPFE_CMD_S_CCDC_RAW_PARAMS:
+		ret = -EINVAL;
 		v4l2_warn(&vpfe_dev->v4l2_dev,
-			  "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
-		if (ccdc_dev->hw_ops.set_params) {
-			ret = ccdc_dev->hw_ops.set_params(param);
-			if (ret) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Error setting parameters in CCDC\n");
-				goto unlock_out;
-			}
-			ret = vpfe_get_ccdc_image_format(vpfe_dev,
-							 &vpfe_dev->fmt);
-			if (ret < 0) {
-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-					"Invalid image format at CCDC\n");
-				goto unlock_out;
-			}
-		} else {
-			ret = -EINVAL;
-			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
-				"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
-		}
+			"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
 		break;
 	default:
 		ret = -ENOTTY;
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
index 043f44d..3a78b5e 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_node.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -256,7 +256,7 @@
 		return -EINVAL;
 	}
 
-	trace_cam_apply_req("Node", apply);
+	trace_cam_apply_req("Node", apply->request_id);
 
 	return cam_context_handle_crm_apply_req(ctx, apply);
 }
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index d9133b9..e3d46df 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -402,9 +402,10 @@
 			CAM_CPAS_POLL_RETRY_CNT,
 			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
 		if (rc) {
-			CAM_ERR(CAM_CPAS,
+			CAM_DBG(CAM_CPAS,
 				"camnoc flush slave pending trans failed");
 			/* Do not return error, passthrough */
+			rc = 0;
 		}
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
index f4d0e36..918258d 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -515,7 +515,7 @@
 
 static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
 	.camnoc_flush_slave_pending_trans = {
-		.enable = true,
+		.enable = false,
 		.data.reg_info = {
 			.access_type = CAM_REG_TYPE_READ,
 			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 4c29ffd..f23c4c1 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -15,6 +15,7 @@
 
 #include "cam_debug_util.h"
 #include "cam_fd_context.h"
+#include "cam_trace.h"
 
 /* Functions in Available state */
 static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
@@ -29,6 +30,7 @@
 	}
 
 	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
 
 	return rc;
 }
@@ -46,6 +48,7 @@
 	}
 
 	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("FD", ctx);
 
 	return rc;
 }
@@ -76,6 +79,7 @@
 	}
 
 	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("FD", ctx);
 
 	return rc;
 }
@@ -93,6 +97,7 @@
 	}
 
 	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index d226e17..37e6954 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -26,6 +26,7 @@
 #include "cam_fd_hw_soc.h"
 #include "cam_fd_hw_mgr_intf.h"
 #include "cam_fd_hw_mgr.h"
+#include "cam_trace.h"
 
 static struct cam_fd_hw_mgr g_fd_hw_mgr;
 
@@ -334,7 +335,7 @@
 	/* Update required info in hw context */
 	hw_ctx->device_index = i;
 
-	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	return 0;
@@ -480,6 +481,53 @@
 	return rc;
 }
 
+static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction,
+	uint32_t resource_type, bool *need_io_map, bool *need_cpu_map)
+{
+	if (!need_io_map || !need_cpu_map) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", need_io_map,
+			need_cpu_map);
+		return -EINVAL;
+	}
+
+	if (direction == CAM_BUF_INPUT) {
+		switch (resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else if (direction == CAM_BUF_OUTPUT) {
+		switch (resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else {
+		CAM_WARN(CAM_FD, "Invalid direction %d", direction);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
 	struct cam_hw_prepare_update_args *prepare,
 	struct cam_fd_hw_io_buffer *input_buf,
@@ -491,6 +539,7 @@
 	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
 	uint64_t cpu_addr[CAM_PACKET_MAX_PLANES];
 	size_t size;
+	bool need_io_map, need_cpu_map;
 
 	/* Get IO Buf information */
 	num_out_buf = 0;
@@ -512,32 +561,55 @@
 			return -EINVAL;
 		}
 
+		rc = cam_fd_mgr_util_get_buf_map_requirement(
+			io_cfg[i].direction, io_cfg[i].resource_type,
+			&need_io_map, &need_cpu_map);
+		if (rc) {
+			CAM_WARN(CAM_FD, "Invalid io buff [%d] : %d %d %d",
+				i, io_cfg[i].direction,
+				io_cfg[i].resource_type, rc);
+			continue;
+		}
+
 		memset(io_addr, 0x0, sizeof(io_addr));
 		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
 			if (!io_cfg[i].mem_handle[plane])
 				break;
 
-			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
-				iommu_hdl, &io_addr[plane], &size);
-			if ((rc) || (io_addr[plane] >> 32)) {
-				CAM_ERR(CAM_FD, "Invalid io addr for %d %d",
-					plane, rc);
-				return -ENOMEM;
+			io_addr[plane] = 0x0;
+			cpu_addr[plane] = 0x0;
+
+			if (need_io_map) {
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane],
+					iommu_hdl, &io_addr[plane], &size);
+				if ((rc) || (io_addr[plane] >> 32)) {
+					CAM_ERR(CAM_FD,
+						"Invalid io buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return -ENOMEM;
+				}
+
+				io_addr[plane] += io_cfg[i].offsets[plane];
 			}
 
-			/*
-			 * Buffers may be accessed by CPU as well, we do not
-			 * know at this point, so get both and send to HW layer
-			 */
-			rc = cam_mem_get_cpu_buf(io_cfg[i].mem_handle[plane],
-				&cpu_addr[plane], &size);
-			if (rc) {
-				CAM_ERR(CAM_FD, "unable to get buf address");
-				return rc;
-			}
+			if (need_cpu_map) {
+				rc = cam_mem_get_cpu_buf(
+					io_cfg[i].mem_handle[plane],
+					&cpu_addr[plane], &size);
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Invalid cpu buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return rc;
+				}
 
-			io_addr[plane] += io_cfg[i].offsets[plane];
-			cpu_addr[plane] += io_cfg[i].offsets[plane];
+				cpu_addr[plane] += io_cfg[i].offsets[plane];
+			}
 
 			CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK",
 				io_cfg[i].direction, plane, io_addr[plane],
@@ -764,6 +836,8 @@
 		return -EBUSY;
 	}
 
+	trace_cam_submit_to_hw("FD", frame_req->request_id);
+
 	list_del_init(&frame_req->list);
 	mutex_unlock(&hw_mgr->frame_req_mutex);
 
@@ -924,6 +998,8 @@
 		frame_abort = false;
 	}
 
+	trace_cam_irq_handled("FD", irq_type);
+
 notify_context:
 	/* Do a callback to inform frame done or stop done */
 	if (frame_req->hw_ctx->event_cb) {
@@ -1158,7 +1234,7 @@
 		return -EPERM;
 	}
 
-	CAM_DBG(CAM_FD, "ctx index=%d, hw_ctx=%d", hw_ctx->ctx_index,
+	CAM_DBG(CAM_FD, "ctx index=%d, device_index=%d", hw_ctx->ctx_index,
 		hw_ctx->device_index);
 
 	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
@@ -1300,7 +1376,7 @@
 
 	/* We do not expect any patching, but just do it anyway */
 	rc = cam_packet_util_process_patches(prepare->packet,
-		hw_mgr->device_iommu.non_secure);
+		hw_mgr->device_iommu.non_secure, -1);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
 		return rc;
@@ -1389,6 +1465,8 @@
 	}
 
 	frame_req = config->priv;
+
+	trace_cam_apply_req("FD", frame_req->request_id);
 	CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);
 
 	frame_req->num_hw_update_entries = config->num_hw_update_entries;
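
As a design note, the direction/port switch in cam_fd_mgr_util_get_buf_map_requirement() above could equally be expressed as data. A hypothetical table-driven rendering of the same policy, for comparison only; the CAM_* defines are the camera UAPI ones already used in the hunk, the table itself is not part of the patch.

#include <linux/types.h>
/* CAM_BUF_* and CAM_FD_*_PORT_ID_* come from the camera UAPI headers
 * already referenced by the hunk above.
 */

struct fd_port_map_policy {
	uint32_t direction;
	uint32_t resource_type;
	bool need_io_map;
	bool need_cpu_map;
};

static const struct fd_port_map_policy fd_map_policy[] = {
	{ CAM_BUF_INPUT,  CAM_FD_INPUT_PORT_ID_IMAGE,        true,  false },
	{ CAM_BUF_OUTPUT, CAM_FD_OUTPUT_PORT_ID_RESULTS,     true,  true  },
	{ CAM_BUF_OUTPUT, CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS, true,  true  },
	{ CAM_BUF_OUTPUT, CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER, true,  false },
};

The switch form kept by the patch makes the invalid-port warning path explicit, which a bare table lookup would not.
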
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index 51fcdcaa..11a81d6 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -12,6 +12,7 @@
 
 #include "cam_fd_hw_core.h"
 #include "cam_fd_hw_soc.h"
+#include "cam_trace.h"
 
 #define CAM_FD_REG_VAL_PAIR_SIZE 256
 
@@ -30,6 +31,7 @@
 static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
 	enum cam_cdm_cb_status status, uint32_t cookie)
 {
+	trace_cam_cdm_cb("FD", status);
 	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%d",
 		handle, userdata, status, cookie);
 }
@@ -110,9 +112,6 @@
 	/* Before triggering reset to HW, clear the reset complete */
 	reinit_completion(&fd_core->reset_complete);
 
-	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
-		hw_static_info->core_regs.control, 0x1);
-
 	if (hw_static_info->enable_errata_wa.single_irq_only) {
 		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
 			hw_static_info->wrapper_regs.irq_mask,
@@ -124,13 +123,11 @@
 
 	time_left = wait_for_completion_timeout(&fd_core->reset_complete,
 		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
-	if (time_left <= 0) {
-		CAM_ERR(CAM_FD, "HW reset wait failed time_left=%d", time_left);
-		return -EPERM;
-	}
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW reset timeout time_left=%d", time_left);
 
 	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
-		hw_static_info->core_regs.control, 0x0);
+		hw_static_info->core_regs.control, 0x1);
 
 	CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
 
@@ -148,9 +145,6 @@
 	/* Before triggering halt to HW, clear halt complete */
 	reinit_completion(&fd_core->halt_complete);
 
-	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
-		hw_static_info->core_regs.control, 0x1);
-
 	if (hw_static_info->enable_errata_wa.single_irq_only) {
 		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
 			hw_static_info->wrapper_regs.irq_mask,
@@ -162,13 +156,8 @@
 
 	time_left = wait_for_completion_timeout(&fd_core->halt_complete,
 		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
-	if (time_left <= 0) {
-		CAM_ERR(CAM_FD, "HW halt wait failed time_left=%d", time_left);
-		return -EPERM;
-	}
-
-	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
-		hw_static_info->core_regs.control, 0x0);
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW halt timeout time_left=%d", time_left);
 
 	CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
 
@@ -397,12 +386,22 @@
 	prestart_args->pre_config_buf_size =
 		prestart_args->size - available_size;
 
-	/*
-	 * Currently, no post config commands, we trigger HW start directly
-	 * from start(). Start trigger command can be inserted into CDM
-	 * as post config commands.
-	 */
-	prestart_args->post_config_buf_size = 0;
+	/* Insert start trigger command into CDM as post config commands. */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0,
+		hw_static_info->core_regs.control, 0x2);
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d, expected size:%d",
+			available_size, size);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	prestart_args->post_config_buf_size = size * 4;
 
 	CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
 		prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
@@ -573,6 +572,8 @@
 		return -EINVAL;
 	}
 
+	trace_cam_irq_activated("FD", irq_type);
+
 	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
 		hw_static_info->wrapper_regs.irq_clear,
 		hw_static_info->irq_mask);
@@ -795,6 +796,12 @@
 	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
 	spin_unlock(&fd_core->spin_lock);
 
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
 	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
@@ -893,9 +900,6 @@
 		goto error;
 	}
 
-	cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_CORE,
-		hw_static_info->core_regs.control, 0x2);
-
 	return 0;
 error:
 	spin_lock(&fd_core->spin_lock);
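
A generic sketch (not part of the patch) of the completion-with-timeout pattern the halt/reset rework above follows, with the wait downgraded to a warning on expiry; fd_trigger_hw() is a placeholder for the register writes, and the 100 ms budget mirrors the CAM_FD_HW_HALT_RESET_TIMEOUT change in the next file.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static void fd_trigger_hw(void)
{
	/* placeholder: write the halt/reset trigger register here; the
	 * IRQ handler is expected to call complete() on the waiter below
	 */
}

static void fd_wait_halt_reset(struct completion *done)
{
	unsigned long time_left;

	reinit_completion(done);	/* clear any stale completion first */
	fd_trigger_hw();

	time_left = wait_for_completion_timeout(done,
		msecs_to_jiffies(100));
	if (!time_left)
		pr_warn("FD halt/reset timed out, continuing\n");
}
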
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
index 35bf6b6..bdd72af 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
@@ -30,7 +30,7 @@
 #define CAM_FD_IRQ_TO_MASK(irq)        (1 << (irq))
 #define CAM_FD_MASK_TO_IRQ(mask, irq)  ((mask) >> (irq))
 
-#define CAM_FD_HW_HALT_RESET_TIMEOUT   3000
+#define CAM_FD_HW_HALT_RESET_TIMEOUT   100
 
 /**
  * enum cam_fd_core_state - FD Core internal states
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
index bbdff27..8a0d5f7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -41,8 +41,8 @@
 struct cam_icp_subdev {
 	struct cam_subdev sd;
 	struct cam_node *node;
-	struct cam_context ctx[CAM_CTX_MAX];
-	struct cam_icp_context ctx_icp[CAM_CTX_MAX];
+	struct cam_context ctx[CAM_ICP_CTX_MAX];
+	struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX];
 	struct mutex icp_lock;
 	int32_t open_cnt;
 	int32_t reserved;
@@ -164,7 +164,7 @@
 		goto hw_init_fail;
 	}
 
-	for (i = 0; i < CAM_CTX_MAX; i++) {
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
 		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
 		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
 					hw_mgr_intf);
@@ -175,7 +175,7 @@
 	}
 
 	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
-				CAM_CTX_MAX, CAM_ICP_DEV_NAME);
+				CAM_ICP_CTX_MAX, CAM_ICP_DEV_NAME);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "ICP node init failed");
 		goto ctx_fail;
@@ -220,7 +220,7 @@
 		return -ENODEV;
 	}
 
-	for (i = 0; i < CAM_CTX_MAX; i++)
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
 		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
 	cam_node_deinit(g_icp_dev.node);
 	cam_subdev_remove(&g_icp_dev.sd);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 7c5b405..3354e2c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -880,6 +880,12 @@
 	buf_data.request_id = hfi_frame_process->request_id[idx];
 	ctx_data->ctxt_event_cb(ctx_data->context_priv, flag, &buf_data);
 	hfi_frame_process->request_id[idx] = 0;
+	if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_resource[idx]);
+		cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
 	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
 	hfi_frame_process->fw_process_flag[idx] = false;
 	mutex_unlock(&ctx_data->ctx_mutex);
@@ -1550,6 +1556,7 @@
 	cam_icp_mgr_device_deinit(hw_mgr);
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
+	hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return rc;
@@ -1973,13 +1980,16 @@
 	return rc;
 }
 
-static void cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_icp_hw_ctx_data *ctx_data,
 	struct cam_packet *packet,
-	struct cam_hw_prepare_update_args *prepare_args)
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t index)
 {
-	int i, j, k;
+	int i, j, k, rc = 0;
 	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	int32_t sync_in_obj[CAM_MAX_OUT_RES];
+	int32_t merged_sync_in_obj;
 
 	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
 				packet->io_configs_offset/4);
@@ -1988,8 +1998,7 @@
 
 	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
 		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
-			prepare_args->in_map_entries[j++].sync_id =
-				io_cfg_ptr[i].fence;
+			sync_in_obj[j++] = io_cfg_ptr[i].fence;
 			prepare_args->num_in_map_entries++;
 		} else {
 			prepare_args->out_map_entries[k++].sync_id =
@@ -1999,6 +2008,33 @@
 		CAM_DBG(CAM_ICP, "dir[%d]: %u, fence: %u",
 			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
 	}
+
+	if (prepare_args->num_in_map_entries > 1) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prepare_args->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prepare_args->num_out_map_entries = 0;
+			prepare_args->num_in_map_entries = 0;
+			return rc;
+		}
+
+		ctx_data->hfi_frame_process.in_resource[index] =
+			merged_sync_in_obj;
+		prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prepare_args->num_in_map_entries = 1;
+		CAM_DBG(CAM_ICP, "Merged Sync obj = %d", merged_sync_in_obj);
+	} else if (prepare_args->num_in_map_entries == 1) {
+		prepare_args->in_map_entries[0].sync_id = sync_in_obj[0];
+		prepare_args->num_in_map_entries = 1;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+	} else {
+		CAM_ERR(CAM_ICP, "No input fences");
+		prepare_args->num_in_map_entries = 0;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+		rc = -EINVAL;
+	}
+
+	return rc;
 }
 
 static int cam_icp_packet_generic_blob_handler(void *user_data,
@@ -2146,21 +2182,28 @@
 	}
 
 	/* Update Buffer Address from handles and patch information */
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+		hw_mgr->iommu_sec_hdl);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
 	}
 
-	cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
-		packet, prepare_args);
-
 	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
 		prepare_args, &idx);
 	if (rc) {
-		if (prepare_args->in_map_entries[0].sync_id > 0)
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+		packet, prepare_args, idx);
+	if (rc) {
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0)
 			cam_sync_destroy(
-				prepare_args->in_map_entries[0].sync_id);
+				ctx_data->hfi_frame_process.in_resource[idx]);
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[idx] = -1;
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
 	}
@@ -2195,6 +2238,13 @@
 
 		/* now release memory for hfi frame process command */
 		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
 		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
 	}
 	mutex_unlock(&ctx_data->ctx_mutex);
@@ -2239,6 +2289,7 @@
 			NULL, 0);
 		cam_icp_mgr_hw_close(hw_mgr, NULL);
 		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
+		hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	}
 
 	return rc;
@@ -2384,6 +2435,12 @@
 		sizeof(struct cam_icp_acquire_dev_info)))
 		return -EFAULT;
 
+	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_ICP, "Invalid mode:%d",
+			icp_dev_acquire_info.secure_mode);
+		return -EINVAL;
+	}
+
 	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
 		CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
 			icp_dev_acquire_info.num_out_res);
@@ -2396,28 +2453,46 @@
 		return -EFAULT;
 	}
 
+	if (!hw_mgr->ctxt_cnt) {
+		hw_mgr->secure_mode = icp_dev_acquire_info.secure_mode;
+	} else {
+		if (hw_mgr->secure_mode != icp_dev_acquire_info.secure_mode) {
+			CAM_ERR(CAM_ICP,
+				"secure mode mismatch driver:%d, context:%d",
+				hw_mgr->secure_mode,
+				icp_dev_acquire_info.secure_mode);
+			return -EINVAL;
+		}
+	}
+
 	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
 		(icp_dev_acquire_info.num_out_res *
 		sizeof(struct cam_icp_res_info));
 	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
-	if (!ctx_data->icp_dev_acquire_info)
+	if (!ctx_data->icp_dev_acquire_info) {
+		if (!hw_mgr->ctxt_cnt)
+			hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 		return -ENOMEM;
+	}
 
 	if (copy_from_user(ctx_data->icp_dev_acquire_info,
 		(void __user *)args->acquire_info, acquire_size)) {
+		if (!hw_mgr->ctxt_cnt)
+			hw_mgr->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 		kfree(ctx_data->icp_dev_acquire_info);
 		ctx_data->icp_dev_acquire_info = NULL;
 		return -EFAULT;
 	}
 
-	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x %u",
 		ctx_data->icp_dev_acquire_info->dev_type,
 		ctx_data->icp_dev_acquire_info->in_res.format,
 		ctx_data->icp_dev_acquire_info->in_res.width,
 		ctx_data->icp_dev_acquire_info->in_res.height,
 		ctx_data->icp_dev_acquire_info->in_res.fps,
 		ctx_data->icp_dev_acquire_info->num_out_res,
-		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+		ctx_data->icp_dev_acquire_info->scratch_mem_size,
+		hw_mgr->secure_mode);
 
 	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
 	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
@@ -2470,18 +2545,10 @@
 		goto acquire_info_failed;
 	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
 
-	/* Get IOCONFIG command info */
-	if (icp_dev_acquire_info->secure_mode)
-		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info->io_config_cmd_handle,
-			hw_mgr->iommu_sec_hdl,
-			&io_buf_addr, &io_buf_size);
-	else
-		rc = cam_mem_get_io_buf(
-			icp_dev_acquire_info->io_config_cmd_handle,
-			hw_mgr->iommu_hdl,
-			&io_buf_addr, &io_buf_size);
-
+	rc = cam_mem_get_io_buf(
+		icp_dev_acquire_info->io_config_cmd_handle,
+		hw_mgr->iommu_hdl,
+		&io_buf_addr, &io_buf_size);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
 		goto get_io_buf_failed;
@@ -2808,6 +2875,7 @@
 	hw_mgr_intf->download_fw = cam_icp_mgr_download_fw;
 	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close;
 
+	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
 	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
 	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
 
@@ -2820,7 +2888,7 @@
 
 	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
 	if (rc) {
-		CAM_ERR(CAM_ICP, "icp get iommu handle failed: %d", rc);
+		CAM_ERR(CAM_ICP, "get mmu handle failed: %d", rc);
 		goto icp_get_hdl_failed;
 	}
 
@@ -2830,6 +2898,12 @@
 		goto icp_attach_failed;
 	}
 
+	rc = cam_smmu_get_handle("cam-secure", &icp_hw_mgr.iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
 	rc = cam_icp_mgr_create_wq();
 	if (rc)
 		goto icp_wq_create_failed;
@@ -2839,6 +2913,9 @@
 	return rc;
 
 icp_wq_create_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_sec_hdl);
+	icp_hw_mgr.iommu_sec_hdl = -1;
+secure_hdl_failed:
 	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
 icp_attach_failed:
 	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
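
A condensed sketch (not part of the patch) of the input-fence handling added above: several acquire fences are merged into one sync object via cam_sync_merge(), recorded in hfi_frame_process.in_resource[idx], and later released with cam_sync_destroy() on frame done. icp_merge_input_fences() is an illustrative helper, not driver API.

#include <linux/types.h>
#include "cam_sync_api.h"	/* cam_sync_merge(), as used in the hunks above */

/* Illustrative helper: returns a single sync object to wait on. */
static int icp_merge_input_fences(int32_t *fences, uint32_t num,
	int32_t *merged_out)
{
	if (num == 1) {
		*merged_out = fences[0];	/* nothing to merge */
		return 0;
	}

	/*
	 * N acquire fences -> one merged object. The caller stores it in
	 * hfi_frame_process.in_resource[idx] and must cam_sync_destroy()
	 * it once the frame completes, exactly as the hunks above do.
	 */
	return cam_sync_merge(fences, num, merged_out);
}
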
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d4f5482..d1793e6 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -114,6 +114,7 @@
 	uint64_t request_id[CAM_FRAME_CMD_MAX];
 	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
 	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+	uint32_t in_resource[CAM_FRAME_CMD_MAX];
 	uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
 	struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
 };
@@ -226,6 +227,7 @@
  * @icp_debug_clk: Set clock based on debug value
  * @icp_default_clk: Set this clok if user doesn't supply
  * @clk_info: Clock info of hardware
+ * @secure_mode: Flag to enable/disable secure camera
  */
 struct cam_icp_hw_mgr {
 	struct mutex hw_mgr_mutex;
@@ -255,6 +257,7 @@
 	uint64_t icp_debug_clk;
 	uint64_t icp_default_clk;
 	struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
+	bool secure_mode;
 };
 
 static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
index 9300ea8..6915ad5 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -13,8 +13,6 @@
 #ifndef CAM_ICP_HW_INTF_H
 #define CAM_ICP_HW_INTF_H
 
-#define CAM_ICP_CTX_MAX                 8
-
 #define CAM_ICP_CMD_BUF_MAX_SIZE     128
 #define CAM_ICP_MSG_BUF_MAX_SIZE     CAM_ICP_CMD_BUF_MAX_SIZE
 
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index d99a878..4f6fce8 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -20,6 +20,7 @@
 
 #define ICP_TURBO_VOTE           600000000
 #define ICP_SVS_VOTE             400000000
+#define CAM_ICP_CTX_MAX          36
 
 int cam_icp_hw_mgr_init(struct device_node *of_node,
 	uint64_t *hw_mgr_hdl);
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 0ee368d..4ecb36e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -24,6 +24,46 @@
 #include "cam_trace.h"
 #include "cam_debug_util.h"
 
+static int __cam_isp_ctx_enqueue_request_in_order(
+	struct cam_context *ctx, struct cam_ctx_request *req)
+{
+	struct cam_ctx_request           *req_current;
+	struct cam_ctx_request           *req_prev;
+	struct list_head                  temp_list;
+
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock_bh(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list)) {
+		list_add_tail(&req->list, &ctx->pending_req_list);
+	} else {
+		list_for_each_entry_safe_reverse(
+			req_current, req_prev, &ctx->pending_req_list, list) {
+			if (req->request_id < req_current->request_id) {
+				list_del_init(&req_current->list);
+				list_add(&req_current->list, &temp_list);
+				continue;
+			} else if (req->request_id == req_current->request_id) {
+				CAM_WARN(CAM_ISP,
+					"Received duplicated request %lld",
+					req->request_id);
+			}
+			break;
+		}
+		list_add_tail(&req->list, &ctx->pending_req_list);
+
+		if (!list_empty(&temp_list)) {
+			list_for_each_entry_safe(
+				req_current, req_prev, &temp_list, list) {
+				list_del_init(&req_current->list);
+				list_add_tail(&req_current->list,
+					&ctx->pending_req_list);
+			}
+		}
+	}
+	spin_unlock_bh(&ctx->lock);
+	return 0;
+}
+
 static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
 {
 	uint64_t ts = 0;
@@ -256,7 +296,7 @@
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
 		rc = -EFAULT;
 	}
 
@@ -574,7 +614,8 @@
 	 */
 
 	if (list_empty(&ctx->active_req_list)) {
-		CAM_ERR(CAM_ISP, "handling error with no active request");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"handling error with no active request");
 		rc = -EINVAL;
 		goto end;
 	}
@@ -588,10 +629,10 @@
 		notify.req_id = req->request_id;
 
 		ctx->ctx_crm_intf->notify_err(&notify);
-		CAM_ERR(CAM_ISP, "Notify CRM about ERROR frame %lld",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Notify CRM about ERROR frame %lld",
 			ctx_isp->frame_id);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify ERRROR to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERROR to CRM");
 		rc = -EFAULT;
 	}
 
@@ -724,7 +765,7 @@
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not apply the configuration");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
 	} else {
 		spin_lock_bh(&ctx->lock);
 		ctx_isp->substate_activated = next_state;
@@ -964,7 +1005,7 @@
 		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
 			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
 	} else {
-		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
 	}
 
 	if (list_empty(&ctx->active_req_list))
@@ -1477,9 +1518,7 @@
 	CAM_DBG(CAM_ISP, "Packet request id 0x%llx",
 		packet->header.request_id);
 
-	spin_lock_bh(&ctx->lock);
-	list_add_tail(&req->list, &ctx->pending_req_list);
-	spin_unlock_bh(&ctx->lock);
+	__cam_isp_ctx_enqueue_request_in_order(ctx, req);
 
 	CAM_DBG(CAM_ISP, "Preprocessing Config %lld successful",
 		req->request_id);
@@ -1875,7 +1914,7 @@
 	struct cam_isp_context *ctx_isp =
 		(struct cam_isp_context *) ctx->ctx_priv;
 
-	trace_cam_apply_req("ISP", apply);
+	trace_cam_apply_req("ISP", apply->request_id);
 	CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request _id:%lld",
 		 ctx_isp->substate_activated, apply->request_id);
 	if (ctx_isp->substate_machine[ctx_isp->substate_activated].
@@ -1883,14 +1922,16 @@
 		rc = ctx_isp->substate_machine[ctx_isp->substate_activated].
 			crm_ops.apply_req(ctx, apply);
 	} else {
-		CAM_ERR(CAM_ISP, "No handle function in activated substate %d",
-			 ctx_isp->substate_activated);
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No handle function in activated substate %d",
+			ctx_isp->substate_activated);
 		rc = -EFAULT;
 	}
 
 	if (rc)
-		CAM_ERR(CAM_ISP, "Apply failed in active substate %d",
-			 ctx_isp->substate_activated);
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Apply failed in active substate %d",
+			ctx_isp->substate_activated);
 	return rc;
 }
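
A simplified sketch (not part of the patch) of the ordered-insert idea behind __cam_isp_ctx_enqueue_request_in_order() above, on a generic kernel list; struct demo_req and demo_enqueue_in_order() are placeholders, and the patch's version additionally holds the context spinlock and warns on duplicate request ids.

#include <linux/list.h>
#include <linux/types.h>

struct demo_req {
	uint64_t request_id;
	struct list_head list;
};

/*
 * Walk from the tail so the common in-order arrival stays O(1); insert
 * right after the first entry whose id is not larger than the new one.
 */
static void demo_enqueue_in_order(struct list_head *head,
	struct demo_req *req)
{
	struct demo_req *cur;

	list_for_each_entry_reverse(cur, head, list) {
		if (cur->request_id <= req->request_id) {
			list_add(&req->list, &cur->list);
			return;
		}
	}
	list_add(&req->list, head);	/* smallest id seen so far: front */
}
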
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index d0b0751..d84be30 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -64,7 +64,7 @@
 		sizeof(struct cam_isp_query_cap_cmd)))
 		rc = -EFAULT;
 
-	CAM_DBG(CAM_ISP, "exit rc :%d !", rc);
+	CAM_DBG(CAM_ISP, "exit rc :%d", rc);
 
 	return rc;
 }
@@ -111,7 +111,7 @@
 
 	return 0;
 err:
-	CAM_ERR(CAM_ISP, "INIT HW res failed! (type:%d, id:%d)",
+	CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -132,7 +132,7 @@
 				isp_hw_res->hw_res[i],
 				sizeof(struct cam_isp_resource_node));
 			if (rc) {
-				CAM_ERR(CAM_ISP, "Can not start HW resources!");
+				CAM_ERR(CAM_ISP, "Can not start HW resources");
 				goto err;
 			}
 		} else {
@@ -143,7 +143,7 @@
 
 	return 0;
 err:
-	CAM_ERR(CAM_ISP, "Start hw res failed! (type:%d, id:%d)",
+	CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
 		isp_hw_res->res_type, isp_hw_res->res_id);
 	return rc;
 }
@@ -210,7 +210,7 @@
 			struct cam_ife_hw_mgr_res, list);
 		list_del_init(&res_ptr->list);
 	} else {
-		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
 		rc = -1;
 	}
 	*res = res_ptr;
@@ -235,7 +235,7 @@
 				sizeof(struct cam_isp_resource_node));
 			if (rc)
 				CAM_ERR(CAM_ISP,
-					"Release hw resrouce id %d failed!",
+					"Release hw resource id %d failed",
 					isp_hw_res->res_id);
 			isp_hw_res->hw_res[i] = NULL;
 		} else
@@ -362,7 +362,7 @@
 			struct cam_ife_hw_mgr_ctx, list);
 		list_del_init(&ctx_ptr->list);
 	} else {
-		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx!");
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
 		rc = -1;
 	}
 	*ife_ctx = ctx_ptr;
@@ -417,7 +417,7 @@
 	uint32_t i;
 
 	if (list_empty(&ctx->res_list_ife_src)) {
-		CAM_ERR(CAM_ISP, "Error! Mux List empty");
+		CAM_ERR(CAM_ISP, "Mux List empty");
 		return -ENODEV;
 	}
 
@@ -474,6 +474,8 @@
 		CAM_ERR(CAM_ISP, "invalid resource type");
 		goto err;
 	}
+	CAM_DBG(CAM_ISP, "vfe_in_res_id = %d, vfe_out_res_id = %d",
+		vfe_in_res_id, vfe_out_res_id);
 
 	vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
 	vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
@@ -482,6 +484,9 @@
 	for (i = 0; i < in_port->num_out_res; i++) {
 		out_port = &in_port->data[i];
 
+		CAM_DBG(CAM_ISP, "i = %d, vfe_out_res_id = %d, out_port: %d",
+			i, vfe_out_res_id, out_port->res_type);
+
 		if (vfe_out_res_id != out_port->res_type)
 			continue;
 
@@ -503,7 +508,9 @@
 	}
 
 	if (i == in_port->num_out_res) {
-		CAM_ERR(CAM_ISP, "Can not acquire out resource");
+		CAM_ERR(CAM_ISP,
+			"Cannot acquire out resource, i=%d, num_out_res=%d",
+			i, in_port->num_out_res);
 		goto err;
 	}
 
@@ -511,6 +518,7 @@
 	ife_out_res->is_dual_vfe = 0;
 	ife_out_res->res_id = vfe_out_res_id;
 	ife_out_res->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
 
 	return 0;
 err:
@@ -633,7 +641,8 @@
 				ife_src_res, in_port);
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Fatal: Unknown IFE SRC resource!");
+			CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d",
+				ife_src_res->res_id);
 			break;
 		}
 		if (rc)
@@ -667,7 +676,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&ife_src_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 			goto err;
 		}
 		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
@@ -706,7 +715,7 @@
 			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node!");
+			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
 			goto err;
 		}
 		ife_src_res->res_type = vfe_acquire.rsrc_type;
@@ -748,9 +757,11 @@
 		 * csid resource and ife source resource
 		 */
 		csid_res->child[0] = ife_src_res;
-		csid_res->num_children = 1;
 		ife_src_res->parent = csid_res;
 		csid_res->child[csid_res->num_children++] = ife_src_res;
+		CAM_DBG(CAM_ISP, "csid_res=%d  num_children=%d ife_src_res=%d",
+			csid_res->res_id, csid_res->num_children,
+			ife_src_res->res_id);
 	}
 
 	return 0;
@@ -776,7 +787,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
@@ -808,11 +819,11 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX) {
-		CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resrouce!");
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid ipp resource");
 		goto err;
 	}
 
-	CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resrouce successfully!",
+	CAM_DBG(CAM_ISP, "acquired csid(%d) left ipp resource successfully",
 		 i);
 
 	csid_res->res_type = CAM_ISP_RESOURCE_PIX_PATH;
@@ -839,18 +850,17 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 		csid_res->hw_res[1] = csid_acquire.node_res;
 
 		CAM_DBG(CAM_ISP,
-			"acquired csid(%d)right ipp resrouce successfully!", j);
+			"acquired csid(%d) right ipp resource successfully", j);
 
 	}
 	csid_res->parent = &ife_ctx->res_list_ife_in;
-	ife_ctx->res_list_ife_in.child[
-		ife_ctx->res_list_ife_in.num_children++] = csid_res;
+	CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
 
 	return 0;
 err:
@@ -862,7 +872,6 @@
 	uint32_t                 out_port_type)
 {
 	enum cam_ife_pix_path_res_id path_id;
-
 	switch (out_port_type) {
 	case CAM_ISP_IFE_OUT_RES_RDI_0:
 		path_id = CAM_IFE_PIX_PATH_RES_RDI_0;
@@ -882,6 +891,8 @@
 		break;
 	}
 
+	CAM_DBG(CAM_ISP, "out_port %d path_id %d", out_port_type, path_id);
+
 	return path_id;
 }
 
@@ -909,7 +920,7 @@
 		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
 			&csid_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "No more free hw mgr resource!",
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource",
 				__func__);
 			goto err;
 		}
@@ -945,7 +956,7 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 
@@ -954,10 +965,8 @@
 		csid_res->is_dual_vfe = 0;
 		csid_res->hw_res[0] = csid_acquire.node_res;
 		csid_res->hw_res[1] = NULL;
-
+		CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
 		csid_res->parent = &ife_ctx->res_list_ife_in;
-		ife_ctx->res_list_ife_in.child[
-			ife_ctx->res_list_ife_in.num_children++] = csid_res;
 	}
 
 	return 0;
@@ -978,7 +987,7 @@
 		ife_ctx->res_list_ife_in.res_id = in_port->res_type;
 		ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
 	} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
-		CAM_ERR(CAM_ISP, "No Free resource for this context!");
+		CAM_ERR(CAM_ISP, "No Free resource for this context");
 		goto err;
 	} else {
 		/* else do nothing */
@@ -1032,7 +1041,7 @@
 	/* no dual vfe for TPG */
 	if ((in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) &&
 		(in_port->usage_type != 0)) {
-		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input!");
+		CAM_ERR(CAM_ISP, "No Dual VFE on TPG input");
 		goto err;
 	}
 
@@ -1040,7 +1049,7 @@
 
 	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &cid_res);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "No more free hw mgr resource!");
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
 		goto err;
 	}
 	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, &cid_res);
@@ -1062,7 +1071,7 @@
 	}
 
 	if (i == CAM_IFE_CSID_HW_NUM_MAX || !csid_acquire.node_res) {
-		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resrouce!");
+		CAM_ERR(CAM_ISP, "Can not acquire ife csid rdi resource");
 		goto err;
 	}
 
@@ -1093,7 +1102,7 @@
 
 		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
 			CAM_ERR(CAM_ISP,
-				"Can not acquire ife csid rdi resrouce!");
+				"Can not acquire ife csid rdi resource");
 			goto err;
 		}
 		cid_res->hw_res[1] = csid_acquire.node_res;
@@ -1123,14 +1132,14 @@
 	/* get root node resource */
 	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource!");
+		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource");
 		goto err;
 	}
 
 	/* get cid resource */
 	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res_id);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
 		goto err;
 	}
 
@@ -1138,7 +1147,7 @@
 		&pixel_count, &rdi_count);
 
 	if (!pixel_count && !rdi_count) {
-		CAM_ERR(CAM_ISP, "Error! no PIX or RDI resource");
+		CAM_ERR(CAM_ISP, "No PIX or RDI resource");
 		return -EINVAL;
 	}
 
@@ -1148,7 +1157,7 @@
 				cid_res_id);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID IPP resource Failed!");
+				"Acquire IFE CSID IPP resource Failed");
 			goto err;
 		}
 	}
@@ -1159,7 +1168,7 @@
 			cid_res_id);
 		if (rc) {
 			CAM_ERR(CAM_ISP,
-				"Acquire IFE CSID RDI resource Failed!");
+				"Acquire IFE CSID RDI resource Failed");
 			goto err;
 		}
 	}
@@ -1167,13 +1176,13 @@
 	/* get ife src resource */
 	rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
 		goto err;
 	}
 
 	rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed!");
+		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed");
 		goto err;
 	}
 
@@ -1206,8 +1215,10 @@
 	struct cam_isp_in_port_info       *in_port = NULL;
 	struct cam_isp_resource           *isp_resource = NULL;
 	struct cam_cdm_acquire_data        cdm_acquire;
-	uint32_t                           num_pix_port = 0;
-	uint32_t                           num_rdi_port = 0;
+	uint32_t                           num_pix_port_per_in = 0;
+	uint32_t                           num_rdi_port_per_in = 0;
+	uint32_t                           total_pix_port = 0;
+	uint32_t                           total_rdi_port = 0;
 
 	CAM_DBG(CAM_ISP, "Enter...");
 
@@ -1219,7 +1230,7 @@
 	/* get the ife ctx */
 	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 	if (rc || !ife_ctx) {
-		CAM_ERR(CAM_ISP, "Get ife hw context failed!");
+		CAM_ERR(CAM_ISP, "Get ife hw context failed");
 		goto err;
 	}
 
@@ -1271,7 +1282,10 @@
 			isp_resource[i].length);
 		if (in_port > 0) {
 			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
-				&num_pix_port, &num_rdi_port);
+				&num_pix_port_per_in, &num_rdi_port_per_in);
+			total_pix_port += num_pix_port_per_in;
+			total_rdi_port += num_rdi_port_per_in;
+
 			kfree(in_port);
 			if (rc) {
 				CAM_ERR(CAM_ISP, "can not acquire resource");
@@ -1279,7 +1293,7 @@
 			}
 		} else {
 			CAM_ERR(CAM_ISP,
-				"copy from user failed with in_port = %pK",
+				"Copy from user failed with in_port = %pK",
 				in_port);
 			rc = -EFAULT;
 			goto free_res;
@@ -1287,7 +1301,7 @@
 	}
 
 	/* Check whether context has only RDI resource */
-	if (!num_pix_port) {
+	if (!total_pix_port) {
 		ife_ctx->is_rdi_only_context = 1;
 		CAM_DBG(CAM_ISP, "RDI only context");
 	}
@@ -1295,7 +1309,7 @@
 	/* Process base info */
 	rc = cam_ife_mgr_process_base_info(ife_ctx);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error process) base info!");
+		CAM_ERR(CAM_ISP, "Process base info failed");
 		return -EINVAL;
 	}
 
@@ -1304,14 +1318,14 @@
 
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
 
-	CAM_DBG(CAM_ISP, "Exit...(success)!");
+	CAM_DBG(CAM_ISP, "Exit...(success)");
 
 	return 0;
 free_res:
 	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
 	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
 err:
-	CAM_DBG(CAM_ISP, "Exit...(rc=%d)!", rc);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
 	return rc;
 }
 
@@ -1334,12 +1348,12 @@
 	cfg = config_hw_args;
 	ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
 	if (!ctx) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
 	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
-		CAM_ERR(CAM_ISP, "Invalid context parameters !");
+		CAM_ERR(CAM_ISP, "Invalid context parameters");
 		return -EPERM;
 	}
 
@@ -1388,7 +1402,7 @@
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1397,7 +1411,7 @@
 
 	/* stop resource will remove the irq mask from the hardware */
 	if (!ctx->num_base) {
-		CAM_ERR(CAM_ISP, "error number of bases are zero");
+		CAM_ERR(CAM_ISP, "Number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1474,7 +1488,7 @@
 	}
 	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1484,7 +1498,7 @@
 	/* Note:stop resource will remove the irq mask from the hardware */
 
 	if (!ctx->num_base) {
-		CAM_ERR(CAM_ISP, "error number of bases are zero");
+		CAM_ERR(CAM_ISP, "Number of bases is zero");
 		return -EINVAL;
 	}
 
@@ -1628,7 +1642,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1639,7 +1653,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!", i);
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
 			goto err;
 		}
 	}
@@ -1649,7 +1663,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1660,7 +1674,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1671,7 +1685,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1702,7 +1716,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1721,7 +1735,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE CID.(id :%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CID (id: %d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1735,7 +1749,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID.(id :%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID (id: %d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1747,7 +1761,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1760,7 +1774,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)!",
+			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
 				 ctx->res_list_ife_out[i].res_id);
 			goto err;
 		}
@@ -1769,7 +1783,7 @@
 	CAM_DBG(CAM_ISP, "start cdm interface");
 	rc = cam_cdm_stream_on(ctx->cdm_handle);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Can not start cdm (%d)!",
+		CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
 			 ctx->cdm_handle);
 		goto err;
 	}
@@ -1788,7 +1802,7 @@
 	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
 		rc = cam_ife_hw_mgr_start_hw_res(&ctx->res_list_ife_out[i]);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
 				 i);
 			goto err;
 		}
@@ -1800,7 +1814,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1812,7 +1826,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1824,7 +1838,7 @@
 	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
 		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res);
 		if (rc) {
-			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)!",
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
 				 hw_mgr_res->res_id);
 			goto err;
 		}
@@ -1864,7 +1878,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Invalid context is used");
 		return -EPERM;
 	}
 
@@ -1923,7 +1937,8 @@
 		return rc;
 
 	rc = cam_packet_util_process_patches(prepare->packet,
-		hw_mgr->mgr_common.cmd_iommu_hdl);
+		hw_mgr->mgr_common.cmd_iommu_hdl,
+		hw_mgr->mgr_common.cmd_iommu_hdl_secure);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
 		return rc;
@@ -1982,6 +1997,7 @@
 
 		/* get IO buffers */
 		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
 			prepare, ctx->base[i].idx,
 			&kmd_buf, ctx->res_list_ife_out,
 			CAM_IFE_HW_OUT_RES_MAX, fill_fence);
@@ -2049,7 +2065,7 @@
 
 	ctx = (struct cam_ife_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;
 	if (!ctx || !ctx->ctx_in_use) {
-		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used!");
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
 		return -EPERM;
 	}
 
@@ -2117,7 +2133,7 @@
 	}
 end:
 	if (rc)
-		CAM_ERR(CAM_ISP, "error in getting sof time stamp");
+		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
 
 	return rc;
 }
@@ -2135,7 +2151,7 @@
 	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
 
 	/* Here recovery is performed */
-	CAM_DBG(CAM_ISP, "Enter: ErrorType = %d", error_type);
+	CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
 
 	switch (error_type) {
 	case CAM_ISP_HW_ERROR_OVERFLOW:
@@ -2757,7 +2773,7 @@
 		break;
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)!", sof_status);
+	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
 
 	return 0;
 }
@@ -2949,7 +2965,7 @@
 		}
 	}
 
-	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)!", eof_status);
+	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
 
 	return 0;
 }
@@ -3080,7 +3096,7 @@
 	 * for the first phase, we are going to reset entire HW.
 	 */
 
-	CAM_DBG(CAM_ISP, "Exit (buf_done_status (Error) = %d)!",
+	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
 		buf_done_status);
 	return rc;
 }
@@ -3212,8 +3228,8 @@
 	mutex_init(&g_ife_hw_mgr.ctx_mutex);
 
 	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
-		CAM_ERR(CAM_ISP, "Fatal, CSID num is different then IFE num!");
-		goto end;
+		CAM_ERR(CAM_ISP, "CSID num is different than IFE num");
+		return -EINVAL;
 	}
 
 	/* fill ife hw intf information */
@@ -3237,8 +3253,8 @@
 		}
 	}
 	if (j == 0) {
-		CAM_ERR(CAM_ISP, "no valid IFE HW!");
-		goto end;
+		CAM_ERR(CAM_ISP, "no valid IFE HW");
+		return -EINVAL;
 	}
 
 	/* fill csid hw intf information */
@@ -3248,8 +3264,8 @@
 			j++;
 	}
 	if (!j) {
-		CAM_ERR(CAM_ISP, "no valid IFE CSID HW!");
-		goto end;
+		CAM_ERR(CAM_ISP, "no valid IFE CSID HW");
+		return -EINVAL;
 	}
 
 	cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
@@ -3267,19 +3283,25 @@
 	 */
 	if (cam_smmu_get_handle("ife",
 		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
-		CAM_ERR(CAM_ISP, "Can not get iommu handle.");
-		goto end;
+		CAM_ERR(CAM_ISP, "Can not get iommu handle");
+		return -EINVAL;
 	}
 
 	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
 		CAM_SMMU_ATTACH)) {
 		CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
-		goto end;
+		goto attach_fail;
 	}
 
-	CAM_DBG(CAM_ISP, "got iommu_handle=%d",
-		g_ife_hw_mgr.mgr_common.img_iommu_hdl);
-	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+	if (cam_smmu_get_handle("cam-secure",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
+		CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
+		goto secure_fail;
+	}
+
+	CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
 
 	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
 		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
@@ -3341,7 +3363,6 @@
 	/* Create Worker for ife_hw_mgr with 10 tasks */
 	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
 			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
-
 	if (rc < 0) {
 		CAM_ERR(CAM_ISP, "Unable to create worker");
 		goto end;
@@ -3359,14 +3380,28 @@
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
-
 	CAM_DBG(CAM_ISP, "Exit");
+
 	return 0;
 end:
 	if (rc) {
-		for (i = 0; i < CAM_CTX_MAX; i++)
+		for (i = 0; i < CAM_CTX_MAX; i++) {
+			cam_tasklet_deinit(
+				&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
 			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
+			g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
+			g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
+		}
 	}
+	cam_smmu_destroy_handle(
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+secure_fail:
+	cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_DETACH);
+attach_fail:
+	cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
 	return rc;
 }
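The init path above replaces the single catch-all "end:" cleanup with staged unwind labels (secure_fail, attach_fail), so each resource acquired so far is released in reverse order. A minimal standalone sketch of the same pattern, using hypothetical acquire/release helpers in place of the SMMU calls:

	#include <stdio.h>

	/* Hypothetical stand-ins for cam_smmu_get_handle(), the ATTACH op
	 * and the secure handle lookup. */
	static int get_handle(int *hdl)  { *hdl = 1; return 0; }
	static int attach(int hdl)       { (void)hdl; return 0; }
	static int get_secure(int *hdl)  { *hdl = 2; return 0; }
	static void detach(int hdl)      { (void)hdl; }
	static void put_handle(int *hdl) { *hdl = -1; }

	static int mgr_init(void)
	{
		int hdl = -1, sec_hdl = -1;

		if (get_handle(&hdl))
			return -1;
		if (attach(hdl))
			goto attach_fail;
		if (get_secure(&sec_hdl))
			goto secure_fail;
		return 0;

	secure_fail:
		detach(hdl);		/* undo only the steps that succeeded */
	attach_fail:
		put_handle(&hdl);
		return -1;
	}

	int main(void)
	{
		printf("init rc=%d\n", mgr_init());
		return 0;
	}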
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index 698a4c8..c58578e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -408,6 +408,7 @@
 
 int cam_isp_add_io_buffers(
 	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
 	struct cam_kmd_buf_info              *kmd_buf_info,
@@ -425,7 +426,10 @@
 	uint32_t                            i, j, num_out_buf, num_in_buf;
 	uint32_t                            res_id_out, res_id_in, plane_id;
 	uint32_t                            io_cfg_used_bytes, num_ent;
-	size_t size;
+	size_t                              size;
+	int32_t                             hdl;
+	int                                 mmu_hdl;
+	bool                                mode, is_buf_secure;
 
 	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
 			&prepare->packet->payload +
@@ -536,9 +540,31 @@
 				if (!io_cfg[i].mem_handle[plane_id])
 					break;
 
+				hdl = io_cfg[i].mem_handle[plane_id];
+				if (res->process_cmd(res,
+						CAM_VFE_HW_CMD_GET_SECURE_MODE,
+						&mode,
+						sizeof(bool)))
+					return -EINVAL;
+
+				is_buf_secure = cam_mem_is_secure_buf(hdl);
+				if ((mode == CAM_SECURE_MODE_SECURE) &&
+					is_buf_secure) {
+					mmu_hdl = sec_iommu_hdl;
+				} else if (
+					(mode == CAM_SECURE_MODE_NON_SECURE) &&
+					(!is_buf_secure)) {
+					mmu_hdl = iommu_hdl;
+				} else {
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"Invalid hdl: port mode[%u], buf mode[%u]",
+						mode, is_buf_secure);
+					return -EINVAL;
+				}
+
 				rc = cam_mem_get_io_buf(
 					io_cfg[i].mem_handle[plane_id],
-					iommu_hdl, &io_addr[plane_id], &size);
+					mmu_hdl, &io_addr[plane_id], &size);
 				if (rc) {
 					CAM_ERR(CAM_ISP,
 						"no io addr for plane%d",
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
index 187e5bc..24b532e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -69,6 +69,8 @@
  *                         index and update the HW entries list
  *
  * @iommu_hdl:             Iommu handle to get the IO buf from memory manager
+ * @sec_iommu_hdl:         Secure iommu handle to get the IO buf from
+ *                         memory manager
  * @prepare:               Contain the  packet and HW update variables
  * @base_idx:              Base or dev index of the IFE/VFE HW instance
  * @kmd_buf_info:          Kmd buffer to store the change base command
@@ -79,7 +81,9 @@
  * @return:                0 for success
  *                         -EINVAL for Fail
  */
-int cam_isp_add_io_buffers(int	 iommu_hdl,
+int cam_isp_add_io_buffers(
+	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
 	struct cam_hw_prepare_update_args    *prepare,
 	uint32_t                              base_idx,
 	struct cam_kmd_buf_info              *kmd_buf_info,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
index 2341b38..dcbea8d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -329,6 +329,111 @@
 	return rc;
 }
 
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	unsigned long               flags;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+
+	if (!controller)
+		return rc;
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_ISP, "enable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found)
+		return rc;
+
+	write_lock_irqsave(&controller->rw_lock, flags);
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].
+		top_half_enable_mask[evt_handler->priority] |=
+			evt_handler->evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+		irq_mask |= evt_handler->evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+		controller->irq_register_arr[i].mask_reg_offset);
+	}
+	write_unlock_irqrestore(&controller->rw_lock, flags);
+
+	return rc;
+}
+
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	struct cam_irq_evt_handler *evt_handler_temp;
+	unsigned long               flags;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+
+	if (!controller)
+		return rc;
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_ISP, "disable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found)
+		return rc;
+
+	write_lock_irqsave(&controller->rw_lock, flags);
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].
+		top_half_enable_mask[evt_handler->priority] &=
+			~(evt_handler->evt_bit_mask_arr[i]);
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+		irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].
+			mask_reg_offset);
+
+		/* Clear the IRQ bits of this handler */
+		cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+			controller->mem_base +
+			controller->irq_register_arr[i].
+			clear_reg_offset);
+
+		if (controller->global_clear_offset)
+			cam_io_w_mb(
+				controller->global_clear_bitmask,
+				controller->mem_base +
+				controller->global_clear_offset);
+	}
+	write_unlock_irqrestore(&controller->rw_lock, flags);
+
+	return rc;
+}
+
 int cam_irq_controller_unsubscribe_irq(void *irq_controller,
 	uint32_t handle)
 {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
index 1990c51..7e307b5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -214,4 +214,40 @@
  */
 irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv);
 
+/*
+ * cam_irq_controller_disable_irq()
+ *
+ * @brief:              Disable the interrupts on the given controller.
+ *                      Unsubscribe disables the IRQ by default, so this is
+ *                      only needed when the IRQ must be disabled again
+ *                      between subscribe and unsubscribe.
+ *
+ * @irq_controller:     Pointer to IRQ Controller that controls the registered
+ *                      events to it.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and disabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_enable_irq()
+ *
+ * @brief:              Enable the interrupts on the given controller.
+ *                      Subscribe enables the IRQ by default, so this is
+ *                      only needed when the IRQ must be re-enabled
+ *                      between subscribe and unsubscribe.
+ *
+ * @irq_controller:     Pointer to IRQ Controller that controls the registered
+ *                      events to it.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and enabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
+
 #endif /* _CAM_IRQ_CONTROLLER_H_ */
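The new enable/disable calls are intended for temporarily masking an already-subscribed event without tearing down its handler. A short sketch of the call pattern against the declarations above, assuming irq_ctrl and handle were obtained from the controller's earlier init/subscribe calls (whose signatures are not shown here):

	/* Mask the event around a reconfiguration window, then unmask it. */
	static int reconfigure_quietly(void *irq_ctrl, uint32_t handle)
	{
		int rc;

		rc = cam_irq_controller_disable_irq(irq_ctrl, handle);
		if (rc)
			return rc;	/* handle not registered on this controller */

		/* ... touch hardware state that would otherwise raise the event ... */

		return cam_irq_controller_enable_irq(irq_ctrl, handle);
	}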
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
index 22c11d3..020599d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -129,8 +129,8 @@
 
 	ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	ahb_vote.vote.level = CAM_SVS_VOTE;
-	axi_vote.compressed_bw = 640000000;
-	axi_vote.uncompressed_bw = 640000000;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
 
 	CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
 		axi_vote.compressed_bw, axi_vote.uncompressed_bw);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index a08248d..a64379c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -49,6 +49,7 @@
 	CAM_VFE_HW_CMD_GET_BUF_UPDATE,
 	CAM_VFE_HW_CMD_GET_REG_UPDATE,
 	CAM_VFE_HW_CMD_GET_HFR_UPDATE,
+	CAM_VFE_HW_CMD_GET_SECURE_MODE,
 	CAM_VFE_HW_CMD_MAX,
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index b015452..1472e09 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -94,6 +94,9 @@
 	struct cam_vfe_bus_irq_evt_payload          evt_payload[
 		CAM_VFE_BUS_VER2_PAYLOAD_MAX];
 	struct list_head                            free_payload_list;
+	struct mutex                                bus_mutex;
+	uint32_t                                    secure_mode;
+	uint32_t                                    num_sec_out;
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
@@ -168,6 +171,7 @@
 	uint32_t                         max_width;
 	uint32_t                         max_height;
 	struct cam_cdm_utils_ops        *cdm_util_ops;
+	uint32_t                         secure_mode;
 };
 
 struct cam_vfe_bus_ver2_priv {
@@ -184,6 +188,10 @@
 	uint32_t                            irq_handle;
 };
 
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
 static int cam_vfe_bus_get_evt_payload(
 	struct cam_vfe_bus_ver2_common_data  *common_data,
 	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
@@ -328,6 +336,34 @@
 	return rc;
 }
 
+static bool cam_vfe_bus_can_be_secure(uint32_t out_type)
+{
+	switch (out_type) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+		return true;
+
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+	default:
+		return false;
+	}
+}
+
 static enum cam_vfe_bus_ver2_vfe_out_type
 	cam_vfe_bus_get_out_res_id(uint32_t res_type)
 {
@@ -720,6 +756,8 @@
 	case CAM_FORMAT_UBWC_TP10:
 	case CAM_FORMAT_TP10:
 		return PACKER_FMT_TP_10;
+	case CAM_FORMAT_ARGB_14:
+		return PACKER_FMT_ARGB_14;
 	default:
 		return PACKER_FMT_MAX;
 	}
@@ -844,7 +882,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -860,7 +898,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -875,7 +913,7 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
@@ -889,12 +927,12 @@
 			case PLANE_Y:
 				break;
 			default:
-				CAM_ERR(CAM_ISP, "Invalid plane %d\n", plane);
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
 				return -EINVAL;
 			}
 			break;
 		default:
-			CAM_ERR(CAM_ISP, "Invalid format %d\n",
+			CAM_ERR(CAM_ISP, "Invalid format %d",
 				rsrc_data->format);
 			return -EINVAL;
 		}
@@ -905,8 +943,13 @@
 		rsrc_data->height = 0;
 		rsrc_data->stride = 1;
 		rsrc_data->en_cfg = 0x3;
-	} else {
-		/* Write master 5-6 DS ports , 9 - Raw dump , 10 PDAF */
+	} else if (rsrc_data->index == 9) {
+		/* Write master 9 - Raw dump */
+		rsrc_data->width = rsrc_data->width * 2;
+		rsrc_data->stride = rsrc_data->width;
+		rsrc_data->en_cfg = 0x1;
+	} else {
+		/* Write master 5-6 DS ports, 10 PDAF */
 		rsrc_data->width = rsrc_data->width * 4;
 		rsrc_data->height = rsrc_data->height / 2;
 		rsrc_data->en_cfg = 0x1;
@@ -1061,7 +1104,7 @@
 
 	wm_res = th_payload->handler_priv;
 	if (!wm_res) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
 		return -ENODEV;
 	}
 
@@ -1073,7 +1116,7 @@
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
 		return rc;
 	}
 
@@ -1170,10 +1213,8 @@
 
 	rsrc_data = wm_res->res_priv;
 	wm_res->res_priv = NULL;
-	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! WM res priv is NULL");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
 	kfree(rsrc_data);
 
 	return 0;
@@ -1465,7 +1506,7 @@
 
 	comp_grp = th_payload->handler_priv;
 	if (!comp_grp) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! No resource\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
 		return -ENODEV;
 	}
 
@@ -1477,7 +1518,7 @@
 	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
 	if (rc) {
 		CAM_ERR_RATE_LIMIT(CAM_ISP,
-			"No tasklet_cmd is free in queue\n");
+			"No tasklet_cmd is free in queue");
 		return rc;
 	}
 
@@ -1604,7 +1645,7 @@
 		break;
 	default:
 		rc = CAM_VFE_IRQ_STATUS_ERR;
-		CAM_ERR(CAM_ISP, "Error! Invalid comp_grp_type %u",
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_type %u",
 			rsrc_data->comp_grp_type);
 		break;
 	}
@@ -1625,10 +1666,9 @@
 
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
 		GFP_KERNEL);
-	if (!rsrc_data) {
-		CAM_DBG(CAM_ISP, "Failed to alloc for comp_grp_priv");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
+
 	comp_grp->res_priv = rsrc_data;
 
 	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
@@ -1675,7 +1715,7 @@
 	comp_grp->res_priv = NULL;
 
 	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! comp_grp_priv is NULL");
+		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
 		return -ENODEV;
 	}
 	kfree(rsrc_data);
@@ -1683,6 +1723,22 @@
 	return 0;
 }
 
+static int cam_vfe_bus_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data =
+		(struct cam_vfe_bus_ver2_vfe_out_data *)res->res_priv;
+
+	*mode =
+		(rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ?
+		true : false;
+
+	return 0;
+}
+
 static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
 	uint32_t args_size)
 {
@@ -1690,7 +1746,7 @@
 	int                                     i;
 	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
 	uint32_t                                format;
-	uint32_t                                num_wm;
+	int                                     num_wm;
 	uint32_t                                subscribe_irq;
 	uint32_t                                client_done_mask;
 	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
@@ -1698,6 +1754,7 @@
 	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
 	struct cam_isp_resource_node           *rsrc_node = NULL;
 	struct cam_vfe_bus_ver2_vfe_out_data   *rsrc_data = NULL;
+	uint32_t                                secure_caps = 0, mode;
 
 	if (!bus_priv || !acquire_args) {
 		CAM_ERR(CAM_ISP, "Invalid Param");
@@ -1727,6 +1784,33 @@
 	}
 
 	rsrc_data = rsrc_node->res_priv;
+	secure_caps = cam_vfe_bus_can_be_secure(
+		rsrc_data->out_type);
+	mode = out_acquire_args->out_port_info->secure_mode;
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (!rsrc_data->common_data->num_sec_out) {
+			rsrc_data->secure_mode = mode;
+			rsrc_data->common_data->secure_mode = mode;
+		} else {
+			if (mode == rsrc_data->common_data->secure_mode) {
+				rsrc_data->secure_mode =
+					rsrc_data->common_data->secure_mode;
+			} else {
+				rc = -EINVAL;
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Mismatch: Acquire mode[%d], drvr mode[%d]",
+					rsrc_data->common_data->secure_mode,
+					mode);
+				mutex_unlock(
+					&rsrc_data->common_data->bus_mutex);
+				return -EINVAL;
+			}
+		}
+		rsrc_data->common_data->num_sec_out++;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
 	rsrc_data->num_wm = num_wm;
 	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
 	rsrc_node->tasklet_info = acq_args->tasklet;
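The acquire path above (and the matching release hunk below) lets the first secure-capable output decide the bus-wide secure mode, counts users in num_sec_out, and rejects acquires whose requested mode disagrees. A standalone sketch of that policy, with a pthread mutex standing in for bus_mutex:

	#include <pthread.h>
	#include <stdio.h>

	enum { NON_SECURE = 0, SECURE = 1 };

	struct bus_state {
		pthread_mutex_t lock;
		int mode;			/* mode chosen by the first acquirer */
		unsigned int num_sec_out;
	};

	static int acquire_out(struct bus_state *b, int req_mode)
	{
		int rc = 0;

		pthread_mutex_lock(&b->lock);
		if (!b->num_sec_out)
			b->mode = req_mode;	/* first user picks the mode */
		else if (req_mode != b->mode)
			rc = -1;		/* mismatch: reject this acquire */
		if (!rc)
			b->num_sec_out++;
		pthread_mutex_unlock(&b->lock);
		return rc;
	}

	static void release_out(struct bus_state *b)
	{
		pthread_mutex_lock(&b->lock);
		if (b->num_sec_out && !--b->num_sec_out)
			b->mode = NON_SECURE;	/* last user resets the mode */
		pthread_mutex_unlock(&b->lock);
	}

	int main(void)
	{
		struct bus_state b = { PTHREAD_MUTEX_INITIALIZER, NON_SECURE, 0 };

		printf("%d\n", acquire_out(&b, SECURE));	/* 0  */
		printf("%d\n", acquire_out(&b, NON_SECURE));	/* -1 */
		release_out(&b);
		return 0;
	}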
@@ -1807,6 +1891,7 @@
 	uint32_t i;
 	struct cam_isp_resource_node          *vfe_out = NULL;
 	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	uint32_t                               secure_caps = 0;
 
 	if (!bus_priv || !release_args) {
 		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
@@ -1818,7 +1903,7 @@
 	rsrc_data = vfe_out->res_priv;
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
 			vfe_out->res_state);
 	}
 
@@ -1834,6 +1919,32 @@
 	vfe_out->cdm_ops = NULL;
 	rsrc_data->cdm_util_ops = NULL;
 
+	secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (rsrc_data->secure_mode ==
+			rsrc_data->common_data->secure_mode) {
+			rsrc_data->common_data->num_sec_out--;
+			rsrc_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+		} else {
+			/*
+			 * The validity of the mode is checked while
+			 * acquiring the output port, so this path is
+			 * not expected to be reached unless the state
+			 * has been corrupted.
+			 */
+			CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch",
+				rsrc_data->common_data->secure_mode,
+				rsrc_data->secure_mode);
+		}
+
+		if (!rsrc_data->common_data->num_sec_out)
+			rsrc_data->common_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
 	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
 		vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
@@ -1858,7 +1969,7 @@
 	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
 
 	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
-		CAM_ERR(CAM_ISP, "Error! Invalid resource state:%d",
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
 			vfe_out->res_state);
 		return -EACCES;
 	}
@@ -1965,7 +2076,6 @@
 	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
 		GFP_KERNEL);
 	if (!rsrc_data) {
-		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe out priv");
 		rc = -ENOMEM;
 		return rc;
 	}
@@ -1981,12 +2091,14 @@
 		ver2_hw_info->vfe_out_hw_info[index].max_width;
 	rsrc_data->max_height  =
 		ver2_hw_info->vfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
 
 	vfe_out->start = cam_vfe_bus_start_vfe_out;
 	vfe_out->stop = cam_vfe_bus_stop_vfe_out;
 	vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half;
 	vfe_out->bottom_half_handler =
 		cam_vfe_bus_handle_vfe_out_done_bottom_half;
+	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
 	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
 
 	return 0;
@@ -2007,10 +2119,8 @@
 	INIT_LIST_HEAD(&vfe_out->list);
 	vfe_out->res_priv = NULL;
 
-	if (!rsrc_data) {
-		CAM_ERR(CAM_ISP, "Error! vfe out priv is NULL");
+	if (!rsrc_data)
 		return -ENOMEM;
-	}
 	kfree(rsrc_data);
 
 	return 0;
@@ -2387,7 +2497,7 @@
 	uint32_t                         top_irq_reg_mask[2] = {0};
 
 	if (!bus_priv) {
-		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		CAM_ERR(CAM_ISP, "Invalid args");
 		return -EINVAL;
 	}
 
@@ -2418,7 +2528,7 @@
 	int                              rc;
 
 	if (!bus_priv || (bus_priv->irq_handle <= 0)) {
-		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		CAM_ERR(CAM_ISP, "Error: Invalid args");
 		return -EINVAL;
 	}
 
@@ -2431,13 +2541,20 @@
 	return rc;
 }
 
-static int cam_vfe_bus_process_cmd(void *priv,
+static int __cam_vfe_bus_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_process_cmd(priv, cmd_type, cmd_args, arg_size);
+}
+
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
 	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
 {
 	int rc = -EINVAL;
 
 	if (!priv || !cmd_args) {
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error! Invalid input arguments\n");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
 		return -EINVAL;
 	}
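Above, process_cmd is re-typed to take the resource node directly, while a thin __cam_vfe_bus_process_cmd() wrapper keeps the void-pointer signature required by the generic ops table. A minimal standalone sketch of that wrapper pattern, with hypothetical names:

	#include <stdint.h>
	#include <stdio.h>

	struct res_node { int id; };

	/* Typed handler: callers inside the module keep type checking. */
	static int res_process_cmd(struct res_node *res, uint32_t cmd)
	{
		printf("res %d cmd %u\n", res->id, cmd);
		return 0;
	}

	/* Untyped wrapper: matches the void *priv slot of the ops table. */
	static int __res_process_cmd(void *priv, uint32_t cmd)
	{
		return res_process_cmd(priv, cmd);
	}

	struct ops { int (*process_cmd)(void *priv, uint32_t cmd); };

	int main(void)
	{
		struct res_node n = { 3 };
		struct ops o = { .process_cmd = __res_process_cmd };

		return o.process_cmd(&n, 7);
	}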
 
@@ -2448,8 +2565,11 @@
 	case CAM_VFE_HW_CMD_GET_HFR_UPDATE:
 		rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
 		break;
+	case CAM_VFE_HW_CMD_GET_SECURE_MODE:
+		rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
+		break;
 	default:
-		CAM_ERR_RATE_LIMIT(CAM_ISP, "Inval camif process command:%d\n",
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
 			cmd_type);
 		break;
 	}
@@ -2496,18 +2616,21 @@
 	}
 	vfe_bus_local->bus_priv = bus_priv;
 
+	bus_priv->common_data.num_sec_out        = 0;
+	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
 	bus_priv->common_data.core_index         = soc_info->index;
 	bus_priv->common_data.mem_base           =
 		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
 	bus_priv->common_data.hw_intf            = hw_intf;
 	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
 	bus_priv->common_data.common_reg         = &ver2_hw_info->common_reg;
+	mutex_init(&bus_priv->common_data.bus_mutex);
 
 	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
 		&ver2_hw_info->common_reg.irq_reg_info,
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc) {
-		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+		CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
 		goto free_bus_priv;
 	}
 
@@ -2519,7 +2642,7 @@
 		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
 			&bus_priv->bus_client[i]);
 		if (rc < 0) {
-			CAM_ERR(CAM_ISP, "Error! Init WM failed rc=%d", rc);
+			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
 			goto deinit_wm;
 		}
 	}
@@ -2557,7 +2680,7 @@
 	vfe_bus_local->hw_ops.deinit       = cam_vfe_bus_deinit_hw;
 	vfe_bus_local->top_half_handler    = cam_vfe_bus_ver2_handle_irq;
 	vfe_bus_local->bottom_half_handler = NULL;
-	vfe_bus_local->hw_ops.process_cmd  = cam_vfe_bus_process_cmd;
+	vfe_bus_local->hw_ops.process_cmd  = __cam_vfe_bus_process_cmd;
 
 	*vfe_bus = vfe_bus_local;
 
@@ -2600,14 +2723,14 @@
 	struct cam_vfe_bus              *vfe_bus_local;
 
 	if (!vfe_bus || !*vfe_bus) {
-		CAM_ERR(CAM_ISP, "Error! Invalid input");
+		CAM_ERR(CAM_ISP, "Invalid input");
 		return -EINVAL;
 	}
 	vfe_bus_local = *vfe_bus;
 
 	bus_priv = vfe_bus_local->bus_priv;
 	if (!bus_priv) {
-		CAM_ERR(CAM_ISP, "Error! bus_priv is NULL");
+		CAM_ERR(CAM_ISP, "bus_priv is NULL");
 		rc = -ENODEV;
 		goto free_bus_local;
 	}
@@ -2620,21 +2743,21 @@
 		rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit WM failed rc=%d", rc);
+				"Deinit WM failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
 		rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit Comp Grp failed rc=%d", rc);
+				"Deinit Comp Grp failed rc=%d", rc);
 	}
 
 	for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
 		rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
 		if (rc < 0)
 			CAM_ERR(CAM_ISP,
-				"Error! Deinit VFE Out failed rc=%d", rc);
+				"Deinit VFE Out failed rc=%d", rc);
 	}
 
 	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
@@ -2645,8 +2768,9 @@
 		&bus_priv->common_data.bus_irq_controller);
 	if (rc)
 		CAM_ERR(CAM_ISP,
-			"Error! Deinit IRQ Controller failed rc=%d", rc);
+			"Deinit IRQ Controller failed rc=%d", rc);
 
+	mutex_destroy(&bus_priv->common_data.bus_mutex);
 	kfree(vfe_bus_local->bus_priv);
 
 free_bus_local:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index b06b5c4..2cd6b04 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -214,22 +214,22 @@
 
 
 static int cam_jpeg_mgr_release_ctx(
-	struct cam_jpeg_hw_mgr *hw_mgr, int ctx_id)
+	struct cam_jpeg_hw_mgr *hw_mgr, struct cam_jpeg_hw_ctx_data *ctx_data)
 {
-	if (ctx_id >= CAM_JPEG_CTX_MAX) {
-		CAM_ERR(CAM_JPEG, "ctx_id is wrong: %d", ctx_id);
+	if (!ctx_data) {
+		CAM_ERR(CAM_JPEG, "invalid ctx_data %pK", ctx_data);
 		return -EINVAL;
 	}
 
-	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
-	if (!hw_mgr->ctx_data[ctx_id].in_use) {
-		CAM_ERR(CAM_JPEG, "ctx is already in use: %d", ctx_id);
-		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is already un-used: %pK", ctx_data);
+		mutex_unlock(&ctx_data->ctx_mutex);
 		return -EINVAL;
 	}
 
-	hw_mgr->ctx_data[ctx_id].in_use = 0;
-	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	ctx_data->in_use = false;
+	mutex_unlock(&ctx_data->ctx_mutex);
 
 	return 0;
 }
@@ -280,7 +280,8 @@
 		hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
 		list_del_init(&p_cfg_req->list);
 	} else {
-		CAM_ERR(CAM_JPEG, "NOT dequeing, just return");
+		CAM_DBG(CAM_JPEG, "Not dequeing, just return");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		rc = -EFAULT;
 		goto end;
 	}
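The added mutex_unlock() above closes a lock leak on the early-exit path: any goto that leaves the locked region has to drop hw_mgr_mutex first unless the target label does it. A tiny standalone sketch of the rule with a pthread mutex:

	#include <pthread.h>

	static pthread_mutex_t mgr_lock = PTHREAD_MUTEX_INITIALIZER;
	static int queue_empty = 1;	/* stand-in for the empty request list check */

	static int dequeue_one(void)
	{
		int rc = 0;

		pthread_mutex_lock(&mgr_lock);
		if (queue_empty) {
			/* drop the lock before bailing out, as in the fix above */
			pthread_mutex_unlock(&mgr_lock);
			rc = -1;
			goto end;
		}
		/* ... work on the dequeued entry ... */
		pthread_mutex_unlock(&mgr_lock);
	end:
		return rc;
	}

	int main(void) { return dequeue_one() == -1 ? 0 : 1; }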
@@ -289,7 +290,7 @@
 	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
 	request_id = task_data->request_id;
 	if (request_id != (uint64_t)config_args->priv) {
-		CAM_WARN(CAM_JPEG, "not a recent req %d %d",
+		CAM_DBG(CAM_JPEG, "not a recent req %lld %lld",
 			request_id, (uint64_t)config_args->priv);
 	}
 
@@ -475,8 +476,8 @@
 
 	request_id = (uint64_t)config_args->priv;
 	hw_update_entries = config_args->hw_update_entries;
-	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %d %pK",
-		ctx_data, request_id, config_args->priv);
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %lld %lld",
+		ctx_data, request_id, (uint64_t)config_args->priv);
 	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
 	if (!task) {
 		CAM_ERR(CAM_JPEG, "no empty task");
@@ -578,7 +579,7 @@
 		(void *)packet, (void *)cmd_desc,
 		sizeof(struct cam_cmd_buf_desc));
 
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl);
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl, -1);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
 		return rc;
@@ -631,13 +632,12 @@
 static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
 {
 	int rc;
-	int ctx_id = 0;
 	struct cam_hw_release_args *release_hw = release_hw_args;
 	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
 	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
 	uint32_t dev_type;
 
-	if (!release_hw || !hw_mgr) {
+	if (!hw_mgr || !release_hw || !release_hw->ctxt_to_hw_map) {
 		CAM_ERR(CAM_JPEG, "Invalid args");
 		return -EINVAL;
 	}
@@ -671,7 +671,7 @@
 	}
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
-	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
 	if (rc) {
 		mutex_unlock(&hw_mgr->hw_mgr_mutex);
 		return -EINVAL;
@@ -838,7 +838,7 @@
 	cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
 acq_cdm_hdl_failed:
 	kfree(ctx_data->cdm_cmd);
-	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_id);
+	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
index 05c1a95..0a15f71 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -57,6 +57,13 @@
 		return -EINVAL;
 	}
 
+
+	mutex_lock(&core_info->core_mutex);
+	if (++core_info->ref_count > 1) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
@@ -64,15 +71,26 @@
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
-	if (rc)
+	if (rc) {
 		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
+		goto cpas_failed;
+	}
 
 	rc = cam_jpeg_dma_enable_soc_resources(soc_info);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
-		cam_cpas_stop(core_info->cpas_handle);
+		goto soc_failed;
 	}
 
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+
+soc_failed:
+	cam_cpas_stop(core_info->cpas_handle);
+cpas_failed:
+	--core_info->ref_count;
+	mutex_unlock(&core_info->core_mutex);
 	return rc;
 }
 
@@ -98,6 +116,19 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&core_info->core_mutex);
+	if (--core_info->ref_count > 0) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	if (core_info->ref_count < 0) {
+		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
+		core_info->ref_count = 0;
+		mutex_unlock(&core_info->core_mutex);
+		return -EFAULT;
+	}
+
 	rc = cam_jpeg_dma_disable_soc_resources(soc_info);
 	if (rc)
 		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
@@ -106,6 +137,8 @@
 	if (rc)
 		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
 
+	mutex_unlock(&core_info->core_mutex);
+
 	return 0;
 }
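The JPEG DMA core (and, further down, the encoder core) now refcounts init/deinit under core_mutex, so CPAS votes and clocks are only enabled for the first user and released by the last, with the count rolled back when enabling fails. A standalone sketch of that pattern:

	#include <pthread.h>
	#include <stdio.h>

	struct core {
		pthread_mutex_t lock;
		int ref_count;
	};

	static int power_on(void)   { return 0; }	/* stand-ins for cpas/clock enable */
	static void power_off(void) { }

	static int core_init(struct core *c)
	{
		int rc = 0;

		pthread_mutex_lock(&c->lock);
		if (++c->ref_count > 1)		/* already powered by another user */
			goto out;
		rc = power_on();
		if (rc)
			--c->ref_count;		/* roll back the count on failure */
	out:
		pthread_mutex_unlock(&c->lock);
		return rc;
	}

	static int core_deinit(struct core *c)
	{
		pthread_mutex_lock(&c->lock);
		if (--c->ref_count > 0) {	/* other users still hold the core */
			pthread_mutex_unlock(&c->lock);
			return 0;
		}
		if (c->ref_count < 0) {		/* unbalanced deinit */
			c->ref_count = 0;
			pthread_mutex_unlock(&c->lock);
			return -1;
		}
		power_off();			/* last user: release clocks/votes */
		pthread_mutex_unlock(&c->lock);
		return 0;
	}

	int main(void)
	{
		struct core c = { PTHREAD_MUTEX_INITIALIZER, 0 };

		core_init(&c);
		core_init(&c);
		core_deinit(&c);
		core_deinit(&c);	/* last user actually powers off */
		printf("ref=%d\n", c.ref_count);
		return 0;
	}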
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
index bb4e34a..1e0c2e2 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -41,6 +41,8 @@
 	struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
 	uint32_t cpas_handle;
 	struct cam_jpeg_dma_set_irq_cb irq_cb;
+	int32_t ref_count;
+	struct mutex core_mutex;
 };
 
 int cam_jpeg_dma_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
index 55a344d..ed58b41 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -99,6 +99,7 @@
 	if (rc)
 		CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
 
+	mutex_destroy(&core_info->core_mutex);
 	kfree(core_info);
 
 deinit_soc:
@@ -165,13 +166,14 @@
 	hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
 	core_info->jpeg_dma_hw_info = hw_info;
 	core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+	mutex_init(&core_info->core_mutex);
 
 	rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
 		cam_jpeg_dma_irq,
 		jpeg_dma_dev);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "%failed to init_soc %d", rc);
-		goto error_match_dev;
+		goto error_init_soc;
 	}
 
 	rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
@@ -191,6 +193,8 @@
 
 error_reg_cpas:
 	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_init_soc:
+	mutex_destroy(&core_info->core_mutex);
 error_match_dev:
 	kfree(jpeg_dma_dev->core_info);
 error_alloc_core:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
index efc161b..63d54fd 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -55,7 +55,7 @@
 {
 	int rc;
 
-	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
 		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
index 25405cf..06ad260 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -100,6 +100,13 @@
 		return -EINVAL;
 	}
 
+
+	mutex_lock(&core_info->core_mutex);
+	if (++core_info->ref_count > 1) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
 	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
 	cpas_vote.axi_vote.compressed_bw = JPEG_TURBO_VOTE;
@@ -107,15 +114,26 @@
 
 	rc = cam_cpas_start(core_info->cpas_handle,
 		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
-	if (rc)
+	if (rc) {
 		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
+		goto cpas_failed;
+	}
 
 	rc = cam_jpeg_enc_enable_soc_resources(soc_info);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
-		cam_cpas_stop(core_info->cpas_handle);
+		goto soc_failed;
 	}
 
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+
+soc_failed:
+	cam_cpas_stop(core_info->cpas_handle);
+cpas_failed:
+	--core_info->ref_count;
+	mutex_unlock(&core_info->core_mutex);
 	return rc;
 }
 
@@ -141,6 +159,19 @@
 		return -EINVAL;
 	}
 
+	mutex_lock(&core_info->core_mutex);
+	if (--core_info->ref_count > 0) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	if (core_info->ref_count < 0) {
+		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
+		core_info->ref_count = 0;
+		mutex_unlock(&core_info->core_mutex);
+		return -EFAULT;
+	}
+
 	rc = cam_jpeg_enc_disable_soc_resources(soc_info);
 	if (rc)
 		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
@@ -149,6 +180,8 @@
 	if (rc)
 		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
 
+	mutex_unlock(&core_info->core_mutex);
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
index 6ae4cdc..eb5caef 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -45,6 +45,8 @@
 	struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info;
 	uint32_t cpas_handle;
 	struct cam_jpeg_enc_set_irq_cb irq_cb;
+	int32_t ref_count;
+	struct mutex core_mutex;
 };
 
 int cam_jpeg_enc_init_hw(void *device_priv,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
index a8f309a..570d9f9 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -103,6 +103,7 @@
 	if (rc)
 		CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
 
+	mutex_destroy(&core_info->core_mutex);
 	kfree(core_info);
 
 deinit_soc:
@@ -171,13 +172,14 @@
 	hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data;
 	core_info->jpeg_enc_hw_info = hw_info;
 	core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	mutex_init(&core_info->core_mutex);
 
 	rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
 		cam_jpeg_enc_irq,
 		jpeg_enc_dev);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, " failed to init_soc %d", rc);
-		goto error_match_dev;
+		goto error_init_soc;
 	}
 
 	rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
@@ -195,6 +197,8 @@
 
 error_reg_cpas:
 	cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_init_soc:
+	mutex_destroy(&core_info->core_mutex);
 error_match_dev:
 	kfree(jpeg_enc_dev->core_info);
 error_alloc_core:
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
index 3f450cd..ddf2465 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -55,7 +55,7 @@
 {
 	int rc;
 
-	rc = cam_soc_util_disable_platform_resource(soc_info, true, false);
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
 	if (rc)
 		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
index c150244..9d454e9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -52,6 +52,8 @@
 		rc = DMA_FROM_DEVICE;
 	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
 		rc = DMA_BIDIRECTIONAL;
+	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		rc = DMA_BIDIRECTIONAL;
 
 	return rc;
 }
@@ -211,10 +213,16 @@
 		goto handle_mismatch;
 	}
 
-	rc = cam_smmu_get_iova(mmu_handle,
-		tbl.bufq[idx].fd,
-		iova_ptr,
-		len_ptr);
+	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
+		rc = cam_smmu_get_stage2_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	else
+		rc = cam_smmu_get_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
 	if (rc < 0)
 		CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);
 
@@ -376,10 +384,12 @@
 	uint32_t ion_flag = 0;
 	int rc;
 
-	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
-	else
+		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
+	} else {
 		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
+	}
 
 	if (cmd->flags & CAM_MEM_FLAG_CACHE)
 		ion_flag |= ION_FLAG_CACHED;
@@ -466,10 +476,11 @@
 
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_map_sec_iova(mmu_hdls[i],
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
 				fd,
 				dir,
-				(dma_addr_t *)hw_vaddr,
+				tbl.client,
+				(ion_phys_addr_t *)hw_vaddr,
 				len);
 
 			if (rc < 0) {
@@ -498,7 +509,7 @@
 multi_map_fail:
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
 		for (--i; i > 0; i--)
-			cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
 	else
 		for (--i; i > 0; i--)
 			cam_smmu_unmap_iova(mmu_hdls[i],
@@ -543,8 +554,9 @@
 		goto slot_fail;
 	}
 
-	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
-		cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
 
 		enum cam_smmu_region_id region;
 
@@ -570,6 +582,8 @@
 	tbl.bufq[idx].fd = ion_fd;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
 	tbl.bufq[idx].kmdvaddr = 0;
 
 	if (cmd->num_hdl > 0)
@@ -630,7 +644,8 @@
 		return -EINVAL;
 	}
 
-	if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) {
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
 		rc = cam_mem_util_map_hw_va(cmd->flags,
 			cmd->mmu_hdls,
 			cmd->num_hdl,
@@ -652,6 +667,8 @@
 	tbl.bufq[idx].fd = cmd->fd;
 	tbl.bufq[idx].flags = cmd->flags;
 	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
 	tbl.bufq[idx].kmdvaddr = 0;
 
 	if (cmd->num_hdl > 0)
@@ -699,7 +716,7 @@
 
 	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
 		for (i = 0; i < num_hdls; i++) {
-			rc = cam_smmu_unmap_sec_iova(mmu_hdls[i], fd);
+			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
 			if (rc < 0)
 				goto unmap_end;
 		}
@@ -741,8 +758,9 @@
 			region = CAM_SMMU_REGION_IO;
 	}
 
-	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE ||
-		tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
 		rc = cam_mem_util_unmap_hw_va(idx, region);
 
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
index 0858b8a..af7962a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -95,4 +95,9 @@
 int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr,
 	size_t *len);
 
+static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
+{
+	return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);
+}
+
 #endif /* _CAM_MEM_MGR_API_H_ */
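cam_mem_is_secure_buf() just reads a flag that the manager folds into the buffer handle at map time (CAM_MEM_MGR_SET_SECURE_HDL above), so downstream code can pick the stage-2 IOMMU path without a table lookup. A standalone sketch of encoding such a flag in a handle; the bit position and layout here are illustrative only, not the driver's actual encoding:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative layout: low 16 bits carry the index, bit 31 marks secure. */
	#define HDL_SECURE_BIT	(1u << 31)

	static int32_t make_handle(uint16_t idx, bool secure)
	{
		uint32_t h = idx;

		if (secure)
			h |= HDL_SECURE_BIT;
		return (int32_t)h;
	}

	static bool handle_is_secure(int32_t handle)
	{
		return ((uint32_t)handle & HDL_SECURE_BIT) != 0;
	}

	int main(void)
	{
		int32_t h = make_handle(5, true);

		printf("secure=%d\n", handle_is_secure(h));	/* 1 */
		return 0;
	}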
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 96d5b6e..681504d 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -414,7 +414,7 @@
 		}
 	}
 	if (rc < 0) {
-		CAM_ERR(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
 			dev->dev_info.p_delay, apply_req.request_id);
 		/* Apply req failed notify already applied devs */
 		for (; i >= 0; i--) {
@@ -525,7 +525,8 @@
 
 	if (trigger == CAM_TRIGGER_POINT_SOF) {
 		if (link->trigger_mask) {
-			CAM_ERR(CAM_CRM, "Applying for last EOF fails");
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"Applying for last EOF fails");
 			return -EINVAL;
 		}
 		rc = __cam_req_mgr_check_link_is_ready(link, slot->idx);
@@ -542,7 +543,7 @@
 				 * ready, don't expect to enter here.
 				 * @TODO: gracefully handle if recovery fails.
 				 */
-				CAM_ERR(CAM_CRM,
+				CAM_ERR_RATE_LIMIT(CAM_CRM,
 					"FATAL recovery cant finish idx %d status %d",
 					in_q->rd_idx,
 					in_q->slot[in_q->rd_idx].status);
@@ -553,7 +554,7 @@
 	}
 	if (trigger == CAM_TRIGGER_POINT_EOF &&
 			(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
-		CAM_ERR(CAM_CRM, "Applying for last SOF fails");
+		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
 		return -EINVAL;
 	}
 
@@ -902,7 +903,13 @@
 		CAM_ERR(CAM_CRM, "failed to create link, no mem");
 		return NULL;
 	}
-	in_q = &session->in_q;
+	in_q = (struct cam_req_mgr_req_queue *)
+		kzalloc(sizeof(struct cam_req_mgr_req_queue), GFP_KERNEL);
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "failed to create input queue, no mem");
+		kfree(link);
+		return NULL;
+	}
 	mutex_init(&link->lock);
 
 	mutex_lock(&link->lock);
@@ -928,7 +935,7 @@
 }
 
 /**
- * __cam_req_mgr_reserve_link()
+ * __cam_req_mgr_unreserve_link()
  *
  * @brief  : Reserves one link data struct within session
  * @session: session identifier
@@ -960,6 +967,8 @@
 		CAM_DBG(CAM_CRM, "Active session links (%d)",
 			session->num_links);
 	}
+	kfree((*link)->req.in_q);
+	(*link)->req.in_q = NULL;
 	kfree(*link);
 	*link = NULL;
 	mutex_unlock(&session->lock);
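With the shared in_q removed from the session struct, each link now owns a dynamically allocated request queue that is freed (and the pointer cleared) when the link is unreserved. A standalone sketch of that ownership change with hypothetical types:

	#include <stdlib.h>

	struct req_queue { int rd_idx; };

	struct link {
		struct req_queue *in_q;		/* owned by the link, not the session */
	};

	static struct link *link_reserve(void)
	{
		struct link *l = calloc(1, sizeof(*l));

		if (!l)
			return NULL;
		l->in_q = calloc(1, sizeof(*l->in_q));
		if (!l->in_q) {
			free(l);		/* unwind the partial allocation */
			return NULL;
		}
		return l;
	}

	static void link_unreserve(struct link **l)
	{
		if (!l || !*l)
			return;
		free((*l)->in_q);
		(*l)->in_q = NULL;
		free(*l);
		*l = NULL;			/* caller's pointer is also cleared */
	}

	int main(void)
	{
		struct link *l = link_reserve();

		link_unreserve(&l);
		return l != NULL;
	}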
@@ -1178,7 +1187,7 @@
 		}
 	}
 	if (!tbl) {
-		CAM_ERR(CAM_CRM, "dev_hdl not found %x, %x %x",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "dev_hdl not found %x, %x %x",
 			add_req->dev_hdl,
 			link->l_dev[0].dev_hdl,
 			link->l_dev[1].dev_hdl);
@@ -1267,8 +1276,9 @@
 	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
 		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
 		if (idx < 0) {
-			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
-			err_info->req_id);
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"req_id %lld not found in input queue",
+				err_info->req_id);
 		} else {
 			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
 				err_info->req_id, idx);
@@ -1430,7 +1440,7 @@
 
 	task = cam_req_mgr_workq_get_task(link->workq);
 	if (!task) {
-		CAM_ERR(CAM_CRM, "no empty task dev %x req %lld",
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld",
 			add_req->dev_hdl, add_req->req_id);
 		rc = -EBUSY;
 		goto end;
@@ -1902,10 +1912,7 @@
 	cam_destroy_device_hdl(link->link_hdl);
 	link_info->link_hdl = 0;
 link_hdl_fail:
-	mutex_lock(&link->lock);
-	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
-	mutex_unlock(&link->lock);
-
+	__cam_req_mgr_unreserve_link(cam_session, &link);
 	mutex_unlock(&g_crm_core_dev->crm_lock);
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 98a2a4f..e45d634 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -315,7 +315,6 @@
 	int32_t                       session_hdl;
 	uint32_t                      num_links;
 	struct cam_req_mgr_core_link *links[MAX_LINKS_PER_SESSION];
-	struct cam_req_mgr_req_queue  in_q;
 	struct list_head              entry;
 	struct mutex                  lock;
 	int32_t                       force_err_recovery;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 7a2bc09..e4bc98f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -500,6 +500,12 @@
 	g_dev.video = NULL;
 }
 
+void cam_register_subdev_fops(struct v4l2_file_operations *fops)
+{
+	*fops = v4l2_subdev_fops;
+}
+EXPORT_SYMBOL(cam_register_subdev_fops);
+
 int cam_register_subdev(struct cam_subdev *csd)
 {
 	struct v4l2_subdev *sd;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
index a9134fb..1d2169b 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -203,29 +203,29 @@
 
 	spin_lock_bh(&hdl_tbl_lock);
 	if (!hdl_tbl) {
-		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Hdl tbl is NULL");
 		goto device_priv_fail;
 	}
 
 	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
 	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
-		CAM_ERR(CAM_CRM, "Invalid idx");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
-		CAM_ERR(CAM_CRM, "Invalid state");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
 		goto device_priv_fail;
 	}
 
 	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
 	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
-		CAM_ERR(CAM_CRM, "Invalid type");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
 		goto device_priv_fail;
 	}
 
 	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
-		CAM_ERR(CAM_CRM, "Invalid hdl");
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl");
 		goto device_priv_fail;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
index 78f2223..8cd3214 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
@@ -82,6 +82,15 @@
 int cam_subdev_remove(struct cam_subdev *sd);
 
 /**
+ * cam_register_subdev_fops()
+ *
+ * @brief:   Copies the default v4l2 subdev file operations into @fops
+ *
+ * @fops:    v4l file operations
+ */
+void cam_register_subdev_fops(struct v4l2_file_operations *fops);
+
+/**
  * cam_register_subdev()
  *
  * @brief:   This is the common utility function to be called by each camera
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
index bcf4133..8ffa0ff 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -161,7 +161,7 @@
 	}
 	request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
 
-	trace_cam_apply_req("Actuator", apply);
+	trace_cam_apply_req("Actuator", apply->request_id);
 
 	CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
index e58f737..b58f5d8 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -286,6 +286,7 @@
 	}
 
 	/* Fill platform device id*/
+	a_ctrl->id = a_ctrl->soc_info.index;
 	pdev->id = a_ctrl->id;
 
 	rc = cam_actuator_init_subdev(a_ctrl);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
index 6aa7586..6cfb965 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -30,6 +30,11 @@
 {
 	int32_t rc = 0;
 
+	if (arg == NULL) {
+		CAM_ERR(CAM_CCI, "Invalid Args");
+		return rc;
+	}
+
 	switch (cmd) {
 	case VIDIOC_MSM_CCI_CFG:
 		rc = cam_cci_core_cfg(sd, arg);
@@ -44,6 +49,14 @@
 	return rc;
 }
 
+#ifdef CONFIG_COMPAT
+static long cam_cci_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+#endif
+
 irqreturn_t cam_cci_irq(int irq_num, void *data)
 {
 	uint32_t irq;
@@ -162,6 +175,9 @@
 
 static struct v4l2_subdev_core_ops cci_subdev_core_ops = {
 	.ioctl = cam_cci_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cci_subdev_compat_ioctl,
+#endif
 	.interrupt_service_routine = cam_cci_irq_routine,
 };
 
@@ -171,6 +187,34 @@
 
 static const struct v4l2_subdev_internal_ops cci_subdev_intern_ops;
 
+static struct v4l2_file_operations cci_v4l2_subdev_fops;
+
+static long cam_cci_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+
+static long cam_cci_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, cam_cci_subdev_do_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cci_subdev_fops_compat_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return v4l2_subdev_call(sd, core, ioctl, cmd, NULL);
+}
+#endif
+
 static int cam_cci_platform_probe(struct platform_device *pdev)
 {
 	struct cam_cpas_register_params cpas_parms;
@@ -222,6 +266,13 @@
 	v4l2_set_subdevdata(&new_cci_dev->v4l2_dev_str.sd, new_cci_dev);
 	g_cci_subdev = &new_cci_dev->v4l2_dev_str.sd;
 
+	cam_register_subdev_fops(&cci_v4l2_subdev_fops);
+	cci_v4l2_subdev_fops.unlocked_ioctl = cam_cci_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+	cci_v4l2_subdev_fops.compat_ioctl32 =
+		cam_cci_subdev_fops_compat_ioctl;
+#endif
+
 	cpas_parms.cam_cpas_client_cb = NULL;
 	cpas_parms.cell_index = 0;
 	cpas_parms.dev = &pdev->dev;
@@ -271,6 +322,26 @@
 	},
 };
 
+static int cam_cci_assign_fops(void)
+{
+	struct v4l2_subdev *sd;
+
+	sd = g_cci_subdev;
+	if (!sd || !(sd->devnode)) {
+		CAM_ERR(CAM_CRM,
+			"Invalid args sd node: %pK", sd);
+		return -EINVAL;
+	}
+	sd->devnode->fops = &cci_v4l2_subdev_fops;
+
+	return 0;
+}
+
+static int __init cam_cci_late_init(void)
+{
+	return cam_cci_assign_fops();
+}
+
 static int __init cam_cci_init_module(void)
 {
 	return platform_driver_register(&cci_driver);
@@ -282,6 +353,7 @@
 }
 
 module_init(cam_cci_init_module);
+late_initcall(cam_cci_late_init);
 module_exit(cam_cci_exit_module);
 MODULE_DESCRIPTION("MSM CCI driver");
 MODULE_LICENSE("GPL v2");
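The CCI changes above copy the stock v4l2 subdev fops at probe time, patch in custom (and compat) ioctl entry points, and then point the subdev's device node at the patched table from a late_initcall, once the node exists. A condensed sketch of just that late-stage fops swap; it is not buildable on its own, and my_subdev_fops / g_subdev are assumed names standing in for the driver's table and its registered subdev pointer:

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-subdev.h>

	static struct v4l2_file_operations my_subdev_fops;	/* filled at probe time */
	extern struct v4l2_subdev *g_subdev;			/* set when the subdev registers */

	static int __init my_fops_late_init(void)
	{
		if (!g_subdev || !g_subdev->devnode)
			return -EINVAL;		/* subdev node was never created */
		g_subdev->devnode->fops = &my_subdev_fops;
		return 0;
	}
	late_initcall(my_fops_late_init);	/* runs after the v4l2 node is registered */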
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
index bdd3e72..1a3f947 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -14,10 +14,32 @@
 #include "cam_csiphy_core.h"
 #include "cam_csiphy_dev.h"
 #include "cam_csiphy_soc.h"
+
+#include <soc/qcom/scm.h>
 #include <cam_mem_mgr.h>
 
 static int cam_csiphy_mem_dmp_param;
 module_param(cam_csiphy_mem_dmp_param, int, 0644);
+#define SCM_SVC_CAMERASS 0x18
+#define SECURE_SYSCALL_ID 0x6
+
+static int cam_csiphy_notify_secure_mode(int phy, bool protect)
+{
+	struct scm_desc desc = {0};
+
+	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+	desc.args[0] = protect;
+	desc.args[1] = phy;
+
+	CAM_DBG(CAM_CSIPHY, "phy : %d, protect : %d", phy, protect);
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID),
+		&desc)) {
+		CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
 	struct cam_csiphy_query_cap *csiphy_cap)
@@ -70,17 +92,10 @@
 		return -EINVAL;
 	}
 
-	csiphy_dev->csiphy_info =
-		kzalloc(sizeof(struct cam_csiphy_info), GFP_KERNEL);
-	if (!csiphy_dev->csiphy_info)
-		return -ENOMEM;
-
 	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
 		(uint64_t *)&generic_ptr, &len);
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return rc;
 	}
 
@@ -88,8 +103,6 @@
 		CAM_ERR(CAM_CSIPHY,
 			"offset is out of bounds: offset: %lld len: %zu",
 			cfg_dev->offset, len);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return -EINVAL;
 	}
 
@@ -104,8 +117,6 @@
 	if (rc < 0) {
 		CAM_ERR(CAM_CSIPHY,
 			"Failed to get cmd buf Mem address : %d", rc);
-		kfree(csiphy_dev->csiphy_info);
-		csiphy_dev->csiphy_info = NULL;
 		return rc;
 	}
 
@@ -113,13 +124,26 @@
 	cmd_buf += cmd_desc->offset / 4;
 	cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf;
 
-	csiphy_dev->csiphy_info->lane_cnt = cam_cmd_csiphy_info->lane_cnt;
-	csiphy_dev->csiphy_info->lane_mask = cam_cmd_csiphy_info->lane_mask;
-	csiphy_dev->csiphy_info->csiphy_3phase =
+	csiphy_dev->config_count++;
+	csiphy_dev->csiphy_info.lane_cnt += cam_cmd_csiphy_info->lane_cnt;
+	csiphy_dev->csiphy_info.lane_mask |= cam_cmd_csiphy_info->lane_mask;
+	csiphy_dev->csiphy_info.csiphy_3phase =
 		cam_cmd_csiphy_info->csiphy_3phase;
-	csiphy_dev->csiphy_info->combo_mode = cam_cmd_csiphy_info->combo_mode;
-	csiphy_dev->csiphy_info->settle_time = cam_cmd_csiphy_info->settle_time;
-	csiphy_dev->csiphy_info->data_rate = cam_cmd_csiphy_info->data_rate;
+	csiphy_dev->csiphy_info.combo_mode |= cam_cmd_csiphy_info->combo_mode;
+	if (cam_cmd_csiphy_info->combo_mode == 1)
+		csiphy_dev->csiphy_info.settle_time_combo_sensor =
+			cam_cmd_csiphy_info->settle_time;
+	else
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info->settle_time;
+	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
+	csiphy_dev->csiphy_info.secure_mode = cam_cmd_csiphy_info->secure_mode;
+
+	if (csiphy_dev->csiphy_info.secure_mode &&
+		(csiphy_dev->config_count == 1))
+		rc = cam_csiphy_notify_secure_mode(
+			csiphy_dev->soc_info.index,
+			CAM_SECURE_MODE_SECURE);
 
 	return rc;
 }
@@ -205,14 +229,8 @@
 	void __iomem *csiphybase;
 	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
 
-	if (csiphy_dev->csiphy_info == NULL) {
-		CAM_ERR(CAM_CSIPHY, "csiphy_info is NULL, No/Fail CONFIG_DEV?");
-		return -EINVAL;
-	}
-
-	lane_cnt = csiphy_dev->csiphy_info->lane_cnt;
-	lane_mask = csiphy_dev->csiphy_info->lane_mask & 0x1f;
-	settle_cnt = (csiphy_dev->csiphy_info->settle_time / 200000000);
+	lane_cnt = csiphy_dev->csiphy_info.lane_cnt;
+	lane_mask = csiphy_dev->csiphy_info.lane_mask & 0x1f;
 	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
 
 	if (!csiphybase) {
@@ -231,8 +249,8 @@
 		mask <<= 1;
 	}
 
-	if (!csiphy_dev->csiphy_info->csiphy_3phase) {
-		if (csiphy_dev->csiphy_info->combo_mode == 1)
+	if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
 				csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg;
 		else
@@ -242,7 +260,7 @@
 		cfg_size = csiphy_dev->ctrl_reg->csiphy_reg.
 			csiphy_2ph_config_array_size;
 	} else {
-		if (csiphy_dev->csiphy_info->combo_mode == 1)
+		if (csiphy_dev->csiphy_info.combo_mode == 1)
 			reg_array =
 				csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg;
 		else
@@ -283,6 +301,12 @@
 			continue;
 		}
 
+		settle_cnt = (csiphy_dev->csiphy_info.settle_time / 200000000);
+		if (csiphy_dev->csiphy_info.combo_mode == 1 &&
+			(lane_pos >= 3))
+			settle_cnt =
+				(csiphy_dev->csiphy_info.
+				settle_time_combo_sensor / 200000000);
 		for (i = 0; i < cfg_size; i++) {
 			switch (reg_array[lane_pos][i].csiphy_param_type) {
 			case CSIPHY_LANE_ENABLE:
@@ -353,6 +377,14 @@
 
 		csiphy_acq_params.combo_mode = 0;
 
+		if (copy_from_user(&csiphy_acq_params,
+			(void __user *)csiphy_acq_dev.info_handle,
+			sizeof(csiphy_acq_params))) {
+			CAM_ERR(CAM_CSIPHY,
+				"Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
 		if (csiphy_dev->acquire_count == 2) {
 			CAM_ERR(CAM_CSIPHY,
 					"CSIPHY device do not allow more than 2 acquires");
@@ -360,6 +392,27 @@
 			goto release_mutex;
 		}
 
+		if ((csiphy_acq_params.combo_mode == 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Combo Acq are not allowed: cm: %d, acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode != 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode != 1) &&
+			(csiphy_dev->acquire_count == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Acquires are not allowed cm: %d acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
 		bridge_params.ops = NULL;
 		bridge_params.session_hdl = csiphy_acq_dev.session_handle;
 		bridge_params.v4l2_sub_dev_flag = 0;
@@ -384,7 +437,9 @@
 		}
 		if (csiphy_acq_params.combo_mode == 1)
 			csiphy_dev->is_acquired_dev_combo_mode = 1;
+
 		csiphy_dev->acquire_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
 	}
 		break;
 	case CAM_QUERY_CAP: {
@@ -400,6 +455,19 @@
 	}
 		break;
 	case CAM_STOP_DEV: {
+		if (csiphy_dev->csiphy_state !=
+			CAM_CSIPHY_START) {
+			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
+				csiphy_dev->csiphy_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (--csiphy_dev->start_dev_count) {
+			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
+				csiphy_dev->start_dev_count);
+			goto release_mutex;
+		}
+
 		rc = cam_csiphy_disable_hw(csiphy_dev);
 		if (rc < 0) {
 			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
@@ -411,6 +479,8 @@
 			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
 			goto release_mutex;
 		}
+		csiphy_dev->csiphy_info.combo_mode = 0;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_STOP;
 	}
 		break;
 	case CAM_RELEASE_DEV: {
@@ -441,8 +511,22 @@
 			csiphy_dev->bridge_intf.link_hdl[1] = -1;
 			csiphy_dev->bridge_intf.
 				session_hdl[1] = -1;
+			csiphy_dev->is_acquired_dev_combo_mode = 0;
 		}
+
+		csiphy_dev->config_count--;
 		csiphy_dev->acquire_count--;
+		if (csiphy_dev->acquire_count == 0)
+			csiphy_dev->csiphy_state = CAM_CSIPHY_RELEASE;
+
+		if (csiphy_dev->csiphy_info.secure_mode &&
+			(!csiphy_dev->config_count)) {
+			csiphy_dev->csiphy_info.secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+			rc = cam_csiphy_notify_secure_mode(
+				csiphy_dev->soc_info.index,
+				CAM_SECURE_MODE_NON_SECURE);
+		}
 	}
 		break;
 	case CAM_CONFIG_DEV: {
@@ -464,6 +548,11 @@
 		struct cam_ahb_vote ahb_vote;
 		struct cam_axi_vote axi_vote;
 
+		csiphy_dev->start_dev_count++;
+
+		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START)
+			goto release_mutex;
+
 		ahb_vote.type = CAM_VOTE_ABSOLUTE;
 		ahb_vote.vote.level = CAM_SVS_VOTE;
 		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
@@ -491,6 +580,7 @@
 			cam_cpas_stop(csiphy_dev->cpas_handle);
 			goto release_mutex;
 		}
+		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
 	}
 		break;
 	case CAM_SD_SHUTDOWN:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
index 6e74344..a136fa7 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -157,6 +157,7 @@
 		NULL;
 
 	new_csiphy_dev->acquire_count = 0;
+	new_csiphy_dev->start_dev_count = 0;
 	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
 
 	cpas_parms.cam_cpas_client_cb = NULL;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
index 8ed5ba4..25891c5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -67,6 +67,13 @@
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
 #endif
 
+enum cam_csiphy_state {
+	CAM_CSIPHY_ACQUIRE,
+	CAM_CSIPHY_RELEASE,
+	CAM_CSIPHY_START,
+	CAM_CSIPHY_STOP,
+};
+
 /**
  * struct csiphy_reg_parms_t
  * @mipi_csiphy_glbl_irq_cmd_addr: CSIPhy irq addr
@@ -150,6 +157,32 @@
 };
 
 /**
+ * struct cam_csiphy_param: CSIPHY configuration parsed from the cmd buffer
+ * @lane_mask     :  Lane mask details
+ * @lane_assign   :  Lane sensor will be using
+ * @csiphy_3phase :  Whether 3Phase (CPHY) or 2Phase (DPHY) operation
+ * @combo_mode    :  Indicates whether combo mode is enabled
+ * @lane_cnt      :  Total number of lanes
+ * @secure_mode   :  Indicates whether the PHY is used in a secure session
+ * @settle_time   :  Settling time in ms
+ * @settle_time_combo_sensor :  Settling time in ms for the combo mode sensor
+ * @data_rate     :  Data rate in mbps
+ *
+ */
+struct cam_csiphy_param {
+	uint16_t    lane_mask;
+	uint16_t    lane_assign;
+	uint8_t     csiphy_3phase;
+	uint8_t     combo_mode;
+	uint8_t     lane_cnt;
+	uint8_t     secure_mode;
+	uint64_t    settle_time;
+	uint64_t    settle_time_combo_sensor;
+	uint64_t    data_rate;
+};
+
+/**
  * struct csiphy_device
  * @pdev: Platform device
  * @irq: Interrupt structure
@@ -171,6 +204,7 @@
  * @ref_count: Reference count
  * @clk_lane: Clock lane
  * @acquire_count: Acquire device count
+ * @start_dev_count: Start count
  * @is_acquired_dev_combo_mode:
  *    Flag that mentions whether already acquired
  *   device is for combo mode
@@ -178,7 +212,7 @@
 struct csiphy_device {
 	struct mutex mutex;
 	uint32_t hw_version;
-	uint32_t csiphy_state;
+	enum cam_csiphy_state csiphy_state;
 	struct csiphy_ctrl_t *ctrl_reg;
 	uint32_t csiphy_max_clk;
 	struct msm_cam_clk_info csiphy_3p_clk_info[2];
@@ -190,14 +224,16 @@
 	uint8_t is_csiphy_3phase_hw;
 	uint8_t num_irq_registers;
 	struct cam_subdev v4l2_dev_str;
-	struct cam_csiphy_info *csiphy_info;
+	struct cam_csiphy_param csiphy_info;
 	struct intf_params bridge_intf;
 	uint32_t clk_lane;
 	uint32_t acquire_count;
+	uint32_t start_dev_count;
 	char device_name[20];
 	uint32_t is_acquired_dev_combo_mode;
 	struct cam_hw_soc_info   soc_info;
 	uint32_t cpas_handle;
+	uint32_t config_count;
 };
 
 #endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
index 02b2c51..6eab59a 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -243,7 +243,8 @@
 	struct camera_io_master *client = &e_ctrl->io_master_info;
 	uint8_t                  id[2];
 
-	rc = cam_spi_query_id(client, 0, &id[0], 2);
+	rc = cam_spi_query_id(client, 0, CAMERA_SENSOR_I2C_TYPE_WORD,
+		&id[0], 2);
 	if (rc)
 		return rc;
 	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
@@ -310,6 +311,8 @@
 data_mem_free:
 	kfree(e_ctrl->cal_data.mapdata);
 	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
 	return rc;
 }
 
@@ -483,7 +486,7 @@
 	uint16_t                        cmd_length_in_bytes = 0;
 	struct cam_cmd_i2c_info        *i2c_info = NULL;
 	int                             num_map = -1;
-	struct cam_eeprom_memory_map_t *map;
+	struct cam_eeprom_memory_map_t *map = NULL;
 	struct cam_eeprom_soc_private  *soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
@@ -664,53 +667,60 @@
 	switch (csl_packet->header.op_code & 0xFFFFFF) {
 	case CAM_EEPROM_PACKET_OPCODE_INIT:
 		if (e_ctrl->userspace_probe == false) {
-			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			CAM_ERR(CAM_EEPROM,
-				"Eeprom already probed at kernel boot");
-			rc = -EINVAL;
-		break;
-		}
-		if (e_ctrl->cal_data.num_data == 0) {
-			rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM,
-					"Failed in parsing the pkt");
+			rc = cam_eeprom_parse_read_memory_map(
+					e_ctrl->pdev->dev.of_node, e_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc);
 				return rc;
 			}
-
-			e_ctrl->cal_data.mapdata =
-				kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
-			if (!e_ctrl->cal_data.mapdata) {
-				rc = -ENOMEM;
-				CAM_ERR(CAM_EEPROM, "failed");
-				goto error;
-			}
-
-			rc = cam_eeprom_power_up(e_ctrl,
-				&soc_private->power_info);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
-				goto memdata_free;
-			}
-
-			rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
-			if (rc) {
-				CAM_ERR(CAM_EEPROM,
-					"read_eeprom_memory failed");
-				goto power_down;
-			}
-
 			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
-			rc = cam_eeprom_power_down(e_ctrl);
-		} else {
-			CAM_DBG(CAM_EEPROM, "Already read eeprom");
+			kfree(e_ctrl->cal_data.mapdata);
+			kfree(e_ctrl->cal_data.map);
+			e_ctrl->cal_data.num_data = 0;
+			e_ctrl->cal_data.num_map = 0;
+			CAM_DBG(CAM_EEPROM,
+				"Returning the data using kernel probe");
+			break;
 		}
+		rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in parsing the pkt");
+			return rc;
+		}
+
+		e_ctrl->cal_data.mapdata =
+			kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+		if (!e_ctrl->cal_data.mapdata) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_EEPROM, "failed");
+			goto error;
+		}
+
+		rc = cam_eeprom_power_up(e_ctrl,
+			&soc_private->power_info);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+			goto memdata_free;
+		}
+
+		rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"read_eeprom_memory failed");
+			goto power_down;
+		}
+
+		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+		rc = cam_eeprom_power_down(e_ctrl);
+		kfree(e_ctrl->cal_data.mapdata);
+		kfree(e_ctrl->cal_data.map);
+		e_ctrl->cal_data.num_data = 0;
+		e_ctrl->cal_data.num_map = 0;
 		break;
 	default:
 		break;
 	}
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	return rc;
 power_down:
 	rc = cam_eeprom_power_down(e_ctrl);
@@ -718,6 +728,8 @@
 	kfree(e_ctrl->cal_data.mapdata);
 error:
 	kfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
 	return rc;
 }
 
@@ -764,6 +776,23 @@
 			goto release_mutex;
 		}
 		break;
+	case CAM_RELEASE_DEV:
+		if (e_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_EEPROM,
+				"Invalid Handles: device hdl: %d link hdl: %d",
+				e_ctrl->bridge_intf.device_hdl,
+				e_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM,
+				"failed in destroying the device hdl");
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+		break;
 	case CAM_CONFIG_DEV:
 		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
 		if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
index 9bbc6df..88d7665 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -152,6 +152,12 @@
 		goto probe_failure;
 	}
 
+	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
+	if (!soc_private)
+		goto ectrl_free;
+
+	e_ctrl->soc_info.soc_private = soc_private;
+
 	i2c_set_clientdata(client, e_ctrl);
 
 	mutex_init(&(e_ctrl->eeprom_mutex));
@@ -175,15 +181,6 @@
 		goto free_soc;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(soc_info->dev->of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto free_soc;
-		}
-	}
-
 	soc_private = (struct cam_eeprom_soc_private *)(id->driver_data);
 	if (!soc_private) {
 		CAM_ERR(CAM_EEPROM, "board info NULL");
@@ -242,8 +239,6 @@
 		return -EINVAL;
 	}
 
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	if (soc_private) {
 		kfree(soc_private->power_info.gpio_num_info);
 		kfree(soc_private);
@@ -307,13 +302,10 @@
 		goto board_free;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(soc_info->dev->of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto board_free;
-		}
+	rc = cam_eeprom_spi_parse_of(spi_client);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Device tree parsing error");
+		goto board_free;
 	}
 
 	rc = cam_eeprom_init_subdev(e_ctrl);
@@ -369,8 +361,6 @@
 	}
 
 	kfree(e_ctrl->io_master_info.spi_client);
-	kfree(e_ctrl->cal_data.mapdata);
-	kfree(e_ctrl->cal_data.map);
 	soc_private =
 		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
 	if (soc_private) {
@@ -428,15 +418,6 @@
 		goto free_soc;
 	}
 
-	if (e_ctrl->userspace_probe == false) {
-		rc = cam_eeprom_parse_read_memory_map(pdev->dev.of_node,
-			e_ctrl);
-		if (rc) {
-			CAM_ERR(CAM_EEPROM, "failed: read mem map rc %d", rc);
-			goto free_soc;
-		}
-	}
-
 	rc = cam_eeprom_init_subdev(e_ctrl);
 	if (rc)
 		goto free_soc;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
index 5e12f4a..70c40fd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -20,6 +20,96 @@
 #include "cam_eeprom_soc.h"
 #include "cam_debug_util.h"
 
+#define cam_eeprom_spi_parse_cmd(spi_dev, name, out)          \
+	do {                                                  \
+		spi_dev->cmd_tbl.name.opcode = out[0];        \
+		spi_dev->cmd_tbl.name.addr_len = out[1];      \
+		spi_dev->cmd_tbl.name.dummy_len = out[2];     \
+		spi_dev->cmd_tbl.name.delay_intv = out[3];    \
+		spi_dev->cmd_tbl.name.delay_count = out[4];   \
+	} while (0)
+
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *spi_dev)
+{
+	int rc = -EFAULT;
+	uint32_t tmp[5];
+
+	rc  = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-read", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get read data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readseq", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_seq, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get readseq data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-queryid", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, query_id, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get queryid data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-pprog", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, page_program, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get page program data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-wenable", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, write_enable, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get write enable data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readst", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_status, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get read status data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-erase", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, erase, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get erase data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"eeprom-id", tmp, 2);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get eeprom id");
+		return rc;
+	}
+
+	spi_dev->mfr_id0 = tmp[0];
+	spi_dev->device_id0 = tmp[1];
+
+	return 0;
+}
+
 /*
  * cam_eeprom_parse_memory_map() - parse memory map in device node
  * @of:         device node
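
Note on the spiop-* parsing above: each of these device-tree properties carries
five u32 cells that cam_eeprom_spi_parse_cmd() copies into the SPI command
table. A hand-expanded sketch of the macro for the "spiop-read" case
(illustration only, mirroring the macro body added in cam_eeprom_soc.c):

	/* Equivalent of cam_eeprom_spi_parse_cmd(spi_dev, read, tmp) after
	 * of_property_read_u32_array(..., "spiop-read", tmp, 5) succeeds.
	 */
	spi_dev->cmd_tbl.read.opcode      = tmp[0]; /* SPI opcode            */
	spi_dev->cmd_tbl.read.addr_len    = tmp[1]; /* address length, bytes */
	spi_dev->cmd_tbl.read.dummy_len   = tmp[2]; /* dummy bytes           */
	spi_dev->cmd_tbl.read.delay_intv  = tmp[3]; /* delay interval        */
	spi_dev->cmd_tbl.read.delay_count = tmp[4]; /* delay count           */
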
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
index 08c436c..d311549 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -14,6 +14,8 @@
 
 #include "cam_eeprom_dev.h"
 
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *client);
+
 int cam_eeprom_parse_dt_memory_map(struct device_node *of,
 	struct cam_eeprom_memory_block_t *data);
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index 0aaf8b0c..f455715 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -334,7 +334,8 @@
 		flash_data = &fctrl->per_frame[frame_offset];
 
 		if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Flash */
 			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
 				rc = cam_flash_high(fctrl, flash_data);
@@ -348,7 +349,8 @@
 			}
 		} else if ((flash_data->opcode ==
 			CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			/* Turn On Torch */
 			if (fctrl->flash_state == CAM_FLASH_STATE_INIT) {
 				rc = cam_flash_low(fctrl, flash_data);
@@ -361,7 +363,8 @@
 				fctrl->flash_state = CAM_FLASH_STATE_LOW;
 			}
 		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
-			(flash_data->cmn_attr.is_settings_valid)) {
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
 			if ((fctrl->flash_state != CAM_FLASH_STATE_RELEASE) ||
 				(fctrl->flash_state != CAM_FLASH_STATE_INIT)) {
 				rc = cam_flash_off(fctrl);
@@ -374,10 +377,7 @@
 				}
 			}
 		} else {
-			CAM_ERR(CAM_FLASH, "Wrong opcode : %d",
-				flash_data->opcode);
-			rc = -EINVAL;
-			goto apply_setting_err;
+			CAM_DBG(CAM_FLASH, "NOP opcode: req_id: %llu", req_id);
 		}
 	}
 
@@ -470,6 +470,19 @@
 			csl_packet->cmd_buf_offset);
 		frame_offset = csl_packet->header.request_id %
 			MAX_PER_FRAME_ARRAY;
+		if (fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid
+			== true) {
+			fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+			fctrl->per_frame[frame_offset].
+				cmn_attr.is_settings_valid = false;
+			for (i = 0;
+			i < fctrl->per_frame[frame_offset].cmn_attr.count;
+			i++) {
+				fctrl->per_frame[frame_offset].
+					led_current_ma[i] = 0;
+			}
+		}
+
 		fctrl->per_frame[frame_offset].cmn_attr.request_id =
 			csl_packet->header.request_id;
 		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
@@ -479,6 +492,10 @@
 			(uint64_t *)&generic_ptr, &len_of_buffer);
 		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
 			cmd_desc->offset);
+
+		if (!cmd_buf)
+			return -EINVAL;
+
 		cmn_hdr = (struct common_header *)cmd_buf;
 
 		switch (cmn_hdr->cmd_type) {
@@ -487,6 +504,11 @@
 				"CAMERA_FLASH_CMD_TYPE_OPS case called");
 			flash_operation_info =
 				(struct cam_flash_set_on_off *) cmd_buf;
+			if (!flash_operation_info) {
+				CAM_ERR(CAM_FLASH, "flash_operation_info Null");
+				return -EINVAL;
+			}
+
 			fctrl->per_frame[frame_offset].opcode =
 				flash_operation_info->opcode;
 			fctrl->per_frame[frame_offset].cmn_attr.count =
@@ -502,7 +524,6 @@
 				cmn_hdr->cmd_type);
 			return -EINVAL;
 		}
-
 		break;
 	}
 	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
@@ -596,6 +617,8 @@
 		break;
 	}
 	case CAM_PKT_NOP_OPCODE: {
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
+			csl_packet->header.request_id);
 		goto update_req_mgr;
 	}
 	default:
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
index 7040ba2..d743cf0 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -93,7 +93,7 @@
 		break;
 	}
 	case CAM_QUERY_CAP: {
-		struct cam_flash_query_cap_info flash_cap;
+		struct cam_flash_query_cap_info flash_cap = {0};
 
 		CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
 		flash_cap.slot_info = fctrl->soc_info.index;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index edbb335..f0c1bca 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -813,29 +813,27 @@
 					return rc;
 				}
 			}
-			del_req_id = (req_id +
-				MAX_PER_FRAME_ARRAY -
-				MAX_SYSTEM_PIPELINE_DELAY) %
-				MAX_PER_FRAME_ARRAY;
-			CAM_DBG(CAM_SENSOR, "Deleting the Request: %d",
-				del_req_id);
-			if (req_id >
-				s_ctrl->i2c_data.per_frame[del_req_id].
-				request_id) {
-				s_ctrl->i2c_data.per_frame[del_req_id].
-					request_id = 0;
-				rc = delete_request(
-					&(s_ctrl->i2c_data.
-					per_frame[del_req_id]));
-				if (rc < 0)
-					CAM_ERR(CAM_SENSOR,
-						"Delete request Fail:%d rc:%d",
-						del_req_id, rc);
-			}
 		} else {
 			CAM_DBG(CAM_SENSOR,
 				"Invalid/NOP request to apply: %lld", req_id);
 		}
+
+		del_req_id = (req_id + MAX_PER_FRAME_ARRAY -
+			MAX_SYSTEM_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;
+		CAM_DBG(CAM_SENSOR, "Deleting the Request: %d", del_req_id);
+
+		if ((req_id >
+			 s_ctrl->i2c_data.per_frame[del_req_id].request_id) &&
+			(s_ctrl->i2c_data.per_frame[del_req_id].
+				is_settings_valid == 1)) {
+			s_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
+			rc = delete_request(
+				&(s_ctrl->i2c_data.per_frame[del_req_id]));
+			if (rc < 0)
+				CAM_ERR(CAM_SENSOR,
+					"Delete request Fail:%d rc:%d",
+					del_req_id, rc);
+		}
 	}
 	return rc;
 }
@@ -855,7 +853,7 @@
 		return -EINVAL;
 	}
 	CAM_DBG(CAM_SENSOR, " Req Id: %lld", apply->request_id);
-	trace_cam_apply_req("Sensor", apply);
+	trace_cam_apply_req("Sensor", apply->request_id);
 	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id);
 	return rc;
 }
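
The request deletion above uses modular ring-buffer arithmetic: the slot freed
is the one MAX_SYSTEM_PIPELINE_DELAY frames behind the request currently being
applied. A small standalone sketch of the computation, with the two constants
set to illustrative values only (the real values come from the cam_sensor
headers) and a hypothetical helper name:

	#define MAX_PER_FRAME_ARRAY        32	/* assumed example value */
	#define MAX_SYSTEM_PIPELINE_DELAY   2	/* assumed example value */

	static inline uint32_t cam_sensor_del_slot(uint64_t req_id)
	{
		/* e.g. req_id = 5: (5 + 32 - 2) % 32 = 3, two frames back */
		return (req_id + MAX_PER_FRAME_ARRAY -
			MAX_SYSTEM_PIPELINE_DELAY) % MAX_PER_FRAME_ARRAY;
	}
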
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
index 3257703..2c1f520 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -38,7 +38,7 @@
 	rc = v4l2_subdev_call(cci_client->cci_subdev,
 		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
 	if (rc < 0) {
-		CAM_ERR(CAM_SENSOR, "line %d rc = %d", rc);
+		CAM_ERR(CAM_SENSOR, "rc = %d", rc);
 		return rc;
 	}
 
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index c9cf088..6e169cf 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -58,7 +58,7 @@
 			addr, data, addr_type, data_type);
 	} else if (io_master_info->master_type == SPI_MASTER) {
 		return cam_spi_read(io_master_info,
-			addr, data, addr_type);
+			addr, data, addr_type, data_type);
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
@@ -78,8 +78,8 @@
 		return cam_qup_i2c_read_seq(io_master_info->client,
 			addr, data, addr_type, num_bytes);
 	} else if (io_master_info->master_type == SPI_MASTER) {
-		return cam_spi_read(io_master_info,
-			addr, (uint32_t *)data, addr_type);
+		return cam_spi_read_seq(io_master_info,
+			addr, data, addr_type, num_bytes);
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
@@ -153,6 +153,9 @@
 			cam_cci_get_subdev();
 		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
 			MSM_CCI_INIT);
+	} else if ((io_master_info->master_type == I2C_MASTER) ||
+			(io_master_info->master_type == SPI_MASTER)) {
+		return 0;
 	} else {
 		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
 			io_master_info->master_type);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
index 4011aa0..131b0ae 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -59,25 +59,27 @@
  * instruction and the data length.
  */
 static void cam_set_addr(uint32_t addr, uint8_t addr_len,
-	enum camera_sensor_i2c_type type,
+	enum camera_sensor_i2c_type addr_type,
 	char *str)
 {
-	int i, len;
-
 	if (!addr_len)
 		return;
 
-	if (addr_len < type)
-		CAM_DBG(CAM_EEPROM, "omitting higher bits in address");
-
-	/* only support transfer MSB first for now */
-	len = addr_len - type;
-	for (i = len; i < addr_len; i++) {
-		if (i >= 0)
-			str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
-				& 0xFF;
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		str[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		str[0] = addr >> 8;
+		str[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		str[0] = addr >> 16;
+		str[1] = addr >> 8;
+		str[2] = addr;
+	} else {
+		str[0] = addr >> 24;
+		str[1] = addr >> 16;
+		str[2] = addr >> 8;
+		str[3] = addr;
 	}
-
 }
 
 /**
@@ -105,6 +107,7 @@
  */
 static int32_t cam_spi_tx_helper(struct camera_io_master *client,
 	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	uint32_t num_byte, char *tx, char *rx)
 {
 	int32_t rc = -EINVAL;
@@ -112,10 +115,6 @@
 	char *ctx = NULL, *crx = NULL;
 	uint32_t len, hlen;
 	uint8_t retries = client->spi_client->retries;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
-
-	if (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
-		return rc;
 
 	hlen = cam_camera_spi_get_hlen(inst);
 	len = hlen + num_byte;
@@ -166,6 +165,7 @@
 
 static int32_t cam_spi_tx_read(struct camera_io_master *client,
 	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	uint32_t num_byte, char *tx, char *rx)
 {
 	int32_t rc = -EINVAL;
@@ -173,12 +173,6 @@
 	char *ctx = NULL, *crx = NULL;
 	uint32_t hlen;
 	uint8_t retries = client->spi_client->retries;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
-
-	if ((addr_type != CAMERA_SENSOR_I2C_TYPE_WORD)
-		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_BYTE)
-		&& (addr_type != CAMERA_SENSOR_I2C_TYPE_3B))
-		return rc;
 
 	hlen = cam_camera_spi_get_hlen(inst);
 	if (tx) {
@@ -204,14 +198,8 @@
 	}
 
 	ctx[0] = inst->opcode;
-	if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
-		cam_set_addr(addr, inst->addr_len, addr_type,
-			ctx + 1);
-	} else {
-		ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
-		ctx[2] = (addr & 0xFF);
-		ctx[3] = 0;
-	}
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+
 	CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
 		ctx[1], ctx[2],	ctx[3]);
 	while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
@@ -235,18 +223,23 @@
 
 int cam_spi_read(struct camera_io_master *client,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	int rc = -EINVAL;
 	uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
 
-	if ((data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
-		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verification");
 		return rc;
+	}
 
 	rc = cam_spi_tx_read(client,
 		&client->spi_client->cmd_tbl.read, addr, &temp[0],
-		data_type, NULL, NULL);
+		addr_type, data_type, NULL, NULL);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "failed %d", rc);
 		return rc;
@@ -254,23 +247,50 @@
 
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
 		*data = temp[0];
-	else
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
 		*data = (temp[0] << BITS_PER_BYTE) | temp[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = (temp[0] << 16 | temp[1] << 8 | temp[2]);
+	else
+		*data = (temp[0] << 24 | temp[1] << 16 | temp[2] << 8 |
+			temp[3]);
 
 	CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
 	return rc;
 }
 
+int32_t cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return -EINVAL;
+	}
+
+	if (num_bytes == 0) {
+		CAM_ERR(CAM_SENSOR, "Invalid num_bytes: %d", num_bytes);
+		return -EINVAL;
+	}
+
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.read_seq, addr, data,
+		addr_type, num_bytes, NULL, NULL);
+}
+
 int cam_spi_query_id(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint32_t num_byte)
+	uint32_t addr, enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte)
 {
 	return cam_spi_tx_helper(client,
-		&client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
-		NULL, NULL);
+		&client->spi_client->cmd_tbl.query_id,
+		addr, data, addr_type, num_byte, NULL, NULL);
 }
 
 static int32_t cam_spi_read_status_reg(
-	struct camera_io_master *client, uint8_t *status)
+	struct camera_io_master *client, uint8_t *status,
+	enum camera_sensor_i2c_type addr_type)
 {
 	struct cam_camera_spi_inst *rs =
 		&client->spi_client->cmd_tbl.read_status;
@@ -279,16 +299,17 @@
 		CAM_ERR(CAM_SENSOR, "not implemented yet");
 		return -ENXIO;
 	}
-	return cam_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+	return cam_spi_tx_helper(client, rs, 0, status,
+		addr_type, 1, NULL, NULL);
 }
 
 static int32_t cam_spi_device_busy(struct camera_io_master *client,
-	uint8_t *busy)
+	uint8_t *busy, enum camera_sensor_i2c_type addr_type)
 {
 	int rc;
 	uint8_t st = 0;
 
-	rc = cam_spi_read_status_reg(client,  &st);
+	rc = cam_spi_read_status_reg(client, &st, addr_type);
 	if (rc < 0) {
 		CAM_ERR(CAM_SENSOR, "failed to read status reg");
 		return rc;
@@ -298,14 +319,15 @@
 }
 
 static int32_t cam_spi_wait(struct camera_io_master *client,
-	struct cam_camera_spi_inst *inst)
+	struct cam_camera_spi_inst *inst,
+	enum camera_sensor_i2c_type addr_type)
 {
 	uint8_t busy;
 	int i, rc;
 
 	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
 	for (i = 0; i < inst->delay_count; i++) {
-		rc = cam_spi_device_busy(client, &busy);
+		rc = cam_spi_device_busy(client, &busy, addr_type);
 		if (rc < 0)
 			return rc;
 		if (!busy)
@@ -321,8 +343,8 @@
 	return 0;
 }
 
-static int32_t cam_spi_write_enable(
-	struct camera_io_master *client)
+static int32_t cam_spi_write_enable(struct camera_io_master *client,
+	enum camera_sensor_i2c_type addr_type)
 {
 	struct cam_camera_spi_inst *we =
 		&client->spi_client->cmd_tbl.write_enable;
@@ -334,7 +356,8 @@
 		CAM_ERR(CAM_SENSOR, "not implemented yet");
 		return -EINVAL;
 	}
-	rc = cam_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+	rc = cam_spi_tx_helper(client, we, 0, NULL, addr_type,
+		0, NULL, NULL);
 	if (rc < 0)
 		CAM_ERR(CAM_SENSOR, "write enable failed");
 	return rc;
@@ -354,7 +377,9 @@
  * used outside cam_spi_write_seq().
  */
 static int32_t cam_spi_page_program(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint16_t len, uint8_t *tx)
 {
 	int rc;
 	struct cam_camera_spi_inst *pg =
@@ -362,10 +387,9 @@
 	struct spi_device *spi = client->spi_client->spi_master;
 	uint8_t retries = client->spi_client->retries;
 	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
 
 	CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
-	rc = cam_spi_write_enable(client);
+	rc = cam_spi_write_enable(client, addr_type);
 	if (rc < 0)
 		return rc;
 	memset(tx, 0, header_len);
@@ -375,7 +399,7 @@
 	CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
 		len, tx[0], tx[1], tx[2], tx[3]);
 	while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
-		rc = cam_spi_wait(client, pg);
+		rc = cam_spi_wait(client, pg, addr_type);
 		msleep(client->spi_client->retry_delay);
 		retries--;
 	}
@@ -383,41 +407,54 @@
 		CAM_ERR(CAM_SENSOR, "failed %d", rc);
 		return rc;
 	}
-	rc = cam_spi_wait(client, pg);
+	rc = cam_spi_wait(client, pg, addr_type);
 		return rc;
 }
 
 int cam_spi_write(struct camera_io_master *client,
-	uint32_t addr, uint16_t data,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	struct cam_camera_spi_inst *pg =
 		&client->spi_client->cmd_tbl.page_program;
 	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
 	uint16_t len = 0;
-	char buf[2];
+	char buf[CAMERA_SENSOR_I2C_TYPE_MAX];
 	char *tx;
 	int rc = -EINVAL;
-	enum camera_sensor_i2c_type addr_type = CAMERA_SENSOR_I2C_TYPE_WORD;
 
-	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
-		|| (data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
-		&& data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
 		return rc;
+
 	CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
 	len = header_len + (uint8_t)data_type;
 	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
 	if (!tx)
 		goto NOMEM;
+
 	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
 		buf[0] = data;
 		CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
 	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
 		buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
 		buf[1] = (data & 0x00FF);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = (data >> 16) & 0x00FF;
+		buf[1] = (data >> 8) & 0x00FF;
+		buf[2] = (data & 0x00FF);
+	} else {
+		buf[0] = (data >> 24) & 0x00FF;
+		buf[1] = (data >> 16) & 0x00FF;
+		buf[2] = (data >> 8) & 0x00FF;
+		buf[3] = (data & 0x00FF);
 	}
+
 	rc = cam_spi_page_program(client, addr, buf,
-		(uint16_t)data_type, tx);
+		addr_type, data_type, tx);
 	if (rc < 0)
 		goto ERROR;
 	goto OUT;
@@ -442,18 +479,22 @@
 
 	if (!client || !write_setting)
 		return rc;
-	if (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
-		|| (write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_BYTE
-		&& write_setting->data_type != CAMERA_SENSOR_I2C_TYPE_WORD))
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
 		return rc;
+
 	reg_setting = write_setting->reg_setting;
-	client_addr_type = addr_type;
+	client_addr_type = write_setting->addr_type;
 	addr_type = write_setting->addr_type;
 	for (i = 0; i < write_setting->size; i++) {
 		CAM_DBG(CAM_SENSOR, "addr %x data %x",
 			reg_setting->reg_addr, reg_setting->reg_data);
-		rc = cam_spi_write(client, reg_setting->reg_addr,
-			reg_setting->reg_data, write_setting->data_type);
+		rc = cam_spi_write(client,
+			reg_setting->reg_addr, reg_setting->reg_data,
+			write_setting->addr_type, write_setting->data_type);
 		if (rc < 0)
 			break;
 		reg_setting++;
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
index a497491..ec1bede 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -78,13 +78,22 @@
 
 int cam_spi_read(struct camera_io_master *client,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
+int cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	int32_t num_bytes);
+
 int cam_spi_query_id(struct camera_io_master *client,
-	uint32_t addr, uint8_t *data, uint32_t num_byte);
+	uint32_t addr,
+	enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte);
 
 int cam_spi_write(struct camera_io_master *client,
-	uint32_t addr, uint16_t data,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
 int cam_spi_write_table(struct camera_io_master *client,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
index f5df775..bcdaf6d 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -217,9 +217,19 @@
 		sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
 		sizeof(struct cam_cmd_read) *
 		(cam_cmd_i2c_continuous_wr->header.count));
-	i2c_list->op_code = cam_cmd_i2c_continuous_wr->header.op_code;
+	if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
+	else if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ;
+	else
+		return -EINVAL;
+
 	i2c_list->i2c_settings.addr_type =
 		cam_cmd_i2c_continuous_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_continuous_wr->header.data_type;
 
 	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
 		cnt++) {
@@ -388,7 +398,7 @@
 	uint16_t power_setting_size)
 {
 	int32_t rc = 0, j = 0, i = 0;
-	uint32_t num_vreg;
+	int num_vreg;
 
 	/* Validate input parameters */
 	if (!soc_info || !power_setting) {
@@ -399,7 +409,7 @@
 
 	num_vreg = soc_info->num_rgltr;
 
-	if (num_vreg <= 0) {
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
 		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
 		return -EINVAL;
 	}
@@ -1210,8 +1220,8 @@
 	gpio_num_info = ctrl->gpio_num_info;
 	num_vreg = soc_info->num_rgltr;
 
-	if ((num_vreg == 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
-		CAM_ERR(CAM_SENSOR, "Regulators are not initialized");
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
 		return -EINVAL;
 	}
 
@@ -1556,6 +1566,11 @@
 	gpio_num_info = ctrl->gpio_num_info;
 	num_vreg = soc_info->num_rgltr;
 
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
 	for (index = 0; index < ctrl->power_down_setting_size; index++) {
 		CAM_DBG(CAM_SENSOR, "index %d",  index);
 		pd = &ctrl->power_down_setting[index];
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index ff7a0e5..cbf54f7 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -21,7 +21,8 @@
 #include <linux/msm_dma_iommu_mapping.h>
 #include <linux/workqueue.h>
 #include <linux/genalloc.h>
-
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
 #include "cam_smmu_api.h"
 #include "cam_debug_util.h"
 
@@ -118,6 +119,7 @@
 		int, void*);
 	void *token[CAM_SMMU_CB_MAX];
 	int cb_count;
+	int secure_count;
 };
 
 struct cam_iommu_cb_set {
@@ -151,6 +153,17 @@
 	size_t phys_len;
 };
 
+struct cam_sec_buff_info {
+	struct ion_handle *i_hdl;
+	struct ion_client *i_client;
+	enum dma_data_direction dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+};
+
 static struct cam_iommu_cb_set iommu_cb_set;
 
 static enum dma_data_direction cam_smmu_translate_dir(
@@ -166,6 +179,9 @@
 static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
 	int ion_fd);
 
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd);
+
 static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
 	dma_addr_t base, size_t size,
 	int order);
@@ -544,7 +560,15 @@
 					"Error: %s already got handle 0x%x",
 					name,
 					iommu_cb_set.cb_info[i].handle);
+
+				if (iommu_cb_set.cb_info[i].is_secure)
+					iommu_cb_set.cb_info[i].secure_count++;
+
 				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+				if (iommu_cb_set.cb_info[i].is_secure) {
+					*hdl = iommu_cb_set.cb_info[i].handle;
+					return 0;
+				}
 				return -EINVAL;
 			}
 
@@ -556,6 +580,8 @@
 			/* put handle in the table */
 			iommu_cb_set.cb_info[i].handle = handle;
 			iommu_cb_set.cb_info[i].cb_count = 0;
+			if (iommu_cb_set.cb_info[i].is_secure)
+				iommu_cb_set.cb_info[i].secure_count++;
 			*hdl = handle;
 			CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
 				name, handle);
@@ -693,6 +719,24 @@
 
 	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
 		ion_fd, idx);
+
+	return NULL;
+}
+
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+		list) {
+		if (mapping->ion_fd == ion_fd) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+			return mapping;
+		}
+	}
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
+		ion_fd, idx);
 	return NULL;
 }
 
@@ -1339,6 +1383,25 @@
 	return CAM_SMMU_BUFF_NOT_EXIST;
 }
 
+static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
+					int ion_fd, dma_addr_t *paddr_ptr,
+					size_t *len_ptr)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
 int cam_smmu_get_handle(char *identifier, int *handle_ptr)
 {
 	int ret = 0;
@@ -1722,14 +1785,200 @@
 	return rc;
 }
 
-int cam_smmu_map_sec_iova(int handle, int ion_fd,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
-	size_t *len_ptr)
+static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
+		 enum dma_data_direction dma_dir, struct ion_client *client,
+		 dma_addr_t *paddr_ptr,
+		 size_t *len_ptr)
 {
-	/* not implemented yet */
-	return -EPERM;
+	int rc = 0;
+	struct ion_handle *i_handle = NULL;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	i_handle = ion_import_dma_buf_fd(client, ion_fd);
+	if (IS_ERR_OR_NULL((void *)(i_handle))) {
+		CAM_ERR(CAM_SMMU, "ion import dma buffer failed");
+		return -EINVAL;
+	}
+
+	/* return addr and len to client */
+	rc = ion_phys(client, i_handle, paddr_ptr, len_ptr);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "ION Get Physical failed, rc: %d",
+			rc);
+		ion_free(client, i_handle);
+		return rc;
+	}
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_sec_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		ion_free(client, i_handle);
+		return -ENOMEM;
+	}
+
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->i_hdl = i_handle;
+	mapping_info->i_client = client;
+
+	CAM_DBG(CAM_SMMU, "ion_fd = %d, dev = %pK, paddr= %pK, len = %u",
+			ion_fd,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_map_sec_iova);
+
+int cam_smmu_map_stage2_iova(int handle,
+		int ion_fd, enum cam_smmu_map_dir dir,
+		struct ion_client *client, ion_phys_addr_t *paddr_ptr,
+		size_t *len_ptr)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid inputs, paddr_ptr:%pK, len_ptr: %pK",
+			paddr_ptr, len_ptr);
+		return -EINVAL;
+	}
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map secure mem to non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr,
+			len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_DBG(CAM_SMMU, "fd:%d already in list, give same addr back",
+			ion_fd);
+		rc = 0;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			client, paddr_ptr, len_ptr);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_stage2_iova);
+
+static int cam_smmu_secure_unmap_buf_and_remove_from_list(
+		struct cam_sec_buff_info *mapping_info,
+		int idx)
+{
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: List doesn't exist");
+		return -EINVAL;
+	}
+	ion_free(mapping_info->i_client, mapping_info->i_hdl);
+	list_del_init(&mapping_info->list);
+
+	CAM_DBG(CAM_SMMU, "unmap fd: %d, idx : %d", mapping_info->ion_fd, idx);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
+{
+	int idx, rc;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params! idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
 
 int cam_smmu_map_iova(int handle, int ion_fd,
 	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
@@ -1767,6 +2016,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
@@ -1786,9 +2041,7 @@
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
 		len_ptr);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
-		CAM_ERR(CAM_SMMU,
-			"ion_fd:%d already in the list, give same addr back",
-			 ion_fd);
+		CAM_ERR(CAM_SMMU, "ion_fd:%d already in the list", ion_fd);
 		rc = -EALREADY;
 		goto get_addr_end;
 	}
@@ -1832,6 +2085,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU,
@@ -1854,12 +2113,65 @@
 }
 EXPORT_SYMBOL(cam_smmu_get_iova);
 
-int cam_smmu_unmap_sec_iova(int handle, int ion_fd)
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
 {
-	/* not implemented yet */
-	return -EPERM;
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx,
+		ion_fd,
+		paddr_ptr,
+		len_ptr);
+
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
 }
-EXPORT_SYMBOL(cam_smmu_unmap_sec_iova);
+EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
 
 int cam_smmu_unmap_iova(int handle,
 	int ion_fd,
@@ -1882,6 +2194,12 @@
 		return -EINVAL;
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
 	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
 	if (iommu_cb_set.cb_info[idx].handle != handle) {
 		CAM_ERR(CAM_SMMU,
@@ -1989,6 +2307,22 @@
 		cam_smmu_clean_buffer_list(idx);
 	}
 
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -EPERM;
+		}
+
+		iommu_cb_set.cb_info[idx].secure_count--;
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			iommu_cb_set.cb_info[idx].cb_count = 0;
+			iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+		}
+
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return 0;
+	}
+
 	iommu_cb_set.cb_info[idx].cb_count = 0;
 	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
@@ -2168,7 +2502,15 @@
 	}
 
 	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	cb->is_secure = of_property_read_bool(of_node, "qcom,secure-cb");
+
+	/*
+	 * We always expect a memory map node, except when it is a secure
+	 * context bank.
+	 */
 	if (!mem_map_node) {
+		if (cb->is_secure)
+			return 0;
 		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
 		return -EINVAL;
 	}
@@ -2292,6 +2634,12 @@
 		return rc;
 	}
 
+	if (cb->is_secure) {
+		/* increment count to next bank */
+		iommu_cb_set.cb_init_count++;
+		return 0;
+	}
+
 	/* set up the iommu mapping for the  context bank */
 	if (type == CAM_QSMMU) {
 		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for : %s",
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
index 20445f3..4cb6efb 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -204,6 +204,19 @@
  */
 int cam_smmu_get_iova(int handle, int ion_fd,
 	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Retrieves the IOVA of a previously mapped secure ION buffer
+ *
+ * @param handle: SMMU handle identifying the secure context bank
+ * @param ion_fd: ION fd of the previously mapped memory
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of the mapped memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
 /**
  * @brief Unmaps memory from context bank
  *
@@ -217,27 +230,28 @@
 /**
  * @brief Maps secure memory for SMMU handle
  *
- * @param handle: SMMU handle identifying context bank
+ * @param handle: SMMU handle identifying secure context bank
  * @param ion_fd: ION fd to map securely
  * @param dir: DMA Direction for the mapping
+ * @param client: Ion client passed by caller
  * @param dma_addr: Returned IOVA address after mapping
  * @param len_ptr: Length of memory mapped
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_map_sec_iova(int handle,
-	int ion_fd, enum cam_smmu_map_dir dir,
-	dma_addr_t *dma_addr, size_t *len_ptr);
+int cam_smmu_map_stage2_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir, struct ion_client *client,
+	ion_phys_addr_t *dma_addr, size_t *len_ptr);
 
 /**
  * @brief Unmaps secure memory for SMMU handle
  *
- * @param handle: SMMU handle identifying context bank
+ * @param handle: SMMU handle identifying secure context bank
  * @param ion_fd: ION fd to unmap
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_unmap_sec_iova(int handle, int ion_fd);
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
 
 
 /**
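
For orientation, a minimal caller-side sketch of the renamed stage2 API declared above; the handle, fd, direction and ion client are placeholders that a real caller already holds, and error handling is trimmed:

	ion_phys_addr_t sec_iova = 0;
	dma_addr_t iova = 0;
	size_t len = 0;
	int rc;

	/* map the secure buffer once into the secure context bank */
	rc = cam_smmu_map_stage2_iova(sec_hdl, ion_fd, dir, ion_client,
			&sec_iova, &len);
	/* later lookups return the cached mapping for the same fd */
	rc = cam_smmu_get_stage2_iova(sec_hdl, ion_fd, &iova, &len);
	/* release the per-fd mapping when done */
	rc = cam_smmu_unmap_stage2_iova(sec_hdl, ion_fd);
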
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
index 4323358..44d5d48 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -124,7 +124,7 @@
 }
 
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl)
+	int32_t iommu_hdl, int32_t sec_mmu_hdl)
 {
 	struct cam_patch_desc *patch_desc = NULL;
 	uint64_t   iova_addr;
@@ -136,6 +136,7 @@
 	size_t     src_buf_size;
 	int        i;
 	int        rc = 0;
+	int32_t    hdl;
 
 	/* process patch descriptor */
 	patch_desc = (struct cam_patch_desc *)
@@ -146,8 +147,11 @@
 			sizeof(struct cam_patch_desc));
 
 	for (i = 0; i < packet->num_patches; i++) {
+
+		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
+			sec_mmu_hdl : iommu_hdl;
 		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
-			iommu_hdl, &iova_addr, &src_buf_size);
+			hdl, &iova_addr, &src_buf_size);
 		if (rc < 0) {
 			CAM_ERR(CAM_UTIL, "unable to get src buf address");
 			return rc;
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
index 2fa7585..323a75a 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -83,12 +83,14 @@
  *
  * @packet:             Input packet containing Command Buffers and Patches
  * @iommu_hdl:          IOMMU handle of the HW Device that received the packet
+ * @sec_mmu_hdl:        Secure IOMMU handle of the HW Device that
+ *                      received the packet
  *
  * @return:             0: Success
  *                      Negative: Failure
  */
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl);
+	int32_t iommu_hdl, int32_t sec_mmu_hdl);
 
 /**
  * cam_packet_util_process_generic_cmd_buffer()
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
index 2e9e61f..f4f85e4 100644
--- a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -92,15 +92,15 @@
 );
 
 TRACE_EVENT(cam_apply_req,
-	TP_PROTO(const char *entity, struct cam_req_mgr_apply_request *req),
-	TP_ARGS(entity, req),
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
 	TP_STRUCT__entry(
 		__string(entity, entity)
 		__field(uint64_t, req_id)
 	),
 	TP_fast_assign(
 		__assign_str(entity, entity);
-		__entry->req_id = req->request_id;
+		__entry->req_id = req_id;
 	),
 	TP_printk(
 		"%8s: ApplyRequest request=%llu",
@@ -217,6 +217,75 @@
 			__entry->devicemap, __entry->link, __entry->session
 	)
 );
+
+TRACE_EVENT(cam_submit_to_hw,
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint64_t, req_id)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->req_id = req_id;
+	),
+	TP_printk(
+		"%8s: submit request=%llu",
+			__get_str(entity), __entry->req_id
+	)
+);
+
+TRACE_EVENT(cam_irq_activated,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: got irq type=%d",
+			__get_str(entity), __entry->irq_type
+	)
+);
+
+TRACE_EVENT(cam_irq_handled,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: handled irq type=%d",
+			__get_str(entity), __entry->irq_type
+	)
+);
+
+TRACE_EVENT(cam_cdm_cb,
+	TP_PROTO(const char *entity, uint32_t status),
+	TP_ARGS(entity, status),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, status)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->status = status;
+	),
+	TP_printk(
+		"%8s: cdm cb status=%d",
+			__get_str(entity), __entry->status
+	)
+);
+
 #endif /* _CAM_TRACE_H */
 
 /* This part must be outside protection */
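
The TRACE_EVENT() definitions added above generate the usual trace_<name>() helpers, so the new events are emitted the same way as the existing cam_apply_req one. A hedged sketch (entity strings and call sites are illustrative, not taken from this patch):

	trace_cam_submit_to_hw("ISP", req_id);		/* request handed to hardware */
	trace_cam_irq_activated("IFE", irq_type);	/* raw IRQ seen */
	trace_cam_irq_handled("IFE", irq_type);		/* IRQ serviced */
	trace_cam_cdm_cb("CDM", status);		/* CDM completion callback */
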
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 03d4840..d749384 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -65,6 +65,9 @@
 #define ROT_OVERHEAD_NUMERATOR		27
 #define ROT_OVERHEAD_DENOMINATOR	10000
 
+/* Minimum Rotator Clock value */
+#define ROT_MIN_ROT_CLK			20000000
+
 /* default minimum bandwidth vote */
 #define ROT_ENABLE_BW_VOTE		64000
 /*
@@ -1378,6 +1381,9 @@
 	if (rot_dev->min_rot_clk > perf->clk_rate)
 		perf->clk_rate = rot_dev->min_rot_clk;
 
+	if (mgr->min_rot_clk > perf->clk_rate)
+		perf->clk_rate = mgr->min_rot_clk;
+
 	read_bw =  sde_rotator_calc_buf_bw(in_fmt, config->input.width,
 				config->input.height, max_fps);
 
@@ -2383,7 +2389,7 @@
 	}
 
 	SDEROT_DBG(
-		"reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u fps:%d clk:%lu, bw:%llu\n",
+		"reconfig session id=%u in{%u,%u}f:%x out{%u,%u}f:%x fps:%d clk:%lu bw:%llu\n",
 		config->session_id, config->input.width, config->input.height,
 		config->input.format, config->output.width,
 		config->output.height, config->output.format,
@@ -3084,6 +3090,7 @@
 		IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
 			SDE_MDP_HW_REV_410)) {
 		mgr->ops_hw_init = sde_rotator_r3_init;
+		mgr->min_rot_clk = ROT_MIN_ROT_CLK;
 	} else {
 		ret = -ENODEV;
 		SDEROT_ERR("unsupported sde version %x\n",
@@ -3398,3 +3405,38 @@
 
 	return sde_rotator_config_session(mgr, private, config);
 }
+
+/*
+ * sde_rotator_session_validate - validate session
+ */
+int sde_rotator_session_validate(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rotation_config *config)
+{
+	int ret;
+
+	if (!mgr || !private || !config) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	SDEROT_DBG(
+		"validate session id=%u in{%u,%u}f:%x out{%u,%u}f:%x fps:%d\n",
+		config->session_id, config->input.width, config->input.height,
+		config->input.format, config->output.width,
+		config->output.height, config->output.format,
+		config->frame_rate);
+
+	ret = sde_rotator_verify_config_all(mgr, config);
+	if (ret) {
+		SDEROT_WARN("rotator verify format failed %d\n", ret);
+		return ret;
+	}
+
+	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
+		SDEROT_WARN("too many sbuf sessions\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index eb8cb88..e2f5465 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -396,6 +396,7 @@
  * @pixel_per_clk: rotator hardware performance in pixel for clock
  * @fudge_factor: fudge factor for clock calculation
  * @overhead: software overhead for offline rotation in msec
+ * @min_rot_clk: minimum rotator clock rate
  * @sbuf_ctx: pointer to sbuf session context
  * @ops_xxx: function pointers of rotator HAL layer
  * @hw_data: private handle of rotator HAL layer
@@ -443,6 +444,7 @@
 	struct sde_mult_factor pixel_per_clk;
 	struct sde_mult_factor fudge_factor;
 	struct sde_mult_factor overhead;
+	unsigned long min_rot_clk;
 
 	struct sde_rot_file_private *sbuf_ctx;
 
@@ -602,6 +604,17 @@
 	struct sde_rotation_config *config);
 
 /*
+ * sde_rotator_session_validate - validate session configuration
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per file session
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_session_validate(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rotation_config *config);
+
+/*
  * sde_rotator_req_init - allocate a new request and initialzie with given
  *	array of rotation items
  * @rot_dev: Pointer to rotator device
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 4ad960d8..76c9367 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1460,9 +1460,10 @@
 	struct sde_rotator_device *rot_dev;
 	struct sde_rotator_request *request = NULL;
 	struct sde_rot_entry_container *req = NULL;
+	struct sde_rotation_config rotcfg;
 	ktime_t *ts;
 	u32 flags = 0;
-	int i, ret;
+	int i, ret = 0;
 
 	if (!handle || !cmd) {
 		SDEROT_ERR("invalid rotator handle/cmd\n");
@@ -1478,7 +1479,7 @@
 	}
 
 	SDEROT_DBG(
-		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu wb:%d vid:%d cmd:%d\n",
+		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu prefill:%llu wb:%d vid:%d cmd:%d\n",
 		ctx->session_id, cmd->sequence_id,
 		cmd->src_rect_x, cmd->src_rect_y,
 		cmd->src_rect_w, cmd->src_rect_h,
@@ -1490,7 +1491,7 @@
 		cmd->dst_pixfmt >> 0, cmd->dst_pixfmt >> 8,
 		cmd->dst_pixfmt >> 16, cmd->dst_pixfmt >> 24,
 		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
-		cmd->clkrate, cmd->data_bw,
+		cmd->clkrate, cmd->data_bw, cmd->prefill_bw,
 		cmd->dst_writeback, cmd->video_mode, cmd_type);
 	SDEROT_EVTLOG(ctx->session_id, cmd->sequence_id,
 		cmd->src_rect_x, cmd->src_rect_y,
@@ -1498,11 +1499,11 @@
 		cmd->src_pixfmt,
 		cmd->dst_rect_w, cmd->dst_rect_h,
 		cmd->dst_pixfmt,
+		cmd->fps, cmd->clkrate, cmd->data_bw, cmd->prefill_bw,
 		(cmd->rot90 << 0) | (cmd->hflip << 1) | (cmd->vflip << 2) |
 		(cmd->secure << 3) | (cmd->dst_writeback << 4) |
-		(cmd->video_mode << 5),
-		cmd->fps, cmd->clkrate, cmd->data_bw,
-		cmd_type);
+		(cmd->video_mode << 5) |
+		(cmd_type << 24));
 
 	sde_rot_mgr_lock(rot_dev->mgr);
 
@@ -1584,11 +1585,8 @@
 			ret = -ENOMEM;
 			goto error_init_request;
 		}
-	}
 
-	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
-		struct sde_rotation_config rotcfg;
-
+		/* initialize session configuration */
 		memset(&rotcfg, 0, sizeof(struct sde_rotation_config));
 		rotcfg.flags = flags;
 		rotcfg.frame_rate = cmd->fps;
@@ -1606,25 +1604,16 @@
 		rotcfg.output.comp_ratio.numer = 1;
 		rotcfg.output.comp_ratio.denom = 1;
 		rotcfg.output.sbuf = true;
+	}
 
-		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
-			ret = sde_rotator_session_config(rot_dev->mgr,
-					ctx->private, &rotcfg);
-			if (ret) {
-				SDEROT_WARN("fail session config s:%d\n",
-						ctx->session_id);
-				goto error_session_config;
-			}
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
 
-			ctx->rotcfg = rotcfg;
-		}
-
-		ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private,
-				req);
+		ret = sde_rotator_session_validate(rot_dev->mgr,
+				ctx->private, &rotcfg);
 		if (ret) {
-			SDEROT_WARN("fail validate request s:%d\n",
+			SDEROT_WARN("fail session validation s:%d\n",
 					ctx->session_id);
-			goto error_validate_request;
+			goto error_session_validate;
 		}
 
 		devm_kfree(rot_dev->dev, req);
@@ -1632,6 +1621,18 @@
 
 	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
 
+		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
+			ret = sde_rotator_session_config(rot_dev->mgr,
+					ctx->private, &rotcfg);
+			if (ret) {
+				SDEROT_ERR("fail session config s:%d\n",
+						ctx->session_id);
+				goto error_session_config;
+			}
+
+			ctx->rotcfg = rotcfg;
+		}
+
 		request = list_first_entry_or_null(&ctx->retired_list,
 				struct sde_rotator_request, list);
 		if (!request) {
@@ -1745,7 +1746,7 @@
 	sde_rotator_update_retire_sequence(request);
 	sde_rotator_retire_request(request);
 error_retired_list:
-error_validate_request:
+error_session_validate:
 error_session_config:
 	devm_kfree(rot_dev->dev, req);
 error_invalid_handle:
@@ -2614,13 +2615,6 @@
 			}
 
 			/**
-			 * Loose any reference to sync fence once we pass
-			 * it to user. Driver does not clean up user
-			 * unclosed fence descriptors.
-			 */
-			vbinfo->fence = NULL;
-
-			/**
 			 * Cache fence descriptor in case user calls this
 			 * ioctl multiple times. Cached value would be stale
 			 * if user duplicated and closed old descriptor.
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 541cbaa..3d39fa2 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1437,8 +1437,7 @@
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
-	if (!ctx->sbuf_mode &&
-			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
+	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
 		u32 bw;
 
 		/*
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 9b23376..5198bc3 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -80,6 +80,8 @@
 			dprintk(VIDC_ERR,
 				"Size mismatch! Dmabuf size: %zu Expected Size: %lu",
 				buf->size, *buffer_size);
+			msm_vidc_res_handle_fatal_hw_error(smem_client->res,
+					true);
 			goto mem_buf_size_mismatch;
 		}
 		/* Prepare a dma buf for dma on the given device */
@@ -327,6 +329,7 @@
 	if (ion_flags & ION_FLAG_SECURE)
 		smem->flags |= SMEM_SECURE;
 
+	buffer_size = smem->size;
 	rc = msm_ion_get_device_address(inst->mem_client, ion_handle,
 			align, &iova, &buffer_size, smem->flags,
 			smem->buffer_type, &smem->mapping_info);
@@ -603,22 +606,25 @@
 		__func__, mem->handle, mem->device_addr, mem->size,
 		mem->kvaddr, mem->buffer_type);
 
-	if (mem->device_addr)
+	if (mem->device_addr) {
 		msm_ion_put_device_address(client, mem->handle, mem->flags,
 			&mem->mapping_info, mem->buffer_type);
+		mem->device_addr = 0x0;
+	}
 
-	if (mem->kvaddr)
+	if (mem->kvaddr) {
 		ion_unmap_kernel(client->clnt, mem->handle);
+		mem->kvaddr = NULL;
+	}
 
 	if (mem->handle) {
 		trace_msm_smem_buffer_ion_op_start("FREE",
 				(u32)mem->buffer_type, -1, mem->size, -1,
 				mem->flags, -1);
 		ion_free(client->clnt, mem->handle);
+		mem->handle = NULL;
 		trace_msm_smem_buffer_ion_op_end("FREE", (u32)mem->buffer_type,
 			-1, mem->size, -1, mem->flags, -1);
-	} else {
-		dprintk(VIDC_ERR, "%s: invalid ion_handle\n", __func__);
 	}
 
 	return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index a527717..286a67e 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -537,7 +537,7 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.get_frame_size = get_frame_size_compressed,
+		.get_frame_size = get_frame_size_compressed_full_yuv,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
 	},
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index b4c0ab9..ae2a2c6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -280,6 +280,7 @@
 		break;
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 		color_format = COLOR_FMT_NV12_BPP10_UBWC;
+		break;
 	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010:
 		color_format = COLOR_FMT_P010;
 		break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 1ef1282..c6d31f7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -553,7 +553,8 @@
 static int msm_vidc_set_clocks(struct msm_vidc_core *core)
 {
 	struct hfi_device *hdev;
-	unsigned long freq = 0, rate = 0;
+	unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0;
+	unsigned long freq_core_max = 0;
 	struct msm_vidc_inst *temp = NULL;
 	int rc = 0, i = 0;
 	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
@@ -568,20 +569,33 @@
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		freq += temp->clk_data.curr_freq;
+
+		if (temp->clk_data.core_id == VIDC_CORE_ID_1)
+			freq_core_1 += temp->clk_data.min_freq;
+		else if (temp->clk_data.core_id == VIDC_CORE_ID_2)
+			freq_core_2 += temp->clk_data.min_freq;
+		else if (temp->clk_data.core_id == VIDC_CORE_ID_3) {
+			freq_core_1 += temp->clk_data.min_freq / 2;
+			freq_core_2 += temp->clk_data.min_freq / 2;
+		}
+
+		freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2);
+
 		if (temp->clk_data.turbo_mode) {
 			dprintk(VIDC_PROF,
 				"Found an instance with Turbo request\n");
-			freq = msm_vidc_max_freq(core);
+			freq_core_max = msm_vidc_max_freq(core);
 			break;
 		}
 	}
+
 	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
 		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= freq)
+		if (rate >= freq_core_max)
 			break;
 	}
-	core->min_freq = freq;
+
+	core->min_freq = freq_core_max;
 	core->curr_freq = rate;
 	mutex_unlock(&core->lock);
 
@@ -1018,17 +1032,21 @@
 }
 
 static u32 get_core_load(struct msm_vidc_core *core,
-	u32 core_id, bool lp_mode)
+	u32 core_id, bool lp_mode, bool real_time)
 {
 	struct msm_vidc_inst *inst = NULL;
 	u32 current_inst_mbs_per_sec = 0, load = 0;
+	bool real_time_mode = false;
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 		u32 cycles, lp_cycles;
 
+		real_time_mode = inst->flags & VIDC_REALTIME ? true : false;
 		if (!(inst->clk_data.core_id & core_id))
 			continue;
+		if (real_time_mode != real_time)
+			continue;
 		if (inst->session_type == MSM_VIDC_DECODER) {
 			cycles = lp_cycles = inst->clk_data.entry->vpp_cycles;
 		} else if (inst->session_type == MSM_VIDC_ENCODER) {
@@ -1073,10 +1091,10 @@
 	max_freq = msm_vidc_max_freq(inst->core);
 	inst->clk_data.core_id = 0;
 
-	core0_load = get_core_load(core, VIDC_CORE_ID_1, false);
-	core1_load = get_core_load(core, VIDC_CORE_ID_2, false);
-	core0_lp_load = get_core_load(core, VIDC_CORE_ID_1, true);
-	core1_lp_load = get_core_load(core, VIDC_CORE_ID_2, true);
+	core0_load = get_core_load(core, VIDC_CORE_ID_1, false, true);
+	core1_load = get_core_load(core, VIDC_CORE_ID_2, false, true);
+	core0_lp_load = get_core_load(core, VIDC_CORE_ID_1, true, true);
+	core1_lp_load = get_core_load(core, VIDC_CORE_ID_2, true, true);
 
 	min_load = min(core0_load, core1_load);
 	min_core_id = core0_load < core1_load ?
@@ -1095,9 +1113,9 @@
 	current_inst_lp_load = msm_comm_get_inst_load(inst,
 		LOAD_CALC_NO_QUIRKS) * lp_cycles;
 
-	dprintk(VIDC_DBG, "Core 0 Load = %d Core 1 Load = %d\n",
+	dprintk(VIDC_DBG, "Core 0 RT Load = %d Core 1 RT Load = %d\n",
 		 core0_load, core1_load);
-	dprintk(VIDC_DBG, "Core 0 LP Load = %d Core 1 LP Load = %d\n",
+	dprintk(VIDC_DBG, "Core 0 RT LP Load = %d Core 1 RT LP Load = %d\n",
 		core0_lp_load, core1_lp_load);
 	dprintk(VIDC_DBG, "Max Load = %lu\n", max_freq);
 	dprintk(VIDC_DBG, "Current Load = %d Current LP Load = %d\n",
@@ -1191,12 +1209,12 @@
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
 
-		if (!((inst->clk_data.core_id & core_id) ||
-			  (inst->clk_data.core_id & VIDC_CORE_ID_3)))
+		if ((inst->clk_data.core_id != core_id) &&
+			(inst->clk_data.core_id != VIDC_CORE_ID_3))
 			continue;
 
 		dprintk(VIDC_PROF,
-			"inst %pK (%4ux%4u) to (%4ux%4u) %3u %s %s %s %s\n",
+			"inst %pK (%4ux%4u) to (%4ux%4u) %3u %s %s %s %s %lu\n",
 			inst,
 			inst->prop.width[OUTPUT_PORT],
 			inst->prop.height[OUTPUT_PORT],
@@ -1207,7 +1225,8 @@
 			inst->clk_data.work_mode == VIDC_WORK_MODE_1 ?
 				"WORK_MODE_1" : "WORK_MODE_2",
 			inst->flags & VIDC_LOW_POWER ? "LP" : "HQ",
-			inst->flags & VIDC_REALTIME ? "RealTime" : "NonRTime");
+			inst->flags & VIDC_REALTIME ? "RealTime" : "NonRTime",
+			inst->clk_data.min_freq);
 	}
 	mutex_unlock(&core->lock);
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 4238eda..2beb90c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2196,7 +2196,8 @@
 			"%s: Send sys error for inst %pK\n", __func__, inst);
 		change_inst_state(inst, MSM_VIDC_CORE_INVALID);
 		msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_SYS_ERROR);
-		msm_comm_print_inst_info(inst);
+		if (!core->trigger_ssr)
+			msm_comm_print_inst_info(inst);
 	}
 	dprintk(VIDC_DBG, "Calling core_release\n");
 	rc = call_hfi_op(hdev, core_release, hdev->hfi_device_data);
@@ -4776,6 +4777,7 @@
 	mutex_lock(&inst->eosbufs.lock);
 	list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
 		list_del(&buf->list);
+		msm_comm_smem_free(inst, &buf->smem);
 		kfree(buf);
 	}
 	INIT_LIST_HEAD(&inst->eosbufs.list);
@@ -5323,8 +5325,7 @@
 		LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
 
 	if (inst->state == MSM_VIDC_OPEN_DONE) {
-		max_load_adj = inst->core->resources.max_load +
-			inst->capability.mbs_per_frame.max;
+		max_load_adj = inst->core->resources.max_load;
 		num_mbs_per_sec = msm_comm_get_load(inst->core,
 					MSM_VIDC_DECODER, quirks);
 		num_mbs_per_sec += msm_comm_get_load(inst->core,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index c1828b3..dbebe5d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -113,7 +113,6 @@
 
 #define MSM_VIDC_ERROR(value)					\
 	do {							\
-		dprintk(VIDC_DBG, "Fatal Level = %d\n", value);\
 		BUG_ON(value);					\
 	} while (0)
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 3554998..a82b598 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -54,7 +54,7 @@
 	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_ENCODER, 125, 675, 320),
 	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_ENCODER, 125, 675, 320),
 	CODEC_ENTRY(V4L2_PIX_FMT_VP8, MSM_VIDC_ENCODER, 125, 675, 320),
-	CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_VIDC_ENCODER, 125, 675, 320),
+	CODEC_ENTRY(V4L2_PIX_FMT_TME, MSM_VIDC_ENCODER, 0, 540, 540),
 	CODEC_ENTRY(V4L2_PIX_FMT_MPEG2, MSM_VIDC_DECODER, 50, 200, 200),
 	CODEC_ENTRY(V4L2_PIX_FMT_H264, MSM_VIDC_DECODER, 50, 200, 200),
 	CODEC_ENTRY(V4L2_PIX_FMT_HEVC, MSM_VIDC_DECODER, 50, 200, 200),
@@ -119,7 +119,7 @@
 	},
 	{
 		.key = "qcom,max-hw-load",
-		.value = 2563200,
+		.value = 3110400,	/* 4096x2160@90 */
 	},
 	{
 		.key = "qcom,max-hq-mbs-per-frame",
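
For reference, the new qcom,max-hw-load value checks out against its annotation: assuming the load is counted in 16x16 macroblocks per second, as elsewhere in this driver, (4096/16) * (2160/16) * 90 = 256 * 135 * 90 = 3110400, i.e. 4096x2160 at 90 fps.
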
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index dde6029..5d2ed29 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -1751,9 +1751,8 @@
 	if (rc || __iface_cmdq_write(dev, &version_pkt))
 		dprintk(VIDC_WARN, "Failed to send image version pkt to f/w\n");
 
-	rc = __enable_subcaches(device);
-	if (!rc)
-		__set_subcaches(device);
+	__enable_subcaches(device);
+	__set_subcaches(device);
 
 	if (dev->res->pm_qos_latency_us) {
 #ifdef CONFIG_SMP
@@ -3790,8 +3789,9 @@
 	venus_hfi_for_each_subcache(device, sinfo) {
 		rc = llcc_slice_activate(sinfo->subcache);
 		if (rc) {
-			dprintk(VIDC_ERR, "Failed to activate %s: %d\n",
+			dprintk(VIDC_WARN, "Failed to activate %s: %d\n",
 				sinfo->name, rc);
+			msm_vidc_res_handle_fatal_hw_error(device->res, true);
 			goto err_activate_fail;
 		}
 		sinfo->isactive = true;
@@ -3806,7 +3806,7 @@
 err_activate_fail:
 	__release_subcaches(device);
 	__disable_subcaches(device);
-	return -EINVAL;
+	return 0;
 }
 
 static int __set_subcaches(struct venus_hfi_device *device)
@@ -3848,25 +3848,25 @@
 
 		rc = __core_set_resource(device, &rhdr, (void *)sc_res_info);
 		if (rc) {
-			dprintk(VIDC_ERR, "Failed to set subcaches %d\n", rc);
+			dprintk(VIDC_WARN, "Failed to set subcaches %d\n", rc);
 			goto err_fail_set_subacaches;
 		}
-	}
 
-	venus_hfi_for_each_subcache(device, sinfo) {
-		if (sinfo->isactive == true)
-			sinfo->isset = true;
-	}
+		venus_hfi_for_each_subcache(device, sinfo) {
+			if (sinfo->isactive == true)
+				sinfo->isset = true;
+		}
 
-	dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
-	device->res->sys_cache_res_set = true;
+		dprintk(VIDC_DBG, "Set Subcaches done to Venus\n");
+		device->res->sys_cache_res_set = true;
+	}
 
 	return 0;
 
 err_fail_set_subacaches:
 	__disable_subcaches(device);
 
-	return rc;
+	return 0;
 }
 
 static int __release_subcaches(struct venus_hfi_device *device)
@@ -3905,13 +3905,13 @@
 
 		rc = __core_release_resource(device, &rhdr);
 		if (rc)
-			dprintk(VIDC_ERR,
+			dprintk(VIDC_WARN,
 				"Failed to release %d subcaches\n", c);
 	}
 
 	device->res->sys_cache_res_set = false;
 
-	return rc;
+	return 0;
 }
 
 static int __disable_subcaches(struct venus_hfi_device *device)
@@ -3929,7 +3929,7 @@
 				sinfo->name);
 			rc = llcc_slice_deactivate(sinfo->subcache);
 			if (rc) {
-				dprintk(VIDC_ERR,
+				dprintk(VIDC_WARN,
 					"Failed to de-activate %s: %d\n",
 					sinfo->name, rc);
 			}
@@ -3937,7 +3937,7 @@
 		}
 	}
 
-	return rc;
+	return 0;
 }
 
 static int __venus_power_on(struct venus_hfi_device *device)
@@ -4111,9 +4111,8 @@
 
 	__sys_set_debug(device, msm_vidc_fw_debug);
 
-	rc = __enable_subcaches(device);
-	if (!rc)
-		__set_subcaches(device);
+	__enable_subcaches(device);
+	__set_subcaches(device);
 
 	dprintk(VIDC_PROF, "Resumed from power collapse\n");
 exit:
@@ -4282,10 +4281,65 @@
 	return rc;
 }
 
+static void __noc_error_info(struct venus_hfi_device *device, u32 core_num)
+{
+	u32 vcodec_core_video_noc_base_offs, val;
+
+	if (!device) {
+		dprintk(VIDC_ERR, "%s: null device\n", __func__);
+		return;
+	}
+	if (!core_num) {
+		vcodec_core_video_noc_base_offs =
+			VCODEC_CORE0_VIDEO_NOC_BASE_OFFS;
+	} else if (core_num == 1) {
+		vcodec_core_video_noc_base_offs =
+			VCODEC_CORE1_VIDEO_NOC_BASE_OFFS;
+	} else {
+		dprintk(VIDC_ERR, "%s: invalid core_num %u\n",
+			__func__, core_num);
+		return;
+	}
+
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_SWID_LOW:     %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_SWID_HIGH_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_SWID_HIGH:    %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_MAINCTL_LOW:  %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG0_LOW:  %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG0_HIGH: %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG1_LOW:  %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG1_HIGH: %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG2_LOW:  %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG2_HIGH: %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG3_LOW:  %#x\n", core_num, val);
+	val = __read_register(device, vcodec_core_video_noc_base_offs +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS);
+	dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
+}
+
 static int venus_hfi_noc_error_info(void *dev)
 {
 	struct venus_hfi_device *device;
-	u32 val = 0;
+	const u32 core0 = 0, core1 = 1;
 
 	if (!dev) {
 		dprintk(VIDC_ERR, "%s: null device\n", __func__);
@@ -4296,44 +4350,13 @@
 	mutex_lock(&device->lock);
 	dprintk(VIDC_ERR, "%s: noc error information\n", __func__);
 
-	val = __read_register(device, 0x0C500);
-	dprintk(VIDC_ERR, "NOC_ERR_SWID_LOW(0x00AA0C500):     %#x\n", val);
+	if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core0);
 
-	val = __read_register(device, 0x0C504);
-	dprintk(VIDC_ERR, "NOC_ERR_SWID_HIGH(0x00AA0C504):    %#x\n", val);
-
-	val = __read_register(device, 0x0C508);
-	dprintk(VIDC_ERR, "NOC_ERR_MAINCTL_LOW(0x00AA0C508):  %#x\n", val);
-
-	val = __read_register(device, 0x0C510);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRVLD_LOW(0x00AA0C510):   %#x\n", val);
-
-	val = __read_register(device, 0x0C518);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRCLR_LOW(0x00AA0C518):   %#x\n", val);
-
-	val = __read_register(device, 0x0C520);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG0_LOW(0x00AA0C520):  %#x\n", val);
-
-	val = __read_register(device, 0x0C524);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG0_HIGH(0x00AA0C524): %#x\n", val);
-
-	val = __read_register(device, 0x0C528);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG1_LOW(0x00AA0C528):  %#x\n", val);
-
-	val = __read_register(device, 0x0C52C);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG1_HIGH(0x00AA0C52C): %#x\n", val);
-
-	val = __read_register(device, 0x0C530);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG2_LOW(0x00AA0C530):  %#x\n", val);
-
-	val = __read_register(device, 0x0C534);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG2_HIGH(0x00AA0C534): %#x\n", val);
-
-	val = __read_register(device, 0x0C538);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG3_LOW(0x00AA0C538):  %#x\n", val);
-
-	val = __read_register(device, 0x0C53C);
-	dprintk(VIDC_ERR, "NOC_ERR_ERRLOG3_HIGH(0x00AA0C53C): %#x\n", val);
+	if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
+			VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+		__noc_error_info(device, core1);
 
 	mutex_unlock(&device->lock);
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index fc32d73..3433483 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -190,4 +190,25 @@
 #define VIDC_UC_REGION_ADDR 0x000D2064
 #define VIDC_UC_REGION_SIZE 0x000D2068
 
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers
+ * --------------------------------------------------------------------------
+ */
+#define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS		0x00004000
+#define VCODEC_CORE1_VIDEO_NOC_BASE_OFFS		0x0000C000
+#define VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS	0x0500
+#define VCODEC_COREX_VIDEO_NOC_ERR_SWID_HIGH_OFFS	0x0504
+#define VCODEC_COREX_VIDEO_NOC_ERR_MAINCTL_LOW_OFFS	0x0508
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS	0x0510
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRCLR_LOW_OFFS	0x0518
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS	0x0520
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_HIGH_OFFS	0x0524
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_LOW_OFFS	0x0528
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG1_HIGH_OFFS	0x052C
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_LOW_OFFS	0x0530
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG2_HIGH_OFFS	0x0534
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS	0x0538
+#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS	0x053C
+
 #endif
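
A quick way to see what the new macros buy: the per-core base plus the common register offset reproduces the raw addresses that the old hard-coded dump in venus_hfi.c used (those reads targeted what is now the CORE1 block). A small stand-alone check, using only the values from the hunk above:

	#include <stdio.h>

	/* Offsets copied from the hunk above. */
	#define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS		0x00004000
	#define VCODEC_CORE1_VIDEO_NOC_BASE_OFFS		0x0000C000
	#define VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS	0x0500
	#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS	0x0520

	int main(void)
	{
		/* CORE1 addresses land on the old hard-coded ones removed
		 * from venus_hfi.c (0x0C500, 0x0C520, ...). */
		printf("CORE1 SWID_LOW    = %#x\n",
		       VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
		       VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS);
		printf("CORE1 ERRLOG0_LOW = %#x\n",
		       VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
		       VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG0_LOW_OFFS);
		/* CORE0 gets its own block at 0x4500, 0x4520, ... */
		printf("CORE0 SWID_LOW    = %#x\n",
		       VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
		       VCODEC_COREX_VIDEO_NOC_ERR_SWID_LOW_OFFS);
		return 0;
	}
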
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index c3277308..b49f80c 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -254,7 +254,7 @@
 		return 0;
 
 	case LIRC_GET_REC_RESOLUTION:
-		val = dev->rx_resolution;
+		val = dev->rx_resolution / 1000;
 		break;
 
 	case LIRC_SET_WIDEBAND_RECEIVER:
diff --git a/drivers/mfd/qcom-i2c-pmic.c b/drivers/mfd/qcom-i2c-pmic.c
index 590e4c1..28c3343 100644
--- a/drivers/mfd/qcom-i2c-pmic.c
+++ b/drivers/mfd/qcom-i2c-pmic.c
@@ -62,8 +62,12 @@
 	struct irq_domain	*domain;
 	struct i2c_pmic_periph	*periph;
 	struct pinctrl		*pinctrl;
+	struct mutex		irq_complete;
 	const char		*pinctrl_name;
 	int			num_periphs;
+	int			summary_irq;
+	bool			resume_completed;
+	bool			irq_waiting;
 };
 
 static void i2c_pmic_irq_bus_lock(struct irq_data *d)
@@ -400,6 +404,16 @@
 	unsigned int summary_status;
 	int rc, i;
 
+	mutex_lock(&chip->irq_complete);
+	chip->irq_waiting = true;
+	if (!chip->resume_completed) {
+		pr_debug("IRQ triggered before device-resume\n");
+		disable_irq_nosync(irq);
+		mutex_unlock(&chip->irq_complete);
+		return IRQ_HANDLED;
+	}
+	chip->irq_waiting = false;
+
 	for (i = 0; i < DIV_ROUND_UP(chip->num_periphs, BITS_PER_BYTE); i++) {
 		rc = regmap_read(chip->regmap, I2C_INTR_STATUS_BASE + i,
 				&summary_status);
@@ -416,6 +430,8 @@
 		i2c_pmic_summary_status_handler(chip, periph, summary_status);
 	}
 
+	mutex_unlock(&chip->irq_complete);
+
 	return IRQ_HANDLED;
 }
 
@@ -559,6 +575,9 @@
 		}
 	}
 
+	chip->resume_completed = true;
+	mutex_init(&chip->irq_complete);
+
 	rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
 				       i2c_pmic_irq_handler,
 				       IRQF_ONESHOT | IRQF_SHARED,
@@ -568,6 +587,7 @@
 		goto cleanup;
 	}
 
+	chip->summary_irq = client->irq;
 	enable_irq_wake(client->irq);
 
 probe_children:
@@ -594,6 +614,17 @@
 }
 
 #ifdef CONFIG_PM_SLEEP
+static int i2c_pmic_suspend_noirq(struct device *dev)
+{
+	struct i2c_pmic *chip = dev_get_drvdata(dev);
+
+	if (chip->irq_waiting) {
+		pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
 static int i2c_pmic_suspend(struct device *dev)
 {
 	struct i2c_pmic *chip = dev_get_drvdata(dev);
@@ -618,6 +649,11 @@
 			pr_err_ratelimited("Couldn't enable 0x%04x wake irqs 0x%02x rc=%d\n",
 			       periph->addr, periph->wake, rc);
 	}
+	if (!rc) {
+		mutex_lock(&chip->irq_complete);
+		chip->resume_completed = false;
+		mutex_unlock(&chip->irq_complete);
+	}
 
 	return rc;
 }
@@ -647,10 +683,38 @@
 			       periph->addr, periph->synced[IRQ_EN_SET], rc);
 	}
 
+	mutex_lock(&chip->irq_complete);
+	chip->resume_completed = true;
+	if (chip->irq_waiting) {
+		mutex_unlock(&chip->irq_complete);
+		/* irq was pending, call the handler */
+		i2c_pmic_irq_handler(chip->summary_irq, chip);
+		enable_irq(chip->summary_irq);
+	} else {
+		mutex_unlock(&chip->irq_complete);
+	}
+
 	return rc;
 }
+#else
+static int i2c_pmic_suspend(struct device *dev)
+{
+	return 0;
+}
+static int i2c_pmic_resume(struct device *dev)
+{
+	return 0;
+}
+static int i2c_pmic_suspend_noirq(struct device *dev)
+{
+	return 0;
+}
 #endif
-static SIMPLE_DEV_PM_OPS(i2c_pmic_pm_ops, i2c_pmic_suspend, i2c_pmic_resume);
+static const struct dev_pm_ops i2c_pmic_pm_ops = {
+	.suspend	= i2c_pmic_suspend,
+	.suspend_noirq	= i2c_pmic_suspend_noirq,
+	.resume		= i2c_pmic_resume,
+};
 
 static const struct of_device_id i2c_pmic_match_table[] = {
 	{ .compatible = "qcom,i2c-pmic", },
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cada3c1..e3f4c39 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -791,6 +791,13 @@
 	  Per UID based io statistics exported to /proc/uid_io
 	  Per UID based procstat control in /proc/uid_procstat
 
+config UID_SYS_STATS_DEBUG
+	bool "Per-TASK statistics"
+	depends on UID_SYS_STATS
+	default n
+	help
+	  Per TASK based io statistics exported to /proc/uid_io
+
 config MEMORY_STATE_TIME
 	tristate "Memory freq/bandwidth time statistics"
 	depends on PROFILING
diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c
index 3c11de0..690b28b 100644
--- a/drivers/misc/qpnp-misc.c
+++ b/drivers/misc/qpnp-misc.c
@@ -135,7 +135,7 @@
 	struct qpnp_misc_dev *mdev = NULL;
 	struct qpnp_misc_dev *mdev_found = NULL;
 	int rc;
-	u8 temp;
+	u8 temp = 0;
 
 	if (IS_ERR_OR_NULL(node)) {
 		pr_err("Invalid device node pointer\n");
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7077b30..7567f86 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -3126,6 +3126,7 @@
 				struct qseecom_send_cmd_req *req)
 {
 	int ret = 0;
+	int ret2 = 0;
 	u32 reqd_len_sb_in = 0;
 	struct qseecom_client_send_data_ireq send_data_req = {0};
 	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
@@ -3224,32 +3225,38 @@
 	if (ret) {
 		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
 					ret, data->client.app_id);
-		return ret;
+		goto exit;
 	}
 
 	if (qseecom.qsee_reentrancy_support) {
 		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
 	} else {
 		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
 			ret = __qseecom_process_incomplete_cmd(data, &resp);
 			if (ret) {
 				pr_err("process_incomplete_cmd failed err: %d\n",
 						ret);
-				return ret;
+				goto exit;
 			}
 		} else {
 			if (resp.result != QSEOS_RESULT_SUCCESS) {
 				pr_err("Response result %d not supported\n",
 								resp.result);
 				ret = -EINVAL;
+				goto exit;
 			}
 		}
 	}
-	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+exit:
+	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
 				data->client.sb_virt, data->client.sb_length,
 				ION_IOC_INV_CACHES);
-	if (ret)
-		pr_err("cache operation failed %d\n", ret);
+	if (ret2) {
+		pr_err("cache operation failed %d\n", ret2);
+		return ret2;
+	}
 	return ret;
 }
 
@@ -6575,6 +6582,7 @@
 	bool found_app = false;
 	unsigned long flags;
 	int ret = 0;
+	int ret2 = 0;
 	uint32_t reqd_len_sb_in = 0;
 	void *cmd_buf = NULL;
 	size_t cmd_len;
@@ -6684,43 +6692,47 @@
 	if (ret) {
 		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
 					ret, data->client.app_id);
-		return ret;
+		goto exit;
 	}
 
 	if (qseecom.qsee_reentrancy_support) {
 		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+		if (ret)
+			goto exit;
 	} else {
 		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
 			ret = __qseecom_process_incomplete_cmd(data, &resp);
 			if (ret) {
 				pr_err("process_incomplete_cmd failed err: %d\n",
 						ret);
-				return ret;
+				goto exit;
 			}
 		} else {
 			if (resp.result != QSEOS_RESULT_SUCCESS) {
 				pr_err("Response result %d not supported\n",
 								resp.result);
 				ret = -EINVAL;
+				goto exit;
 			}
 		}
 	}
-	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+exit:
+	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
 				data->client.sb_virt, data->client.sb_length,
 				ION_IOC_INV_CACHES);
-	if (ret) {
-		pr_err("cache operation failed %d\n", ret);
-		return ret;
+	if (ret2) {
+		pr_err("cache operation failed %d\n", ret2);
+		return ret2;
 	}
 
 	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
 			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
-		ret = __qseecom_update_qteec_req_buf(
+		ret2 = __qseecom_update_qteec_req_buf(
 			(struct qseecom_qteec_modfd_req *)req, data, true);
-		if (ret)
-			return ret;
+		if (ret2)
+			return ret2;
 	}
-	return 0;
+	return ret;
 }
 
 static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 8bf4c57..7d69dd5 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/profile.h>
 #include <linux/rtmutex.h>
@@ -53,6 +54,15 @@
 #define UID_STATE_DEAD_TASKS	4
 #define UID_STATE_SIZE		5
 
+#define MAX_TASK_COMM_LEN 256
+
+struct task_entry {
+	char comm[MAX_TASK_COMM_LEN];
+	pid_t pid;
+	struct io_stats io[UID_STATE_SIZE];
+	struct hlist_node hash;
+};
+
 struct uid_entry {
 	uid_t uid;
 	cputime_t utime;
@@ -62,8 +72,231 @@
 	int state;
 	struct io_stats io[UID_STATE_SIZE];
 	struct hlist_node hash;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
+#endif
 };
 
+static u64 compute_write_bytes(struct task_struct *task)
+{
+	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+		return 0;
+
+	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+}
+
+static void compute_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
+{
+	/* A task could switch to another uid group, but its io_last in the
+	 * previous uid group could still be positive.
+	 * Therefore, before each update, do an overflow check first.
+	 */
+	int64_t delta;
+
+	delta = io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->read_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->write_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->rchar += delta > 0 ? delta : 0;
+	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->wchar += delta > 0 ? delta : 0;
+	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
+	io_bucket->fsync += delta > 0 ? delta : 0;
+
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
+}
+
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+static void get_full_task_comm(struct task_entry *task_entry,
+		struct task_struct *task)
+{
+	int i = 0, offset = 0, len = 0;
+	/* save one byte for terminating null character */
+	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
+	char buf[unused_len];
+	struct mm_struct *mm = task->mm;
+
+	/* fill the first TASK_COMM_LEN bytes with thread name */
+	get_task_comm(task_entry->comm, task);
+	i = strlen(task_entry->comm);
+	while (i < TASK_COMM_LEN)
+		task_entry->comm[i++] = ' ';
+
+	/* next the executable file name */
+	if (mm) {
+		down_read(&mm->mmap_sem);
+		if (mm->exe_file) {
+			char *pathname = d_path(&mm->exe_file->f_path, buf,
+					unused_len);
+
+			if (!IS_ERR(pathname)) {
+				len = strlcpy(task_entry->comm + i, pathname,
+						unused_len);
+				i += len;
+				task_entry->comm[i++] = ' ';
+				unused_len--;
+			}
+		}
+		up_read(&mm->mmap_sem);
+	}
+	unused_len -= len;
+
+	/* fill the rest with the command line arguments,
+	 * replacing each null or newline character
+	 * between args in argv with a space */
+	len = get_cmdline(task, buf, unused_len);
+	while (offset < len) {
+		if (buf[offset] != '\0' && buf[offset] != '\n')
+			task_entry->comm[i++] = buf[offset];
+		else
+			task_entry->comm[i++] = ' ';
+		offset++;
+	}
+
+	/* get rid of trailing whitespace in case the arg buffer was memset to
+	 * zero before being reset in userspace
+	 */
+	while (task_entry->comm[i-1] == ' ')
+		i--;
+	task_entry->comm[i] = '\0';
+}
+
+static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+
+	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
+			task->pid) {
+		if (task->pid == task_entry->pid) {
+			/* if thread name changed, update the entire command */
+			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
+				- task_entry->comm;
+
+			if (strncmp(task_entry->comm, task->comm, len))
+				get_full_task_comm(task_entry, task);
+			return task_entry;
+		}
+	}
+	return NULL;
+}
+
+static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+	pid_t pid = task->pid;
+
+	task_entry = find_task_entry(uid_entry, task);
+	if (task_entry)
+		return task_entry;
+
+	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
+	if (!task_entry)
+		return NULL;
+
+	get_full_task_comm(task_entry, task);
+
+	task_entry->pid = pid;
+	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);
+
+	return task_entry;
+}
+
+static void remove_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+	struct hlist_node *tmp_task;
+
+	hash_for_each_safe(uid_entry->task_entries, bkt_task,
+			tmp_task, task_entry, hash) {
+		hash_del(&task_entry->hash);
+		kfree(task_entry);
+	}
+}
+
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
+			sizeof(struct io_stats));
+	}
+}
+
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot)
+{
+	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
+	struct io_stats *task_io_slot = &task_entry->io[slot];
+
+	task_io_slot->read_bytes += task->ioac.read_bytes;
+	task_io_slot->write_bytes += compute_write_bytes(task);
+	task_io_slot->rchar += task->ioac.rchar;
+	task_io_slot->wchar += task->ioac.wchar;
+	task_io_slot->fsync += task->ioac.syscfs;
+}
+
+static void compute_io_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
+					&task_entry->io[UID_STATE_TOTAL_CURR],
+					&task_entry->io[UID_STATE_TOTAL_LAST],
+					&task_entry->io[UID_STATE_DEAD_TASKS]);
+	}
+}
+
+static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		/* comma-separated because the task comm may contain spaces */
+		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
+				task_entry->comm,
+				(unsigned long)task_entry->pid,
+				task_entry->io[UID_STATE_FOREGROUND].rchar,
+				task_entry->io[UID_STATE_FOREGROUND].wchar,
+				task_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].rchar,
+				task_entry->io[UID_STATE_BACKGROUND].wchar,
+				task_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].fsync,
+				task_entry->io[UID_STATE_BACKGROUND].fsync);
+	}
+}
+#else
+static void remove_uid_tasks(struct uid_entry *uid_entry) {};
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {};
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot) {};
+static void compute_io_uid_tasks(struct uid_entry *uid_entry) {};
+static void show_io_uid_tasks(struct seq_file *m,
+		struct uid_entry *uid_entry) {}
+#endif
+
 static struct uid_entry *find_uid_entry(uid_t uid)
 {
 	struct uid_entry *uid_entry;
@@ -87,7 +320,9 @@
 		return NULL;
 
 	uid_entry->uid = uid;
-
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	hash_init(uid_entry->task_entries);
+#endif
 	hash_add(hash_table, &uid_entry->hash, uid);
 
 	return uid_entry;
@@ -193,6 +428,7 @@
 		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
 							hash, (uid_t)uid_start) {
 			if (uid_start == uid_entry->uid) {
+				remove_uid_tasks(uid_entry);
 				hash_del(&uid_entry->hash);
 				kfree(uid_entry);
 			}
@@ -209,13 +445,6 @@
 	.write		= uid_remove_write,
 };
 
-static u64 compute_write_bytes(struct task_struct *task)
-{
-	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
-		return 0;
-
-	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
-}
 
 static void add_uid_io_stats(struct uid_entry *uid_entry,
 			struct task_struct *task, int slot)
@@ -227,28 +456,8 @@
 	io_slot->rchar += task->ioac.rchar;
 	io_slot->wchar += task->ioac.wchar;
 	io_slot->fsync += task->ioac.syscfs;
-}
 
-static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
-					struct io_stats *io_curr,
-					struct io_stats *io_last,
-					struct io_stats *io_dead)
-{
-	io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
-		io_last->read_bytes;
-	io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
-		io_last->write_bytes;
-	io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
-	io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
-	io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
-
-	io_last->read_bytes = io_curr->read_bytes;
-	io_last->write_bytes = io_curr->write_bytes;
-	io_last->rchar = io_curr->rchar;
-	io_last->wchar = io_curr->wchar;
-	io_last->fsync = io_curr->fsync;
-
-	memset(io_dead, 0, sizeof(struct io_stats));
+	add_uid_tasks_io_stats(uid_entry, task, slot);
 }
 
 static void update_io_stats_all_locked(void)
@@ -259,9 +468,11 @@
 	unsigned long bkt;
 	uid_t uid;
 
-	hash_for_each(hash_table, bkt, uid_entry, hash)
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
 		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 			sizeof(struct io_stats));
+		set_io_uid_tasks_zero(uid_entry);
+	}
 
 	rcu_read_lock();
 	do_each_thread(temp, task) {
@@ -275,10 +486,11 @@
 	rcu_read_unlock();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
-		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
 					&uid_entry->io[UID_STATE_TOTAL_CURR],
 					&uid_entry->io[UID_STATE_TOTAL_LAST],
 					&uid_entry->io[UID_STATE_DEAD_TASKS]);
+		compute_io_uid_tasks(uid_entry);
 	}
 }
 
@@ -289,6 +501,7 @@
 
 	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 		sizeof(struct io_stats));
+	set_io_uid_tasks_zero(uid_entry);
 
 	rcu_read_lock();
 	do_each_thread(temp, task) {
@@ -298,12 +511,14 @@
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
-	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
 				&uid_entry->io[UID_STATE_TOTAL_CURR],
 				&uid_entry->io[UID_STATE_TOTAL_LAST],
 				&uid_entry->io[UID_STATE_DEAD_TASKS]);
+	compute_io_uid_tasks(uid_entry);
 }
 
+
 static int uid_io_show(struct seq_file *m, void *v)
 {
 	struct uid_entry *uid_entry;
@@ -315,21 +530,22 @@
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
 		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
-			uid_entry->uid,
-			uid_entry->io[UID_STATE_FOREGROUND].rchar,
-			uid_entry->io[UID_STATE_FOREGROUND].wchar,
-			uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
-			uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
-			uid_entry->io[UID_STATE_BACKGROUND].rchar,
-			uid_entry->io[UID_STATE_BACKGROUND].wchar,
-			uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
-			uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
-			uid_entry->io[UID_STATE_FOREGROUND].fsync,
-			uid_entry->io[UID_STATE_BACKGROUND].fsync);
+				uid_entry->uid,
+				uid_entry->io[UID_STATE_FOREGROUND].rchar,
+				uid_entry->io[UID_STATE_FOREGROUND].wchar,
+				uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].rchar,
+				uid_entry->io[UID_STATE_BACKGROUND].wchar,
+				uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].fsync,
+				uid_entry->io[UID_STATE_BACKGROUND].fsync);
+
+		show_io_uid_tasks(m, uid_entry);
 	}
 
 	rt_mutex_unlock(&uid_lock);
-
 	return 0;
 }
 
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 120fd54..af409aa 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3518,15 +3518,23 @@
 	/* RED error - Fatal: requires reset */
 	if (mrq->cmdq_req->resp_err) {
 		err = mrq->cmdq_req->resp_err;
+		goto reset;
+	}
+
+	/*
+	 * TIMEOUT errors can happen because of an execution error in the
+	 * last command, so send CMD13 to get the device status.
+	 */
+	if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+			(mrq->data && (mrq->data->error == -ETIMEDOUT))) {
 		if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
 			ret = get_card_status(host->card, &status, 0);
 			if (ret)
 				pr_err("%s: CMD13 failed with err %d\n",
 						mmc_hostname(host), ret);
 		}
-		pr_err("%s: Response error detected with device status 0x%08x\n",
+		pr_err("%s: Timeout error detected with device status 0x%08x\n",
 			mmc_hostname(host), status);
-		goto reset;
 	}
 
 	/*
@@ -3572,7 +3580,7 @@
 	else if (mrq->data && mrq->data->error)
 		err = mrq->data->error;
 
-	if (err || cmdq_req->resp_err) {
+	if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
 		pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
 				mmc_hostname(mrq->host), __func__, err,
 				cmdq_req->resp_err);
@@ -3609,6 +3617,17 @@
 		blk_end_request_all(rq, err);
 		goto out;
 	}
+	/*
+	 * In case of error, cmdq_req->data.bytes_xfered is set to 0.
+	 * Calling blk_end_request() with nr_bytes == 0 never completes the
+	 * request, so to complete a request with an error we should use
+	 * blk_end_request_all() instead.
+	 */
+	if (err && cmdq_req->skip_err_handling) {
+		cmdq_req->skip_err_handling = false;
+		blk_end_request_all(rq, err);
+		goto out;
+	}
 
 	blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
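
For context on the flow these two hunks implement: a RED response error still goes straight to reset, a command/data timeout now issues CMD13 to read the device status first, and a request whose error handling was already done (skip_err_handling) is completed with blk_end_request_all(), since bytes_xfered is 0 on error and blk_end_request() would never finish it. A purely illustrative, standalone sketch of that routing; the enum and helper names below are invented for this note and are not part of the driver:

#include <stdbool.h>
#include <stdio.h>

enum completion_action {
	COMPLETE_OK,		/* blk_end_request(rq, 0, bytes_xfered) */
	QUERY_DEV_STATUS,	/* timeout: issue CMD13 first */
	FAIL_REQUEST,		/* skip_err_handling: blk_end_request_all(rq, err) */
	RUN_ERR_RECOVERY,	/* RED/other errors: normal recovery (may reset) */
};

static enum completion_action route(bool resp_err, bool timed_out,
				    bool skip_err_handling, int err)
{
	if (resp_err)
		return RUN_ERR_RECOVERY;
	if (timed_out)
		return QUERY_DEV_STATUS;
	if (err)
		return skip_err_handling ? FAIL_REQUEST : RUN_ERR_RECOVERY;
	return COMPLETE_OK;
}

int main(void)
{
	printf("%d\n", route(false, true, false, -110));	/* QUERY_DEV_STATUS */
	printf("%d\n", route(false, false, true, -5));		/* FAIL_REQUEST */
	return 0;
}
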
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 127ab0f..64c8743 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -482,19 +482,17 @@
  */
 int mmc_of_parse(struct mmc_host *host)
 {
-	struct device_node *np;
+	struct device *dev = host->parent;
 	u32 bus_width;
 	int ret;
 	bool cd_cap_invert, cd_gpio_invert = false;
 	bool ro_cap_invert, ro_gpio_invert = false;
 
-	if (!host->parent || !host->parent->of_node)
+	if (!dev || !dev_fwnode(dev))
 		return 0;
 
-	np = host->parent->of_node;
-
 	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
-	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
+	if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
 		dev_dbg(host->parent,
 			"\"bus-width\" property is missing, assuming 1 bit.\n");
 		bus_width = 1;
@@ -516,7 +514,7 @@
 	}
 
 	/* f_max is obtained from the optional "max-frequency" property */
-	of_property_read_u32(np, "max-frequency", &host->f_max);
+	device_property_read_u32(dev, "max-frequency", &host->f_max);
 
 	/*
 	 * Configure CD and WP pins. They are both by default active low to
@@ -531,12 +529,12 @@
 	 */
 
 	/* Parse Card Detection */
-	if (of_property_read_bool(np, "non-removable")) {
+	if (device_property_read_bool(dev, "non-removable")) {
 		host->caps |= MMC_CAP_NONREMOVABLE;
 	} else {
-		cd_cap_invert = of_property_read_bool(np, "cd-inverted");
+		cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
 
-		if (of_property_read_bool(np, "broken-cd"))
+		if (device_property_read_bool(dev, "broken-cd"))
 			host->caps |= MMC_CAP_NEEDS_POLL;
 
 		ret = mmc_gpiod_request_cd(host, "cd", 0, true,
@@ -562,7 +560,7 @@
 	}
 
 	/* Parse Write Protection */
-	ro_cap_invert = of_property_read_bool(np, "wp-inverted");
+	ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
 
 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
 	if (!ret)
@@ -570,62 +568,62 @@
 	else if (ret != -ENOENT && ret != -ENOSYS)
 		return ret;
 
-	if (of_property_read_bool(np, "disable-wp"))
+	if (device_property_read_bool(dev, "disable-wp"))
 		host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
 
 	/* See the comment on CD inversion above */
 	if (ro_cap_invert ^ ro_gpio_invert)
 		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
 
-	if (of_property_read_bool(np, "cap-sd-highspeed"))
+	if (device_property_read_bool(dev, "cap-sd-highspeed"))
 		host->caps |= MMC_CAP_SD_HIGHSPEED;
-	if (of_property_read_bool(np, "cap-mmc-highspeed"))
+	if (device_property_read_bool(dev, "cap-mmc-highspeed"))
 		host->caps |= MMC_CAP_MMC_HIGHSPEED;
-	if (of_property_read_bool(np, "sd-uhs-sdr12"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr12"))
 		host->caps |= MMC_CAP_UHS_SDR12;
-	if (of_property_read_bool(np, "sd-uhs-sdr25"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr25"))
 		host->caps |= MMC_CAP_UHS_SDR25;
-	if (of_property_read_bool(np, "sd-uhs-sdr50"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr50"))
 		host->caps |= MMC_CAP_UHS_SDR50;
-	if (of_property_read_bool(np, "sd-uhs-sdr104"))
+	if (device_property_read_bool(dev, "sd-uhs-sdr104"))
 		host->caps |= MMC_CAP_UHS_SDR104;
-	if (of_property_read_bool(np, "sd-uhs-ddr50"))
+	if (device_property_read_bool(dev, "sd-uhs-ddr50"))
 		host->caps |= MMC_CAP_UHS_DDR50;
-	if (of_property_read_bool(np, "cap-power-off-card"))
+	if (device_property_read_bool(dev, "cap-power-off-card"))
 		host->caps |= MMC_CAP_POWER_OFF_CARD;
-	if (of_property_read_bool(np, "cap-mmc-hw-reset"))
+	if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
 		host->caps |= MMC_CAP_HW_RESET;
-	if (of_property_read_bool(np, "cap-sdio-irq"))
+	if (device_property_read_bool(dev, "cap-sdio-irq"))
 		host->caps |= MMC_CAP_SDIO_IRQ;
-	if (of_property_read_bool(np, "full-pwr-cycle"))
+	if (device_property_read_bool(dev, "full-pwr-cycle"))
 		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
-	if (of_property_read_bool(np, "keep-power-in-suspend"))
+	if (device_property_read_bool(dev, "keep-power-in-suspend"))
 		host->pm_caps |= MMC_PM_KEEP_POWER;
-	if (of_property_read_bool(np, "wakeup-source") ||
-	    of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
+	if (device_property_read_bool(dev, "wakeup-source") ||
+	    device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
 		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
-	if (of_property_read_bool(np, "mmc-ddr-1_8v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
 		host->caps |= MMC_CAP_1_8V_DDR;
-	if (of_property_read_bool(np, "mmc-ddr-1_2v"))
+	if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
 		host->caps |= MMC_CAP_1_2V_DDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
 		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs200-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
 		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_8v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
 		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-1_2v"))
+	if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
 		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
-	if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
+	if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
 		host->caps2 |= MMC_CAP2_HS400_ES;
-	if (of_property_read_bool(np, "no-sdio"))
+	if (device_property_read_bool(dev, "no-sdio"))
 		host->caps2 |= MMC_CAP2_NO_SDIO;
-	if (of_property_read_bool(np, "no-sd"))
+	if (device_property_read_bool(dev, "no-sd"))
 		host->caps2 |= MMC_CAP2_NO_SD;
-	if (of_property_read_bool(np, "no-mmc"))
+	if (device_property_read_bool(dev, "no-mmc"))
 		host->caps2 |= MMC_CAP2_NO_MMC;
 
-	host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
+	host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
 	if (host->dsr_req && (host->dsr & ~0xffff)) {
 		dev_err(host->parent,
 			"device tree specified broken value for DSR: 0x%x, ignoring\n",
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c41f580..36295f5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1416,7 +1416,7 @@
 static int mmc_select_hs400es(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
-	int err = 0;
+	int err = -EINVAL;
 	u8 val;
 
 	if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
@@ -2148,7 +2148,7 @@
 		err = mmc_select_hs400(card);
 		if (err)
 			goto free_card;
-	} else {
+	} else if (!mmc_card_hs400es(card)) {
 		/* Select the desired bus width optionally */
 		err = mmc_select_bus_width(card);
 		if (err > 0 && mmc_card_hs(card)) {
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 7c3638c..591d6b2 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -145,6 +145,29 @@
 	mb();
 }
 
+static int cmdq_clear_task_poll(struct cmdq_host *cq_host, unsigned int tag)
+{
+	int retries = 100;
+
+	cmdq_clear_set_irqs(cq_host, CQIS_TCL, 0);
+	cmdq_writel(cq_host, 1<<tag, CQTCLR);
+	while (retries) {
+		/*
+		 * Both the Task Clear register and the doorbell should
+		 * indicate that the task has been cleared.
+		 */
+		if ((cmdq_readl(cq_host, CQTCLR) & 1<<tag) ||
+			(cmdq_readl(cq_host, CQTDBR) & 1<<tag)) {
+			udelay(5);
+			retries--;
+			continue;
+		} else
+			break;
+	}
+
+	cmdq_clear_set_irqs(cq_host, 0, CQIS_TCL);
+	return retries ? 0 : -ETIMEDOUT;
+}
 
 #define DRV_NAME "cmdq-host"
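
cmdq_clear_task_poll() above is a bounded busy-wait: request the clear, then poll CQTCLR and CQTDBR until the tag bit drops in both, backing off 5 us per attempt and giving up after 100 tries. A self-contained sketch of the same retry pattern against a fake register (the names and the "hardware finishes after four reads" behaviour are invented for illustration):

#include <stdio.h>

/* Fake "register" that clears the polled bit after a few reads. */
static unsigned int fake_reg = 1u << 3;

static unsigned int read_reg(void)
{
	static int reads;

	if (++reads == 4)
		fake_reg &= ~(1u << 3);	/* pretend hardware finished */
	return fake_reg;
}

/* Poll until bit 'tag' clears, or give up after 'retries' attempts. */
static int poll_bit_clear(unsigned int tag, int retries)
{
	while (retries--) {
		if (!(read_reg() & (1u << tag)))
			return 0;	/* cleared */
		/* real code would udelay(5) between attempts */
	}
	return -1;			/* -ETIMEDOUT equivalent */
}

int main(void)
{
	printf("poll result: %d\n", poll_bit_clear(3, 100));	/* prints 0 */
	return 0;
}
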
 
@@ -197,6 +220,10 @@
 static void cmdq_dumpregs(struct cmdq_host *cq_host)
 {
 	struct mmc_host *mmc = cq_host->mmc;
+	int offset = 0;
+
+	if (cq_host->offset_changed)
+		offset = CQ_V5_VENDOR_CFG;
 
 	MMC_TRACE(mmc,
 	"%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
@@ -243,7 +270,7 @@
 		cmdq_readl(cq_host, CQCRI),
 		cmdq_readl(cq_host, CQCRA));
 	pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
-	       cmdq_readl(cq_host, CQ_VENDOR_CFG));
+	       cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
 	pr_err(DRV_NAME ": ===========================================\n");
 
 	cmdq_dump_task_history(cq_host);
@@ -345,6 +372,7 @@
 {
 	int err = 0;
 	u32 cqcfg;
+	u32 cqcap = 0;
 	bool dcmd_enable;
 	struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
 
@@ -373,6 +401,24 @@
 	cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
 			(dcmd_enable ? CQ_DCMD : 0));
 
+	cqcap = cmdq_readl(cq_host, CQCAP);
+	if (cqcap & CQCAP_CS) {
+		/*
+		 * If the host controller supports cryptographic operations,
+		 * it uses a 128-bit task descriptor whose upper 64 bits
+		 * carry the crypto-specific information.
+		 */
+		cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
+				 CMDQ_TASK_DESC_SZ_128;
+		cqcfg |= CQ_ICE_ENABLE;
+		/*
+		 * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+		 * in the CQ register space, shifting a few CQ registers. Set
+		 * the offset_changed flag to use the updated addresses.
+		 */
+		cq_host->offset_changed = true;
+	}
+
 	cmdq_writel(cq_host, cqcfg, CQCFG);
 	/* enable CQ_HOST */
 	cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
@@ -688,6 +734,30 @@
 		upper_32_bits(*task_desc));
 }
 
+static inline
+void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc,
+			u64 ice_ctx)
+{
+	u64 *ice_desc = NULL;
+
+	if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) {
+		/*
+		 * Get the address of the ICE context for the given task
+		 * descriptor. The ICE context occupies its upper 64 bits:
+		 * ice_context_base_address = task_desc + 8 bytes.
+		 */
+		ice_desc = (__le64 *)((u8 *)task_desc +
+						CQ_TASK_DESC_TASK_PARAMS_SIZE);
+		memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE);
+
+		/*
+		 * Assign the upper 64 bits of the task descriptor with the ICE context
+		 */
+		if (ice_ctx)
+			*ice_desc = cpu_to_le64(ice_ctx);
+	}
+}
+
 static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
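
cmdq_prep_crypto_desc() above treats the 128-bit task descriptor as two 64-bit little-endian words: word 0 keeps the usual task parameters and word 1, at task_desc + 8 bytes, carries the ICE context. A standalone sketch of that layout; the field values are hypothetical and the real driver byte-swaps with cpu_to_le64():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TASK_PARAMS_SIZE	8	/* lower 64 bits: task parameters */
#define ICE_PARAMS_SIZE		8	/* upper 64 bits: ICE context */

int main(void)
{
	/* 128-bit task descriptor = 16 bytes, as with CMDQ_TASK_DESC_SZ_128 */
	uint8_t desc[TASK_PARAMS_SIZE + ICE_PARAMS_SIZE] = {0};
	uint64_t task_params = 0x1122334455667788ULL;	/* hypothetical */
	uint64_t ice_ctx = 0xAABBCCDD00112233ULL;	/* hypothetical */

	/* Word 0: task parameters (the existing 64-bit descriptor). */
	memcpy(desc, &task_params, sizeof(task_params));
	/* Word 1: the ICE context lives at task_desc + 8 bytes. */
	memcpy(desc + TASK_PARAMS_SIZE, &ice_ctx, sizeof(ice_ctx));

	for (size_t i = 0; i < sizeof(desc); i++)
		printf("%02x%s", desc[i], i == 7 ? " | " : " ");
	printf("\n");
	return 0;
}
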
@@ -711,6 +781,7 @@
 	u32 tag = mrq->cmdq_req->tag;
 	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
 	struct sdhci_host *host = mmc_priv(mmc);
+	u64 ice_ctx = 0;
 
 	if (!cq_host->enabled) {
 		pr_err("%s: CMDQ host not enabled yet !!!\n",
@@ -730,7 +801,7 @@
 	}
 
 	if (cq_host->ops->crypto_cfg) {
-		err = cq_host->ops->crypto_cfg(mmc, mrq, tag);
+		err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
 		if (err) {
 			pr_err("%s: failed to configure crypto: err %d tag %d\n",
 					mmc_hostname(mmc), err, tag);
@@ -743,6 +814,9 @@
 	cmdq_prep_task_desc(mrq, &data, 1,
 			    (mrq->cmdq_req->cmdq_req_flags & QBR));
 	*task_desc = cpu_to_le64(data);
+
+	cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx);
+
 	cmdq_log_task_desc_history(cq_host, *task_desc, false);
 
 	err = cmdq_prep_tran_desc(mrq, cq_host, tag);
@@ -777,17 +851,31 @@
 {
 	struct mmc_request *mrq;
 	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+	int offset = 0;
+	int err = 0;
 
+	if (cq_host->offset_changed)
+		offset = CQ_V5_VENDOR_CFG;
 	mrq = get_req_by_tag(cq_host, tag);
 	if (tag == cq_host->dcmd_slot)
 		mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
 
 	if (mrq->cmdq_req->cmdq_req_flags & DCMD)
-		cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
-			    CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+		cmdq_writel(cq_host,
+			cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+			CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
 
 	cmdq_runtime_pm_put(cq_host);
-	if (cq_host->ops->crypto_cfg_reset)
+
+	if (cq_host->ops->crypto_cfg_end) {
+		err = cq_host->ops->crypto_cfg_end(mmc, mrq);
+		if (err) {
+			pr_err("%s: failed to end ice config: err %d tag %d\n",
+					mmc_hostname(mmc), err, tag);
+		}
+	}
+	if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
+			cq_host->ops->crypto_cfg_reset)
 		cq_host->ops->crypto_cfg_reset(mmc, tag);
 	mrq->done(mrq);
 }
@@ -801,6 +889,8 @@
 	struct mmc_request *mrq;
 	int ret;
 	u32 dbr_set = 0;
+	u32 dev_pend_set = 0;
+	int stat_err = 0;
 
 	status = cmdq_readl(cq_host, CQIS);
 
@@ -809,7 +899,9 @@
 	MMC_TRACE(mmc, "%s: CQIS: 0x%x err: %d\n",
 		__func__, status, err);
 
-	if (err || (status & CQIS_RED)) {
+	stat_err = status & (CQIS_RED | CQIS_GCE | CQIS_ICCE);
+
+	if (err || stat_err) {
 		err_info = cmdq_readl(cq_host, CQTERRI);
 		pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
 		       mmc_hostname(mmc), err, status, err_info);
@@ -912,7 +1004,7 @@
 		 * CQE detected a response error from device
 		 * In most cases, this would require a reset.
 		 */
-		if (status & CQIS_RED) {
+		if (stat_err & CQIS_RED) {
 			/*
 			 * will check if the RED error is due to a bkops
 			 * exception once the queue is empty
@@ -931,6 +1023,62 @@
 			mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
 		}
 
+		/*
+		 * Generic crypto error detected by the CQE.
+		 * It is fatal and requires a cmdq reset.
+		 */
+		if (stat_err & CQIS_GCE) {
+			if (mrq->data)
+				mrq->data->error = -EIO;
+			pr_err("%s: Crypto generic error while processing task %lu!",
+				mmc_hostname(mmc), tag);
+			MMC_TRACE(mmc, "%s: GCE error detected with tag %lu\n",
+					__func__, tag);
+		}
+		/*
+		 * Invalid crypto config error detected by the CQE; clear the
+		 * task. A task can be cleared only when the CQE is halted.
+		 */
+		if (stat_err & CQIS_ICCE) {
+			/*
+			 * An Invalid Crypto Config Error is detected at the
+			 * beginning of the transfer, before actual execution
+			 * has started, so the task only needs to be cleared
+			 * in the CQE, not in the device. Only the task which
+			 * caused the ICCE has to be cleared; other tasks can
+			 * continue processing. The first task about to be
+			 * prepared is the one that causes the ICCE error.
+			 */
+			dbr_set = cmdq_readl(cq_host, CQTDBR);
+			dev_pend_set = cmdq_readl(cq_host, CQDPT);
+			if (dbr_set ^ dev_pend_set)
+				tag = ffs(dbr_set ^ dev_pend_set) - 1;
+			mrq = get_req_by_tag(cq_host, tag);
+			pr_err("%s: Crypto config error while processing task %lu!",
+				mmc_hostname(mmc), tag);
+			MMC_TRACE(mmc, "%s: ICCE error with tag %lu\n",
+						__func__, tag);
+			if (mrq->data)
+				mrq->data->error = -EIO;
+			else if (mrq->cmd)
+				mrq->cmd->error = -EIO;
+			/*
+			 * If the CQE is halted and the tag is valid, clear
+			 * the task, then un-halt the CQE and set the flag to
+			 * skip error recovery. If any of these conditions is
+			 * not met, the default error recovery path is taken.
+			 */
+			if (!ret && (dbr_set ^ dev_pend_set)) {
+				ret = cmdq_clear_task_poll(cq_host, tag);
+				if (ret) {
+					pr_err("%s: %s: task[%lu] clear failed ret=%d\n",
+						mmc_hostname(mmc),
+						__func__, tag, ret);
+				} else if (!cmdq_halt_poll(mmc, false)) {
+					mrq->cmdq_req->skip_err_handling = true;
+				}
+			}
+		}
 		cmdq_finish_data(mmc, tag);
 	} else {
 		cmdq_writel(cq_host, status, CQIS);
@@ -1001,6 +1149,7 @@
 			cq_host->ops->clear_set_irqs(mmc, true);
 		cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
 			    CQCTL);
+		mmc_host_clr_halt(mmc);
 		return 0;
 	}
 
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 8e9f765..1aabce9 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -18,11 +18,13 @@
 #define CQVER		0x00
 /* capabilities */
 #define CQCAP		0x04
+#define CQCAP_CS	(1 << 28)
 /* configuration */
 #define CQCFG		0x08
 #define CQ_DCMD		0x00001000
 #define CQ_TASK_DESC_SZ 0x00000100
 #define CQ_ENABLE	0x00000001
+#define CQ_ICE_ENABLE	0x00000002
 
 /* control */
 #define CQCTL		0x0C
@@ -35,6 +37,8 @@
 #define CQIS_TCC	(1 << 1)
 #define CQIS_RED	(1 << 2)
 #define CQIS_TCL	(1 << 3)
+#define CQIS_GCE	(1 << 4)
+#define CQIS_ICCE	(1 << 5)
 
 /* interrupt status enable */
 #define CQISTE		0x14
@@ -110,7 +114,7 @@
 /* command response argument */
 #define CQCRA		0x5C
 
-#define CQ_INT_ALL	0xF
+#define CQ_INT_ALL	0x3F
 #define CQIC_DEFAULT_ICCTH 31
 #define CQIC_DEFAULT_ICTOVAL 1
 
@@ -141,9 +145,17 @@
 #define DAT_ADDR_LO(x)	((x & 0xFFFFFFFF) << 32)
 #define DAT_ADDR_HI(x)	((x & 0xFFFFFFFF) << 0)
 
+/*
+ * Updated CQ vendor-specific register address for
+ * SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG	0x900
 #define CQ_VENDOR_CFG	0x100
 #define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
 
+#define CQ_TASK_DESC_TASK_PARAMS_SIZE	8
+#define CQ_TASK_DESC_ICE_PARAMS_SIZE	8
+
 struct task_history {
 	u64 task;
 	bool is_dcmd;
@@ -161,6 +173,7 @@
 	u32 dcmd_slot;
 	u32 caps;
 #define CMDQ_TASK_DESC_SZ_128 0x1
+#define CMDQ_CAP_CRYPTO_SUPPORT 0x2
 
 	u32 quirks;
 #define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
@@ -169,6 +182,7 @@
 	bool enabled;
 	bool halted;
 	bool init_done;
+	bool offset_changed;
 
 	u8 *desc_base;
 
@@ -209,7 +223,8 @@
 	int (*reset)(struct mmc_host *mmc);
 	void (*post_cqe_halt)(struct mmc_host *mmc);
 	int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
-				u32 slot);
+				u32 slot, u64 *ice_ctx);
+	int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq);
 	void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
 };
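
With CQIS_GCE and CQIS_ICCE added, the interrupt status register carries six defined bits instead of four, which is why CQ_INT_ALL above moves from 0xF to 0x3F. A quick standalone check of that arithmetic (the bit-0 name is assumed to be the halt-complete interrupt, which lies outside this hunk):

#include <stdio.h>

#define CQIS_HAC	(1 << 0)	/* assumed: halt complete */
#define CQIS_TCC	(1 << 1)
#define CQIS_RED	(1 << 2)
#define CQIS_TCL	(1 << 3)
#define CQIS_GCE	(1 << 4)	/* new: generic crypto error */
#define CQIS_ICCE	(1 << 5)	/* new: invalid crypto config error */

int main(void)
{
	unsigned int all = CQIS_HAC | CQIS_TCC | CQIS_RED | CQIS_TCL |
			   CQIS_GCE | CQIS_ICCE;

	printf("CQ_INT_ALL = 0x%X\n", all);	/* prints 0x3F */
	return 0;
}
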
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index df478ae..f81f417 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2610,8 +2610,8 @@
 	host->slot[id] = slot;
 
 	mmc->ops = &dw_mci_ops;
-	if (of_property_read_u32_array(host->dev->of_node,
-				       "clock-freq-min-max", freq, 2)) {
+	if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
+					   freq, 2)) {
 		mmc->f_min = DW_MCI_FREQ_MIN;
 		mmc->f_max = DW_MCI_FREQ_MAX;
 	} else {
@@ -2709,7 +2709,6 @@
 {
 	int addr_config;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 
 	/*
 	* Check transfer mode from HCON[17:16]
@@ -2770,8 +2769,9 @@
 		dev_info(host->dev, "Using internal DMA controller.\n");
 	} else {
 		/* TRANS_MODE_EDMAC: check dma bindings again */
-		if ((of_property_count_strings(np, "dma-names") < 0) ||
-		    (!of_find_property(np, "dmas", NULL))) {
+		if ((device_property_read_string_array(dev, "dma-names",
+						       NULL, 0) < 0) ||
+		    !device_property_present(dev, "dmas")) {
 			goto no_dma;
 		}
 		host->dma_ops = &dw_mci_edmac_ops;
@@ -2931,7 +2931,6 @@
 {
 	struct dw_mci_board *pdata;
 	struct device *dev = host->dev;
-	struct device_node *np = dev->of_node;
 	const struct dw_mci_drv_data *drv_data = host->drv_data;
 	int ret;
 	u32 clock_frequency;
@@ -2948,15 +2947,16 @@
 	}
 
 	/* find out number of slots supported */
-	of_property_read_u32(np, "num-slots", &pdata->num_slots);
+	device_property_read_u32(dev, "num-slots", &pdata->num_slots);
 
-	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
+	if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
 		dev_info(dev,
 			 "fifo-depth property not found, using value of FIFOTH register as default\n");
 
-	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
+	device_property_read_u32(dev, "card-detect-delay",
+				 &pdata->detect_delay_ms);
 
-	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
+	if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
 		pdata->bus_hz = clock_frequency;
 
 	if (drv_data && drv_data->parse_dt) {
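
Both the mmc_of_parse() and dw_mmc conversions above swap of_property_*() for the firmware-agnostic device_property_*() helpers, so the same parsing path works whether the properties come from device tree or from ACPI/built-in property sets. A minimal kernel-style sketch of the pattern; example_parse_caps() is a hypothetical helper, and the property names are the standard MMC bindings used above:

#include <linux/property.h>
#include <linux/mmc/host.h>

/* Hypothetical helper: map firmware properties to host caps. */
static void example_parse_caps(struct mmc_host *host)
{
	struct device *dev = host->parent;
	u32 bus_width = 1;

	/* Works for both DT ("bus-width = <4>;") and ACPI _DSD properties. */
	device_property_read_u32(dev, "bus-width", &bus_width);
	if (bus_width == 4)
		host->caps |= MMC_CAP_4_BIT_DATA;

	if (device_property_read_bool(dev, "cap-sd-highspeed"))
		host->caps |= MMC_CAP_SD_HIGHSPEED;
}
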
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index 2cb13bd..f86ce5b 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -276,6 +276,58 @@
 	mb();
 }
 
+static inline
+void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass,
+				short key_index, u64 *ice_ctx)
+{
+	/*
+	 * The register field naming convention changed between ICE2.0
+	 * and ICE3.0. The equivalent names, ICE3.0 vs ICE2.0, are:
+	 *   Data Unit Number (DUN) == Logical Base Address (LBA)
+	 *   Crypto Configuration Index (CCI) == Key Index
+	 *   Crypto Enable (CE) == !BYPASS
+	 */
+	if (ice_ctx)
+		*ice_ctx = DATA_UNIT_NUM(dun) |
+			CRYPTO_CONFIG_INDEX(key_index) |
+			CRYPTO_ENABLE(!bypass);
+}
+
+static
+void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host,
+		u64 dun, unsigned int bypass, short key_index)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int crypto_params = 0;
+	/*
+	 * The register field naming convention changed between ICE2.0
+	 * and ICE3.0. The equivalent names, ICE3.0 vs ICE2.0, are:
+	 *   Data Unit Number (DUN) == Logical Base Address (LBA)
+	 *   Crypto Configuration Index (CCI) == Key Index
+	 *   Crypto Enable (CE) == !BYPASS
+	 */
+	/* Configure ICE bypass mode */
+	crypto_params |=
+		(!bypass & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE)
+			<< OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE;
+	/* Configure the Crypto Configuration Index (CCI) */
+	crypto_params |= (key_index &
+			 MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI)
+			 << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI;
+
+	writel_relaxed((crypto_params & 0xFFFFFFFF),
+		msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS);
+
+	/* Update DUN */
+	writel_relaxed((dun & 0xFFFFFFFF),
+		msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN);
+	/* Ensure ICE registers are configured before issuing SDHCI request */
+	mb();
+}
+
 int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
 			u32 slot)
 {
@@ -308,7 +360,14 @@
 				slot, bypass, key_index);
 	}
 
-	sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+	if (msm_host->ice_hci_support) {
+		/* For ICE HCI / ICE3.0 */
+		sdhci_msm_ice_hci_update_noncq_cfg(host, lba, bypass,
+						key_index);
+	} else {
+		/* For ICE versions earlier to ICE3.0 */
+		sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+	}
 	return 0;
 }
 
@@ -318,7 +377,7 @@
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
 	int err = 0;
-	short key_index;
+	short key_index = 0;
 	sector_t lba = 0;
 	unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
 	struct request *req;
@@ -344,7 +403,45 @@
 				slot, bypass, key_index);
 	}
 
-	sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+	if (msm_host->ice_hci_support) {
+		/* For ICE HCI / ICE3.0 */
+		sdhci_msm_ice_hci_update_cmdq_cfg(lba, bypass, key_index,
+						ice_ctx);
+	} else {
+		/* For ICE versions earlier to ICE3.0 */
+		sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+	}
+	return 0;
+}
+
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int err = 0;
+	struct request *req;
+
+	if (!host->is_crypto_en)
+		return 0;
+
+	if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+		pr_err("%s: ice is in invalid state %d\n",
+			mmc_hostname(host->mmc), msm_host->ice.state);
+		return -EINVAL;
+	}
+
+	req = mrq->req;
+	if (req) {
+		if (msm_host->ice.vops->config_end) {
+			err = msm_host->ice.vops->config_end(req);
+			if (err) {
+				pr_err("%s: ice config end failed %d\n",
+						mmc_hostname(host->mmc), err);
+				return err;
+			}
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
index 4c813a0..6296174 100644
--- a/drivers/mmc/host/sdhci-msm-ice.h
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -42,6 +42,8 @@
 #define ICE_HCI_SUPPORT		(1 << 28)
 #define ICE_CQ_CONFIG		0x08
 #define CRYPTO_GENERAL_ENABLE	(1 << 1)
+#define ICE_NONCQ_CRYPTO_PARAMS	0x70
+#define ICE_NONCQ_CRYPTO_DUN	0x74
 
 /* ICE3.0 register which got added hc reg space */
 #define HC_VENDOR_SPECIFIC_FUNC4	0x260
@@ -52,8 +54,10 @@
 /* SDHCI MSM ICE CTRL Info register offset */
 enum {
 	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0,
-	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 0x1,
-	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 0x6,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 1,
+	OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 6,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI	  = 0,
+	OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE	  = 8,
 };
 
 /* SDHCI MSM ICE CTRL Info register masks */
@@ -61,6 +65,8 @@
 	MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS     = 0x1,
 	MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX  = 0x1F,
 	MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU        = 0x7,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CE		= 0x1,
+	MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI	= 0xff
 };
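
sdhci_msm_ice_hci_update_noncq_cfg() earlier in this patch packs the ICE3.0 parameter word using the offsets and masks defined here: CCI in bits [7:0] and CE in bit 8. A standalone sketch of that packing with a hypothetical key index:

#include <stdio.h>

#define OFFSET_CCI	0	/* crypto configuration index */
#define MASK_CCI	0xff
#define OFFSET_CE	8	/* crypto enable == !bypass */
#define MASK_CE		0x1

int main(void)
{
	unsigned int bypass = 0;	/* 0 => encrypt/decrypt this request */
	int key_index = 5;		/* hypothetical key slot */
	unsigned int crypto_params = 0;

	crypto_params |= (!bypass & MASK_CE) << OFFSET_CE;
	crypto_params |= (key_index & MASK_CCI) << OFFSET_CCI;

	printf("ICE_NONCQ_CRYPTO_PARAMS = 0x%08x\n", crypto_params);
	/* prints 0x00000105: CE set, CCI = 5 */
	return 0;
}
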
 
 /* SDHCI MSM ICE encryption/decryption bypass state */
@@ -101,6 +107,7 @@
 			u32 slot);
 int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
 			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq);
 int sdhci_msm_ice_reset(struct sdhci_host *host);
 int sdhci_msm_ice_resume(struct sdhci_host *host);
 int sdhci_msm_ice_suspend(struct sdhci_host *host);
@@ -137,6 +144,11 @@
 {
 	return 0;
 }
+static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host,
+			struct mmc_request *mrq)
+{
+	return 0;
+}
 inline int sdhci_msm_ice_reset(struct sdhci_host *host)
 {
 	return 0;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 726da8f..55d9cf5 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1902,8 +1902,16 @@
 			dev_err(dev, "Invalid clock table\n");
 			goto out;
 		}
+		if (ice_clk_table_len != 2) {
+			dev_err(dev, "Need max and min frequencies in the table\n");
+			goto out;
+		}
 		pdata->sup_ice_clk_table = ice_clk_table;
 		pdata->sup_ice_clk_cnt = ice_clk_table_len;
+		pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
+		pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
+		dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
+				pdata->ice_clk_max, pdata->ice_clk_min);
 	}
 
 	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
@@ -3408,6 +3416,8 @@
 	/* registers offset changed starting from 4.2.0 */
 	int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
 
+	if (cq_host->offset_changed)
+		offset += CQ_V5_VENDOR_CFG;
 	pr_err("---- Debug RAM dump ----\n");
 	pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
 	       cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
@@ -4113,9 +4123,36 @@
 	return max_curr;
 }
 
+static int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int ret = 0;
+	u32 clk_rate = 0;
+
+	if (!IS_ERR(msm_host->ice_clk)) {
+		clk_rate = (state == MMC_LOAD_LOW) ?
+			msm_host->pdata->ice_clk_min :
+			msm_host->pdata->ice_clk_max;
+		if (msm_host->ice_clk_rate == clk_rate)
+			return 0;
+		pr_debug("%s: changing ICE clk rate to %u\n",
+				mmc_hostname(host->mmc), clk_rate);
+		ret = clk_set_rate(msm_host->ice_clk, clk_rate);
+		if (ret) {
+			pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
+				mmc_hostname(host->mmc), ret, clk_rate);
+			return ret;
+		}
+		msm_host->ice_clk_rate = clk_rate;
+	}
+	return 0;
+}
+
 static struct sdhci_ops sdhci_msm_ops = {
 	.crypto_engine_cfg = sdhci_msm_ice_cfg,
 	.crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
+	.crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
 	.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
 	.crypto_engine_reset = sdhci_msm_ice_reset,
 	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
@@ -4139,6 +4176,7 @@
 	.pre_req = sdhci_msm_pre_req,
 	.post_req = sdhci_msm_post_req,
 	.get_current_limit = sdhci_msm_get_current_limit,
+	.notify_load = sdhci_msm_notify_load,
 };
 
 static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
@@ -4351,7 +4389,7 @@
 		 */
 		dev_err(&pdev->dev, "%s: required ICE device not probed yet err = %d\n",
 			__func__, ret);
-		goto out_host_free;
+		goto pltfm_free;
 
 	} else if (ret == -ENODEV) {
 		/*
@@ -4363,7 +4401,7 @@
 	} else if (ret) {
 		dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
 			__func__, ret);
-		goto out_host_free;
+		goto pltfm_free;
 	}
 
 	/* Extract platform data */
@@ -4436,11 +4474,11 @@
 		if (!IS_ERR(msm_host->ice_clk)) {
 			/* ICE core has only one clock frequency for now */
 			ret = clk_set_rate(msm_host->ice_clk,
-					msm_host->pdata->sup_ice_clk_table[0]);
+					msm_host->pdata->ice_clk_max);
 			if (ret) {
 				dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
 					ret,
-					msm_host->pdata->sup_ice_clk_table[0]);
+					msm_host->pdata->ice_clk_max);
 				goto pclk_disable;
 			}
 			ret = clk_prepare_enable(msm_host->ice_clk);
@@ -4448,7 +4486,7 @@
 				goto pclk_disable;
 
 			msm_host->ice_clk_rate =
-				msm_host->pdata->sup_clk_table[0];
+				msm_host->pdata->ice_clk_max;
 		}
 	}
 
@@ -4950,8 +4988,6 @@
 		if (msm_host->msm_bus_vote.client_handle)
 			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
 	}
-	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
-			ktime_to_us(ktime_sub(ktime_get(), start)));
 
 	if (host->is_crypto_en) {
 		ret = sdhci_msm_ice_suspend(host);
@@ -4959,6 +4995,8 @@
 			pr_err("%s: failed to suspend crypto engine %d\n",
 					mmc_hostname(host->mmc), ret);
 	}
+	trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
+			ktime_to_us(ktime_sub(ktime_get(), start)));
 	return 0;
 }
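
sdhci_msm_notify_load() above scales the ICE core clock with the MMC load hint: the minimum table rate for MMC_LOAD_LOW, the maximum otherwise, and no clk_set_rate() call when the rate is already current. A standalone sketch of just that selection logic; the rates and the structure below are made up for illustration:

#include <stdio.h>

enum mmc_load { MMC_LOAD_HIGH, MMC_LOAD_LOW };

struct ice_clk_scaler {
	unsigned int clk_min;	/* second entry of the ICE clock table above */
	unsigned int clk_max;	/* first entry of the ICE clock table above */
	unsigned int cur_rate;
};

/* Return the rate to program, or 0 when no change is needed. */
static unsigned int pick_ice_rate(struct ice_clk_scaler *s, enum mmc_load load)
{
	unsigned int rate = (load == MMC_LOAD_LOW) ? s->clk_min : s->clk_max;

	if (rate == s->cur_rate)
		return 0;
	s->cur_rate = rate;
	return rate;
}

int main(void)
{
	struct ice_clk_scaler s = { 75000000, 300000000, 0 };	/* hypothetical */

	printf("%u\n", pick_ice_rate(&s, MMC_LOAD_HIGH));	/* 300000000 */
	printf("%u\n", pick_ice_rate(&s, MMC_LOAD_HIGH));	/* 0 (no change) */
	printf("%u\n", pick_ice_rate(&s, MMC_LOAD_LOW));	/* 75000000 */
	return 0;
}
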
 
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index cdbaaa9..7c0b873 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -151,6 +151,8 @@
 	unsigned char sup_ice_clk_cnt;
 	struct sdhci_msm_pm_qos_data pm_qos_data;
 	bool sdr104_wa;
+	u32 ice_clk_max;
+	u32 ice_clk_min;
 };
 
 struct sdhci_msm_bus_vote {
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index a8b430f..83b84ff 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -31,6 +31,7 @@
 
 #define SDMMC_MC1R	0x204
 #define		SDMMC_MC1R_DDR		BIT(3)
+#define		SDMMC_MC1R_FCD		BIT(7)
 #define SDMMC_CACR	0x230
 #define		SDMMC_CACR_CAPWREN	BIT(0)
 #define		SDMMC_CACR_KEY		(0x46 << 8)
@@ -43,6 +44,15 @@
 	struct clk *mainck;
 };
 
+static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
+{
+	u8 mc1r;
+
+	mc1r = readb(host->ioaddr + SDMMC_MC1R);
+	mc1r |= SDMMC_MC1R_FCD;
+	writeb(mc1r, host->ioaddr + SDMMC_MC1R);
+}
+
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
 	u16 clk;
@@ -112,10 +122,18 @@
 	sdhci_set_uhs_signaling(host, timing);
 }
 
+static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
+{
+	sdhci_reset(host, mask);
+
+	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+		sdhci_at91_set_force_card_detect(host);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
-	.reset			= sdhci_reset,
+	.reset			= sdhci_at91_reset,
 	.set_uhs_signaling	= sdhci_at91_set_uhs_signaling,
 	.set_power		= sdhci_at91_set_power,
 };
@@ -322,6 +340,21 @@
 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	}
 
+	/*
+	 * If the device attached to the MMC bus is not removable, it is safer
+	 * to set the Force Card Detect bit. People often don't connect the
+	 * card detect signal and use this pin for another purpose. If the card
+	 * detect pin is not muxed to the SDHCI controller, a default value is
+	 * used. This value can differ from one SoC revision to another, and
+	 * problems arise when the default value is not "card present". To
+	 * avoid this, if the device is non-removable, the card detection
+	 * procedure using the SDMCC_CD signal is bypassed.
+	 * This bit is reset when a software reset for all commands is
+	 * performed, so we need our own reset function to set this bit back.
+	 */
+	if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+		sdhci_at91_set_force_card_detect(host);
+
 	pm_runtime_put_autosuspend(&pdev->dev);
 
 	return 0;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bf32a87..4476e51 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1785,6 +1785,22 @@
 	return err;
 }
 
+static int sdhci_crypto_cfg_end(struct sdhci_host *host,
+				struct mmc_request *mrq)
+{
+	int err = 0;
+
+	if (host->ops->crypto_engine_cfg_end) {
+		err = host->ops->crypto_engine_cfg_end(host, mrq);
+		if (err) {
+			pr_err("%s: failed to configure crypto\n",
+					mmc_hostname(host->mmc));
+			return err;
+		}
+	}
+	return 0;
+}
+
 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sdhci_host *host;
@@ -2876,6 +2892,7 @@
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
 
+	sdhci_crypto_cfg_end(host, mrq);
 	mmc_request_done(host->mmc, mrq);
 
 	return false;
@@ -3775,7 +3792,7 @@
 	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
 }
 static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
-		struct mmc_request *mrq, u32 slot)
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 	int err = 0;
@@ -3794,7 +3811,8 @@
 	}
 
 	if (host->ops->crypto_engine_cmdq_cfg) {
-		err = host->ops->crypto_engine_cmdq_cfg(host, mrq, slot, NULL);
+		err = host->ops->crypto_engine_cmdq_cfg(host, mrq,
+				slot, ice_ctx);
 		if (err) {
 			pr_err("%s: failed to configure crypto\n",
 					mmc_hostname(host->mmc));
@@ -3805,6 +3823,17 @@
 	return err;
 }
 
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+					struct mmc_request *mrq)
+{
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	if (!host->is_crypto_en)
+		return 0;
+
+	return sdhci_crypto_cfg_end(host, mrq);
+}
+
 static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
@@ -3862,7 +3891,13 @@
 }
 
 static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
-		struct mmc_request *mrq, u32 slot)
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	return 0;
+}
+
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+				struct mmc_request *mrq)
 {
 	return 0;
 }
@@ -3883,6 +3918,7 @@
 	.post_cqe_halt = sdhci_cmdq_post_cqe_halt,
 	.set_transfer_params = sdhci_cmdq_set_transfer_params,
 	.crypto_cfg	= sdhci_cmdq_crypto_cfg,
+	.crypto_cfg_end	= sdhci_cmdq_crypto_cfg_end,
 	.crypto_cfg_reset	= sdhci_cmdq_crypto_cfg_reset,
 };
 
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a463e45..2b67d0a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -673,6 +673,8 @@
 				struct mmc_request *mrq, u32 slot);
 	int	(*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
 			struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+	int	(*crypto_engine_cfg_end)(struct sdhci_host *host,
+					struct mmc_request *mrq);
 	int	(*crypto_engine_reset)(struct sdhci_host *host);
 	void	(*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
 	void	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index d7f724b..0c84ee8 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -877,6 +877,8 @@
 	}
 }
 
+#define MXC_V1_ECCBYTES		5
+
 static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
 				struct mtd_oob_region *oobregion)
 {
@@ -886,7 +888,7 @@
 		return -ERANGE;
 
 	oobregion->offset = (section * 16) + 6;
-	oobregion->length = nand_chip->ecc.bytes;
+	oobregion->length = MXC_V1_ECCBYTES;
 
 	return 0;
 }
@@ -908,8 +910,7 @@
 			oobregion->length = 4;
 		}
 	} else {
-		oobregion->offset = ((section - 1) * 16) +
-				    nand_chip->ecc.bytes + 6;
+		oobregion->offset = ((section - 1) * 16) + MXC_V1_ECCBYTES + 6;
 		if (section < nand_chip->ecc.steps)
 			oobregion->length = (section * 16) + 6 -
 					    oobregion->offset;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index f222f8a..31a6ee3 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -64,8 +64,14 @@
 
 	if (!section) {
 		oobregion->offset = 0;
-		oobregion->length = 4;
+		if (mtd->oobsize == 16)
+			oobregion->length = 4;
+		else
+			oobregion->length = 3;
 	} else {
+		if (mtd->oobsize == 8)
+			return -ERANGE;
+
 		oobregion->offset = 6;
 		oobregion->length = ecc->total - 4;
 	}
@@ -1081,7 +1087,9 @@
 	 * Ensure the timing mode has been changed on the chip side
 	 * before changing timings on the controller side.
 	 */
-	if (chip->onfi_version) {
+	if (chip->onfi_version &&
+	    (le16_to_cpu(chip->onfi_params.opt_cmd) &
+	     ONFI_OPT_CMD_SET_GET_FEATURES)) {
 		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
 			chip->onfi_timing_mode_default,
 		};
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 57d483a..6f0fd15 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -109,7 +109,11 @@
 #define	READ_ADDR			0
 
 /* NAND_DEV_CMD_VLD bits */
-#define	READ_START_VLD			0
+#define	READ_START_VLD			BIT(0)
+#define	READ_STOP_VLD			BIT(1)
+#define	WRITE_START_VLD			BIT(2)
+#define	ERASE_START_VLD			BIT(3)
+#define	SEQ_READ_START_VLD		BIT(4)
 
 /* NAND_EBI2_ECC_BUF_CFG bits */
 #define	NUM_STEPS			0
@@ -148,6 +152,10 @@
 #define	FETCH_ID			0xb
 #define	RESET_DEVICE			0xd
 
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL		(READ_START_VLD | WRITE_START_VLD | \
+					 ERASE_START_VLD | SEQ_READ_START_VLD)
+
 /*
  * the NAND controller performs reads/writes with ECC in 516 byte chunks.
  * the driver calls the chunks 'step' or 'codeword' interchangeably
@@ -672,8 +680,7 @@
 
 	/* configure CMD1 and VLD for ONFI param probing */
 	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
-		      (nandc->vld & ~(1 << READ_START_VLD))
-		      | 0 << READ_START_VLD);
+		      (nandc->vld & ~READ_START_VLD));
 	nandc_set_reg(nandc, NAND_DEV_CMD1,
 		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
 		      | NAND_CMD_PARAM << READ_ADDR);
@@ -1893,7 +1900,7 @@
 				| wide_bus << WIDE_FLASH
 				| 1 << DEV0_CFG1_ECC_DISABLE;
 
-	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
+	host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
 				| 0 << ECC_SW_RESET
 				| host->cw_data << ECC_NUM_DATA_BYTES
 				| 1 << ECC_FORCE_CLK_OPEN
@@ -1972,13 +1979,14 @@
 {
 	/* kill onenand */
 	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+	nandc_write(nandc, NAND_DEV_CMD_VLD, NAND_DEV_CMD_VLD_VAL);
 
 	/* enable ADM DMA */
 	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
 
 	/* save the original values of these registers */
 	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
-	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
+	nandc->vld = NAND_DEV_CMD_VLD_VAL;
 
 	return 0;
 }
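
The qcom_nandc change turns the VLD bit definitions into masks and programs a known default, NAND_DEV_CMD_VLD_VAL, instead of trusting whatever value the bootloader left in NAND_DEV_CMD_VLD; ONFI parameter probing then simply clears READ_START_VLD from that cached default. A standalone sketch of the resulting register values:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define READ_START_VLD		BIT(0)
#define READ_STOP_VLD		BIT(1)
#define WRITE_START_VLD		BIT(2)
#define ERASE_START_VLD		BIT(3)
#define SEQ_READ_START_VLD	BIT(4)

#define NAND_DEV_CMD_VLD_VAL	(READ_START_VLD | WRITE_START_VLD | \
				 ERASE_START_VLD | SEQ_READ_START_VLD)

int main(void)
{
	unsigned int vld = NAND_DEV_CMD_VLD_VAL;

	printf("default VLD      = 0x%02x\n", vld);			/* 0x1d */
	printf("VLD for ONFI read = 0x%02x\n", vld & ~READ_START_VLD);	/* 0x1c */
	return 0;
}
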
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 947adda..3ec573c 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1558,6 +1558,7 @@
 		.dev_name = "BCM53125",
 		.vlans = 4096,
 		.enabled_ports = 0xff,
+		.arl_entries = 4,
 		.cpu_port = B53_CPU_PORT,
 		.vta_regs = B53_VTA_REGS,
 		.duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index e078d8d..29d29af 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -609,7 +609,7 @@
 		mac_mode |= HALF_DUPLEX;
 
 	if (gigabit) {
-		if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		if (phy_interface_is_rgmii(dev->phydev))
 			mac_mode |= RGMII_MODE;
 
 		mac_mode |= GMAC_MODE;
@@ -1277,11 +1277,10 @@
 		break;
 
 	case PHY_INTERFACE_MODE_RGMII:
-		pad_mode = PAD_MODE_RGMII;
-		break;
-
+	case PHY_INTERFACE_MODE_RGMII_ID:
+	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+		pad_mode = PAD_MODE_RGMII;
 		break;
 
 	default:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a927a73..edae2dc 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8720,11 +8720,14 @@
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);
 
+	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+	tg3_full_lock(tp, 0);
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
+	tg3_full_unlock(tp);
 }
 
 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e813951..9e073fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -317,12 +317,12 @@
 
 	if (v != MBOX_OWNER_DRV) {
 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
-		t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+		t4_record_mbox(adap, cmd, size, access, ret);
 		return ret;
 	}
 
 	/* Copy in the new mailbox command and send it on its way ... */
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+	t4_record_mbox(adap, cmd, size, access, 0);
 	for (i = 0; i < size; i += 8)
 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -371,7 +371,7 @@
 	}
 
 	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+	t4_record_mbox(adap, cmd, size, access, ret);
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 736db9d..81021f8 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -622,6 +622,9 @@
 		goto no_mem;
 	}
 
+	pdev->dev.of_node = node;
+	pdev->dev.parent = priv->dev;
+
 	ret = platform_device_add_data(pdev, &data, sizeof(data));
 	if (ret)
 		goto err;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3f4e711..fd20688 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3690,7 +3690,7 @@
 		u32 tempval1 = gfar_read(&regs->maccfg1);
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
-		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
 
 		if (phydev->duplex != priv->oldduplex) {
 			if (!(phydev->duplex))
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5d48458..bcbb80f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -724,16 +724,21 @@
  * header, the HW adds it. To address that, we are subtracting the pseudo
  * header checksum from the checksum value provided by the HW.
  */
-static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
-				struct iphdr *iph)
+static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
+			       struct iphdr *iph)
 {
 	__u16 length_for_csum = 0;
 	__wsum csum_pseudo_header = 0;
+	__u8 ipproto = iph->protocol;
+
+	if (unlikely(ipproto == IPPROTO_SCTP))
+		return -1;
 
 	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
 	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
-						length_for_csum, iph->protocol, 0);
+						length_for_csum, ipproto, 0);
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -744,17 +749,20 @@
 static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 			       struct ipv6hdr *ipv6h)
 {
+	__u8 nexthdr = ipv6h->nexthdr;
 	__wsum csum_pseudo_hdr = 0;
 
-	if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
-		     ipv6h->nexthdr == IPPROTO_HOPOPTS))
+	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
+		     nexthdr == IPPROTO_HOPOPTS ||
+		     nexthdr == IPPROTO_SCTP))
 		return -1;
-	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
 
 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
 	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
-	csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr));
+	csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
+				   (__force __wsum)htons(nexthdr));
 
 	skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
 	skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -777,11 +785,10 @@
 	}
 
 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
-		get_fixed_ipv4_csum(hw_checksum, skb, hdr);
+		return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
 #if IS_ENABLED(CONFIG_IPV6)
-	else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
-		if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
-			return -1;
+	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
+		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
 #endif
 	return 0;
 }
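
The mlx4 RX hunks above fix up the hardware checksum by subtracting the IPv4 pseudo-header sum (the HW checksums the payload without the pseudo-header) and now bail out for SCTP, whose CRC32c cannot be derived this way. A self-contained sketch of the ones-complement pseudo-header arithmetic, with made-up addresses; it works in host byte order for illustration only, while the kernel operates on __be32/__wsum values:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones-complement accumulator into 16 bits. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* TCP/UDP pseudo-header sum: saddr, daddr, protocol, L4 length. */
static uint32_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += len;
	return sum;
}

int main(void)
{
	/* hypothetical 192.168.1.1 -> 192.168.1.2, TCP, 40-byte segment */
	uint32_t sum = pseudo_hdr_sum(0xC0A80101, 0xC0A80102, 6, 40);

	printf("pseudo-header sum (folded) = 0x%04x\n", csum_fold(sum));
	return 0;
}
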
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 551786f..ba652d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -430,7 +430,7 @@
 		/* Virtual PCI function needs to determine UAR page size from
 		 * firmware. Only master PCI function can set the uar page size
 		 */
-		if (enable_4k_uar)
+		if (enable_4k_uar || !dev->persist->num_vfs)
 			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
 		else
 			dev->uar_page_shift = PAGE_SHIFT;
@@ -2269,7 +2269,7 @@
 
 		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
 
-		if (enable_4k_uar) {
+		if (enable_4k_uar || !dev->persist->num_vfs) {
 			init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
 						    PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
 			init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index cb45390..f7fabec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -770,6 +770,10 @@
 	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg);
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -779,16 +783,27 @@
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
 	unsigned long flags;
+	int alloc_ret;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
+		alloc_ret = alloc_ent(cmd);
+		if (alloc_ret < 0) {
+			if (ent->callback) {
+				ent->callback(-EAGAIN, ent->context);
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+				free_cmd(ent);
+			} else {
+				ent->ret = -EAGAIN;
+				complete(&ent->done);
+			}
 			mlx5_core_err(dev, "failed to allocate command entry\n");
 			up(sem);
 			return;
 		}
+		ent->idx = alloc_ret;
 	} else {
 		ent->idx = cmd->max_reg_cmds;
 		spin_lock_irqsave(&cmd->alloc_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 13dc388..1612ec0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -62,12 +62,14 @@
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
 						   overflow_work);
+	struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
 	unsigned long flags;
 
 	write_lock_irqsave(&tstamp->lock, flags);
 	timecounter_read(&tstamp->clock);
 	write_unlock_irqrestore(&tstamp->lock, flags);
-	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+	queue_delayed_work(priv->wq, &tstamp->overflow_work,
+			   msecs_to_jiffies(tstamp->overflow_period * 1000));
 }
 
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -263,7 +265,7 @@
 
 	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
 	if (tstamp->overflow_period)
-		schedule_delayed_work(&tstamp->overflow_work, 0);
+		queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
 	else
 		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index e034dbc..cf070fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -276,7 +276,7 @@
 
 static bool outer_header_zero(u32 *match_criteria)
 {
-	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
 	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
 					     outer_headers);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 6ffd5d2..52a3810 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -651,9 +651,14 @@
 	int vport;
 	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	err = esw_create_offloads_fdb_table(esw, nvports);
 	if (err)
-		return err;
+		goto create_fdb_err;
 
 	err = esw_create_offloads_table(esw);
 	if (err)
@@ -673,11 +678,6 @@
 			goto err_reps;
 	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	return 0;
 
 err_reps:
@@ -694,6 +694,13 @@
 
 create_ft_err:
 	esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
@@ -701,11 +708,6 @@
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
@@ -715,6 +717,11 @@
 			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
 	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	return err;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b5d5519..0ca4623 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -157,22 +157,17 @@
 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
 					   u8 *port1, u8 *port2)
 {
-	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		if (tracker->netdev_state[0].tx_enabled) {
-			*port1 = 1;
-			*port2 = 1;
-		} else {
-			*port1 = 2;
-			*port2 = 2;
-		}
-	} else {
-		*port1 = 1;
-		*port2 = 2;
-		if (!tracker->netdev_state[0].link_up)
-			*port1 = 2;
-		else if (!tracker->netdev_state[1].link_up)
-			*port2 = 1;
+	*port1 = 1;
+	*port2 = 2;
+	if (!tracker->netdev_state[0].tx_enabled ||
+	    !tracker->netdev_state[0].link_up) {
+		*port1 = 2;
+		return;
 	}
+
+	if (!tracker->netdev_state[1].tx_enabled ||
+	    !tracker->netdev_state[1].link_up)
+		*port2 = 1;
 }
 
 static void mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f902c4d..1806b1f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4172,6 +4172,8 @@
 			return -EINVAL;
 		if (!info->linking)
 			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		/* HW limitation forbids to put ports to multiple bridges. */
 		if (netif_is_bridge_master(upper_dev) &&
 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
@@ -4185,6 +4187,10 @@
 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
 			return -EINVAL;
+		if (!info->linking)
+			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index aee3fd2..4ca82bd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -871,8 +871,7 @@
 	return NETDEV_TX_OK;
 
 err_unmap:
-	--f;
-	while (f >= 0) {
+	while (--f >= 0) {
 		frag = &skb_shinfo(skb)->frags[f];
 		dma_unmap_page(&nn->pdev->dev,
 			       tx_ring->txbufs[wr_idx].dma_addr,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 829be21..be258d9 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@
 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
 	seg_hdr->segNum = seg_number;
 	seg_hdr->segSize = seg_size;
-	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
 }
 
 /*
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 12be259..2140ded 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -574,6 +574,7 @@
 	.rpadir_value   = 2 << 16,
 	.no_trimd	= 1,
 	.no_ade		= 1,
+	.hw_crc		= 1,
 	.tsu		= 1,
 	.select_mii	= 1,
 	.shift_rd0	= 1,
@@ -802,7 +803,7 @@
 
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -832,7 +833,7 @@
 
 	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
 	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ff038e5..36a04e1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1084,7 +1084,12 @@
 	bool notify = false, reschedule = false;
 	unsigned long flags, next_reconfig, delay;
 
-	rtnl_lock();
+	/* if changes are happening, come back later */
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+		return;
+	}
+
 	if (ndev_ctx->start_remove)
 		goto out_unlock;
 
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index bca6a1e..e1bb802 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -141,9 +141,19 @@
 static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
 {
 	struct usb_device *dev = mcs->usbdev;
-	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
-				  MCS_RD_RTYPE, 0, reg, val, 2,
-				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+	void *dmabuf;
+	int ret;
+
+	dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+	if (!dmabuf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+			      MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+	memcpy(val, dmabuf, sizeof(__u16));
+	kfree(dmabuf);
 
 	return ret;
 }
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index a5d66e2..2caac0c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3510,6 +3510,7 @@
 module_exit(macsec_exit);
 
 MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
 
 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 4cad955..01cf094 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
 #define MII_DP83867_MICR	0x12
 #define MII_DP83867_ISR		0x13
 #define DP83867_CTRL		0x1f
+#define DP83867_CFG3		0x1e
 
 /* Extended Registers */
 #define DP83867_RGMIICTL	0x0032
@@ -90,6 +91,8 @@
 		micr_status |=
 			(MII_DP83867_MICR_AN_ERR_INT_EN |
 			MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+			MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+			MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
 			MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
 			MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
 
@@ -190,6 +193,13 @@
 				       DP83867_DEVADDR, delay);
 	}
 
+	/* Enable Interrupt output INT_OE in CFG3 register */
+	if (phy_interrupt_is_valid(phydev)) {
+		val = phy_read(phydev, DP83867_CFG3);
+		val |= BIT(7);
+		phy_write(phydev, DP83867_CFG3, val);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index edd30eb..6e12401 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1060,6 +1060,15 @@
 			if (old_link != phydev->link)
 				phydev->state = PHY_CHANGELINK;
 		}
+		/*
+		 * Failsafe: check that nobody set phydev->link=0 between two
+		 * poll cycles, otherwise we won't leave RUNNING state as long
+		 * as link remains down.
+		 */
+		if (!phydev->link && phydev->state == PHY_RUNNING) {
+			phydev->state = PHY_CHANGELINK;
+			phydev_err(phydev, "no link in PHY_RUNNING\n");
+		}
 		break;
 	case PHY_CHANGELINK:
 		err = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 9e7b783..bf02f8e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1714,6 +1714,8 @@
 {
 	struct phy_device *phydev = to_phy_device(dev);
 
+	cancel_delayed_work_sync(&phydev->state_queue);
+
 	mutex_lock(&phydev->lock);
 	phydev->state = PHY_DOWN;
 	mutex_unlock(&phydev->lock);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 5489c0e..96fa0e6 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -119,6 +119,7 @@
 	int		n_channels;	/* how many channels are attached 54 */
 	spinlock_t	rlock;		/* lock for receive side 58 */
 	spinlock_t	wlock;		/* lock for transmit side 5c */
+	int		*xmit_recursion __percpu; /* xmit recursion detect */
 	int		mru;		/* max receive unit 60 */
 	unsigned int	flags;		/* control bits 64 */
 	unsigned int	xstate;		/* transmit state bits 68 */
@@ -1024,6 +1025,7 @@
 	struct ppp *ppp = netdev_priv(dev);
 	int indx;
 	int err;
+	int cpu;
 
 	ppp->dev = dev;
 	ppp->ppp_net = src_net;
@@ -1038,6 +1040,15 @@
 	INIT_LIST_HEAD(&ppp->channels);
 	spin_lock_init(&ppp->rlock);
 	spin_lock_init(&ppp->wlock);
+
+	ppp->xmit_recursion = alloc_percpu(int);
+	if (!ppp->xmit_recursion) {
+		err = -ENOMEM;
+		goto err1;
+	}
+	for_each_possible_cpu(cpu)
+		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
+
 #ifdef CONFIG_PPP_MULTILINK
 	ppp->minseq = -1;
 	skb_queue_head_init(&ppp->mrq);
@@ -1049,11 +1060,15 @@
 
 	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
 	if (err < 0)
-		return err;
+		goto err2;
 
 	conf->file->private_data = &ppp->file;
 
 	return 0;
+err2:
+	free_percpu(ppp->xmit_recursion);
+err1:
+	return err;
 }
 
 static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1399,18 +1414,16 @@
 	ppp_xmit_unlock(ppp);
 }
 
-static DEFINE_PER_CPU(int, ppp_xmit_recursion);
-
 static void ppp_xmit_process(struct ppp *ppp)
 {
 	local_bh_disable();
 
-	if (unlikely(__this_cpu_read(ppp_xmit_recursion)))
+	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
 		goto err;
 
-	__this_cpu_inc(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))++;
 	__ppp_xmit_process(ppp);
-	__this_cpu_dec(ppp_xmit_recursion);
+	(*this_cpu_ptr(ppp->xmit_recursion))--;
 
 	local_bh_enable();
 
@@ -1901,23 +1914,23 @@
 	spin_unlock_bh(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
 	if (skb_queue_empty(&pch->file.xq)) {
-		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp)
 			__ppp_xmit_process(ppp);
-		read_unlock_bh(&pch->upl);
 	}
 }
 
 static void ppp_channel_push(struct channel *pch)
 {
-	local_bh_disable();
-
-	__this_cpu_inc(ppp_xmit_recursion);
-	__ppp_channel_push(pch);
-	__this_cpu_dec(ppp_xmit_recursion);
-
-	local_bh_enable();
+	read_lock_bh(&pch->upl);
+	if (pch->ppp) {
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
+		__ppp_channel_push(pch);
+		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
+	} else {
+		__ppp_channel_push(pch);
+	}
+	read_unlock_bh(&pch->upl);
 }
 
 /*
@@ -3056,6 +3069,7 @@
 #endif /* CONFIG_PPP_FILTER */
 
 	kfree_skb(ppp->xmit_pending);
+	free_percpu(ppp->xmit_recursion);
 
 	free_netdev(ppp->dev);
 }
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2f260c6..49a27dc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -876,6 +876,7 @@
 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 972b5e2..366d3dc 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1852,6 +1852,12 @@
 		goto err_wmi_detach;
 	}
 
+	/* If firmware indicates Full Rx Reorder support it must be used in a
+	 * slightly different manner. Let HTT code know.
+	 */
+	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+						ar->wmi.svc_map));
+
 	status = ath10k_htt_rx_alloc(&ar->htt);
 	if (status) {
 		ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
@@ -1964,12 +1970,6 @@
 		}
 	}
 
-	/* If firmware indicates Full Rx Reorder support it must be used in a
-	 * slightly different manner. Let HTT code know.
-	 */
-	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
-						ar->wmi.svc_map));
-
 	status = ath10k_htt_rx_ring_refill(ar);
 	if (status) {
 		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b7fe0af..63754ee 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -807,9 +807,15 @@
 					WLAN_STATUS_SUCCESS, GFP_KERNEL);
 		cfg80211_put_bss(ar->wiphy, bss);
 	} else if (vif->sme_state == SME_CONNECTED) {
+		struct cfg80211_roam_info roam_info = {
+			.bss = bss,
+			.req_ie = assoc_req_ie,
+			.req_ie_len = assoc_req_len,
+			.resp_ie = assoc_resp_ie,
+			.resp_ie_len = assoc_resp_len,
+		};
 		/* inform roam event to cfg80211 */
-		cfg80211_roamed_bss(vif->ndev, bss, assoc_req_ie, assoc_req_len,
-				    assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
+		cfg80211_roamed(vif->ndev, &roam_info, GFP_KERNEL);
 	}
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9552d2a..e2a459e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1802,9 +1802,13 @@
 
 	wil_dbg_pm(wil, "suspending\n");
 
+	mutex_lock(&wil->mutex);
 	wil_p2p_stop_discovery(wil);
 
+	mutex_lock(&wil->p2p_wdev_mutex);
 	wil_abort_scan(wil, true);
+	mutex_unlock(&wil->p2p_wdev_mutex);
+	mutex_unlock(&wil->mutex);
 
 out:
 	return rc;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 45a8081..831780a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -242,12 +242,19 @@
 static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
 
 	wil_print_ring(s, "tx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, tx));
 	wil_print_ring(s, "rx", wil->csr + HOST_MBOX +
 		       offsetof(struct wil6210_mbox_ctl, rx));
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -265,15 +272,38 @@
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	writel(val, (void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = (struct
+					    wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	writel_relaxed(val, (void __iomem *)d->offset);
+
 	wmb(); /* make sure write propagated to HW */
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = readl((void __iomem *)data);
+	struct wil_debugfs_iomem_data *d = (struct
+					    wil_debugfs_iomem_data *)data;
+	struct wil6210_priv *wil = d->wil;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	*val = readl_relaxed((void __iomem *)d->offset);
+
+	wil_pm_runtime_put(wil);
 
 	return 0;
 }
@@ -284,10 +314,21 @@
 static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
 						   umode_t mode,
 						   struct dentry *parent,
-						   void *value)
+						   void *value,
+						   struct wil6210_priv *wil)
 {
-	return debugfs_create_file(name, mode, parent, value,
-				   &fops_iomem_x32);
+	struct dentry *file;
+	struct wil_debugfs_iomem_data *data = &wil->dbg_data.data_arr[
+					      wil->dbg_data.iomem_data_count];
+
+	data->wil = wil;
+	data->offset = value;
+
+	file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
+	if (!IS_ERR_OR_NULL(file))
+		wil->dbg_data.iomem_data_count++;
+
+	return file;
 }
 
 static int wil_debugfs_ulong_set(void *data, u64 val)
@@ -346,7 +387,8 @@
 		case doff_io32:
 			f = wil_debugfs_create_iomem_x32(tbl[i].name,
 							 tbl[i].mode, dbg,
-							 base + tbl[i].off);
+							 base + tbl[i].off,
+							 wil);
 			break;
 		case doff_u8:
 			f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
@@ -475,13 +517,22 @@
 static int wil_memread_debugfs_show(struct seq_file *s, void *data)
 {
 	struct wil6210_priv *wil = s->private;
-	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
+	void __iomem *a;
+	int ret;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
+	a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
 		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 }
 
@@ -502,10 +553,12 @@
 {
 	enum { max_count = 4096 };
 	struct wil_blob_wrapper *wil_blob = file->private_data;
+	struct wil6210_priv *wil = wil_blob->wil;
 	loff_t pos = *ppos;
 	size_t available = wil_blob->blob.size;
 	void *buf;
 	size_t ret;
+	int rc;
 
 	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
 	    test_bit(wil_status_suspended, wil_blob->wil->status))
@@ -526,10 +579,19 @@
 	if (!buf)
 		return -ENOMEM;
 
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0) {
+		kfree(buf);
+		return rc;
+	}
+
 	wil_memcpy_fromio_32(buf, (const void __iomem *)
 			     wil_blob->blob.data + pos, count);
 
 	ret = copy_to_user(user_buf, buf, count);
+
+	wil_pm_runtime_put(wil);
+
 	kfree(buf);
 	if (ret == count)
 		return -EFAULT;
@@ -1786,6 +1848,13 @@
 	{},
 };
 
+static const int dbg_off_count = 4 * (ARRAY_SIZE(isr_off) - 1) +
+				ARRAY_SIZE(dbg_wil_regs) - 1 +
+				ARRAY_SIZE(pseudo_isr_off) - 1 +
+				ARRAY_SIZE(lgc_itr_cnt_off) - 1 +
+				ARRAY_SIZE(tx_itr_cnt_off) - 1 +
+				ARRAY_SIZE(rx_itr_cnt_off) - 1;
+
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
 	struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME,
@@ -1794,6 +1863,17 @@
 	if (IS_ERR_OR_NULL(dbg))
 		return -ENODEV;
 
+	wil->dbg_data.data_arr = kcalloc(dbg_off_count,
+					 sizeof(struct wil_debugfs_iomem_data),
+					 GFP_KERNEL);
+	if (!wil->dbg_data.data_arr) {
+		debugfs_remove_recursive(dbg);
+		wil->debug = NULL;
+		return -ENOMEM;
+	}
+
+	wil->dbg_data.iomem_data_count = 0;
+
 	wil_pmc_init(wil);
 
 	wil6210_debugfs_init_files(wil, dbg);
@@ -1818,6 +1898,8 @@
 	debugfs_remove_recursive(wil->debug);
 	wil->debug = NULL;
 
+	kfree(wil->dbg_data.data_arr);
+
 	/* free pmc memory without sending command to fw, as it will
 	 * be reset on the way down anyway
 	 */
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index adcfef4..66200f6 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -47,9 +47,14 @@
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
 	u32 tx_itr_en, tx_itr_val = 0;
 	u32 rx_itr_en, rx_itr_val = 0;
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
 		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
@@ -58,6 +63,8 @@
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
 		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
+	wil_pm_runtime_put(wil);
+
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
 	return 0;
@@ -67,6 +74,7 @@
 				       struct ethtool_coalesce *cp)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int ret;
 
 	wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
 		     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
@@ -86,8 +94,15 @@
 
 	wil->tx_max_burst_duration = cp->tx_coalesce_usecs;
 	wil->rx_max_burst_duration = cp->rx_coalesce_usecs;
+
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	wil_configure_interrupt_moderation(wil);
 
+	wil_pm_runtime_put(wil);
+
 	return 0;
 
 out_bad:
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index e01acac..7a33792 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -124,24 +124,19 @@
 	return 0;
 }
 
-static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
-			     size_t size)
-{
-	wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, data, size, true);
-
-	return 0;
-}
-
 static int
-fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
-		       size_t size)
+fw_handle_comment(struct wil6210_priv *wil, const void *data,
+		  size_t size)
 {
 	const struct wil_fw_record_capabilities *rec = data;
 	size_t capa_size;
 
 	if (size < sizeof(*rec) ||
-	    le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+	    le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC) {
+		wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1,
+				data, size, true);
 		return 0;
+	}
 
 	capa_size = size - offsetof(struct wil_fw_record_capabilities,
 				    capabilities);
@@ -422,7 +417,7 @@
 	int (*parse_handler)(struct wil6210_priv *wil, const void *data,
 			     size_t size);
 } wil_fw_handlers[] = {
-	{wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+	{wil_fw_type_comment, fw_handle_comment, fw_handle_comment},
 	{wil_fw_type_data, fw_handle_data, fw_ignore_section},
 	{wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
 	/* wil_fw_type_action */
@@ -517,7 +512,7 @@
 
 	rc = request_firmware(&fw, name, wil_to_dev(wil));
 	if (rc) {
-		wil_err_fw(wil, "Failed to load firmware %s\n", name);
+		wil_err_fw(wil, "Failed to load firmware %s rc %d\n", name, rc);
 		return rc;
 	}
 	wil_dbg_fw(wil, "Loading <%s>, %zu bytes\n", name, fw->size);
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f8d2c20..25bc439 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -221,6 +221,10 @@
 {
 	int ret;
 
+	ret = wil_pm_runtime_get(wil);
+	if (ret < 0)
+		return ret;
+
 	switch (cmd) {
 	case WIL_IOCTL_MEMIO:
 		ret = wil_ioc_memio_dword(wil, data);
@@ -233,9 +237,12 @@
 		break;
 	default:
 		wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
+		wil_pm_runtime_put(wil);
 		return -ENOIOCTLCMD;
 	}
 
+	wil_pm_runtime_put(wil);
+
 	wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
 	return ret;
 }
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index b91298d..c4faa2c 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -765,6 +765,8 @@
 	u8 retry_short;
 	int rc;
 
+	wil_refresh_fw_capabilities(wil);
+
 	rc = wmi_get_mgmt_retry(wil, &retry_short);
 	if (!rc) {
 		wiphy->retry_short = retry_short;
@@ -772,6 +774,25 @@
 	}
 }
 
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
+{
+	struct wiphy *wiphy = wil_to_wiphy(wil);
+
+	wil->keep_radio_on_during_sleep =
+		wil->platform_ops.keep_radio_on_during_sleep &&
+		wil->platform_ops.keep_radio_on_during_sleep(
+			wil->platform_handle) &&
+		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+		 wil->keep_radio_on_during_sleep);
+
+	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	else
+		wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+}
+
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 {
 	le32_to_cpus(&r->base);
@@ -1080,14 +1101,14 @@
 			return rc;
 		}
 
+		wil_collect_fw_info(wil);
+
 		if (wil->ps_profile != WMI_PS_PROFILE_TYPE_DEFAULT)
 			wil_ps_update(wil, wil->ps_profile);
 
 		if (wil->tt_data_set)
 			wmi_set_tt_cfg(wil, &wil->tt_data);
 
-		wil_collect_fw_info(wil);
-
 		if (wil->platform_ops.notify) {
 			rc = wil->platform_ops.notify(wil->platform_handle,
 						      WIL_PLATFORM_EVT_FW_RDY);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index d80e7f4..40cd32a 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -26,6 +26,7 @@
 static int wil_open(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "open\n");
 
@@ -35,16 +36,29 @@
 		return -EINVAL;
 	}
 
-	return wil_up(wil);
+	rc = wil_pm_runtime_get(wil);
+	if (rc < 0)
+		return rc;
+
+	rc = wil_up(wil);
+	if (rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_stop(struct net_device *ndev)
 {
 	struct wil6210_priv *wil = ndev_to_wil(ndev);
+	int rc;
 
 	wil_dbg_misc(wil, "stop\n");
 
-	return wil_down(wil);
+	rc = wil_down(wil);
+	if (!rc)
+		wil_pm_runtime_put(wil);
+
+	return rc;
 }
 
 static int wil_change_mtu(struct net_device *ndev, int new_mtu)
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 42a5235..663e163 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include "wil6210.h"
 #include <linux/rtnetlink.h>
+#include <linux/pm_runtime.h>
 
 static bool use_msi = true;
 module_param(use_msi, bool, 0444);
@@ -84,9 +85,7 @@
 
 	/* extract FW capabilities from file without loading the FW */
 	wil_request_firmware(wil, wil->wil_fw_name, false);
-
-	if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
-		wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+	wil_refresh_fw_capabilities(wil);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -289,15 +288,6 @@
 	wil_set_capabilities(wil);
 	wil6210_clear_irq(wil);
 
-	wil->keep_radio_on_during_sleep =
-		wil->platform_ops.keep_radio_on_during_sleep &&
-		wil->platform_ops.keep_radio_on_during_sleep(
-			wil->platform_handle) &&
-		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
-
-	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
-		 wil->keep_radio_on_during_sleep);
-
 	/* FW should raise IRQ when ready */
 	rc = wil_if_pcie_enable(wil);
 	if (rc) {
@@ -327,6 +317,8 @@
 	wil6210_debugfs_init(wil);
 	wil6210_sysfs_init(wil);
 
+	wil_pm_runtime_allow(wil);
+
 	return 0;
 
 bus_disable:
@@ -359,6 +351,8 @@
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
 
+	wil_pm_runtime_forbid(wil);
+
 	wil6210_sysfs_remove(wil);
 	wil6210_debugfs_remove(wil);
 	rtnl_lock();
@@ -488,10 +482,40 @@
 }
 #endif /* CONFIG_PM_SLEEP */
 
+static int wil6210_pm_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "Runtime idle\n");
+
+	return wil_can_suspend(wil, true);
+}
+
+static int wil6210_pm_runtime_resume(struct device *dev)
+{
+	return wil6210_resume(dev, true);
+}
+
+static int wil6210_pm_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 1;
+	}
+
+	return wil6210_suspend(dev, true);
+}
 #endif /* CONFIG_PM */
 
 static const struct dev_pm_ops wil6210_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+	SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
+			   wil6210_pm_runtime_resume,
+			   wil6210_pm_runtime_idle)
 };
 
 static struct pci_driver wil6210_driver = {
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 8f5d1b44..2ef2f34 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -16,15 +16,26 @@
 
 #include "wil6210.h"
 #include <linux/jiffies.h>
+#include <linux/pm_runtime.h>
+
+#define WIL6210_AUTOSUSPEND_DELAY_MS (1000)
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
 	int rc = 0;
 	struct wireless_dev *wdev = wil->wdev;
 	struct net_device *ndev = wil_to_ndev(wil);
+	bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+				 wil->fw_capabilities);
 
 	wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
+	if (wmi_only || debug_fw) {
+		wil_dbg_pm(wil, "Deny any suspend - %s mode\n",
+			   wmi_only ? "wmi_only" : "debug_fw");
+		rc = -EPERM;
+		goto out;
+	}
 	if (!(ndev->flags & IFF_UP)) {
 		/* can always sleep when down */
 		wil_dbg_pm(wil, "Interface is down\n");
@@ -44,6 +55,10 @@
 	/* interface is running */
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_MONITOR:
+		wil_dbg_pm(wil, "Sniffer\n");
+		rc = -EBUSY;
+		goto out;
+	/* for STA-like interface, don't runtime suspend */
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(wil_status_fwconnecting, wil->status)) {
@@ -51,6 +66,12 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		/* Runtime pm not supported in case the interface is up */
+		if (is_runtime) {
+			wil_dbg_pm(wil, "STA-like interface\n");
+			rc = -EBUSY;
+			goto out;
+		}
 		break;
 	/* AP-like interface - can't suspend */
 	default:
@@ -348,3 +369,44 @@
 		   is_runtime ? "runtime" : "system", rc, suspend_time_usec);
 	return rc;
 }
+
+void wil_pm_runtime_allow(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_put_noidle(dev);
+	pm_runtime_set_autosuspend_delay(dev, WIL6210_AUTOSUSPEND_DELAY_MS);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_allow(dev);
+}
+
+void wil_pm_runtime_forbid(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_forbid(dev);
+	pm_runtime_get_noresume(dev);
+}
+
+int wil_pm_runtime_get(struct wil6210_priv *wil)
+{
+	int rc;
+	struct device *dev = wil_to_dev(wil);
+
+	rc = pm_runtime_get_sync(dev);
+	if (rc < 0) {
+		wil_err(wil, "pm_runtime_get_sync() failed, rc = %d\n", rc);
+		pm_runtime_put_noidle(dev);
+		return rc;
+	}
+
+	return 0;
+}
+
+void wil_pm_runtime_put(struct wil6210_priv *wil)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d5b8ea6..8616f86 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -621,6 +621,16 @@
 	u32 off_ms;
 };
 
+struct wil_debugfs_iomem_data {
+	void *offset;
+	struct wil6210_priv *wil;
+};
+
+struct wil_debugfs_data {
+	struct wil_debugfs_iomem_data *data_arr;
+	int iomem_data_count;
+};
+
 extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
 extern u8 led_id;
 extern u8 led_polarity;
@@ -713,6 +723,7 @@
 	u8 abft_len;
 	u8 wakeup_trigger;
 	struct wil_suspend_stats suspend_stats;
+	struct wil_debugfs_data dbg_data;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
@@ -870,6 +881,7 @@
 int __wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
 int __wil_down(struct wil6210_priv *wil);
+void wil_refresh_fw_capabilities(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
 int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 void wil_set_ethtoolops(struct net_device *ndev);
@@ -1014,6 +1026,11 @@
 			 bool load);
 bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
+void wil_pm_runtime_allow(struct wil6210_priv *wil);
+void wil_pm_runtime_forbid(struct wil6210_priv *wil);
+int wil_pm_runtime_get(struct wil6210_priv *wil);
+void wil_pm_runtime_put(struct wil6210_priv *wil);
+
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_resume(struct wil6210_priv *wil, bool is_runtime);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b85398c..261a0da 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5419,6 +5419,7 @@
 	struct ieee80211_supported_band *band;
 	struct brcmf_bss_info_le *bi;
 	struct brcmu_chan ch;
+	struct cfg80211_roam_info roam_info = {};
 	u32 freq;
 	s32 err = 0;
 	u8 *buf;
@@ -5457,9 +5458,15 @@
 
 done:
 	kfree(buf);
-	cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
-			conn_info->req_ie, conn_info->req_ie_len,
-			conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
+
+	roam_info.channel = notify_channel;
+	roam_info.bssid = profile->bssid;
+	roam_info.req_ie = conn_info->req_ie;
+	roam_info.req_ie_len = conn_info->req_ie_len;
+	roam_info.resp_ie = conn_info->resp_ie;
+	roam_info.resp_ie_len = conn_info->resp_ie_len;
+
+	cfg80211_roamed(ndev, &roam_info, GFP_KERNEL);
 	brcmf_dbg(CONN, "Report roaming result\n");
 
 	set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 8744b9b..8e3c6f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4161,11 +4161,6 @@
 		goto fail;
 	}
 
-	/* allocate scatter-gather table. sg support
-	 * will be disabled upon allocation failure.
-	 */
-	brcmf_sdiod_sgtable_alloc(bus->sdiodev);
-
 	/* Query the F2 block size, set roundup accordingly */
 	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
 	bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 4b97371..838946d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -1190,11 +1190,11 @@
 				next_reclaimed;
 			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
 						  next_reclaimed);
+			iwlagn_check_ratid_empty(priv, sta_id, tid);
 		}
 
 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
-		iwlagn_check_ratid_empty(priv, sta_id, tid);
 		freed = 0;
 
 		/* process frames */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2f8134b..177fd5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -429,6 +429,7 @@
 	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
 
 /* 8000 Series */
 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index 257a9ea..4ac6764 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -488,7 +488,7 @@
 
 			entry += sizeof(__le16);
 			chan->pa_points_per_curve = 8;
-			memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+			memset(chan->curve_data, 0, sizeof(chan->curve_data));
 			memcpy(chan->curve_data, entry,
 			       sizeof(struct p54_pa_curve_data_sample) *
 			       min((u8)8, curve_data->points_per_channel));
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index afdbbf5..8677a53 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4188,7 +4188,7 @@
 	if (adapter->config_bands & BAND_A)
 		n_channels_a = mwifiex_band_5ghz.n_channels;
 
-	adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+	adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
 	adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
 				      adapter->num_in_chan_stats);
 
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 97c9765..78d59a6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -2479,6 +2479,12 @@
 					      sizeof(struct mwifiex_chan_stats);
 
 	for (i = 0 ; i < num_chan; i++) {
+		if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+			mwifiex_dbg(adapter, WARN,
+				    "FW reported too many channel results (max %d)\n",
+				    adapter->num_in_chan_stats);
+			return;
+		}
 		chan_stats.chan_num = fw_chan_stats->chan_num;
 		chan_stats.bandcfg = fw_chan_stats->bandcfg;
 		chan_stats.flags = fw_chan_stats->flags;
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 5be4fc9..75ffeaa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2269,7 +2269,7 @@
 	/* find adapter */
 	if (!_rtl_pci_find_adapter(pdev, hw)) {
 		err = -ENODEV;
-		goto fail3;
+		goto fail2;
 	}
 
 	/* Init IO handler */
@@ -2339,10 +2339,10 @@
 	pci_set_drvdata(pdev, NULL);
 	rtl_deinit_core(hw);
 
+fail2:
 	if (rtlpriv->io.pci_mem_start != 0)
 		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
 
-fail2:
 	pci_release_regions(pdev);
 	complete(&rtlpriv->firmware_loading_complete);
 
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 603c904..280196a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2830,15 +2830,22 @@
 	}
 
 	if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
-		if (!roamed)
+		if (!roamed) {
 			cfg80211_connect_result(usbdev->net, bssid, req_ie,
 						req_ie_len, resp_ie,
 						resp_ie_len, 0, GFP_KERNEL);
-		else
-			cfg80211_roamed(usbdev->net,
-					get_current_channel(usbdev, NULL),
-					bssid, req_ie, req_ie_len,
-					resp_ie, resp_ie_len, GFP_KERNEL);
+		} else {
+			struct cfg80211_roam_info roam_info = {
+				.channel = get_current_channel(usbdev, NULL),
+				.bssid = bssid,
+				.req_ie = req_ie,
+				.req_ie_len = req_ie_len,
+				.resp_ie = resp_ie,
+				.resp_ie_len = resp_ie_len,
+			};
+
+			cfg80211_roamed(usbdev->net, &roam_info, GFP_KERNEL);
+		}
 	} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
 		cfg80211_ibss_joined(usbdev->net, bssid,
 				     get_current_channel(usbdev, NULL),
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index bbf7604..1c539c8 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1571,6 +1571,7 @@
 
 	wl->state = WL1251_STATE_OFF;
 	mutex_init(&wl->mutex);
+	spin_lock_init(&wl->wl_lock);
 
 	wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
 	wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3ce1f7d..cb7365b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -199,6 +199,7 @@
 	unsigned long   remaining_credit;
 	struct timer_list credit_timeout;
 	u64 credit_window_start;
+	bool rate_limited;
 
 	/* Statistics */
 	struct xenvif_stats stats;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b009d79..5bfaf55 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -105,7 +105,11 @@
 
 	if (work_done < budget) {
 		napi_complete(napi);
-		xenvif_napi_schedule_or_enable_events(queue);
+		/* If the queue is rate-limited, it shall be
+		 * rescheduled in the timer callback.
+		 */
+		if (likely(!queue->rate_limited))
+			xenvif_napi_schedule_or_enable_events(queue);
 	}
 
 	return work_done;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 47b4810..d9b5b73 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -179,6 +179,7 @@
 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
 
 	queue->remaining_credit = min(max_credit, max_burst);
+	queue->rate_limited = false;
 }
 
 void xenvif_tx_credit_callback(unsigned long data)
@@ -685,8 +686,10 @@
 		msecs_to_jiffies(queue->credit_usec / 1000);
 
 	/* Timer could already be pending in rare cases. */
-	if (timer_pending(&queue->credit_timeout))
+	if (timer_pending(&queue->credit_timeout)) {
+		queue->rate_limited = true;
 		return true;
+	}
 
 	/* Passed the point where we can replenish credit? */
 	if (time_after_eq64(now, next_credit)) {
@@ -701,6 +704,7 @@
 		mod_timer(&queue->credit_timeout,
 			  next_credit);
 		queue->credit_window_start = next_credit;
+		queue->rate_limited = true;
 
 		return true;
 	}
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 712936f..fbd26ec 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -177,6 +177,16 @@
 		/* Packet that contains a length */
 		if (tmp[0] == 0 && tmp[1] == 0) {
 			phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3;
+			/*
+			 * Ensure next_read_size does not exceed sizeof(tmp)
+			 * for reading that many bytes during next iteration
+			 */
+			if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) {
+				dev_dbg(&client->dev, "%s: corrupted packet\n",
+					__func__);
+				phy->next_read_size = 5;
+				goto flush;
+			}
 		} else {
 			phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD;
 
diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c
index 798a32b..2062852 100644
--- a/drivers/nfc/st21nfca/dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -217,7 +217,8 @@
 
 	atr_req = (struct st21nfca_atr_req *)skb->data;
 
-	if (atr_req->length < sizeof(struct st21nfca_atr_req)) {
+	if (atr_req->length < sizeof(struct st21nfca_atr_req) ||
+	    atr_req->length > skb->len) {
 		r = -EPROTO;
 		goto exit;
 	}
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 3a98563..6e84e12 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -320,23 +320,33 @@
 		 * AID		81	5 to 16
 		 * PARAMETERS	82	0 to 255
 		 */
-		if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
+		if (skb->len < NFC_MIN_AID_LENGTH + 2 ||
 		    skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
 			return -EPROTO;
 
+		/*
+		 * Buffer should have enough space for at least
+		 * two tag fields + two length fields + aid_len (skb->data[1])
+		 */
+		if (skb->len < skb->data[1] + 4)
+			return -EPROTO;
+
 		transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
 						   skb->len - 2, GFP_KERNEL);
 
 		transaction->aid_len = skb->data[1];
 		memcpy(transaction->aid, &skb->data[2],
 		       transaction->aid_len);
-
-		/* Check next byte is PARAMETERS tag (82) */
-		if (skb->data[transaction->aid_len + 2] !=
-		    NFC_EVT_TRANSACTION_PARAMS_TAG)
-			return -EPROTO;
-
 		transaction->params_len = skb->data[transaction->aid_len + 3];
+
+		/* Check next byte is PARAMETERS tag (82) and the length field */
+		if (skb->data[transaction->aid_len + 2] !=
+		    NFC_EVT_TRANSACTION_PARAMS_TAG ||
+		    skb->len < transaction->aid_len + transaction->params_len + 4) {
+			devm_kfree(dev, transaction);
+			return -EPROTO;
+		}
+
 		memcpy(transaction->params, skb->data +
 		       transaction->aid_len + 4, transaction->params_len);
 
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index c234ee43..24222a5 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -176,14 +176,12 @@
 	u64 rx_err_ver;
 	u64 rx_memcpy;
 	u64 rx_async;
-	u64 dma_rx_prep_err;
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_ring_full;
 	u64 tx_err_no_buf;
 	u64 tx_memcpy;
 	u64 tx_async;
-	u64 dma_tx_prep_err;
 };
 
 struct ntb_transport_mw {
@@ -256,8 +254,6 @@
 #define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
 #define NTB_QP_DEF_NUM_ENTRIES	100
 #define NTB_LINK_DOWN_TIMEOUT	10
-#define DMA_RETRIES		20
-#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
 
 static void ntb_transport_rxc_db(unsigned long data);
 static const struct ntb_ctx_ops ntb_transport_ops;
@@ -518,12 +514,6 @@
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "free tx - \t%u\n",
 			       ntb_transport_tx_free_entry(qp));
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA tx prep err - \t%llu\n",
-			       qp->dma_tx_prep_err);
-	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "DMA rx prep err - \t%llu\n",
-			       qp->dma_rx_prep_err);
 
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "\n");
@@ -625,7 +615,7 @@
 	if (!mw->virt_addr)
 		return -ENOMEM;
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
@@ -770,8 +760,6 @@
 	qp->tx_err_no_buf = 0;
 	qp->tx_memcpy = 0;
 	qp->tx_async = 0;
-	qp->dma_tx_prep_err = 0;
-	qp->dma_rx_prep_err = 0;
 }
 
 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -933,10 +921,8 @@
 		ntb_free_mw(nt, i);
 
 	/* if there's an actual failure, we should just bail */
-	if (rc < 0) {
-		ntb_link_disable(ndev);
+	if (rc < 0)
 		return;
-	}
 
 out:
 	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@@ -1002,7 +988,7 @@
 	qp->event_handler = NULL;
 	ntb_qp_link_down_reset(qp);
 
-	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+	if (mw_num < qp_count % mw_count)
 		num_qps_mw = qp_count / mw_count + 1;
 	else
 		num_qps_mw = qp_count / mw_count;
@@ -1125,8 +1111,8 @@
 	qp_count = ilog2(qp_bitmap);
 	if (max_num_clients && max_num_clients < qp_count)
 		qp_count = max_num_clients;
-	else if (mw_count < qp_count)
-		qp_count = mw_count;
+	else if (nt->mw_count < qp_count)
+		qp_count = nt->mw_count;
 
 	qp_bitmap &= BIT_ULL(qp_count) - 1;
 
@@ -1314,7 +1300,6 @@
 	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
-	int retries = 0;
 
 	len = entry->len;
 	device = chan->device;
@@ -1343,22 +1328,11 @@
 
 	unmap->from_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan,
-						     unmap->addr[1],
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_rx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
@@ -1603,7 +1577,6 @@
 	struct dmaengine_unmap_data *unmap;
 	dma_addr_t dest;
 	dma_cookie_t cookie;
-	int retries = 0;
 
 	device = chan->device;
 	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1625,21 +1598,10 @@
 
 	unmap->to_cnt = 1;
 
-	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, dest,
-						     unmap->addr[0], len,
-						     DMA_PREP_INTERRUPT);
-		if (txd)
-			break;
-
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(DMA_OUT_RESOURCE_TO);
-	}
-
-	if (!txd) {
-		qp->dma_tx_prep_err++;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
 		goto err_get_unmap;
-	}
 
 	txd->callback_result = ntb_tx_copy_callback;
 	txd->callback_param = entry;
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a3f008..eef1a68 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -77,7 +77,7 @@
 	kref_init(&host->ref);
 	uuid_be_gen(&host->id);
 	snprintf(host->nqn, NVMF_NQN_SIZE,
-		"nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
+		"nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id);
 
 	mutex_lock(&nvmf_hosts_mutex);
 	list_add_tail(&host->list, &nvmf_hosts);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 5c63b92..ed92c12 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -956,7 +956,7 @@
 
 	dino_dev->hba.dev = dev;
 	dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
-	dino_dev->hba.lmmio_space_offset = 0;	/* CPU addrs == bus addrs */
+	dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
 	spin_lock_init(&dino_dev->dinosaur_pen);
 	dino_dev->hba.iommu = ccio_get_iommu(dev);
 
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 9931be6..04d6fd2 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -343,9 +343,9 @@
 
 static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
 static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
-static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 };
-static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 };
-static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 };
+static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
+static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
+static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
 static const unsigned int mrfld_pwm0_pins[] = { 144 };
 static const unsigned int mrfld_pwm1_pins[] = { 145 };
 static const unsigned int mrfld_pwm2_pins[] = { 132 };
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa..7511723 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -85,6 +85,7 @@
 	MESON_PIN(GPIODV_15, EE_OFF),
 	MESON_PIN(GPIODV_16, EE_OFF),
 	MESON_PIN(GPIODV_17, EE_OFF),
+	MESON_PIN(GPIODV_18, EE_OFF),
 	MESON_PIN(GPIODV_19, EE_OFF),
 	MESON_PIN(GPIODV_20, EE_OFF),
 	MESON_PIN(GPIODV_21, EE_OFF),
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index d32fa2b..e8aee6d 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -195,8 +195,6 @@
 
 	spin_unlock_irqrestore(&bank->slock, flags);
 
-	exynos_irq_unmask(irqd);
-
 	return 0;
 }
 
@@ -217,8 +215,6 @@
 	shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
 	mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
 
-	exynos_irq_mask(irqd);
-
 	spin_lock_irqsave(&bank->slock, flags);
 
 	con = readl(d->virt_base + reg_con);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 862a096..be5c71d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -811,6 +811,7 @@
 		  SUNXI_FUNCTION(0x2, "lcd1"),		/* D16 */
 		  SUNXI_FUNCTION(0x3, "pata"),		/* ATAD12 */
 		  SUNXI_FUNCTION(0x4, "keypad"),	/* IN6 */
+		  SUNXI_FUNCTION(0x5, "sim"),		/* DET */
 		  SUNXI_FUNCTION_IRQ(0x6, 16),		/* EINT16 */
 		  SUNXI_FUNCTION(0x7, "csi1")),		/* D16 */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 77a0236..b190904 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -508,57 +508,71 @@
 static const int usb1_muxvals[] = {0, 0};
 static const unsigned usb2_pins[] = {50, 51};
 static const int usb2_muxvals[] = {0, 0};
-static const unsigned port_range_pins[] = {
+static const unsigned port_range0_pins[] = {
 	159, 160, 161, 162, 163, 164, 165, 166,		/* PORT0x */
 	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT1x */
 	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT2x */
-	16, 17, 18, -1, -1, -1, -1, -1,			/* PORT3x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT4x */
-	-1, -1, -1, 46, 47, 48, 49, 50,			/* PORT5x */
-	51, -1, -1, 54, 55, 56, 57, 58,			/* PORT6x */
+	16, 17, 18,					/* PORT30-32 */
+};
+static const int port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15,					/* PORT30-32 */
+};
+static const unsigned port_range1_pins[] = {
+	46, 47, 48, 49, 50,				/* PORT53-57 */
+	51,						/* PORT60 */
+};
+static const int port_range1_muxvals[] = {
+	15, 15, 15, 15, 15,				/* PORT53-57 */
+	15,						/* PORT60 */
+};
+static const unsigned port_range2_pins[] = {
+	54, 55, 56, 57, 58,				/* PORT63-67 */
 	59, 60, 69, 70, 71, 72, 73, 74,			/* PORT7x */
 	75, 76, 77, 78, 79, 80, 81, 82,			/* PORT8x */
 	83, 84, 85, 86, 87, 88, 89, 90,			/* PORT9x */
 	91, 92, 93, 94, 95, 96, 97, 98,			/* PORT10x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT11x */
-	99, 100, 101, 102, 103, 104, 105, 106,		/* PORT12x */
-	107, 108, 109, 110, 111, 112, 113, 114,		/* PORT13x */
-	115, 116, 117, 118, 119, 120, 121, 122,		/* PORT14x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT15x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT16x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT17x */
-	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT18x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT19x */
-	123, 124, 125, 126, 127, 128, 129, 130,		/* PORT20x */
-	131, 132, 133, 134, 135, 136, 137, 138,		/* PORT21x */
-	139, 140, 141, 142, -1, -1, -1, -1,		/* PORT22x */
-	147, 148, 149, 150, 151, 152, 153, 154,		/* PORT23x */
-	155, 156, 157, 143, 144, 145, 146, 158,		/* PORT24x */
 };
-static const int port_range_muxvals[] = {
-	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
-	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
-	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
-	15, 15, 15, -1, -1, -1, -1, -1,			/* PORT3x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT4x */
-	-1, -1, -1, 15, 15, 15, 15, 15,			/* PORT5x */
-	15, -1, -1, 15, 15, 15, 15, 15,			/* PORT6x */
+static const int port_range2_muxvals[] = {
+	15, 15, 15, 15, 15,				/* PORT63-67 */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT11x */
+};
+static const unsigned port_range3_pins[] = {
+	99, 100, 101, 102, 103, 104, 105, 106,		/* PORT12x */
+	107, 108, 109, 110, 111, 112, 113, 114,		/* PORT13x */
+	115, 116, 117, 118, 119, 120, 121, 122,		/* PORT14x */
+};
+static const int port_range3_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT15x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT16x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT17x */
+};
+static const unsigned port_range4_pins[] = {
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT18x */
+};
+static const int port_range4_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT19x */
+};
+static const unsigned port_range5_pins[] = {
+	123, 124, 125, 126, 127, 128, 129, 130,		/* PORT20x */
+	131, 132, 133, 134, 135, 136, 137, 138,		/* PORT21x */
+	139, 140, 141, 142,				/* PORT220-223 */
+};
+static const int port_range5_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
-	15, 15, 15, 15, -1, -1, -1, -1,			/* PORT22x */
+	15, 15, 15, 15,					/* PORT220-223 */
+};
+static const unsigned port_range6_pins[] = {
+	147, 148, 149, 150, 151, 152, 153, 154,		/* PORT23x */
+	155, 156, 157, 143, 144, 145, 146, 158,		/* PORT24x */
+};
+static const int port_range6_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
 };
@@ -607,147 +621,153 @@
 	UNIPHIER_PINCTRL_GROUP(usb0),
 	UNIPHIER_PINCTRL_GROUP(usb1),
 	UNIPHIER_PINCTRL_GROUP(usb2),
-	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range4),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range5),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range6),
 	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
 	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range2, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range2, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range2, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range2, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range2, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range2, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range2, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range2, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range2, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range2, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range2, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range2, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range2, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range2, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range2, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range2, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range2, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range2, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range2, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range2, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range2, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range2, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range2, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range2, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range2, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range2, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range2, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range2, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range2, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range2, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range2, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range2, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range2, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range2, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range2, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range2, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range2, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range3, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range3, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range3, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range3, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range3, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range3, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range3, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range3, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range3, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range3, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range3, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range3, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range3, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range3, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range3, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range3, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range3, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range3, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range3, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range3, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range3, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range3, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range3, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range3, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range4, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range4, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range4, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range4, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range4, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range4, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range4, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range4, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range5, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range5, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range5, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range5, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range5, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range5, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range5, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range5, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range5, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range5, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range5, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range5, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range5, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range5, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range5, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range5, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range5, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range5, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range5, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range5, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range6, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range6, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range6, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range6, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range6, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range6, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range6, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range6, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range6, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range6, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range6, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range6, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range6, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range6, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range6, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range6, 15),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index 9668633..73b828b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -597,7 +597,7 @@
 static const int usb2_muxvals[] = {0, 0};
 static const unsigned usb3_pins[] = {52, 53};
 static const int usb3_muxvals[] = {0, 0};
-static const unsigned port_range_pins[] = {
+static const unsigned port_range0_pins[] = {
 	168, 169, 170, 171, 172, 173, 174, 175,		/* PORT0x */
 	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT1x */
 	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT2x */
@@ -609,23 +609,8 @@
 	75, 76, 77, 78, 79, 80, 81, 82,			/* PORT8x */
 	83, 84, 85, 86, 87, 88, 89, 90,			/* PORT9x */
 	91, 92, 93, 94, 95, 96, 97, 98,			/* PORT10x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT11x */
-	99, 100, 101, 102, 103, 104, 105, 106,		/* PORT12x */
-	107, 108, 109, 110, 111, 112, 113, 114,		/* PORT13x */
-	115, 116, 117, 118, 119, 120, 121, 122,		/* PORT14x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT15x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT16x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT17x */
-	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT18x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT19x */
-	123, 124, 125, 126, 127, 128, 129, 130,		/* PORT20x */
-	131, 132, 133, 134, 135, 136, 137, 138,		/* PORT21x */
-	139, 140, 141, 142, 143, 144, 145, 146,		/* PORT22x */
-	147, 148, 149, 150, 151, 152, 153, 154,		/* PORT23x */
-	155, 156, 157, 158, 159, 160, 161, 162,		/* PORT24x */
-	163, 164, 165, 166, 167,			/* PORT25x */
 };
-static const int port_range_muxvals[] = {
+static const int port_range0_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
@@ -637,21 +622,38 @@
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT11x */
+};
+static const unsigned port_range1_pins[] = {
+	99, 100, 101, 102, 103, 104, 105, 106,		/* PORT12x */
+	107, 108, 109, 110, 111, 112, 113, 114,		/* PORT13x */
+	115, 116, 117, 118, 119, 120, 121, 122,		/* PORT14x */
+};
+static const int port_range1_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT15x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT16x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT17x */
+};
+static const unsigned port_range2_pins[] = {
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT18x */
+};
+static const int port_range2_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
-	-1, -1, -1, -1, -1, -1, -1, -1,			/* PORT19x */
+};
+static const unsigned port_range3_pins[] = {
+	123, 124, 125, 126, 127, 128, 129, 130,		/* PORT20x */
+	131, 132, 133, 134, 135, 136, 137, 138,		/* PORT21x */
+	139, 140, 141, 142, 143, 144, 145, 146,		/* PORT22x */
+	147, 148, 149, 150, 151, 152, 153, 154,		/* PORT23x */
+	155, 156, 157, 158, 159, 160, 161, 162,		/* PORT24x */
+	163, 164, 165, 166, 167,			/* PORT250-254 */
+};
+static const int port_range3_muxvals[] = {
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
 	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
-	15, 15, 15, 15, 15,				/* PORT25x */
+	15, 15, 15, 15, 15,				/* PORT250-254 */
 };
 static const unsigned xirq_pins[] = {
 	149, 150, 151, 152, 153, 154, 155, 156,		/* XIRQ0-7 */
@@ -695,174 +697,177 @@
 	UNIPHIER_PINCTRL_GROUP(usb1),
 	UNIPHIER_PINCTRL_GROUP(usb2),
 	UNIPHIER_PINCTRL_GROUP(usb3),
-	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
 	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
 	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203),
-	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range2, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range2, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range2, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range2, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range2, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range2, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range2, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range2, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range3, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range3, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range3, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range3, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range3, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range3, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range3, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range3, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range3, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range3, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range3, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range3, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range3, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range3, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range3, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range3, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range3, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range3, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range3, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range3, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range3, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range3, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range3, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range3, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range3, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range3, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range3, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range3, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range3, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range3, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range3, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range3, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range3, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range3, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range3, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range3, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range3, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range3, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range3, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range3, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range3, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range3, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range3, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range3, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range3, 44),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
 	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index f5f783c..c02881b 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2598,15 +2598,16 @@
 	if (curr == GSI_CHAN_MODE_CALLBACK &&
 			mode == GSI_CHAN_MODE_POLL) {
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
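+		/* update poll_mode while the IEOB interrupt is masked */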
+		atomic_set(&ctx->poll_mode, mode);
 		ctx->stats.callback_to_poll++;
 	}
 
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
+		atomic_set(&ctx->poll_mode, mode);
 		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
 		ctx->stats.poll_to_callback++;
 	}
-	atomic_set(&ctx->poll_mode, mode);
 	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
 
 	return GSI_STATUS_SUCCESS;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index 105294a..2975192 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -19,6 +19,8 @@
 #include <linux/sched.h>
 #include <linux/atomic.h>
 #include <linux/ecm_ipa.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
 
 #define DRIVER_NAME "ecm_ipa"
 #define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
@@ -120,6 +122,7 @@
  * @usb_to_ipa_client: producer client
  * @ipa_rm_resource_name_prod: IPA resource manager producer resource
  * @ipa_rm_resource_name_cons: IPA resource manager consumer resource
+ * @pm_hdl: handle for IPA PM
  */
 struct ecm_ipa_dev {
 	struct net_device *net;
@@ -137,6 +140,7 @@
 	enum ipa_client_type usb_to_ipa_client;
 	enum ipa_rm_resource_name ipa_rm_resource_name_prod;
 	enum ipa_rm_resource_name ipa_rm_resource_name_cons;
+	u32 pm_hdl;
 };
 
 static int ecm_ipa_open(struct net_device *net);
@@ -158,6 +162,8 @@
 static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
 static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx);
 static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
 static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
 static netdev_tx_t ecm_ipa_start_xmit
@@ -403,27 +409,34 @@
 	ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
 		      ecm_ipa_ctx->usb_to_ipa_client);
 
-	ecm_ipa_ctx->ipa_rm_resource_name_cons =
-		ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
-	if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
-		ECM_IPA_ERROR("Error getting CONS RM resource from handle %d\n",
+	if (ipa_pm_is_used()) {
+		retval = ecm_ipa_register_pm_client(ecm_ipa_ctx);
+	} else {
+		ecm_ipa_ctx->ipa_rm_resource_name_cons =
+			ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
+		if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
+			ECM_IPA_ERROR(
+			"Error getting CONS RM resource from handle %d\n",
+				      ecm_ipa_ctx->ipa_rm_resource_name_cons);
+			return -EINVAL;
+		}
+		ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
 			      ecm_ipa_ctx->ipa_rm_resource_name_cons);
-		return -EINVAL;
-	}
-	ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
-		      ecm_ipa_ctx->ipa_rm_resource_name_cons);
 
-	ecm_ipa_ctx->ipa_rm_resource_name_prod =
-		ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
-	if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
-		ECM_IPA_ERROR("Error getting PROD RM resource from handle %d\n",
+		ecm_ipa_ctx->ipa_rm_resource_name_prod =
+			ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
+		if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
+			ECM_IPA_ERROR(
+			"Error getting PROD RM resource from handle %d\n",
+				      ecm_ipa_ctx->ipa_rm_resource_name_prod);
+			return -EINVAL;
+		}
+		ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
 			      ecm_ipa_ctx->ipa_rm_resource_name_prod);
-		return -EINVAL;
-	}
-	ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
-		      ecm_ipa_ctx->ipa_rm_resource_name_prod);
 
-	retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
+		retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
+	}
+
 	if (retval) {
 		ECM_IPA_ERROR("fail on RM create\n");
 		goto fail_create_rm;
@@ -488,7 +501,10 @@
 fail:
 	ecm_ipa_deregister_properties();
 fail_create_rm:
-	ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+	if (ipa_pm_is_used())
+		ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
+	else
+		ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
 	return retval;
 }
 EXPORT_SYMBOL(ecm_ipa_connect);
@@ -746,7 +762,10 @@
 	netif_stop_queue(ecm_ipa_ctx->net);
 	ECM_IPA_DEBUG("queue stopped\n");
 
-	ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+	if (ipa_pm_is_used())
+		ecm_ipa_deregister_pm_client(ecm_ipa_ctx);
+	else
+		ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
 
 	outstanding_dropped_pkts =
 		atomic_read(&ecm_ipa_ctx->outstanding_pkts);
@@ -1117,15 +1136,65 @@
 	ECM_IPA_LOG_EXIT();
 }
 
+static void ecm_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = p;
+
+	ECM_IPA_LOG_ENTRY();
+	if (event != IPA_PM_CLIENT_ACTIVATED) {
+		ECM_IPA_ERROR("unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	if (netif_queue_stopped(ecm_ipa_ctx->net)) {
+		ECM_IPA_DEBUG("Resource Granted - starting queue\n");
+		netif_start_queue(ecm_ipa_ctx->net);
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
+static int ecm_ipa_register_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = ecm_ipa_ctx->net->name;
+	pm_reg.user_data = ecm_ipa_ctx;
+	pm_reg.callback = ecm_ipa_pm_cb;
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &ecm_ipa_ctx->pm_hdl);
+	if (result) {
+		ECM_IPA_ERROR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static void ecm_ipa_deregister_pm_client(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	ipa_pm_deactivate_sync(ecm_ipa_ctx->pm_hdl);
+	ipa_pm_deregister(ecm_ipa_ctx->pm_hdl);
+	ecm_ipa_ctx->pm_hdl = ~0;
+}
+
 static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
 {
+	if (ipa_pm_is_used())
+		return ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
+
 	return ipa_rm_inactivity_timer_request_resource(
 		IPA_RM_RESOURCE_STD_ECM_PROD);
 }
 
 static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
 {
-	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+	if (ipa_pm_is_used())
+		ipa_pm_deferred_deactivate(ecm_ipa_ctx->pm_hdl);
+	else
+		ipa_rm_inactivity_timer_release_resource(
+			IPA_RM_RESOURCE_STD_ECM_PROD);
 }
 
 /**
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 9b3b53d..4d3113f 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -19,6 +19,7 @@
 #include <linux/ipa_qmi_service_v01.h>
 #include <linux/ipa_mhi.h>
 #include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
 
 #define IPA_MHI_DRV_NAME "ipa_mhi_client"
 #define IPA_MHI_DBG(fmt, args...) \
@@ -136,6 +137,8 @@
 	u32 use_ipadma;
 	bool assert_bit40;
 	bool test_mode;
+	u32 pm_hdl;
+	u32 modem_pm_hdl;
 };
 
 static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
@@ -834,25 +837,38 @@
 	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
 		ipa_mhi_client_ctx->event_context_array_addr);
 
-	/* Add MHI <-> Q6 dependencies to IPA RM */
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (res && res != -EINPROGRESS) {
-		IPA_MHI_ERR("failed to add dependency %d\n", res);
-		goto fail_add_mhi_q6_dep;
-	}
+	if (ipa_pm_is_used()) {
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("failed activate client %d\n", res);
+			goto fail_pm_activate;
+		}
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("failed activate modem client %d\n", res);
+			goto fail_pm_activate_modem;
+		}
+	} else {
+		/* Add MHI <-> Q6 dependencies to IPA RM */
+		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res && res != -EINPROGRESS) {
+			IPA_MHI_ERR("failed to add dependency %d\n", res);
+			goto fail_add_mhi_q6_dep;
+		}
 
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
-		IPA_RM_RESOURCE_MHI_CONS);
-	if (res && res != -EINPROGRESS) {
-		IPA_MHI_ERR("failed to add dependency %d\n", res);
-		goto fail_add_q6_mhi_dep;
-	}
+		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
+		if (res && res != -EINPROGRESS) {
+			IPA_MHI_ERR("failed to add dependency %d\n", res);
+			goto fail_add_q6_mhi_dep;
+		}
 
-	res = ipa_mhi_request_prod();
-	if (res) {
-		IPA_MHI_ERR("failed request prod %d\n", res);
-		goto fail_request_prod;
+		res = ipa_mhi_request_prod();
+		if (res) {
+			IPA_MHI_ERR("failed request prod %d\n", res);
+			goto fail_request_prod;
+		}
 	}
 
 	/* gsi params */
@@ -880,14 +896,23 @@
 	return 0;
 
 fail_init_engine:
-	ipa_mhi_release_prod();
+	if (!ipa_pm_is_used())
+		ipa_mhi_release_prod();
 fail_request_prod:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-		IPA_RM_RESOURCE_MHI_CONS);
+	if (!ipa_pm_is_used())
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
 fail_add_q6_mhi_dep:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
+	if (!ipa_pm_is_used())
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
 fail_add_mhi_q6_dep:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_pm_activate_modem:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_pm_activate:
 	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
 	return res;
 }
@@ -2095,20 +2120,32 @@
 	 */
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	IPA_MHI_DBG("release prod\n");
-	res = ipa_mhi_release_prod();
-	if (res) {
-		IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
-		goto fail_release_prod;
-	}
+	if (ipa_pm_is_used()) {
+		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to deactivate client %d\n", res);
+			goto fail_deactivate_pm;
+		}
+		res = ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to deactivate client %d\n", res);
+			goto fail_deactivate_modem_pm;
+		}
+	} else {
+		IPA_MHI_DBG("release prod\n");
+		res = ipa_mhi_release_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+			goto fail_release_prod;
+		}
 
-	IPA_MHI_DBG("wait for cons release\n");
-	res = ipa_mhi_wait_for_cons_release();
-	if (res) {
-		IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
-		goto fail_release_cons;
+		IPA_MHI_DBG("wait for cons release\n");
+		res = ipa_mhi_wait_for_cons_release();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed\n");
+			goto fail_release_cons;
+		}
 	}
-
 	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
 
 	res = ipa_mhi_suspend_dl(force);
@@ -2132,8 +2169,15 @@
 
 fail_suspend_dl_channel:
 fail_release_cons:
+	if (!ipa_pm_is_used())
 	ipa_mhi_request_prod();
 fail_release_prod:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_deactivate_modem_pm:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_deactivate_pm:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 fail_suspend_ul_channel:
 	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
@@ -2193,10 +2237,23 @@
 		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
 	}
 
-	res = ipa_mhi_request_prod();
-	if (res) {
-		IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
-		goto fail_request_prod;
+	if (ipa_pm_is_used()) {
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to activate client %d\n", res);
+			goto fail_pm_activate;
+		}
+		res = ipa_pm_activate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+		if (res) {
+			IPA_MHI_ERR("fail to activate client %d\n", res);
+			goto fail_pm_activate_modem;
+		}
+	} else {
+		res = ipa_mhi_request_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
+			goto fail_request_prod;
+		}
 	}
 
 	/* resume all UL channels */
@@ -2234,8 +2291,15 @@
 fail_resume_dl_channels2:
 	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
 fail_resume_ul_channels:
-	ipa_mhi_release_prod();
+	if (!ipa_pm_is_used())
+		ipa_mhi_release_prod();
 fail_request_prod:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+fail_pm_activate_modem:
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+fail_pm_activate:
 	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
 fail_resume_dl_channels:
 	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
@@ -2319,6 +2383,85 @@
 	debugfs_remove_recursive(dent);
 }
 
+static void ipa_mhi_delete_rm_resources(void)
+{
+	int res;
+
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED  &&
+		ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+
+		IPA_MHI_DBG("release prod\n");
+		res = ipa_mhi_release_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n",
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("wait for cons release\n");
+		res = ipa_mhi_wait_for_cons_release();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release%d\n",
+				res);
+			goto fail;
+		}
+
+		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+			IPA_MHI_SUSPEND_SLEEP_MAX);
+
+		IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+				IPA_RM_RESOURCE_Q6_PROD,
+				IPA_RM_RESOURCE_MHI_CONS,
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+				IPA_RM_RESOURCE_MHI_PROD,
+				IPA_RM_RESOURCE_Q6_CONS,
+				res);
+			goto fail;
+		}
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_PROD, res);
+		goto fail;
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_CONS, res);
+		goto fail;
+	}
+
+	return;
+fail:
+	ipa_assert();
+}
+
+static void ipa_mhi_deregister_pm(void)
+{
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
+	ipa_mhi_client_ctx->pm_hdl = ~0;
+
+	ipa_pm_deactivate_sync(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_pm_deregister(ipa_mhi_client_ctx->modem_pm_hdl);
+	ipa_mhi_client_ctx->modem_pm_hdl = ~0;
+}
+
 /**
  * ipa_mhi_destroy() - Destroy MHI IPA
  *
@@ -2351,62 +2494,10 @@
 		ipa_uc_mhi_cleanup();
 	}
 
-
-	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED  &&
-			ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
-		IPA_MHI_DBG("release prod\n");
-		res = ipa_mhi_release_prod();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
-			goto fail;
-		}
-		IPA_MHI_DBG("wait for cons release\n");
-		res = ipa_mhi_wait_for_cons_release();
-		if (res) {
-			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
-				res);
-			goto fail;
-		}
-		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
-				IPA_MHI_SUSPEND_SLEEP_MAX);
-
-		IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-			IPA_RM_RESOURCE_MHI_CONS);
-		if (res) {
-			IPA_MHI_ERR(
-				"Error deleting dependency %d->%d, res=%d\n"
-				, IPA_RM_RESOURCE_Q6_PROD,
-				IPA_RM_RESOURCE_MHI_CONS,
-				res);
-			goto fail;
-		}
-		IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
-		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-		if (res) {
-			IPA_MHI_ERR(
-				"Error deleting dependency %d->%d, res=%d\n",
-			IPA_RM_RESOURCE_MHI_PROD,
-			IPA_RM_RESOURCE_Q6_CONS,
-			res);
-			goto fail;
-		}
-	}
-
-	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-	if (res) {
-		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
-			IPA_RM_RESOURCE_MHI_PROD, res);
-		goto fail;
-	}
-
-	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
-	if (res) {
-		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
-			IPA_RM_RESOURCE_MHI_CONS, res);
-		goto fail;
-	}
+	if (ipa_pm_is_used())
+		ipa_mhi_deregister_pm();
+	else
+		ipa_mhi_delete_rm_resources();
 
 	ipa_mhi_debugfs_destroy();
 	destroy_workqueue(ipa_mhi_client_ctx->wq);
@@ -2420,6 +2511,132 @@
 	ipa_assert();
 }
 
+static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	unsigned long flags;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_MHI_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+		ipa_mhi_notify_wakeup();
+	} else if (ipa_mhi_client_ctx->state ==
+		IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
+		/* wakeup event will be trigger after suspend finishes */
+		ipa_mhi_client_ctx->trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_DBG("EXIT");
+}
+
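+/* Register MHI and modem clients with IPA PM (used instead of IPA RM) */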
+static int ipa_mhi_register_pm(void)
+{
+	int res;
+	struct ipa_pm_register_params params;
+
+	memset(&params, 0, sizeof(params));
+	params.name = "MHI";
+	params.callback = ipa_mhi_pm_cb;
+	params.group = IPA_PM_GROUP_DEFAULT;
+	res = ipa_pm_register(&params, &ipa_mhi_client_ctx->pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to register with PM %d\n", res);
+		return res;
+	}
+
+	res = ipa_pm_associate_ipa_cons_to_client(ipa_mhi_client_ctx->pm_hdl,
+		IPA_CLIENT_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("fail to associate cons with PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
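+	/* same 1000 Mbps profile as the legacy IPA RM path */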
+	res = ipa_pm_set_perf_profile(ipa_mhi_client_ctx->pm_hdl, 1000);
+	if (res) {
+		IPA_MHI_ERR("fail to set perf profile to PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
+	/* create a modem client for clock scaling */
+	memset(&params, 0, sizeof(params));
+	params.name = "MODEM (MHI)";
+	params.group = IPA_PM_GROUP_MODEM;
+	params.skip_clk_vote = true;
+	res = ipa_pm_register(&params, &ipa_mhi_client_ctx->modem_pm_hdl);
+	if (res) {
+		IPA_MHI_ERR("fail to register with PM %d\n", res);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ipa_mhi_client_ctx->pm_hdl);
+	ipa_mhi_client_ctx->pm_hdl = ~0;
+	return res;
+}
+
+static int ipa_mhi_create_rm_resources(void)
+{
+	int res;
+	struct ipa_rm_create_params mhi_prod_params;
+	struct ipa_rm_create_params mhi_cons_params;
+	struct ipa_rm_perf_profile profile;
+
+	/* Create PROD in IPA RM */
+	memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
+	mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
+	mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+	mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
+	res = ipa_rm_create_resource(&mhi_prod_params);
+	if (res) {
+		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
+		goto fail_create_rm_prod;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
+		goto fail_perf_rm_prod;
+	}
+
+	/* Create CONS in IPA RM */
+	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
+	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
+	mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+	mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
+	mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
+	res = ipa_rm_create_resource(&mhi_cons_params);
+	if (res) {
+		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
+		goto fail_create_rm_cons;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+		goto fail_perf_rm_cons;
+	}
+
+	return 0;
+
+fail_perf_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+fail_create_rm_cons:
+fail_perf_rm_prod:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+	return res;
+}
+
 /**
  * ipa_mhi_init() - Initialize IPA MHI driver
  * @params: initialization params
@@ -2437,9 +2654,6 @@
 int ipa_mhi_init(struct ipa_mhi_init_params *params)
 {
 	int res;
-	struct ipa_rm_create_params mhi_prod_params;
-	struct ipa_rm_create_params mhi_cons_params;
-	struct ipa_rm_perf_profile profile;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -2500,43 +2714,14 @@
 		goto fail_create_wq;
 	}
 
-	/* Create PROD in IPA RM */
-	memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
-	mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
-	mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
-	mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
-	res = ipa_rm_create_resource(&mhi_prod_params);
+	if (ipa_pm_is_used())
+		res = ipa_mhi_register_pm();
+	else
+		res = ipa_mhi_create_rm_resources();
 	if (res) {
-		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
-		goto fail_create_rm_prod;
-	}
-
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = 1000;
-	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
-	if (res) {
-		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
-		goto fail_perf_rm_prod;
-	}
-
-	/* Create CONS in IPA RM */
-	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
-	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
-	mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
-	mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
-	mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
-	res = ipa_rm_create_resource(&mhi_cons_params);
-	if (res) {
-		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
-		goto fail_create_rm_cons;
-	}
-
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = 1000;
-	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
-	if (res) {
-		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
-		goto fail_perf_rm_cons;
+		IPA_MHI_ERR("failed to create RM resources\n");
+		res = -EFAULT;
+		goto fail_rm;
 	}
 
 	/* Initialize uC interface */
@@ -2551,12 +2736,7 @@
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 
-fail_perf_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
-fail_create_rm_cons:
-fail_perf_rm_prod:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
-fail_create_rm_prod:
+fail_rm:
 	destroy_workqueue(ipa_mhi_client_ctx->wq);
 fail_create_wq:
 	kfree(ipa_mhi_client_ctx);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index a15a9d8..e19d297 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -13,6 +13,7 @@
 #include <linux/ipa_uc_offload.h>
 #include <linux/msm_ipa.h>
 #include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
 
 #define IPA_NTN_DMA_POOL_ALIGNMENT 8
 #define OFFLOAD_DRV_NAME "ipa_uc_offload"
@@ -69,6 +70,7 @@
 	char netdev_name[IPA_RESOURCE_NAME_MAX];
 	ipa_notify_cb notify;
 	struct completion ntn_completion;
+	u32 pm_hdl;
 };
 
 static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
@@ -113,22 +115,53 @@
 	return 0;
 }
 
-static int ipa_uc_offload_ntn_reg_intf(
-	struct ipa_uc_offload_intf_params *inp,
-	struct ipa_uc_offload_out_params *outp,
+static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	/* suspend/resume is not supported */
+	IPA_UC_OFFLOAD_DBG("event = %d\n", event);
+}
+
+static int ipa_uc_offload_ntn_register_pm_client(
 	struct ipa_uc_offload_ctx *ntn_ctx)
 {
-	struct ipa_ioc_add_hdr *hdr = NULL;
-	struct ipa_tx_intf tx;
-	struct ipa_rx_intf rx;
-	struct ipa_ioc_tx_intf_prop tx_prop[2];
-	struct ipa_ioc_rx_intf_prop rx_prop[2];
-	struct ipa_rm_create_params param;
-	u32 len;
-	int ret = 0;
+	int res;
+	struct ipa_pm_register_params params;
 
-	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
-					 inp->netdev_name);
+	memset(&params, 0, sizeof(params));
+	params.name = "ETH";
+	params.callback = ipa_uc_offload_ntn_pm_cb;
+	params.user_data = ntn_ctx;
+	params.group = IPA_PM_GROUP_DEFAULT;
+	res = ipa_pm_register(&params, &ntn_ctx->pm_hdl);
+	if (res) {
+		IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
+		return res;
+	}
+
+	res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
+		IPA_CLIENT_ETHERNET_CONS);
+	if (res) {
+		IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
+		ipa_pm_deregister(ntn_ctx->pm_hdl);
+		ntn_ctx->pm_hdl = ~0;
+		return res;
+	}
+
+	return 0;
+}
+
+static void ipa_uc_offload_ntn_deregister_pm_client(
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
+	ipa_pm_deregister(ntn_ctx->pm_hdl);
+}
+static int ipa_uc_offload_ntn_create_rm_resources(
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int ret;
+	struct ipa_rm_create_params param;
+
 	memset(&param, 0, sizeof(param));
 	param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
 	param.reg_params.user_data = ntn_ctx;
@@ -147,9 +180,37 @@
 	ret = ipa_rm_create_resource(&param);
 	if (ret) {
 		IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
-		goto fail_create_rm_cons;
+		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+		return -EFAULT;
 	}
 
+	return 0;
+}
+
+static int ipa_uc_offload_ntn_reg_intf(
+	struct ipa_uc_offload_intf_params *inp,
+	struct ipa_uc_offload_out_params *outp,
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	struct ipa_ioc_add_hdr *hdr = NULL;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	int ret = 0;
+	u32 len;
+
+
+	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+					 inp->netdev_name);
+	if (ipa_pm_is_used())
+		ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
+	else
+		ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
+		return -EFAULT;
+	}
 	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
 	ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
 	ntn_ctx->notify = inp->notify;
@@ -228,9 +289,12 @@
 fail:
 	kfree(hdr);
 fail_alloc:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
-fail_create_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+	if (ipa_pm_is_used()) {
+		ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
+	} else {
+		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
+		ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+	}
 	return ret;
 }
 
@@ -348,25 +412,34 @@
 		return -EINVAL;
 	}
 
-	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (result) {
-		IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result);
-		return result;
-	}
+	if (ipa_pm_is_used()) {
+		result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
+		if (result) {
+			IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
+			return result;
+		}
+	} else {
+		result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+		if (result) {
+			IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n",
+				result);
+			return result;
+		}
 
-	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-	if (result == -EINPROGRESS) {
-		if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
-			10*HZ) == 0) {
-			IPA_UC_OFFLOAD_ERR("ETH_PROD resource req time out\n");
+		result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+		if (result == -EINPROGRESS) {
+			if (wait_for_completion_timeout(
+				&ntn_ctx->ntn_completion, 10*HZ) == 0) {
+				IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n");
+				result = -EFAULT;
+				goto fail;
+			}
+		} else if (result != 0) {
+			IPA_UC_OFFLOAD_ERR("fail to request resource\n");
 			result = -EFAULT;
 			goto fail;
 		}
-	} else if (result != 0) {
-		IPA_UC_OFFLOAD_ERR("fail to request resource\n");
-		result = -EFAULT;
-		goto fail;
 	}
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
@@ -383,8 +456,9 @@
 	return 0;
 
 fail:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
+	if (!ipa_pm_is_used())
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
 	return result;
 }
 
@@ -455,6 +529,11 @@
 		return -EINVAL;
 	}
 
+	if (ipa_pm_is_used())
+		return ipa_pm_set_perf_profile(
+			ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
+			profile->max_supported_bw_mbps);
+
 	if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
 		IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
 		return -EFAULT;
@@ -471,18 +550,27 @@
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
 
-	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
-	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to release ETHERNET_PROD res: %d\n",
-						  ret);
-		return -EFAULT;
-	}
+	if (ipa_pm_is_used()) {
+		ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
+		if (ret) {
+			IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
+			ret);
+			return -EFAULT;
+		}
+	} else {
+		ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
+		if (ret) {
+			IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
+							ret);
+			return -EFAULT;
+		}
 
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
-		IPA_RM_RESOURCE_APPS_CONS);
-	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to del dep ETH_PROD->APPS, %d\n", ret);
-		return -EFAULT;
+		ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+		if (ret) {
+			IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
+			return -EFAULT;
+		}
 	}
 
 	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
@@ -539,14 +627,18 @@
 	int len, result = 0;
 	struct ipa_ioc_del_hdr *hdr;
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD resource\n");
-		return -EFAULT;
-	}
+	if (ipa_pm_is_used()) {
+		ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
+	} else {
+		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
+			IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
+			return -EFAULT;
+		}
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS resource\n");
-		return -EFAULT;
+		if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
+			IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
+			return -EFAULT;
+		}
 	}
 
 	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
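Note on the hunks above: the NTN offload driver now owns a single PM handle ("ETH") instead of the ETHERNET PROD/CONS RM pair, and the handle is associated with IPA_CLIENT_ETHERNET_CONS so bandwidth votes follow the consumer pipe. A condensed sketch of how such a handle is driven through its lifetime, using only calls that appear in this patch (the helper names and the 1000 Mbps figure are illustrative):

	/* Illustrative bring-up/tear-down around an existing pm_hdl. */
	static int eth_offload_power_up(u32 pm_hdl)
	{
		int ret;

		ret = ipa_pm_set_perf_profile(pm_hdl, 1000);	/* vote Mbps */
		if (ret)
			return ret;

		return ipa_pm_activate_sync(pm_hdl);	/* clocks up before data */
	}

	static void eth_offload_power_down(u32 pm_hdl)
	{
		ipa_pm_deactivate_sync(pm_hdl);		/* drop the vote */
		ipa_pm_deregister(pm_hdl);		/* release the handle */
	}
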
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index f5d8d61..745e429 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -119,6 +119,12 @@
 	bool cons_requested_released;
 };
 
+struct ipa3_usb_pm_context {
+	struct ipa_pm_register_params reg_params;
+	struct work_struct *remote_wakeup_work;
+	u32 hdl;
+};
+
 enum ipa3_usb_state {
 	IPA_USB_INVALID,
 	IPA_USB_INITIALIZED,
@@ -157,6 +163,7 @@
  */
 struct ipa3_usb_transport_type_ctx {
 	struct ipa3_usb_rm_context rm_ctx;
+	struct ipa3_usb_pm_context pm_ctx;
 	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
 	void *user_data;
 	enum ipa3_usb_state state;
@@ -362,7 +369,8 @@
 			ipa3_usb_state_to_string(new_state));
 	}
 
-	if (state_legal && (new_state == IPA_USB_CONNECTED)) {
+	if (!ipa_pm_is_used() &&
+		state_legal && (new_state == IPA_USB_CONNECTED)) {
 		rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
 		if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
 			rm_ctx->cons_requested_released) {
@@ -656,6 +664,30 @@
 	return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
 }
 
+static void ipa3_usb_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ipa3_usb_transport_type_ctx *ttype_ctx =
+		(struct ipa3_usb_transport_type_ctx *)p;
+	unsigned long flags;
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		IPA_USB_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG("state is %s\n",
+		ipa3_usb_state_to_string(ttype_ctx->state));
+	if (ttype_ctx->state == IPA_USB_SUSPENDED)
+		queue_work(ipa3_usb_ctx->wq,
+			ttype_ctx->pm_ctx.remote_wakeup_work);
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
 static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
 {
 	switch (teth_prot) {
@@ -703,6 +735,59 @@
 	return 0;
 }
 
+static int ipa3_usb_register_pm(enum ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_transport_type_ctx *ttype_ctx =
+		&ipa3_usb_ctx->ttype_ctx[ttype];
+	int result;
+
+	memset(&ttype_ctx->pm_ctx.reg_params, 0,
+		sizeof(ttype_ctx->pm_ctx.reg_params));
+	ttype_ctx->pm_ctx.reg_params.name = (ttype == IPA_USB_TRANSPORT_DPL) ?
+				"USB DPL" : "USB";
+	ttype_ctx->pm_ctx.reg_params.callback = ipa3_usb_pm_cb;
+	ttype_ctx->pm_ctx.reg_params.user_data = ttype_ctx;
+	ttype_ctx->pm_ctx.reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	result = ipa_pm_register(&ttype_ctx->pm_ctx.reg_params,
+		&ttype_ctx->pm_ctx.hdl);
+	if (result) {
+		IPA_USB_ERR("fail to register with PM %d\n", result);
+		goto fail_pm_reg;
+	}
+
+	result = ipa_pm_associate_ipa_cons_to_client(ttype_ctx->pm_ctx.hdl,
+		(ttype == IPA_USB_TRANSPORT_DPL) ?
+		IPA_CLIENT_USB_DPL_CONS : IPA_CLIENT_USB_CONS);
+	if (result) {
+		IPA_USB_ERR("fail to associate cons with PM %d\n", result);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(ttype_ctx->pm_ctx.hdl);
+fail_pm_reg:
+	memset(&ttype_ctx->pm_ctx.reg_params, 0,
+		sizeof(ttype_ctx->pm_ctx.reg_params));
+	return result;
+}
+
+static int ipa3_usb_deregister_pm(enum ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_pm_context *pm_ctx =
+		&ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx;
+	int result;
+
+	result = ipa_pm_deregister(pm_ctx->hdl);
+	if (result)
+		return result;
+
+	memset(&pm_ctx->reg_params, 0, sizeof(pm_ctx->reg_params));
+	return 0;
+}
+
 static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
 {
 	struct ipa3_usb_rm_context *rm_ctx;
@@ -802,7 +887,10 @@
 	}
 
 	/* Create IPA RM USB resources */
-	result = ipa3_usb_create_rm_resources(ttype);
+	if (ipa_pm_is_used())
+		result = ipa3_usb_register_pm(ttype);
+	else
+		result = ipa3_usb_create_rm_resources(ttype);
 	if (result) {
 		IPA_USB_ERR("Failed creating IPA RM USB resources\n");
 		goto bad_params;
@@ -934,12 +1022,18 @@
 teth_prot_init_fail:
 	if ((IPA3_USB_IS_TTYPE_DPL(ttype))
 		|| (ipa3_usb_ctx->num_init_prot == 0)) {
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
-		ipa_rm_delete_resource(
+		if (ipa_pm_is_used()) {
+			ipa3_usb_deregister_pm(ttype);
+		} else {
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
+				false;
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
+				false;
+			ipa_rm_delete_resource(
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
-		ipa_rm_delete_resource(
+			ipa_rm_delete_resource(
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+		}
 	}
 bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
@@ -1393,6 +1487,9 @@
 {
 	int res = 0;
 
+	if (ipa_pm_is_used())
+		return 0;
+
 	/*
 	 * Add DPL dependency to RM dependency graph, first add_dependency call
 	 * is sync in order to make sure the IPA clocks are up before we
@@ -1572,6 +1669,9 @@
 {
 	int res;
 
+	if (ipa_pm_is_used())
+		return 0;
+
 	/* Remove DPL RM dependency */
 	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
 				    IPA_RM_RESOURCE_Q6_CONS);
@@ -1699,32 +1799,51 @@
 		return result;
 	}
 
-	/* Set RM PROD & CONS perf profile */
-	profile.max_supported_bandwidth_mbps =
-			params->max_supported_bandwidth_mbps;
-	result = ipa_rm_set_perf_profile(
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
-		&profile);
-	if (result) {
-		IPA_USB_ERR("failed to set %s perf profile\n",
-			ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
-				rm_ctx.prod_params.name));
-		return result;
-	}
-	result = ipa_rm_set_perf_profile(
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
-		&profile);
-	if (result) {
-		IPA_USB_ERR("failed to set %s perf profile\n",
-			ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
-				rm_ctx.cons_params.name));
-		return result;
-	}
+	if (ipa_pm_is_used()) {
+		result = ipa_pm_set_perf_profile(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl,
+			params->max_supported_bandwidth_mbps);
+		if (result) {
+			IPA_USB_ERR("failed to set perf profile\n");
+			return result;
+		}
 
-	/* Request PROD */
-	result = ipa3_usb_request_prod(ttype);
-	if (result)
-		return result;
+		result = ipa_pm_activate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+		if (result) {
+			IPA_USB_ERR("failed to activate pm\n");
+			return result;
+		}
+	} else {
+		/* Set RM PROD & CONS perf profile */
+		profile.max_supported_bandwidth_mbps =
+				params->max_supported_bandwidth_mbps;
+		result = ipa_rm_set_perf_profile(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
+			&profile);
+		if (result) {
+			IPA_USB_ERR("failed to set %s perf profile\n",
+				ipa_rm_resource_str(ipa3_usb_ctx->
+					ttype_ctx[ttype].
+					rm_ctx.prod_params.name));
+			return result;
+		}
+		result = ipa_rm_set_perf_profile(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
+			&profile);
+		if (result) {
+			IPA_USB_ERR("failed to set %s perf profile\n",
+				ipa_rm_resource_str(ipa3_usb_ctx->
+					ttype_ctx[ttype].
+					rm_ctx.cons_params.name));
+			return result;
+		}
+
+		/* Request PROD */
+		result = ipa3_usb_request_prod(ttype);
+		if (result)
+			return result;
+	}
 
 	if (params->teth_prot != IPA_USB_DIAG) {
 		/* Start UL channel */
@@ -1775,7 +1894,11 @@
 		ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
 	}
 connect_ul_fail:
-	ipa3_usb_release_prod(ttype);
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		ipa3_usb_release_prod(ttype);
 	return result;
 }
 
@@ -2224,7 +2347,11 @@
 		goto bad_params;
 
 	if (orig_state != IPA_USB_SUSPENDED) {
-		result = ipa3_usb_release_prod(ttype);
+		if (ipa_pm_is_used())
+			result = ipa_pm_deactivate_sync(
+				ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+		else
+			result = ipa3_usb_release_prod(ttype);
 		if (result) {
 			IPA_USB_ERR("failed to release PROD.\n");
 			goto bad_params;
@@ -2334,13 +2461,19 @@
 		(ipa3_usb_ctx->num_init_prot == 0)) {
 		if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
 			IPA_USB_ERR("failed to change state to invalid\n");
-		ipa_rm_delete_resource(
+		if (ipa_pm_is_used()) {
+			ipa3_usb_deregister_pm(ttype);
+		} else {
+			ipa_rm_delete_resource(
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
-		ipa_rm_delete_resource(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid =
+				false;
+			ipa_rm_delete_resource(
 			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
-		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
-		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid =
+				false;
+			ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
+		}
 	}
 
 	IPA_USB_DBG_LOW("exit\n");
@@ -2400,7 +2533,11 @@
 	if (result)
 		goto start_ul;
 
-	result = ipa3_usb_release_prod(ttype);
+	if (ipa_pm_is_used())
+		result = ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		result = ipa3_usb_release_prod(ttype);
 	if (result) {
 		IPA_USB_ERR("failed to release PROD.\n");
 		goto connect_teth;
@@ -2477,7 +2614,11 @@
 	}
 	ipa3_usb_ctx->qmi_req_id++;
 
-	result = ipa3_usb_release_prod(ttype);
+	if (ipa_pm_is_used())
+		result = ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		result = ipa3_usb_release_prod(ttype);
 	if (result) {
 		IPA_USB_ERR("failed to release PROD\n");
 		goto release_prod_fail;
@@ -2543,7 +2684,11 @@
 		"DPL channel":"Data Tethering channels");
 
 	/* Request USB_PROD */
-	result = ipa3_usb_request_prod(ttype);
+	if (ipa_pm_is_used())
+		result = ipa_pm_activate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		result = ipa3_usb_request_prod(ttype);
 	if (result)
 		goto fail_exit;
 
@@ -2590,7 +2735,11 @@
 disconn_teth:
 	(void)ipa3_usb_disconnect_teth_prot(teth_prot);
 release_prod:
-	(void)ipa3_usb_release_prod(ttype);
+	if (ipa_pm_is_used())
+		(void)ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		(void)ipa3_usb_release_prod(ttype);
 fail_exit:
 	return result;
 }
@@ -2642,7 +2791,11 @@
 	}
 
 	/* Request USB_PROD */
-	result = ipa3_usb_request_prod(ttype);
+	if (ipa_pm_is_used())
+		result = ipa_pm_activate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		result = ipa3_usb_request_prod(ttype);
 	if (result)
 		goto prod_req_fail;
 
@@ -2685,7 +2838,11 @@
 			IPA_USB_ERR("Error stopping UL channel: %d\n", result);
 	}
 start_ul_fail:
-	ipa3_usb_release_prod(ttype);
+	if (ipa_pm_is_used())
+		ipa_pm_deactivate_sync(
+			ipa3_usb_ctx->ttype_ctx[ttype].pm_ctx.hdl);
+	else
+		ipa3_usb_release_prod(ttype);
 prod_req_fail:
 	/* Change state back to prev_state */
 	if (!ipa3_usb_set_state(prev_state, true, ttype))
@@ -2722,6 +2879,20 @@
 	ipa3_usb_ctx->dl_data_pending = false;
 	mutex_init(&ipa3_usb_ctx->general_mutex);
 
+	if (ipa_pm_is_used()) {
+		struct ipa3_usb_pm_context *pm_ctx;
+
+		pm_ctx =
+			&ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].pm_ctx;
+		pm_ctx->hdl = ~0;
+		pm_ctx->remote_wakeup_work =
+			&ipa3_usb_notify_remote_wakeup_work;
+		pm_ctx = &ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].pm_ctx;
+		pm_ctx->hdl = ~0;
+		pm_ctx->remote_wakeup_work =
+			&ipa3_usb_dpl_notify_remote_wakeup_work;
+	}
+
 	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
 		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
 		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
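Note on the hunks above: the USB client keeps one PM handle per transport type ("USB" and "USB DPL"), and its callback only has to handle IPA_PM_REQUEST_WAKEUP, which it converts into the existing remote-wakeup work item when the interface is suspended. A condensed sketch of that callback shape (locking omitted; the real ipa3_usb_pm_cb() above also takes state_lock):

	/* Illustrative remote-wakeup handling in an IPA PM callback. */
	static void example_usb_pm_cb(void *user_data, enum ipa_pm_cb_event event)
	{
		struct ipa3_usb_transport_type_ctx *ttype_ctx = user_data;

		if (event != IPA_PM_REQUEST_WAKEUP)
			return;	/* no other events expected for this client */

		if (ttype_ctx->state == IPA_USB_SUSPENDED)
			queue_work(ipa3_usb_ctx->wq,
				ttype_ctx->pm_ctx.remote_wakeup_work);
	}
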
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index a623d0b..f0d1102 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -27,6 +27,7 @@
 #include <linux/cdev.h>
 #include <linux/ipa_odu_bridge.h>
 #include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
 
 #define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge"
 
@@ -152,6 +153,7 @@
 	void *logbuf_low;
 	struct completion rm_comp;
 	void (*wakeup_request)(void *);
+	u32 pm_hdl;
 };
 static struct odu_bridge_ctx *odu_bridge_ctx;
 
@@ -273,20 +275,22 @@
 	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
 	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
 
-	/* Build IPA Resource manager dependency graph */
-	ODU_BRIDGE_DBG_LOW("build dependency graph\n");
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	if (!ipa_pm_is_used()) {
+		/* Build IPA Resource manager dependency graph */
+		ODU_BRIDGE_DBG_LOW("build dependency graph\n");
+		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
 					IPA_RM_RESOURCE_Q6_CONS);
-	if (res && res != -EINPROGRESS) {
-		ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
-		goto fail_add_dependency_1;
-	}
+		if (res && res != -EINPROGRESS) {
+			ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+			goto fail_add_dependency_1;
+		}
 
-	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+		res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
 					IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-	if (res && res != -EINPROGRESS) {
-		ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
-		goto fail_add_dependency_2;
+		if (res && res != -EINPROGRESS) {
+			ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+			goto fail_add_dependency_2;
+		}
 	}
 
 	/* configure RX (ODU->IPA) EP */
@@ -346,10 +350,12 @@
 	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
 	odu_bridge_ctx->odu_prod_hdl = 0;
 fail_odu_prod:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+	if (!ipa_pm_is_used())
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
 				IPA_RM_RESOURCE_ODU_ADAPT_CONS);
 fail_add_dependency_2:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	if (!ipa_pm_is_used())
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
 				IPA_RM_RESOURCE_Q6_CONS);
 fail_add_dependency_1:
 	return res;
@@ -397,17 +403,19 @@
 		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
 	odu_bridge_ctx->odu_emb_cons_hdl = 0;
 
-	/* Delete IPA Resource manager dependency graph */
-	ODU_BRIDGE_DBG("deleting dependency graph\n");
-	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (res && res != -EINPROGRESS)
-		ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+	if (!ipa_pm_is_used()) {
+		/* Delete IPA Resource manager dependency graph */
+		ODU_BRIDGE_DBG("deleting dependency graph\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res && res != -EINPROGRESS)
+			ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
 
-	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-		IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-	if (res && res != -EINPROGRESS)
-		ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+		if (res && res != -EINPROGRESS)
+			ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+	}
 
 	return 0;
 }
@@ -1319,24 +1327,58 @@
 	return 0;
 }
 
-/* IPA Bridge API is the new API which will replaces old odu_bridge API */
-int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+static void ipa_br_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (event != IPA_PM_REQUEST_WAKEUP) {
+		ODU_BRIDGE_ERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+
+	if (odu_bridge_ctx->is_suspended)
+		odu_bridge_ctx->wakeup_request(odu_bridge_ctx->priv);
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+static int ipa_br_register_pm(void)
+{
+	struct ipa_pm_register_params reg_params;
+	int ret;
+
+	memset(&reg_params, 0, sizeof(reg_params));
+	reg_params.name = "ODU Bridge";
+	reg_params.callback = ipa_br_pm_cb;
+	reg_params.group = IPA_PM_GROUP_DEFAULT;
+
+	ret = ipa_pm_register(&reg_params,
+		&odu_bridge_ctx->pm_hdl);
+	if (ret) {
+		ODU_BRIDGE_ERR("fail to register with PM %d\n", ret);
+		goto fail_pm_reg;
+	}
+
+	ret = ipa_pm_associate_ipa_cons_to_client(odu_bridge_ctx->pm_hdl,
+		IPA_CLIENT_ODU_EMB_CONS);
+	if (ret) {
+		ODU_BRIDGE_ERR("fail to associate cons with PM %d\n", ret);
+		goto fail_pm_cons;
+	}
+
+	return 0;
+
+fail_pm_cons:
+	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
+	odu_bridge_ctx->pm_hdl = ~0;
+fail_pm_reg:
+	return ret;
+}
+
+static int ipa_br_create_rm_resources(void)
 {
 	int ret;
 	struct ipa_rm_create_params create_params;
 
-	if (!params || !params->wakeup_request || !hdl) {
-		ODU_BRIDGE_ERR("NULL arg\n");
-		return -EINVAL;
-	}
-
-
-	ret = odu_bridge_init(&params->info);
-	if (ret)
-		return ret;
-
-	odu_bridge_ctx->wakeup_request = params->wakeup_request;
-
 	/* create IPA RM resources for power management */
 	init_completion(&odu_bridge_ctx->rm_comp);
 	memset(&create_params, 0, sizeof(create_params));
@@ -1368,9 +1410,6 @@
 		goto fail_rm_cons;
 	}
 
-	/* handle is ignored for now */
-	*hdl = 0;
-
 	return 0;
 
 fail_rm_cons:
@@ -1379,6 +1418,41 @@
 fail_add_dep:
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 fail_rm_prod:
+	return ret;
+}
+
+/* IPA Bridge API is the new API which will replace the old odu_bridge API */
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
+{
+	int ret;
+
+	if (!params || !params->wakeup_request || !hdl) {
+		ODU_BRIDGE_ERR("NULL arg\n");
+		return -EINVAL;
+	}
+
+
+	ret = odu_bridge_init(&params->info);
+	if (ret)
+		return ret;
+
+	odu_bridge_ctx->wakeup_request = params->wakeup_request;
+
+	if (ipa_pm_is_used())
+		ret = ipa_br_register_pm();
+	else
+		ret = ipa_br_create_rm_resources();
+	if (ret) {
+		ODU_BRIDGE_ERR("fail to register with RM/PM %d\n", ret);
+		goto fail_pm;
+	}
+
+	/* handle is ignored for now */
+	*hdl = 0;
+
+	return 0;
+
+fail_pm:
 	odu_bridge_cleanup();
 	return ret;
 }
@@ -1398,7 +1472,10 @@
 		return -EFAULT;
 	}
 
-	ret = ipa_br_request_prod();
+	if (ipa_pm_is_used())
+		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
+	else
+		ret = ipa_br_request_prod();
 	if (ret)
 		return ret;
 
@@ -1411,6 +1488,10 @@
 	struct ipa_rm_perf_profile profile = {0};
 	int ret;
 
+	if (ipa_pm_is_used())
+		return ipa_pm_set_perf_profile(odu_bridge_ctx->pm_hdl,
+			bandwidth);
+
 	profile.max_supported_bandwidth_mbps = bandwidth;
 	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_ODU_ADAPT_PROD, &profile);
 	if (ret) {
@@ -1436,7 +1517,10 @@
 	if (ret)
 		return ret;
 
-	ret = ipa_br_release_prod();
+	if (ipa_pm_is_used())
+		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
+	else
+		ret = ipa_br_release_prod();
 	if (ret)
 		return ret;
 
@@ -1470,7 +1554,10 @@
 		return ret;
 	}
 
-	ret = ipa_br_release_prod();
+	if (ipa_pm_is_used())
+		ret = ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
+	else
+		ret = ipa_br_release_prod();
 	if (ret) {
 		ODU_BRIDGE_ERR("failed to release prod %d\n", ret);
 		ipa_start_gsi_channel(odu_bridge_ctx->odu_emb_cons_hdl);
@@ -1501,7 +1588,10 @@
 		return -EFAULT;
 	}
 
-	ret = ipa_br_request_prod();
+	if (ipa_pm_is_used())
+		ret = ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
+	else
+		ret = ipa_br_request_prod();
 	if (ret)
 		return ret;
 
@@ -1523,12 +1613,27 @@
 }
 EXPORT_SYMBOL(ipa_bridge_tx_dp);
 
-int ipa_bridge_cleanup(u32 hdl)
+static void ipa_br_delete_rm_resources(void)
 {
 	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
 		IPA_RM_RESOURCE_APPS_CONS);
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+}
+
+static void ipa_br_deregister_pm(void)
+{
+	ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
+	ipa_pm_deregister(odu_bridge_ctx->pm_hdl);
+	odu_bridge_ctx->pm_hdl = ~0;
+}
+
+int ipa_bridge_cleanup(u32 hdl)
+{
+	if (ipa_pm_is_used())
+		ipa_br_deregister_pm();
+	else
+		ipa_br_delete_rm_resources();
 	return odu_bridge_cleanup();
 }
 EXPORT_SYMBOL(ipa_bridge_cleanup);
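Note on the hunks above: the bridge's connect, suspend and resume paths now reduce to activate/deactivate calls on one PM handle, replacing the RM request/release plus dependency bookkeeping. A minimal sketch of the resulting suspend/resume pair (helper names are illustrative, calls as used in this patch):

	/* Illustrative suspend/resume built on odu_bridge_ctx->pm_hdl. */
	static int example_bridge_suspend(void)
	{
		/* channel is already stopped; drop the clock/bandwidth vote */
		return ipa_pm_deactivate_sync(odu_bridge_ctx->pm_hdl);
	}

	static int example_bridge_resume(void)
	{
		/* re-vote before restarting the consumer GSI channel */
		return ipa_pm_activate_sync(odu_bridge_ctx->pm_hdl);
	}
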
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index f62cb27..1c47e69 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -9,7 +9,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -27,6 +26,8 @@
 #include <linux/random.h>
 #include <linux/rndis_ipa.h>
 #include <linux/workqueue.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
 
 #define CREATE_TRACE_POINTS
 #include "rndis_ipa_trace.h"
@@ -160,6 +161,7 @@
  * state is changed to RNDIS_IPA_CONNECTED_AND_UP
  * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails
  * @state_lock: used to protect the state variable.
+ * @pm_hdl: handle for IPA PM framework
  */
 struct rndis_ipa_dev {
 	struct net_device *net;
@@ -188,6 +190,7 @@
 	void (*device_ready_notify)(void);
 	struct delayed_work xmit_error_delayed_work;
 	spinlock_t state_lock; /* Spinlock for the state variable.*/
+	u32 pm_hdl;
 };
 
 /**
@@ -233,6 +236,8 @@
 	unsigned long data);
 static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
 static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx);
 static bool rx_filter(struct sk_buff *skb);
 static bool tx_filter(struct sk_buff *skb);
 static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
@@ -701,7 +706,10 @@
 		return -EINVAL;
 	}
 
-	result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
+	if (ipa_pm_is_used())
+		result = rndis_ipa_register_pm_client(rndis_ipa_ctx);
+	else
+		result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
 	if (result) {
 		RNDIS_IPA_ERROR("fail on RM create\n");
 		goto fail_create_rm;
@@ -763,7 +771,10 @@
 	return 0;
 
 fail:
-	rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
+	if (ipa_pm_is_used())
+		rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
+	else
+		rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
 fail_create_rm:
 	return result;
 }
@@ -1235,7 +1246,10 @@
 	rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
 	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
 
-	retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
+	if (ipa_pm_is_used())
+		retval = rndis_ipa_deregister_pm_client(rndis_ipa_ctx);
+	else
+		retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
 	if (retval) {
 		RNDIS_IPA_ERROR("Fail to clean RM\n");
 		return retval;
@@ -1772,6 +1786,29 @@
 	return result;
 }
 
+static void rndis_ipa_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = p;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (event != IPA_PM_CLIENT_ACTIVATED) {
+		RNDIS_IPA_ERROR("unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+	RNDIS_IPA_DEBUG("Resource Granted\n");
+
+	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_DEBUG("starting queue\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG("queue already awake\n");
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
 /**
  * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
  * the resource done on rndis_ipa_create_rm_resource()
@@ -1831,6 +1868,33 @@
 	return result;
 }
 
+static int rndis_ipa_register_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+
+	pm_reg.name = rndis_ipa_ctx->net->name;
+	pm_reg.user_data = rndis_ipa_ctx;
+	pm_reg.callback = rndis_ipa_pm_cb;
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &rndis_ipa_ctx->pm_hdl);
+	if (result) {
+		RNDIS_IPA_ERROR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	ipa_pm_deactivate_sync(rndis_ipa_ctx->pm_hdl);
+	ipa_pm_deregister(rndis_ipa_ctx->pm_hdl);
+	rndis_ipa_ctx->pm_hdl = ~0;
+	return 0;
+}
+
 /**
  * resource_request() - request for the Netdev resource
  * @rndis_ipa_ctx: main driver context
@@ -1849,11 +1913,14 @@
 	int result = 0;
 
 	if (!rm_enabled(rndis_ipa_ctx))
-		goto out;
-	result = ipa_rm_inactivity_timer_request_resource(
+		return result;
+
+	if (ipa_pm_is_used())
+		return ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
+
+	return ipa_rm_inactivity_timer_request_resource(
 			DRV_RESOURCE_ID);
-out:
-	return result;
+
 }
 
 /**
@@ -1868,9 +1935,12 @@
 static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
 {
 	if (!rm_enabled(rndis_ipa_ctx))
-		goto out;
-	ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
-out:
+		return;
+	if (ipa_pm_is_used())
+		ipa_pm_deferred_deactivate(rndis_ipa_ctx->pm_hdl);
+	else
+		ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
+
 	return;
 }
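Note on the hunks above: in the RNDIS data path the power request must not block, so resource_request() uses the asynchronous ipa_pm_activate() (the Tx queue is restarted from rndis_ipa_pm_cb() once IPA_PM_CLIENT_ACTIVATED arrives), while resource_release() uses ipa_pm_deferred_deactivate() so bursts of packets do not bounce the vote. A condensed sketch of that pairing (the helper name is illustrative):

	/* Illustrative non-blocking power handling around packet Tx. */
	static int example_rndis_tx_power(struct rndis_ipa_dev *ctx, bool on)
	{
		if (on)
			/* may complete later via the registered PM callback */
			return ipa_pm_activate(ctx->pm_hdl);

		ipa_pm_deferred_deactivate(ctx->pm_hdl);	/* lazy release */
		return 0;
	}
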
 
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 32c8b25..a487bf4 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -386,4 +386,6 @@
 const char *ipa_get_version_string(enum ipa_hw_type ver);
 int ipa_start_gsi_channel(u32 clnt_hdl);
 
+bool ipa_pm_is_used(void);
+
 #endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 2b517a1..7aa7ffd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -811,10 +811,11 @@
 			eq = true;
 		} else {
 			rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
-			if (rt_tbl)
-				rt_tbl_idx = rt_tbl->idx;
+			if (rt_tbl == NULL ||
+				rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+				rt_tbl_idx = ~0;
 			else
-				rt_tbl_idx = ~0;
+				rt_tbl_idx = rt_tbl->idx;
 			bitmap = entry->rule.attrib.attrib_mask;
 			eq = false;
 		}
@@ -841,10 +842,11 @@
 				eq = true;
 			} else {
 				rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
-				if (rt_tbl)
-					rt_tbl_idx = rt_tbl->idx;
-				else
+				if (rt_tbl == NULL ||
+					rt_tbl->cookie != IPA_RT_TBL_COOKIE)
 					rt_tbl_idx = ~0;
+				else
+					rt_tbl_idx = rt_tbl->idx;
 				bitmap = entry->rule.attrib.attrib_mask;
 				eq = false;
 			}
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index e3f8d45..5db2545 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -4,6 +4,6 @@
 ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
 	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
 	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
-	ipa_hw_stats.o
+	ipa_hw_stats.o ipa_pm.o
 
 obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 2210ee7..097eec8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1466,6 +1466,10 @@
 		}
 		break;
 	case IPA_IOC_RM_ADD_DEPENDENCY:
+		/* deprecate if IPA PM is used */
+		if (ipa3_ctx->use_ipa_pm)
+			return 0;
+
 		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
 				sizeof(struct ipa_ioc_rm_dependency))) {
 			retval = -EFAULT;
@@ -1475,6 +1479,10 @@
 			rm_depend.resource_name, rm_depend.depends_on_name);
 		break;
 	case IPA_IOC_RM_DEL_DEPENDENCY:
+		/* deprecate if IPA PM is used */
+		if (ipa3_ctx->use_ipa_pm)
+			return 0;
+
 		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
 				sizeof(struct ipa_ioc_rm_dependency))) {
 			retval = -EFAULT;
@@ -2350,7 +2358,6 @@
 	struct ipahal_imm_cmd_register_write reg_write;
 	struct ipahal_imm_cmd_pyld *cmd_pyld;
 	int retval;
-	struct ipahal_reg_valmask valmask;
 
 	desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
 			GFP_KERNEL);
@@ -2365,39 +2372,10 @@
 		if (ep_idx == -1)
 			continue;
 
-		if (ipa3_ctx->ep[ep_idx].valid &&
-			ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
-			BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
-
-			reg_write.skip_pipeline_clear = false;
-			reg_write.pipeline_clear_options =
-				IPAHAL_HPS_CLEAR;
-			reg_write.offset =
-				ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
-					ep_idx);
-			ipahal_get_status_ep_valmask(
-				ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
-				&valmask);
-			reg_write.value = valmask.val;
-			reg_write.value_mask = valmask.mask;
-			cmd_pyld = ipahal_construct_imm_cmd(
-				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
-			if (!cmd_pyld) {
-				IPAERR("fail construct register_write cmd\n");
-				BUG();
-			}
-
-			desc[num_descs].opcode = cmd_pyld->opcode;
-			desc[num_descs].type = IPA_IMM_CMD_DESC;
-			desc[num_descs].callback = ipa3_destroy_imm;
-			desc[num_descs].user1 = cmd_pyld;
-			desc[num_descs].pyld = cmd_pyld->data;
-			desc[num_descs].len = cmd_pyld->len;
-			num_descs++;
-		}
-
-		/* disable statuses for modem producers */
-		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+		/* disable statuses for all modem controlled prod pipes */
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx) ||
+			(ipa3_ctx->ep[ep_idx].valid &&
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg)) {
 			ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
 
 			reg_write.skip_pipeline_clear = false;
@@ -3712,6 +3690,53 @@
 	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
 }
 
+int ipa3_set_clock_plan_from_pm(int idx)
+{
+	u32 clk_rate;
+
+	IPADBG_LOW("idx = %d\n", idx);
+
+	if (idx <= 0 || idx >= ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (idx == 1)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+	else if (idx == 2)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+	else if (idx == 3)
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+	else {
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+		IPADBG_LOW("Same voltage\n");
+		return 0;
+	}
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+	ipa3_ctx->ipa3_active_clients.bus_vote_idx = idx;
+	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) > 0) {
+		if (ipa3_clk)
+			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+				ipa3_get_bus_vote()))
+			WARN_ON(1);
+	} else {
+		IPADBG_LOW("clocks are gated, not setting rate\n");
+	}
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+	IPADBG_LOW("Done\n");
+
+	return 0;
+}
+
 int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
 				  u32 bandwidth_mbps)
 {
@@ -3814,14 +3839,19 @@
 	u32 i = 0;
 	int res;
 	struct ipa_ep_cfg_holb holb_cfg;
+	u32 pipe_bitmask = 0;
 
 	IPADBG("interrupt=%d, interrupt_data=%u\n",
 		interrupt, suspend_data);
 	memset(&holb_cfg, 0, sizeof(holb_cfg));
 	holb_cfg.tmr_val = 0;
 
-	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++, bmsk = bmsk << 1) {
 		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
+			if (ipa3_ctx->use_ipa_pm) {
+				pipe_bitmask |= bmsk;
+				continue;
+			}
 			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
 				/*
 				 * pipe will be unsuspended as part of
@@ -3860,7 +3890,13 @@
 				}
 			}
 		}
-		bmsk = bmsk << 1;
+	}
+	if (ipa3_ctx->use_ipa_pm) {
+		res = ipa_pm_handle_suspend(pipe_bitmask);
+		if (res) {
+			IPAERR("ipa_pm_handle_suspend failed %d\n", res);
+			return;
+		}
 	}
 }
 
@@ -4391,6 +4427,8 @@
 	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
 		IPAERR("Unable to trigger PIL process for FW loading\n");
 		return -EINVAL;
+	} else {
+		subsystem_put(subsystem_get_retval);
 	}
 
 	IPADBG("PIL FW loading process is complete\n");
@@ -4621,6 +4659,7 @@
 	ipa3_ctx->ee = resource_p->ee;
 	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
 	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
+	ipa3_ctx->use_ipa_pm = resource_p->use_ipa_pm;
 	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
 	if (resource_p->ipa_tz_unlock_reg) {
 		ipa3_ctx->ipa_tz_unlock_reg_num =
@@ -4934,20 +4973,30 @@
 	wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
 	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
 
-	/* Initialize IPA RM (resource manager) */
-	result = ipa_rm_initialize();
-	if (result) {
-		IPAERR("RM initialization failed (%d)\n", -result);
-		result = -ENODEV;
-		goto fail_ipa_rm_init;
-	}
-	IPADBG("IPA resource manager initialized");
+	/* Initialize Power Management framework */
+	if (ipa3_ctx->use_ipa_pm) {
+		result = ipa_pm_init(&ipa3_res.pm_init);
+		if (result) {
+			IPAERR("IPA PM initialization failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_ipa_rm_init;
+		}
+		IPADBG("IPA power management initialized");
+	} else {
+		result = ipa_rm_initialize();
+		if (result) {
+			IPAERR("RM initialization failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_ipa_rm_init;
+		}
+		IPADBG("IPA resource manager initialized");
 
-	result = ipa3_create_apps_resource();
-	if (result) {
-		IPAERR("Failed to create APPS_CONS resource\n");
-		result = -ENODEV;
-		goto fail_create_apps_resource;
+		result = ipa3_create_apps_resource();
+		if (result) {
+			IPAERR("Failed to create APPS_CONS resource\n");
+			result = -ENODEV;
+			goto fail_create_apps_resource;
+		}
 	}
 
 	result = ipa3_alloc_pkt_init();
@@ -4998,9 +5047,11 @@
 
 fail_cdev_add:
 fail_ipa_init_interrupts:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+	if (!ipa3_ctx->use_ipa_pm)
+		ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
 fail_create_apps_resource:
-	ipa_rm_exit();
+	if (!ipa3_ctx->use_ipa_pm)
+		ipa_rm_exit();
 fail_ipa_rm_init:
 fail_nat_dev_add:
 	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
@@ -5069,6 +5120,101 @@
 	return result;
 }
 
+bool ipa_pm_is_used(void)
+{
+	return (ipa3_ctx) ? ipa3_ctx->use_ipa_pm : false;
+}
+
+static int get_ipa_dts_pm_info(struct platform_device *pdev,
+	struct ipa3_plat_drv_res *ipa_drv_res)
+{
+	int result;
+	int i, j;
+
+	ipa_drv_res->use_ipa_pm = of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-ipa-pm");
+	IPADBG("use_ipa_pm=%d\n", ipa_drv_res->use_ipa_pm);
+	if (!ipa_drv_res->use_ipa_pm)
+		return 0;
+
+	result = of_property_read_u32(pdev->dev.of_node,
+		"qcom,msm-bus,num-cases",
+		&ipa_drv_res->pm_init.threshold_size);
+	/* No vote is ignored */
+	ipa_drv_res->pm_init.threshold_size -= 2;
+	if (result || ipa_drv_res->pm_init.threshold_size >
+		IPA_PM_THRESHOLD_MAX) {
+		IPAERR("invalid property qcom,msm-bus,num-cases %d\n",
+			ipa_drv_res->pm_init.threshold_size);
+		return -EFAULT;
+	}
+
+	result = of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,throughput-threshold",
+		ipa_drv_res->pm_init.default_threshold,
+		ipa_drv_res->pm_init.threshold_size);
+	if (result) {
+		IPAERR("failed to read qcom,throughput-threshold\n");
+		return -EFAULT;
+	}
+
+	result = of_property_count_strings(pdev->dev.of_node,
+		"qcom,scaling-exceptions");
+	if (result < 0) {
+		IPADBG("no exception list for ipa pm\n");
+		result = 0;
+	}
+
+	if (result % (ipa_drv_res->pm_init.threshold_size + 1)) {
+		IPAERR("failed to read qcom,scaling-exceptions\n");
+		return -EFAULT;
+	}
+
+	ipa_drv_res->pm_init.exception_size = result /
+		(ipa_drv_res->pm_init.threshold_size + 1);
+	if (ipa_drv_res->pm_init.exception_size >=
+		IPA_PM_EXCEPTION_MAX) {
+		IPAERR("exception list larger than max %d\n",
+			ipa_drv_res->pm_init.exception_size);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ipa_drv_res->pm_init.exception_size; i++) {
+		struct ipa_pm_exception *ex = ipa_drv_res->pm_init.exceptions;
+
+		result = of_property_read_string_index(pdev->dev.of_node,
+			"qcom,scaling-exceptions",
+			i * ipa_drv_res->pm_init.threshold_size,
+			&ex[i].usecase);
+		if (result) {
+			IPAERR("failed to read qcom,scaling-exceptions\n");
+			return -EFAULT;
+		}
+
+		for (j = 0; j < ipa_drv_res->pm_init.threshold_size; j++) {
+			const char *str;
+
+			result = of_property_read_string_index(
+				pdev->dev.of_node,
+				"qcom,scaling-exceptions",
+				i * ipa_drv_res->pm_init.threshold_size + j + 1,
+				&str);
+			if (result) {
+				IPAERR(
+					"failed to read qcom,scaling-exceptions\n");
+				return -EFAULT;
+			}
+
+			if (kstrtou32(str, 0, &ex[i].threshold[j])) {
+				IPAERR("error str=%s\n", str);
+				return -EFAULT;
+			}
+		}
+	}
+
+	return 0;
+}
+
 static int get_ipa_dts_configuration(struct platform_device *pdev,
 		struct ipa3_plat_drv_res *ipa_drv_res)
 {
@@ -5307,6 +5453,14 @@
 		}
 		kfree(ipa_tz_unlock_reg);
 	}
+
+	/* get IPA PM related information */
+	result = get_ipa_dts_pm_info(pdev, ipa_drv_res);
+	if (result) {
+		IPAERR("failed to get pm info from dts %d\n", result);
+		return result;
+	}
+
 	return 0;
 }
 
@@ -5844,11 +5998,16 @@
 		}
 	}
 
-	/*
-	 * Release transport IPA resource without waiting for inactivity timer
-	 */
-	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
-	ipa3_transport_release_resource(NULL);
+	if (ipa3_ctx->use_ipa_pm) {
+		ipa_pm_deactivate_all_deferred();
+	} else {
+		/*
+		 * Release transport IPA resource without waiting
+		 * for inactivity timer
+		 */
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+		ipa3_transport_release_resource(NULL);
+	}
 	IPADBG("Exit\n");
 
 	return 0;
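Note on the hunks above: ipa3_set_clock_plan_from_pm() translates the PM framework's bus-vote index into one of the discrete IPA clock rates and only reprograms the clock and bus vote while IPA clocks are actually on. A compact restatement of that index mapping, mirroring the checks in the function above (assuming the same ipa3_ctx->ctrl fields):

	/* Illustrative vote-index to clock-rate mapping (1=SVS, 2=NOM, 3=TURBO). */
	static u32 example_idx_to_clk_rate(int idx)
	{
		switch (idx) {
		case 1:
			return ipa3_ctx->ctrl->ipa_clk_rate_svs;
		case 2:
			return ipa3_ctx->ctrl->ipa_clk_rate_nominal;
		case 3:
			return ipa3_ctx->ctrl->ipa_clk_rate_turbo;
		default:
			return 0;	/* invalid vote index */
		}
	}
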
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 153548b..f72f41c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -910,10 +910,11 @@
 				eq = true;
 			} else {
 				rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
-				if (rt_tbl)
-					rt_tbl_idx = rt_tbl->idx;
+				if (rt_tbl == NULL ||
+					rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+					rt_tbl_idx = ~0;
 				else
-					rt_tbl_idx = ~0;
+					rt_tbl_idx = rt_tbl->idx;
 				bitmap = entry->rule.attrib.attrib_mask;
 				eq = false;
 			}
@@ -1705,6 +1706,10 @@
 {
 	int result, nbytes, cnt = 0;
 
+	/* deprecate if IPA PM is used */
+	if (ipa3_ctx->use_ipa_pm)
+		return 0;
+
 	result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
 	if (result < 0) {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -1716,6 +1721,45 @@
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
 }
 
+static ssize_t ipa3_pm_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, nbytes, cnt = 0;
+
+	if (!ipa3_ctx->use_ipa_pm)
+		return 0;
+
+	result = ipa_pm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Error in printing PM stat %d\n", result);
+		cnt += nbytes;
+	} else
+		cnt += result;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_pm_ex_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, nbytes, cnt = 0;
+
+	if (!ipa3_ctx->use_ipa_pm)
+		return 0;
+
+	result = ipa_pm_exceptions_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Error in printing PM stat %d\n", result);
+		cnt += nbytes;
+	} else
+		cnt += result;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+
 static void ipa_dump_status(struct ipahal_pkt_status *status)
 {
 	IPA_DUMP_STATUS_FIELD(status_opcode);
@@ -1940,6 +1984,15 @@
 	.read = ipa3_rm_read_stats,
 };
 
+static const struct file_operations ipa3_pm_stats = {
+	.read = ipa3_pm_read_stats,
+};
+
+
+static const struct file_operations ipa3_pm_ex_stats = {
+	.read = ipa3_pm_ex_read_stats,
+};
+
 const struct file_operations ipa3_active_clients = {
 	.read = ipa3_print_active_clients_log,
 	.write = ipa3_clear_active_clients_log,
@@ -2131,11 +2184,27 @@
 		goto fail;
 	}
 
-	dfile_rm_stats = debugfs_create_file("rm_stats",
-			read_only_mode, dent, 0, &ipa3_rm_stats);
-	if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
-		IPAERR("fail to create file for debug_fs rm_stats\n");
-		goto fail;
+	if (ipa3_ctx->use_ipa_pm) {
+		file = dfile_rm_stats = debugfs_create_file("pm_stats",
+			read_only_mode, dent, NULL, &ipa3_pm_stats);
+		if (!file || IS_ERR(file)) {
+			IPAERR("fail to create file for debug_fs pm_stats\n");
+			goto fail;
+		}
+
+		file = dfile_rm_stats = debugfs_create_file("pm_ex_stats",
+			read_only_mode, dent, NULL, &ipa3_pm_ex_stats);
+		if (!file || IS_ERR(file)) {
+			IPAERR("fail to create file for debugfs pm_ex_stats\n");
+			goto fail;
+		}
+	} else {
+		dfile_rm_stats = debugfs_create_file("rm_stats",
+				read_only_mode, dent, NULL, &ipa3_rm_stats);
+		if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+			IPAERR("fail to create file for debug_fs rm_stats\n");
+			goto fail;
+		}
 	}
 
 	dfile_status_stats = debugfs_create_file("status_stats",
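Note on the hunks above: when IPA PM is in use the debugfs node set swaps rm_stats for pm_stats and pm_ex_stats; each read handler renders into the shared dbg_buff and hands the result to simple_read_from_buffer(), so repeated and short reads behave correctly. A condensed sketch of that handler shape (the string content is illustrative):

	/* Illustrative debugfs read handler following the pattern above. */
	static ssize_t example_stats_read(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
	{
		int cnt;

		cnt = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, "pm clients: %d\n", 0);
		/* copies at most 'count' bytes and advances *ppos */
		return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
	}
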
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 26d5f5e..7f30a10 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -73,6 +73,8 @@
 
 #define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
 
+#define IPA_APPS_BW_FOR_PM 700
+
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -748,7 +750,10 @@
 	int inactive_cycles = 0;
 	int cnt;
 
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	if (ipa3_ctx->use_ipa_pm)
+		ipa_pm_activate_sync(sys->pm_hdl);
+	else
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	do {
 		cnt = ipa3_handle_rx_core(sys, true, true);
 		if (cnt == 0)
@@ -772,7 +777,10 @@
 
 	trace_poll_to_intr3(sys->ep->client);
 	ipa3_rx_switch_to_intr_mode(sys);
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	if (ipa3_ctx->use_ipa_pm)
+		ipa_pm_deferred_deactivate(sys->pm_hdl);
+	else
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 }
 
 static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
@@ -799,6 +807,32 @@
 	return HRTIMER_NORESTART;
 }
 
+static void ipa_pm_sys_pipe_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)p;
+
+	switch (event) {
+	case IPA_PM_CLIENT_ACTIVATED:
+		/*
+		 * this event is ignored as the sync version of activation
+		 * will be used.
+		 */
+		break;
+	case IPA_PM_REQUEST_WAKEUP:
+		/*
+		 * pipe will be unsuspended as part of
+		 * enabling IPA clocks
+		 */
+		ipa_pm_activate_sync(sys->pm_hdl);
+		ipa_pm_deferred_deactivate(sys->pm_hdl);
+		break;
+	default:
+		IPAERR("Unexpected event %d\n", event);
+		WARN_ON(1);
+		return;
+	}
+}
+
 /**
  * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
  * IPA EP configuration
@@ -846,6 +880,9 @@
 	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
 
 	if (!ep->sys) {
+		struct ipa_pm_register_params pm_reg;
+
+		memset(&pm_reg, 0, sizeof(pm_reg));
 		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
 		if (!ep->sys) {
 			IPAERR("failed to sys ctx for client %d\n",
@@ -884,6 +921,35 @@
 		hrtimer_init(&ep->sys->db_timer, CLOCK_MONOTONIC,
 			HRTIMER_MODE_REL);
 		ep->sys->db_timer.function = ipa3_ring_doorbell_timer_fn;
+
+		/* create IPA PM resources for handling polling mode */
+		if (ipa3_ctx->use_ipa_pm &&
+			IPA_CLIENT_IS_CONS(sys_in->client)) {
+			pm_reg.name = ipa_clients_strings[sys_in->client];
+			pm_reg.callback = ipa_pm_sys_pipe_cb;
+			pm_reg.user_data = ep->sys;
+			pm_reg.group = IPA_PM_GROUP_APPS;
+			result = ipa_pm_register(&pm_reg, &ep->sys->pm_hdl);
+			if (result) {
+				IPAERR("failed to create IPA PM client %d\n",
+					result);
+				goto fail_pm;
+			}
+
+			result = ipa_pm_associate_ipa_cons_to_client(
+				ep->sys->pm_hdl, sys_in->client);
+			if (result) {
+				IPAERR("failed to associate IPA PM client\n");
+				goto fail_gen2;
+			}
+
+			result = ipa_pm_set_perf_profile(ep->sys->pm_hdl,
+				IPA_APPS_BW_FOR_PM);
+			if (result) {
+				IPAERR("failed to set profile IPA PM client\n");
+				goto fail_gen2;
+			}
+		}
 	} else {
 		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
 	}
@@ -984,6 +1050,9 @@
 	return 0;
 
 fail_gen2:
+	if (ipa3_ctx->use_ipa_pm)
+		ipa_pm_deregister(ep->sys->pm_hdl);
+fail_pm:
 	destroy_workqueue(ep->sys->repl_wq);
 fail_wq2:
 	destroy_workqueue(ep->sys->wq);
@@ -1402,7 +1471,8 @@
 	sys = container_of(work, struct ipa3_sys_context, work);
 
 	if (sys->ep->napi_enabled) {
-		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+		if (!ipa3_ctx->use_ipa_pm)
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
 		sys->ep->client_notify(sys->ep->priv,
 				IPA_CLIENT_START_POLL, 0);
 	} else
@@ -3283,11 +3353,48 @@
 	}
 }
 
+void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
+{
+	bool clk_off;
+
+	atomic_set(&sys->curr_polling_state, 1);
+	ipa3_inc_acquire_wakelock();
+
+	/*
+	 * pm deactivate is done in wq context
+	 * or after NAPI poll
+	 */
+	if (ipa3_ctx->use_ipa_pm) {
+		clk_off = ipa_pm_activate(sys->pm_hdl);
+		if (!clk_off && sys->ep->napi_enabled) {
+			sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+			return;
+		}
+		queue_work(sys->wq, &sys->work);
+		return;
+	}
+
+	if (sys->ep->napi_enabled) {
+		struct ipa_active_client_logging_info log;
+
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
+		clk_off = ipa3_inc_client_enable_clks_no_block(
+			&log);
+		if (!clk_off) {
+			sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+			return;
+		}
+	}
+
+	queue_work(sys->wq, &sys->work);
+}
+
 static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_sys_context *sys;
 	struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
-	int clk_off;
 
 	if (!notify) {
 		IPAERR("gsi notify is NULL.\n");
@@ -3317,22 +3424,7 @@
 			/* put the gsi channel into polling mode */
 			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 				GSI_CHAN_MODE_POLL);
-			ipa3_inc_acquire_wakelock();
-			atomic_set(&sys->curr_polling_state, 1);
-			if (sys->ep->napi_enabled) {
-				struct ipa_active_client_logging_info log;
-
-				IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
-				clk_off = ipa3_inc_client_enable_clks_no_block(
-					&log);
-				if (!clk_off)
-					sys->ep->client_notify(sys->ep->priv,
-						IPA_CLIENT_START_POLL, 0);
-				else
-					queue_work(sys->wq, &sys->work);
-			} else {
-				queue_work(sys->wq, &sys->work);
-			}
+			__ipa_gsi_irq_rx_scedule_poll(sys);
 		}
 		break;
 	default:
@@ -3734,7 +3826,10 @@
 	if (cnt < weight) {
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 		ipa3_rx_switch_to_intr_mode(ep->sys);
-		ipa3_dec_client_disable_clks_no_block(&log);
+		if (ipa3_ctx->use_ipa_pm)
+			ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+		else
+			ipa3_dec_client_disable_clks_no_block(&log);
 	}
 
 	return cnt;
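Note on the hunks above: __ipa_gsi_irq_rx_scedule_poll() centralizes the interrupt-to-polling switch; under IPA PM it tries a non-blocking ipa_pm_activate() and either lets NAPI start polling immediately or defers to the workqueue, and the matching release is always a deferred deactivate once polling ends. A minimal sketch of the poll-completion side of that contract (helper name illustrative):

	/* Illustrative end of a NAPI poll cycle under IPA PM. */
	static void example_napi_poll_done(struct ipa3_sys_context *sys)
	{
		ipa3_rx_switch_to_intr_mode(sys);	 /* re-arm the interrupt */
		ipa_pm_deferred_deactivate(sys->pm_hdl); /* drop the vote lazily */
	}
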
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index beca549..0883e6f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1170,6 +1170,13 @@
 		goto bail;
 	}
 
+	if (entry->cookie != IPA_FLT_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u flt hdl id = %d\n",
+			entry->cookie, rules->add_after_hdl);
+		result = -EINVAL;
+		goto bail;
+	}
+
 	if (entry->tbl != tbl) {
 		IPAERR_RL("given entry does not match the table\n");
 		result = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 4d1baea..3e1d188 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -36,6 +36,7 @@
 #include "ipahal/ipahal_hw_stats.h"
 #include "../ipa_common_i.h"
 #include "ipa_uc_offload_i.h"
+#include "ipa_pm.h"
 
 #define DRV_NAME "ipa"
 #define NAT_DEV_NAME "ipaNatTable"
@@ -647,6 +648,7 @@
 	struct workqueue_struct *wq;
 	struct workqueue_struct *repl_wq;
 	struct ipa3_status_stats *status_stat;
+	u32 pm_hdl;
 	/* ordering is important - other immutable fields go below */
 };
 
@@ -869,6 +871,7 @@
 struct ipa3_active_clients {
 	struct mutex mutex;
 	atomic_t cnt;
+	int bus_vote_idx;
 };
 
 struct ipa3_wakelock_ref_cnt {
@@ -1314,6 +1317,7 @@
 	struct ipa_cne_evt ipa_cne_evt_req_cache[IPA_MAX_NUM_REQ_CACHE];
 	int num_ipa_cne_evt_req;
 	struct mutex ipa_cne_evt_lock;
+	bool use_ipa_pm;
 };
 
 struct ipa3_plat_drv_res {
@@ -1341,6 +1345,8 @@
 	bool tethered_flow_control;
 	u32 ipa_tz_unlock_reg_num;
 	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+	bool use_ipa_pm;
+	struct ipa_pm_init_params pm_init;
 };
 
 /**
@@ -2226,4 +2232,6 @@
 int ipa3_alloc_common_event_ring(void);
 int ipa3_allocate_dma_task_for_gsi(void);
 void ipa3_free_dma_task_for_gsi(void);
+int ipa3_set_clock_plan_from_pm(int idx);
+void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
 #endif /* _IPA3_I_H_ */
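Note before the new file below: ipa_pm.c implements the client API used throughout this patch. The typical flow is to register a named client with a callback, optionally associate an IPA consumer pipe, vote a throughput, then activate around traffic and deactivate (possibly deferred) when idle. A condensed usage sketch built only from calls that appear in this patch (the client name, consumer pipe and bandwidth value are illustrative):

	static void example_pm_cb(void *user_data, enum ipa_pm_cb_event event)
	{
		/* handle IPA_PM_CLIENT_ACTIVATED / IPA_PM_REQUEST_WAKEUP here */
	}

	static int example_pm_client(void)
	{
		struct ipa_pm_register_params params = {
			.name = "EXAMPLE",
			.callback = example_pm_cb,
			.group = IPA_PM_GROUP_DEFAULT,
		};
		u32 hdl;
		int ret;

		ret = ipa_pm_register(&params, &hdl);
		if (ret)
			return ret;

		ret = ipa_pm_associate_ipa_cons_to_client(hdl,
			IPA_CLIENT_USB_CONS);
		if (ret)
			goto fail;

		ret = ipa_pm_set_perf_profile(hdl, 1000);	/* Mbps vote */
		if (ret)
			goto fail;

		ret = ipa_pm_activate_sync(hdl);	/* bring clocks/bus up */
		if (ret)
			goto fail;

		ipa_pm_deferred_deactivate(hdl);	/* lazy release when idle */
		return 0;

	fail:
		ipa_pm_deregister(hdl);
		return ret;
	}
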
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
new file mode 100644
index 0000000..11c1737
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -0,0 +1,1301 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipa_pm.h"
+#include "ipa_i.h"
+
+static const char *client_state_to_str[IPA_PM_STATE_MAX] = {
+	__stringify(IPA_PM_DEACTIVATED),
+	__stringify(IPA_PM_DEACTIVATE_IN_PROGRESS),
+	__stringify(IPA_PM_ACTIVATE_IN_PROGRESS),
+	__stringify(IPA_PM_ACTIVATED),
+	__stringify(IPA_PM_ACTIVATED_PENDING_DEACTIVATION),
+	__stringify(IPA_PM_ACTIVATED_TIMER_SET),
+	__stringify(IPA_PM_ACTIVATED_PENDING_RESCHEDULE),
+};
+
+static const char *ipa_pm_group_to_str[IPA_PM_GROUP_MAX] = {
+	__stringify(IPA_PM_GROUP_DEFAULT),
+	__stringify(IPA_PM_GROUP_APPS),
+	__stringify(IPA_PM_GROUP_MODEM),
+};
+
+
+#define IPA_PM_DRV_NAME "ipa_pm"
+
+#define IPA_PM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_PM_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_PM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_PM_DBG_STATE(hdl, name, state) \
+	IPA_PM_DBG_LOW("Client[%d] %s: %s\n", hdl, name, \
+		client_state_to_str[state])
+
+
+#if IPA_PM_MAX_CLIENTS > 32
+#error max clients greater than 32; all bitmask types should be changed
+#endif
+
+/*
+ * struct ipa_pm_exception_list - holds information about an exception
+ * @clients: comma separated string of the client names in the exception
+ * @pending: number of clients in the exception not yet activated
+ * @bitmask: bitmask of the clients in the exception based on handle
+ * @threshold: the threshold values for the exception
+ */
+struct ipa_pm_exception_list {
+	char clients[IPA_PM_MAX_EX_CL];
+	int pending;
+	u32 bitmask;
+	int threshold[IPA_PM_THRESHOLD_MAX];
+};
+
+/*
+ * struct clk_scaling_db - holds information about thresholds and exceptions
+ * @lock: lock the bitmasks and thresholds
+ * @exception_list: pointer to the list of exceptions
+ * @work: work for clock scaling algorithm
+ * @active_client_bitmask: the bits represent handles in the clients array that
+ * contain non-null client
+ * @threshold_size: size of the throughput threshold
+ * @exception_size: size of the exception list
+ * @cur_vote: idx of the threshold
+ * @default_threshold: the thresholds used if no exception passes
+ * @current_threshold: the current threshold of the clock plan
+ */
+struct clk_scaling_db {
+	spinlock_t lock;
+	struct ipa_pm_exception_list exception_list[IPA_PM_EXCEPTION_MAX];
+	struct work_struct work;
+	u32 active_client_bitmask;
+	int threshold_size;
+	int exception_size;
+	int cur_vote;
+	int default_threshold[IPA_PM_THRESHOLD_MAX];
+	int *current_threshold;
+};
+
+/*
+ * ipa_pm state names
+ *
+ * Timer free states:
+ * @IPA_PM_DEACTIVATED: client starting state when registered
+ * @IPA_PM_DEACTIVATE_IN_PROGRESS: deactivate was called in progress of a client
+ *				   activating
+ * @IPA_PM_ACTIVATE_IN_PROGRESS: client is being activated by work_queue
+ * @IPA_PM_ACTIVATED: client is activated without any timers
+ *
+ * Timer set states:
+ * @IPA_PM_ACTIVATED_PENDING_DEACTIVATION: moves to deactivated once the
+ *				   timer expires
+ * @IPA_PM_ACTIVATED_TIMER_SET: client was activated while timer was set, so
+ *			 when the timer passes, client will still be activated
+ * @IPA_PM_ACTIVATED_PENDING_RESCHEDULE: state signifying extended timer when
+ *             a client is deferred_deactivated while a timer is still active
+ */
+enum ipa_pm_state {
+	IPA_PM_DEACTIVATED,
+	IPA_PM_DEACTIVATE_IN_PROGRESS,
+	IPA_PM_ACTIVATE_IN_PROGRESS,
+	IPA_PM_ACTIVATED,
+	IPA_PM_ACTIVATED_PENDING_DEACTIVATION,
+	IPA_PM_ACTIVATED_TIMER_SET,
+	IPA_PM_ACTIVATED_PENDING_RESCHEDULE,
+};
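+
+/*
+ * Illustrative lifecycle of a hypothetical client: register ->
+ * IPA_PM_DEACTIVATED; ipa_pm_activate() with clocks granted immediately ->
+ * IPA_PM_ACTIVATED; ipa_pm_deferred_deactivate() ->
+ * IPA_PM_ACTIVATED_PENDING_DEACTIVATION; if the delayed work then fires with
+ * no further activate -> IPA_PM_DEACTIVATED, while an intervening activate
+ * moves the client to IPA_PM_ACTIVATED_TIMER_SET instead.
+ */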
+
+#define IPA_PM_STATE_ACTIVE(state) \
+	(state == IPA_PM_ACTIVATED ||\
+		state == IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||\
+		state  == IPA_PM_ACTIVATED_TIMER_SET ||\
+		state == IPA_PM_ACTIVATED_PENDING_RESCHEDULE)
+
+#define IPA_PM_STATE_IN_PROGRESS(state) \
+	(state == IPA_PM_ACTIVATE_IN_PROGRESS \
+		|| state == IPA_PM_DEACTIVATE_IN_PROGRESS)
+
+/*
+ * struct ipa_pm_client - holds information about a specific IPA client
+ * @name: string name of the client
+ * @callback: pointer to the client's callback function
+ * @callback_params: pointer to the client's callback parameters
+ * @state: Activation state of the client
+ * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
+ * @group: the ipa_pm_group the client belongs to
+ * @hdl: handle of the client
+ * @throughput: the throughput of the client for clock scaling
+ * @state_lock: spinlock to lock the pm_states
+ * @activate_work: work for activate (blocking case)
+ * @deactivate_work: delayed work for deferred_deactivate function
+ * @complete: generic wait-for-completion handler
+ */
+struct ipa_pm_client {
+	char name[IPA_PM_MAX_EX_CL];
+	void (*callback)(void*, enum ipa_pm_cb_event);
+	void *callback_params;
+	enum ipa_pm_state state;
+	bool skip_clk_vote;
+	int group;
+	int hdl;
+	int throughput;
+	spinlock_t state_lock;
+	struct work_struct activate_work;
+	struct delayed_work deactivate_work;
+	struct completion complete;
+};
+
+/*
+ * struct ipa_pm_ctx - global ctx that will hold the client arrays and tput info
+ * @clients: array to the clients with the handle as its index
+ * @clients_by_pipe: array to the clients with endpoint as the index
+ * @wq: work queue for deferred deactivate, activate, and clk_scaling work
+ * @clk_scaling: clock scaling database
+ * @client_mutex: global mutex to lock the client arrays
+ * @aggregated_tput: aggregated tput value of all valid activated clients
+ * @group_tput: combined throughput for the groups
+ */
+struct ipa_pm_ctx {
+	struct ipa_pm_client *clients[IPA_PM_MAX_CLIENTS];
+	struct ipa_pm_client *clients_by_pipe[IPA3_MAX_NUM_PIPES];
+	struct workqueue_struct *wq;
+	struct clk_scaling_db clk_scaling;
+	struct mutex client_mutex;
+	int aggregated_tput;
+	int group_tput[IPA_PM_GROUP_MAX];
+};
+
+static struct ipa_pm_ctx *ipa_pm_ctx;
+
+/**
+ * pop_max_from_array() -pop the max and move the last element to where the
+ * max was popped
+ * @arr: array to be searched for max
+ * @n: size of the array
+ *
+ * Returns: max value of the array
+ */
+static int pop_max_from_array(int *arr, int *n)
+{
+	int i;
+	int max, max_idx;
+
+	max_idx = *n - 1;
+	max = 0;
+
+	if (*n == 0)
+		return 0;
+
+	for (i = 0; i < *n; i++) {
+		if (arr[i] > max) {
+			max = arr[i];
+			max_idx = i;
+		}
+	}
+	(*n)--;
+	arr[max_idx] = arr[*n];
+
+	return max;
+}
+
+/**
+ * calculate_throughput() - calculate the aggregated throughput
+ * based on active clients
+ *
+ * Returns: aggregated tput value
+ */
+static int calculate_throughput(void)
+{
+	int client_tput[IPA_PM_MAX_CLIENTS] = { 0 };
+	bool group_voted[IPA_PM_GROUP_MAX] = { false };
+	int i, n;
+	int max, second_max, aggregated_tput;
+	struct ipa_pm_client *client;
+
+	/* Create a basic array to hold throughputs*/
+	for (i = 0, n = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+		if (client != NULL && IPA_PM_STATE_ACTIVE(client->state)) {
+			/* default case */
+			if (client->group == IPA_PM_GROUP_DEFAULT) {
+				client_tput[n++] = client->throughput;
+			} else if (group_voted[client->group] == false) {
+				client_tput[n++] = ipa_pm_ctx->group_tput
+					[client->group];
+				group_voted[client->group] = true;
+			}
+		}
+	}
+	/* the array holds n entries; n is the count of active throughput votes */
+
+	aggregated_tput = 0;
+
+	/**
+	 * throughput algorithm:
+	 * 1) pop the max and second_max
+	 * 2) add the 2nd max to aggregated tput
+	 * 3) insert the value of max - 2nd max
+	 * 4) repeat until array is of size 1
+	 */
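+	/*
+	 * Illustrative trace (hypothetical throughputs, not from this patch):
+	 * client_tput = {1000, 600, 200} -> add 600, re-insert 400;
+	 * then from {400, 200} -> add 200; aggregated_tput = 800.
+	 */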
+	while (n > 1) {
+		max = pop_max_from_array(client_tput, &n);
+		second_max = pop_max_from_array(client_tput, &n);
+		client_tput[n++] = max - second_max;
+		aggregated_tput += second_max;
+	}
+
+	IPA_PM_DBG_LOW("Aggregated throughput: %d\n", aggregated_tput);
+
+	return aggregated_tput;
+}
+
+/**
+ * deactivate_client() - turn off the bit in the active client bitmask based on
+ * the handle passed in
+ * @hdl: The index of the client to be deactivated
+ */
+static void deactivate_client(u32 hdl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	ipa_pm_ctx->clk_scaling.active_client_bitmask &= ~(1 << hdl);
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+	IPA_PM_DBG_LOW("active bitmask: %x\n",
+		ipa_pm_ctx->clk_scaling.active_client_bitmask);
+}
+
+/**
+ * activate_client() - turn on the bit in the active client bitmask based on
+ * the handle passed in
+ * @hdl: The index of the client to be activated
+ */
+static void activate_client(u32 hdl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	ipa_pm_ctx->clk_scaling.active_client_bitmask |= (1 << hdl);
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+	IPA_PM_DBG_LOW("active bitmask: %x\n",
+		ipa_pm_ctx->clk_scaling.active_client_bitmask);
+}
+
+/**
+ * set_current_threshold() - set clk_scaling.current_threshold to the
+ * threshold of the first exception whose clients are all active, or to the
+ * default threshold if no exception passes
+ */
+static void set_current_threshold(void)
+{
+	int i;
+	struct clk_scaling_db *clk;
+	struct ipa_pm_exception_list *exception;
+	unsigned long flags;
+
+	clk = &ipa_pm_ctx->clk_scaling;
+
+	spin_lock_irqsave(&ipa_pm_ctx->clk_scaling.lock, flags);
+	for (i = 0; i < clk->exception_size; i++) {
+		exception = &clk->exception_list[i];
+		if (exception->pending == 0 && (exception->bitmask
+			& ~clk->active_client_bitmask) == 0) {
+			spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock,
+				 flags);
+			clk->current_threshold = exception->threshold;
+			IPA_PM_DBG("Exception %d set\n", i);
+			return;
+		}
+	}
+	clk->current_threshold = clk->default_threshold;
+	spin_unlock_irqrestore(&ipa_pm_ctx->clk_scaling.lock, flags);
+}
+
+/**
+ * do_clk_scaling() - set the clock based on the activated clients
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int do_clk_scaling(void)
+{
+	int i, tput;
+	int new_th_idx = 1;
+	struct clk_scaling_db *clk_scaling;
+
+	clk_scaling = &ipa_pm_ctx->clk_scaling;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	IPA_PM_DBG("clock scaling started\n");
+	tput = calculate_throughput();
+	ipa_pm_ctx->aggregated_tput = tput;
+	set_current_threshold();
+
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
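+	/*
+	 * Map tput to a vote index: e.g. with hypothetical thresholds
+	 * {1200, 3000}, tput <= 1200 gives idx 1, 1200 < tput <= 3000 gives
+	 * idx 2, and tput > 3000 gives idx 3.
+	 */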
+	for (i = 0; i < clk_scaling->threshold_size; i++) {
+		if (tput > clk_scaling->current_threshold[i])
+			new_th_idx++;
+	}
+
+	IPA_PM_DBG("old idx was at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+
+
+	if (ipa_pm_ctx->clk_scaling.cur_vote != new_th_idx) {
+		ipa_pm_ctx->clk_scaling.cur_vote = new_th_idx;
+		ipa3_set_clock_plan_from_pm(ipa_pm_ctx->clk_scaling.cur_vote);
+	}
+
+	IPA_PM_DBG("new idx is at %d\n", ipa_pm_ctx->clk_scaling.cur_vote);
+
+	return 0;
+}
+
+/**
+ * clock_scaling_func() - set the clock on a work queue
+ */
+static void clock_scaling_func(struct work_struct *work)
+{
+	do_clk_scaling();
+}
+
+/**
+ * activate_work_func - activate a client and vote for clock on a work queue
+ */
+static void activate_work_func(struct work_struct *work)
+{
+	struct ipa_pm_client *client;
+	bool dec_clk = false;
+	unsigned long flags;
+
+	client = container_of(work, struct ipa_pm_client, activate_work);
+	if (!client->skip_clk_vote)
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	if (client->state == IPA_PM_ACTIVATE_IN_PROGRESS) {
+		client->state = IPA_PM_ACTIVATED;
+	} else if (client->state == IPA_PM_DEACTIVATE_IN_PROGRESS) {
+		client->state = IPA_PM_DEACTIVATED;
+		dec_clk = true;
+	} else {
+		IPA_PM_ERR("unexpected state %d\n", client->state);
+		WARN_ON(1);
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	complete_all(&client->complete);
+
+	if (dec_clk) {
+		ipa_set_tag_process_before_gating(true);
+		if (!client->skip_clk_vote)
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		return;
+	}
+
+	activate_client(client->hdl);
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	client->callback(client->callback_params, IPA_PM_CLIENT_ACTIVATED);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	do_clk_scaling();
+}
+
+/**
+ * delayed_deferred_deactivate_work_func - deferred deactivate on a work queue
+ */
+static void delayed_deferred_deactivate_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	dwork = container_of(work, struct delayed_work, work);
+	client = container_of(dwork, struct ipa_pm_client, deactivate_work);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	switch (client->state) {
+	case IPA_PM_ACTIVATED_TIMER_SET:
+		client->state = IPA_PM_ACTIVATED;
+		goto bail;
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
+			msecs_to_jiffies(IPA_PM_DEFERRED_TIMEOUT));
+		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
+		goto bail;
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_DEACTIVATED;
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		ipa_set_tag_process_before_gating(true);
+		if (!client->skip_clk_vote)
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+
+		deactivate_client(client->hdl);
+		do_clk_scaling();
+		return;
+	default:
+		IPA_PM_ERR("unexpected state %d\n", client->state);
+		WARN_ON(1);
+		goto bail;
+	}
+
+bail:
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+}
+
+static int find_next_open_array_element(const char *name)
+{
+	int i, n;
+
+	n = -ENOBUFS;
+
+	for (i = IPA_PM_MAX_CLIENTS - 1; i >= 0; i--) {
+		if (ipa_pm_ctx->clients[i] == NULL) {
+			n = i;
+			continue;
+		}
+
+		if (strlen(name) == strlen(ipa_pm_ctx->clients[i]->name))
+			if (!strcmp(name, ipa_pm_ctx->clients[i]->name))
+				return -EEXIST;
+	}
+	return n;
+}
+
+/**
+ * add_client_to_exception_list() - add client to the exception list and
+ * update pending if necessary
+ * @hdl: index of the IPA client
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int add_client_to_exception_list(u32 hdl)
+{
+	int i;
+	struct ipa_pm_exception_list *exception;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (strnstr(exception->clients, ipa_pm_ctx->clients[hdl]->name,
+			strlen(exception->clients))) {
+			exception->pending--;
+
+			if (exception->pending < 0) {
+				WARN_ON(1);
+				exception->pending = 0;
+				mutex_unlock(&ipa_pm_ctx->client_mutex);
+				return -EPERM;
+			}
+			exception->bitmask |= (1 << hdl);
+		}
+	}
+	IPA_PM_DBG("%s added to exception list\n",
+		ipa_pm_ctx->clients[hdl]->name);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return 0;
+}
+
+/**
+ * remove_client_from_exception_list() - remove client from the exception list and
+ * update pending if necessary
+ * @hdl: index of the IPA client
+ *
+ * Returns: 0 if success, negative otherwise
+ */
+static int remove_client_from_exception_list(u32 hdl)
+{
+	int i;
+	struct ipa_pm_exception_list *exception;
+
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (exception->bitmask & (1 << hdl)) {
+			exception->pending++;
+			exception->bitmask &= ~(1 << hdl);
+		}
+	}
+	IPA_PM_DBG("Client %d removed from exception list\n", hdl);
+
+	return 0;
+}
+
+/**
+ * ipa_pm_init() - initialize IPA PM components
+ * @ipa_pm_init_params: parameters needed to fill exceptions and thresholds
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_init(struct ipa_pm_init_params *params)
+{
+	int i, j;
+	struct clk_scaling_db *clk_scaling;
+
+	if (params == NULL) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	if (params->threshold_size <= 0
+		|| params->threshold_size > IPA_PM_THRESHOLD_MAX) {
+		IPA_PM_ERR("Invalid threshold size\n");
+		return -EINVAL;
+	}
+
+	if (params->exception_size < 0
+		|| params->exception_size > IPA_PM_EXCEPTION_MAX) {
+		IPA_PM_ERR("Invalid exception size\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM initialization started\n");
+
+	if (ipa_pm_ctx != NULL) {
+		IPA_PM_ERR("Already initialized\n");
+		return -EPERM;
+	}
+
+
+	ipa_pm_ctx = kzalloc(sizeof(*ipa_pm_ctx), GFP_KERNEL);
+	if (!ipa_pm_ctx) {
+		IPA_PM_ERR(":kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	ipa_pm_ctx->wq = create_singlethread_workqueue("ipa_pm_activate");
+	if (!ipa_pm_ctx->wq) {
+		IPA_PM_ERR("create workqueue failed\n");
+		kfree(ipa_pm_ctx);
+		return -ENOMEM;
+	}
+
+	mutex_init(&ipa_pm_ctx->client_mutex);
+
+	/* Populate and init locks in clk_scaling_db */
+	clk_scaling = &ipa_pm_ctx->clk_scaling;
+	spin_lock_init(&clk_scaling->lock);
+	clk_scaling->threshold_size = params->threshold_size;
+	clk_scaling->exception_size = params->exception_size;
+	INIT_WORK(&clk_scaling->work, clock_scaling_func);
+
+	for (i = 0; i < params->threshold_size; i++)
+		clk_scaling->default_threshold[i] =
+			params->default_threshold[i];
+
+	/* Populate exception list*/
+	for (i = 0; i < params->exception_size; i++) {
+		strlcpy(clk_scaling->exception_list[i].clients,
+			params->exceptions[i].usecase, IPA_PM_MAX_EX_CL);
+		IPA_PM_DBG("Usecase: %s\n", params->exceptions[i].usecase);
+
+		/* Parse the commas to count the size of the clients */
+		for (j = 0; j < IPA_PM_MAX_EX_CL &&
+			clk_scaling->exception_list[i].clients[j]; j++) {
+			if (clk_scaling->exception_list[i].clients[j] == ',')
+				clk_scaling->exception_list[i].pending++;
+		}
+
+		clk_scaling->exception_list[i].pending++;
+		IPA_PM_DBG("Pending: %d\n", clk_scaling->
+			exception_list[i].pending);
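+		/*
+		 * e.g. a hypothetical usecase string "USB,DL" has one comma,
+		 * so pending ends up as 2 (one per client name).
+		 */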
+
+		/* populate the threshold */
+		for (j = 0; j < params->threshold_size; j++) {
+			clk_scaling->exception_list[i].threshold[j]
+			= params->exceptions[i].threshold[j];
+		}
+
+	}
+	IPA_PM_DBG("initialization success");
+
+	return 0;
+}
+
+int ipa_pm_destroy(void)
+{
+	IPA_PM_DBG("IPA PM destroy started\n");
+
+	if (ipa_pm_ctx == NULL) {
+		IPA_PM_ERR("Already destroyed\n");
+		return -EPERM;
+	}
+
+	destroy_workqueue(ipa_pm_ctx->wq);
+
+	kfree(ipa_pm_ctx);
+	ipa_pm_ctx = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa_pm_register() - register an IPA PM client with the PM
+ * @register_params: params for a client like throughput, callback, etc.
+ * @hdl: int pointer that will be used as an index to access the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: *hdl is replaced with the client index or -EEXIST if
+ * client is already registered
+ */
+int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl)
+{
+	struct ipa_pm_client *client;
+
+	if (params == NULL || hdl == NULL || params->name == NULL
+		|| params->callback == NULL) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM registering client\n");
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	*hdl = find_next_open_array_element(params->name);
+
+	if (*hdl > IPA_CLIENT_MAX) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("client is already registered or array is full\n");
+		return *hdl;
+	}
+
+	ipa_pm_ctx->clients[*hdl] = kzalloc(sizeof
+		(struct ipa_pm_client), GFP_KERNEL);
+	if (!ipa_pm_ctx->clients[*hdl]) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR(":kzalloc err.\n");
+		return -ENOMEM;
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	client = ipa_pm_ctx->clients[*hdl];
+
+	spin_lock_init(&client->state_lock);
+
+	INIT_DELAYED_WORK(&client->deactivate_work,
+		delayed_deferred_deactivate_work_func);
+
+	INIT_WORK(&client->activate_work, activate_work_func);
+
+	/* populate fields */
+	strlcpy(client->name, params->name, IPA_PM_MAX_EX_CL);
+	client->callback = params->callback;
+	client->callback_params = params->user_data;
+	client->group = params->group;
+	client->hdl = *hdl;
+	client->skip_clk_vote = params->skip_clk_vote;
+
+	/* add client to exception list */
+	if (add_client_to_exception_list(*hdl)) {
+		ipa_pm_deregister(*hdl);
+		IPA_PM_ERR("Fail to add client to exception_list\n");
+		return -EPERM;
+	}
+
+	IPA_PM_DBG("IPA PM client registered with handle %d\n", *hdl);
+	return 0;
+}
+
+/**
+ * ipa_pm_deregister() - deregister IPA client from the PM
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deregister(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	int i;
+	unsigned long flags;
+
+	if (hdl >= IPA_PM_MAX_CLIENTS) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	if (ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Client is Null\n");
+		return -EINVAL;
+	}
+
+	IPA_PM_DBG("IPA PM deregistering client\n");
+
+	client = ipa_pm_ctx->clients[hdl];
+	spin_lock_irqsave(&client->state_lock, flags);
+	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		wait_for_completion(&client->complete);
+		spin_lock_irqsave(&client->state_lock, flags);
+	}
+
+	if (IPA_PM_STATE_ACTIVE(client->state)) {
+		IPA_PM_DBG("Activated clients cannot be deregistered");
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	/* nullify pointers in pipe array */
+	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
+		if (ipa_pm_ctx->clients_by_pipe[i] == ipa_pm_ctx->clients[hdl])
+			ipa_pm_ctx->clients_by_pipe[i] = NULL;
+	}
+	kfree(client);
+	ipa_pm_ctx->clients[hdl] = NULL;
+
+	remove_client_from_exception_list(hdl);
+	IPA_PM_DBG("IPA PM client %d deregistered\n", hdl);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return 0;
+}
+
+/**
+ * ipa_pm_associate_ipa_cons_to_client() - map a consumer pipe to a client
+ * @hdl: index of the client to be mapped
+ * @consumer: the pipe/consumer name to be mapped to the client
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: multiple pipes are allowed to be mapped to a single client
+ */
+int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
+{
+	int idx;
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || consumer < 0 ||
+		consumer >= IPA_CLIENT_MAX) {
+		IPA_PM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	idx = ipa_get_ep_mapping(consumer);
+
+	IPA_PM_DBG("Mapping pipe %d to client %d\n", idx, hdl);
+
+	if (idx < 0) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_DBG("Pipe is not used\n");
+		return 0;
+	}
+
+	if (ipa_pm_ctx->clients[hdl] == NULL) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("Client is NULL\n");
+		return -EPERM;
+	}
+
+	if (ipa_pm_ctx->clients_by_pipe[idx] != NULL) {
+		mutex_unlock(&ipa_pm_ctx->client_mutex);
+		IPA_PM_ERR("Pipe is already mapped\n");
+		return -EPERM;
+	}
+	ipa_pm_ctx->clients_by_pipe[idx] = ipa_pm_ctx->clients[hdl];
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	IPA_PM_DBG("Pipe %d is mapped to client %d\n", idx, hdl);
+
+	return 0;
+}
+
+static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
+{
+	struct ipa_active_client_logging_info log_info;
+	int result = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+	if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+		if (sync) {
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			wait_for_completion(&client->complete);
+			spin_lock_irqsave(&client->state_lock, flags);
+		} else {
+			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			return -EINPROGRESS;
+		}
+	}
+
+	switch (client->state) {
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_ACTIVATED_TIMER_SET;
+	case IPA_PM_ACTIVATED:
+	case IPA_PM_ACTIVATED_TIMER_SET:
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	case IPA_PM_DEACTIVATED:
+		break;
+	default:
+		IPA_PM_ERR("Invalid State\n");
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return -EPERM;
+	}
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, client->name);
+	if (!client->skip_clk_vote) {
+		if (sync) {
+			client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			IPA_ACTIVE_CLIENTS_INC_SPECIAL(client->name);
+			spin_lock_irqsave(&client->state_lock, flags);
+		} else
+			result = ipa3_inc_client_enable_clks_no_block
+				 (&log_info);
+	}
+
+	/* we got the clocks */
+	if (result == 0) {
+		client->state = IPA_PM_ACTIVATED;
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		activate_client(client->hdl);
+		if (sync)
+			do_clk_scaling();
+		else
+			queue_work(ipa_pm_ctx->wq,
+				   &ipa_pm_ctx->clk_scaling.work);
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+		return 0;
+	}
+
+	client->state = IPA_PM_ACTIVATE_IN_PROGRESS;
+	init_completion(&client->complete);
+	queue_work(ipa_pm_ctx->wq, &client->activate_work);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+	return -EINPROGRESS;
+}
+
+/**
+ * ipa_pm_activate(): activate ipa client to vote for clock. Can be called
+ * from atomic context and returns -EINPROGRESS if cannot be done synchronously
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, -EINPROGRESS if operation cannot be done synchronously
+ * and other negatives on failure
+ */
+int ipa_pm_activate(u32 hdl)
+{
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], false);
+}
+
+/**
+ * ipa_pm_activate_sync(): activate ipa client to vote for clock synchronously.
+ * Cannot be called from an atomic context.
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_activate_sync(u32 hdl)
+{
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	return ipa_pm_activate_helper(ipa_pm_ctx->clients[hdl], true);
+}
+
+/**
+ * ipa_pm_deferred_deactivate(): schedule a timer to deactivate client and
+ * remove the clock vote. Can be called from atomic context (asynchronously)
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deferred_deactivate(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	client = ipa_pm_ctx->clients[hdl];
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	switch (client->state) {
+	case IPA_PM_ACTIVATE_IN_PROGRESS:
+		client->state = IPA_PM_DEACTIVATE_IN_PROGRESS;
+	case IPA_PM_DEACTIVATED:
+		IPA_PM_DBG_STATE(hdl, client->name, client->state);
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	case IPA_PM_ACTIVATED:
+		client->state = IPA_PM_ACTIVATED_PENDING_DEACTIVATION;
+		queue_delayed_work(ipa_pm_ctx->wq, &client->deactivate_work,
+			msecs_to_jiffies(IPA_PM_DEFERRED_TIMEOUT));
+		break;
+	case IPA_PM_ACTIVATED_TIMER_SET:
+	case IPA_PM_ACTIVATED_PENDING_DEACTIVATION:
+		client->state = IPA_PM_ACTIVATED_PENDING_RESCHEDULE;
+	case IPA_PM_DEACTIVATE_IN_PROGRESS:
+	case IPA_PM_ACTIVATED_PENDING_RESCHEDULE:
+		break;
+	}
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	return 0;
+}
+
+/**
+ * ipa_pm_deactivate_all_deferred(): cancel the deferred deactivation timers
+ * and immediately remove the IPA clock votes for those clients
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deactivate_all_deferred(void)
+{
+	int i;
+	bool run_algorithm = false;
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+
+		if (client == NULL)
+			continue;
+
+		cancel_delayed_work_sync(&client->deactivate_work);
+
+		if (IPA_PM_STATE_IN_PROGRESS(client->state)) {
+			wait_for_completion(&client->complete);
+			continue;
+		}
+
+		spin_lock_irqsave(&client->state_lock, flags);
+		IPA_PM_DBG_STATE(client->hdl, client->name, client->state);
+
+		if (client->state == IPA_PM_ACTIVATED_TIMER_SET) {
+			client->state = IPA_PM_ACTIVATED;
+			IPA_PM_DBG_STATE(client->hdl, client->name,
+				client->state);
+			spin_unlock_irqrestore(&client->state_lock, flags);
+		} else if (client->state ==
+			IPA_PM_ACTIVATED_PENDING_DEACTIVATION ||
+			client->state ==
+			IPA_PM_ACTIVATED_PENDING_RESCHEDULE) {
+			run_algorithm = true;
+			client->state = IPA_PM_DEACTIVATED;
+			IPA_PM_DBG_STATE(client->hdl, client->name,
+				client->state);
+			spin_unlock_irqrestore(&client->state_lock, flags);
+			ipa_set_tag_process_before_gating(true);
+			if (!client->skip_clk_vote)
+				IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+			deactivate_client(client->hdl);
+		} else /* if activated or deactivated, we do nothing */
+			spin_unlock_irqrestore(&client->state_lock, flags);
+	}
+
+	if (run_algorithm)
+		do_clk_scaling();
+
+	return 0;
+}
+
+/**
+ * ipa_pm_deactivate_sync(): deactivate ipa client and remove the clock vote.
+ * Cannot be called from atomic context.
+ * @hdl: index of the client in the array
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_deactivate_sync(u32 hdl)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+		IPA_PM_ERR("Invalid Param\n");
+		return -EINVAL;
+	}
+
+	client = ipa_pm_ctx->clients[hdl];
+
+	cancel_delayed_work_sync(&client->deactivate_work);
+
+	if (IPA_PM_STATE_IN_PROGRESS(client->state))
+		wait_for_completion(&client->complete);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+
+	if (client->state == IPA_PM_DEACTIVATED) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		return 0;
+	}
+
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	/* else case (Deactivates all Activated cases)*/
+	ipa_set_tag_process_before_gating(true);
+	if (!client->skip_clk_vote)
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL(client->name);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	client->state = IPA_PM_DEACTIVATED;
+	IPA_PM_DBG_STATE(hdl, client->name, client->state);
+	spin_unlock_irqrestore(&client->state_lock, flags);
+	deactivate_client(hdl);
+	do_clk_scaling();
+
+	return 0;
+}
+
+/**
+ * ipa_pm_handle_suspend(): calls the callbacks of suspended clients to wake up
+ * @pipe_bitmask: the bits represent the indexes of the pipes whose clients
+ * are to be woken up
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_handle_suspend(u32 pipe_bitmask)
+{
+	int i;
+	struct ipa_pm_client *client;
+	bool client_notified[IPA_PM_MAX_CLIENTS] = { false };
+
+	IPA_PM_DBG_LOW("bitmask: %d",  pipe_bitmask);
+
+	if (pipe_bitmask == 0)
+		return 0;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	for (i = 0; i < IPA3_MAX_NUM_PIPES; i++) {
+		if (pipe_bitmask & (1 << i)) {
+			client = ipa_pm_ctx->clients_by_pipe[i];
+			if (client && client_notified[client->hdl] == false) {
+				client->callback(client->callback_params,
+					IPA_PM_REQUEST_WAKEUP);
+				client_notified[client->hdl] = true;
+			}
+		}
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+	return 0;
+}
+
+/**
+ * ipa_pm_set_perf_profile(): Adds/changes the throughput requirement to IPA PM
+ * to be used for clock scaling
+ * @hdl: index of the client in the array
+ * @throughput: the new throughput value to be set for that client
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_pm_set_perf_profile(u32 hdl, int throughput)
+{
+	struct ipa_pm_client *client;
+	unsigned long flags;
+
+	if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL
+		|| throughput < 0) {
+		IPA_PM_ERR("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	client = ipa_pm_ctx->clients[hdl];
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		IPA_PM_DBG_LOW("Old throughput: %d\n",  client->throughput);
+	else
+		IPA_PM_DBG_LOW("old Group %d throughput: %d\n",
+			client->group, ipa_pm_ctx->group_tput[client->group]);
+
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		client->throughput = throughput;
+	else
+		ipa_pm_ctx->group_tput[client->group] = throughput;
+
+	if (client->group == IPA_PM_GROUP_DEFAULT)
+		IPA_PM_DBG_LOW("New throughput: %d\n",  client->throughput);
+	else
+		IPA_PM_DBG_LOW("New Group %d throughput: %d\n",
+			client->group, ipa_pm_ctx->group_tput[client->group]);
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	spin_lock_irqsave(&client->state_lock, flags);
+	if (IPA_PM_STATE_ACTIVE(client->state) || (client->group !=
+			IPA_PM_GROUP_DEFAULT)) {
+		spin_unlock_irqrestore(&client->state_lock, flags);
+		do_clk_scaling();
+		return 0;
+	}
+	spin_unlock_irqrestore(&client->state_lock, flags);
+
+	return 0;
+}
+
+/**
+ * ipa_pm_stat() - print PM stat
+ * @buf: [in] The user buff used to print
+ * @size: [in] The size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a picture of the clients in the PM and the throughput, threshold and cur vote
+ */
+int ipa_pm_stat(char *buf, int size)
+{
+	struct ipa_pm_client *client;
+	struct clk_scaling_db *clk = &ipa_pm_ctx->clk_scaling;
+	int i, j, tput, cnt = 0, result = 0;
+	unsigned long flags;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+
+	result = scnprintf(buf + cnt, size - cnt, "\n\nCurrent threshold: [");
+	cnt += result;
+
+	for (i = 0; i < clk->threshold_size; i++) {
+		result = scnprintf(buf + cnt, size - cnt,
+			"%d, ", clk->current_threshold[i]);
+		cnt += result;
+	}
+
+	result = scnprintf(buf + cnt, size - cnt, "\b\b]\n");
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"Aggregated tput: %d, Cur vote: %d",
+		ipa_pm_ctx->aggregated_tput, clk->cur_vote);
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt, "\n\nRegistered Clients:\n");
+	cnt += result;
+
+
+	for (i = 0; i < IPA_PM_MAX_CLIENTS; i++) {
+		client = ipa_pm_ctx->clients[i];
+
+		if (client == NULL)
+			continue;
+
+		spin_lock_irqsave(&client->state_lock, flags);
+		if (client->group == IPA_PM_GROUP_DEFAULT)
+			tput = client->throughput;
+		else
+			tput = ipa_pm_ctx->group_tput[client->group];
+
+		result = scnprintf(buf + cnt, size - cnt,
+		"Client[%d]: %s State:%s\nGroup: %s Throughput: %d Pipes: ",
+			i, client->name, client_state_to_str[client->state],
+			ipa_pm_group_to_str[client->group], tput);
+		cnt += result;
+
+		for (j = 0; j < IPA3_MAX_NUM_PIPES; j++) {
+			if (ipa_pm_ctx->clients_by_pipe[j] == client) {
+				result = scnprintf(buf + cnt, size - cnt,
+					"%d, ", j);
+				cnt += result;
+			}
+		}
+
+		result = scnprintf(buf + cnt, size - cnt, "\b\b\n\n");
+		cnt += result;
+		spin_unlock_irqrestore(&client->state_lock, flags);
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return cnt;
+}
+
+/**
+ * ipa_pm_exceptions_stat() - print PM exceptions stat
+ * @buf: [in] The user buff used to print
+ * @size: [in] The size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a full picture of the exceptions in the PM
+ */
+int ipa_pm_exceptions_stat(char *buf, int size)
+{
+	int i, j, cnt = 0, result = 0;
+	struct ipa_pm_exception_list *exception;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	result = scnprintf(buf + cnt, size - cnt, "\n");
+	cnt += result;
+
+	mutex_lock(&ipa_pm_ctx->client_mutex);
+	for (i = 0; i < ipa_pm_ctx->clk_scaling.exception_size; i++) {
+		exception = &ipa_pm_ctx->clk_scaling.exception_list[i];
+		if (exception == NULL) {
+			result = scnprintf(buf + cnt, size - cnt,
+			"Exception %d is NULL\n\n", i);
+			cnt += result;
+			continue;
+		}
+
+		result = scnprintf(buf + cnt, size - cnt,
+			"Exception %d: %s\nPending: %d Bitmask: %d Threshold: ["
+			, i, exception->clients, exception->pending,
+			exception->bitmask);
+		cnt += result;
+		for (j = 0; j < ipa_pm_ctx->clk_scaling.threshold_size; j++) {
+			result = scnprintf(buf + cnt, size - cnt,
+				"%d, ", exception->threshold[j]);
+			cnt += result;
+		}
+		result = scnprintf(buf + cnt, size - cnt, "\b\b]\n\n");
+		cnt += result;
+	}
+	mutex_unlock(&ipa_pm_ctx->client_mutex);
+
+	return cnt;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
new file mode 100644
index 0000000..ca022b5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_PM_H_
+#define _IPA_PM_H_
+
+#include <linux/msm_ipa.h>
+
+/* internal to ipa */
+#define IPA_PM_MAX_CLIENTS 10
+#define IPA_PM_MAX_EX_CL 64
+#define IPA_PM_THRESHOLD_MAX 2
+#define IPA_PM_EXCEPTION_MAX 2
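+/* deferred deactivate delay, in milliseconds (used via msecs_to_jiffies) */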
+#define IPA_PM_DEFERRED_TIMEOUT 100
+#define IPA_PM_STATE_MAX 7
+
+/*
+ * ipa_pm group names
+ *
+ * Default stands for individual clients while other groups share one throughput
+ * Some groups also have special behavior, e.g. the modem group does not vote
+ * for clock but is still accounted for in clock scaling while activated
+ */
+enum ipa_pm_group {
+	IPA_PM_GROUP_DEFAULT,
+	IPA_PM_GROUP_APPS,
+	IPA_PM_GROUP_MODEM,
+	IPA_PM_GROUP_MAX,
+};
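+
+/*
+ * For example (illustrative): two clients registered in IPA_PM_GROUP_APPS
+ * contribute a single shared group throughput vote to clock scaling, whereas
+ * each IPA_PM_GROUP_DEFAULT client votes its own throughput.
+ */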
+
+/*
+ * ipa_pm_cb_event
+ *
+ * specifies what kind of callback is being called.
+ * IPA_PM_CLIENT_ACTIVATED: the client has completed asynchronous activation
+ * IPA_PM_REQUEST_WAKEUP: wake up the client after it has been suspended
+ */
+enum ipa_pm_cb_event {
+	IPA_PM_CLIENT_ACTIVATED,
+	IPA_PM_REQUEST_WAKEUP,
+	IPA_PM_CB_EVENT_MAX,
+};
+
+/*
+ * struct ipa_pm_exception - clients included in exception and its threshold
+ * @usecase: comma separated client names
+ * @threshold: the threshold values for the exception
+ */
+struct ipa_pm_exception {
+	const char *usecase;
+	int threshold[IPA_PM_THRESHOLD_MAX];
+};
+
+/*
+ * struct ipa_pm_init_params - parameters needed for initializing the pm
+ * @default_threshold: the thresholds used if no exception passes
+ * @threshold_size: size of the threshold array
+ * @exceptions: list of exceptions for the pm
+ * @exception_size: size of the exception_list
+ */
+struct ipa_pm_init_params {
+	int default_threshold[IPA_PM_THRESHOLD_MAX];
+	int threshold_size;
+	struct ipa_pm_exception exceptions[IPA_PM_EXCEPTION_MAX];
+	int exception_size;
+};
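+
+/*
+ * Illustrative usage sketch; the threshold values and the "USB,DL" usecase
+ * string below are hypothetical, not taken from this change:
+ *
+ *	struct ipa_pm_init_params params = {
+ *		.threshold_size = 2,
+ *		.default_threshold = { 600, 1000 },
+ *		.exception_size = 1,
+ *		.exceptions[0] = {
+ *			.usecase = "USB,DL",
+ *			.threshold = { 800, 1200 },
+ *		},
+ *	};
+ *
+ *	if (ipa_pm_init(&params))
+ *		pr_err("ipa_pm_init failed\n");
+ */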
+
+/*
+ * struct ipa_pm_register_params - parameters needed to register a client
+ * @name: name of the client
+ * @callback: pointer to the client's callback function
+ * @user_data: pointer to the client's callback parameters
+ * @group: group number of the client
+ * @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
+ */
+struct ipa_pm_register_params {
+	const char *name;
+	void (*callback)(void*, enum ipa_pm_cb_event);
+	void *user_data;
+	enum ipa_pm_group group;
+	bool skip_clk_vote;
+};
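+
+/*
+ * Illustrative registration sketch; "MY CLIENT" and my_client_cb are
+ * hypothetical names, not part of this change:
+ *
+ *	static void my_client_cb(void *p, enum ipa_pm_cb_event event) { }
+ *
+ *	struct ipa_pm_register_params reg = {
+ *		.name = "MY CLIENT",
+ *		.callback = my_client_cb,
+ *		.group = IPA_PM_GROUP_DEFAULT,
+ *		.skip_clk_vote = false,
+ *	};
+ *	u32 hdl;
+ *
+ *	if (!ipa_pm_register(&reg, &hdl))
+ *		ipa_pm_activate_sync(hdl);
+ */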
+
+int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl);
+int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer);
+int ipa_pm_activate(u32 hdl);
+int ipa_pm_activate_sync(u32 hdl);
+int ipa_pm_deferred_deactivate(u32 hdl);
+int ipa_pm_deactivate_sync(u32 hdl);
+int ipa_pm_set_perf_profile(u32 hdl, int throughput);
+int ipa_pm_deregister(u32 hdl);
+
+/* IPA Internal Functions */
+int ipa_pm_init(struct ipa_pm_init_params *params);
+int ipa_pm_destroy(void);
+int ipa_pm_handle_suspend(u32 pipe_bitmask);
+int ipa_pm_deactivate_all_deferred(void);
+int ipa_pm_stat(char *buf, int size);
+int ipa_pm_exceptions_stat(char *buf, int size);
+
+#endif /* _IPA_PM_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 61bccc6..4e8c233 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1214,7 +1214,6 @@
 /* voting for bus BW to ipa_rm*/
 int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
 {
-	struct ipa_rm_perf_profile profile;
 	int ret;
 
 	if (bw_mbps == NULL) {
@@ -1222,16 +1221,13 @@
 		return -EINVAL;
 	}
 
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = *bw_mbps;
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
-			&profile);
+	ret = ipa3_wwan_set_modem_perf_profile(*bw_mbps);
 	if (ret)
 		IPAWANERR("Failed to set perf profile to BW %u\n",
-			profile.max_supported_bandwidth_mbps);
+			*bw_mbps);
 	else
 		IPAWANDBG("Succeeded to set perf profile to BW %u\n",
-			profile.max_supported_bandwidth_mbps);
+			*bw_mbps);
 
 	return ret;
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d5d8503..d3a4ba0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -207,6 +207,10 @@
 
 void ipa3_q6_handshake_complete(bool ssr_bootup);
 
+int ipa3_wwan_set_modem_perf_profile(int throughput);
+
+int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
+
 void ipa3_qmi_init(void);
 
 void ipa3_qmi_cleanup(void);
@@ -323,6 +327,11 @@
 
 static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
 
+static inline int ipa3_wwan_set_modem_perf_profile(int throughput)
+{
+	return -EPERM;
+}
+
 static inline void ipa3_qmi_init(void)
 {
 }
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index dd691f1..8bd7d30 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1149,6 +1149,13 @@
 		goto bail;
 	}
 
+	if (entry->cookie != IPA_RT_RULE_COOKIE) {
+		IPAERR_RL("Invalid cookie value =  %u rule %d in rt tbls\n",
+			entry->cookie, rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
 	if (entry->tbl != tbl) {
 		IPAERR_RL("given rt rule does not match the table\n");
 		ret = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5206337..f717264 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4855,11 +4855,8 @@
 		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
 		/* queue a work to start polling if don't have one */
 		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
-		if (!atomic_read(&ep->sys->curr_polling_state)) {
-			ipa3_inc_acquire_wakelock();
-			atomic_set(&ep->sys->curr_polling_state, 1);
-			queue_work(ep->sys->wq, &ep->sys->work);
-		}
+		if (!atomic_read(&ep->sys->curr_polling_state))
+			__ipa_gsi_irq_rx_scedule_poll(ep->sys);
 	}
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 56fed2a..a8d5342 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -595,6 +595,15 @@
 	return pyld;
 }
 
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dummy(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	IPAHAL_ERR("no construct function for IMM_CMD=%s, IPA ver %d\n",
+		ipahal_imm_cmd_name_str(cmd), ipahal_ctx->hw_type);
+	WARN_ON(1);
+	return NULL;
+}
+
 /*
  * struct ipahal_imm_cmd_obj - immediate command H/W information for
  *  specific IPA version
@@ -668,8 +677,8 @@
 		12},
 	/* NAT_DMA was renamed to TABLE_DMA for IPAv4 */
 	[IPA_HW_v4_0][IPA_IMM_CMD_NAT_DMA] = {
-		NULL,
-		-1 },
+		ipa_imm_cmd_construct_dummy,
+		-1},
 	[IPA_HW_v4_0][IPA_IMM_CMD_TABLE_DMA] = {
 		ipa_imm_cmd_construct_table_dma_ipav4,
 		14},
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index dc71414..74f5bbd 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1961,20 +1961,3 @@
 
 	valmask->mask = valmask->val;
 }
-
-void ipahal_get_status_ep_valmask(int pipe_num,
-	struct ipahal_reg_valmask *valmask)
-{
-	if (!valmask) {
-		IPAHAL_ERR("Input error\n");
-		return;
-	}
-
-	valmask->val =
-		(pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
-		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
-
-	valmask->mask =
-		IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
-		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
-}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index a2864cd..df3c976 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -541,8 +541,6 @@
 void ipahal_get_fltrt_hash_flush_valmask(
 	struct ipahal_reg_fltrt_hash_flush *flush,
 	struct ipahal_reg_valmask *valmask);
-void ipahal_get_status_ep_valmask(int pipe_num,
-	struct ipahal_reg_valmask *valmask);
 
 #endif /* _IPAHAL_REG_H_ */
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 5095e1f..f08e1e3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -142,6 +142,9 @@
 	u32 ipa3_to_apps_hdl;
 	struct mutex pipe_handle_guard;
 	struct mutex add_mux_channel_lock;
+	u32 pm_hdl;
+	u32 q6_pm_hdl;
+	u32 q6_teth_pm_hdl;
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1134,8 +1137,14 @@
 
 send:
 	/* IPA_RM checking start */
-	ret = ipa_rm_inactivity_timer_request_resource(
-		IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ipa3_ctx->use_ipa_pm) {
+		/* activate the modem pm for clock scaling */
+		ipa_pm_activate(rmnet_ipa3_ctx->q6_pm_hdl);
+		ret = ipa_pm_activate(rmnet_ipa3_ctx->pm_hdl);
+	} else {
+		ret = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_WWAN_0_PROD);
+	}
 	if (ret == -EINPROGRESS) {
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
@@ -1164,9 +1173,15 @@
 	dev->stats.tx_bytes += skb->len;
 	ret = NETDEV_TX_OK;
 out:
-	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0)
-		ipa_rm_inactivity_timer_release_resource(
-			IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
+		if (ipa3_ctx->use_ipa_pm) {
+			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
+			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
+		} else {
+			ipa_rm_inactivity_timer_release_resource(
+				IPA_RM_RESOURCE_WWAN_0_PROD);
+		}
+	}
 	return ret;
 }
 
@@ -1218,9 +1233,15 @@
 		netif_wake_queue(wwan_ptr->net);
 	}
 
-	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0)
-		ipa_rm_inactivity_timer_release_resource(
+	if (atomic_read(&wwan_ptr->outstanding_pkts) == 0) {
+		if (ipa3_ctx->use_ipa_pm) {
+			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->pm_hdl);
+			ipa_pm_deferred_deactivate(rmnet_ipa3_ctx->q6_pm_hdl);
+		} else {
+			ipa_rm_inactivity_timer_release_resource(
 			IPA_RM_RESOURCE_WWAN_0_PROD);
+		}
+	}
 	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
 	dev_kfree_skb_any(skb);
 }
@@ -1858,6 +1879,91 @@
 		return;
 	}
 }
+
+int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state)
+{
+	if (!state)
+		return -EINVAL;
+
+	if (!ipa_pm_is_used())
+		return 0;
+
+	if (state->up)
+		return ipa_pm_activate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
+	else
+		return ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_teth_pm_hdl);
+}
+
+/**
+ * ipa3_q6_register_pm - Register modem clients for PM
+ *
+ * This function will register 2 clients with IPA PM to represent the modem
+ * in clock scaling calculation:
+ *	- "EMB MODEM" - this client will be activated with embedded traffic
+ *	- "TETH MODEM" - this client will be activated by IPACM on offload to
+ *	  modem.
+ */
+static int ipa3_q6_register_pm(void)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = "EMB MODEM";
+	pm_reg.group = IPA_PM_GROUP_MODEM;
+	pm_reg.skip_clk_vote = true;
+	result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->q6_pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+
+	pm_reg.name = "TETH MODEM";
+	pm_reg.group = IPA_PM_GROUP_MODEM;
+	pm_reg.skip_clk_vote = true;
+	result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->q6_teth_pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+
+	return 0;
+}
+
+static void ipa3_q6_deregister_pm(void)
+{
+	ipa_pm_deactivate_sync(rmnet_ipa3_ctx->q6_pm_hdl);
+	ipa_pm_deregister(rmnet_ipa3_ctx->q6_pm_hdl);
+}
+
+int ipa3_wwan_set_modem_perf_profile(int throughput)
+{
+	struct ipa_rm_perf_profile profile;
+	int ret;
+
+
+	if (ipa3_ctx->use_ipa_pm) {
+		ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_pm_hdl,
+			throughput);
+		if (ret)
+			return ret;
+		ret = ipa_pm_set_perf_profile(rmnet_ipa3_ctx->q6_teth_pm_hdl,
+			throughput);
+	} else {
+		memset(&profile, 0, sizeof(profile));
+		profile.max_supported_bandwidth_mbps = throughput;
+		ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	}
+
+	return ret;
+}
+
 static int ipa3_q6_initialize_rm(void)
 {
 	struct ipa_rm_create_params create_params;
@@ -2075,6 +2181,120 @@
 	schedule_work(&ipa3_scheduled_probe);
 }
 
+static void ipa_pm_wwan_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	struct net_device *dev = (struct net_device *)p;
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	IPAWANDBG_LOW("event %d\n", event);
+	switch (event) {
+	case IPA_PM_CLIENT_ACTIVATED:
+		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+			complete_all(&wwan_ptr->resource_granted_completion);
+			break;
+		}
+		ipa3_rm_resource_granted(dev);
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+static int ipa3_wwan_register_netdev_pm_client(struct net_device *dev)
+{
+	int result;
+	struct ipa_pm_register_params pm_reg;
+
+	memset(&pm_reg, 0, sizeof(pm_reg));
+	pm_reg.name = IPA_NETDEV()->name;
+	pm_reg.user_data = dev;
+	pm_reg.callback = ipa_pm_wwan_pm_cb;
+	pm_reg.group = IPA_PM_GROUP_APPS;
+	result = ipa_pm_register(&pm_reg, &rmnet_ipa3_ctx->pm_hdl);
+	if (result) {
+		IPAERR("failed to create IPA PM client %d\n", result);
+		return result;
+	}
+	return 0;
+}
+
+static void ipa3_wwan_deregister_netdev_pm_client(void)
+{
+	ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
+	ipa_pm_deregister(rmnet_ipa3_ctx->pm_hdl);
+}
+
+static int ipa3_wwan_create_wwan_rm_resource(struct net_device *dev)
+{
+	struct ipa_rm_create_params ipa_rm_params;
+	struct ipa_rm_perf_profile profile;
+	int ret;
+
+	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+	ipa_rm_params.reg_params.user_data = dev;
+	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
+	ret = ipa_rm_create_resource(&ipa_rm_params);
+	if (ret) {
+		pr_err("%s: unable to create resourse %d in IPA RM\n",
+			__func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+		return ret;
+	}
+	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_INACTIVITY_TIMER);
+	if (ret) {
+		pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
+			__func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto timer_init_err;
+	}
+	/* add dependency */
+	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+		&profile);
+	if (ret)
+		goto set_perf_err;
+
+	return 0;
+
+set_perf_err:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+add_dpnd_err:
+	ipa_rm_inactivity_timer_destroy(
+		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+timer_init_err:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	return ret;
+}
+
+static void ipa3_wwan_delete_wwan_rm_resource(void)
+{
+	int ret;
+
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+		ret);
+	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR(
+		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+}
+
 /**
  * ipa3_wwan_probe() - Initialized the module and registers as a
  * network interface to the network stack
@@ -2092,8 +2312,6 @@
 {
 	int ret, i;
 	struct net_device *dev;
-	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
-	struct ipa_rm_perf_profile profile;			/* IPA_RM */
 
 	pr_info("rmnet_ipa3 started initialization\n");
 
@@ -2187,7 +2405,10 @@
 
 	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
 		/* IPA_RM configuration starts */
-		ret = ipa3_q6_initialize_rm();
+		if (ipa3_ctx->use_ipa_pm)
+			ret = ipa3_q6_register_pm();
+		else
+			ret = ipa3_q6_initialize_rm();
 		if (ret) {
 			IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
 				__func__, ret);
@@ -2195,36 +2416,14 @@
 		}
 	}
 
-	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
-	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
-	ipa_rm_params.reg_params.user_data = dev;
-	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
-	ret = ipa_rm_create_resource(&ipa_rm_params);
+	if (ipa3_ctx->use_ipa_pm)
+		ret = ipa3_wwan_register_netdev_pm_client(dev);
+	else
+		ret = ipa3_wwan_create_wwan_rm_resource(dev);
 	if (ret) {
-		pr_err("%s: unable to create resourse %d in IPA RM\n",
-		       __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
-		goto create_rsrc_err;
+		IPAWANERR("fail to create/register pm resources\n");
+		goto fail_pm;
 	}
-	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
-					   IPA_RM_INACTIVITY_TIMER);
-	if (ret) {
-		pr_err("%s: ipa rm timer init failed %d on resourse %d\n",
-		       __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
-		goto timer_init_err;
-	}
-	/* add dependency */
-	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-			IPA_RM_RESOURCE_Q6_CONS);
-	if (ret)
-		goto add_dpnd_err;
-	/* setup Performance profile */
-	memset(&profile, 0, sizeof(profile));
-	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
-	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
-			&profile);
-	if (ret)
-		goto set_perf_err;
-	/* IPA_RM configuration ends */
 
 	/* Enable SG support in netdevice. */
 	if (ipa3_rmnet_res.ipa_advertise_sg_support)
@@ -2260,28 +2459,18 @@
 		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
 	unregister_netdev(dev);
 set_perf_err:
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (ret)
-		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
-			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
-			ret);
-add_dpnd_err:
-	ret = ipa_rm_inactivity_timer_destroy(
-		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
-	if (ret)
-		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
-timer_init_err:
-	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	if (ret)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
-create_rsrc_err:
-
-	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
-		ipa3_q6_deinitialize_rm();
-
+	if (ipa3_ctx->use_ipa_pm)
+		ipa3_wwan_deregister_netdev_pm_client();
+	else
+		ipa3_wwan_delete_wwan_rm_resource();
+fail_pm:
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		if (ipa3_ctx->use_ipa_pm)
+			ipa3_q6_deregister_pm();
+		else
+			ipa3_q6_deinitialize_rm();
+	}
 q6_init_err:
 	free_netdev(dev);
 	rmnet_ipa3_ctx->wwan_priv = NULL;
@@ -2317,21 +2506,10 @@
 		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
 	mutex_unlock(&rmnet_ipa3_ctx->pipe_handle_guard);
 	unregister_netdev(IPA_NETDEV());
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
-		IPA_RM_RESOURCE_Q6_CONS);
-	if (ret < 0)
-		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
-			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
-			ret);
-	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
-	if (ret < 0)
-		IPAWANERR(
-		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
-	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	if (ret < 0)
-		IPAWANERR("Error deleting resource %d, ret=%d\n",
-		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	if (ipa3_ctx->use_ipa_pm)
+		ipa3_wwan_deregister_netdev_pm_client();
+	else
+		ipa3_wwan_delete_wwan_rm_resource();
 	cancel_work_sync(&ipa3_tx_wakequeue_work);
 	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
 	if (IPA_NETDEV())
@@ -2385,23 +2563,26 @@
 	if (wwan_ptr == NULL) {
 		IPAWANERR("wwan_ptr is NULL.\n");
 		ret = 0;
-		goto unlock_and_bail;
+		netif_tx_unlock_bh(netdev);
+		goto bail;
 	}
 
 	/* Do not allow A7 to suspend in case there are oustanding packets */
 	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
 		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
 		ret = -EAGAIN;
-		goto unlock_and_bail;
+		netif_tx_unlock_bh(netdev);
+		goto bail;
 	}
 
 	/* Make sure that there is no Tx operation ongoing */
 	netif_stop_queue(netdev);
-	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	ret = 0;
-
-unlock_and_bail:
 	netif_tx_unlock_bh(netdev);
+	if (ipa3_ctx->use_ipa_pm)
+		ipa_pm_deactivate_sync(rmnet_ipa3_ctx->pm_hdl);
+	else
+		ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	ret = 0;
 bail:
 	IPAWANDBG("Exit with %d\n", ret);
 	return ret;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index c7a6186..2e43abf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -50,6 +50,9 @@
 #define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
 		compat_uptr_t)
+#define WAN_IOC_NOTIFY_WAN_STATE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_NOTIFY_WAN_STATE, \
+		compat_uptr_t)
 #endif
 
 static unsigned int dev_num = 1;
@@ -316,6 +319,27 @@
 		}
 		break;
 
+	case WAN_IOC_NOTIFY_WAN_STATE:
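+		/* userspace reports a WAN connectivity state change */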
+		IPAWANDBG_LOW("device %s got WAN_IOC_NOTIFY_WAN_STATE :>>>\n",
+			DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_notify_wan_state);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 __user *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (ipa3_wwan_set_modem_state(
+			(struct wan_ioctl_notify_wan_state *)param)) {
+			IPAWANERR("WAN_IOC_NOTIFY_WAN_STATE failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
 	default:
 		retval = -ENOTTY;
 	}
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index 297f932..7496f28 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -50,6 +50,7 @@
 	dev_t dev_num;
 	struct device *dev;
 	struct cdev cdev;
+	u32 modem_pm_hdl;
 };
 static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx;
 
@@ -118,14 +119,25 @@
 */
 int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
 {
+	int res = 0;
+
 	TETH_DBG_FUNC_ENTRY();
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
-				 IPA_RM_RESOURCE_Q6_CONS);
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
-				 IPA_RM_RESOURCE_USB_CONS);
+	if (ipa_pm_is_used()) {
+		res = ipa_pm_deactivate_sync(ipa3_teth_ctx->modem_pm_hdl);
+		if (res) {
+			TETH_ERR("fail to deactivate modem %d\n", res);
+			return res;
+		}
+		res = ipa_pm_destroy();
+	} else {
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+					IPA_RM_RESOURCE_Q6_CONS);
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+					IPA_RM_RESOURCE_USB_CONS);
+	}
 	TETH_DBG_FUNC_EXIT();
 
-	return 0;
+	return res;
 }
 
 /**
@@ -140,9 +152,27 @@
 int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
 {
 	int res = 0;
+	struct ipa_pm_register_params reg_params;
+
+	memset(&reg_params, 0, sizeof(reg_params));
 
 	TETH_DBG_FUNC_ENTRY();
 
+	if (ipa_pm_is_used()) {
+		reg_params.name = "MODEM (USB RMNET)";
+		reg_params.group = IPA_PM_GROUP_MODEM;
+		reg_params.skip_clk_vote = true;
+		res = ipa_pm_register(&reg_params,
+			&ipa3_teth_ctx->modem_pm_hdl);
+		if (res) {
+			TETH_ERR("fail to register with PM %d\n", res);
+			return res;
+		}
+
+		res = ipa_pm_activate_sync(ipa3_teth_ctx->modem_pm_hdl);
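+		/* PM path is done; skip the legacy RM dependency graph below */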
+		goto bail;
+	}
+
 	/* Build the dependency graph, first add_dependency call is sync
 	 * in order to make sure the IPA clocks are up before we continue
 	 * and notify the USB driver it may continue.
@@ -234,6 +264,8 @@
 		res = -ENODEV;
 		goto fail_cdev_add;
 	}
+
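+	/* ~0 marks the modem PM handle as not yet registered */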
+	ipa3_teth_ctx->modem_pm_hdl = ~0;
 	TETH_DBG("Tethering bridge driver init OK\n");
 
 	return 0;
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
index af46bf2..82bee5d 100644
--- a/drivers/platform/msm/ipa/test/Makefile
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
-ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o ipa_test_dma.o ipa_test_hw_stats.o ipa_pm_ut.o
diff --git a/drivers/platform/msm/ipa/test/ipa_pm_ut.c b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
new file mode 100644
index 0000000..e07040a
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_pm_ut.c
@@ -0,0 +1,1758 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_pm.h"
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+#include <linux/delay.h>
+
+struct callback_param {
+	struct completion complete;
+	enum ipa_pm_cb_event evt;
+};
+
+static int ipa_pm_ut_setup(void **ppriv)
+{
+
+	IPA_UT_DBG("Start Setup\n");
+
+	/* decrement UT vote */
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+
+	return 0;
+}
+
+static int ipa_pm_ut_teardown(void *priv)
+{
+	IPA_UT_DBG("Start Teardown\n");
+
+	/* undo UT vote */
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+	return 0;
+}
+
+/* pass completion struct as the user data/callback params */
+static void ipa_pm_call_back(void *user_data, enum ipa_pm_cb_event evt)
+{
+	struct callback_param *param;
+
+	param = (struct callback_param *) user_data;
+	param->evt = evt;
+
+	if (evt == IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_DBG("Activate callback called\n");
+		complete_all(&param->complete);
+	} else if (evt == IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_DBG("Request Wakeup callback called\n");
+		complete_all(&param->complete);
+	} else
+		IPA_UT_ERR("invalid callback - callback #%d\n", evt);
+}
+
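+/*
+ * Deactivate and deregister each of the n client handles passed as varargs,
+ * then destroy the PM framework. Returns 0 on success, -EFAULT on failure.
+ */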
+static int clean_up(int n, ...)
+{
+	va_list args;
+	int i, hdl, rc = 0;
+
+	va_start(args, n);
+
+	IPA_UT_DBG("n = %d\n", n);
+
+	IPA_UT_DBG("Clean up Started");
+
+	for (i = 0; i < n; i++) {
+		hdl = va_arg(args, int);
+
+		rc = ipa_pm_deactivate_sync(hdl);
+		if (rc) {
+			IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+			return -EFAULT;
+		}
+		rc = ipa_pm_deregister(hdl);
+		if (rc) {
+			IPA_UT_ERR("fail to deregister client - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deregister failed");
+			return -EFAULT;
+		}
+	}
+	va_end(args);
+	rc = ipa_pm_destroy();
+	if (rc) {
+		IPA_UT_ERR("fail to destroy pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/* test 1.1 */
+static int ipa_pm_ut_single_registration(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deregister(hdl);
+	if (rc == 0) {
+		IPA_UT_ERR("deregister was not unsuccesful - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deregister was not unsuccesful");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deferred deactivate client - rc = %d\n"
+			, rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deferred deactivate client");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deregister(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deregister client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deregister client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc == 0) {
+		IPA_UT_ERR("activate was not unsuccesful- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate was not unsuccesful");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_destroy();
+	if (rc) {
+		IPA_UT_ERR("terminate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("terminate_failed");
+	}
+
+	return 0;
+}
+
+/* test 1.2 */
+static int ipa_pm_ut_double_register_activate(void *priv)
+{
+	int rc = 0;
+	int hdl, hdl_test, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&register_params, &hdl_test);
+	if (rc != -EEXIST) {
+		IPA_UT_ERR("registered client with same name rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("did not to fail register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to do nothing - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("do nothing failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to do nothing on 2nd activate = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to not reactivate");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deactivate client");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 2 */
+static int ipa_pm_ut_deferred_deactivate(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n",
+		rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to reactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("reactivate client failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate_sync client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate sync failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+
+/*test 3*/
+static int ipa_pm_ut_two_clients_activate(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, vote;
+	u32 pipes;
+	struct callback_param user_data_USB;
+	struct callback_param user_data_WLAN;
+
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data_USB
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data_WLAN
+	};
+	user_data_USB.evt = IPA_PM_CB_EVENT_MAX;
+	user_data_WLAN.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data_USB.complete);
+	init_completion(&user_data_WLAN.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_USB, IPA_CLIENT_USB_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN,
+		IPA_CLIENT_WLAN1_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_associate_ipa_cons_to_client(hdl_WLAN,
+		IPA_CLIENT_WLAN2_CONS);
+	if (rc) {
+		IPA_UT_ERR("fail to map client 2 to multiplt pipes rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to map client");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 2 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 2\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_WLAN.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	reinit_completion(&user_data_USB.complete);
+	reinit_completion(&user_data_WLAN.complete);
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("no-block activate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block activate fail");
+		return -EFAULT;
+	}
+
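+	/* build a bitmask of the endpoint indices mapped to these clients */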
+	pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS);
+	pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+	pipes |= 1 << ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+
+	IPA_UT_DBG("pipes = %d\n", pipes);
+
+	rc = ipa_pm_handle_suspend(pipes);
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data_WLAN.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 2\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_WLAN.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_WLAN.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	reinit_completion(&user_data_USB.complete);
+
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate_sync client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to deactivate_sync");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("no-block activate failed - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block activate fail");
+		return -EFAULT;
+	}
+
+	pipes = 1 << ipa_get_ep_mapping(IPA_CLIENT_USB_CONS);
+
+	rc = ipa_pm_handle_suspend(pipes);
+
+	if (!wait_for_completion_timeout(&user_data_USB.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for wakeup_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("wakeup callback not called");
+		return -ETIME;
+	}
+
+	if (user_data_USB.evt != IPA_PM_REQUEST_WAKEUP) {
+		IPA_UT_ERR("Callback = %d\n", user_data_USB.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+}
+
+/* test 4 */
+static int ipa_pm_ut_deactivate_all_deferred(void *priv)
+{
+
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_USB);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client 1 - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 2- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback 1\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to no-block activate - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("no-block-activate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 1 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deactivate_all_deferred();
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("deactivate_all_deferred failed");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("clock vote went below 1");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* test 5 */
+static int ipa_pm_ut_deactivate_after_activate(void *priv)
+{
+
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rce %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&USB_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client - rc = %d\n",
+		rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work for client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate sync client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate sync fail");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 6 */
+static int ipa_pm_ut_atomic_activate(void *priv)
+{
+	int rc = 0;
+	int hdl, vote;
+	struct callback_param user_data;
+	spinlock_t lock;
+	unsigned long flags;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params register_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+		.user_data = &user_data
+	};
+	user_data.evt = IPA_PM_CB_EVENT_MAX;
+
+
+	spin_lock_init(&lock);
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	init_completion(&user_data.complete);
+
+	rc = ipa_pm_register(&register_params, &hdl);
+	if (rc) {
+		IPA_UT_ERR("fail to register client rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
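+	/* activate must be callable from atomic context (IRQs disabled) */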
+	spin_lock_irqsave(&lock, flags);
+	rc = ipa_pm_activate(hdl);
+	if (rc != -EINPROGRESS) {
+		IPA_UT_ERR("fail to queue work - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("queue activate work failed");
+		spin_unlock_irqrestore(&lock, flags);
+		return -EFAULT;
+	}
+	spin_unlock_irqrestore(&lock, flags);
+
+	if (!wait_for_completion_timeout(&user_data.complete, HZ)) {
+		IPA_UT_ERR("timeout waiting for activate_callback\n");
+		IPA_UT_TEST_FAIL_REPORT("activate callback not called");
+		return -ETIME;
+	}
+
+	if (user_data.evt != IPA_PM_CLIENT_ACTIVATED) {
+		IPA_UT_ERR("Callback = %d\n", user_data.evt);
+		IPA_UT_TEST_FAIL_REPORT("wrong callback called");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(1, hdl);
+	return rc;
+}
+
+/* test 7 */
+static int ipa_pm_ut_deactivate_loop(void *priv)
+{
+	int rc = 0;
+	int i, hdl_USB, hdl_WLAN, vote;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deffered deactivate client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+		return -EFAULT;
+	}
+
+	for (i = 0; i < 50; i++) {
+		IPA_UT_DBG("Loop iteration #%d\n", i);
+
+		vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+		if (vote != 2) {
+			IPA_UT_ERR("clock vote is at %d\n", vote);
+			IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+			return -EINVAL;
+		}
+
+		rc = ipa_pm_activate(hdl_WLAN);
+		if (rc) {
+			IPA_UT_ERR("fail to undo deactivate for client 2");
+			IPA_UT_ERR(" - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("undo deactivate failed");
+			return -EFAULT;
+		}
+
+		rc = ipa_pm_deferred_deactivate(hdl_WLAN);
+		if (rc) {
+			IPA_UT_ERR("fail to deffered deactivate client");
+			IPA_UT_ERR(" - rc = %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("deffered deactivate fail");
+			return -EFAULT;
+		}
+	}
+
+	msleep(200);
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+
+}
+
+
+/*test 8*/
+static int ipa_pm_ut_set_perf_profile(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(2, hdl_USB, hdl_WLAN);
+	return rc;
+}
+
+/*test 9*/
+static int ipa_pm_ut_group_tput(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_APPS,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_APPS,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 500);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 1000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+
+}
+
+/*test 10*/
+static int ipa_pm_ut_skip_clk_vote_tput(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000}
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
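+	/* MODEM-group clients skip the clock vote but still count for tput */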
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_MODEM,
+		.skip_clk_vote = 1,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_MODEM,
+		.skip_clk_vote = 1,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 2000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 1) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 0) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* Test 11 */
+static int ipa_pm_ut_simple_exception(void *priv)
+{
+	int rc = 0;
+	int hdl_USB, hdl_WLAN, hdl_MODEM, vote, idx;
+
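+	/* exception entry: use alternate thresholds when "USB" is the use case */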
+	struct ipa_pm_exception exceptions = {
+		.usecase = "USB",
+		.threshold = {1000, 1800},
+	};
+
+	struct ipa_pm_init_params init_params = {
+		.threshold_size = IPA_PM_THRESHOLD_MAX,
+		.default_threshold = {600, 1000},
+		.exception_size = 1,
+		.exceptions[0] = exceptions,
+	};
+
+	struct ipa_pm_register_params USB_params = {
+		.name = "USB",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params WLAN_params = {
+		.name = "WLAN",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	struct ipa_pm_register_params MODEM_params = {
+		.name = "MODEM",
+		.group = IPA_PM_GROUP_DEFAULT,
+		.skip_clk_vote = 0,
+		.callback = ipa_pm_call_back,
+	};
+
+	rc = ipa_pm_init(&init_params);
+	if (rc) {
+		IPA_UT_ERR("Fail to init ipa_pm - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init params");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&USB_params, &hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_register(&WLAN_params, &hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_USB, 1200);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 1 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_WLAN, 2000);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to activate sync for client 1- rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("activate sync failed");
+		return -EFAULT;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 1) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_activate(hdl_WLAN);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 2 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_register(&MODEM_params, &hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to register client 3 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to register");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_set_perf_profile(hdl_MODEM, 800);
+	if (rc) {
+		IPA_UT_ERR("fail to set tput for client 2 rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to set perf profile");
+		return -EFAULT;
+	}
+
+	rc = ipa_pm_activate(hdl_MODEM);
+	if (rc) {
+		IPA_UT_ERR("fail to activate no block for client 3 - rc = %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("activate no block failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 3) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	msleep(200);
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 3) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = ipa_pm_deactivate_sync(hdl_USB);
+	if (rc) {
+		IPA_UT_ERR("fail to deactivate client - rc = %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("deactivate failed");
+		return -EFAULT;
+	}
+
+	vote = atomic_read(&ipa3_ctx->ipa3_active_clients.cnt);
+	if (vote != 2) {
+		IPA_UT_ERR("clock vote is at %d\n", vote);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock vote");
+		return -EINVAL;
+	}
+
+	idx = ipa3_ctx->ipa3_active_clients.bus_vote_idx;
+	if (idx != 2) {
+		IPA_UT_ERR("clock plan is at %d\n", idx);
+		IPA_UT_TEST_FAIL_REPORT("wrong clock plan");
+		return -EINVAL;
+	}
+
+	rc = clean_up(3, hdl_USB, hdl_WLAN, hdl_MODEM);
+	return rc;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(pm, "PM for IPA",
+	ipa_pm_ut_setup, ipa_pm_ut_teardown)
+{
+	IPA_UT_ADD_TEST(single_registration,
+		"Single Registration/Basic Functions",
+		ipa_pm_ut_single_registration,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(double_register_activate,
+		"double register/activate",
+		ipa_pm_ut_double_register_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deferred_deactivate,
+		"Deferred_deactivate",
+		ipa_pm_ut_deferred_deactivate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(two_clients_activate,
+		"Activate two clients",
+		ipa_pm_ut_two_clients_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_all_deferred,
+		"Deactivate all deferred",
+		ipa_pm_ut_deactivate_all_deferred,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_after_activate,
+		"Deactivate after activate",
+		ipa_pm_ut_deactivate_after_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(atomic_activate,
+		"Atomic activate",
+		ipa_pm_ut_atomic_activate,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(deactivate_loop,
+		"Deactivate Loop",
+		ipa_pm_ut_deactivate_loop,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(set_perf_profile,
+		"Set perf profile",
+		ipa_pm_ut_set_perf_profile,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(group_tput,
+		"Group throughputs",
+		ipa_pm_ut_group_tput,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(skip_clk_vote_tput,
+		"Skip clock vote and tput",
+		ipa_pm_ut_skip_clk_vote_tput,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(simple_exception,
+		"throughput while passing simple exception",
+		ipa_pm_ut_simple_exception,
+		true, IPA_HW_v4_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(pm);
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
index 823edcf..35f2878 100644
--- a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -22,6 +22,7 @@
  */
 IPA_UT_DECLARE_SUITE(mhi);
 IPA_UT_DECLARE_SUITE(dma);
+IPA_UT_DECLARE_SUITE(pm);
 IPA_UT_DECLARE_SUITE(example);
 IPA_UT_DECLARE_SUITE(hw_stats);
 
@@ -34,6 +35,7 @@
 {
 	IPA_UT_REGISTER_SUITE(mhi),
 	IPA_UT_REGISTER_SUITE(dma),
+	IPA_UT_REGISTER_SUITE(pm),
 	IPA_UT_REGISTER_SUITE(example),
 	IPA_UT_REGISTER_SUITE(hw_stats),
 } IPA_UT_DEFINE_ALL_SUITES_END;
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index aafb8fc..707c95e 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -1247,6 +1247,79 @@
 }
 EXPORT_SYMBOL(geni_se_iommu_free_buf);
 
+/**
+ * geni_se_dump_dbg_regs() - Print relevant registers that capture most
+ *			accurately the state of an SE.
+ * @rsc:		Handle to the SE's resources.
+ * @base:		Base address of the SE's register space.
+ * @ipc:		IPC log context handle.
+ *
+ * This function is used to print out all the registers that capture the state
+ * of an SE to help debug any errors.
+ *
+ * Return:	None
+ */
+void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base,
+				void *ipc)
+{
+	u32 m_cmd0 = 0;
+	u32 m_irq_status = 0;
+	u32 geni_status = 0;
+	u32 geni_ios = 0;
+	u32 dma_rx_irq = 0;
+	u32 dma_tx_irq = 0;
+	u32 rx_fifo_status = 0;
+	u32 tx_fifo_status = 0;
+	u32 se_dma_dbg = 0;
+	u32 m_cmd_ctrl = 0;
+	u32 se_dma_rx_len = 0;
+	u32 se_dma_rx_len_in = 0;
+	u32 se_dma_tx_len = 0;
+	u32 se_dma_tx_len_in = 0;
+	struct geni_se_device *geni_se_dev;
+
+	if (!ipc)
+		return;
+
+	geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
+	if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+		return;
+	mutex_lock(&geni_se_dev->ab_ib_lock);
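+	/* skip the register dump if the SE's bus/clock votes are not active */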
+	if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list))) {
+		GENI_SE_DBG(ipc, false, NULL, "%s: Clocks not on\n", __func__);
+		goto exit_geni_se_dump_dbg_regs;
+	}
+	m_cmd0 = geni_read_reg(base, SE_GENI_M_CMD0);
+	m_irq_status = geni_read_reg(base, SE_GENI_M_IRQ_STATUS);
+	geni_status = geni_read_reg(base, SE_GENI_STATUS);
+	geni_ios = geni_read_reg(base, SE_GENI_IOS);
+	dma_rx_irq = geni_read_reg(base, SE_DMA_RX_IRQ_STAT);
+	dma_tx_irq = geni_read_reg(base, SE_DMA_TX_IRQ_STAT);
+	rx_fifo_status = geni_read_reg(base, SE_GENI_RX_FIFO_STATUS);
+	tx_fifo_status = geni_read_reg(base, SE_GENI_TX_FIFO_STATUS);
+	se_dma_dbg = geni_read_reg(base, SE_DMA_DEBUG_REG0);
+	m_cmd_ctrl = geni_read_reg(base, SE_GENI_M_CMD_CTRL_REG);
+	se_dma_rx_len = geni_read_reg(base, SE_DMA_RX_LEN);
+	se_dma_rx_len_in = geni_read_reg(base, SE_DMA_RX_LEN_IN);
+	se_dma_tx_len = geni_read_reg(base, SE_DMA_TX_LEN);
+	se_dma_tx_len_in = geni_read_reg(base, SE_DMA_TX_LEN_IN);
+
+	GENI_SE_DBG(ipc, false, NULL,
+	"%s: m_cmd0:0x%x, m_irq_status:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
+	__func__, m_cmd0, m_irq_status, geni_status, geni_ios);
+	GENI_SE_DBG(ipc, false, NULL,
+	"dma_rx_irq:0x%x, dma_tx_irq:0x%x, rx_fifo_sts:0x%x, tx_fifo_sts:0x%x\n"
+	, dma_rx_irq, dma_tx_irq, rx_fifo_status, tx_fifo_status);
+	GENI_SE_DBG(ipc, false, NULL,
+	"se_dma_dbg:0x%x, m_cmd_ctrl:0x%x, dma_rxlen:0x%x, dma_rxlen_in:0x%x\n",
+	se_dma_dbg, m_cmd_ctrl, se_dma_rx_len, se_dma_rx_len_in);
+	GENI_SE_DBG(ipc, false, NULL,
+	"dma_txlen:0x%x, dma_txlen_in:0x%x\n", se_dma_tx_len, se_dma_tx_len_in);
+exit_geni_se_dump_dbg_regs:
+	mutex_unlock(&geni_se_dev->ab_ib_lock);
+}
+EXPORT_SYMBOL(geni_se_dump_dbg_regs);
+
 static const struct of_device_id geni_se_dt_match[] = {
 	{ .compatible = "qcom,qupv3-geni-se", },
 	{ .compatible = "qcom,qupv3-geni-se-cb", },
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 7f6d346..7f9a797 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -26,12 +26,14 @@
 #include <linux/pm_wakeup.h>
 #include <linux/slab.h>
 #include <linux/pmic-voter.h>
+#include <linux/workqueue.h>
 #include "battery.h"
 
 #define DRV_MAJOR_VERSION	1
 #define DRV_MINOR_VERSION	0
 
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TAPER_STEPPER_VOTER		"TAPER_STEPPER_VOTER"
 #define TAPER_END_VOTER			"TAPER_END_VOTER"
 #define PL_TAPER_EARLY_BAD_VOTER	"PL_TAPER_EARLY_BAD_VOTER"
 #define PARALLEL_PSY_VOTER		"PARALLEL_PSY_VOTER"
@@ -41,12 +43,11 @@
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
-#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
+#define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 
 struct pl_data {
 	int			pl_mode;
 	int			slave_pct;
-	int			taper_pct;
 	int			slave_fcc_ua;
 	int			restricted_current;
 	bool			restricted_charging_enabled;
@@ -59,7 +60,8 @@
 	struct votable		*pl_enable_votable_indirect;
 	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
-	struct delayed_work	pl_taper_work;
+	struct work_struct	pl_taper_work;
+	bool			taper_work_running;
 	struct power_supply	*main_psy;
 	struct power_supply	*pl_psy;
 	struct power_supply	*batt_psy;
@@ -342,67 +344,75 @@
 		*master_ua = max(0, total_ua);
 	else
 		*master_ua = max(0, total_ua - *slave_ua);
-
-	/* further reduce slave's share in accordance with taper reductions */
-	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
 }
 
 #define MINIMUM_PARALLEL_FCC_UA		500000
-#define PL_TAPER_WORK_DELAY_MS		100
+#define PL_TAPER_WORK_DELAY_MS		500
 #define TAPER_RESIDUAL_PCT		90
+#define TAPER_REDUCTION_UA		200000
 static void pl_taper_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work, struct pl_data,
-						pl_taper_work.work);
+						pl_taper_work);
 	union power_supply_propval pval = {0, };
-	int total_fcc_ua, master_fcc_ua, slave_fcc_ua;
 	int rc;
+	int eff_fcc_ua;
+	int total_fcc_ua, master_fcc_ua, slave_fcc_ua = 0;
 
-	/* exit immediately if parallel is disabled */
-	if (get_effective_result(chip->pl_disable_votable)) {
-		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
-		goto done;
+	chip->taper_work_running = true;
+	while (true) {
+		if (get_effective_result(chip->pl_disable_votable)) {
+			/*
+			 * if parallel's FCC share is low, simply disable
+			 * parallel with TAPER_END_VOTER
+			 */
+			total_fcc_ua = get_effective_result_locked(
+					chip->fcc_votable);
+			get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
+					&slave_fcc_ua);
+			if (slave_fcc_ua <= MINIMUM_PARALLEL_FCC_UA) {
+				pl_dbg(chip, PR_PARALLEL, "terminating: parallel's share is low\n");
+				vote(chip->pl_disable_votable, TAPER_END_VOTER,
+						true, 0);
+			} else {
+				pl_dbg(chip, PR_PARALLEL, "terminating: parallel disabled\n");
+			}
+			goto done;
+		}
+
+		rc = power_supply_get_property(chip->batt_psy,
+				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get batt charge type rc=%d\n", rc);
+			goto done;
+		}
+
+		chip->charge_type = pval.intval;
+		if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+			eff_fcc_ua = get_effective_result(chip->fcc_votable);
+			if (eff_fcc_ua < 0) {
+				pr_err("Couldn't get fcc, exiting taper work\n");
+				goto done;
+			}
+			eff_fcc_ua = eff_fcc_ua - TAPER_REDUCTION_UA;
+			if (eff_fcc_ua < 0) {
+				pr_err("Can't reduce FCC any more\n");
+				goto done;
+			}
+
+			pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing FCC to %dua\n",
+					eff_fcc_ua);
+			vote(chip->fcc_votable, TAPER_STEPPER_VOTER,
+					true, eff_fcc_ua);
+		} else {
+			pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+		}
+		/* wait for the charger state to deglitch after FCC change */
+		msleep(PL_TAPER_WORK_DELAY_MS);
 	}
-
-	total_fcc_ua = get_effective_result_locked(chip->fcc_votable);
-	get_fcc_split(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
-	if (slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
-		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
-		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
-		goto done;
-	}
-
-	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
-		slave_fcc_ua);
-
-	rc = power_supply_get_property(chip->batt_psy,
-			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
-	if (rc < 0) {
-		pr_err("Couldn't get batt charge type rc=%d\n", rc);
-		goto done;
-	}
-
-	chip->charge_type = pval.intval;
-	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
-		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
-
-		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
-		/* Reduce the taper percent by 10 percent */
-		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
-		rerun_election(chip->fcc_votable);
-		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
-				PL_TAPER_WORK_DELAY_MS);
-		schedule_delayed_work(&chip->pl_taper_work,
-				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
-		return;
-	}
-
-	/*
-	 * Master back to Fast Charge, get out of this round of taper reduction
-	 */
-	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-
 done:
+	chip->taper_work_running = false;
+	vote(chip->fcc_votable, TAPER_STEPPER_VOTER, false, 0);
 	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
 }
 
@@ -422,24 +432,19 @@
 		get_fcc_split(chip, total_fcc_ua, &master_fcc_ua,
 				&slave_fcc_ua);
 
-		if (slave_fcc_ua > 500000) {
+		if (slave_fcc_ua > MINIMUM_PARALLEL_FCC_UA) {
 			chip->slave_fcc_ua = slave_fcc_ua;
-			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							false, 0);
 		} else {
 			chip->slave_fcc_ua = 0;
-			vote(chip->pl_disable_votable, FCC_CHANGE_VOTER,
+			vote(chip->pl_disable_votable, PL_FCC_LOW_VOTER,
 							true, 0);
 		}
 	}
 
 	rerun_election(chip->pl_disable_votable);
 
-	pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
-		   master_fcc_ua, slave_fcc_ua,
-		   (master_fcc_ua * 100) / total_fcc_ua,
-		   (slave_fcc_ua * 100) / total_fcc_ua);
-
 	return 0;
 }
 
@@ -652,12 +657,21 @@
 		if (rc < 0) {
 			pr_err("Couldn't get batt charge type rc=%d\n", rc);
 		} else {
-			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+				&& !chip->taper_work_running) {
 				pl_dbg(chip, PR_PARALLEL,
 					"pl enabled in Taper scheduing work\n");
-				schedule_delayed_work(&chip->pl_taper_work, 0);
+				vote(chip->pl_awake_votable, TAPER_END_VOTER,
+						true, 0);
+				queue_work(system_long_wq,
+						&chip->pl_taper_work);
 			}
 		}
+
+		pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+			master_fcc_ua, slave_fcc_ua,
+			(master_fcc_ua * 100) / total_fcc_ua,
+			(slave_fcc_ua * 100) / total_fcc_ua);
 	} else {
 		if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
 			|| (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
@@ -788,7 +802,6 @@
 	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
 		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
 		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
-		chip->taper_pct = 100;
 		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
 		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
 				false, 0);
@@ -800,8 +813,11 @@
 	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
 		&& (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
 		chip->charge_type = pval.intval;
-		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
-		schedule_delayed_work(&chip->pl_taper_work, 0);
+		if (!chip->taper_work_running) {
+			pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+			vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+			queue_work(system_long_wq, &chip->pl_taper_work);
+		}
 		return;
 	}
 
@@ -1010,7 +1026,7 @@
 		goto release_wakeup_source;
 	}
 
-	chip->fv_votable = create_votable("FV", VOTE_MAX,
+	chip->fv_votable = create_votable("FV", VOTE_MIN,
 					pl_fv_vote_callback,
 					chip);
 	if (IS_ERR(chip->fv_votable)) {
@@ -1057,7 +1073,7 @@
 	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
 
 	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
-	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
+	INIT_WORK(&chip->pl_taper_work, pl_taper_work);
 	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
 
 	rc = pl_register_notifier(chip);
@@ -1082,8 +1098,6 @@
 		goto unreg_notifier;
 	}
 
-	chip->taper_pct = 100;
-
 	the_chip = chip;
 
 	return 0;
@@ -1112,7 +1126,7 @@
 		return;
 
 	cancel_delayed_work_sync(&chip->status_change_work);
-	cancel_delayed_work_sync(&chip->pl_taper_work);
+	cancel_work_sync(&chip->pl_taper_work);
 	cancel_work_sync(&chip->pl_disable_forever_work);
 
 	power_supply_unreg_notifier(&chip->nb);
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 271c523..4303960 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -287,6 +287,7 @@
 	int	esr_pulse_thresh_ma;
 	int	esr_meas_curr_ma;
 	int	bmd_en_delay_ms;
+	int	ki_coeff_full_soc_dischg;
 	int	jeita_thresholds[NUM_JEITA_LEVELS];
 	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -416,6 +417,7 @@
 	struct mutex		bus_lock;
 	struct mutex		sram_rw_lock;
 	struct mutex		charge_full_lock;
+	struct mutex		qnovo_esr_ctrl_lock;
 	u32			batt_soc_base;
 	u32			batt_info_base;
 	u32			mem_if_base;
@@ -424,7 +426,6 @@
 	int			batt_id_ohms;
 	int			ki_coeff_full_soc;
 	int			charge_status;
-	int			prev_charge_status;
 	int			charge_done;
 	int			charge_type;
 	int			online_status;
@@ -450,6 +451,7 @@
 	bool			slope_limit_en;
 	bool			use_ima_single_mode;
 	bool			use_dma;
+	bool			qnovo_enable;
 	struct completion	soc_update;
 	struct completion	soc_ready;
 	struct completion	mem_grant;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 7f220c3..b02c860 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1631,6 +1631,8 @@
 
 	if (batt_temp < 0)
 		ki_coeff_full_soc = 0;
+	else if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+		ki_coeff_full_soc = chip->dt.ki_coeff_full_soc_dischg;
 	else
 		ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT;
 
@@ -1748,12 +1750,12 @@
 
 	/* We need 2 most significant bytes here */
 	bsoc = (u32)bsoc >> 16;
-	rc = fg_get_msoc(chip, &msoc);
+	rc = fg_get_msoc_raw(chip, &msoc_raw);
 	if (rc < 0) {
-		pr_err("Error in getting msoc, rc=%d\n", rc);
+		pr_err("Error in getting msoc_raw, rc=%d\n", rc);
 		goto out;
 	}
-	msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
+	msoc = DIV_ROUND_CLOSEST(msoc_raw * FULL_CAPACITY, FULL_SOC_RAW);
 
 	fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
 		msoc, bsoc, chip->health, chip->charge_status,
@@ -2507,7 +2509,6 @@
 		goto out;
 	}
 
-	chip->prev_charge_status = chip->charge_status;
 	chip->charge_status = prop.intval;
 	rc = power_supply_get_property(chip->batt_psy,
 			POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
@@ -3300,20 +3301,21 @@
 	int rc;
 	int esr_uohms;
 
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	/* force esr extraction enable */
 	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
 			ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
 		pr_err("failed to enable esr extn rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT, 0);
 	if (rc < 0) {
 		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
@@ -3321,24 +3323,36 @@
 			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
 	if (rc < 0) {
 		pr_err("Error in configuring force ESR rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+	/*
+	 * Release and grab the lock again after 1.5 seconds so that prepare
+	 * callback can succeed if the request comes in between.
+	 */
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+
 	/* wait 1.5 seconds for hw to measure ESR */
 	msleep(1500);
+
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
 			ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
 			0);
 	if (rc < 0) {
 		pr_err("Error in restoring force ESR rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+	/* If qnovo is disabled, then leave ESR extraction enabled */
+	if (!chip->qnovo_enable)
+		goto done;
+
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
 	if (rc < 0) {
 		pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	/* force esr extraction disable */
@@ -3347,36 +3361,46 @@
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
 		pr_err("failed to disable esr extn rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
 
+done:
 	fg_get_battery_resistance(chip, &esr_uohms);
 	fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
-
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
 	return rc;
 }
 
 static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
 {
-	int rc;
+	int rc = 0;
 
+	mutex_lock(&chip->qnovo_esr_ctrl_lock);
 	/* force esr extraction disable when qnovo enables */
 	rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
 			ESR_EXTRACTION_ENABLE_OFFSET,
 			BIT(0), qnovo_enable ? 0 : BIT(0),
 			FG_IMA_DEFAULT);
-	if (rc < 0)
+	if (rc < 0) {
 		pr_err("Error in configuring esr extraction rc=%d\n", rc);
+		goto out;
+	}
 
 	rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
 			LD_REG_CTRL_BIT,
 			qnovo_enable ? LD_REG_CTRL_BIT : 0);
 	if (rc < 0) {
 		pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
-		return rc;
+		goto out;
 	}
-	fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
-	return 0;
+
+	fg_dbg(chip, FG_STATUS, "%s for Qnovo\n",
+		qnovo_enable ? "Prepared" : "Unprepared");
+	chip->qnovo_enable = qnovo_enable;
+out:
+	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+	return rc;
 }
 
 static void ttf_work(struct work_struct *work)
@@ -4503,7 +4527,11 @@
 static int fg_parse_ki_coefficients(struct fg_chip *chip)
 {
 	struct device_node *node = chip->dev->of_node;
-	int rc, i;
+	int rc, i, temp;
+
+	rc = of_property_read_u32(node, "qcom,ki-coeff-full-dischg", &temp);
+	if (!rc)
+		chip->dt.ki_coeff_full_soc_dischg = temp;
 
 	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
 		chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
@@ -4972,7 +5000,6 @@
 	chip->debug_mask = &fg_gen3_debug_mask;
 	chip->irqs = fg_irqs;
 	chip->charge_status = -EINVAL;
-	chip->prev_charge_status = -EINVAL;
 	chip->ki_coeff_full_soc = -EINVAL;
 	chip->online_status = -EINVAL;
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
@@ -5045,6 +5072,7 @@
 	mutex_init(&chip->cl.lock);
 	mutex_init(&chip->ttf.lock);
 	mutex_init(&chip->charge_full_lock);
+	mutex_init(&chip->qnovo_esr_ctrl_lock);
 	init_completion(&chip->soc_update);
 	init_completion(&chip->soc_ready);
 	init_completion(&chip->mem_grant);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 6abbaeb..34514c9 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -188,6 +188,11 @@
 module_param_named(
 	weak_chg_icl_ua, __weak_chg_icl_ua, int, 0600);
 
+static int __try_sink_enabled = 1;
+module_param_named(
+	try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
 #define MICRO_1P5A		1500000
 #define MICRO_P1A		100000
 #define OTG_DEFAULT_DEGLITCH_TIME_MS	50
@@ -1037,7 +1042,10 @@
 		val->intval = 0;
 		break;
 	case POWER_SUPPLY_PROP_DIE_HEALTH:
-		rc = smblib_get_prop_die_health(chg, val);
+		if (chg->die_health == -EINVAL)
+			rc = smblib_get_prop_die_health(chg, val);
+		else
+			val->intval = chg->die_health;
 		break;
 	case POWER_SUPPLY_PROP_DP_DM:
 		val->intval = chg->pulse_cnt;
@@ -1092,14 +1100,8 @@
 		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
-		if (val->intval == -EINVAL) {
-			vote(chg->fv_votable, BATT_PROFILE_VOTER,
-					true, chg->batt_profile_fv_uv);
-			vote(chg->fv_votable, QNOVO_VOTER, false, 0);
-		} else {
-			vote(chg->fv_votable, QNOVO_VOTER, true, val->intval);
-			vote(chg->fv_votable, BATT_PROFILE_VOTER, false, 0);
-		}
+		vote(chg->fv_votable, QNOVO_VOTER,
+			(val->intval >= 0), val->intval);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
@@ -1145,6 +1147,10 @@
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 		rc = smblib_set_prop_input_current_limited(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
+		chg->die_health = val->intval;
+		power_supply_changed(chg->batt_psy);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -1166,6 +1172,7 @@
 	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
 	case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
 	case POWER_SUPPLY_PROP_SW_JEITA_ENABLED:
+	case POWER_SUPPLY_PROP_DIE_HEALTH:
 		return 1;
 	default:
 		break;
@@ -1647,15 +1654,6 @@
 		return rc;
 	}
 
-	/* disable SW STAT override */
-	rc = smblib_masked_write(chg, STAT_CFG_REG,
-				 STAT_SW_OVERRIDE_CFG_BIT, 0);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't disable SW STAT override rc=%d\n",
-			rc);
-		return rc;
-	}
-
 	/* disable h/w autonomous parallel charging control */
 	rc = smblib_masked_write(chg, MISC_CFG_REG,
 				 STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
@@ -2244,9 +2242,11 @@
 	chg->dev = &pdev->dev;
 	chg->param = v1_params;
 	chg->debug_mask = &__debug_mask;
+	chg->try_sink_enabled = &__try_sink_enabled;
 	chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
 	chg->mode = PARALLEL_MASTER;
 	chg->irq_info = smb2_irqs;
+	chg->die_health = -EINVAL;
 	chg->name = "PMI";
 
 	chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 1b890d5..52fcd7f 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -143,6 +143,23 @@
 	return rc;
 }
 
+int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override)
+{
+	int rc;
+
+	/* override = 1: SW STAT override; override = 0: HW auto mode */
+	rc = smblib_masked_write(chg, STAT_CFG_REG,
+				STAT_SW_OVERRIDE_CFG_BIT,
+				override ? STAT_SW_OVERRIDE_CFG_BIT : 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't configure SW STAT override rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	return rc;
+}
+
 /********************
  * REGISTER GETTERS *
  ********************/
@@ -584,8 +601,10 @@
 			schedule_work(&chg->bms_update_work);
 	}
 
-	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
+	if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel")) {
 		chg->pl.psy = psy;
+		schedule_work(&chg->pl_update_work);
+	}
 
 	return NOTIFY_OK;
 }
@@ -2250,12 +2269,6 @@
 int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
 				    union power_supply_propval *val)
 {
-	int rc = 0;
-
-	rc = smblib_get_prop_usb_present(chg, val);
-	if (rc < 0 || !val->intval)
-		return rc;
-
 	if (!chg->iio.usbin_v_chan ||
 		PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
 		chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
@@ -2798,7 +2811,7 @@
 		hvdcp = stat & QC_CHARGER_BIT;
 		vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
 		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
-		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
+		vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
 		vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
 								false, 0);
 
@@ -3779,8 +3792,165 @@
 	return IRQ_HANDLED;
 }
 
+static int typec_try_sink(struct smb_charger *chg)
+{
+	union power_supply_propval val;
+	bool debounce_done, vbus_detected, sink;
+	u8 stat;
+	int exit_mode = ATTACHED_SRC, rc;
+
+	/* ignore typec interrupt while try.snk WIP */
+	chg->try_sink_active = true;
+
+	/* force SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* reduce Tccdebounce time to ~20ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+			TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * give the other side an opportunity to be a SRC
+	 * for tDRPTRY + Tccdebounce time
+	 */
+	msleep(100);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (!debounce_done)
+		/*
+		 * The other side didn't switch to source; either it is an
+		 * adamant sink or it was removed. Go back to showing Rp.
+		 */
+		goto try_wait_src;
+
+	/*
+	 * We are in force sink mode and the other side has switched to
+	 * showing Rp. Configure DRP in case the other side removes Rp so we
+	 * can quickly (20ms) switch to showing our Rp. Note that the spec
+	 * requires us to show Rp for 80ms while the DRP DFP residency is
+	 * just 54ms. But 54ms is plenty of time for us to react and force
+	 * Rp for the remaining 26ms.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+				rc);
+		goto try_sink_exit;
+	}
+
+	/*
+	 * while other side is Rp, wait for VBUS from it; exit if other side
+	 * removes Rp
+	 */
+	do {
+		rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+					rc);
+			goto try_sink_exit;
+		}
+
+		debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+		vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+		/* Successfully transitioned to ATTACHED.SNK */
+		if (vbus_detected && debounce_done) {
+			exit_mode = ATTACHED_SINK;
+			goto try_sink_exit;
+		}
+
+		/*
+		 * Ensure sink since drp may put us in source if other
+		 * side switches back to Rd
+		 */
+		sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+		usleep_range(1000, 2000);
+	} while (debounce_done && sink);
+
+try_wait_src:
+	/*
+	 * Transition to TryWait.SRC state. Check if the other side still
+	 * wants to be SNK or has been removed.
+	 */
+	val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	/* Need to be in this state for tDRPTRY time, 75ms~150ms */
+	msleep(80);
+
+	rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+		goto try_sink_exit;
+	}
+
+	debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+	if (debounce_done)
+		/* the other side wants to be a sink */
+		exit_mode = ATTACHED_SRC;
+	else
+		/* the other side is detached */
+		exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+	/* release forcing of SRC/SNK mode */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	rc = smblib_set_prop_typec_power_role(chg, &val);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+	/* revert Tccdebounce time back to ~120ms */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+	chg->try_sink_active = false;
+
+	return exit_mode;
+}
+
 static void typec_sink_insertion(struct smb_charger *chg)
 {
+	int exit_mode;
+
+	/*
+	 * Try.SNK entry status - in ATTACHWAIT.SRC state, Rd-open or Rd-Ra
+	 * detected for TccDebounce time.
+	 */
+
+	if (*chg->try_sink_enabled) {
+		exit_mode = typec_try_sink(chg);
+
+		if (exit_mode != ATTACHED_SRC) {
+			smblib_usb_typec_change(chg);
+			return;
+		}
+	}
+
 	/* when a sink is inserted we should not wait on hvdcp timeout to
 	 * enable pd
 	 */
@@ -3853,7 +4023,7 @@
 
 	/* reset parallel voters */
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
-	vote(chg->pl_disable_votable, FCC_CHANGE_VOTER, false, 0);
+	vote(chg->pl_disable_votable, PL_FCC_LOW_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
 	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
@@ -4047,7 +4217,7 @@
 				smblib_typec_mode_name[chg->typec_mode]);
 }
 
-static void smblib_usb_typec_change(struct smb_charger *chg)
+void smblib_usb_typec_change(struct smb_charger *chg)
 {
 	int rc;
 
@@ -4083,7 +4253,8 @@
 		return IRQ_HANDLED;
 	}
 
-	if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+	if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+					 chg->try_sink_active) {
 		smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
 			chg->cc2_detach_wa_active ?
 			"cc2_detach_wa" : "typec_en_dis");
@@ -4117,6 +4288,14 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	chg->is_hdc = true;
+	/*
+	 * Disable the IRQ after the flag is set and re-enable it after
+	 * the flag is cleared in the delayed work, to avoid an IRQ storm
+	 * during the delay.
+	 */
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
 	schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
 
 	return IRQ_HANDLED;
@@ -4288,12 +4467,22 @@
 		power_supply_changed(chg->batt_psy);
 }
 
+static void pl_update_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						pl_update_work);
+
+	smblib_stat_sw_override_cfg(chg, false);
+}
+
 static void clear_hdc_work(struct work_struct *work)
 {
 	struct smb_charger *chg = container_of(work, struct smb_charger,
 						clear_hdc_work.work);
 
 	chg->is_hdc = 0;
+	if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
 }
 
 static void rdstd_cc2_detach_work(struct work_struct *work)
@@ -4818,6 +5007,7 @@
 	mutex_init(&chg->otg_oc_lock);
 	mutex_init(&chg->vconn_oc_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
+	INIT_WORK(&chg->pl_update_work, pl_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
 	INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
@@ -4866,6 +5056,14 @@
 
 		chg->bms_psy = power_supply_get_by_name("bms");
 		chg->pl.psy = power_supply_get_by_name("parallel");
+		if (chg->pl.psy) {
+			rc = smblib_stat_sw_override_cfg(chg, false);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't config stat sw rc=%d\n", rc);
+				return rc;
+			}
+		}
 		break;
 	case PARALLEL_SLAVE:
 		break;
@@ -4882,6 +5080,7 @@
 	switch (chg->mode) {
 	case PARALLEL_MASTER:
 		cancel_work_sync(&chg->bms_update_work);
+		cancel_work_sync(&chg->pl_update_work);
 		cancel_work_sync(&chg->rdstd_cc2_detach_work);
 		cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 		cancel_delayed_work_sync(&chg->clear_hdc_work);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 80f5bca..1046b27 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -66,7 +66,7 @@
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define WEAK_CHARGER_VOTER		"WEAK_CHARGER_VOTER"
 #define OTG_VOTER			"OTG_VOTER"
-#define FCC_CHANGE_VOTER		"FCC_CHANGE_VOTER"
+#define PL_FCC_LOW_VOTER		"PL_FCC_LOW_VOTER"
 #define WBC_VOTER			"WBC_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
@@ -130,6 +130,12 @@
 	SMB_IRQ_MAX,
 };
 
+enum try_sink_exit_mode {
+	ATTACHED_SRC = 0,
+	ATTACHED_SINK,
+	UNATTACHED_SINK,
+};
+
 struct smb_irq_info {
 	const char			*name;
 	const irq_handler_t		handler;
@@ -232,6 +238,7 @@
 	struct smb_params	param;
 	struct smb_iio		iio;
 	int			*debug_mask;
+	int			*try_sink_enabled;
 	enum smb_mode		mode;
 	struct smb_chg_freq	chg_freq;
 	int			smb_version;
@@ -287,6 +294,7 @@
 
 	/* work */
 	struct work_struct	bms_update_work;
+	struct work_struct	pl_update_work;
 	struct work_struct	rdstd_cc2_detach_work;
 	struct delayed_work	hvdcp_detect_work;
 	struct delayed_work	ps_change_timeout_work;
@@ -342,6 +350,7 @@
 	u32			wa_flags;
 	bool			cc2_detach_wa_active;
 	bool			typec_en_dis_active;
+	bool			try_sink_active;
 	int			boost_current_ua;
 	int			temp_speed_reading_count;
 
@@ -355,6 +364,8 @@
 	/* qnovo */
 	int			usb_icl_delta_ua;
 	int			pulse_cnt;
+
+	int			die_health;
 };
 
 int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -523,6 +534,8 @@
 				union power_supply_propval *val);
 int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_stat_sw_override_cfg(struct smb_charger *chg, bool override);
+void smblib_usb_typec_change(struct smb_charger *chg);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index a464a81..4b2e9c8 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -861,7 +861,7 @@
 	return (reg & CMD_OTG_EN_BIT) ? 1 : 0;
 }
 
-struct regulator_ops smb1351_chg_otg_reg_ops = {
+static struct regulator_ops smb1351_chg_otg_reg_ops = {
 	.enable		= smb1351_chg_otg_regulator_enable,
 	.disable	= smb1351_chg_otg_regulator_disable,
 	.is_enabled	= smb1351_chg_otg_regulator_is_enable,
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 4e1bb17..4b42420 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -146,6 +146,8 @@
 
 	struct power_supply	*parallel_psy;
 	struct pmic_revid_data	*pmic_rev_id;
+
+	int			c_health;
 };
 
 static bool is_secure(struct smb1355 *chip, int addr)
@@ -434,7 +436,10 @@
 		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
-		val->intval = smb1355_get_prop_connector_health(chip);
+		if (chip->c_health == -EINVAL)
+			val->intval = smb1355_get_prop_connector_health(chip);
+		else
+			val->intval = chip->c_health;
 		break;
 	default:
 		pr_err_ratelimited("parallel psy get prop %d not supported\n",
@@ -497,6 +502,10 @@
 		rc = smb1355_set_charge_param(chip, &chip->param.fcc,
 						val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		chip->c_health = val->intval;
+		power_supply_changed(chip->parallel_psy);
+		break;
 	default:
 		pr_debug("parallel power supply set prop %d not supported\n",
 			prop);
@@ -509,6 +518,13 @@
 static int smb1355_parallel_prop_is_writeable(struct power_supply *psy,
 					      enum power_supply_property prop)
 {
+	switch (prop) {
+	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
+		return 1;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -613,6 +629,7 @@
 	[0] = {
 		.name		= "wdog-bark",
 		.handler	= smb1355_handle_wdog_bark,
+		.wake		= true,
 	},
 	[1] = {
 		.name		= "chg-state-change",
@@ -713,6 +730,7 @@
 
 	chip->dev = &pdev->dev;
 	chip->param = v1_params;
+	chip->c_health = -EINVAL;
 	chip->name = "smb1355";
 	mutex_init(&chip->write_lock);
 
@@ -762,14 +780,26 @@
 	return 0;
 }
 
+static void smb1355_shutdown(struct platform_device *pdev)
+{
+	struct smb1355 *chip = platform_get_drvdata(pdev);
+	int rc;
+
+	/* disable parallel charging path */
+	rc = smb1355_set_parallel_charging(chip, true);
+	if (rc < 0)
+		pr_err("Couldn't disable parallel path rc=%d\n", rc);
+}
+
 static struct platform_driver smb1355_driver = {
 	.driver	= {
 		.name		= "qcom,smb1355-charger",
 		.owner		= THIS_MODULE,
 		.of_match_table	= match_table,
 	},
-	.probe	= smb1355_probe,
-	.remove	= smb1355_remove,
+	.probe		= smb1355_probe,
+	.remove		= smb1355_remove,
+	.shutdown	= smb1355_shutdown,
 };
 module_platform_driver(smb1355_driver);
 
diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c
index 803dd6e..edc0998 100644
--- a/drivers/power/supply/qcom/smb135x-charger.c
+++ b/drivers/power/supply/qcom/smb135x-charger.c
@@ -2218,7 +2218,7 @@
 	return  (reg & OTG_EN) ? 1 : 0;
 }
 
-struct regulator_ops smb135x_chg_otg_reg_ops = {
+static struct regulator_ops smb135x_chg_otg_reg_ops = {
 	.enable		= smb135x_chg_otg_regulator_enable,
 	.disable	= smb135x_chg_otg_regulator_disable,
 	.is_enabled	= smb135x_chg_otg_regulator_is_enable,
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index 86f08d2..e7267de 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -317,6 +317,7 @@
 	struct pwm_period_config	period;
 	int				supported_sizes;
 	int				force_pwm_size;
+	bool				update_period;
 };
 
 /* Public facing structure */
@@ -327,6 +328,7 @@
 	bool			enabled;
 	struct _qpnp_pwm_config	pwm_config;
 	struct	qpnp_lpg_config	lpg_config;
+	enum pm_pwm_mode	pwm_mode;
 	spinlock_t		lpg_lock;
 	enum qpnp_lpg_revision	revision;
 	u8			sub_type;
@@ -1211,28 +1213,32 @@
 	rc = qpnp_lpg_save_pwm_value(chip);
 	if (rc)
 		goto out;
-	rc = qpnp_lpg_configure_pwm(chip);
-	if (rc)
-		goto out;
-	rc = qpnp_configure_pwm_control(chip);
-	if (rc)
-		goto out;
 
-	if (!rc && chip->enabled) {
-		rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
-		if (rc) {
-			pr_err("Error in configuring pwm state, rc=%d\n", rc);
-			return rc;
-		}
+	if (pwm_config->update_period) {
+		rc = qpnp_lpg_configure_pwm(chip);
+		if (rc)
+			goto out;
+		rc = qpnp_configure_pwm_control(chip);
+		if (rc)
+			goto out;
+		if (!rc && chip->enabled) {
+			rc = qpnp_lpg_configure_pwm_state(chip,
+					QPNP_PWM_ENABLE);
+			if (rc) {
+				pr_err("Error in configuring pwm state, rc=%d\n",
+						rc);
+				return rc;
+			}
 
-		/* Enable the glitch removal after PWM is enabled */
-		rc = qpnp_lpg_glitch_removal(chip, true);
-		if (rc) {
-			pr_err("Error in enabling glitch control, rc=%d\n", rc);
-			return rc;
+			/* Enable the glitch removal after PWM is enabled */
+			rc = qpnp_lpg_glitch_removal(chip, true);
+			if (rc) {
+				pr_err("Error in enabling glitch control, rc=%d\n",
+						rc);
+				return rc;
+			}
 		}
 	}
-
 	pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n",
 		 (unsigned int)duty_value, (unsigned int)period_value,
 		 (tm_lvl == LVL_USEC) ? "usec" : "nsec",
@@ -1309,12 +1315,10 @@
 	return rc;
 }
 
+/* lpg_lock should be held while calling _pwm_enable() */
 static int _pwm_enable(struct qpnp_pwm_chip *chip)
 {
 	int rc = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chip->lpg_lock, flags);
 
 	if (QPNP_IS_PWM_CONFIG_SELECTED(
 		chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
@@ -1327,8 +1331,21 @@
 	if (!rc)
 		chip->enabled = true;
 
-	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+	return rc;
+}
 
+/* lpg_lock should be held while calling _pwm_change_mode() */
+static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
+{
+	int rc;
+
+	if (mode == PM_PWM_MODE_LPG)
+		rc = qpnp_configure_lpg_control(chip);
+	else
+		rc = qpnp_configure_pwm_control(chip);
+
+	if (rc)
+		pr_err("Failed to change the mode\n");
 	return rc;
 }
 
@@ -1375,12 +1392,14 @@
 
 	spin_lock_irqsave(&chip->lpg_lock, flags);
 
+	chip->pwm_config.update_period = false;
 	if (prev_period_us > INT_MAX / NSEC_PER_USEC ||
 			prev_period_us * NSEC_PER_USEC != period_ns) {
 		qpnp_lpg_calc_period(LVL_NSEC, period_ns, chip);
 		qpnp_lpg_save_period(chip);
 		pwm->state.period = period_ns;
 		chip->pwm_config.pwm_period = period_ns / NSEC_PER_USEC;
+		chip->pwm_config.update_period = true;
 	}
 
 	rc = _pwm_config(chip, LVL_NSEC, duty_ns, period_ns);
@@ -1403,11 +1422,15 @@
 {
 	int rc;
 	struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+	unsigned long flags;
 
+	spin_lock_irqsave(&chip->lpg_lock, flags);
 	rc = _pwm_enable(chip);
 	if (rc)
 		pr_err("Failed to enable PWM channel: %d\n", chip->channel_id);
 
+	spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
 	return rc;
 }
 
@@ -1445,20 +1468,6 @@
 					chip->channel_id);
 }
 
-static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
-{
-	int rc;
-
-	if (mode)
-		rc = qpnp_configure_lpg_control(chip);
-	else
-		rc = qpnp_configure_pwm_control(chip);
-
-	if (rc)
-		pr_err("Failed to change the mode\n");
-	return rc;
-}
-
 /**
  * pwm_change_mode - Change the PWM mode configuration
  * @pwm: the PWM device
@@ -1466,7 +1475,7 @@
  */
 int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
 {
-	int rc;
+	int rc = 0;
 	unsigned long flags;
 	struct qpnp_pwm_chip *chip;
 
@@ -1483,7 +1492,22 @@
 	chip = qpnp_pwm_from_pwm_dev(pwm);
 
 	spin_lock_irqsave(&chip->lpg_lock, flags);
-	rc = _pwm_change_mode(chip, mode);
+	if (chip->pwm_mode != mode) {
+		rc = _pwm_change_mode(chip, mode);
+		if (rc) {
+			pr_err("Failed to change mode: %d, rc=%d\n", mode, rc);
+			goto unlock;
+		}
+		chip->pwm_mode = mode;
+		if (chip->enabled) {
+			rc = _pwm_enable(chip);
+			if (rc) {
+				pr_err("Failed to enable PWM, rc=%d\n", rc);
+				goto unlock;
+			}
+		}
+	}
+unlock:
 	spin_unlock_irqrestore(&chip->lpg_lock, flags);
 
 	return rc;
@@ -1619,6 +1643,7 @@
 
 	spin_lock_irqsave(&chip->lpg_lock, flags);
 
+	chip->pwm_config.update_period = false;
 	if (chip->pwm_config.pwm_period != period_us) {
 		qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
 		qpnp_lpg_save_period(chip);
@@ -1629,6 +1654,7 @@
 		else
 			pwm->state.period
 				= (unsigned int)period_us * NSEC_PER_USEC;
+		chip->pwm_config.update_period = true;
 	}
 
 	rc = _pwm_config(chip, LVL_USEC, duty_us, period_us);
@@ -1735,6 +1761,7 @@
 	qpnp_lpg_calc_period(LVL_USEC, period, chip);
 	qpnp_lpg_save_period(chip);
 	chip->pwm_config.pwm_period = period;
+	chip->pwm_config.update_period = true;
 
 	rc = _pwm_config(chip, LVL_USEC, chip->pwm_config.pwm_duty, period);
 
@@ -1885,7 +1912,7 @@
 static int qpnp_parse_dt_config(struct platform_device *pdev,
 					struct qpnp_pwm_chip *chip)
 {
-	int			rc, enable, lut_entry_size, list_size, i;
+	int			rc, mode, lut_entry_size, list_size, i;
 	const char		*label;
 	const __be32		*prop;
 	u32			size;
@@ -2069,18 +2096,20 @@
 		}
 	}
 
-	rc = of_property_read_u32(of_node, "qcom,mode-select", &enable);
+	rc = of_property_read_u32(of_node, "qcom,mode-select", &mode);
 	if (rc)
 		goto read_opt_props;
 
-	if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
-		(enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+	if (mode > PM_PWM_MODE_LPG ||
+		(mode == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+		(mode == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
 		dev_err(&pdev->dev, "%s: Invalid mode select\n", __func__);
 		rc = -EINVAL;
 		goto out;
 	}
 
-	_pwm_change_mode(chip, enable);
+	chip->pwm_mode = mode;
+	_pwm_change_mode(chip, mode);
 	_pwm_enable(chip);
 
 read_opt_props:
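A minimal sketch of a client using pwm_change_mode() with the mode caching added above, assuming a hypothetical LED driver and the downstream qpnp PWM header; my_led_set_pattern and the header path are illustrative assumptions, not part of this patch:

	#include <linux/pwm.h>
	#include <linux/qpnp/pwm.h>

	static int my_led_set_pattern(struct pwm_device *pwm, bool pattern)
	{
		enum pm_pwm_mode mode = pattern ? PM_PWM_MODE_LPG : PM_PWM_MODE_PWM;
		int rc;

		/* With this change the call is a no-op when the mode is unchanged */
		rc = pwm_change_mode(pwm, mode);
		if (rc)
			return rc;

		return pwm_enable(pwm);
	}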
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100..00602ab 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@
 {
 	static const char * const strings[] = RNC_STATES;
 
+	if (state >= ARRAY_SIZE(strings))
+		return "UNKNOWN";
+
 	return strings[state];
 }
 #undef C
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ad33238..8c4641b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@
 	struct qla_hw_data *ha = vha->hw;
 	ssize_t rval = 0;
 
-	if (ha->optrom_state != QLA_SREADING)
-		return 0;
-
 	mutex_lock(&ha->optrom_mutex);
+
+	if (ha->optrom_state != QLA_SREADING)
+		goto out;
+
 	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
 	    ha->optrom_region_size);
+
+out:
 	mutex_unlock(&ha->optrom_mutex);
 
 	return rval;
@@ -263,14 +266,19 @@
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->optrom_state != QLA_SWRITING)
+	mutex_lock(&ha->optrom_mutex);
+
+	if (ha->optrom_state != QLA_SWRITING) {
+		mutex_unlock(&ha->optrom_mutex);
 		return -EINVAL;
-	if (off > ha->optrom_region_size)
+	}
+	if (off > ha->optrom_region_size) {
+		mutex_unlock(&ha->optrom_mutex);
 		return -ERANGE;
+	}
 	if (off + count > ha->optrom_region_size)
 		count = ha->optrom_region_size - off;
 
-	mutex_lock(&ha->optrom_mutex);
 	memcpy(&ha->optrom_buffer[off], buf, count);
 	mutex_unlock(&ha->optrom_mutex);
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 44c466b..4bd6fd4 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -142,6 +142,7 @@
 	struct sg_device *parentdp;	/* owning device */
 	wait_queue_head_t read_wait;	/* queue read until command done */
 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
+	struct mutex f_mutex;	/* protect against changes in this fd */
 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
@@ -155,6 +156,7 @@
 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
 	char mmap_called;	/* 0 -> mmap() never called on this fd */
+	char res_in_use;	/* 1 -> 'reserve' array in use */
 	struct kref f_ref;
 	struct execute_work ew;
 } Sg_fd;
@@ -198,7 +200,6 @@
 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
 static Sg_request *sg_add_request(Sg_fd * sfp);
 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
 static Sg_device *sg_get_dev(int dev);
 static void sg_device_destroy(struct kref *kref);
 
@@ -499,7 +500,7 @@
 		old_hdr->result = EIO;
 		break;
 	case DID_ERROR:
-		old_hdr->result = (srp->sense_b[0] == 0 && 
+		old_hdr->result = (srp->sense_b[0] == 0 &&
 				  hp->masked_status == GOOD) ? 0 : EIO;
 		break;
 	default:
@@ -614,6 +615,7 @@
 	}
 	buf += SZ_SG_HEADER;
 	__get_user(opcode, buf);
+	mutex_lock(&sfp->f_mutex);
 	if (sfp->next_cmd_len > 0) {
 		cmd_size = sfp->next_cmd_len;
 		sfp->next_cmd_len = 0;	/* reset so only this write() effected */
@@ -622,6 +624,7 @@
 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
 			cmd_size = 12;
 	}
+	mutex_unlock(&sfp->f_mutex);
 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
 		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
 /* Determine buffer size.  */
@@ -721,7 +724,7 @@
 			sg_remove_request(sfp, srp);
 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
 		}
-		if (sg_res_in_use(sfp)) {
+		if (sfp->res_in_use) {
 			sg_remove_request(sfp, srp);
 			return -EBUSY;	/* reserve buffer already being used */
 		}
@@ -856,8 +859,10 @@
 			return -ENXIO;
 		if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
 			return -EFAULT;
+		mutex_lock(&sfp->parentdp->open_rel_lock);
 		result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
 				 1, read_only, 1, &srp);
+		mutex_unlock(&sfp->parentdp->open_rel_lock);
 		if (result < 0)
 			return result;
 		result = wait_event_interruptible(sfp->read_wait,
@@ -896,10 +901,12 @@
 			return result;
 		if (val) {
 			sfp->low_dma = 1;
-			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
 				val = (int) sfp->reserve.bufflen;
+				mutex_lock(&sfp->parentdp->open_rel_lock);
 				sg_remove_scat(sfp, &sfp->reserve);
 				sg_build_reserve(sfp, val);
+				mutex_unlock(&sfp->parentdp->open_rel_lock);
 			}
 		} else {
 			if (atomic_read(&sdp->detaching))
@@ -967,16 +974,23 @@
 		result = get_user(val, ip);
 		if (result)
 			return result;
-                if (val < 0)
-                        return -EINVAL;
+		if (val < 0)
+			return -EINVAL;
 		val = min_t(int, val,
 			    max_sectors_bytes(sdp->device->request_queue));
+		mutex_lock(&sfp->f_mutex);
 		if (val != sfp->reserve.bufflen) {
-			if (sg_res_in_use(sfp) || sfp->mmap_called)
+			if (sfp->mmap_called ||
+			    sfp->res_in_use) {
+				mutex_unlock(&sfp->f_mutex);
 				return -EBUSY;
+			}
+			mutex_lock(&sfp->parentdp->open_rel_lock);
 			sg_remove_scat(sfp, &sfp->reserve);
 			sg_build_reserve(sfp, val);
+			mutex_unlock(&sfp->parentdp->open_rel_lock);
 		}
+		mutex_unlock(&sfp->f_mutex);
 		return 0;
 	case SG_GET_RESERVED_SIZE:
 		val = min_t(int, sfp->reserve.bufflen,
@@ -1030,8 +1044,8 @@
 				if (srp) {
 					rinfo[val].req_state = srp->done + 1;
 					rinfo[val].problem =
-					    srp->header.masked_status & 
-					    srp->header.host_status & 
+					    srp->header.masked_status &
+					    srp->header.host_status &
 					    srp->header.driver_status;
 					if (srp->done)
 						rinfo[val].duration =
@@ -1052,7 +1066,7 @@
 				}
 			}
 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-			result = __copy_to_user(p, rinfo, 
+			result = __copy_to_user(p, rinfo,
 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
 			result = result ? -EFAULT : 0;
 			kfree(rinfo);
@@ -1128,14 +1142,14 @@
 		return -ENXIO;
 
 	sdev = sdp->device;
-	if (sdev->host->hostt->compat_ioctl) { 
+	if (sdev->host->hostt->compat_ioctl) {
 		int ret;
 
 		ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
 
 		return ret;
 	}
-	
+
 	return -ENOIOCTLCMD;
 }
 #endif
@@ -1239,6 +1253,7 @@
 	unsigned long req_sz, len, sa;
 	Sg_scatter_hold *rsv_schp;
 	int k, length;
+	int ret = 0;
 
 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
 		return -ENXIO;
@@ -1249,8 +1264,11 @@
 	if (vma->vm_pgoff)
 		return -EINVAL;	/* want no offset */
 	rsv_schp = &sfp->reserve;
-	if (req_sz > rsv_schp->bufflen)
-		return -ENOMEM;	/* cannot map more than reserved buffer */
+	mutex_lock(&sfp->f_mutex);
+	if (req_sz > rsv_schp->bufflen) {
+		ret = -ENOMEM;	/* cannot map more than reserved buffer */
+		goto out;
+	}
 
 	sa = vma->vm_start;
 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1264,7 +1282,9 @@
 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = sfp;
 	vma->vm_ops = &sg_mmap_vm_ops;
-	return 0;
+out:
+	mutex_unlock(&sfp->f_mutex);
+	return ret;
 }
 
 static void
@@ -1619,7 +1639,7 @@
 	else
 		def_reserved_size = sg_big_buff;
 
-	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), 
+	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
 				    SG_MAX_DEVS, "sg");
 	if (rc)
 		return rc;
@@ -1728,13 +1748,25 @@
 		md = &map_data;
 
 	if (md) {
-		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+		mutex_lock(&sfp->f_mutex);
+		if (dxfer_len <= rsv_schp->bufflen &&
+		    !sfp->res_in_use) {
+			sfp->res_in_use = 1;
 			sg_link_reserve(sfp, srp, dxfer_len);
-		else {
+		} else if (hp->flags & SG_FLAG_MMAP_IO) {
+			res = -EBUSY; /* sfp->res_in_use == 1 */
+			if (dxfer_len > rsv_schp->bufflen)
+				res = -ENOMEM;
+			mutex_unlock(&sfp->f_mutex);
+			return res;
+		} else {
 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
-			if (res)
+			if (res) {
+				mutex_unlock(&sfp->f_mutex);
 				return res;
+			}
 		}
+		mutex_unlock(&sfp->f_mutex);
 
 		md->pages = req_schp->pages;
 		md->page_order = req_schp->page_order;
@@ -2025,6 +2057,8 @@
 	req_schp->sglist_len = 0;
 	sfp->save_scat_len = 0;
 	srp->res_used = 0;
+	/* Called without mutex lock to avoid deadlock */
+	sfp->res_in_use = 0;
 }
 
 static Sg_request *
@@ -2136,6 +2170,7 @@
 	rwlock_init(&sfp->rq_list_lock);
 
 	kref_init(&sfp->f_ref);
+	mutex_init(&sfp->f_mutex);
 	sfp->timeout = SG_DEFAULT_TIMEOUT;
 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2211,20 +2246,6 @@
 	schedule_work(&sfp->ew.work);
 }
 
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
-	const Sg_request *srp;
-	unsigned long iflags;
-
-	read_lock_irqsave(&sfp->rq_list_lock, iflags);
-	for (srp = sfp->headrp; srp; srp = srp->nextrp)
-		if (srp->res_used)
-			break;
-	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-	return srp ? 1 : 0;
-}
-
 #ifdef CONFIG_SCSI_PROC_FS
 static int
 sg_idr_max_id(int id, void *p, void *data)
@@ -2299,7 +2320,7 @@
 };
 
 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
-static ssize_t sg_proc_write_dressz(struct file *filp, 
+static ssize_t sg_proc_write_dressz(struct file *filp,
 		const char __user *buffer, size_t count, loff_t *off);
 static const struct file_operations dressz_fops = {
 	.owner = THIS_MODULE,
@@ -2439,7 +2460,7 @@
 	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
 		   size_t count, loff_t *off)
 {
@@ -2460,7 +2481,7 @@
 	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
 }
 
-static ssize_t 
+static ssize_t
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
 		     size_t count, loff_t *off)
 {
@@ -2620,7 +2641,7 @@
 			hp = &srp->header;
 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
 			if (srp->res_used) {
-				if (new_interface && 
+				if (new_interface &&
 				    (SG_FLAG_MMAP_IO & hp->flags))
 					cp = "     mmap>> ";
 				else
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 557ca19..11e11e4 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -950,6 +950,10 @@
 	seq_printf(file, "hba->saved_err = 0x%x\n", hba->saved_err);
 	seq_printf(file, "hba->saved_uic_err = 0x%x\n", hba->saved_uic_err);
 
+	seq_printf(file, "power_mode_change_cnt = %d\n",
+			hba->ufs_stats.power_mode_change_cnt);
+	seq_printf(file, "hibern8_exit_cnt = %d\n",
+			hba->ufs_stats.hibern8_exit_cnt);
 	return 0;
 }
 
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3578b88..b6ba4c4 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4875,6 +4875,7 @@
 
 		memcpy(&hba->pwr_info, pwr_mode,
 			sizeof(struct ufs_pa_layer_attr));
+		hba->ufs_stats.power_mode_change_cnt++;
 	}
 
 	return ret;
@@ -7170,6 +7171,12 @@
 	} while (err && --retries);
 
 	/*
+	 * There is no point in proceeding if recovery has failed
+	 * even after multiple retries.
+	 */
+	if (err && ufshcd_is_embedded_dev(hba))
+		BUG();
+	/*
 	 * After reset the door-bell might be cleared, complete
 	 * outstanding requests in s/w here.
 	 */
@@ -7634,9 +7641,6 @@
 {
 	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
 
-	hba->ufs_stats.hibern8_exit_cnt = 0;
-	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
-
 	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
 	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
 	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
@@ -9708,6 +9712,8 @@
 
 static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
 {
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
 	__ufshcd_shutdown_clkscaling(hba);
 	device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
 }
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index fc855db..1b21238 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -644,6 +644,7 @@
 	struct ufshcd_clk_ctx clk_rel;
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
+	u32 power_mode_change_cnt;
 	struct ufs_uic_err_reg_hist pa_err;
 	struct ufs_uic_err_reg_hist dl_err;
 	struct ufs_uic_err_reg_hist nl_err;
@@ -1198,6 +1199,14 @@
 		pwr_info->pwr_tx == FASTAUTO_MODE);
 }
 
+static inline bool ufshcd_is_embedded_dev(struct ufs_hba *hba)
+{
+	if ((hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_BOOTABLE) ||
+	    (hba->dev_info.b_device_sub_class == UFS_DEV_EMBEDDED_NON_BOOTABLE))
+		return true;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_FS
 static inline void ufshcd_init_req_stats(struct ufs_hba *hba)
 {
diff --git a/drivers/slimbus/slimbus.c b/drivers/slimbus/slimbus.c
index 567f290..3ae9a10 100644
--- a/drivers/slimbus/slimbus.c
+++ b/drivers/slimbus/slimbus.c
@@ -2397,6 +2397,8 @@
 		bool opensl1valid = false;
 		int maxctrlw1, maxctrlw3, i;
 
+		/* initialize the array to zero */
+		memset(opensl1, 0x0, sizeof(opensl1));
 		finalexp = (ctrl->sched.chc3[last3])->rootexp;
 		if (last1 >= 0) {
 			slc1 = ctrl->sched.chc1[coeff1];
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 6eef58f..37b33e6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -54,7 +54,7 @@
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor_v01.o
 obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
-obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
+obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
 
 obj-$(CONFIG_MEM_SHARE_QMI_SERVICE)		+= memshare/
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index ecf72ca..6ce481b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -397,12 +397,6 @@
 	uint32_t rejuvenate_ack_err;
 };
 
-#define MAX_NO_OF_MAC_ADDR 4
-struct icnss_wlan_mac_addr {
-	u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
-	uint32_t no_of_mac_addr_set;
-};
-
 enum icnss_pdr_cause_index {
 	ICNSS_FW_CRASH,
 	ICNSS_ROOT_PD_CRASH,
@@ -479,8 +473,6 @@
 	uint64_t vph_pwr;
 	atomic_t pm_count;
 	struct ramdump_device *msa0_dump_dev;
-	bool is_wlan_mac_set;
-	struct icnss_wlan_mac_addr wlan_mac_addr;
 	bool bypass_s1_smmu;
 	u8 cause_for_rejuvenation;
 	u8 requesting_sub_system;
@@ -3279,78 +3271,6 @@
 }
 EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
 
-int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len)
-{
-	struct icnss_priv *priv = penv;
-	uint32_t no_of_mac_addr;
-	struct icnss_wlan_mac_addr *addr = NULL;
-	int iter;
-	u8 *temp = NULL;
-
-	if (!priv) {
-		icnss_pr_err("Priv data is NULL\n");
-		return -EINVAL;
-	}
-
-	if (priv->is_wlan_mac_set) {
-		icnss_pr_dbg("WLAN MAC address is already set\n");
-		return 0;
-	}
-
-	if (len == 0 || (len % ETH_ALEN) != 0) {
-		icnss_pr_err("Invalid length %d\n", len);
-		return -EINVAL;
-	}
-
-	no_of_mac_addr = len / ETH_ALEN;
-	if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) {
-		icnss_pr_err("Exceed maxinum supported MAC address %u %u\n",
-			     MAX_NO_OF_MAC_ADDR, no_of_mac_addr);
-		return -EINVAL;
-	}
-
-	priv->is_wlan_mac_set = true;
-	addr = &priv->wlan_mac_addr;
-	addr->no_of_mac_addr_set = no_of_mac_addr;
-	temp = &addr->mac_addr[0][0];
-
-	for (iter = 0; iter < no_of_mac_addr;
-	     ++iter, temp += ETH_ALEN, in += ETH_ALEN) {
-		ether_addr_copy(temp, in);
-		icnss_pr_dbg("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n",
-			     temp[0], temp[1], temp[2],
-			     temp[3], temp[4], temp[5]);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_set_wlan_mac_address);
-
-u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num)
-{
-	struct icnss_priv *priv = dev_get_drvdata(dev);
-	struct icnss_wlan_mac_addr *addr = NULL;
-
-	if (priv->magic != ICNSS_MAGIC) {
-		icnss_pr_err("Invalid drvdata: dev %p, data %p, magic 0x%x\n",
-			     dev, priv, priv->magic);
-		goto out;
-	}
-
-	if (!priv->is_wlan_mac_set) {
-		icnss_pr_dbg("WLAN MAC address is not set\n");
-		goto out;
-	}
-
-	addr = &priv->wlan_mac_addr;
-	*num = addr->no_of_mac_addr_set;
-	return &addr->mac_addr[0][0];
-out:
-	*num = 0;
-	return NULL;
-}
-EXPORT_SYMBOL(icnss_get_wlan_mac_address);
-
 int icnss_trigger_recovery(struct device *dev)
 {
 	int ret = 0;
diff --git a/drivers/soc/qcom/icnss_utils.c b/drivers/soc/qcom/icnss_utils.c
deleted file mode 100644
index 6974146..0000000
--- a/drivers/soc/qcom/icnss_utils.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <soc/qcom/icnss.h>
-
-#define ICNSS_MAX_CH_NUM 45
-
-static DEFINE_MUTEX(unsafe_channel_list_lock);
-static DEFINE_SPINLOCK(dfs_nol_info_lock);
-static int driver_load_cnt;
-
-static struct icnss_unsafe_channel_list {
-	u16 unsafe_ch_count;
-	u16 unsafe_ch_list[ICNSS_MAX_CH_NUM];
-} unsafe_channel_list;
-
-static struct icnss_dfs_nol_info {
-	void *dfs_nol_info;
-	u16 dfs_nol_info_len;
-} dfs_nol_info;
-
-int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
-{
-	mutex_lock(&unsafe_channel_list_lock);
-	if ((!unsafe_ch_list) || (ch_count > ICNSS_MAX_CH_NUM)) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -EINVAL;
-	}
-
-	unsafe_channel_list.unsafe_ch_count = ch_count;
-
-	if (ch_count != 0) {
-		memcpy(
-		       (char *)unsafe_channel_list.unsafe_ch_list,
-		       (char *)unsafe_ch_list, ch_count * sizeof(u16));
-	}
-	mutex_unlock(&unsafe_channel_list_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_set_wlan_unsafe_channel);
-
-int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
-				  u16 *ch_count, u16 buf_len)
-{
-	mutex_lock(&unsafe_channel_list_lock);
-	if (!unsafe_ch_list || !ch_count) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -EINVAL;
-	}
-
-	if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
-		mutex_unlock(&unsafe_channel_list_lock);
-		return -ENOMEM;
-	}
-
-	*ch_count = unsafe_channel_list.unsafe_ch_count;
-	memcpy(
-		(char *)unsafe_ch_list,
-		(char *)unsafe_channel_list.unsafe_ch_list,
-		unsafe_channel_list.unsafe_ch_count * sizeof(u16));
-	mutex_unlock(&unsafe_channel_list_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_get_wlan_unsafe_channel);
-
-int icnss_wlan_set_dfs_nol(const void *info, u16 info_len)
-{
-	void *temp;
-	void *old_nol_info;
-	struct icnss_dfs_nol_info *dfs_info;
-
-	if (!info || !info_len)
-		return -EINVAL;
-
-	temp = kmalloc(info_len, GFP_ATOMIC);
-	if (!temp)
-		return -ENOMEM;
-
-	memcpy(temp, info, info_len);
-	spin_lock_bh(&dfs_nol_info_lock);
-	dfs_info = &dfs_nol_info;
-	old_nol_info = dfs_info->dfs_nol_info;
-	dfs_info->dfs_nol_info = temp;
-	dfs_info->dfs_nol_info_len = info_len;
-	spin_unlock_bh(&dfs_nol_info_lock);
-	kfree(old_nol_info);
-
-	return 0;
-}
-EXPORT_SYMBOL(icnss_wlan_set_dfs_nol);
-
-int icnss_wlan_get_dfs_nol(void *info, u16 info_len)
-{
-	int len;
-	struct icnss_dfs_nol_info *dfs_info;
-
-	if (!info || !info_len)
-		return -EINVAL;
-
-	spin_lock_bh(&dfs_nol_info_lock);
-
-	dfs_info = &dfs_nol_info;
-	if (dfs_info->dfs_nol_info == NULL ||
-	    dfs_info->dfs_nol_info_len == 0) {
-		spin_unlock_bh(&dfs_nol_info_lock);
-		return -ENOENT;
-	}
-
-	len = min(info_len, dfs_info->dfs_nol_info_len);
-	memcpy(info, dfs_info->dfs_nol_info, len);
-	spin_unlock_bh(&dfs_nol_info_lock);
-
-	return len;
-}
-EXPORT_SYMBOL(icnss_wlan_get_dfs_nol);
-
-void icnss_increment_driver_load_cnt(void)
-{
-	++driver_load_cnt;
-}
-EXPORT_SYMBOL(icnss_increment_driver_load_cnt);
-
-int icnss_get_driver_load_cnt(void)
-{
-	return driver_load_cnt;
-}
-EXPORT_SYMBOL(icnss_get_driver_load_cnt);
diff --git a/drivers/soc/qcom/llcc-core.c b/drivers/soc/qcom/llcc-core.c
index 3d6b002..a5e5b56 100644
--- a/drivers/soc/qcom/llcc-core.c
+++ b/drivers/soc/qcom/llcc-core.c
@@ -35,6 +35,15 @@
 #define DRP0_INTERRUPT_ENABLE	BIT(6)
 #define SB_DB_DRP_INTERRUPT_ENABLE	0x3
 
+#ifdef CONFIG_EDAC_QCOM_LLCC
+#define ENABLE_ECC_INTR 1
+#else
+#define ENABLE_ECC_INTR 0
+#endif
+
+static int enable_ecc_intr = ENABLE_ECC_INTR;
+module_param(enable_ecc_intr, int, 0444);
+
 static void qcom_llcc_core_setup(struct regmap *llcc_regmap, uint32_t b_off)
 {
 	u32 sb_err_threshold;
@@ -81,7 +90,8 @@
 		return -EINVAL;
 	}
 
-	qcom_llcc_core_setup(llcc_regmap, b_off);
+	if (enable_ecc_intr)
+		qcom_llcc_core_setup(llcc_regmap, b_off);
 
 	return 0;
 }
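
Note on the llcc-core.c hunk above: the ECC interrupt setup is now gated behind a read-only module parameter whose default follows CONFIG_EDAC_QCOM_LLCC. A minimal sketch of the same pattern, with hypothetical module and parameter names:

#include <linux/module.h>
#include <linux/printk.h>

#ifdef CONFIG_EXAMPLE_FEATURE
#define EXAMPLE_FEATURE_DEFAULT 1
#else
#define EXAMPLE_FEATURE_DEFAULT 0
#endif

/* 0444: visible under /sys/module/.../parameters, not writable at runtime. */
static int enable_example_feature = EXAMPLE_FEATURE_DEFAULT;
module_param(enable_example_feature, int, 0444);

static int example_setup(void)
{
	if (!enable_example_feature)
		return 0;		/* leave the hardware untouched */

	pr_info("example: feature setup enabled\n");
	return 0;
}
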
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index 3863b09..5234580 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -73,7 +73,7 @@
 	SCT_ENTRY("display",     16, 16, 2816, 1, 0, 0xFFC, 0x2, 0, 0, 1, 1, 0),
 	SCT_ENTRY("videofw",     17, 17, 2816, 1, 0, 0xFFC, 0x2, 0, 0, 1, 1, 0),
 	SCT_ENTRY("modemhp_fix", 20, 20, 1024, 2, 1, 0x0,  0xF00, 0, 0, 1, 1, 0),
-	SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0x0,  0xF, 0, 0, 1, 1, 0),
+	SCT_ENTRY("modem_paging", 21, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
 	SCT_ENTRY("audiohw",     22, 22, 1024, 1, 1, 0xFFC, 0x2, 0, 0, 1, 1, 0),
 };
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
index c1e8feb..dafae4c 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -242,7 +242,7 @@
 	(M_BKE_GC_GC_BMSK >> \
 	(M_BKE_GC_GC_SHFT + 1))
 
-static int bimc_div(int64_t *a, uint32_t b)
+static int bimc_div(uint64_t *a, uint32_t b)
 {
 	if ((*a > 0) && (*a < b)) {
 		*a = 0;
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index 707d9e7..5a110bb 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -566,6 +566,11 @@
 		bcm_clist_add(node);
 	}
 
+	if (!cur_rsc) {
+		MSM_BUS_ERR("%s: cur_rsc is NULL\n", __func__);
+		return ret;
+	}
+
 	cur_mbox = cur_rsc->rscdev->mbox;
 	cur_bcm_clist = cur_rsc->rscdev->bcm_clist;
 
@@ -1004,6 +1009,7 @@
 
 				bus_node_info->fabdev->noc_ops.qos_init(
 					node_dev,
+					bus_node_info,
 					bus_node_info->fabdev->qos_base,
 					bus_node_info->fabdev->base_offset,
 					bus_node_info->fabdev->qos_off,
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
index 996c719..bdcdf29 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
@@ -15,10 +15,14 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/spinlock.h>
 #include "msm_bus_core.h"
 #include "msm_bus_noc.h"
 #include "msm_bus_rpmh.h"
 
+static DEFINE_SPINLOCK(noc_lock);
+
 /* NOC_QOS generic */
 #define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
 #define SAT_SCALE 16	/* 16 bytes minimum for saturation */
@@ -29,6 +33,7 @@
 #define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
 #define MIN_SAT_FIELD	1
 #define MIN_BW_FIELD	1
+#define MSM_BUS_FAB_MEM_NOC 6152
 
 #define NOC_QOS_REG_BASE(b, o)		((b) + (o))
 
@@ -102,6 +107,8 @@
 	NOC_QOS_SATn_SAT_SHFT		= 0x0,
 };
 
+static void __iomem *memnoc_qos_base;
+
 static int noc_div(uint64_t *a, uint32_t b)
 {
 	if ((*a > 0) && (*a < b)) {
@@ -169,38 +176,10 @@
 	wmb();
 }
 
-static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off,
-		uint32_t mport, uint32_t qos_delta,
-		struct msm_bus_noc_limiter *lim, uint32_t lim_en)
+static void noc_enable_qos_limiter(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint32_t lim_en)
 {
 	uint32_t reg_val, val;
-
-	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
-		qos_delta));
-
-	writel_relaxed((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))),
-		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
-
-	/* Ensure we disable limiter before config*/
-	wmb();
-
-	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
-		qos_delta));
-	val = lim->bw << NOC_QOS_LIMITBW_BWn_SHFT;
-	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) |
-		(val & NOC_QOS_LIMITBW_BWn_BMSK)),
-		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
-
-	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
-		qos_delta));
-	val = lim->sat << NOC_QOS_LIMITBW_SATn_SHFT;
-	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) |
-		(val & NOC_QOS_LIMITBW_SATn_BMSK)),
-		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
-
-	/* Ensure qos limiter settings in place before possibly enabling */
-	wmb();
-
 	reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport,
 		qos_delta));
 	val = lim_en << NOC_QOS_MCTL_LIMIT_ENn_SHFT;
@@ -208,9 +187,50 @@
 		(val & NOC_QOS_MCTL_LIMIT_ENn_BMSK)),
 		NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta));
 
+	/* Ensure we disable/enable limiter before exiting */
 	wmb();
 }
 
+static void noc_set_qos_limit_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint32_t bw)
+{
+	uint32_t reg_val, val;
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = bw << NOC_QOS_LIMITBW_BWn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_BWn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure we set limiter bw before exiting */
+	wmb();
+}
+
+static void noc_set_qos_limit_sat(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint32_t sat)
+{
+	uint32_t reg_val, val;
+	reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport,
+		qos_delta));
+	val = sat << NOC_QOS_LIMITBW_SATn_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) |
+		(val & NOC_QOS_LIMITBW_SATn_BMSK)),
+		NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta));
+
+	/* Ensure we set limiter sat before exiting */
+	wmb();
+}
+
+static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_limiter *lim, uint32_t lim_en)
+{
+	noc_enable_qos_limiter(base, qos_off, mport, qos_delta, 0);
+	noc_set_qos_limit_bw(base, qos_off, mport, qos_delta, lim->bw);
+	noc_set_qos_limit_sat(base, qos_off, mport, qos_delta, lim->sat);
+	noc_enable_qos_limiter(base, qos_off, mport, qos_delta, lim_en);
+}
+
 static void noc_set_qos_regulator(void __iomem *base, uint32_t qos_off,
 		uint32_t mport, uint32_t qos_delta,
 		struct msm_bus_noc_regulator *reg,
@@ -317,6 +337,7 @@
 }
 
 static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				struct msm_bus_node_device_type *fabdev,
 				void __iomem *qos_base,
 				uint32_t qos_off, uint32_t qos_delta,
 				uint32_t qos_freq)
@@ -324,6 +345,7 @@
 	struct msm_bus_noc_qos_params *qos_params;
 	int ret = 0;
 	int i;
+	unsigned long flags;
 
 	qos_params = &info->node_info->qos_params;
 
@@ -333,6 +355,11 @@
 		goto err_qos_init;
 	}
 
+	spin_lock_irqsave(&noc_lock, flags);
+
+	if (fabdev->node_info->id == MSM_BUS_FAB_MEM_NOC)
+		memnoc_qos_base = qos_base;
+
 	for (i = 0; i < info->node_info->num_qports; i++) {
 		noc_set_qos_dflt_prio(qos_base, qos_off,
 					info->node_info->qport[i],
@@ -356,10 +383,87 @@
 					qos_delta,
 					qos_params->urg_fwd_en);
 	}
+	spin_unlock_irqrestore(&noc_lock, flags);
+
 err_qos_init:
 	return ret;
 }
 
+int msm_bus_noc_throttle_wa(bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&noc_lock, flags);
+
+	if (!memnoc_qos_base) {
+		MSM_BUS_ERR("Memnoc QoS Base address not found!");
+		goto noc_throttle_exit;
+	}
+
+	if (enable) {
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 2,
+								0x1000, 0x1B);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 3,
+								0x1000, 0x1B);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 10,
+								0x1000, 0x30);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 11,
+								0x1000, 0x30);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 10,
+								0x1000, 1);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 11,
+								0x1000, 1);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 2,
+								0x1000, 1);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 3,
+								0x1000, 1);
+	} else {
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 2,
+								0x1000, 0);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 3,
+								0x1000, 0);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 10,
+								0x1000, 0);
+		noc_enable_qos_limiter(memnoc_qos_base, 0x10000, 11,
+								0x1000, 0);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 2,
+								0x1000, 0);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 3,
+								0x1000, 0);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 10,
+								0x1000, 0);
+		noc_set_qos_limit_bw(memnoc_qos_base, 0x10000, 11,
+								0x1000, 0);
+	}
+
+noc_throttle_exit:
+	spin_unlock_irqrestore(&noc_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_throttle_wa);
+
+int msm_bus_noc_priority_wa(bool enable)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&noc_lock, flags);
+	if (!memnoc_qos_base) {
+		MSM_BUS_ERR("Memnoc QoS Base address not found!");
+		goto noc_priority_exit;
+	}
+
+	if (enable)
+		noc_set_qos_dflt_prio(memnoc_qos_base, 0x10000, 0,
+								0x1000, 7);
+	else
+		noc_set_qos_dflt_prio(memnoc_qos_base, 0x10000, 0,
+								0x1000, 6);
+noc_priority_exit:
+	spin_unlock_irqrestore(&noc_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_priority_wa);
+
 int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
 {
 	if (!bus_dev)
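
msm_bus_noc_throttle_wa() and msm_bus_noc_priority_wa() are exported above so code outside this file can toggle the cached MEMNOC limiter and default-priority settings. A hedged usage sketch from a hypothetical client; the prototypes are assumed to be published through linux/msm-bus.h, as included in this file:

#include <linux/msm-bus.h>	/* assumed home of the WA prototypes */

/* Hypothetical mitigation hook: clamp MEMNOC masters while throttling is
 * requested and restore defaults afterwards. Both helpers return 0 even
 * when the MEMNOC QoS base has not been cached yet, so the caller only
 * has to keep enable/disable calls balanced. */
static void example_memnoc_mitigation(bool throttle)
{
	msm_bus_noc_throttle_wa(throttle);
	msm_bus_noc_priority_wa(throttle);
}
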
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index a9733f1..8929959 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -42,6 +42,7 @@
 /* New types introduced for adhoc topology */
 struct msm_bus_noc_ops {
 	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			struct msm_bus_node_device_type *fabdev,
 			void __iomem *qos_base, uint32_t qos_off,
 			uint32_t qos_delta, uint32_t qos_freq);
 	int (*set_bw)(struct msm_bus_node_device_type *dev,
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
index 03042fa..4cff9f2 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -410,8 +410,10 @@
 {
 	struct rule_node_info *node_it = NULL;
 
+	mutex_lock(&msm_bus_rules_lock);
 	list_for_each_entry(node_it, &node_list, link)
 		print_rules(node_it);
+	mutex_unlock(&msm_bus_rules_lock);
 }
 
 void print_rules_buf(char *buf, int max_buf)
@@ -421,6 +423,7 @@
 	int i;
 	int cnt = 0;
 
+	mutex_lock(&msm_bus_rules_lock);
 	list_for_each_entry(node_it, &node_list, link) {
 		cnt += scnprintf(buf + cnt, max_buf - cnt,
 			"\n Now printing rules for Node %d cur_rule %d\n",
@@ -452,6 +455,7 @@
 					node_rule->rule_ops.mode);
 		}
 	}
+	mutex_unlock(&msm_bus_rules_lock);
 }
 
 static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
@@ -716,11 +720,12 @@
 {
 	bool ret = false;
 
+	mutex_lock(&msm_bus_rules_lock);
 	if (list_empty(&node_list))
 		ret = false;
 	else
 		ret = true;
-
+	mutex_unlock(&msm_bus_rules_lock);
 	return ret;
 }
 
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index a443239..9d22925 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -56,6 +56,7 @@
 
 #define PIL_NUM_DESC		10
 #define NUM_OF_ENCRYPTED_KEY	3
+#define MAX_LEN 96
 static void __iomem *pil_info_base;
 static void __iomem *pil_minidump_base;
 
@@ -836,6 +837,23 @@
 	return 0;
 }
 
+static int pil_notify_aop(struct pil_desc *desc, char *status)
+{
+	struct qmp_pkt pkt;
+	char mbox_msg[MAX_LEN];
+
+	if (!desc->signal_aop)
+		return 0;
+
+	snprintf(mbox_msg, MAX_LEN,
+		"{class: image, res: load_state, name: %s, val: %s}",
+		desc->name, status);
+	pkt.size = MAX_LEN;
+	pkt.data = mbox_msg;
+
+	return mbox_send_message(desc->mbox, &pkt);
+}
+
 /* Synchronize request_firmware() with suspend */
 static DECLARE_RWSEM(pil_pm_rwsem);
 
@@ -857,6 +875,12 @@
 	bool mem_protect = false;
 	bool hyp_assign = false;
 
+	ret = pil_notify_aop(desc, "on");
+	if (ret < 0) {
+		pil_err(desc, "Failed to send ON message to AOP rc:%d\n", ret);
+		return ret;
+	}
+
 	if (desc->shutdown_fail)
 		pil_err(desc, "Subsystem shutdown failed previously!\n");
 
@@ -1015,6 +1039,7 @@
 			priv->region = NULL;
 		}
 		pil_release_mmap(desc);
+		pil_notify_aop(desc, "off");
 	}
 	return ret;
 }
@@ -1026,6 +1051,7 @@
  */
 void pil_shutdown(struct pil_desc *desc)
 {
+	int ret;
 	struct pil_priv *priv = desc->priv;
 
 	if (desc->ops->shutdown) {
@@ -1043,6 +1069,9 @@
 		pil_proxy_unvote(desc, 1);
 	else
 		flush_delayed_work(&priv->proxy);
+	ret = pil_notify_aop(desc, "off");
+	if (ret < 0)
+		pr_warn("pil: failed to send OFF message to AOP rc:%d\n", ret);
 	desc->modem_ssr = true;
 }
 EXPORT_SYMBOL(pil_shutdown);
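
pil_notify_aop() above packs a one-line QMP message and pushes it through the mailbox framework. A self-contained sketch of the same handshake; the channel index, timeout and message format mirror this patch, while the function and variable names are illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/qmp.h>

#define EXAMPLE_MSG_LEN 96

static int example_notify_aop(struct device *dev, const char *image,
			      const char *state)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,
		.tx_tout = 1000,	/* ms, as in the pil-q6v5-mss hunk */
		.knows_txdone = false,
	};
	struct mbox_chan *chan;
	struct qmp_pkt pkt;
	char msg[EXAMPLE_MSG_LEN];
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	snprintf(msg, sizeof(msg),
		 "{class: image, res: load_state, name: %s, val: %s}",
		 image, state);
	pkt.size = EXAMPLE_MSG_LEN;
	pkt.data = msg;

	ret = mbox_send_message(chan, &pkt);
	mbox_free_channel(chan);
	return ret < 0 ? ret : 0;
}
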
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index daa4533..f09adf5 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -12,6 +12,9 @@
 #ifndef __MSM_PERIPHERAL_LOADER_H
 #define __MSM_PERIPHERAL_LOADER_H
 
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/qmp.h>
+
 struct device;
 struct module;
 struct pil_priv;
@@ -57,6 +60,9 @@
 	bool modem_ssr;
 	bool clear_fw_region;
 	u32 subsys_vmid;
+	bool signal_aop;
+	struct mbox_client cl;
+	struct mbox_chan *mbox;
 };
 
 /**
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 926016f..ec3063e 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -615,7 +615,7 @@
 	char *fw_name_p;
 	void *mba_dp_virt;
 	dma_addr_t mba_dp_phys, mba_dp_phys_end;
-	int ret, count;
+	int ret;
 	const u8 *data;
 	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
@@ -681,10 +681,9 @@
 					&mba_dp_phys, &mba_dp_phys_end);
 
 	/* Load the MBA image into memory */
-	count = fw->size;
-	if (count <= SZ_1M) {
+	if (fw->size <= SZ_1M) {
 		/* Ensures memcpy is done for max 1MB fw size */
-		memcpy(mba_dp_virt, data, count);
+		memcpy(mba_dp_virt, data, fw->size);
 	} else {
 		dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
 			__func__);
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index e9d3534..cf5f7e4 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -217,6 +217,22 @@
 	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
 
 	drv->q6->desc.modem_ssr = false;
+	drv->q6->desc.signal_aop = of_property_read_bool(pdev->dev.of_node,
+						"qcom,signal-aop");
+	if (drv->q6->desc.signal_aop) {
+		drv->q6->desc.cl.dev = &pdev->dev;
+		drv->q6->desc.cl.tx_block = true;
+		drv->q6->desc.cl.tx_tout = 1000;
+		drv->q6->desc.cl.knows_txdone = false;
+		drv->q6->desc.mbox = mbox_request_channel(&drv->q6->desc.cl, 0);
+		if (IS_ERR(drv->q6->desc.mbox)) {
+			ret = PTR_ERR(drv->q6->desc.mbox);
+			dev_err(&pdev->dev, "Failed to get mailbox channel %pK %d\n",
+				drv->q6->desc.mbox, ret);
+			goto err_subsys;
+		}
+	}
+
 	drv->subsys = subsys_register(&drv->subsys_desc);
 	if (IS_ERR(drv->subsys)) {
 		ret = PTR_ERR(drv->subsys);
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 6a30381..6d6b9f7 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -84,7 +84,7 @@
 /* QDSP6v65 parameters */
 #define QDSP6SS_BOOT_CORE_START		(0x400)
 #define QDSP6SS_BOOT_CMD		(0x404)
-#define QDSP6SS_BOOT_STATUS		(0x408)
+#define MSS_STATUS			(0x40)
 #define QDSP6SS_SLEEP			(0x3C)
 #define SLEEP_CHECK_MAX_LOOPS		(200)
 #define BOOT_FSM_TIMEOUT		(100)
@@ -410,8 +410,8 @@
 	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
 
 	/* Wait for boot FSM to complete */
-	ret = readl_poll_timeout(drv->reg_base + QDSP6SS_BOOT_STATUS, val,
-			val != 0, 10, BOOT_FSM_TIMEOUT);
+	ret = readl_poll_timeout(drv->rmb_base + MSS_STATUS, val,
+			(val & BIT(1)) != 0, 10, BOOT_FSM_TIMEOUT);
 
 	if (ret) {
 		dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
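
The boot-FSM wait now polls BIT(1) of the MSS_STATUS register in the RMB region rather than QDSP6SS_BOOT_STATUS. A generic sketch of the readl_poll_timeout() idiom used here, with a hypothetical register and polling budget:

#include <linux/bitops.h>
#include <linux/iopoll.h>

#define EXAMPLE_READY_BIT	BIT(1)

/* Poll every 10us, give up after 100us; returns 0 or -ETIMEDOUT. */
static int example_wait_for_ready(void __iomem *status_reg)
{
	u32 val;

	return readl_poll_timeout(status_reg, val,
				  (val & EXAMPLE_READY_BIT), 10, 100);
}
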
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 417b0b2..c35119c 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -572,6 +572,12 @@
 	/* SDM670 ID */
 	[336] = {MSM_CPU_SDM670, "SDM670"},
 
+	/* QCS605 ID */
+	[347] = {MSM_CPU_QCS605, "QCS605"},
+
+	/* SDA670 ID */
+	[337] = {MSM_CPU_SDA670, "SDA670"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -1434,6 +1440,14 @@
 		dummy_socinfo.id = 336;
 		strlcpy(dummy_socinfo.build_id, "sdm670 - ",
 			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_sda670()) {
+		dummy_socinfo.id = 337;
+		strlcpy(dummy_socinfo.build_id, "sda670 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_qcs605()) {
+		dummy_socinfo.id = 347;
+		strlcpy(dummy_socinfo.build_id, "qcs605 - ",
+			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdxpoorwills()) {
 		dummy_socinfo.id = 334;
 		strlcpy(dummy_socinfo.build_id, "sdxpoorwills - ",
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index de5f0ff..68681f9 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -206,10 +206,8 @@
 	 * Only one rx/tx transaction at a time (request + response).
 	 */
 	int ref_count;
-	u32 pid;
 
-	/* link UP/DOWN callback */
-	void (*notify_link_state_cb)(bool up);
+	u32 pid; /* debug only: identifies the user space application */
 
 	/* abort flags */
 	bool rx_abort;
@@ -493,13 +491,10 @@
 
 		ch->glink_state = event;
 
-		/*
-		 * if spcom_notify_state() is called within glink_open()
-		 * then ch->glink_handle is not updated yet.
-		 */
-		if (!ch->glink_handle) {
-			pr_debug("update glink_handle, ch [%s].\n", ch->name);
-			ch->glink_handle = handle;
+		if (!handle) {
+			pr_err("invalid glink_handle, ch [%s].\n", ch->name);
+			mutex_unlock(&ch->lock);
+			return;
 		}
 
 		/* signal before unlock mutex & before calling glink */
@@ -512,8 +507,7 @@
 		 */
 
 		pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
-		ret = glink_queue_rx_intent(ch->glink_handle,
-					    ch, ch->rx_buf_size);
+		ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
 		if (ret) {
 			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
 		} else {
@@ -736,6 +730,7 @@
 	long timeleft;
 	const char *name;
 	void *handle;
+	u32 pid = current_pid();
 
 	mutex_lock(&ch->lock);
 	name = ch->name;
@@ -749,7 +744,7 @@
 	}
 
 	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
-		 name, ch->pid, ch->ref_count);
+		 name, pid, ch->ref_count);
 
 	pr_debug("Open channel [%s] timeout_msec [%d].\n", name, timeout_msec);
 
@@ -777,7 +772,7 @@
 	/* init channel context after successful open */
 	ch->glink_handle = handle;
 	ch->ref_count++;
-	ch->pid = current_pid();
+	ch->pid = pid;
 	ch->txn_id = INITIAL_TXN_ID;
 
 	mutex_unlock(&ch->lock);
@@ -1026,10 +1021,12 @@
 			 ch->name, ch->actual_rx_size);
 		goto exit_ready;
 	}
+	mutex_unlock(&ch->lock); /* unlock while waiting */
 
 	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
 	wait_for_completion(&ch->rx_done);
 
+	mutex_lock(&ch->lock); /* re-lock after waiting */
 	/* Check Rx Abort on SP reset */
 	if (ch->rx_abort) {
 		pr_err("rx aborted.\n");
@@ -2027,6 +2024,7 @@
 				      void *buf,
 				      uint32_t size)
 {
+	int ret = -1;
 	uint32_t next_req_size = 0;
 
 	if (size < sizeof(next_req_size)) {
@@ -2034,7 +2032,10 @@
 		return -EINVAL;
 	}
 
-	next_req_size = spcom_get_next_request_size(ch);
+	ret = spcom_get_next_request_size(ch);
+	if (ret < 0)
+		return ret;
+	next_req_size = (uint32_t) ret;
 
 	memcpy(buf, &next_req_size, sizeof(next_req_size));
 	pr_debug("next_req_size [%d].\n", next_req_size);
@@ -2139,18 +2140,20 @@
 			      void *buf,
 			      uint32_t size)
 {
+	int ret = -1;
+
 	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
 		pr_debug("get next request size, ch [%s].\n", ch->name);
 		ch->is_server = true;
-		size = spcom_handle_get_req_size(ch, buf, size);
+		ret = spcom_handle_get_req_size(ch, buf, size);
 	} else {
 		pr_debug("get request/response, ch [%s].\n", ch->name);
-		size = spcom_handle_read_req_resp(ch, buf, size);
+		ret = spcom_handle_read_req_resp(ch, buf, size);
 	}
 
 	pr_debug("ch [%s] , size = %d.\n", ch->name, size);
 
-	return size;
+	return ret;
 }
 
 /*======================================================================*/
@@ -2302,6 +2305,7 @@
 	char *buf;
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
+	int buf_size = 0;
 
 	pr_debug("Write file [%s] size [%d] pos [%d].\n",
 		 name, (int) size, (int) *f_pos);
@@ -2328,6 +2332,7 @@
 			   (int) size, (int) SPCOM_MAX_COMMAND_SIZE);
 		return -EINVAL;
 	}
+	buf_size = size; /* explicit cast from size_t to int */
 
 	if (*f_pos != 0) {
 		pr_err("offset should be zero, no sparse buffer.\n");
@@ -2345,7 +2350,7 @@
 		return -EFAULT;
 	}
 
-	ret = spcom_handle_write(ch, buf, size);
+	ret = spcom_handle_write(ch, buf, buf_size);
 	if (ret) {
 		pr_err("handle command error [%d].\n", ret);
 		kfree(buf);
@@ -2373,6 +2378,7 @@
 	char *buf;
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
+	uint32_t buf_size = 0;
 
 	pr_debug("Read file [%s], size = %d bytes.\n", name, (int) size);
 
@@ -2381,6 +2387,7 @@
 		pr_err("invalid parameters.\n");
 		return -EINVAL;
 	}
+	buf_size = size; /* explicit cast from size_t to uint32_t */
 
 	ch = filp->private_data;
 
@@ -2398,7 +2405,7 @@
 	if (buf == NULL)
 		return -ENOMEM;
 
-	ret = spcom_handle_read(ch, buf, size);
+	ret = spcom_handle_read(ch, buf, buf_size);
 	if (ret < 0) {
 		pr_err("read error [%d].\n", ret);
 		kfree(buf);
@@ -2481,9 +2488,14 @@
 		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 		break;
 	case SPCOM_POLL_CH_CONNECT:
+		/*
+		 * ch is not expected to be NULL since the user must call open()
+		 * to get an FD before it can call poll().
+		 * open() fails if no ch is associated with the char-device.
+		 */
 		if (ch == NULL) {
 			pr_err("invalid ch pointer, file [%s].\n", name);
-			return -EINVAL;
+			return POLLERR;
 		}
 		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
 		if (wait) {
@@ -2784,7 +2796,7 @@
 {
 	int ret;
 
-	pr_info("spcom driver version 1.1 17-July-2017.\n");
+	pr_info("spcom driver version 1.2 23-Aug-2017.\n");
 
 	ret = platform_driver_register(&spcom_driver);
 	if (ret)
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 01eb260..3ea4be6 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1152,6 +1152,22 @@
 		d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
 		d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
 	}
+	d->desc.signal_aop = of_property_read_bool(pdev->dev.of_node,
+						"qcom,signal-aop");
+	if (d->desc.signal_aop) {
+		d->desc.cl.dev = &pdev->dev;
+		d->desc.cl.tx_block = true;
+		d->desc.cl.tx_tout = 1000;
+		d->desc.cl.knows_txdone = false;
+		d->desc.mbox = mbox_request_channel(&d->desc.cl, 0);
+		if (IS_ERR(d->desc.mbox)) {
+			rc = PTR_ERR(d->desc.mbox);
+			dev_err(&pdev->dev, "Failed to get mailbox channel %pK %d\n",
+				d->desc.mbox, rc);
+			goto err_ramdump;
+		}
+	}
+
 	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
 								&pdev->dev);
 	if (!d->ramdump_dev) {
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 2b1456e..c1eafbd 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@
 			SPI_ENGINE_VERSION_MAJOR(version),
 			SPI_ENGINE_VERSION_MINOR(version),
 			SPI_ENGINE_VERSION_PATCH(version));
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_put_master;
 	}
 
 	spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index de3dc69..8dc42ac 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -24,6 +24,7 @@
 #include <linux/qcom-geni-se.h>
 #include <linux/msm_gpi.h>
 #include <linux/spi/spi.h>
+#include <linux/spi/spi-geni-qcom.h>
 
 #define SPI_NUM_CHIPSELECT	(4)
 #define SPI_XFER_TIMEOUT_MS	(250)
@@ -67,6 +68,11 @@
 /* SPI_TX/SPI_RX_TRANS_LEN fields */
 #define TRANS_LEN_MSK		(GENMASK(23, 0))
 
+/* SE_SPI_DELAY_COUNTERS */
+#define SPI_INTER_WORDS_DELAY_MSK	(GENMASK(9, 0))
+#define SPI_CS_CLK_DELAY_MSK		(GENMASK(19, 10))
+#define SPI_CS_CLK_DELAY_SHFT		(10)
+
 /* M_CMD OP codes for SPI */
 #define SPI_TX_ONLY		(1)
 #define SPI_RX_ONLY		(2)
@@ -229,6 +235,8 @@
 	int ret = 0;
 	int idx;
 	int div;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 spi_delay_params = 0;
 
 	loopback_cfg &= ~LOOPBACK_MSK;
 	cpol &= ~CPOL;
@@ -246,6 +254,22 @@
 	if (spi_slv->mode & SPI_CS_HIGH)
 		demux_output_inv |= BIT(spi_slv->chip_select);
 
+	if (spi_slv->controller_data) {
+		u32 cs_clk_delay = 0;
+		u32 inter_words_delay = 0;
+
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+		cs_clk_delay =
+		(delay_params->spi_cs_clk_delay << SPI_CS_CLK_DELAY_SHFT)
+							& SPI_CS_CLK_DELAY_MSK;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay &
+						SPI_INTER_WORDS_DELAY_MSK;
+		spi_delay_params =
+		(inter_words_delay | cs_clk_delay);
+	}
+
 	demux_sel = spi_slv->chip_select;
 	mas->cur_speed_hz = spi_slv->max_speed_hz;
 	mas->cur_word_len = spi_slv->bits_per_word;
@@ -267,12 +291,13 @@
 	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
 	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
 	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+	geni_write_reg(spi_delay_params, mas->base, SE_SPI_DELAY_COUNTERS);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
 		"%s:Loopback%d demux_sel0x%x demux_op_inv 0x%x clk_cfg 0x%x\n",
 		__func__, loopback_cfg, demux_sel, demux_output_inv, m_clk_cfg);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
-		"%s:clk_sel 0x%x cpol %d cpha %d\n", __func__,
-							clk_sel, cpol, cpha);
+		"%s:clk_sel 0x%x cpol %d cpha %d delay 0x%x\n", __func__,
+					clk_sel, cpol, cpha, spi_delay_params);
 	/* Ensure message level attributes are written before returning */
 	mb();
 setup_fifo_params_exit:
@@ -298,7 +323,7 @@
 	 */
 	if (fifo_disable && !dma_chan_valid)
 		mode = -EINVAL;
-	else if (fifo_disable)
+	else if (dma_chan_valid)
 		mode = GSI_DMA;
 	else
 		mode = FIFO_MODE;
@@ -306,7 +331,8 @@
 }
 
 static struct msm_gpi_tre *setup_config0_tre(struct spi_transfer *xfer,
-				struct spi_geni_master *mas, u16 mode)
+				struct spi_geni_master *mas, u16 mode,
+				u32 cs_clk_delay, u32 inter_words_delay)
 {
 	struct msm_gpi_tre *c0_tre = &mas->gsi[mas->num_xfers].config0_tre;
 	u8 flags = 0;
@@ -340,12 +366,16 @@
 	}
 	c0_tre->dword[0] = MSM_GPI_SPI_CONFIG0_TRE_DWORD0(pack, flags,
 								word_len);
-	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, 0, 0);
+	c0_tre->dword[1] = MSM_GPI_SPI_CONFIG0_TRE_DWORD1(0, cs_clk_delay,
+							inter_words_delay);
 	c0_tre->dword[2] = MSM_GPI_SPI_CONFIG0_TRE_DWORD2(idx, div);
 	c0_tre->dword[3] = MSM_GPI_SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 1);
 	GENI_SE_DBG(mas->ipc, false, mas->dev,
 		"%s: flags 0x%x word %d pack %d idx %d div %d\n",
 		__func__, flags, word_len, pack, idx, div);
+	GENI_SE_DBG(mas->ipc, false, mas->dev,
+		"%s: cs_clk_delay %d inter_words_delay %d\n", __func__,
+				 cs_clk_delay, inter_words_delay);
 	return c0_tre;
 }
 
@@ -503,13 +533,27 @@
 	u32 rx_len = 0;
 	int go_flags = 0;
 	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+	struct spi_geni_qcom_ctrl_data *delay_params = NULL;
+	u32 cs_clk_delay = 0;
+	u32 inter_words_delay = 0;
+
+	if (spi_slv->controller_data) {
+		delay_params =
+		(struct spi_geni_qcom_ctrl_data *) spi_slv->controller_data;
+
+		cs_clk_delay =
+			delay_params->spi_cs_clk_delay;
+		inter_words_delay =
+			delay_params->spi_inter_words_delay;
+	}
 
 	if ((xfer->bits_per_word != mas->cur_word_len) ||
 		(xfer->speed_hz != mas->cur_speed_hz)) {
 		mas->cur_word_len = xfer->bits_per_word;
 		mas->cur_speed_hz = xfer->speed_hz;
 		tx_nent++;
-		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode);
+		c0_tre = setup_config0_tre(xfer, mas, spi_slv->mode,
+					cs_clk_delay, inter_words_delay);
 		if (IS_ERR_OR_NULL(c0_tre)) {
 			dev_err(mas->dev, "%s:Err setting c0tre:%d\n",
 							__func__, ret);
@@ -617,30 +661,32 @@
 				struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
-	struct device *gsi_dev = mas->dev;
+	int ret = 0;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (xfer->rx_buf) {
-			xfer->rx_dma = dma_map_single(gsi_dev, xfer->rx_buf,
+			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
+						&xfer->rx_dma, xfer->rx_buf,
 						xfer->len, DMA_FROM_DEVICE);
-			if (dma_mapping_error(mas->dev, xfer->rx_dma)) {
-				dev_err(mas->dev, "Err mapping buf\n");
-				return -ENOMEM;
+			if (ret) {
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"%s: Mapping Rx buffer %d\n", __func__, ret);
+				return ret;
 			}
 		}
 
 		if (xfer->tx_buf) {
-			xfer->tx_dma = dma_map_single(gsi_dev,
-				(void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
-			if (dma_mapping_error(gsi_dev, xfer->tx_dma)) {
-				dev_err(mas->dev, "Err mapping buf\n");
-				dma_unmap_single(gsi_dev, xfer->rx_dma,
-						xfer->len, DMA_FROM_DEVICE);
-				return -ENOMEM;
+			ret = geni_se_iommu_map_buf(mas->wrapper_dev,
+						&xfer->tx_dma,
+						(void *)xfer->tx_buf,
+						xfer->len, DMA_TO_DEVICE);
+			if (ret) {
+				GENI_SE_ERR(mas->ipc, true, mas->dev,
+				"%s: Mapping Tx buffer %d\n", __func__, ret);
+				return ret;
 			}
 		}
 	};
-
 	return 0;
 }
 
@@ -648,14 +694,13 @@
 				struct spi_message *msg)
 {
 	struct spi_transfer *xfer;
-	struct device *gsi_dev = mas->dev;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (xfer->rx_buf)
-			dma_unmap_single(gsi_dev, xfer->rx_dma,
+			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->rx_dma,
 						xfer->len, DMA_FROM_DEVICE);
 		if (xfer->tx_buf)
-			dma_unmap_single(gsi_dev, xfer->tx_dma,
+			geni_se_iommu_unmap_buf(mas->wrapper_dev, &xfer->tx_dma,
 						xfer->len, DMA_TO_DEVICE);
 	};
 }
@@ -709,7 +754,12 @@
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
 	int ret = 0;
+	u32 max_speed = spi->cur_msg->spi->max_speed_hz;
+	struct se_geni_rsc *rsc = &mas->spi_rsc;
 
+	/* Adjust the AB/IB based on the max speed of the slave. */
+	rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
+	rsc->ab = max_speed * DEFAULT_BUS_WIDTH;
 	ret = pm_runtime_get_sync(mas->dev);
 	if (ret < 0) {
 		dev_err(mas->dev, "Error enabling SE resources\n");
@@ -900,6 +950,7 @@
 {
 	unsigned long timeout;
 
+	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
 	reinit_completion(&mas->xfer_done);
 	geni_cancel_m_cmd(mas->base);
 	geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
@@ -986,6 +1037,7 @@
 	}
 	return ret;
 err_gsi_geni_transfer_one:
+	geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
 	dmaengine_terminate_all(mas->tx);
 	return ret;
 err_fifo_geni_transfer_one:
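
With this change the per-slave CS-to-clock and inter-word delays come from spi_device->controller_data as a struct spi_geni_qcom_ctrl_data (the header added to the includes above). A hedged sketch of how a client might supply them; the field names come from this patch, the values and units are placeholders:

#include <linux/spi/spi.h>
#include <linux/spi/spi-geni-qcom.h>

static struct spi_geni_qcom_ctrl_data example_delays = {
	.spi_cs_clk_delay = 2,		/* CS-to-clock delay (assumed unit: master clock cycles) */
	.spi_inter_words_delay = 0,	/* no extra gap between words */
};

static int example_attach_delays(struct spi_device *spi)
{
	spi->controller_data = &example_delays;
	return spi_setup(spi);
}
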
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
index ce9dc7e..192661b 100644
--- a/drivers/staging/android/fiq_debugger/fiq_debugger.c
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -401,7 +401,7 @@
 		cmd += 6;
 		while (*cmd == ' ')
 			cmd++;
-		if ((cmd != '\0') && sysrq_on())
+		if ((*cmd != '\0') && sysrq_on())
 			kernel_restart(cmd);
 		else
 			kernel_restart(NULL);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ec99790..7458df4 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2385,6 +2385,7 @@
 			continue;
 		}
 
+		set_current_state(TASK_RUNNING);
 		wp = async->buf_write_ptr;
 		n1 = min(n, async->prealloc_bufsz - wp);
 		n2 = n - n1;
@@ -2517,6 +2518,8 @@
 			}
 			continue;
 		}
+
+		set_current_state(TASK_RUNNING);
 		rp = async->buf_read_ptr;
 		n1 = min(n, async->prealloc_bufsz - rp);
 		n2 = n - n1;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6b99263..598f0fa 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@
 			     long m)
 {
 	struct ad2s1210_state *st = iio_priv(indio_dev);
-	bool negative;
+	u16 negative;
 	int ret = 0;
 	u16 pos;
 	s16 vel;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index b432153..0f63a36 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -45,6 +45,7 @@
 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
 	{}	/* Terminating entry */
 };
 
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 01438fa..f50076d 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -414,7 +414,7 @@
 	sense->ascq = ascq;
 	if (sns_key_info0 != 0) {
 		sense->sns_key_info[0] = SKSV | sns_key_info0;
-		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
 		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
 	}
 }
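
The rts5208 fix matters because shifting the masked byte by 8 always produced zero; a quick worked value:

/* For sns_key_info1 = 0xAB:
 *   (0xAB & 0xf0) >> 8 == 0x00   (old code: the upper nibble is lost)
 *   (0xAB & 0xf0) >> 4 == 0x0A   (fixed: the upper nibble is kept)
 */
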
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 6370a5e..defffa7 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -269,23 +269,12 @@
 
 int wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
 {
-	int i = 0;
-	int ret = -1;
-	struct wilc_vif *vif;
-	struct wilc *wilc;
+	struct wilc_vif *vif = netdev_priv(wilc_netdev);
 
-	vif = netdev_priv(wilc_netdev);
-	wilc = vif->wilc;
+	memcpy(vif->bssid, bssid, 6);
+	vif->mode = mode;
 
-	for (i = 0; i < wilc->vif_num; i++)
-		if (wilc->vif[i]->ndev == wilc_netdev) {
-			memcpy(wilc->vif[i]->bssid, bssid, 6);
-			wilc->vif[i]->mode = mode;
-			ret = 0;
-			break;
-		}
-
-	return ret;
+	return 0;
 }
 
 int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
@@ -1212,16 +1201,11 @@
 
 void wilc_netdev_cleanup(struct wilc *wilc)
 {
-	int i = 0;
-	struct wilc_vif *vif[NUM_CONCURRENT_IFC];
+	int i;
 
-	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
+	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev))
 		unregister_inetaddr_notifier(&g_dev_notifier);
 
-		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
-			vif[i] = netdev_priv(wilc->vif[i]->ndev);
-	}
-
 	if (wilc && wilc->firmware) {
 		release_firmware(wilc->firmware);
 		wilc->firmware = NULL;
@@ -1230,7 +1214,7 @@
 	if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
 			if (wilc->vif[i]->ndev)
-				if (vif[i]->mac_opened)
+				if (wilc->vif[i]->mac_opened)
 					wilc_mac_close(wilc->vif[i]->ndev);
 
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
@@ -1278,9 +1262,9 @@
 
 		vif->idx = wl->vif_num;
 		vif->wilc = *wilc;
+		vif->ndev = ndev;
 		wl->vif[i] = vif;
-		wl->vif[wl->vif_num]->ndev = ndev;
-		wl->vif_num++;
+		wl->vif_num = i;
 		ndev->netdev_ops = &wilc_netdev_ops;
 
 		{
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 182b2d5..165c46f 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -666,8 +666,11 @@
 
 void prism2_roamed(struct wlandevice *wlandev)
 {
-	cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid,
-		NULL, 0, NULL, 0, GFP_KERNEL);
+	struct cfg80211_roam_info roam_info = {
+		.bssid = wlandev->bssid,
+	};
+
+	cfg80211_roamed(wlandev->netdev, &roam_info, GFP_KERNEL);
 }
 
 /* Structures for declaring wiphy interface */
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 155fe0e..e49fcd5 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@
 		return 0;
 	}
 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+	atomic_inc(&np->np_reset_count);
 
 	if (np->np_thread) {
 		spin_unlock_bh(&np->np_thread_lock);
@@ -2177,6 +2178,7 @@
 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction	= DMA_NONE;
+	kfree(cmd->text_in_ptr);
 	cmd->text_in_ptr	= NULL;
 
 	return 0;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 6128e8e..9ccd5da 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1233,9 +1233,11 @@
 	flush_signals(current);
 
 	spin_lock_bh(&np->np_thread_lock);
-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		spin_unlock_bh(&np->np_thread_lock);
 		complete(&np->np_restart_comp);
+		return 1;
 	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
 		spin_unlock_bh(&np->np_thread_lock);
 		goto exit;
@@ -1268,7 +1270,8 @@
 		goto exit;
 	} else if (rc < 0) {
 		spin_lock_bh(&np->np_thread_lock);
-		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+			np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 			spin_unlock_bh(&np->np_thread_lock);
 			complete(&np->np_restart_comp);
 			iscsit_put_transport(conn->conn_transport);
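
The login thread now consumes np->np_reset_count with atomic_dec_if_positive() so that each reset request wakes exactly one restart completion instead of racing on np_thread_state alone. The pattern in isolation, with hypothetical names:

#include <linux/atomic.h>

static atomic_t example_reset_count = ATOMIC_INIT(0);

/* Producer: record one pending reset request. */
static void example_request_reset(void)
{
	atomic_inc(&example_reset_count);
}

/* Consumer: atomic_dec_if_positive() returns the decremented value and
 * only decrements when the counter was positive, so a result >= 0 means
 * exactly one pending request was claimed by this caller. */
static bool example_claim_reset(void)
{
	return atomic_dec_if_positive(&example_reset_count) >= 0;
}
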
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 6693d7c..e8efb42 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -490,14 +490,60 @@
 
 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
 
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
 {
 	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
-		pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+		pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
 			"returning FALSE\n");
-		return false;
+		return true;
 	}
-	return true;
+	return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		read_lock_bh(&sk->sk_callback_lock);
+		state = test_bit(flag, &conn->login_flags);
+		read_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+	bool state = false;
+
+	if (conn->sock) {
+		struct sock *sk = conn->sock->sk;
+
+		write_lock_bh(&sk->sk_callback_lock);
+		state = (__iscsi_target_sk_check_close(sk) ||
+			 test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+		if (!state)
+			clear_bit(flag, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	return state;
 }
 
 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -537,6 +583,20 @@
 
 	pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
 			conn, current->comm, current->pid);
+	/*
+	 * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+	 * before initial PDU processing in iscsi_target_start_negotiation()
+	 * has completed, go ahead and retry until it's cleared.
+	 *
+	 * Otherwise if the TCP connection drops while this is occurring,
+	 * iscsi_target_start_negotiation() will detect the failure, call
+	 * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+	 * remaining iscsi connection resources from iscsi_np process context.
+	 */
+	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+		return;
+	}
 
 	spin_lock(&tpg->tpg_state_lock);
 	state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -544,26 +604,12 @@
 
 	if (!state) {
 		pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
+		goto err;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
-			iscsi_target_restore_sock_callbacks(conn);
-			iscsi_target_login_drop(conn, login);
-			iscsit_deaccess_np(np, tpg, tpg_np);
-			return;
-		}
+	if (iscsi_target_sk_check_close(conn)) {
+		pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+		goto err;
 	}
 
 	conn->login_kworker = current;
@@ -581,34 +627,29 @@
 	flush_signals(current);
 	conn->login_kworker = NULL;
 
-	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
-		return;
-	}
+	if (rc < 0)
+		goto err;
 
 	pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
 			conn, current->comm, current->pid);
 
 	rc = iscsi_target_do_login(conn, login);
 	if (rc < 0) {
-		iscsi_target_restore_sock_callbacks(conn);
-		iscsi_target_login_drop(conn, login);
-		iscsit_deaccess_np(np, tpg, tpg_np);
+		goto err;
 	} else if (!rc) {
-		if (conn->sock) {
-			struct sock *sk = conn->sock->sk;
-
-			write_lock_bh(&sk->sk_callback_lock);
-			clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
-			write_unlock_bh(&sk->sk_callback_lock);
-		}
+		if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+			goto err;
 	} else if (rc == 1) {
 		iscsi_target_nego_release(conn);
 		iscsi_post_login_handler(np, conn, zero_tsih);
 		iscsit_deaccess_np(np, tpg, tpg_np);
 	}
+	return;
+
+err:
+	iscsi_target_restore_sock_callbacks(conn);
+	iscsi_target_login_drop(conn, login);
+	iscsit_deaccess_np(np, tpg, tpg_np);
 }
 
 static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -656,31 +697,54 @@
 		orig_state_change(sk);
 		return;
 	}
+	state = __iscsi_target_sk_check_close(sk);
+	pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
 	if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
 			 " conn: %p\n", conn);
+		if (state)
+			set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-	if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+	if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
 		pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
 			 conn);
 		write_unlock_bh(&sk->sk_callback_lock);
 		orig_state_change(sk);
 		return;
 	}
-
-	state = iscsi_target_sk_state_check(sk);
-	write_unlock_bh(&sk->sk_callback_lock);
-
-	pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
-
-	if (!state) {
+	/*
+	 * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+	 * but only queue conn->login_work -> iscsi_target_do_login_rx()
+	 * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+	 *
+	 * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+	 * will detect the dropped TCP connection from delayed workqueue context.
+	 *
+	 * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+	 * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+	 * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+	 * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+	 * dropped TCP connection in iscsi_np process context, and cleaning up
+	 * the remaining iscsi connection resources.
+	 */
+	if (state) {
 		pr_debug("iscsi_target_sk_state_change got failed state\n");
-		schedule_delayed_work(&conn->login_cleanup_work, 0);
+		set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+		state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+
+		orig_state_change(sk);
+
+		if (!state)
+			schedule_delayed_work(&conn->login_work, 0);
 		return;
 	}
+	write_unlock_bh(&sk->sk_callback_lock);
+
 	orig_state_change(sk);
 }
 
@@ -945,6 +1009,15 @@
 			if (iscsi_target_handle_csg_one(conn, login) < 0)
 				return -1;
 			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+				/*
+				 * Check to make sure the TCP connection has not
+				 * dropped asynchronously while session reinstatement
+				 * was occurring in this kthread context, before
+				 * transitioning to full feature phase operation.
+				 */
+				if (iscsi_target_sk_check_close(conn))
+					return -1;
+
 				login->tsih = conn->sess->tsih;
 				login->login_complete = 1;
 				iscsi_target_restore_sock_callbacks(conn);
@@ -971,21 +1044,6 @@
 		break;
 	}
 
-	if (conn->sock) {
-		struct sock *sk = conn->sock->sk;
-		bool state;
-
-		read_lock_bh(&sk->sk_callback_lock);
-		state = iscsi_target_sk_state_check(sk);
-		read_unlock_bh(&sk->sk_callback_lock);
-
-		if (!state) {
-			pr_debug("iscsi_target_do_login() failed state for"
-				 " conn: %p\n", conn);
-			return -1;
-		}
-	}
-
 	return 0;
 }
 
@@ -1252,13 +1310,25 @@
        if (conn->sock) {
                struct sock *sk = conn->sock->sk;
 
-               write_lock_bh(&sk->sk_callback_lock);
-               set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
-               write_unlock_bh(&sk->sk_callback_lock);
-       }
+		write_lock_bh(&sk->sk_callback_lock);
+		set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+		set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+		write_unlock_bh(&sk->sk_callback_lock);
+	}
+	/*
+	 * If iscsi_target_do_login returns zero to signal more PDU
+	 * exchanges are required to complete the login, go ahead and
+	 * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+	 * is still active.
+	 *
+	 * Otherwise if TCP connection dropped asynchronously, go ahead
+	 * and perform connection cleanup now.
+	 */
+	ret = iscsi_target_do_login(conn, login);
+	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+		ret = -1;
 
-       ret = iscsi_target_do_login(conn, login);
-       if (ret < 0) {
+	if (ret < 0) {
 		cancel_delayed_work_sync(&conn->login_work);
 		cancel_delayed_work_sync(&conn->login_cleanup_work);
 		iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 1949f50..0e2e71f 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@
 	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl)
 		acl->dynamic_node_acl = 0;
-	list_del(&acl->acl_list);
+	list_del_init(&acl->acl_list);
 	mutex_unlock(&tpg->acl_node_mutex);
 
 	target_shutdown_sessions(acl);
@@ -540,7 +540,7 @@
 	 * in transport_deregister_session().
 	 */
 	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-		list_del(&nacl->acl_list);
+		list_del_init(&nacl->acl_list);
 
 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
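
Switching to list_del_init() matters because the same nacl can be taken off the ACL list from more than one path during shutdown; a detached-but-reinitialised entry tolerates that, while a plain list_del() leaves poisoned pointers. In isolation:

#include <linux/list.h>

struct example_acl {
	struct list_head acl_list;
};

static void example_remove_acl(struct example_acl *acl)
{
	/* Leaves acl_list pointing at itself, so a second removal or a
	 * list_empty() check on this node is still well defined; plain
	 * list_del() would poison the pointers instead. */
	list_del_init(&acl->acl_list);
}
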
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e8a1f5c..bacfa8f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -465,7 +465,7 @@
 	}
 
 	mutex_lock(&se_tpg->acl_node_mutex);
-	list_del(&nacl->acl_list);
+	list_del_init(&nacl->acl_list);
 	mutex_unlock(&se_tpg->acl_node_mutex);
 
 	core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -537,7 +537,7 @@
 			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
 
 			if (se_nacl->dynamic_stop)
-				list_del(&se_nacl->acl_list);
+				list_del_init(&se_nacl->acl_list);
 		}
 		mutex_unlock(&se_tpg->acl_node_mutex);
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index c5e2703..79788eb 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1264,6 +1264,13 @@
 		goto exit_geni_serial_isr;
 	}
 
+	if (s_irq_status & S_RX_FIFO_WR_ERR_EN) {
+		uport->icount.buf_overrun++;
+		IPC_LOG_MSG(msm_port->ipc_log_misc,
+			"%s.sirq 0x%x buf_overrun:%d\n",
+			__func__, s_irq_status, uport->icount.buf_overrun);
+	}
+
 	if (!dma) {
 		if ((m_irq_status & m_irq_en) &
 		    (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 840930b0..c8075eb 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -629,6 +629,8 @@
 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
 			as->status != -ENOENT)
 		cancel_bulk_urbs(ps, as->bulk_addr);
+
+	wake_up(&ps->wait);
 	spin_unlock(&ps->lock);
 
 	if (signr) {
@@ -636,8 +638,6 @@
 		put_pid(pid);
 		put_cred(cred);
 	}
-
-	wake_up(&ps->wait);
 }
 
 static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e07fa76..7b8ca7d 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1878,7 +1878,7 @@
 	/* No more submits can occur */
 	spin_lock_irq(&hcd_urb_list_lock);
 rescan:
-	list_for_each_entry (urb, &ep->urb_list, urb_list) {
+	list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
 		int	is_in;
 
 		if (urb->unlinked)
@@ -2256,39 +2256,38 @@
 
 /*-------------------------------------------------------------------------*/
 
-dma_addr_t
-usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
-	unsigned int intr_num)
+phys_addr_t
+usb_hcd_get_sec_event_ring_phys_addr(struct usb_device *udev,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
 		return 0;
 
-	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+	return hcd->driver->get_sec_event_ring_phys_addr(hcd, intr_num, dma);
 }
 
-dma_addr_t
-usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+phys_addr_t
+usb_hcd_get_xfer_ring_phys_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
 		return 0;
 
-	return hcd->driver->get_dcba_dma_addr(hcd, udev);
+	return hcd->driver->get_xfer_ring_phys_addr(hcd, udev, ep, dma);
 }
 
-dma_addr_t
-usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
-		struct usb_host_endpoint *ep)
+int usb_hcd_get_controller_id(struct usb_device *udev)
 {
 	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
 
 	if (!HCD_RH_RUNNING(hcd))
-		return 0;
+		return -EINVAL;
 
-	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+	return hcd->driver->get_core_id(hcd);
 }
 
 #ifdef	CONFIG_PM
@@ -2532,6 +2531,8 @@
 	}
 	if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
 		hcd = hcd->shared_hcd;
+		clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+		set_bit(HCD_FLAG_DEAD, &hcd->flags);
 		if (hcd->rh_registered) {
 			clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50679bc..3b0cc03 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4741,7 +4741,8 @@
 static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
 		u16 portchange)
 {
-	int status, i;
+	int status = -ENODEV;
+	int i;
 	unsigned unit_load;
 	struct usb_device *hdev = hub->hdev;
 	struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4945,9 +4946,10 @@
 
 done:
 	hub_port_disable(hub, port1, 1);
-	if (hcd->driver->relinquish_port && !hub->hdev->parent)
-		hcd->driver->relinquish_port(hcd, port1);
-
+	if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+		if (status != -ENOTCONN && status != -ENODEV)
+			hcd->driver->relinquish_port(hcd, port1);
+	}
 }
 
 /* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edf..82806e3 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,8 +57,9 @@
 	/* Microsoft LifeCam-VX700 v2.0 */
 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
-	/* Logitech HD Pro Webcams C920 and C930e */
+	/* Logitech HD Pro Webcams C920, C920-C and C930e */
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
 
 	/* Logitech ConferenceCam CC3000e */
@@ -150,6 +151,9 @@
 	/* appletouch */
 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* Avision AV600U */
 	{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
 	  USB_QUIRK_STRING_FETCH_255 },
@@ -214,6 +218,9 @@
 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+	/* Corsair Strafe RGB */
+	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
 	/* Acer C120 LED Projector */
 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
 
@@ -249,6 +256,7 @@
 	{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
 	{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
 	{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+	{ USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
 
 	/* Logitech Optical Mouse M90/M100 */
 	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 2776cfe..ef9cf4a 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -127,6 +127,22 @@
  */
 #define USB_ACPI_LOCATION_VALID (1 << 31)
 
+static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
+					      int raw)
+{
+	struct acpi_device *adev;
+
+	if (!parent)
+		return NULL;
+
+	list_for_each_entry(adev, &parent->children, node) {
+		if (acpi_device_adr(adev) == raw)
+			return adev;
+	}
+
+	return acpi_find_child_device(parent, raw, false);
+}
+
 static struct acpi_device *usb_acpi_find_companion(struct device *dev)
 {
 	struct usb_device *udev;
@@ -174,8 +190,10 @@
 			int raw;
 
 			raw = usb_hcd_find_raw_port_number(hcd, port1);
-			adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
-					raw, false);
+
+			adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
+						  raw);
+
 			if (!adev)
 				return NULL;
 		} else {
@@ -186,7 +204,9 @@
 				return NULL;
 
 			acpi_bus_get_device(parent_handle, &adev);
-			adev = acpi_find_child_device(adev, port1, false);
+
+			adev = usb_acpi_find_port(adev, port1);
+
 			if (!adev)
 				return NULL;
 		}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 92e5d13..d745733 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -704,36 +704,35 @@
 }
 EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
 
-dma_addr_t
-usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
-	unsigned int intr_num)
+phys_addr_t
+usb_get_sec_event_ring_phys_addr(struct usb_device *dev,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
 		return 0;
 
-	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+	return usb_hcd_get_sec_event_ring_phys_addr(dev, intr_num, dma);
 }
-EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_sec_event_ring_phys_addr);
 
-dma_addr_t
-usb_get_dcba_dma_addr(struct usb_device *dev)
+phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
 		return 0;
 
-	return usb_hcd_get_dcba_dma_addr(dev);
+	return usb_hcd_get_xfer_ring_phys_addr(dev, ep, dma);
 }
-EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+EXPORT_SYMBOL(usb_get_xfer_ring_phys_addr);
 
-dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
-	struct usb_host_endpoint *ep)
+int usb_get_controller_id(struct usb_device *dev)
 {
 	if (dev->state == USB_STATE_NOTATTACHED)
-		return 0;
+		return -EINVAL;
 
-	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+	return usb_hcd_get_controller_id(dev);
 }
-EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+EXPORT_SYMBOL(usb_get_controller_id);
 
 /*-------------------------------------------------------------------*/
 /*
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 8a6ae0b..02adc93 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2155,7 +2155,7 @@
 	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
 	struct usb_composite_dev *cdev = get_gadget_data(gadget);
 
-	return sprintf(buf, "%d\n", cdev->suspended);
+	return snprintf(buf, PAGE_SIZE, "%d\n", cdev->suspended);
 }
 static DEVICE_ATTR_RO(suspended);
 
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index f910990..d6e77a5 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1533,6 +1533,18 @@
 
 	gi = container_of(cdev, struct gadget_info, cdev);
 
+	/* FIXME: There's a race between usb_gadget_udc_stop(), which is
+	 * likely to set the gadget driver to NULL in the UDC driver, and this
+	 * driver's gadget disconnect function, which checks whether the gadget
+	 * driver is a NULL pointer. Unbind (which does set_gadget_data(NULL))
+	 * can be called before the gadget driver is set to NULL, so the UDC
+	 * driver then calls the disconnect function with cdev a NULL pointer.
+	 */
+	if (cdev == NULL) {
+		WARN(1, "%s: gadget driver already disconnected\n", __func__);
+		return;
+	}
+
 	/* accessory HID support can be active while the
 		accessory function is not actually enabled,
 		so we need to inform it when we are disconnected.
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index a7cb586..9240956 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -864,6 +864,14 @@
 	.probe = acc_hid_probe,
 };
 
+static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req)
+{
+	/*
+	 * Default no-op function when nothing needs to be done for the
+	 * setup request
+	 */
+}
+
 int acc_ctrlrequest(struct usb_composite_dev *cdev,
 				const struct usb_ctrlrequest *ctrl)
 {
@@ -891,6 +899,7 @@
 			schedule_delayed_work(
 				&dev->start_work, msecs_to_jiffies(10));
 			value = 0;
+			cdev->req->complete = acc_complete_setup_noop;
 		} else if (b_request == ACCESSORY_SEND_STRING) {
 			dev->string_index = w_index;
 			cdev->gadget->ep0->driver_data = dev;
@@ -899,10 +908,13 @@
 		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
 				w_index == 0 && w_length == 0) {
 			dev->audio_mode = w_value;
+			cdev->req->complete = acc_complete_setup_noop;
 			value = 0;
 		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
 			value = acc_register_hid(dev, w_value, w_index);
 		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
 			value = acc_unregister_hid(dev, w_value);
 		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
 			spin_lock_irqsave(&dev->lock, flags);
@@ -937,7 +949,7 @@
 		if (b_request == ACCESSORY_GET_PROTOCOL) {
 			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
 			value = sizeof(u16);
-
+			cdev->req->complete = acc_complete_setup_noop;
 			/* clear any string left over from a previous session */
 			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
 			memset(dev->model, 0, sizeof(dev->model));
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 18241f4..c11629d 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1565,12 +1565,22 @@
 		struct usb_request *req)
 {
 	struct f_gsi *gsi = req->context;
+	rndis_init_msg_type *buf;
 	int status;
 
 	status = rndis_msg_parser(gsi->params, (u8 *) req->buf);
 	if (status < 0)
 		log_event_err("RNDIS command error %d, %d/%d",
 			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		gsi->d_port.in_aggr_size = min_t(u32, gsi->d_port.in_aggr_size,
+						gsi->params->dl_max_xfer_size);
+		log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+				gsi->params->dl_max_xfer_size,
+				gsi->d_port.in_aggr_size);
+	}
 }
 
 static void
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index 48454b7..d6bf0f4 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -1393,7 +1393,7 @@
 
 /* string descriptors: */
 static struct usb_string qdss_gsi_string_defs[] = {
-	[0].s = "QDSS DATA",
+	[0].s = "DPL Data",
 	{}, /* end of list */
 };
 
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 40a7acf..a0fecb2 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -338,7 +338,7 @@
 	struct usb_gadget *gadget = c->cdev->gadget;
 	struct f_qdss *qdss = func_to_qdss(f);
 	struct usb_ep *ep;
-	int iface;
+	int iface, id;
 
 	pr_debug("qdss_bind\n");
 
@@ -356,6 +356,12 @@
 	qdss_data_intf_desc.bInterfaceNumber = iface;
 	qdss->data_iface_id = iface;
 
+	id = usb_string_id(c->cdev);
+	if (id < 0)
+		return id;
+	qdss_string_defs[QDSS_DATA_IDX].id = id;
+	qdss_data_intf_desc.iInterface = id;
+
 	if (qdss->debug_inface_enabled) {
 		/* Allocate ctrl I/F */
 		iface = usb_interface_id(c, f);
@@ -365,6 +371,11 @@
 		}
 		qdss_ctrl_intf_desc.bInterfaceNumber = iface;
 		qdss->ctrl_iface_id = iface;
+		id = usb_string_id(c->cdev);
+		if (id < 0)
+			return id;
+		qdss_string_defs[QDSS_CTRL_IDX].id = id;
+		qdss_ctrl_intf_desc.iInterface = id;
 	}
 
 	ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 38d58f3..ac2231a 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -813,8 +813,17 @@
 	/* For USB: responses may take up to 10 seconds */
 	switch (MsgType) {
 	case RNDIS_MSG_INIT:
-		pr_debug("%s: RNDIS_MSG_INIT\n",
-			__func__);
+		pr_debug("%s: RNDIS_MSG_INIT\n", __func__);
+		tmp++; /* to get RequestID */
+		params->host_rndis_major_ver = get_unaligned_le32(tmp++);
+		params->host_rndis_minor_ver = get_unaligned_le32(tmp++);
+		params->dl_max_xfer_size = get_unaligned_le32(tmp++);
+
+		pr_debug("%s(): RNDIS Host Major:%d Minor:%d version\n",
+					__func__, params->host_rndis_major_ver,
+					params->host_rndis_minor_ver);
+		pr_debug("%s(): DL Max Transfer size:%x\n",
+				__func__, params->dl_max_xfer_size);
 		params->state = RNDIS_INITIALIZED;
 		return rndis_init_response(params, (rndis_init_msg_type *)buf);
 
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 939c3be..4ffc282 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -191,6 +191,9 @@
 
 	u32			vendorID;
 	u8			max_pkt_per_xfer;
+	u32			host_rndis_major_ver;
+	u32			host_rndis_minor_ver;
+	u32			dl_max_xfer_size;
 	const char		*vendorDescr;
 	u8			pkt_alignment_factor;
 	void			(*resp_avail)(void *v);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index ba78e3f..d2cfefa 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -685,21 +685,32 @@
 	return usb3_req;
 }
 
+static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
+				struct renesas_usb3_request *usb3_req,
+				int status)
+{
+	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
+
+	dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
+		usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
+		status);
+	usb3_req->req.status = status;
+	usb3_ep->started = false;
+	list_del_init(&usb3_req->queue);
+	spin_unlock(&usb3->lock);
+	usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
+	spin_lock(&usb3->lock);
+}
+
 static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
 			      struct renesas_usb3_request *usb3_req, int status)
 {
 	struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
 	unsigned long flags;
 
-	dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
-		usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
-		status);
-	usb3_req->req.status = status;
 	spin_lock_irqsave(&usb3->lock, flags);
-	usb3_ep->started = false;
-	list_del_init(&usb3_req->queue);
+	__usb3_request_done(usb3_ep, usb3_req, status);
 	spin_unlock_irqrestore(&usb3->lock, flags);
-	usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
 }
 
 static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index c8989c6..58b9685 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -98,6 +98,7 @@
 	AMD_CHIPSET_HUDSON2,
 	AMD_CHIPSET_BOLTON,
 	AMD_CHIPSET_YANGTZE,
+	AMD_CHIPSET_TAISHAN,
 	AMD_CHIPSET_UNKNOWN,
 };
 
@@ -145,20 +146,26 @@
 		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
 				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
 
-		if (!pinfo->smbus_dev) {
-			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
-			return 0;
+		if (pinfo->smbus_dev) {
+			rev = pinfo->smbus_dev->revision;
+			if (rev >= 0x11 && rev <= 0x14)
+				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+			else if (rev >= 0x15 && rev <= 0x18)
+				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+			else if (rev >= 0x39 && rev <= 0x3a)
+				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+		} else {
+			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+							  0x145c, NULL);
+			if (pinfo->smbus_dev) {
+				rev = pinfo->smbus_dev->revision;
+				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+			} else {
+				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+				return 0;
+			}
 		}
-
-		rev = pinfo->smbus_dev->revision;
-		if (rev >= 0x11 && rev <= 0x14)
-			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
-		else if (rev >= 0x15 && rev <= 0x18)
-			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
-		else if (rev >= 0x39 && rev <= 0x3a)
-			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
 	}
-
 	pinfo->sb_type.rev = rev;
 	return 1;
 }
@@ -260,11 +267,12 @@
 {
 	/* Make sure amd chipset type has already been initialized */
 	usb_amd_find_chipset_info();
-	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
-		return 0;
-
-	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
-	return 1;
+	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
 
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6cb5ab3..89de903 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -316,6 +316,9 @@
 	if (device_property_read_u32(sysdev, "snps,xhci-imod-value", &imod))
 		imod = 0;
 
+	if (device_property_read_u32(sysdev, "usb-core-id", &xhci->core_id))
+		xhci->core_id = -EINVAL;
+
 	hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
 	if (IS_ERR(hcd->usb_phy)) {
 		ret = PTR_ERR(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 15bf308..1660c7c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4964,10 +4964,13 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
-dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
-	unsigned int intr_num)
+static phys_addr_t xhci_get_sec_event_ring_phys_addr(struct usb_hcd *hcd,
+	unsigned int intr_num, dma_addr_t *dma)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct device *dev = hcd->self.sysdev;
+	struct sg_table sgt;
+	phys_addr_t pa;
 
 	if (intr_num >= xhci->max_interrupters) {
 		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
@@ -4977,31 +4980,34 @@
 
 	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
 		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
-		&& xhci->sec_event_ring[intr_num]->first_seg)
-		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+		&& xhci->sec_event_ring[intr_num]->first_seg) {
+
+		dma_get_sgtable(dev, &sgt,
+			xhci->sec_event_ring[intr_num]->first_seg->trbs,
+			xhci->sec_event_ring[intr_num]->first_seg->dma,
+			TRB_SEGMENT_SIZE);
+
+		*dma = xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+		pa = page_to_phys(sg_page(sgt.sgl));
+		sg_free_table(&sgt);
+
+		return pa;
+	}
 
 	return 0;
 }
 
-dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
-	struct usb_device *udev)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
-		return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
-
-	return 0;
-}
-
-dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
-	struct usb_device *udev, struct usb_host_endpoint *ep)
+static phys_addr_t xhci_get_xfer_ring_phys_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma)
 {
 	int ret;
 	unsigned int ep_index;
 	struct xhci_virt_device *virt_dev;
-
+	struct device *dev = hcd->self.sysdev;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct sg_table sgt;
+	phys_addr_t pa;
 
 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
 	if (ret <= 0) {
@@ -5013,12 +5019,31 @@
 	ep_index = xhci_get_endpoint_index(&ep->desc);
 
 	if (virt_dev->eps[ep_index].ring &&
-		virt_dev->eps[ep_index].ring->first_seg)
-		return virt_dev->eps[ep_index].ring->first_seg->dma;
+		virt_dev->eps[ep_index].ring->first_seg) {
+
+		dma_get_sgtable(dev, &sgt,
+			virt_dev->eps[ep_index].ring->first_seg->trbs,
+			virt_dev->eps[ep_index].ring->first_seg->dma,
+			TRB_SEGMENT_SIZE);
+
+		*dma = virt_dev->eps[ep_index].ring->first_seg->dma;
+
+		pa = page_to_phys(sg_page(sgt.sgl));
+		sg_free_table(&sgt);
+
+		return pa;
+	}
 
 	return 0;
 }
 
+int xhci_get_core_id(struct usb_hcd *hcd)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	return xhci->core_id;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5080,9 +5105,9 @@
 	.find_raw_port_number =	xhci_find_raw_port_number,
 	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
 	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
-	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
-	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
-	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
+	.get_sec_event_ring_phys_addr =	xhci_get_sec_event_ring_phys_addr,
+	.get_xfer_ring_phys_addr =	xhci_get_xfer_ring_phys_addr,
+	.get_core_id =			xhci_get_core_id,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 757d045..db46db4 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1538,6 +1538,8 @@
 	/* secondary interrupter */
 	struct	xhci_intr_reg __iomem **sec_ir_set;
 
+	int		core_id;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1977,6 +1979,7 @@
 		char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
+int xhci_get_core_id(struct usb_hcd *hcd);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 261ed2c..a6b6b1c 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2655,6 +2655,13 @@
 {
 	struct musb	*musb = dev_to_musb(dev);
 	unsigned long	flags;
+	int ret;
+
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
 
 	musb_platform_disable(musb);
 	musb_generic_disable(musb);
@@ -2703,14 +2710,6 @@
 	if ((devctl & mask) != (musb->context.devctl & mask))
 		musb->port1_status = 0;
 
-	/*
-	 * The USB HUB code expects the device to be in RPM_ACTIVE once it came
-	 * out of suspend
-	 */
-	pm_runtime_disable(dev);
-	pm_runtime_set_active(dev);
-	pm_runtime_enable(dev);
-
 	musb_start(musb);
 
 	spin_lock_irqsave(&musb->lock, flags);
@@ -2720,6 +2719,9 @@
 			error);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
 	return 0;
 }
 
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 99beda9..55c624f 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@
 				"Could not flush host TX%d fifo: csr: %04x\n",
 				ep->epnum, csr))
 			return;
+		mdelay(1);
 	}
 }
 
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 6103172..68bd576 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -63,6 +63,17 @@
 #define LINESTATE_DP			BIT(0)
 #define LINESTATE_DM			BIT(1)
 
+/* EUD-related registers */
+#define EUD_SW_ATTACH_DET	0x1018
+#define EUD_INT1_EN_MASK	0x0024
+
+/* EUD interrupt mask bits */
+#define EUD_INT_RX		BIT(0)
+#define EUD_INT_TX		BIT(1)
+#define EUD_INT_VBUS		BIT(2)
+#define EUD_INT_CHGR		BIT(3)
+#define EUD_INT_SAFE_MODE	BIT(4)
+
 unsigned int phy_tune1;
 module_param(phy_tune1, uint, 0644);
 MODULE_PARM_DESC(phy_tune1, "QUSB PHY v2 TUNE1");
@@ -81,6 +92,7 @@
 	struct usb_phy		phy;
 	struct mutex		lock;
 	void __iomem		*base;
+	void __iomem		*eud_base;
 	void __iomem		*efuse_reg;
 
 	struct clk		*ref_clk_src;
@@ -664,6 +676,22 @@
 			return ret;
 		}
 		qphy->dpdm_enable = true;
+
+		if (qphy->eud_base) {
+			if (qphy->cfg_ahb_clk)
+				clk_prepare_enable(qphy->cfg_ahb_clk);
+			writel_relaxed(BIT(0),
+					qphy->eud_base + EUD_SW_ATTACH_DET);
+			/* to flush above write before next write */
+			wmb();
+
+			writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR,
+					qphy->eud_base + EUD_INT1_EN_MASK);
+			/* to flush above write before turning off clk */
+			wmb();
+			if (qphy->cfg_ahb_clk)
+				clk_disable_unprepare(qphy->cfg_ahb_clk);
+		}
 	}
 
 	return ret;
@@ -678,6 +706,16 @@
 				__func__, qphy->dpdm_enable);
 
 	if (qphy->dpdm_enable) {
+		if (qphy->eud_base) {
+			if (qphy->cfg_ahb_clk)
+				clk_prepare_enable(qphy->cfg_ahb_clk);
+			writel_relaxed(0, qphy->eud_base + EUD_SW_ATTACH_DET);
+			/* to flush above write before turning off clk */
+			wmb();
+			if (qphy->cfg_ahb_clk)
+				clk_disable_unprepare(qphy->cfg_ahb_clk);
+		}
+
 		ret = qusb_phy_enable_power(qphy, false);
 		if (ret < 0) {
 			dev_dbg(qphy->phy.dev,
@@ -784,6 +822,17 @@
 		}
 	}
 
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"eud_base");
+	if (res) {
+		qphy->eud_base = devm_ioremap(dev, res->start,
+					resource_size(res));
+		if (!qphy->eud_base) {
+			dev_dbg(dev, "couldn't ioremap eud_base\n");
+			qphy->eud_base = NULL;
+		}
+	}
+
 	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
 	if (IS_ERR(qphy->ref_clk_src)) {
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 59f5379..7e7c76c 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -623,10 +623,7 @@
 	}
 
 	if (suspend) {
-		if (!phy->cable_connected)
-			writel_relaxed(0x00,
-			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
-		else
+		if (phy->cable_connected)
 			msm_ssusb_qmp_enable_autonomous(phy, 1);
 
 		/* Make sure above write completed with PHY */
@@ -674,6 +671,10 @@
 	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
 					phy);
 
+	writel_relaxed(0x00,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+	readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
 	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
 	dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected);
 	phy->cable_connected = false;
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b33..02b67ab 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
 /* Low Power Status register (LPSTS) */
 #define LPSTS_SUSPM	0x4000
 
-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
 #define UGCTRL2_RESERVED_3	0x00000001	/* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG	0x00000030
+#define UGCTRL2_VBUSSEL		0x00000400
 
 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
@@ -34,7 +38,8 @@
 {
 	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
 
-	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+		      UGCTRL2_VBUSSEL);
 
 	if (enable) {
 		usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 84b444f..470b17b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -136,6 +136,7 @@
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11..2a99443 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2023,8 +2023,11 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1db4b61..a51b283 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be8..3b5a15d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
 #define ATEN_PRODUCT_ID		0x2008
+#define ATEN_PRODUCT_UC485	0x2021
 #define ATEN_PRODUCT_ID2	0x2118
 
 #define IODATA_VENDOR_ID	0x04bb
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f3..cde1153 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
 		"Initio Corporation",
-		"",
+		"INIC-3069",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-		US_FL_NO_ATA_1X),
+		US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
 
 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc128a..96a0661 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -537,8 +537,13 @@
 
 		preempt_enable();
 
-		if (vhost_enable_notify(&net->dev, vq))
+		if (!vhost_vq_avail_empty(&net->dev, vq))
 			vhost_poll_queue(&vq->poll);
+		else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+			vhost_disable_notify(&net->dev, vq);
+			vhost_poll_queue(&vq->poll);
+		}
+
 		mutex_unlock(&vq->mutex);
 
 		len = peek_head_len(sk);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 76c1ad9..f8a3839 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1097,6 +1097,13 @@
 	void __user *argp = (void __user *)arg;
 	long ret = 0;
 
+	memset(&var, 0, sizeof(var));
+	memset(&fix, 0, sizeof(fix));
+	memset(&con2fb, 0, sizeof(con2fb));
+	memset(&cmap_from, 0, sizeof(cmap_from));
+	memset(&cmap, 0, sizeof(cmap));
+	memset(&event, 0, sizeof(event));
+
 	switch (cmd) {
 	case FBIOGET_VSCREENINFO:
 		if (!lock_fb_info(info))
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 4da69db..1bdd02a 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@
 	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
 	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
-	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
 	/*
 	 * XXX: Add support for merging bio_vec when using different page
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 14a37ff..705bb5f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4759,10 +4759,6 @@
 		else
 			flush = BTRFS_RESERVE_NO_FLUSH;
 		spin_lock(&space_info->lock);
-		if (can_overcommit(root, space_info, orig, flush)) {
-			spin_unlock(&space_info->lock);
-			break;
-		}
 		if (list_empty(&space_info->tickets) &&
 		    list_empty(&space_info->priority_tickets)) {
 			spin_unlock(&space_info->lock);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 74ed5aa..f6e1119 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1834,6 +1834,8 @@
 			goto restore;
 		}
 
+		btrfs_qgroup_rescan_resume(fs_info);
+
 		if (!fs_info->uuid_root) {
 			btrfs_info(fs_info, "creating UUID tree");
 			ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 900ffaf..7b79a54 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -188,7 +188,7 @@
 /*
  * read a single page, without unlocking it.
  */
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
 {
 	struct inode *inode = file_inode(filp);
 	struct ceph_inode_info *ci = ceph_inode(inode);
@@ -218,7 +218,7 @@
 
 	err = ceph_readpage_from_fscache(inode, page);
 	if (err == 0)
-		goto out;
+		return -EINPROGRESS;
 
 	dout("readpage inode %p file %p page %p index %lu\n",
 	     inode, filp, page, page->index);
@@ -248,8 +248,11 @@
 
 static int ceph_readpage(struct file *filp, struct page *page)
 {
-	int r = readpage_nounlock(filp, page);
-	unlock_page(page);
+	int r = ceph_do_readpage(filp, page);
+	if (r != -EINPROGRESS)
+		unlock_page(page);
+	else
+		r = 0;
 	return r;
 }
 
@@ -1235,7 +1238,7 @@
 			goto retry_locked;
 		r = writepage_nounlock(page, NULL);
 		if (r < 0)
-			goto fail_nosnap;
+			goto fail_unlock;
 		goto retry_locked;
 	}
 
@@ -1263,11 +1266,14 @@
 	}
 
 	/* we need to read it. */
-	r = readpage_nounlock(file, page);
-	if (r < 0)
-		goto fail_nosnap;
+	r = ceph_do_readpage(file, page);
+	if (r < 0) {
+		if (r == -EINPROGRESS)
+			return -EAGAIN;
+		goto fail_unlock;
+	}
 	goto retry_locked;
-fail_nosnap:
+fail_unlock:
 	unlock_page(page);
 	return r;
 }
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 5bc5d37..a2d7997 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -240,13 +240,7 @@
 	}
 }
 
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
-	if (!error)
-		SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
 {
 	if (!error)
 		SetPageUptodate(page);
@@ -274,7 +268,7 @@
 		return -ENOBUFS;
 
 	ret = fscache_read_or_alloc_page(ci->fscache, page,
-					 ceph_vfs_readpage_complete, NULL,
+					 ceph_readpage_from_fscache_complete, NULL,
 					 GFP_KERNEL);
 
 	switch (ret) {
@@ -303,7 +297,7 @@
 		return -ENOBUFS;
 
 	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
-					  ceph_vfs_readpage_complete_unlock,
+					  ceph_readpage_from_fscache_complete,
 					  NULL, mapping_gfp_mask(mapping));
 
 	switch (ret) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 789ff1d..dd3e236 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -183,15 +183,20 @@
 }
 
 /*
+ * Don't allow path components longer than the server max.
  * Don't allow the separator character in a path component.
  * The VFS will not allow "/", but "\" is allowed by posix.
  */
 static int
-check_name(struct dentry *direntry)
+check_name(struct dentry *direntry, struct cifs_tcon *tcon)
 {
 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	int i;
 
+	if (unlikely(direntry->d_name.len >
+		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+		return -ENAMETOOLONG;
+
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
 		for (i = 0; i < direntry->d_name.len; i++) {
 			if (direntry->d_name.name[i] == '\\') {
@@ -489,10 +494,6 @@
 		return finish_no_open(file, res);
 	}
 
-	rc = check_name(direntry);
-	if (rc)
-		return rc;
-
 	xid = get_xid();
 
 	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@@ -505,6 +506,11 @@
 	}
 
 	tcon = tlink_tcon(tlink);
+
+	rc = check_name(direntry, tcon);
+	if (rc)
+		goto out_free_xid;
+
 	server = tcon->ses->server;
 
 	if (server->ops->new_lease_key)
@@ -765,7 +771,7 @@
 	}
 	pTcon = tlink_tcon(tlink);
 
-	rc = check_name(direntry);
+	rc = check_name(direntry, pTcon);
 	if (rc)
 		goto lookup_out;
 
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7c1c6c3..0437e5f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2930,8 +2930,8 @@
 	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
 			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
 	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
-	kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
-	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+	kst->f_bfree  = kst->f_bavail =
+			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
 	return;
 }
 
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index dc0d141..1e1449a 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
 
 #define NUMBER_OF_SMB2_COMMANDS	0x0013
 
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 58c2f4a..9ac6591 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -355,6 +355,10 @@
 	error = misc_register(&ls->ls_device);
 	if (error) {
 		kfree(ls->ls_device.name);
+		/* this has to be set to NULL
+		 * to avoid a double-free in dlm_device_deregister
+		 */
+		ls->ls_device.name = NULL;
 	}
 fail:
 	return error;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index dc4a34f..5b96ba7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -524,8 +524,13 @@
 	wait_queue_head_t *whead;
 
 	rcu_read_lock();
-	/* If it is cleared by POLLFREE, it should be rcu-safe */
-	whead = rcu_dereference(pwq->whead);
+	/*
+	 * If it is cleared by POLLFREE, it should be rcu-safe.
+	 * If we read NULL we need a barrier paired with
+	 * smp_store_release() in ep_poll_callback(), otherwise
+	 * we rely on whead->lock.
+	 */
+	whead = smp_load_acquire(&pwq->whead);
 	if (whead)
 		remove_wait_queue(whead, &pwq->wait);
 	rcu_read_unlock();
@@ -1010,17 +1015,6 @@
 	struct eventpoll *ep = epi->ep;
 	int ewake = 0;
 
-	if ((unsigned long)key & POLLFREE) {
-		ep_pwq_from_wait(wait)->whead = NULL;
-		/*
-		 * whead = NULL above can race with ep_remove_wait_queue()
-		 * which can do another remove_wait_queue() after us, so we
-		 * can't use __remove_wait_queue(). whead->lock is held by
-		 * the caller.
-		 */
-		list_del_init(&wait->task_list);
-	}
-
 	spin_lock_irqsave(&ep->lock, flags);
 
 	/*
@@ -1102,10 +1096,26 @@
 	if (pwake)
 		ep_poll_safewake(&ep->poll_wait);
 
-	if (epi->event.events & EPOLLEXCLUSIVE)
-		return ewake;
+	if (!(epi->event.events & EPOLLEXCLUSIVE))
+		ewake = 1;
 
-	return 1;
+	if ((unsigned long)key & POLLFREE) {
+		/*
+		 * If we race with ep_remove_wait_queue() it can miss
+		 * ->whead = NULL and do another remove_wait_queue() after
+		 * us, so we can't use __remove_wait_queue().
+		 */
+		list_del_init(&wait->task_list);
+		/*
+		 * ->whead != NULL protects us from the race with ep_free()
+		 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+		 * held by the caller. Once we nullify it, nothing protects
+		 * ep/epi or even wait.
+		 */
+		smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+	}
+
+	return ewake;
 }
 
 /*
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 9e77c08..d17d12e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -469,6 +469,8 @@
 				lastoff = page_offset(page);
 				bh = head = page_buffers(page);
 				do {
+					if (lastoff + bh->b_size <= startoff)
+						goto next;
 					if (buffer_uptodate(bh) ||
 					    buffer_unwritten(bh)) {
 						if (whence == SEEK_DATA)
@@ -483,6 +485,7 @@
 						unlock_page(page);
 						goto out;
 					}
+next:
 					lastoff += bh->b_size;
 					bh = bh->b_this_page;
 				} while (bh != head);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index cf68100..95bf466 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1926,7 +1926,8 @@
 			n_desc_blocks = o_desc_blocks +
 				le16_to_cpu(es->s_reserved_gdt_blocks);
 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
-			n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+			n_blocks_count = (ext4_fsblk_t)n_group *
+				EXT4_BLOCKS_PER_GROUP(sb);
 			n_group--; /* set to last group number */
 		}
 
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 2fc84a9..98c1a63 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -316,7 +316,7 @@
 		return 0;
 
 	/* Get the previous summary */
-	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
 		struct curseg_info *curseg = CURSEG_I(sbi, i);
 		if (curseg->segno == segno) {
 			sum = curseg->sum_blk->entries[blkoff];
@@ -626,8 +626,6 @@
 	}
 
 	clear_sbi_flag(sbi, SBI_POR_DOING);
-	if (err)
-		set_ckpt_flags(sbi, CP_ERROR_FLAG);
 	mutex_unlock(&sbi->cp_mutex);
 
 	/* let's drop all the directory inodes for clean checkpoint */
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 75c95659..21d829b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -55,7 +55,7 @@
 {
 	struct fuse_file *ff;
 
-	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
 	if (unlikely(!ff))
 		return NULL;
 
diff --git a/fs/inode.c b/fs/inode.c
index 0aaebd1..3844c31 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -637,6 +637,7 @@
 
 	dispose_list(&dispose);
 }
+EXPORT_SYMBOL_GPL(evict_inodes);
 
 /**
  * invalidate_inodes	- attempt to free all inodes on a superblock
diff --git a/fs/internal.h b/fs/internal.h
index 15fe2aa..3e58863 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -138,7 +138,6 @@
 extern void inode_io_list_del(struct inode *inode);
 
 extern long get_nr_dirty_inodes(void);
-extern void evict_inodes(struct super_block *);
 extern int invalidate_inodes(struct super_block *, bool);
 
 /*
diff --git a/fs/iomap.c b/fs/iomap.c
index 798c291..a49db88 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -281,7 +281,7 @@
 		unsigned long bytes;	/* Bytes to write to page */
 
 		offset = (pos & (PAGE_SIZE - 1));
-		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);
+		bytes = min_t(loff_t, PAGE_SIZE - offset, length);
 
 		rpage = __iomap_read_page(inode, pos);
 		if (IS_ERR(rpage))
@@ -376,7 +376,7 @@
 		unsigned offset, bytes;
 
 		offset = pos & (PAGE_SIZE - 1); /* Within page */
-		bytes = min_t(unsigned, PAGE_SIZE - offset, count);
+		bytes = min_t(loff_t, PAGE_SIZE - offset, count);
 
 		if (IS_DAX(inode))
 			status = iomap_dax_zero(pos, offset, bytes, iomap);
diff --git a/fs/namespace.c b/fs/namespace.c
index 7731f77..2160bb9 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -227,6 +227,7 @@
 		mnt->mnt_count = 1;
 		mnt->mnt_writers = 0;
 #endif
+		mnt->mnt.data = NULL;
 
 		INIT_HLIST_NODE(&mnt->mnt_hash);
 		INIT_LIST_HEAD(&mnt->mnt_child);
@@ -976,7 +977,6 @@
 	if (!mnt)
 		return ERR_PTR(-ENOMEM);
 
-	mnt->mnt.data = NULL;
 	if (type->alloc_mnt_data) {
 		mnt->mnt.data = type->alloc_mnt_data();
 		if (!mnt->mnt.data) {
@@ -990,7 +990,6 @@
 
 	root = mount_fs(type, flags, name, &mnt->mnt, data);
 	if (IS_ERR(root)) {
-		kfree(mnt->mnt.data);
 		mnt_free_id(mnt);
 		free_vfsmnt(mnt);
 		return ERR_CAST(root);
@@ -1109,7 +1108,6 @@
 	return mnt;
 
  out_free:
-	kfree(mnt->mnt.data);
 	mnt_free_id(mnt);
 	free_vfsmnt(mnt);
 	return ERR_PTR(err);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index f31fd0d..b1daeaf 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -121,6 +121,7 @@
 config PNFS_BLOCK
 	tristate
 	depends on NFS_V4_1 && BLK_DEV_DM
+	depends on 64BIT || LBDAF
 	default NFS_V4
 
 config PNFS_OBJLAYOUT
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 84c1cb9..1eec947 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -636,11 +636,11 @@
 	if (result <= 0)
 		goto out;
 
-	result = generic_write_sync(iocb, result);
-	if (result < 0)
-		goto out;
 	written = result;
 	iocb->ki_pos += written;
+	result = generic_write_sync(iocb, written);
+	if (result < 0)
+		goto out;
 
 	/* Return error values */
 	if (nfs_need_check_write(file, inode)) {
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index f7a3f6b..9009989 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -30,6 +30,7 @@
 {
 	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
 	nfs4_pnfs_ds_put(mirror_ds->ds);
+	kfree(mirror_ds->ds_versions);
 	kfree_rcu(mirror_ds, id_node.rcu);
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 80bcc0b..52ea41b 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -248,7 +248,6 @@
 extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
 struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
 void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
 int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
 		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 46ca788..a53b8e0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7410,7 +7410,7 @@
 			cdata->res.server_scope = NULL;
 		}
 		/* Save the EXCHANGE_ID verifier session trunk tests */
-		memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
+		memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
 		       sizeof(clp->cl_confirm.data));
 	}
 out:
@@ -7447,7 +7447,6 @@
 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
 			u32 sp4_how, struct rpc_xprt *xprt)
 {
-	nfs4_verifier verifier;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
 		.rpc_cred = cred,
@@ -7470,8 +7469,7 @@
 	if (!calldata)
 		goto out;
 
-	if (!xprt)
-		nfs4_init_boot_verifier(clp, &verifier);
+	nfs4_init_boot_verifier(clp, &calldata->args.verifier);
 
 	status = nfs4_init_uniform_client_string(clp);
 	if (status)
@@ -7516,9 +7514,8 @@
 		task_setup_data.rpc_xprt = xprt;
 		task_setup_data.flags =
 				RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
-		calldata->args.verifier = &clp->cl_confirm;
-	} else {
-		calldata->args.verifier = &verifier;
+		memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
+				sizeof(calldata->args.verifier.data));
 	}
 	calldata->args.client = clp;
 #ifdef CONFIG_NFS_V4_1_MIGRATION
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c9c4d985..5e2724a 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1761,7 +1761,7 @@
 	int len = 0;
 
 	encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
-	encode_nfs4_verifier(xdr, args->verifier);
+	encode_nfs4_verifier(xdr, &args->verifier);
 
 	encode_string(xdr, strlen(args->client->cl_owner_id),
 			args->client->cl_owner_id);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 142a74f..3d17fc8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -497,16 +497,6 @@
 }
 EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
 
-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
-	hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
 /**
  * nfs_pgio_data_destroy - make @hdr suitable for reuse
  *
@@ -515,14 +505,24 @@
  *
  * @hdr: A header that has had nfs_generic_pgio called
  */
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
 {
 	if (hdr->args.context)
 		put_nfs_open_context(hdr->args.context);
 	if (hdr->page_array.pagevec != hdr->page_array.page_array)
 		kfree(hdr->page_array.pagevec);
 }
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+	nfs_pgio_data_destroy(hdr);
+	hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 
 /**
  * nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -636,7 +636,6 @@
 static void nfs_pgio_error(struct nfs_pgio_header *hdr)
 {
 	set_bit(NFS_IOHDR_REDO, &hdr->flags);
-	nfs_pgio_data_destroy(hdr);
 	hdr->completion_ops->completion(hdr);
 }
 
@@ -647,7 +646,6 @@
 static void nfs_pgio_release(void *calldata)
 {
 	struct nfs_pgio_header *hdr = calldata;
-	nfs_pgio_data_destroy(hdr);
 	hdr->completion_ops->completion(hdr);
 }
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 415d7e6..b7a07ba 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2145,7 +2145,6 @@
 		nfs_pageio_reset_write_mds(desc);
 		mirror->pg_recoalesce = 1;
 	}
-	nfs_pgio_data_destroy(hdr);
 	hdr->release(hdr);
 }
 
@@ -2257,7 +2256,6 @@
 		nfs_pageio_reset_read_mds(desc);
 		mirror->pg_recoalesce = 1;
 	}
-	nfs_pgio_data_destroy(hdr);
 	hdr->release(hdr);
 }
 
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 4e7a56a..2c4f7a2 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -129,7 +129,7 @@
 	argp->p = page_address(argp->pagelist[0]);
 	argp->pagelist++;
 	if (argp->pagelen < PAGE_SIZE) {
-		argp->end = argp->p + (argp->pagelen>>2);
+		argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
 		argp->pagelen = 0;
 	} else {
 		argp->end = argp->p + (PAGE_SIZE>>2);
@@ -1246,9 +1246,7 @@
 		argp->pagelen -= pages * PAGE_SIZE;
 		len -= pages * PAGE_SIZE;
 
-		argp->p = (__be32 *)page_address(argp->pagelist[0]);
-		argp->pagelist++;
-		argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
+		next_decode_page(argp);
 	}
 	argp->p += XDR_QUADLEN(len);
 
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 1239d1c..fffaad4 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -176,6 +176,9 @@
 	gid_t gid = sbi->options.fs_low_gid;
 	struct iattr newattrs;
 
+	if (!sbi->options.gid_derivation)
+		return;
+
 	info = SDCARDFS_I(d_inode(dentry));
 	info_d = info->data;
 	perm = info_d->perm;
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 6076c34..5ac0b0b 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -104,12 +104,19 @@
 {
 	long err = -ENOTTY;
 	struct file *lower_file;
+	const struct cred *saved_cred = NULL;
+	struct dentry *dentry = file->f_path.dentry;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 
 	lower_file = sdcardfs_lower_file(file);
 
 	/* XXX: use vfs_ioctl if/when VFS exports it */
 	if (!lower_file || !lower_file->f_op)
 		goto out;
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+
 	if (lower_file->f_op->unlocked_ioctl)
 		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
 
@@ -117,6 +124,7 @@
 	if (!err)
 		sdcardfs_copy_and_fix_attrs(file_inode(file),
 				      file_inode(lower_file));
+	REVERT_CRED(saved_cred);
 out:
 	return err;
 }
@@ -127,15 +135,23 @@
 {
 	long err = -ENOTTY;
 	struct file *lower_file;
+	const struct cred *saved_cred = NULL;
+	struct dentry *dentry = file->f_path.dentry;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
 
 	lower_file = sdcardfs_lower_file(file);
 
 	/* XXX: use vfs_ioctl if/when VFS exports it */
 	if (!lower_file || !lower_file->f_op)
 		goto out;
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+
 	if (lower_file->f_op->compat_ioctl)
 		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
 
+	REVERT_CRED(saved_cred);
 out:
 	return err;
 }
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 683b492..4a971e2 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -34,10 +34,14 @@
 	if (!cred)
 		return NULL;
 
-	if (data->under_obb)
-		uid = AID_MEDIA_OBB;
-	else
-		uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+	if (sbi->options.gid_derivation) {
+		if (data->under_obb)
+			uid = AID_MEDIA_OBB;
+		else
+			uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+	} else {
+		uid = sbi->options.fs_low_uid;
+	}
 	cred->fsuid = make_kuid(&init_user_ns, uid);
 	cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
 
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 80825b2..0a2b516 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -32,6 +32,7 @@
 	Opt_multiuser,
 	Opt_userid,
 	Opt_reserved_mb,
+	Opt_gid_derivation,
 	Opt_err,
 };
 
@@ -43,6 +44,7 @@
 	{Opt_mask, "mask=%u"},
 	{Opt_userid, "userid=%d"},
 	{Opt_multiuser, "multiuser"},
+	{Opt_gid_derivation, "derive_gid"},
 	{Opt_reserved_mb, "reserved_mb=%u"},
 	{Opt_err, NULL}
 };
@@ -64,6 +66,8 @@
 	vfsopts->gid = 0;
 	/* by default, 0MB is reserved */
 	opts->reserved_mb = 0;
+	/* by default, gid derivation is off */
+	opts->gid_derivation = false;
 
 	*debug = 0;
 
@@ -115,6 +119,9 @@
 				return 0;
 			opts->reserved_mb = option;
 			break;
+		case Opt_gid_derivation:
+			opts->gid_derivation = true;
+			break;
 		/* unknown option */
 		default:
 			if (!silent)
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 4e0ce49..d1d8bab 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -219,6 +219,7 @@
 	gid_t fs_low_gid;
 	userid_t fs_user_id;
 	bool multiuser;
+	bool gid_derivation;
 	unsigned int reserved_mb;
 };
 
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 7f4539b..b89947d 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -302,6 +302,8 @@
 		seq_printf(m, ",mask=%u", vfsopts->mask);
 	if (opts->fs_user_id)
 		seq_printf(m, ",userid=%u", opts->fs_user_id);
+	if (opts->gid_derivation)
+		seq_puts(m, ",derive_gid");
 	if (opts->reserved_mb != 0)
 		seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
 
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 2852521..c6c15e5 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -351,7 +351,7 @@
 
 	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
 				XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
-	if (!err && tp)
+	if (!err && tp && *bpp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
 	return err;
 }
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2a8cbd1..d2f4ab1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -579,7 +579,7 @@
 
 #else
 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
-#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
+#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
 #endif /* DEBUG */
 
 /*
@@ -5555,6 +5555,8 @@
 	int			whichfork;	/* data or attribute fork */
 	xfs_fsblock_t		sum;
 	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
+	xfs_fileoff_t		max_len;
+	xfs_agnumber_t		prev_agno = NULLAGNUMBER, agno;
 
 	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
 
@@ -5576,6 +5578,16 @@
 	ASSERT(len > 0);
 	ASSERT(nexts >= 0);
 
+	/*
+	 * Guesstimate how many blocks we can unmap without running the risk of
+	 * blowing out the transaction with a mix of EFIs and reflink
+	 * adjustments.
+	 */
+	if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
+		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
+	else
+		max_len = len;
+
 	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
 	    (error = xfs_iread_extents(tp, ip, whichfork)))
 		return error;
@@ -5621,7 +5633,7 @@
 
 	extno = 0;
 	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
-	       (nexts == 0 || extno < nexts)) {
+	       (nexts == 0 || extno < nexts) && max_len > 0) {
 		/*
 		 * Is the found extent after a hole in which bno lives?
 		 * Just back up to the previous extent, if so.
@@ -5647,6 +5659,17 @@
 		ASSERT(ep != NULL);
 		del = got;
 		wasdel = isnullstartblock(del.br_startblock);
+
+		/*
+		 * Make sure we don't touch multiple AGF headers out of order
+		 * in a single transaction, as that could cause AB-BA deadlocks.
+		 */
+		if (!wasdel) {
+			agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
+			if (prev_agno != NULLAGNUMBER && prev_agno > agno)
+				break;
+			prev_agno = agno;
+		}
 		if (got.br_startoff < start) {
 			del.br_startoff = start;
 			del.br_blockcount -= start - got.br_startoff;
@@ -5655,6 +5678,15 @@
 		}
 		if (del.br_startoff + del.br_blockcount > bno + 1)
 			del.br_blockcount = bno + 1 - del.br_startoff;
+
+		/* How much can we safely unmap? */
+		if (max_len < del.br_blockcount) {
+			del.br_startoff += del.br_blockcount - max_len;
+			if (!wasdel)
+				del.br_startblock += del.br_blockcount - max_len;
+			del.br_blockcount = max_len;
+		}
+
 		sum = del.br_startblock + del.br_blockcount;
 		if (isrt &&
 		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
@@ -5835,6 +5867,7 @@
 		if (!isrt && wasdel)
 			xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
 
+		max_len -= del.br_blockcount;
 		bno = del.br_startoff - 1;
 nodelete:
 		/*
@@ -6604,25 +6637,33 @@
 	int				whichfork,
 	xfs_fileoff_t			startoff,
 	xfs_fsblock_t			startblock,
-	xfs_filblks_t			blockcount,
+	xfs_filblks_t			*blockcount,
 	xfs_exntst_t			state)
 {
 	struct xfs_bmbt_irec		bmap;
 	int				nimaps = 1;
 	xfs_fsblock_t			firstfsb;
 	int				flags = XFS_BMAPI_REMAP;
-	int				done;
 	int				error = 0;
 
 	bmap.br_startblock = startblock;
 	bmap.br_startoff = startoff;
-	bmap.br_blockcount = blockcount;
+	bmap.br_blockcount = *blockcount;
 	bmap.br_state = state;
 
+	/*
+	 * firstfsb is tied to the transaction lifetime and is used to
+	 * ensure correct AG locking order and schedule work item
+	 * continuations.  XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
+	 * to only making one bmap call per transaction, so it should
+	 * be safe to have it as a local variable here.
+	 */
+	firstfsb = NULLFSBLOCK;
+
 	trace_xfs_bmap_deferred(tp->t_mountp,
 			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
 			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
-			ip->i_ino, whichfork, startoff, blockcount, state);
+			ip->i_ino, whichfork, startoff, *blockcount, state);
 
 	if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
 		return -EFSCORRUPTED;
@@ -6641,12 +6682,11 @@
 					bmap.br_blockcount, flags, &firstfsb,
 					bmap.br_blockcount, &bmap, &nimaps,
 					dfops);
+		*blockcount = 0;
 		break;
 	case XFS_BMAP_UNMAP:
-		error = xfs_bunmapi(tp, ip, bmap.br_startoff,
-				bmap.br_blockcount, flags, 1, &firstfsb,
-				dfops, &done);
-		ASSERT(done);
+		error = __xfs_bunmapi(tp, ip, startoff, blockcount,
+				XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
 		break;
 	default:
 		ASSERT(0);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index e7d40b3..db53ac7f 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -265,7 +265,7 @@
 int	xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_defer_ops *dfops,
 		struct xfs_inode *ip, enum xfs_bmap_intent_type type,
 		int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
-		xfs_filblks_t blockcount, xfs_exntst_t state);
+		xfs_filblks_t *blockcount, xfs_exntst_t state);
 int	xfs_bmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
 		struct xfs_inode *ip, struct xfs_bmbt_irec *imap);
 int	xfs_bmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 5c39186..9968a74 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -888,6 +888,7 @@
 	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
 	if (!cur)
 		return -ENOMEM;
+	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;
 
 	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
 	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 91c6891..4ad1e21 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -714,7 +714,8 @@
 	 * Get the block pointer for this level.
 	 */
 	block = xfs_btree_get_block(cur, level, &bp);
-	xfs_btree_check_block(cur, block, level, bp);
+	if (xfs_btree_check_block(cur, block, level, bp))
+		return 0;
 	/*
 	 * It's empty, there is no such record.
 	 */
@@ -743,7 +744,8 @@
 	 * Get the block pointer for this level.
 	 */
 	block = xfs_btree_get_block(cur, level, &bp);
-	xfs_btree_check_block(cur, block, level, bp);
+	if (xfs_btree_check_block(cur, block, level, bp))
+		return 0;
 	/*
 	 * It's empty, there is no such record.
 	 */
@@ -1772,6 +1774,7 @@
 
 	/* Check the inode owner since the verifiers don't. */
 	if (xfs_sb_version_hascrc(&cur->bc_mp->m_sb) &&
+	    !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_INVALID_OWNER) &&
 	    (cur->bc_flags & XFS_BTREE_LONG_PTRS) &&
 	    be64_to_cpu((*blkp)->bb_u.l.bb_owner) !=
 			cur->bc_private.b.ip->i_ino)
@@ -4432,10 +4435,15 @@
 
 	/* modify the owner */
 	block = xfs_btree_get_block(cur, level, &bp);
-	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
+		if (block->bb_u.l.bb_owner == cpu_to_be64(bbcoi->new_owner))
+			return 0;
 		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
-	else
+	} else {
+		if (block->bb_u.s.bb_owner == cpu_to_be32(bbcoi->new_owner))
+			return 0;
 		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
+	}
 
 	/*
 	 * If the block is a root block hosted in an inode, we might not have a
@@ -4444,16 +4452,19 @@
 	 * block is formatted into the on-disk inode fork. We still change it,
 	 * though, so everything is consistent in memory.
 	 */
-	if (bp) {
-		if (cur->bc_tp) {
-			xfs_trans_ordered_buf(cur->bc_tp, bp);
-			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
-		} else {
-			xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
-		}
-	} else {
+	if (!bp) {
 		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
 		ASSERT(level == cur->bc_nlevels - 1);
+		return 0;
+	}
+
+	if (cur->bc_tp) {
+		if (!xfs_trans_ordered_buf(cur->bc_tp, bp)) {
+			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
+			return -EAGAIN;
+		}
+	} else {
+		xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
 	}
 
 	return 0;
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 3b0fc1a..33c7be2 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -268,7 +268,8 @@
 			short		forksize;	/* fork's inode space */
 			char		whichfork;	/* data or attr fork */
 			char		flags;		/* flags */
-#define	XFS_BTCUR_BPRV_WASDEL	1			/* was delayed */
+#define	XFS_BTCUR_BPRV_WASDEL		(1<<0)		/* was delayed */
+#define	XFS_BTCUR_BPRV_INVALID_OWNER	(1<<1)		/* for ext swap */
 		} b;
 	}		bc_private;	/* per-btree type data */
 } xfs_btree_cur_t;
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 1bdf288..b305dbf 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -263,7 +263,7 @@
 
 	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
 					which_fork, &xfs_da3_node_buf_ops);
-	if (!err && tp) {
+	if (!err && tp && *bpp) {
 		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
 		int			type;
 
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index aa17cb7..43c902f 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -139,7 +139,7 @@
 
 	err = xfs_da_read_buf(tp, dp, mp->m_dir_geo->datablk, -1, bpp,
 				XFS_DATA_FORK, &xfs_dir3_block_buf_ops);
-	if (!err && tp)
+	if (!err && tp && *bpp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_BLOCK_BUF);
 	return err;
 }
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index b887fb2..f2e342e 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -268,7 +268,7 @@
 
 	err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
 				XFS_DATA_FORK, &xfs_dir3_leaf1_buf_ops);
-	if (!err && tp)
+	if (!err && tp && *bpp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAF1_BUF);
 	return err;
 }
@@ -285,7 +285,7 @@
 
 	err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
 				XFS_DATA_FORK, &xfs_dir3_leafn_buf_ops);
-	if (!err && tp)
+	if (!err && tp && *bpp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_LEAFN_BUF);
 	return err;
 }
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index a2818f6..42fef07 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -368,8 +368,6 @@
 				 * transaction and pin the log appropriately.
 				 */
 				xfs_trans_ordered_buf(tp, fbuf);
-				xfs_trans_log_buf(tp, fbuf, 0,
-						  BBTOB(fbuf->b_length) - 1);
 			}
 		} else {
 			fbuf->b_flags |= XBF_DONE;
@@ -1123,6 +1121,7 @@
 	int			error;
 	int			offset;
 	int			i, j;
+	int			searchdistance = 10;
 
 	pag = xfs_perag_get(mp, agno);
 
@@ -1149,7 +1148,6 @@
 	if (pagno == agno) {
 		int		doneleft;	/* done, to the left */
 		int		doneright;	/* done, to the right */
-		int		searchdistance = 10;
 
 		error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
 		if (error)
@@ -1210,21 +1208,9 @@
 		/*
 		 * Loop until we find an inode chunk with a free inode.
 		 */
-		while (!doneleft || !doneright) {
+		while (--searchdistance > 0 && (!doneleft || !doneright)) {
 			int	useleft;  /* using left inode chunk this time */
 
-			if (!--searchdistance) {
-				/*
-				 * Not in range - save last search
-				 * location and allocate a new inode
-				 */
-				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
-				pag->pagl_leftrec = trec.ir_startino;
-				pag->pagl_rightrec = rec.ir_startino;
-				pag->pagl_pagino = pagino;
-				goto newino;
-			}
-
 			/* figure out the closer block if both are valid. */
 			if (!doneleft && !doneright) {
 				useleft = pagino -
@@ -1236,13 +1222,13 @@
 
 			/* free inodes to the left? */
 			if (useleft && trec.ir_freecount) {
-				rec = trec;
 				xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 				cur = tcur;
 
 				pag->pagl_leftrec = trec.ir_startino;
 				pag->pagl_rightrec = rec.ir_startino;
 				pag->pagl_pagino = pagino;
+				rec = trec;
 				goto alloc_inode;
 			}
 
@@ -1268,26 +1254,37 @@
 				goto error1;
 		}
 
-		/*
-		 * We've reached the end of the btree. because
-		 * we are only searching a small chunk of the
-		 * btree each search, there is obviously free
-		 * inodes closer to the parent inode than we
-		 * are now. restart the search again.
-		 */
-		pag->pagl_pagino = NULLAGINO;
-		pag->pagl_leftrec = NULLAGINO;
-		pag->pagl_rightrec = NULLAGINO;
-		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
-		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-		goto restart_pagno;
+		if (searchdistance <= 0) {
+			/*
+			 * Not in range - save last search
+			 * location and allocate a new inode
+			 */
+			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+			pag->pagl_leftrec = trec.ir_startino;
+			pag->pagl_rightrec = rec.ir_startino;
+			pag->pagl_pagino = pagino;
+
+		} else {
+			/*
+			 * We've reached the end of the btree. Because
+			 * we are only searching a small chunk of the
+			 * btree each search, there are obviously free
+			 * inodes closer to the parent inode than we
+			 * are now. Restart the search again.
+			 */
+			pag->pagl_pagino = NULLAGINO;
+			pag->pagl_leftrec = NULLAGINO;
+			pag->pagl_rightrec = NULLAGINO;
+			xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
+			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+			goto restart_pagno;
+		}
 	}
 
 	/*
 	 * In a different AG from the parent.
 	 * See if the most recently allocated block has any free.
 	 */
-newino:
 	if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
 		error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
 					 XFS_LOOKUP_EQ, &i);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 8a37efe..4e30448 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -1539,14 +1539,11 @@
 	xfs_ifork_t	*ifp,		/* inode fork pointer */
 	int		new_size)	/* new indirection array size */
 {
-	int		nlists;		/* number of irec's (ex lists) */
-	int		size;		/* current indirection array size */
-
 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
-	size = nlists * sizeof(xfs_ext_irec_t);
 	ASSERT(ifp->if_real_bytes);
-	ASSERT((new_size >= 0) && (new_size != size));
+	ASSERT((new_size >= 0) &&
+	       (new_size != ((ifp->if_real_bytes / XFS_IEXT_BUFSZ) *
+			     sizeof(xfs_ext_irec_t))));
 	if (new_size == 0) {
 		xfs_iext_destroy(ifp);
 	} else {
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 82a38d8..d71cb63 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -784,14 +784,6 @@
 }
 
 /*
- * While we're adjusting the refcounts records of an extent, we have
- * to keep an eye on the number of extents we're dirtying -- run too
- * many in a single transaction and we'll exceed the transaction's
- * reservation and crash the fs.  Each record adds 12 bytes to the
- * log (plus any key updates) so we'll conservatively assume 24 bytes
- * per record.  We must also leave space for btree splits on both ends
- * of the range and space for the CUD and a new CUI.
- *
  * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
  * true incorrectly is a shutdown FS; the penalty for guessing false
  * incorrectly is more transaction rolls than might be necessary.
@@ -822,7 +814,7 @@
 	else if (overhead > cur->bc_tp->t_log_res)
 		return false;
 	return  cur->bc_tp->t_log_res - overhead >
-		cur->bc_private.a.priv.refc.nr_ops * 32;
+		cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
 }
 
 /*
@@ -1648,6 +1640,10 @@
 	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 	if (error)
 		goto out_trans;
+	if (!agbp) {
+		error = -ENOMEM;
+		goto out_trans;
+	}
 	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
 	/* Find all the leftover CoW staging extents. */
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index 098dc668..eafb9d1 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -67,4 +67,20 @@
 extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
 		xfs_agnumber_t agno);
 
+/*
+ * While we're adjusting the refcounts records of an extent, we have
+ * to keep an eye on the number of extents we're dirtying -- run too
+ * many in a single transaction and we'll exceed the transaction's
+ * reservation and crash the fs.  Each record adds 12 bytes to the
+ * log (plus any key updates) so we'll conservatively assume 32 bytes
+ * per record.  We must also leave space for btree splits on both ends
+ * of the range and space for the CUD and a new CUI.
+ */
+#define XFS_REFCOUNT_ITEM_OVERHEAD	32
+
+static inline xfs_fileoff_t xfs_refcount_max_unmap(int log_res)
+{
+	return (log_res * 3 / 4) / XFS_REFCOUNT_ITEM_OVERHEAD;
+}
+
 #endif	/* __XFS_REFCOUNT_H__ */
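
The new helper amounts to simple arithmetic: keep a quarter of the transaction's
log reservation as headroom for btree splits, the CUD and a new CUI, and charge
XFS_REFCOUNT_ITEM_OVERHEAD (32) bytes of estimated log overhead per refcount
record. A standalone userspace sketch of that calculation, using a hypothetical
100,000-byte log reservation purely for illustration:

	/* Illustrative only; mirrors xfs_refcount_max_unmap() with made-up numbers. */
	#include <stdio.h>

	#define XFS_REFCOUNT_ITEM_OVERHEAD	32

	static long max_unmap(long log_res)
	{
		/* keep 1/4 of the reservation in reserve, 32 bytes per record */
		return (log_res * 3 / 4) / XFS_REFCOUNT_ITEM_OVERHEAD;
	}

	int main(void)
	{
		/* (100000 * 3 / 4) / 32 == 2343 file blocks per transaction roll */
		printf("%ld\n", max_unmap(100000));
		return 0;
	}
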
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 5789814..d23889e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -90,11 +90,11 @@
  * associated buffer_heads, paying attention to the start and end offsets that
  * we need to process on the page.
  *
- * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
- * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
- * the page at all, as we may be racing with memory reclaim and it can free both
- * the bufferhead chain and the page as it will see the page as clean and
- * unused.
+ * Note that we open code the action in end_buffer_async_write here so that we
+ * only have to iterate over the buffers attached to the page once.  This is not
+ * only more efficient, but also ensures that we only call end_page_writeback
+ * at the end of the iteration, and thus avoids the pitfall of having the page
+ * and buffers potentially freed after every call to end_buffer_async_write.
  */
 static void
 xfs_finish_page_writeback(
@@ -102,29 +102,45 @@
 	struct bio_vec		*bvec,
 	int			error)
 {
-	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
-	struct buffer_head	*head, *bh, *next;
+	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
+	bool			busy = false;
 	unsigned int		off = 0;
-	unsigned int		bsize;
+	unsigned long		flags;
 
 	ASSERT(bvec->bv_offset < PAGE_SIZE);
 	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
-	ASSERT(end < PAGE_SIZE);
+	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
 	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
 
-	bh = head = page_buffers(bvec->bv_page);
-
-	bsize = bh->b_size;
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
 	do {
-		if (off > end)
-			break;
-		next = bh->b_this_page;
-		if (off < bvec->bv_offset)
-			goto next_bh;
-		bh->b_end_io(bh, !error);
-next_bh:
-		off += bsize;
-	} while ((bh = next) != head);
+		if (off >= bvec->bv_offset &&
+		    off < bvec->bv_offset + bvec->bv_len) {
+			ASSERT(buffer_async_write(bh));
+			ASSERT(bh->b_end_io == NULL);
+
+			if (error) {
+				mapping_set_error(bvec->bv_page->mapping, -EIO);
+				set_buffer_write_io_error(bh);
+				clear_buffer_uptodate(bh);
+				SetPageError(bvec->bv_page);
+			} else {
+				set_buffer_uptodate(bh);
+			}
+			clear_buffer_async_write(bh);
+			unlock_buffer(bh);
+		} else if (buffer_async_write(bh)) {
+			ASSERT(buffer_locked(bh));
+			busy = true;
+		}
+		off += bh->b_size;
+	} while ((bh = bh->b_this_page) != head);
+	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+	local_irq_restore(flags);
+
+	if (!busy)
+		end_page_writeback(bvec->bv_page);
 }
 
 /*
@@ -138,8 +154,10 @@
 	int			error)
 {
 	struct inode		*inode = ioend->io_inode;
-	struct bio		*last = ioend->io_bio;
-	struct bio		*bio, *next;
+	struct bio		*bio = &ioend->io_inline_bio;
+	struct bio		*last = ioend->io_bio, *next;
+	u64			start = bio->bi_iter.bi_sector;
+	bool			quiet = bio_flagged(bio, BIO_QUIET);
 
 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
 		struct bio_vec	*bvec;
@@ -160,6 +178,11 @@
 
 		bio_put(bio);
 	}
+
+	if (unlikely(error && !quiet)) {
+		xfs_err_ratelimited(XFS_I(inode)->i_mount,
+			"writeback error on sector %llu", start);
+	}
 }
 
 /*
@@ -427,7 +450,8 @@
 	ASSERT(!buffer_delay(bh));
 	ASSERT(!buffer_unwritten(bh));
 
-	mark_buffer_async_write(bh);
+	bh->b_end_io = NULL;
+	set_buffer_async_write(bh);
 	set_buffer_uptodate(bh);
 	clear_buffer_dirty(bh);
 }
@@ -1566,9 +1590,12 @@
 	 * The swap code (ab-)uses ->bmap to get a block mapping and then
 	 * bypasses the file system for actual I/O.  We really can't allow
 	 * that on reflinks inodes, so we have to skip out here.  And yes,
-	 * 0 is the magic code for a bmap error..
+	 * 0 is the magic code for a bmap error.
+	 *
+	 * Since we don't pass back blockdev info, we can't return bmap
+	 * information for rt files either.
 	 */
-	if (xfs_is_reflink_inode(ip)) {
+	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) {
 		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 		return 0;
 	}
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index c4b90e7..5a54dcd 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -395,6 +395,7 @@
 	struct xfs_map_extent		*bmap;
 	xfs_fsblock_t			startblock_fsb;
 	xfs_fsblock_t			inode_fsb;
+	xfs_filblks_t			count;
 	bool				op_ok;
 	struct xfs_bud_log_item		*budp;
 	enum xfs_bmap_intent_type	type;
@@ -403,6 +404,7 @@
 	struct xfs_trans		*tp;
 	struct xfs_inode		*ip = NULL;
 	struct xfs_defer_ops		dfops;
+	struct xfs_bmbt_irec		irec;
 	xfs_fsblock_t			firstfsb;
 
 	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
@@ -480,13 +482,24 @@
 	}
 	xfs_trans_ijoin(tp, ip, 0);
 
+	count = bmap->me_len;
 	error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
 			ip, whichfork, bmap->me_startoff,
-			bmap->me_startblock, bmap->me_len,
-			state);
+			bmap->me_startblock, &count, state);
 	if (error)
 		goto err_dfops;
 
+	if (count > 0) {
+		ASSERT(type == XFS_BMAP_UNMAP);
+		irec.br_startblock = bmap->me_startblock;
+		irec.br_blockcount = count;
+		irec.br_startoff = bmap->me_startoff;
+		irec.br_state = state;
+		error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
+		if (error)
+			goto err_dfops;
+	}
+
 	/* Finish transaction, free inodes. */
 	error = xfs_defer_finish(&tp, &dfops, NULL);
 	if (error)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 87b495e..5ffefac 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1825,29 +1825,18 @@
 	}
 
 	/*
-	 * Before we've swapped the forks, lets set the owners of the forks
-	 * appropriately. We have to do this as we are demand paging the btree
-	 * buffers, and so the validation done on read will expect the owner
-	 * field to be correctly set. Once we change the owners, we can swap the
-	 * inode forks.
+	 * Btree format (v3) inodes have the inode number stamped in the bmbt
+	 * block headers. We can't start changing the bmbt blocks until the
+	 * inode owner change is logged so recovery does the right thing in the
+	 * event of a crash. Set the owner change log flags now and leave the
+	 * bmbt scan as the last step.
 	 */
 	if (ip->i_d.di_version == 3 &&
-	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
 		(*target_log_flags) |= XFS_ILOG_DOWNER;
-		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
-					      tip->i_ino, NULL);
-		if (error)
-			return error;
-	}
-
 	if (tip->i_d.di_version == 3 &&
-	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
 		(*src_log_flags) |= XFS_ILOG_DOWNER;
-		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
-					      ip->i_ino, NULL);
-		if (error)
-			return error;
-	}
 
 	/*
 	 * Swap the data forks of the inodes
@@ -1925,6 +1914,48 @@
 	return 0;
 }
 
+/*
+ * Fix up the owners of the bmbt blocks to refer to the current inode. The
+ * change owner scan attempts to order all modified buffers in the current
+ * transaction. In the event of ordered buffer failure, the offending buffer is
+ * physically logged as a fallback and the scan returns -EAGAIN. We must roll
+ * the transaction in this case to replenish the fallback log reservation and
+ * restart the scan. This process repeats until the scan completes.
+ */
+static int
+xfs_swap_change_owner(
+	struct xfs_trans	**tpp,
+	struct xfs_inode	*ip,
+	struct xfs_inode	*tmpip)
+{
+	int			error;
+	struct xfs_trans	*tp = *tpp;
+
+	do {
+		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
+					      NULL);
+		/* success or fatal error */
+		if (error != -EAGAIN)
+			break;
+
+		error = xfs_trans_roll(tpp, NULL);
+		if (error)
+			break;
+		tp = *tpp;
+
+		/*
+		 * Redirty both inodes so they can relog and keep the log tail
+		 * moving forward.
+		 */
+		xfs_trans_ijoin(tp, ip, 0);
+		xfs_trans_ijoin(tp, tmpip, 0);
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
+	} while (true);
+
+	return error;
+}
+
 int
 xfs_swap_extents(
 	struct xfs_inode	*ip,	/* target inode */
@@ -1938,8 +1969,8 @@
 	int			error = 0;
 	int			lock_flags;
 	struct xfs_ifork	*cowfp;
-	__uint64_t		f;
-	int			resblks;
+	uint64_t		f;
+	int			resblks = 0;
 
 	/*
 	 * Lock the inodes against other IO, page faults and truncate to
@@ -1987,11 +2018,8 @@
 			  XFS_SWAP_RMAP_SPACE_RES(mp,
 				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
 				XFS_DATA_FORK);
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
-				0, 0, &tp);
-	} else
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
-				0, 0, &tp);
+	}
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
 	if (error)
 		goto out_unlock;
 
@@ -2077,6 +2105,23 @@
 	xfs_trans_log_inode(tp, tip, target_log_flags);
 
 	/*
+	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
+	 * have inode number owner values in the bmbt blocks that still refer to
+	 * the old inode. Scan each bmbt to fix up the owner values with the
+	 * inode number of the current inode.
+	 */
+	if (src_log_flags & XFS_ILOG_DOWNER) {
+		error = xfs_swap_change_owner(&tp, ip, tip);
+		if (error)
+			goto out_trans_cancel;
+	}
+	if (target_log_flags & XFS_ILOG_DOWNER) {
+		error = xfs_swap_change_owner(&tp, tip, ip);
+		if (error)
+			goto out_trans_cancel;
+	}
+
+	/*
 	 * If this is a synchronous mount, make sure that the
 	 * transaction goes to disk before returning to the user.
 	 */
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1626927..eca7bae 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -116,7 +116,7 @@
 __xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	ASSERT(spin_is_locked(&bp->b_lock));
+	lockdep_assert_held(&bp->b_lock);
 
 	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
 		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
@@ -2022,6 +2022,66 @@
 	return error;
 }
 
+/*
+ * Push a single buffer on a delwri queue.
+ *
+ * The purpose of this function is to submit a single buffer of a delwri queue
+ * and return with the buffer still on the original queue. The waiting delwri
+ * buffer submission infrastructure guarantees transfer of the delwri queue
+ * buffer reference to a temporary wait list. We reuse this infrastructure to
+ * transfer the buffer back to the original queue.
+ *
+ * Note the buffer transitions from the queued state, to the submitted and wait
+ * listed state and back to the queued state during this call. The buffer
+ * locking and queue management logic between _delwri_pushbuf() and
+ * _delwri_queue() guarantee that the buffer cannot be queued to another list
+ * before returning.
+ */
+int
+xfs_buf_delwri_pushbuf(
+	struct xfs_buf		*bp,
+	struct list_head	*buffer_list)
+{
+	LIST_HEAD		(submit_list);
+	int			error;
+
+	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+
+	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
+
+	/*
+	 * Isolate the buffer to a new local list so we can submit it for I/O
+	 * independently from the rest of the original list.
+	 */
+	xfs_buf_lock(bp);
+	list_move(&bp->b_list, &submit_list);
+	xfs_buf_unlock(bp);
+
+	/*
+	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
+	 * the buffer on the wait list with an associated reference. Rather than
+	 * bounce the buffer from a local wait list back to the original list
+	 * after I/O completion, reuse the original list as the wait list.
+	 */
+	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
+
+	/*
+	 * The buffer is now under I/O and wait listed as during typical delwri
+	 * submission. Lock the buffer to wait for I/O completion. Rather than
+	 * remove the buffer from the wait list and release the reference, we
+	 * want to return with the buffer queued to the original list. The
+	 * buffer already sits on the original list with a wait list reference,
+	 * however. If we let the queue inherit that wait list reference, all we
+	 * need to do is reset the DELWRI_Q flag.
+	 */
+	xfs_buf_lock(bp);
+	error = bp->b_error;
+	bp->b_flags |= _XBF_DELWRI_Q;
+	xfs_buf_unlock(bp);
+
+	return error;
+}
+
 int __init
 xfs_buf_init(void)
 {
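
For context, the intended caller of xfs_buf_delwri_pushbuf() is the quotacheck
change later in this series (see the xfs_qm.c hunk below). Condensed here as a
sketch only, with the buffer-map setup and error handling elided; it is not a
compilable excerpt:

	/* buffer is already pinned in core by the caller's delwri queue */
	bp = _xfs_buf_find(mp->m_ddev_targp, &map, 1, 0, NULL);
	xfs_buf_unlock(bp);		/* _find returns the buffer locked */

	/* cycle the buffer through I/O and leave it on the original queue */
	error = xfs_buf_delwri_pushbuf(bp, buffer_list);
	xfs_buf_rele(bp);
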
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index ad514a8..f961b19 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -333,6 +333,7 @@
 extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
 extern int xfs_buf_delwri_submit(struct list_head *);
 extern int xfs_buf_delwri_submit_nowait(struct list_head *);
+extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
 
 /* Buffer Daemon Setup Routines */
 extern int xfs_buf_init(void);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 0306168..e0a0af0 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -29,6 +29,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_log.h"
+#include "xfs_inode.h"
 
 
 kmem_zone_t	*xfs_buf_item_zone;
@@ -322,6 +323,8 @@
 	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
 	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
 	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
+	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
+	       (bip->bli_flags & XFS_BLI_STALE));
 
 
 	/*
@@ -346,16 +349,6 @@
 		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
 	}
 
-	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
-							XFS_BLI_ORDERED) {
-		/*
-		 * The buffer has been logged just to order it.  It is not being
-		 * included in the transaction commit, so don't format it.
-		 */
-		trace_xfs_buf_item_format_ordered(bip);
-		return;
-	}
-
 	for (i = 0; i < bip->bli_format_count; i++) {
 		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
 					    &bip->bli_formats[i]);
@@ -574,26 +567,20 @@
 {
 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
 	struct xfs_buf		*bp = bip->bli_buf;
-	bool			clean;
-	bool			aborted;
-	int			flags;
+	bool			aborted = !!(lip->li_flags & XFS_LI_ABORTED);
+	bool			hold = !!(bip->bli_flags & XFS_BLI_HOLD);
+	bool			dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
+#if defined(DEBUG) || defined(XFS_WARN)
+	bool			ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
+#endif
 
 	/* Clear the buffer's association with this transaction. */
 	bp->b_transp = NULL;
 
 	/*
-	 * If this is a transaction abort, don't return early.  Instead, allow
-	 * the brelse to happen.  Normally it would be done for stale
-	 * (cancelled) buffers at unpin time, but we'll never go through the
-	 * pin/unpin cycle if we abort inside commit.
+	 * The per-transaction state has been copied above so clear it from the
+	 * bli.
 	 */
-	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
-	/*
-	 * Before possibly freeing the buf item, copy the per-transaction state
-	 * so we can reference it safely later after clearing it from the
-	 * buffer log item.
-	 */
-	flags = bip->bli_flags;
 	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
 
 	/*
@@ -601,7 +588,7 @@
 	 * unlock the buffer and free the buf item when the buffer is unpinned
 	 * for the last time.
 	 */
-	if (flags & XFS_BLI_STALE) {
+	if (bip->bli_flags & XFS_BLI_STALE) {
 		trace_xfs_buf_item_unlock_stale(bip);
 		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 		if (!aborted) {
@@ -619,40 +606,34 @@
 	 * regardless of whether it is dirty or not. A dirty abort implies a
 	 * shutdown, anyway.
 	 *
-	 * Ordered buffers are dirty but may have no recorded changes, so ensure
-	 * we only release clean items here.
+	 * The bli dirty state should match whether the blf has logged segments
+	 * except for ordered buffers, where only the bli should be dirty.
 	 */
-	clean = (flags & XFS_BLI_DIRTY) ? false : true;
-	if (clean) {
-		int i;
-		for (i = 0; i < bip->bli_format_count; i++) {
-			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
-				     bip->bli_formats[i].blf_map_size)) {
-				clean = false;
-				break;
-			}
-		}
-	}
+	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
+	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
 
 	/*
 	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
-	 * buffers may be dirty and hence in the AIL. Therefore if we are
-	 * aborting a buffer and we've just taken the last refernce away, we
-	 * have to check if it is in the AIL before freeing it. We need to free
-	 * it in this case, because an aborted transaction has already shut the
-	 * filesystem down and this is the last chance we will have to do so.
+	 * buffers may be in the AIL regardless of dirty state. An aborted
+	 * transaction that invalidates a buffer already in the AIL may have
+	 * marked it stale and cleared the dirty state, for example.
+	 *
+	 * Therefore if we are aborting a buffer and we've just taken the last
+	 * reference away, we have to check if it is in the AIL before freeing
+	 * it. We need to free it in this case, because an aborted transaction
+	 * has already shut the filesystem down and this is the last chance we
+	 * will have to do so.
 	 */
 	if (atomic_dec_and_test(&bip->bli_refcount)) {
-		if (clean)
-			xfs_buf_item_relse(bp);
-		else if (aborted) {
+		if (aborted) {
 			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
 			xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
 			xfs_buf_item_relse(bp);
-		}
+		} else if (!dirty)
+			xfs_buf_item_relse(bp);
 	}
 
-	if (!(flags & XFS_BLI_HOLD))
+	if (!hold)
 		xfs_buf_relse(bp);
 }
 
@@ -942,14 +923,22 @@
 
 
 /*
- * Return 1 if the buffer has been logged or ordered in a transaction (at any
- * point, not just the current transaction) and 0 if not.
+ * Return true if the buffer has any ranges logged/dirtied by a transaction,
+ * false otherwise.
  */
-uint
-xfs_buf_item_dirty(
-	xfs_buf_log_item_t	*bip)
+bool
+xfs_buf_item_dirty_format(
+	struct xfs_buf_log_item	*bip)
 {
-	return (bip->bli_flags & XFS_BLI_DIRTY);
+	int			i;
+
+	for (i = 0; i < bip->bli_format_count; i++) {
+		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
+			     bip->bli_formats[i].blf_map_size))
+			return true;
+	}
+
+	return false;
 }
 
 STATIC void
@@ -1051,6 +1040,31 @@
 	}
 }
 
+/*
+ * Invoke the error state callback for each log item affected by the failed I/O.
+ *
+ * If a metadata buffer write fails with a non-permanent error, the buffer is
+ * eventually resubmitted and so the completion callbacks are not run. The error
+ * state may need to be propagated to the log items attached to the buffer,
+ * however, so the next AIL push of the item knows how to handle it correctly.
+ */
+STATIC void
+xfs_buf_do_callbacks_fail(
+	struct xfs_buf		*bp)
+{
+	struct xfs_log_item	*next;
+	struct xfs_log_item	*lip = bp->b_fspriv;
+	struct xfs_ail		*ailp = lip->li_ailp;
+
+	spin_lock(&ailp->xa_lock);
+	for (; lip; lip = next) {
+		next = lip->li_bio_list;
+		if (lip->li_ops->iop_error)
+			lip->li_ops->iop_error(lip, bp);
+	}
+	spin_unlock(&ailp->xa_lock);
+}
+
 static bool
 xfs_buf_iodone_callback_error(
 	struct xfs_buf		*bp)
@@ -1120,7 +1134,11 @@
 	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
 		goto permanent_error;
 
-	/* still a transient error, higher layers will retry */
+	/*
+	 * Still a transient error, run IO completion failure callbacks and let
+	 * the higher layers retry the buffer.
+	 */
+	xfs_buf_do_callbacks_fail(bp);
 	xfs_buf_ioerror(bp, 0);
 	xfs_buf_relse(bp);
 	return true;
@@ -1201,3 +1219,31 @@
 	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
 	xfs_buf_item_free(BUF_ITEM(lip));
 }
+
+/*
+ * Requeue a failed buffer for writeback
+ *
+ * Return true if the buffer has been re-queued properly, false otherwise
+ */
+bool
+xfs_buf_resubmit_failed_buffers(
+	struct xfs_buf		*bp,
+	struct xfs_log_item	*lip,
+	struct list_head	*buffer_list)
+{
+	struct xfs_log_item	*next;
+
+	/*
+	 * Clear XFS_LI_FAILED flag from all items before resubmit
+	 *
+	 * XFS_LI_FAILED set/clear is protected by xa_lock; the caller of
+	 * this function must already hold it.
+	 */
+	for (; lip; lip = next) {
+		next = lip->li_bio_list;
+		xfs_clear_li_failed(lip);
+	}
+
+	/* Add this buffer back to the delayed write list */
+	return xfs_buf_delwri_queue(bp, buffer_list);
+}
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index f7eba99..9690ce6 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -64,12 +64,15 @@
 int	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void	xfs_buf_item_relse(struct xfs_buf *);
 void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
-uint	xfs_buf_item_dirty(xfs_buf_log_item_t *);
+bool	xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
 void	xfs_buf_attach_iodone(struct xfs_buf *,
 			      void(*)(struct xfs_buf *, xfs_log_item_t *),
 			      xfs_log_item_t *);
 void	xfs_buf_iodone_callbacks(struct xfs_buf *);
 void	xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
+bool	xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
+					struct xfs_log_item *,
+					struct list_head *);
 
 extern kmem_zone_t	*xfs_buf_item_zone;
 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index df206cf..586b398 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -729,6 +729,7 @@
 		xfs_rw_iunlock(ip, iolock);
 		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
+		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
 		goto write_retry;
 	}
 
@@ -1139,29 +1140,8 @@
 		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  want);
-		/*
-		 * No page mapped into given range.  If we are searching holes
-		 * and if this is the first time we got into the loop, it means
-		 * that the given offset is landed in a hole, return it.
-		 *
-		 * If we have already stepped through some block buffers to find
-		 * holes but they all contains data.  In this case, the last
-		 * offset is already updated and pointed to the end of the last
-		 * mapped page, if it does not reach the endpoint to search,
-		 * that means there should be a hole between them.
-		 */
-		if (nr_pages == 0) {
-			/* Data search found nothing */
-			if (type == DATA_OFF)
-				break;
-
-			ASSERT(type == HOLE_OFF);
-			if (lastoff == startoff || lastoff < endoff) {
-				found = true;
-				*offset = lastoff;
-			}
+		if (nr_pages == 0)
 			break;
-		}
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page	*page = pvec.pages[i];
@@ -1227,21 +1207,20 @@
 
 		/*
 		 * The number of returned pages less than our desired, search
-		 * done.  In this case, nothing was found for searching data,
-		 * but we found a hole behind the last offset.
+		 * done.
 		 */
-		if (nr_pages < want) {
-			if (type == HOLE_OFF) {
-				*offset = lastoff;
-				found = true;
-			}
+		if (nr_pages < want)
 			break;
-		}
 
 		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	/* No page at lastoff and we are not done - we found a hole. */
+	if (type == HOLE_OFF && lastoff < endoff) {
+		*offset = lastoff;
+		found = true;
+	}
 out:
 	pagevec_release(&pvec);
 	return found;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 74304b6..86a4911 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -66,7 +66,6 @@
 
 	XFS_STATS_INC(mp, vn_active);
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
-	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(!xfs_isiflocked(ip));
 	ASSERT(ip->i_ino == 0);
 
@@ -192,7 +191,7 @@
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (pag->pag_ici_reclaimable++)
 		return;
 
@@ -214,7 +213,7 @@
 {
 	struct xfs_mount	*mp = pag->pag_mount;
 
-	ASSERT(spin_is_locked(&pag->pag_ici_lock));
+	lockdep_assert_held(&pag->pag_ici_lock);
 	if (--pag->pag_ici_reclaimable)
 		return;
 
@@ -1079,11 +1078,11 @@
 	 * Because we use RCU freeing we need to ensure the inode always appears
 	 * to be reclaimed with an invalid inode number when in the free state.
 	 * We do this as early as possible under the ILOCK so that
-	 * xfs_iflush_cluster() can be guaranteed to detect races with us here.
-	 * By doing this, we guarantee that once xfs_iflush_cluster has locked
-	 * XFS_ILOCK that it will see either a valid, flushable inode that will
-	 * serialise correctly, or it will see a clean (and invalid) inode that
-	 * it can skip.
+	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
+	 * detect races with us here. By doing this, we guarantee that once
+	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
+	 * it will see either a valid inode that will serialise correctly, or it
+	 * will see an invalid inode that it can skip.
 	 */
 	spin_lock(&ip->i_flags_lock);
 	ip->i_flags = XFS_IRECLAIM;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7a0b4ee..9e795ab 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -881,7 +881,6 @@
 	case S_IFREG:
 	case S_IFDIR:
 		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
-			uint64_t	di_flags2 = 0;
 			uint		di_flags = 0;
 
 			if (S_ISDIR(mode)) {
@@ -918,20 +917,23 @@
 				di_flags |= XFS_DIFLAG_NODEFRAG;
 			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
 				di_flags |= XFS_DIFLAG_FILESTREAM;
-			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
-				di_flags2 |= XFS_DIFLAG2_DAX;
 
 			ip->i_d.di_flags |= di_flags;
-			ip->i_d.di_flags2 |= di_flags2;
 		}
 		if (pip &&
 		    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
 		    pip->i_d.di_version == 3 &&
 		    ip->i_d.di_version == 3) {
+			uint64_t	di_flags2 = 0;
+
 			if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
-				ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
+				di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 				ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
 			}
+			if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
+				di_flags2 |= XFS_DIFLAG2_DAX;
+
+			ip->i_d.di_flags2 |= di_flags2;
 		}
 		/* FALLTHROUGH */
 	case S_IFLNK:
@@ -2366,11 +2368,24 @@
 			 * already marked stale. If we can't lock it, back off
 			 * and retry.
 			 */
-			if (ip != free_ip &&
-			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				rcu_read_unlock();
-				delay(1);
-				goto retry;
+			if (ip != free_ip) {
+				if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
+					rcu_read_unlock();
+					delay(1);
+					goto retry;
+				}
+
+				/*
+				 * Check the inode number again in case we're
+				 * racing with freeing in xfs_reclaim_inode().
+				 * See the comments in that function for more
+				 * information as to why the initial check is
+				 * not sufficient.
+				 */
+				if (ip->i_ino != inum + i) {
+					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					continue;
+				}
 			}
 			rcu_read_unlock();
 
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index d90e781..9491574 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -27,6 +27,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
 #include "xfs_log.h"
 
 
@@ -475,6 +476,23 @@
 		wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 }
 
+/*
+ * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
+ * have been failed during writeback
+ *
+ * This informs the AIL that the inode is already flush locked on the next push,
+ * and acquires a hold on the buffer to ensure that it isn't reclaimed before
+ * dirty data makes it to disk.
+ */
+STATIC void
+xfs_inode_item_error(
+	struct xfs_log_item	*lip,
+	struct xfs_buf		*bp)
+{
+	ASSERT(xfs_isiflocked(INODE_ITEM(lip)->ili_inode));
+	xfs_set_li_failed(lip, bp);
+}
+
 STATIC uint
 xfs_inode_item_push(
 	struct xfs_log_item	*lip,
@@ -484,13 +502,28 @@
 {
 	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
 	struct xfs_inode	*ip = iip->ili_inode;
-	struct xfs_buf		*bp = NULL;
+	struct xfs_buf		*bp = lip->li_buf;
 	uint			rval = XFS_ITEM_SUCCESS;
 	int			error;
 
 	if (xfs_ipincount(ip) > 0)
 		return XFS_ITEM_PINNED;
 
+	/*
+	 * The buffer containing this item failed to be written back
+	 * previously. Resubmit the buffer for IO.
+	 */
+	if (lip->li_flags & XFS_LI_FAILED) {
+		if (!xfs_buf_trylock(bp))
+			return XFS_ITEM_LOCKED;
+
+		if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+			rval = XFS_ITEM_FLUSHING;
+
+		xfs_buf_unlock(bp);
+		return rval;
+	}
+
 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 		return XFS_ITEM_LOCKED;
 
@@ -622,7 +655,8 @@
 	.iop_unlock	= xfs_inode_item_unlock,
 	.iop_committed	= xfs_inode_item_committed,
 	.iop_push	= xfs_inode_item_push,
-	.iop_committing = xfs_inode_item_committing
+	.iop_committing = xfs_inode_item_committing,
+	.iop_error	= xfs_inode_item_error
 };
 
 
@@ -710,7 +744,8 @@
 		 * the AIL lock.
 		 */
 		iip = INODE_ITEM(blip);
-		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
+		if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
+		    lip->li_flags & XFS_LI_FAILED)
 			need_ail++;
 
 		blip = next;
@@ -718,7 +753,8 @@
 
 	/* make sure we capture the state of the initial inode. */
 	iip = INODE_ITEM(lip);
-	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
+	if ((iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) ||
+	    lip->li_flags & XFS_LI_FAILED)
 		need_ail++;
 
 	/*
@@ -731,22 +767,30 @@
 	 * holding the lock before removing the inode from the AIL.
 	 */
 	if (need_ail) {
-		struct xfs_log_item *log_items[need_ail];
-		int i = 0;
+		bool			mlip_changed = false;
+
+		/* this is an opencoded batch version of xfs_trans_ail_delete */
 		spin_lock(&ailp->xa_lock);
 		for (blip = lip; blip; blip = blip->li_bio_list) {
-			iip = INODE_ITEM(blip);
-			if (iip->ili_logged &&
-			    blip->li_lsn == iip->ili_flush_lsn) {
-				log_items[i++] = blip;
+			if (INODE_ITEM(blip)->ili_logged &&
+			    blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
+				mlip_changed |= xfs_ail_delete_one(ailp, blip);
+			else {
+				xfs_clear_li_failed(blip);
 			}
-			ASSERT(i <= need_ail);
 		}
-		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
-		xfs_trans_ail_delete_bulk(ailp, log_items, i,
-					  SHUTDOWN_CORRUPT_INCORE);
-	}
 
+		if (mlip_changed) {
+			if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
+				xlog_assign_tail_lsn_locked(ailp->xa_mount);
+			if (list_empty(&ailp->xa_ail))
+				wake_up_all(&ailp->xa_empty);
+		}
+		spin_unlock(&ailp->xa_lock);
+
+		if (mlip_changed)
+			xfs_log_space_wake(ailp->xa_mount);
+	}
 
 	/*
 	 * clean up and unlock the flush lock now we are done. We can clear the
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 73cfc71..bce2e26 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -928,16 +928,15 @@
 	return 0;
 }
 
-STATIC void
-xfs_set_diflags(
+STATIC uint16_t
+xfs_flags2diflags(
 	struct xfs_inode	*ip,
 	unsigned int		xflags)
 {
-	unsigned int		di_flags;
-	uint64_t		di_flags2;
-
 	/* can't set PREALLOC this way, just preserve it */
-	di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
+	uint16_t		di_flags =
+		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
+
 	if (xflags & FS_XFLAG_IMMUTABLE)
 		di_flags |= XFS_DIFLAG_IMMUTABLE;
 	if (xflags & FS_XFLAG_APPEND)
@@ -967,19 +966,24 @@
 		if (xflags & FS_XFLAG_EXTSIZE)
 			di_flags |= XFS_DIFLAG_EXTSIZE;
 	}
-	ip->i_d.di_flags = di_flags;
 
-	/* diflags2 only valid for v3 inodes. */
-	if (ip->i_d.di_version < 3)
-		return;
+	return di_flags;
+}
 
-	di_flags2 = (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
+STATIC uint64_t
+xfs_flags2diflags2(
+	struct xfs_inode	*ip,
+	unsigned int		xflags)
+{
+	uint64_t		di_flags2 =
+		(ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK);
+
 	if (xflags & FS_XFLAG_DAX)
 		di_flags2 |= XFS_DIFLAG2_DAX;
 	if (xflags & FS_XFLAG_COWEXTSIZE)
 		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 
-	ip->i_d.di_flags2 = di_flags2;
+	return di_flags2;
 }
 
 STATIC void
@@ -1005,11 +1009,12 @@
 		inode->i_flags |= S_NOATIME;
 	else
 		inode->i_flags &= ~S_NOATIME;
+#if 0	/* disabled until the flag switching races are sorted out */
 	if (xflags & FS_XFLAG_DAX)
 		inode->i_flags |= S_DAX;
 	else
 		inode->i_flags &= ~S_DAX;
-
+#endif
 }
 
 static int
@@ -1019,6 +1024,7 @@
 	struct fsxattr		*fa)
 {
 	struct xfs_mount	*mp = ip->i_mount;
+	uint64_t		di_flags2;
 
 	/* Can't change realtime flag if any extents are allocated. */
 	if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
@@ -1049,7 +1055,14 @@
 	    !capable(CAP_LINUX_IMMUTABLE))
 		return -EPERM;
 
-	xfs_set_diflags(ip, fa->fsx_xflags);
+	/* diflags2 only valid for v3 inodes. */
+	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
+	if (di_flags2 && ip->i_d.di_version < 3)
+		return -EINVAL;
+
+	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
+	ip->i_d.di_flags2 = di_flags2;
+
 	xfs_diflags_to_linux(ip);
 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index a1247c3..5b81f7f 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -802,7 +802,7 @@
  * Caution: The caller of this function is responsible for calling
  * setattr_prepare() or otherwise verifying the change is fine.
  */
-int
+STATIC int
 xfs_setattr_size(
 	struct xfs_inode	*ip,
 	struct iattr		*iattr)
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 1455b2520..3ebed16 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -363,7 +363,14 @@
 #endif /* DEBUG */
 
 #ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip)			\
+	(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) &&	\
+	 (ip)->i_mount->m_rtdev_targp)
 #else
 #define XFS_IS_REALTIME_INODE(ip) (0)
 #endif
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b57ab34..33c9a3a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -743,15 +743,45 @@
 	struct xfs_mount	*mp)
 {
 	int	error = 0;
+	bool	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
 
 	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
 		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 		return 0;
+	} else if (readonly) {
+		/* Allow unlinked processing to proceed */
+		mp->m_flags &= ~XFS_MOUNT_RDONLY;
 	}
 
+	/*
+	 * During the second phase of log recovery, we need iget and
+	 * iput to behave like they do for an active filesystem.
+	 * xfs_fs_drop_inode needs to be able to prevent the deletion
+	 * of inodes before we're done replaying log items on those
+	 * inodes.  Turn it off immediately after recovery finishes
+	 * so that we don't leak the quota inodes if subsequent mount
+	 * activities fail.
+	 *
+	 * We let all inodes involved in redo item processing end up on
+	 * the LRU instead of being evicted immediately so that if we do
+	 * something to an unlinked inode, the irele won't cause
+	 * premature truncation and freeing of the inode, which results
+	 * in log recovery failure.  We have to evict the unreferenced
+	 * lru inodes after clearing MS_ACTIVE because we don't
+	 * otherwise clean up the lru if there's a subsequent failure in
+	 * xfs_mountfs, which leads to us leaking the inodes if nothing
+	 * else (e.g. quotacheck) references the inodes before the
+	 * mount failure occurs.
+	 */
+	mp->m_super->s_flags |= MS_ACTIVE;
 	error = xlog_recover_finish(mp->m_log);
 	if (!error)
 		xfs_log_work_queue(mp);
+	mp->m_super->s_flags &= ~MS_ACTIVE;
+	evict_inodes(mp->m_super);
+
+	if (readonly)
+		mp->m_flags |= XFS_MOUNT_RDONLY;
 
 	return error;
 }
@@ -801,11 +831,14 @@
 	int		 error;
 
 	/*
-	 * Don't write out unmount record on read-only mounts.
+	 * Don't write out unmount record on norecovery mounts or ro devices.
 	 * Or, if we are doing a forced umount (typically because of IO errors).
 	 */
-	if (mp->m_flags & XFS_MOUNT_RDONLY)
+	if (mp->m_flags & XFS_MOUNT_NORECOVERY ||
+	    xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
+		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
 		return 0;
+	}
 
 	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
 	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
@@ -3304,8 +3337,6 @@
 		 */
 		if (iclog->ic_state & XLOG_STATE_IOERROR)
 			return -EIO;
-		if (log_flushed)
-			*log_flushed = 1;
 	} else {
 
 no_sleep:
@@ -3409,8 +3440,6 @@
 
 				xlog_wait(&iclog->ic_prev->ic_write_wait,
 							&log->l_icloglock);
-				if (log_flushed)
-					*log_flushed = 1;
 				already_slept = 1;
 				goto try_again;
 			}
@@ -3444,9 +3473,6 @@
 			 */
 			if (iclog->ic_state & XLOG_STATE_IOERROR)
 				return -EIO;
-
-			if (log_flushed)
-				*log_flushed = 1;
 		} else {		/* just return */
 			spin_unlock(&log->l_icloglock);
 		}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 9b3d7c7..0590926 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1029,61 +1029,106 @@
 }
 
 /*
- * Check the log tail for torn writes. This is required when torn writes are
- * detected at the head and the head had to be walked back to a previous record.
- * The tail of the previous record must now be verified to ensure the torn
- * writes didn't corrupt the previous tail.
+ * Calculate distance from head to tail (i.e., unused space in the log).
+ */
+static inline int
+xlog_tail_distance(
+	struct xlog	*log,
+	xfs_daddr_t	head_blk,
+	xfs_daddr_t	tail_blk)
+{
+	if (head_blk < tail_blk)
+		return tail_blk - head_blk;
+
+	return tail_blk + (log->l_logBBsize - head_blk);
+}
+
+/*
+ * Verify the log tail. This is particularly important when torn or incomplete
+ * writes have been detected near the front of the log and the head has been
+ * walked back accordingly.
  *
- * Return an error if CRC verification fails as recovery cannot proceed.
+ * We also have to handle the case where the tail was pinned and the head
+ * blocked behind the tail right before a crash. If the tail had been pushed
+ * immediately prior to the crash and the subsequent checkpoint was only
+ * partially written, it's possible it overwrote the last referenced tail in the
+ * log with garbage. This is not a coherency problem because the tail must have
+ * been pushed before it can be overwritten, but appears as log corruption to
+ * recovery because we have no way to know the tail was updated if the
+ * subsequent checkpoint didn't write successfully.
+ *
+ * Therefore, CRC check the log from tail to head. If a failure occurs and the
+ * offending record is within max iclog bufs from the head, walk the tail
+ * forward and retry until a valid tail is found or corruption is detected out
+ * of the range of a possible overwrite.
  */
 STATIC int
 xlog_verify_tail(
 	struct xlog		*log,
 	xfs_daddr_t		head_blk,
-	xfs_daddr_t		tail_blk)
+	xfs_daddr_t		*tail_blk,
+	int			hsize)
 {
 	struct xlog_rec_header	*thead;
 	struct xfs_buf		*bp;
 	xfs_daddr_t		first_bad;
-	int			count;
 	int			error = 0;
 	bool			wrapped;
-	xfs_daddr_t		tmp_head;
+	xfs_daddr_t		tmp_tail;
+	xfs_daddr_t		orig_tail = *tail_blk;
 
 	bp = xlog_get_bp(log, 1);
 	if (!bp)
 		return -ENOMEM;
 
 	/*
-	 * Seek XLOG_MAX_ICLOGS + 1 records past the current tail record to get
-	 * a temporary head block that points after the last possible
-	 * concurrently written record of the tail.
+	 * Make sure the tail points to a record (returns positive count on
+	 * success).
 	 */
-	count = xlog_seek_logrec_hdr(log, head_blk, tail_blk,
-				     XLOG_MAX_ICLOGS + 1, bp, &tmp_head, &thead,
-				     &wrapped);
-	if (count < 0) {
-		error = count;
+	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, bp,
+			&tmp_tail, &thead, &wrapped);
+	if (error < 0)
 		goto out;
+	if (*tail_blk != tmp_tail)
+		*tail_blk = tmp_tail;
+
+	/*
+	 * Run a CRC check from the tail to the head. We can't just check
+	 * MAX_ICLOGS records past the tail because the tail may point to stale
+	 * blocks cleared during the search for the head/tail. These blocks are
+	 * overwritten with zero-length records and thus record count is not a
+	 * reliable indicator of the iclog state before a crash.
+	 */
+	first_bad = 0;
+	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
+				      XLOG_RECOVER_CRCPASS, &first_bad);
+	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
+		int	tail_distance;
+
+		/*
+		 * Is corruption within range of the head? If so, retry from
+		 * the next record. Otherwise return an error.
+		 */
+		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
+		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
+			break;
+
+		/* skip to the next record; returns positive count on success */
+		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, bp,
+				&tmp_tail, &thead, &wrapped);
+		if (error < 0)
+			goto out;
+
+		*tail_blk = tmp_tail;
+		first_bad = 0;
+		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
+					      XLOG_RECOVER_CRCPASS, &first_bad);
 	}
 
-	/*
-	 * If the call above didn't find XLOG_MAX_ICLOGS + 1 records, we ran
-	 * into the actual log head. tmp_head points to the start of the record
-	 * so update it to the actual head block.
-	 */
-	if (count < XLOG_MAX_ICLOGS + 1)
-		tmp_head = head_blk;
-
-	/*
-	 * We now have a tail and temporary head block that covers at least
-	 * XLOG_MAX_ICLOGS records from the tail. We need to verify that these
-	 * records were completely written. Run a CRC verification pass from
-	 * tail to head and return the result.
-	 */
-	error = xlog_do_recovery_pass(log, tmp_head, tail_blk,
-				      XLOG_RECOVER_CRCPASS, &first_bad);
-
+	if (!error && *tail_blk != orig_tail)
+		xfs_warn(log->l_mp,
+		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
+			 orig_tail, *tail_blk);
 out:
 	xlog_put_bp(bp);
 	return error;
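
The xlog_tail_distance() helper introduced above is plain wraparound arithmetic
on the circular log. A standalone userspace sketch with made-up block numbers
(a 10,000-block log) to show both cases:

	#include <assert.h>

	/* distance from head to tail, i.e. unused space in the log */
	static long tail_distance(long logBBsize, long head_blk, long tail_blk)
	{
		if (head_blk < tail_blk)
			return tail_blk - head_blk;
		return tail_blk + (logBBsize - head_blk);
	}

	int main(void)
	{
		/* head physically behind the tail: simple difference */
		assert(tail_distance(10000, 2000, 9000) == 7000);
		/* head past the tail: the free space wraps around the log end */
		assert(tail_distance(10000, 9000, 2000) == 3000);
		return 0;
	}
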
@@ -1143,7 +1188,7 @@
 	 */
 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
 				      XLOG_RECOVER_CRCPASS, &first_bad);
-	if (error == -EFSBADCRC) {
+	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
 		/*
 		 * We've hit a potential torn write. Reset the error and warn
 		 * about it.
@@ -1183,31 +1228,12 @@
 			ASSERT(0);
 			return 0;
 		}
-
-		/*
-		 * Now verify the tail based on the updated head. This is
-		 * required because the torn writes trimmed from the head could
-		 * have been written over the tail of a previous record. Return
-		 * any errors since recovery cannot proceed if the tail is
-		 * corrupt.
-		 *
-		 * XXX: This leaves a gap in truly robust protection from torn
-		 * writes in the log. If the head is behind the tail, the tail
-		 * pushes forward to create some space and then a crash occurs
-		 * causing the writes into the previous record's tail region to
-		 * tear, log recovery isn't able to recover.
-		 *
-		 * How likely is this to occur? If possible, can we do something
-		 * more intelligent here? Is it safe to push the tail forward if
-		 * we can determine that the tail is within the range of the
-		 * torn write (e.g., the kernel can only overwrite the tail if
-		 * it has actually been pushed forward)? Alternatively, could we
-		 * somehow prevent this condition at runtime?
-		 */
-		error = xlog_verify_tail(log, *head_blk, *tail_blk);
 	}
+	if (error)
+		return error;
 
-	return error;
+	return xlog_verify_tail(log, *head_blk, tail_blk,
+				be32_to_cpu((*rhead)->h_size));
 }
 
 /*
@@ -4152,7 +4178,7 @@
 
 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
 
-	hlist_del(&trans->r_list);
+	hlist_del_init(&trans->r_list);
 
 	error = xlog_recover_reorder_trans(log, trans, pass);
 	if (error)
@@ -4354,6 +4380,8 @@
 	xlog_recover_item_t	*item, *n;
 	int			i;
 
+	hlist_del_init(&trans->r_list);
+
 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
 		/* Free the regions in the item. */
 		list_del(&item->ri_list);
@@ -4799,12 +4827,16 @@
 	int			error = 0;
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp;
+#if defined(DEBUG) || defined(XFS_WARN)
 	xfs_lsn_t		last_lsn;
+#endif
 
 	ailp = log->l_ailp;
 	spin_lock(&ailp->xa_lock);
 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+#if defined(DEBUG) || defined(XFS_WARN)
 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
+#endif
 	while (lip != NULL) {
 		/*
 		 * We're done when we see something other than an intent.
@@ -5214,7 +5246,7 @@
 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
 {
 	xlog_rec_header_t	*rhead;
-	xfs_daddr_t		blk_no;
+	xfs_daddr_t		blk_no, rblk_no;
 	xfs_daddr_t		rhead_blk;
 	char			*offset;
 	xfs_buf_t		*hbp, *dbp;
@@ -5222,11 +5254,15 @@
 	int			error2 = 0;
 	int			bblks, split_bblks;
 	int			hblks, split_hblks, wrapped_hblks;
+	int			i;
 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
 	LIST_HEAD		(buffer_list);
 
 	ASSERT(head_blk != tail_blk);
-	rhead_blk = 0;
+	blk_no = rhead_blk = tail_blk;
+
+	for (i = 0; i < XLOG_RHASH_SIZE; i++)
+		INIT_HLIST_HEAD(&rhash[i]);
 
 	/*
 	 * Read the header of the tail block and get the iclog buffer size from
@@ -5301,7 +5337,6 @@
 	}
 
 	memset(rhash, 0, sizeof(rhash));
-	blk_no = rhead_blk = tail_blk;
 	if (tail_blk > head_blk) {
 		/*
 		 * Perform recovery around the end of the physical log.
@@ -5363,9 +5398,19 @@
 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
 			blk_no += hblks;
 
-			/* Read in data for log record */
-			if (blk_no + bblks <= log->l_logBBsize) {
-				error = xlog_bread(log, blk_no, bblks, dbp,
+			/*
+			 * Read the log record data in multiple reads if it
+			 * wraps around the end of the log. Note that if the
+			 * header already wrapped, blk_no could point past the
+			 * end of the log. The record data is contiguous in
+			 * that case.
+			 */
+			if (blk_no + bblks <= log->l_logBBsize ||
+			    blk_no >= log->l_logBBsize) {
+				/* mod blk_no in case the header wrapped and
+				 * pushed it beyond the end of the log */
+				rblk_no = do_mod(blk_no, log->l_logBBsize);
+				error = xlog_bread(log, rblk_no, bblks, dbp,
 						   &offset);
 				if (error)
 					goto bread_err2;
@@ -5464,6 +5509,19 @@
 	if (error && first_bad)
 		*first_bad = rhead_blk;
 
+	/*
+	 * Transactions are freed at commit time but transactions without commit
+	 * records on disk are never committed. Free any that may be left in the
+	 * hash table.
+	 */
+	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
+		struct hlist_node	*tmp;
+		struct xlog_recover	*trans;
+
+		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
+			xlog_recover_free_trans(trans);
+	}
+
 	return error ? error : error2;
 }
 
@@ -5542,6 +5600,8 @@
 	xfs_buf_t	*bp;
 	xfs_sb_t	*sbp;
 
+	trace_xfs_log_recover(log, head_blk, tail_blk);
+
 	/*
 	 * First replay the images in the log.
 	 */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 13796f2..d4ce8d2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -925,15 +925,6 @@
 	}
 
 	/*
-	 * During the second phase of log recovery, we need iget and
-	 * iput to behave like they do for an active filesystem.
-	 * xfs_fs_drop_inode needs to be able to prevent the deletion
-	 * of inodes before we're done replaying log items on those
-	 * inodes.
-	 */
-	mp->m_super->s_flags |= MS_ACTIVE;
-
-	/*
 	 * Finish recovering the file system.  This part needed to be delayed
 	 * until after the root and real-time bitmap inodes were consistently
 	 * read in.
@@ -1008,12 +999,13 @@
  out_quota:
 	xfs_qm_unmount_quotas(mp);
  out_rtunmount:
-	mp->m_super->s_flags &= ~MS_ACTIVE;
 	xfs_rtunmount_inodes(mp);
  out_rele_rip:
 	IRELE(rip);
 	cancel_delayed_work_sync(&mp->m_reclaim_work);
 	xfs_reclaim_inodes(mp, SYNC_WAIT);
+	/* Clean out dquots that might be in memory after quotacheck. */
+	xfs_qm_unmount(mp);
  out_log_dealloc:
 	mp->m_flags |= XFS_MOUNT_UNMOUNTING;
 	xfs_log_mount_cancel(mp);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 8b9a9f1..1fdd3fa 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -111,6 +111,9 @@
 			skipped = 0;
 			break;
 		}
+		/* we're done if id overflows back to zero */
+		if (!next_index)
+			break;
 	}
 
 	if (skipped) {
@@ -1247,6 +1250,7 @@
 	struct xfs_dquot	*dqp,
 	void			*data)
 {
+	struct xfs_mount	*mp = dqp->q_mount;
 	struct list_head	*buffer_list = data;
 	struct xfs_buf		*bp = NULL;
 	int			error = 0;
@@ -1257,7 +1261,32 @@
 	if (!XFS_DQ_IS_DIRTY(dqp))
 		goto out_unlock;
 
-	xfs_dqflock(dqp);
+	/*
+	 * The only way the dquot is already flush locked by the time quotacheck
+	 * gets here is if reclaim flushed it before the dqadjust walk dirtied
+	 * it for the final time. Quotacheck collects all dquot bufs in the
+	 * local delwri queue before dquots are dirtied, so reclaim can't have
+	 * possibly queued it for I/O. The only way out is to push the buffer to
+	 * cycle the flush lock.
+	 */
+	if (!xfs_dqflock_nowait(dqp)) {
+		/* buf is pinned in-core by delwri list */
+		DEFINE_SINGLE_BUF_MAP(map, dqp->q_blkno,
+				      mp->m_quotainfo->qi_dqchunklen);
+		bp = _xfs_buf_find(mp->m_ddev_targp, &map, 1, 0, NULL);
+		if (!bp) {
+			error = -EINVAL;
+			goto out_unlock;
+		}
+		xfs_buf_unlock(bp);
+
+		xfs_buf_delwri_pushbuf(bp, buffer_list);
+		xfs_buf_rele(bp);
+
+		error = -EAGAIN;
+		goto out_unlock;
+	}
+
 	error = xfs_qm_dqflush(dqp, &bp);
 	if (error)
 		goto out_unlock;
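
The quotacheck hunk above replaces a blocking flush lock with a try-lock that, on contention, pushes the backing buffer and returns -EAGAIN so the dquot is revisited later. The standalone sketch below mirrors that control flow with a pthread mutex; the lock, the push step and every name are invented for illustration and are not the XFS primitives (build with cc -pthread).

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

static void push_backing_buffer(void)
{
	/* In the real code this cycles the buffer through the delwri queue
	 * so that its completion releases the flush lock; here we simply
	 * drop the lock that the simulated contender holds. */
	pthread_mutex_unlock(&flush_lock);
}

static int flush_one(void)
{
	if (pthread_mutex_trylock(&flush_lock) != 0) {
		push_backing_buffer();
		return -EAGAIN;		/* caller retries this item */
	}
	/* ... the actual flush work would happen here ... */
	pthread_mutex_unlock(&flush_lock);
	return 0;
}

int main(void)
{
	int ret;

	pthread_mutex_lock(&flush_lock);	/* simulate a contended lock */

	do {
		ret = flush_one();
	} while (ret == -EAGAIN);

	printf("flushed, ret=%d\n", ret);
	return 0;
}
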
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 29a75ec..0015c19 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -169,6 +169,8 @@
 	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
 	if (error)
 		return error;
+	if (!agbp)
+		return -ENOMEM;
 
 	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
 
@@ -333,7 +335,7 @@
 	struct xfs_defer_ops		*dfops)
 {
 	struct xfs_bmbt_irec		irec = *imap;
-	xfs_fsblock_t			first_block;
+	xfs_fsblock_t			first_block = NULLFSBLOCK;
 	int				nimaps = 1;
 
 	if (imap->br_state == XFS_EXT_NORM)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 882fb85..67d589e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1214,7 +1214,7 @@
 	tmp_mp->m_super = sb;
 	error = xfs_parseargs(tmp_mp, options);
 	xfs_free_fsname(tmp_mp);
-	kfree(tmp_mp);
+	kmem_free(tmp_mp);
 
 	return error;
 }
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 828f383..bdf69e1 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -366,6 +366,7 @@
 DEFINE_BUF_EVENT(xfs_buf_delwri_queue);
 DEFINE_BUF_EVENT(xfs_buf_delwri_queued);
 DEFINE_BUF_EVENT(xfs_buf_delwri_split);
+DEFINE_BUF_EVENT(xfs_buf_delwri_pushbuf);
 DEFINE_BUF_EVENT(xfs_buf_get_uncached);
 DEFINE_BUF_EVENT(xfs_bdstrat_shut);
 DEFINE_BUF_EVENT(xfs_buf_item_relse);
@@ -519,7 +520,6 @@
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_ordered);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered);
 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
@@ -1990,6 +1990,24 @@
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
 DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
 
+TRACE_EVENT(xfs_log_recover,
+	TP_PROTO(struct xlog *log, xfs_daddr_t headblk, xfs_daddr_t tailblk),
+	TP_ARGS(log, headblk, tailblk),
+	TP_STRUCT__entry(
+		__field(dev_t, dev)
+		__field(xfs_daddr_t, headblk)
+		__field(xfs_daddr_t, tailblk)
+	),
+	TP_fast_assign(
+		__entry->dev = log->l_mp->m_super->s_dev;
+		__entry->headblk = headblk;
+		__entry->tailblk = tailblk;
+	),
+	TP_printk("dev %d:%d headblk 0x%llx tailblk 0x%llx",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->headblk,
+		  __entry->tailblk)
+)
+
 TRACE_EVENT(xfs_log_recover_record,
 	TP_PROTO(struct xlog *log, struct xlog_rec_header *rhead, int pass),
 	TP_ARGS(log, rhead, pass),
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 98024cb..5669cf0 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -50,6 +50,7 @@
 	struct xfs_ail			*li_ailp;	/* ptr to AIL */
 	uint				li_type;	/* item type */
 	uint				li_flags;	/* misc flags */
+	struct xfs_buf			*li_buf;	/* real buffer pointer */
 	struct xfs_log_item		*li_bio_list;	/* buffer item list */
 	void				(*li_cb)(struct xfs_buf *,
 						 struct xfs_log_item *);
@@ -65,11 +66,13 @@
 } xfs_log_item_t;
 
 #define	XFS_LI_IN_AIL	0x1
-#define XFS_LI_ABORTED	0x2
+#define	XFS_LI_ABORTED	0x2
+#define	XFS_LI_FAILED	0x4
 
 #define XFS_LI_FLAGS \
 	{ XFS_LI_IN_AIL,	"IN_AIL" }, \
-	{ XFS_LI_ABORTED,	"ABORTED" }
+	{ XFS_LI_ABORTED,	"ABORTED" }, \
+	{ XFS_LI_FAILED,	"FAILED" }
 
 struct xfs_item_ops {
 	void (*iop_size)(xfs_log_item_t *, int *, int *);
@@ -80,6 +83,7 @@
 	void (*iop_unlock)(xfs_log_item_t *);
 	xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
 	void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
+	void (*iop_error)(xfs_log_item_t *, xfs_buf_t *);
 };
 
 void	xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
@@ -213,12 +217,14 @@
 void		xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
-void		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
+bool		xfs_trans_ordered_buf(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
 void		xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
 void		xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
 void		xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
-void		xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
+void		xfs_trans_log_buf(struct xfs_trans *, struct xfs_buf *, uint,
+				  uint);
+void		xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
 void		xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
 
 void		xfs_extent_free_init_defer_op(void);
@@ -277,6 +283,6 @@
 		struct xfs_bud_log_item *rudp, struct xfs_defer_ops *dfops,
 		enum xfs_bmap_intent_type type, struct xfs_inode *ip,
 		int whichfork, xfs_fileoff_t startoff, xfs_fsblock_t startblock,
-		xfs_filblks_t blockcount, xfs_exntst_t state);
+		xfs_filblks_t *blockcount, xfs_exntst_t state);
 
 #endif	/* __XFS_TRANS_H__ */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index d6c9c3e..70f5ab0 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -684,8 +684,24 @@
 	}
 }
 
-/*
- * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
+bool
+xfs_ail_delete_one(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	*lip)
+{
+	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
+
+	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
+	xfs_ail_delete(ailp, lip);
+	xfs_clear_li_failed(lip);
+	lip->li_flags &= ~XFS_LI_IN_AIL;
+	lip->li_lsn = 0;
+
+	return mlip == lip;
+}
+
+/**
+ * Remove a log item from the AIL
  *
 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
  * removed from the AIL. The caller is already holding the AIL lock, and done
@@ -706,52 +722,36 @@
  * before returning.
  */
 void
-xfs_trans_ail_delete_bulk(
+xfs_trans_ail_delete(
 	struct xfs_ail		*ailp,
-	struct xfs_log_item	**log_items,
-	int			nr_items,
+	struct xfs_log_item	*lip,
 	int			shutdown_type) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*mlip;
-	int			mlip_changed = 0;
-	int			i;
+	struct xfs_mount	*mp = ailp->xa_mount;
+	bool			mlip_changed;
 
-	mlip = xfs_ail_min(ailp);
-
-	for (i = 0; i < nr_items; i++) {
-		struct xfs_log_item *lip = log_items[i];
-		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
-			struct xfs_mount	*mp = ailp->xa_mount;
-
-			spin_unlock(&ailp->xa_lock);
-			if (!XFS_FORCED_SHUTDOWN(mp)) {
-				xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
-		"%s: attempting to delete a log item that is not in the AIL",
-						__func__);
-				xfs_force_shutdown(mp, shutdown_type);
-			}
-			return;
+	if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+		spin_unlock(&ailp->xa_lock);
+		if (!XFS_FORCED_SHUTDOWN(mp)) {
+			xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
+	"%s: attempting to delete a log item that is not in the AIL",
+					__func__);
+			xfs_force_shutdown(mp, shutdown_type);
 		}
-
-		trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
-		xfs_ail_delete(ailp, lip);
-		lip->li_flags &= ~XFS_LI_IN_AIL;
-		lip->li_lsn = 0;
-		if (mlip == lip)
-			mlip_changed = 1;
+		return;
 	}
 
+	mlip_changed = xfs_ail_delete_one(ailp, lip);
 	if (mlip_changed) {
-		if (!XFS_FORCED_SHUTDOWN(ailp->xa_mount))
-			xlog_assign_tail_lsn_locked(ailp->xa_mount);
+		if (!XFS_FORCED_SHUTDOWN(mp))
+			xlog_assign_tail_lsn_locked(mp);
 		if (list_empty(&ailp->xa_ail))
 			wake_up_all(&ailp->xa_empty);
-		spin_unlock(&ailp->xa_lock);
-
-		xfs_log_space_wake(ailp->xa_mount);
-	} else {
-		spin_unlock(&ailp->xa_lock);
 	}
+
+	spin_unlock(&ailp->xa_lock);
+	if (mlip_changed)
+		xfs_log_space_wake(ailp->xa_mount);
 }
 
 int
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
index 6408e7d..14543d9 100644
--- a/fs/xfs/xfs_trans_bmap.c
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -63,7 +63,7 @@
 	int				whichfork,
 	xfs_fileoff_t			startoff,
 	xfs_fsblock_t			startblock,
-	xfs_filblks_t			blockcount,
+	xfs_filblks_t			*blockcount,
 	xfs_exntst_t			state)
 {
 	int				error;
@@ -196,16 +196,23 @@
 	void				**state)
 {
 	struct xfs_bmap_intent		*bmap;
+	xfs_filblks_t			count;
 	int				error;
 
 	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
+	count = bmap->bi_bmap.br_blockcount;
 	error = xfs_trans_log_finish_bmap_update(tp, done_item, dop,
 			bmap->bi_type,
 			bmap->bi_owner, bmap->bi_whichfork,
 			bmap->bi_bmap.br_startoff,
 			bmap->bi_bmap.br_startblock,
-			bmap->bi_bmap.br_blockcount,
+			&count,
 			bmap->bi_bmap.br_state);
+	if (!error && count > 0) {
+		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
+		bmap->bi_bmap.br_blockcount = count;
+		return -EAGAIN;
+	}
 	kmem_free(bmap);
 	return error;
 }
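
The bmap-update change above turns blockcount into an in/out parameter so a partially completed intent can hand the remaining count back and be re-run via -EAGAIN. Below is a standalone sketch of that partial-completion pattern; the per-pass limit and all names are made up.

#include <errno.h>
#include <stdio.h>

#define MAX_PER_PASS 16		/* invented per-pass work limit */

/* Process up to MAX_PER_PASS units; on partial progress, write the
 * remainder back through the pointer and return -EAGAIN so the caller
 * re-queues the item. */
static int finish_one(unsigned int *count)
{
	unsigned int done = *count < MAX_PER_PASS ? *count : MAX_PER_PASS;

	/* ... the real work on 'done' units would go here ... */
	*count -= done;

	return *count ? -EAGAIN : 0;
}

int main(void)
{
	unsigned int count = 40;
	int passes = 0;
	int ret;

	do {
		ret = finish_one(&count);
		passes++;
	} while (ret == -EAGAIN);

	printf("finished in %d passes, ret=%d\n", passes, ret);	/* 3 passes */
	return 0;
}
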
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 8ee29ca..3ba7a96 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -356,6 +356,7 @@
 		 xfs_buf_t	*bp)
 {
 	xfs_buf_log_item_t	*bip;
+	int			freed;
 
 	/*
 	 * Default to a normal brelse() call if the tp is NULL.
@@ -419,16 +420,22 @@
 	/*
 	 * Drop our reference to the buf log item.
 	 */
-	atomic_dec(&bip->bli_refcount);
+	freed = atomic_dec_and_test(&bip->bli_refcount);
 
 	/*
-	 * If the buf item is not tracking data in the log, then
-	 * we must free it before releasing the buffer back to the
-	 * free pool.  Before releasing the buffer to the free pool,
-	 * clear the transaction pointer in b_fsprivate2 to dissolve
-	 * its relation to this transaction.
+	 * If the buf item is not tracking data in the log, then we must free it
+	 * before releasing the buffer back to the free pool.
+	 *
+	 * If the fs has shutdown and we dropped the last reference, it may fall
+	 * on us to release a (possibly dirty) bli if it never made it to the
+	 * AIL (e.g., the aborted unpin already happened and didn't release it
+	 * due to our reference). Since we're already shutdown and need xa_lock,
+	 * just force remove from the AIL and release the bli here.
 	 */
-	if (!xfs_buf_item_dirty(bip)) {
+	if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
+		xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
+		xfs_buf_item_relse(bp);
+	} else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
 /***
 		ASSERT(bp->b_pincount == 0);
 ***/
@@ -486,25 +493,17 @@
 }
 
 /*
- * This is called to mark bytes first through last inclusive of the given
- * buffer as needing to be logged when the transaction is committed.
- * The buffer must already be associated with the given transaction.
- *
- * First and last are numbers relative to the beginning of this buffer,
- * so the first byte in the buffer is numbered 0 regardless of the
- * value of b_blkno.
+ * Mark a buffer dirty in the transaction.
  */
 void
-xfs_trans_log_buf(xfs_trans_t	*tp,
-		  xfs_buf_t	*bp,
-		  uint		first,
-		  uint		last)
+xfs_trans_dirty_buf(
+	struct xfs_trans	*tp,
+	struct xfs_buf		*bp)
 {
-	xfs_buf_log_item_t	*bip = bp->b_fspriv;
+	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 
 	ASSERT(bp->b_transp == tp);
 	ASSERT(bip != NULL);
-	ASSERT(first <= last && last < BBTOB(bp->b_length));
 	ASSERT(bp->b_iodone == NULL ||
 	       bp->b_iodone == xfs_buf_iodone_callbacks);
 
@@ -524,8 +523,6 @@
 	bp->b_iodone = xfs_buf_iodone_callbacks;
 	bip->bli_item.li_cb = xfs_buf_iodone;
 
-	trace_xfs_trans_log_buf(bip);
-
 	/*
 	 * If we invalidated the buffer within this transaction, then
 	 * cancel the invalidation now that we're dirtying the buffer
@@ -538,17 +535,37 @@
 		bp->b_flags &= ~XBF_STALE;
 		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
 	}
+	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
 
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+}
 
-	/*
-	 * If we have an ordered buffer we are not logging any dirty range but
-	 * it still needs to be marked dirty and that it has been logged.
-	 */
-	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
-	if (!(bip->bli_flags & XFS_BLI_ORDERED))
-		xfs_buf_item_log(bip, first, last);
+/*
+ * This is called to mark bytes first through last inclusive of the given
+ * buffer as needing to be logged when the transaction is committed.
+ * The buffer must already be associated with the given transaction.
+ *
+ * First and last are numbers relative to the beginning of this buffer,
+ * so the first byte in the buffer is numbered 0 regardless of the
+ * value of b_blkno.
+ */
+void
+xfs_trans_log_buf(
+	struct xfs_trans	*tp,
+	struct xfs_buf		*bp,
+	uint			first,
+	uint			last)
+{
+	struct xfs_buf_log_item	*bip = bp->b_fspriv;
+
+	ASSERT(first <= last && last < BBTOB(bp->b_length));
+	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
+
+	xfs_trans_dirty_buf(tp, bp);
+
+	trace_xfs_trans_log_buf(bip);
+	xfs_buf_item_log(bip, first, last);
 }
 
 
@@ -701,14 +718,13 @@
 }
 
 /*
- * Mark the buffer as ordered for this transaction. This means
- * that the contents of the buffer are not recorded in the transaction
- * but it is tracked in the AIL as though it was. This allows us
- * to record logical changes in transactions rather than the physical
- * changes we make to the buffer without changing writeback ordering
- * constraints of metadata buffers.
+ * Mark the buffer as ordered for this transaction. This means that the contents
+ * of the buffer are not recorded in the transaction, but the buffer is tracked
+ * in the AIL as though it were. This allows us to record logical changes in
+ * transactions rather than the physical changes we make to the buffer, without
+ * changing the writeback ordering constraints of metadata buffers.
  */
-void
+bool
 xfs_trans_ordered_buf(
 	struct xfs_trans	*tp,
 	struct xfs_buf		*bp)
@@ -719,8 +735,18 @@
 	ASSERT(bip != NULL);
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 
+	if (xfs_buf_item_dirty_format(bip))
+		return false;
+
 	bip->bli_flags |= XFS_BLI_ORDERED;
 	trace_xfs_buf_item_ordered(bip);
+
+	/*
+	 * We don't log a dirty range of an ordered buffer, but it still needs
+	 * to be marked dirty and flagged as logged.
+	 */
+	xfs_trans_dirty_buf(tp, bp);
+	return true;
 }
 
 /*
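
xfs_trans_ordered_buf() above now declines to order a buffer whose log format is already dirty and reports that through its new bool return value. The fragment below is a hedged sketch of how a caller could honour that contract; the helper, its arguments and the fallback to xfs_trans_log_buf() are illustrative assumptions rather than code from this patch, and the includes are abbreviated.

#include "xfs.h"		/* kernel-tree context assumed, not standalone */
#include "xfs_trans.h"

/* Hypothetical caller: try to order the buffer; if the bli already carries
 * dirty log-format state, fall back to logging the whole range instead. */
static void log_or_order_buf(struct xfs_trans *tp, struct xfs_buf *bp,
			     uint len)
{
	if (!xfs_trans_ordered_buf(tp, bp))
		xfs_trans_log_buf(tp, bp, 0, len - 1);
}
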
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 49931b7..b317a36 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -106,18 +106,9 @@
 	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 }
 
-void	xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
-				struct xfs_log_item **log_items, int nr_items,
-				int shutdown_type)
-				__releases(ailp->xa_lock);
-static inline void
-xfs_trans_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	int		shutdown_type) __releases(ailp->xa_lock)
-{
-	xfs_trans_ail_delete_bulk(ailp, &lip, 1, shutdown_type);
-}
+bool xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
+void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip,
+		int shutdown_type) __releases(ailp->xa_lock);
 
 static inline void
 xfs_trans_ail_remove(
@@ -173,4 +164,35 @@
 	*dst = *src;
 }
 #endif
+
+static inline void
+xfs_clear_li_failed(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_buf	*bp = lip->li_buf;
+
+	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
+	lockdep_assert_held(&lip->li_ailp->xa_lock);
+
+	if (lip->li_flags & XFS_LI_FAILED) {
+		lip->li_flags &= ~XFS_LI_FAILED;
+		lip->li_buf = NULL;
+		xfs_buf_rele(bp);
+	}
+}
+
+static inline void
+xfs_set_li_failed(
+	struct xfs_log_item	*lip,
+	struct xfs_buf		*bp)
+{
+	lockdep_assert_held(&lip->li_ailp->xa_lock);
+
+	if (!(lip->li_flags & XFS_LI_FAILED)) {
+		xfs_buf_hold(bp);
+		lip->li_flags |= XFS_LI_FAILED;
+		lip->li_buf = bp;
+	}
+}
+
 #endif	/* __XFS_TRANS_PRIV_H__ */
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2..5d2add1 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
 #define parent_node(node)	((void)(node),0)
 #endif
 #ifndef cpumask_of_node
-#define cpumask_of_node(node)	((void)node, cpu_online_mask)
+  #ifdef CONFIG_NEED_MULTIPLE_NODES
+    #define cpumask_of_node(node)	((node) == 0 ? cpu_online_mask : cpu_none_mask)
+  #else
+    #define cpumask_of_node(node)	((void)node, cpu_online_mask)
+  #endif
 #endif
 #ifndef pcibus_to_node
 #define pcibus_to_node(bus)	((void)(bus), -1)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 31e1d63..dc81e52 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -60,6 +60,22 @@
 #define ALIGN_FUNCTION()  . = ALIGN(8)
 
 /*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define BSS_MAIN .bss
+#endif
+
+/*
  * Align to a 32 byte boundary equal to the
  * alignment gcc 4.5 uses for a struct
  */
@@ -198,12 +214,9 @@
 
 /*
  * .data section
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
- * .data.identifier which needs to be pulled in with .data, but don't want to
- * pull in .data..stuff which has its own requirements. Same for bss.
  */
 #define DATA_DATA							\
-	*(.data .data.[0-9a-zA-Z_]*)					\
+	*(DATA_MAIN)							\
 	*(.ref.data)							\
 	*(.data..shared_aligned) /* percpu related */			\
 	MEM_KEEP(init.data)						\
@@ -436,16 +449,17 @@
 		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
 	}
 
-/* .text section. Map to function alignment to avoid address changes
+/*
+ * .text section. Map to function alignment to avoid address changes
  * during second ld run in second ld pass when generating System.map
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
- * .text.identifier which needs to be pulled in with .text , but some
- * architectures define .text.foo which is not intended to be pulled in here.
- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
-		*(.text.hot .text .text.fixup .text.unlikely)		\
+		*(.text.hot TEXT_MAIN .text.fixup .text.unlikely)	\
 		*(.ref.text)						\
 	MEM_KEEP(init.text)						\
 	MEM_KEEP(exit.text)						\
@@ -613,7 +627,7 @@
 		BSS_FIRST_SECTIONS					\
 		*(.bss..page_aligned)					\
 		*(.dynbss)						\
-		*(.bss .bss.[0-9a-zA-Z_]*)				\
+		*(BSS_MAIN)						\
 		*(COMMON)						\
 	}
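
The TEXT_MAIN/DATA_MAIN/BSS_MAIN macros above exist because -ffunction-sections and -fdata-sections put every function and object into its own .text.<name>/.data.<name>/.bss.<name> section, which the [0-9a-zA-Z_]* patterns then collect while leaving the kernel's deliberately named .text..X/.data..X sections alone. The small file below shows the effect; build it with gcc -c -ffunction-sections -fdata-sections demo.c and inspect the result with objdump -h demo.o. The file and symbol names are arbitrary.

/* demo.c - with -ffunction-sections/-fdata-sections each of these lands in
 * its own section: .text.demo_add, .data.demo_counter, .bss.demo_scratch.
 * Without the flags they would all share plain .text/.data/.bss. */

int demo_counter = 42;		/* -> .data.demo_counter */
int demo_scratch[16];		/* -> .bss.demo_scratch  */

int demo_add(int a, int b)	/* -> .text.demo_add     */
{
	return a + b + demo_counter;
}
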
 
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index bc87beb..e20175d 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -251,7 +251,8 @@
 #define	MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
 #define	MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
 #define	MSM_BUS_MASTER_GIC 149
-#define	MSM_BUS_MASTER_MASTER_LAST 150
+#define	MSM_BUS_MASTER_EMMC 150
+#define	MSM_BUS_MASTER_MASTER_LAST 151
 
 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -767,6 +768,7 @@
 #define	ICBID_MASTER_CNOC_A2NOC 146
 #define	ICBID_MASTER_WLAN 147
 #define	ICBID_MASTER_MSS_CE 148
+#define	ICBID_MASTER_EMMC 149
 
 #define	ICBID_SLAVE_EBI1 0
 #define	ICBID_SLAVE_APPSS_L2 1
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a13b031..3101141 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,7 @@
 	 */
 	s64 min_value;
 	u64 max_value;
+	bool value_from_signed;
 };
 
 enum bpf_stack_slot_type {
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9ddaf05..6be0299 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -10,7 +10,6 @@
 	CPUHP_PERF_X86_PREPARE,
 	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_AMD_UNCORE_PREP,
-	CPUHP_PERF_X86_RAPL_PREP,
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
 	CPUHP_PERF_SUPERH,
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index bfc204e..cd32a49 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -16,6 +16,19 @@
 
 #ifdef CONFIG_CPUSETS
 
+/*
+ * Static branch rewrites can happen in an arbitrary order for a given
+ * key. In code paths where we need to loop with read_mems_allowed_begin() and
+ * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
+ * to ensure that begin() always gets rewritten before retry() in the
+ * disabled -> enabled transition. If not, then if local irqs are disabled
+ * around the loop, we can deadlock since retry() would always be
+ * comparing the latest value of the mems_allowed seqcount against 0 as
+ * begin() still would see cpusets_enabled() as false. The enabled -> disabled
+ * transition should happen in reverse order for the same reasons (want to stop
+ * looking at real value of mems_allowed.sequence in retry() first).
+ */
+extern struct static_key_false cpusets_pre_enable_key;
 extern struct static_key_false cpusets_enabled_key;
 static inline bool cpusets_enabled(void)
 {
@@ -30,12 +43,14 @@
 
 static inline void cpuset_inc(void)
 {
+	static_branch_inc(&cpusets_pre_enable_key);
 	static_branch_inc(&cpusets_enabled_key);
 }
 
 static inline void cpuset_dec(void)
 {
 	static_branch_dec(&cpusets_enabled_key);
+	static_branch_dec(&cpusets_pre_enable_key);
 }
 
 extern int cpuset_init(void);
@@ -113,7 +128,7 @@
  */
 static inline unsigned int read_mems_allowed_begin(void)
 {
-	if (!cpusets_enabled())
+	if (!static_branch_unlikely(&cpusets_pre_enable_key))
 		return 0;
 
 	return read_seqcount_begin(&current->mems_allowed_seq);
@@ -127,7 +142,7 @@
  */
 static inline bool read_mems_allowed_retry(unsigned int seq)
 {
-	if (!cpusets_enabled())
+	if (!static_branch_unlikely(&cpusets_enabled_key))
 		return false;
 
 	return read_seqcount_retry(&current->mems_allowed_seq, seq);
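
The two-key comment above is about pairing: read_mems_allowed_begin() must start observing the real sequence counter no later than read_mems_allowed_retry() does, or retry() can keep comparing a live counter against the 0 that begin() returned and never terminate. The userspace sketch below mimics that pairing with plain C11 atomics; every name and value is invented and this is not the kernel's static-key machinery.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint mems_seq;		/* stand-in for mems_allowed_seq       */
static atomic_bool pre_enable_key;	/* stand-in for cpusets_pre_enable_key */
static atomic_bool enabled_key;		/* stand-in for cpusets_enabled_key    */

static unsigned int read_begin(void)
{
	if (!atomic_load(&pre_enable_key))
		return 0;		/* "static branch" still disabled */
	return atomic_load(&mems_seq);
}

static bool read_retry(unsigned int seq)
{
	if (!atomic_load(&enabled_key))
		return false;
	return atomic_load(&mems_seq) != seq;
}

int main(void)
{
	/*
	 * Enable in the order the comment above requires: the begin() key
	 * first, then the retry() key.  If enabled_key were flipped while
	 * pre_enable_key was still false and mems_seq was non-zero, begin()
	 * would keep returning 0, retry() would keep seeing mems_seq != 0,
	 * and the loop below would spin forever.
	 */
	atomic_store(&pre_enable_key, true);
	atomic_store(&enabled_key, true);
	atomic_store(&mems_seq, 2);

	unsigned int seq;

	do {
		seq = read_begin();
		/* ... a consistent read of mems_allowed would go here ... */
	} while (read_retry(seq));

	printf("reader completed at sequence %u\n", seq);
	return 0;
}
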
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index 560f047..64ae548 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,19 +41,17 @@
 };
 
 #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
-int fast_smmu_attach_device(struct device *dev,
+int fast_smmu_init_mapping(struct device *dev,
 			    struct dma_iommu_mapping *mapping);
-void fast_smmu_detach_device(struct device *dev,
-			     struct dma_iommu_mapping *mapping);
+void fast_smmu_release_mapping(struct kref *kref);
 #else
-static inline int fast_smmu_attach_device(struct device *dev,
+static inline int fast_smmu_init_mapping(struct device *dev,
 					  struct dma_iommu_mapping *mapping)
 {
 	return -ENODEV;
 }
 
-static inline void fast_smmu_detach_device(struct device *dev,
-					   struct dma_iommu_mapping *mapping)
+static inline void fast_smmu_release_mapping(struct kref *kref)
 {
 }
 #endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 026aa0a..18bd249 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -941,9 +941,9 @@
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
    limits, otherwise bad things can happen in VM. */ 
 #if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE	(((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE	((loff_t)ULONG_MAX << PAGE_SHIFT)
 #elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE 	((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE 	((loff_t)LLONG_MAX)
 #endif
 
 #define FL_POSIX	1
@@ -2782,6 +2782,7 @@
 #endif
 extern void unlock_new_inode(struct inode *);
 extern unsigned int get_next_ino(void);
+extern void evict_inodes(struct super_block *sb);
 
 extern void __iget(struct inode * inode);
 extern void iget_failed(struct inode *);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0b8aedf..99eb77a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -142,6 +142,7 @@
 	DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
 	DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
 	DOMAIN_ATTR_CB_STALL_DISABLE,
+	DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
 	DOMAIN_ATTR_MAX,
 };
 
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index b7e3431..c122409 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -450,6 +450,8 @@
 	return !!(val & ICC_SRE_EL1_SRE);
 }
 
+void gic_show_pending_irqs(void);
+unsigned int get_gic_highpri_irq(void);
 #endif
 
 #endif
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 37e5178..600c905 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -64,6 +64,7 @@
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 #define __init_memblock __meminit
 #define __initdata_memblock __meminitdata
+void memblock_discard(void);
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -77,8 +78,6 @@
 					int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -114,6 +113,9 @@
 void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
 				phys_addr_t *out_end);
 
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3139ea4..5942478 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -515,6 +515,10 @@
 	 */
 	bool tlb_flush_pending;
 #endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+	/* See flush_tlb_batched_pending() */
+	bool tlb_flush_batched;
+#endif
 	struct uprobes_state uprobes_state;
 #ifdef CONFIG_X86_INTEL_MPX
 	/* address of the bounds directory */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bad4f8c..1e07ed2 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -226,6 +226,7 @@
 	unsigned int		resp_arg;
 	unsigned int		dev_pend_tasks;
 	bool			resp_err;
+	bool			skip_err_handling;
 	int			tag; /* used for command queuing */
 	u8			ctx_id;
 };
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
index c298666..a584e0a 100644
--- a/include/linux/msm-bus.h
+++ b/include/linux/msm-bus.h
@@ -130,6 +130,8 @@
 					uint32_t cl, unsigned int index);
 int msm_bus_scale_query_tcs_cmd_all(struct msm_bus_tcs_handle *tcs_handle,
 					uint32_t cl);
+int msm_bus_noc_throttle_wa(bool enable);
+int msm_bus_noc_priority_wa(bool enable);
 
 /* AXI Port configuration APIs */
 int msm_bus_axi_porthalt(int master_port);
@@ -211,6 +213,16 @@
 	return 0;
 }
 
+static inline int msm_bus_noc_throttle_wa(bool enable)
+{
+	return 0;
+}
+
+static inline int msm_bus_noc_priority_wa(bool enable)
+{
+	return 0;
+}
+
 #endif
 
 #if defined(CONFIG_OF) && defined(CONFIG_QCOM_BUS_SCALING)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d92d9a6..266471e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3905,6 +3905,8 @@
 	     updev; \
 	     updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
 
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
 void *netdev_lower_get_next_private(struct net_device *dev,
 				    struct list_head **iter);
 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index beb1e10..3bf867a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1199,7 +1199,7 @@
 
 struct nfs41_exchange_id_args {
 	struct nfs_client		*client;
-	nfs4_verifier			*verifier;
+	nfs4_verifier			verifier;
 	u32				flags;
 	struct nfs41_state_protection	state_protect;
 };
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3e5dbbe..4308204 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -574,6 +574,7 @@
 #define PCI_DEVICE_ID_AMD_CS5536_EHC    0x2095
 #define PCI_DEVICE_ID_AMD_CS5536_UDC    0x2096
 #define PCI_DEVICE_ID_AMD_CS5536_UOC    0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE    0x2092
 #define PCI_DEVICE_ID_AMD_CS5536_IDE    0x209A
 #define PCI_DEVICE_ID_AMD_LX_VIDEO  0x2081
 #define PCI_DEVICE_ID_AMD_LX_AES    0x2082
diff --git a/include/linux/property.h b/include/linux/property.h
index 856e50b..338f9b7 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -33,6 +33,8 @@
 	DEV_DMA_COHERENT,
 };
 
+struct fwnode_handle *dev_fwnode(struct device *dev);
+
 bool device_property_present(struct device *dev, const char *propname);
 int device_property_read_u8_array(struct device *dev, const char *propname,
 				  u8 *val, size_t nval);
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6c70444..b83507c 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -340,9 +340,9 @@
 	__PTR_RING_PEEK_CALL_v; \
 })
 
-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+	return kcalloc(size, sizeof(void *), gfp);
 }
 
 static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
@@ -417,7 +417,8 @@
  * In particular if you consume ring in interrupt or BH context, you must
  * disable interrupts/BH when doing so.
  */
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+					   unsigned int nrings,
 					   int size,
 					   gfp_t gfp, void (*destroy)(void *))
 {
@@ -425,7 +426,7 @@
 	void ***queues;
 	int i;
 
-	queues = kmalloc(nrings * sizeof *queues, gfp);
+	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
 	if (!queues)
 		goto noqueues;
 
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index aa4c1ed..77a46bd 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -117,6 +117,7 @@
 #define SE_HW_PARAM_0			(0xE24)
 #define SE_HW_PARAM_1			(0xE28)
 #define SE_DMA_GENERAL_CFG		(0xE30)
+#define SE_DMA_DEBUG_REG0		(0xE40)
 
 /* GENI_OUTPUT_CTRL fields */
 #define DEFAULT_IO_OUTPUT_CTRL_MSK	(GENMASK(6, 0))
@@ -736,6 +737,22 @@
 int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
 			   void *buf, size_t size);
 
+
+/**
+ * geni_se_dump_dbg_regs() - Print relevant registers that capture most
+ *			accurately the state of an SE; meant to be called
+ *			in case of errors to help debug.
+ * @rsc:		Pointer to the SE's resource structure.
+ * @base:		Base address of the SE's register space.
+ * @ipc:		IPC log context handle.
+ *
+ * This function is used to print out all the registers that capture the state
+ * of an SE to help debug any errors.
+ *
+ * Return:	None
+ */
+void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base,
+				void *ipc);
 #else
 static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
 {
@@ -907,5 +924,10 @@
 	return -ENXIO;
 }
 
+static inline void geni_se_dump_dbg_regs(struct se_geni_rsc *rsc, void __iomem *base,
+				void *ipc)
+{
+}
+
 #endif
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4ea064..67860f3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -928,6 +928,16 @@
 
 #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+			  SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+					 unsigned int flags)
+{
+	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
@@ -2380,23 +2390,6 @@
 }
 
 static inline int pid_alive(const struct task_struct *p);
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
-	pid_t pid = 0;
-
-	rcu_read_lock();
-	if (pid_alive(tsk))
-		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
-	rcu_read_unlock();
-
-	return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
-	return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
 
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
 					struct pid_namespace *ns)
@@ -2431,6 +2424,23 @@
 	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
 }
 
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+	pid_t pid = 0;
+
+	rcu_read_lock();
+	if (pid_alive(tsk))
+		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+	rcu_read_unlock();
+
+	return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+	return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
 /* obsolete, do not use */
 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 {
@@ -3950,6 +3960,7 @@
 #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 #define SCHED_CPUFREQ_WALT (1U << 4)
 #define SCHED_CPUFREQ_PL	(1U << 5)
+#define SCHED_CPUFREQ_EARLY_DET	(1U << 6)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade..be8b902 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -162,7 +162,8 @@
 }
 
 static inline int skb_array_resize_multiple(struct skb_array **rings,
-					    int nrings, int size, gfp_t gfp)
+					    int nrings, unsigned int size,
+					    gfp_t gfp)
 {
 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
 	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 084b12b..4c53635 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -226,7 +226,7 @@
  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -239,7 +239,7 @@
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	30
+#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
diff --git a/include/linux/spi/spi-geni-qcom.h b/include/linux/spi/spi-geni-qcom.h
new file mode 100644
index 0000000..8aee88a
--- /dev/null
+++ b/include/linux/spi/spi-geni-qcom.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SPI_GENI_QCOM_HEADER___
+#define __SPI_GENI_QCOM_HEADER___
+
+struct spi_geni_qcom_ctrl_data {
+	u32 spi_cs_clk_delay;
+	u32 spi_inter_words_delay;
+};
+
+#endif /*__SPI_GENI_QCOM_HEADER___*/
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489..17a33f3 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -23,6 +23,8 @@
 extern int snprint_stack_trace(char *buf, size_t size,
 			struct stack_trace *trace, int spaces);
 
+#define BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM
+
 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
 #else
diff --git a/include/linux/string.h b/include/linux/string.h
index 4e510df..0463dfb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -178,17 +178,6 @@
 void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
 
 #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
-{
-	size_t p_size = __builtin_object_size(p, 0);
-	size_t q_size = __builtin_object_size(q, 0);
-	if (p_size == (size_t)-1 && q_size == (size_t)-1)
-		return __builtin_strcpy(p, q);
-	if (strscpy(p, q, p_size < q_size ? p_size : q_size) < 0)
-		fortify_panic(__func__);
-	return p;
-}
-
 __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
 {
 	size_t p_size = __builtin_object_size(p, 0);
@@ -367,6 +356,18 @@
 		fortify_panic(__func__);
 	return __real_kmemdup(p, size, gfp);
 }
+
+/* defined after fortified strlen and memcpy to reuse them */
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+	size_t p_size = __builtin_object_size(p, 0);
+	size_t q_size = __builtin_object_size(q, 0);
+	if (p_size == (size_t)-1 && q_size == (size_t)-1)
+		return __builtin_strcpy(p, q);
+	memcpy(p, q, strlen(q) + 1);
+	return p;
+}
+
 #endif
 
 #endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
index aa17ccf..c97fac8 100644
--- a/include/linux/sync_file.h
+++ b/include/linux/sync_file.h
@@ -43,9 +43,10 @@
 
 	struct fence		*fence;
 	struct fence_cb cb;
+	unsigned long flags;
 };
 
-#define POLL_ENABLED FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct fence *fence);
 struct fence *sync_file_get_fence(int fd);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index ef20e16..232c3e0 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -751,11 +751,11 @@
 extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
 	unsigned int intr_num);
 
-extern dma_addr_t usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
-	unsigned int intr_num);
-extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
-extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
-	struct usb_host_endpoint *ep);
+extern phys_addr_t usb_get_sec_event_ring_phys_addr(
+	struct usb_device *dev, unsigned int intr_num, dma_addr_t *dma);
+extern phys_addr_t usb_get_xfer_ring_phys_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep, dma_addr_t *dma);
+extern int usb_get_controller_id(struct usb_device *dev);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index b305b0e..1699d2b 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -401,12 +401,12 @@
 	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num);
 	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd,
 			unsigned int intr_num);
-	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
-			unsigned int intr_num);
-	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
-			struct usb_device *udev, struct usb_host_endpoint *ep);
-	dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
-			struct usb_device *udev);
+	phys_addr_t (*get_sec_event_ring_phys_addr)(struct usb_hcd *hcd,
+			unsigned int intr_num, dma_addr_t *dma);
+	phys_addr_t (*get_xfer_ring_phys_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep,
+			dma_addr_t *dma);
+	int (*get_core_id)(struct usb_hcd *hcd);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -449,11 +449,11 @@
 	unsigned int intr_num);
 extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
 	unsigned int intr_num);
-extern dma_addr_t usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
-		unsigned int intr_num);
-extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
-extern dma_addr_t usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
-	struct usb_host_endpoint *ep);
+extern phys_addr_t usb_hcd_get_sec_event_ring_phys_addr(
+	struct usb_device *udev, unsigned int intr_num, dma_addr_t *dma);
+extern phys_addr_t usb_hcd_get_xfer_ring_phys_addr(
+	struct usb_device *udev, struct usb_host_endpoint *ep, dma_addr_t *dma);
+extern int usb_hcd_get_controller_id(struct usb_device *udev);
 
 struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
 		struct device *sysdev, struct device *dev, const char *bus_name,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fc6e221..1061add 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -312,6 +312,7 @@
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
 	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
 	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
+	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
@@ -409,7 +410,8 @@
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
+			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
 	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 82b4b53..73da337 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -49,6 +49,13 @@
 #define CFG80211_REKEY_DATA_KEK_LEN 1
 
 /**
+ * Indicate backport support for the new cfg80211_roamed event, which unifies
+ * the old cfg80211_roamed() and cfg80211_roamed_bss() APIs and takes a
+ * structure to pass roam information to the kernel.
+ */
+#define CFG80211_ROAMED_API_UNIFIED 1
+
+/**
  * DOC: Introduction
  *
  * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges
@@ -2661,8 +2668,7 @@
  *	indication of requesting reassociation.
  *	In both the driver-initiated and new connect() call initiated roaming
  *	cases, the result of roaming is indicated with a call to
- *	cfg80211_roamed() or cfg80211_roamed_bss().
- *	(invoked with the wireless_dev mutex held)
+ *	cfg80211_roamed(). (invoked with the wireless_dev mutex held)
  * @update_connect_params: Update the connect parameters while connected to a
  *	BSS. The updated parameters can be used by driver/firmware for
  *	subsequent BSS selection (roaming) decisions and to form the
@@ -5301,51 +5307,50 @@
 }
 
 /**
+ * struct cfg80211_roam_info - driver initiated roaming information
+ *
+ * @channel: the channel of the new AP
+ * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set)
+ * @bssid: the BSSID of the new AP (may be %NULL if %bss is set)
+ * @req_ie: association request IEs (may be %NULL)
+ * @req_ie_len: association request IEs length
+ * @resp_ie: association response IEs (may be %NULL)
+ * @resp_ie_len: assoc response IEs length
+ * @authorized: true if the 802.1X authentication was done by the driver or is
+ *	not needed (e.g., when Fast Transition protocol was used), false
+ *	otherwise. Ignored for networks that don't use 802.1X authentication.
+ */
+struct cfg80211_roam_info {
+	struct ieee80211_channel *channel;
+	struct cfg80211_bss *bss;
+	const u8 *bssid;
+	const u8 *req_ie;
+	size_t req_ie_len;
+	const u8 *resp_ie;
+	size_t resp_ie_len;
+	bool authorized;
+};
+
+/**
  * cfg80211_roamed - notify cfg80211 of roaming
  *
  * @dev: network device
- * @channel: the channel of the new AP
- * @bssid: the BSSID of the new AP
- * @req_ie: association request IEs (maybe be %NULL)
- * @req_ie_len: association request IEs length
- * @resp_ie: association response IEs (may be %NULL)
- * @resp_ie_len: assoc response IEs length
+ * @info: information about the new BSS. struct &cfg80211_roam_info.
  * @gfp: allocation flags
  *
- * It should be called by the underlying driver whenever it roamed
- * from one AP to another while connected.
- */
-void cfg80211_roamed(struct net_device *dev,
-		     struct ieee80211_channel *channel,
-		     const u8 *bssid,
-		     const u8 *req_ie, size_t req_ie_len,
-		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
-
-/**
- * cfg80211_roamed_bss - notify cfg80211 of roaming
- *
- * @dev: network device
- * @bss: entry of bss to which STA got roamed
- * @req_ie: association request IEs (maybe be %NULL)
- * @req_ie_len: association request IEs length
- * @resp_ie: association response IEs (may be %NULL)
- * @resp_ie_len: assoc response IEs length
- * @gfp: allocation flags
- *
- * This is just a wrapper to notify cfg80211 of roaming event with driver
- * passing bss to avoid a race in timeout of the bss entry. It should be
- * called by the underlying driver whenever it roamed from one AP to another
- * while connected. Drivers which have roaming implemented in firmware
- * may use this function to avoid a race in bss entry timeout where the bss
- * entry of the new AP is seen in the driver, but gets timed out by the time
- * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * This function may be called with either the BSSID of the new AP or with the
+ * bss entry of the new AP, which avoids a race with the bss entry timing out.
+ * It should be called by the underlying driver whenever it roamed from one AP
+ * to another while connected. Drivers which have roaming implemented in
+ * firmware should pass the bss entry to avoid a race in bss entry timeout where
+ * the bss entry of the new AP is seen in the driver, but gets timed out by the
+ * time it is accessed in __cfg80211_roamed() due to delay in scheduling
  * rdev->event_work. In case of any failures, the reference is released
- * either in cfg80211_roamed_bss() or in __cfg80211_romed(), Otherwise,
- * it will be released while diconneting from the current bss.
+ * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be
+ * released while disconnecting from the current bss.
  */
-void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
-			 const u8 *req_ie, size_t req_ie_len,
-			 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
+		     gfp_t gfp);
 
 /**
  * cfg80211_disconnected - notify cfg80211 that connection was dropped
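
With the unified event above, a driver fills a struct cfg80211_roam_info and makes a single cfg80211_roamed() call instead of choosing between the two old entry points. The fragment below is a hedged sketch of that driver-side call; the helper name and its parameters are assumptions, and only the struct fields and the cfg80211_roamed() prototype come from this header.

#include <net/cfg80211.h>

/* Hypothetical driver helper: report a firmware-initiated roam via the
 * unified event.  Either .bss or .bssid may be set; passing the bss entry
 * avoids the bss-timeout race described in the kerneldoc above. */
static void drv_report_roam(struct net_device *dev,
			    struct cfg80211_bss *bss,
			    const u8 *req_ie, size_t req_ie_len,
			    const u8 *resp_ie, size_t resp_ie_len)
{
	struct cfg80211_roam_info info = {
		.bss = bss,
		.req_ie = req_ie,
		.req_ie_len = req_ie_len,
		.resp_ie = resp_ie,
		.resp_ie_len = resp_ie_len,
		.authorized = false,	/* 802.1X, if any, not yet done */
	};

	cfg80211_roamed(dev, &info, GFP_KERNEL);
}
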
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 909972a..634d192 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
-#include <linux/percpu_counter.h>
-
 struct netns_frags {
-	/* The percpu_counter "mem" need to be cacheline aligned.
-	 *  mem.count must not share cacheline with other writers
-	 */
-	struct percpu_counter   mem ____cacheline_aligned_in_smp;
-
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
 	int			timeout;
 	int			high_thresh;
@@ -108,15 +103,10 @@
 int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
-static inline int inet_frags_init_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
 {
-	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
+	atomic_set(&nf->mem, 0);
 }
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
-{
-	percpu_counter_destroy(&nf->mem);
-}
-
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,37 +130,24 @@
 
 /* Memory Tracking Functions. */
 
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
 static inline int frag_mem_limit(struct netns_frags *nf)
 {
-	return percpu_counter_read(&nf->mem);
+	return atomic_read(&nf->mem);
 }
 
 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+	atomic_sub(i, &nf->mem);
 }
 
 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+	atomic_add(i, &nf->mem);
 }
 
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
 {
-	unsigned int res;
-
-	local_bh_disable();
-	res = percpu_counter_sum_positive(&nf->mem);
-	local_bh_enable();
-
-	return res;
+	return atomic_read(&nf->mem);
 }
 
 /* RFC 3168 support :
diff --git a/include/net/ip.h b/include/net/ip.h
index 9816365..4ef6792 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -342,7 +342,7 @@
 	    !forwarding)
 		return dst_mtu(dst);
 
-	return min(dst->dev->mtu, IP_MAX_MTU);
+	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
 }
 
 static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@@ -354,7 +354,7 @@
 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
 	}
 
-	return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+	return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
 }
 
 u32 ip_idents_reserve(u32 hash, int segs);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index a74e2aa..a6bcb18 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -68,6 +68,7 @@
 	__u16			fn_flags;
 	int			fn_sernum;
 	struct rt6_info		*rr_ptr;
+	struct rcu_head		rcu;
 };
 
 #ifndef CONFIG_IPV6_SUBTREES
@@ -102,7 +103,7 @@
 	 * the same cache line.
 	 */
 	struct fib6_table		*rt6i_table;
-	struct fib6_node		*rt6i_node;
+	struct fib6_node __rcu		*rt6i_node;
 
 	struct in6_addr			rt6i_gateway;
 
@@ -165,13 +166,40 @@
 	rt0->rt6i_flags |= RTF_EXPIRES;
 }
 
+/* Safely fetch fn->sernum for the passed-in rt and store the
+ * result in the passed-in cookie.
+ * Return true if the cookie could be read safely,
+ * false if not.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+				       u32 *cookie)
+{
+	struct fib6_node *fn;
+	bool status = false;
+
+	rcu_read_lock();
+	fn = rcu_dereference(rt->rt6i_node);
+
+	if (fn) {
+		*cookie = fn->fn_sernum;
+		status = true;
+	}
+
+	rcu_read_unlock();
+	return status;
+}
+
 static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
+	u32 cookie = 0;
+
 	if (rt->rt6i_flags & RTF_PCPU ||
 	    (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
 		rt = (struct rt6_info *)(rt->dst.from);
 
-	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+	rt6_get_cookie_safe(rt, &cookie);
+
+	return cookie;
 }
 
 static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index e0f4109..c2aa73e 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -556,7 +556,8 @@
 		memcpy(stream + lcp_len,
 		       ((char *) &iwe->u) + IW_EV_POINT_OFF,
 		       IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-		memcpy(stream + point_len, extra, iwe->u.data.length);
+		if (iwe->u.data.length && extra)
+			memcpy(stream + point_len, extra, iwe->u.data.length);
 		stream += event_len;
 	}
 	return stream;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e6aa0a2..f18fc1a 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -768,8 +768,11 @@
 	old = *pold;
 	*pold = new;
 	if (old != NULL) {
-		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+		unsigned int qlen = old->q.qlen;
+		unsigned int backlog = old->qstats.backlog;
+
 		qdisc_reset(old);
+		qdisc_tree_reduce_backlog(old, qlen, backlog);
 	}
 	sch_tree_unlock(sch);
 
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 31acc3f..61d9ce8 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -460,6 +460,8 @@
 
 #define _sctp_walk_params(pos, chunk, end, member)\
 for (pos.v = chunk->member;\
+     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+      (void *)chunk + end) &&\
      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
      pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -470,6 +472,8 @@
 #define _sctp_walk_errors(err, chunk_hdr, end)\
 for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
 	    sizeof(sctp_chunkhdr_t));\
+     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
+      (void *)chunk_hdr + end) &&\
      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
      err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
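
The extra clause added to _sctp_walk_params()/_sctp_walk_errors() above makes sure the length field itself lies inside the chunk before it is read and used to advance the walk. The standalone sketch below applies the same rule to a generic type-length-value buffer; the TLV layout and all names are invented and are not SCTP structures.

#include <stdint.h>
#include <stdio.h>

struct tlv {
	uint8_t type;
	uint8_t len;		/* length of the whole TLV, header included */
	uint8_t value[];
};

/* Walk TLVs in buf[0..buf_len): first check that the complete 2-byte header
 * fits in what remains (the bound the SCTP walkers gained above), and only
 * then trust the advertised length. */
static void walk_tlvs(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= buf_len) {	/* header in bounds */
		const struct tlv *t = (const struct tlv *)(buf + off);

		if (t->len < sizeof(struct tlv) || off + t->len > buf_len)
			break;				/* malformed length */

		printf("type %u, %u byte(s) of value\n",
		       (unsigned int)t->type,
		       t->len - (unsigned int)sizeof(struct tlv));
		off += t->len;
	}
}

int main(void)
{
	/* Two well-formed TLVs followed by a truncated header byte. */
	const uint8_t buf[] = { 1, 4, 0xaa, 0xbb, 2, 3, 0xcc, 3 };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}
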
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 3527c35..e58a522 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -127,16 +127,7 @@
 extern int icnss_smmu_map(struct device *dev, phys_addr_t paddr,
 			  uint32_t *iova_addr, size_t size);
 extern unsigned int icnss_socinfo_get_serial_number(struct device *dev);
-extern int icnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
-extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
-					 u16 buf_len);
-extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
-extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
 extern bool icnss_is_qmi_disable(void);
 extern bool icnss_is_fw_ready(void);
-extern int icnss_set_wlan_mac_address(const u8 *in, const uint32_t len);
-extern u8 *icnss_get_wlan_mac_address(struct device *dev, uint32_t *num);
 extern int icnss_trigger_recovery(struct device *dev);
-extern int icnss_get_driver_load_cnt(void);
-extern void icnss_increment_driver_load_cnt(void);
 #endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index f196d40..71bd075 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -102,6 +102,10 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
 #define early_machine_is_sdm670()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
+#define early_machine_is_qcs605()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,qcs605")
+#define early_machine_is_sda670()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sda670")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -142,6 +146,8 @@
 #define early_machine_is_sdxpoorwills()	0
 #define early_machine_is_sdm845()	0
 #define early_machine_is_sdm670()	0
+#define early_machine_is_qcs605()	0
+#define early_machine_is_sda670()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -204,6 +210,8 @@
 	SDX_CPU_SDXPOORWILLS,
 	MSM_CPU_SDM845,
 	MSM_CPU_SDM670,
+	MSM_CPU_QCS605,
+	MSM_CPU_SDA670,
 };
 
 struct msm_soc_info {
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 33b2e75..6021c3a 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -563,6 +563,7 @@
 #define LOGIN_FLAGS_READ_ACTIVE		1
 #define LOGIN_FLAGS_CLOSED		2
 #define LOGIN_FLAGS_READY		4
+#define LOGIN_FLAGS_INITIAL_PDU		8
 	unsigned long		login_flags;
 	struct delayed_work	login_work;
 	struct delayed_work	login_cleanup_work;
@@ -784,6 +785,7 @@
 	int			np_sock_type;
 	enum np_thread_state_table np_thread_state;
 	bool                    enabled;
+	atomic_t		np_reset_count;
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 2c7befb..9652037 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -161,6 +161,62 @@
 
 	TP_ARGS(dev, iova, flags)
 );
+
+DECLARE_EVENT_CLASS(iommu_errata_tlbi,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time),
+
+	TP_STRUCT__entry(
+		__string(device, dev_name(dev))
+		__field(u64, time)
+	),
+
+	TP_fast_assign(
+		__assign_str(device, dev_name(dev));
+		__entry->time = time;
+	),
+
+	TP_printk("IOMMU:%s %lld us",
+			__get_str(device), __entry->time
+	)
+);
+
+DEFINE_EVENT(iommu_errata_tlbi, errata_tlbi_start,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_errata_tlbi, errata_tlbi_end,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_errata_tlbi, errata_throttle_start,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_errata_tlbi, errata_throttle_end,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time)
+);
+
+DEFINE_EVENT(iommu_errata_tlbi, errata_failed,
+
+	TP_PROTO(struct device *dev, u64 time),
+
+	TP_ARGS(dev, time)
+);
 #endif /* _TRACE_IOMMU_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6ff08de..d5438d3 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -324,6 +324,7 @@
 #define DRM_EVENT_SYS_BACKLIGHT 0x80000003
 #define DRM_EVENT_SDE_POWER 0x80000004
 #define DRM_EVENT_IDLE_NOTIFY 0x80000005
+#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 285508a..1a43659 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -295,6 +295,44 @@
 	struct sde_drm_de_v1 de;
 };
 
+/* Number of dest scalers supported */
+#define SDE_MAX_DS_COUNT 2
+
+/*
+ * Destination scaler flag config
+ */
+#define SDE_DRM_DESTSCALER_ENABLE           0x1
+#define SDE_DRM_DESTSCALER_SCALE_UPDATE     0x2
+#define SDE_DRM_DESTSCALER_ENHANCER_UPDATE  0x4
+#define SDE_DRM_DESTSCALER_PU_ENABLE        0x8
+
+/**
+ * struct sde_drm_dest_scaler_cfg - destination scaler config structure
+ * @flags:      Flags to switch between modes for the destination scaler;
+ *              refer to destination scaler flag config
+ * @index:      Destination scaler selection index
+ * @lm_width:   Layer mixer width configuration
+ * @lm_height:  Layer mixer height configuration
+ * @scaler_cfg: The scaling parameters for all modes except disable.
+ *              Userspace pointer to struct sde_drm_scaler_v2
+ */
+struct sde_drm_dest_scaler_cfg {
+	uint32_t flags;
+	uint32_t index;
+	uint32_t lm_width;
+	uint32_t lm_height;
+	uint64_t scaler_cfg;
+};
+
+/**
+ * struct sde_drm_dest_scaler_data - destination scaler data struct
+ * @num_dest_scaler: Number of dest scalers to be configured
+ * @ds_cfg:          Destination scaler block configuration
+ */
+struct sde_drm_dest_scaler_data {
+	uint32_t num_dest_scaler;
+	struct sde_drm_dest_scaler_cfg ds_cfg[SDE_MAX_DS_COUNT];
+};
 
 /*
  * Define constants for struct sde_drm_csc
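
Userspace fills one sde_drm_dest_scaler_cfg per layer mixer, with scaler_cfg carrying a pointer (widened to u64) to the detailed struct sde_drm_scaler_v2. A hedged sketch of populating a single slot; the blob/property plumbing and sde_drm_scaler_v2 itself are outside this hunk, so a plain buffer stands in for the latter:

#include <stdint.h>
#include <string.h>

/* Mirrors of the new UAPI definitions above, for a standalone sketch. */
#define SDE_MAX_DS_COUNT                    2
#define SDE_DRM_DESTSCALER_ENABLE           0x1
#define SDE_DRM_DESTSCALER_SCALE_UPDATE     0x2

struct sde_drm_dest_scaler_cfg {
	uint32_t flags;
	uint32_t index;
	uint32_t lm_width;
	uint32_t lm_height;
	uint64_t scaler_cfg;	/* userspace pointer to struct sde_drm_scaler_v2 */
};

struct sde_drm_dest_scaler_data {
	uint32_t num_dest_scaler;
	struct sde_drm_dest_scaler_cfg ds_cfg[SDE_MAX_DS_COUNT];
};

/*
 * Fill one destination-scaler slot; 'scaler_v2' stands in for a populated
 * struct sde_drm_scaler_v2, which is defined elsewhere in sde_drm.h.
 */
static void fill_ds(struct sde_drm_dest_scaler_data *data,
		    void *scaler_v2, uint32_t lm_w, uint32_t lm_h)
{
	memset(data, 0, sizeof(*data));
	data->num_dest_scaler = 1;
	data->ds_cfg[0].flags = SDE_DRM_DESTSCALER_ENABLE |
				SDE_DRM_DESTSCALER_SCALE_UPDATE;
	data->ds_cfg[0].index = 0;
	data->ds_cfg[0].lm_width = lm_w;
	data->ds_cfg[0].lm_height = lm_h;
	data->ds_cfg[0].scaler_cfg = (uint64_t)(uintptr_t)scaler_v2;
}

int main(void)
{
	struct sde_drm_dest_scaler_data data;
	unsigned char dummy_scaler_v2[256];	/* placeholder payload */

	fill_ds(&data, dummy_scaler_v2, 1080, 1920);
	return data.num_dest_scaler == 1 ? 0 : 1;
}
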
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 8c0fc7b..9fbdc11 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -546,6 +546,12 @@
  *	well to remain backwards compatible.
  * @NL80211_CMD_ROAM: request that the card roam (currently not implemented),
  *	sent as an event when the card/driver roamed by itself.
+ *	When used as an event, and the driver roamed in a network that requires
+ *	802.1X authentication, %NL80211_ATTR_PORT_AUTHORIZED should be set
+ *	if the 802.1X authentication was done by the driver or if roaming was
+ *	done using the Fast Transition protocol (in which case 802.1X authentication
+ *	is not needed). If %NL80211_ATTR_PORT_AUTHORIZED is not set, user space
+ *	is responsible for the 802.1X authentication.
  * @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
  *	userspace that a connection was dropped by the AP or due to other
  *	reasons, for this the %NL80211_ATTR_DISCONNECTED_BY_AP and
@@ -2066,6 +2072,10 @@
  *
  * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
  *	This is used with @NL80211_CMD_SET_PMKSA.
+ * @NL80211_ATTR_PORT_AUTHORIZED: flag attribute used in %NL80211_CMD_ROAMED
+ *	notification indicating that 802.1X authentication was done by
+ *	the driver or is not needed (because roaming used the Fast Transition
+ *	protocol).
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2486,6 +2496,13 @@
 
 	NL80211_ATTR_PMK,
 
+	NL80211_ATTR_SCHED_SCAN_MULTI,
+	NL80211_ATTR_SCHED_SCAN_MAX_REQS,
+
+	NL80211_ATTR_WANT_1X_4WAY_HS,
+	NL80211_ATTR_PMKR0_NAME,
+	NL80211_ATTR_PORT_AUTHORIZED,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index f04ac49..04aaaad 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -33,6 +33,7 @@
 #define WAN_IOCTL_QUERY_DL_FILTER_STATS  8
 #define WAN_IOCTL_ADD_FLT_RULE_EX        9
 #define WAN_IOCTL_QUERY_TETHER_STATS_ALL  10
+#define WAN_IOCTL_NOTIFY_WAN_STATE  11
 
 /* User space may not have this defined. */
 #ifndef IFNAMSIZ
@@ -126,6 +127,10 @@
 	uint32_t index;
 };
 
+struct wan_ioctl_notify_wan_state {
+	uint8_t up;
+};
+
 #define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
 		WAN_IOCTL_ADD_FLT_RULE, \
 		struct ipa_install_fltr_rule_req_msg_v01 *)
@@ -170,4 +175,8 @@
 		WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
 		struct wan_ioctl_query_tether_stats_all *)
 
+#define WAN_IOC_NOTIFY_WAN_STATE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_NOTIFY_WAN_STATE, \
+		struct wan_ioctl_notify_wan_state *)
+
 #endif /* _RMNET_IPA_FD_IOCTL_H */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index fee26a9..9b7d055 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -216,7 +216,8 @@
 /* Maximum allowed buffers in existence */
 #define CAM_MEM_BUFQ_MAX                        1024
 
-#define CAM_MEM_MGR_HDL_IDX_SIZE                16
+#define CAM_MEM_MGR_SECURE_BIT_POS              15
+#define CAM_MEM_MGR_HDL_IDX_SIZE                15
 #define CAM_MEM_MGR_HDL_FD_SIZE                 16
 #define CAM_MEM_MGR_HDL_IDX_END_POS             16
 #define CAM_MEM_MGR_HDL_FD_END_POS              32
@@ -224,11 +225,19 @@
 #define CAM_MEM_MGR_HDL_IDX_MASK      ((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1)
 
 #define GET_MEM_HANDLE(idx, fd) \
-	((idx << (CAM_MEM_MGR_HDL_IDX_END_POS - CAM_MEM_MGR_HDL_IDX_SIZE)) | \
+	((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \
 	(fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \
 
 #define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
 
+#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
+	((flag) ? (hdl |= (1 << CAM_MEM_MGR_SECURE_BIT_POS)) : \
+	((hdl) &= ~(1 << CAM_MEM_MGR_SECURE_BIT_POS)))
+
+#define CAM_MEM_MGR_IS_SECURE_HDL(hdl) \
+	(((hdl) & \
+	(1<<CAM_MEM_MGR_SECURE_BIT_POS)) >> CAM_MEM_MGR_SECURE_BIT_POS)
+
 /**
  * memory allocation type
  */
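
With the index field shrunk to 15 bits, bit 15 of a memory handle now carries the secure-buffer flag, and GET_MEM_HANDLE masks the index in place instead of shifting it. A standalone check of the packing, with the macros mirrored from this header:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrored from cam_req_mgr.h after this change. */
#define CAM_MEM_MGR_SECURE_BIT_POS	15
#define CAM_MEM_MGR_HDL_IDX_SIZE	15
#define CAM_MEM_MGR_HDL_FD_SIZE		16
#define CAM_MEM_MGR_HDL_FD_END_POS	32
#define CAM_MEM_MGR_HDL_IDX_MASK	((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1)

#define GET_MEM_HANDLE(idx, fd) \
	(((idx) & CAM_MEM_MGR_HDL_IDX_MASK) | \
	((fd) << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)))

#define CAM_MEM_MGR_GET_HDL_IDX(hdl)	((hdl) & CAM_MEM_MGR_HDL_IDX_MASK)

#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
	((flag) ? ((hdl) |= (1 << CAM_MEM_MGR_SECURE_BIT_POS)) : \
	((hdl) &= ~(1 << CAM_MEM_MGR_SECURE_BIT_POS)))

#define CAM_MEM_MGR_IS_SECURE_HDL(hdl) \
	(((hdl) & (1 << CAM_MEM_MGR_SECURE_BIT_POS)) >> CAM_MEM_MGR_SECURE_BIT_POS)

int main(void)
{
	uint32_t hdl = GET_MEM_HANDLE(42, 7);	/* buffer index 42, fd 7 */

	assert(CAM_MEM_MGR_GET_HDL_IDX(hdl) == 42);
	assert(!CAM_MEM_MGR_IS_SECURE_HDL(hdl));

	CAM_MEM_MGR_SET_SECURE_HDL(hdl, 1);	/* mark as a secure buffer */
	assert(CAM_MEM_MGR_IS_SECURE_HDL(hdl));
	assert(CAM_MEM_MGR_GET_HDL_IDX(hdl) == 42);	/* index untouched */

	printf("handle = 0x%08x\n", (unsigned int)hdl);
	return 0;
}
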
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
index 2fe7f2b..87f25b0 100644
--- a/include/uapi/media/cam_sensor.h
+++ b/include/uapi/media/cam_sensor.h
@@ -8,6 +8,7 @@
 #define CAM_SENSOR_PROBE_CMD   (CAM_COMMON_OPCODE_MAX + 1)
 #define CAM_FLASH_MAX_LED_TRIGGERS 3
 #define MAX_OIS_NAME_SIZE 32
+#define CAM_CSIPHY_SECURE_MODE_ENABLED 1
 /**
  * struct cam_sensor_query_cap - capabilities info for sensor
  *
@@ -316,15 +317,15 @@
 
 /**
 * cam_csiphy_info: Provides cmdbuffer structure
- * @lane_mask     :  Lane mask details
- * @lane_assign   :  Lane sensor will be using
- * @csiphy_3phase :  Total number of lanes
- * @combo_mode    :  Info regarding combo_mode is enable / disable
- * @lane_cnt      :  Total number of lanes
- * @reserved
- * @3phase        :  Details whether 3Phase / 2Phase operation
- * @settle_time   :  Settling time in ms
- * @data_rate     :  Data rate
+ * @lane_mask     : Lane mask details
+ * @lane_assign   : Lane sensor will be using
+ * @csiphy_3phase : Total number of lanes
+ * @combo_mode    : Info regarding combo_mode is enable / disable
+ * @lane_cnt      : Total number of lanes
+ * @secure_mode   : Secure mode flag to enable / disable
+ * @3phase        : Details whether 3Phase / 2Phase operation
+ * @settle_time   : Settling time in ms
+ * @data_rate     : Data rate
  *
  */
 struct cam_csiphy_info {
@@ -333,7 +334,7 @@
 	uint8_t     csiphy_3phase;
 	uint8_t     combo_mode;
 	uint8_t     lane_cnt;
-	uint8_t     reserved;
+	uint8_t     secure_mode;
 	uint64_t    settle_time;
 	uint64_t    data_rate;
 } __attribute__((packed));
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 0d302a8..690e1e3 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -457,13 +457,15 @@
 	list_del(&krule->rlist);
 
 	if (list_empty(&watch->rules)) {
+		/*
+		 * audit_remove_watch() drops our reference to 'parent' which
+		 * can get freed. Grab our own reference to be safe.
+		 */
+		audit_get_parent(parent);
 		audit_remove_watch(watch);
-
-		if (list_empty(&parent->watches)) {
-			audit_get_parent(parent);
+		if (list_empty(&parent->watches))
 			fsnotify_destroy_mark(&parent->mark, audit_watch_group);
-			audit_put_parent(parent);
-		}
+		audit_put_parent(parent);
 	}
 }
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8ce679d..779c871 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -139,7 +139,7 @@
 	struct bpf_verifier_stack_elem *next;
 };
 
-#define BPF_COMPLEXITY_LIMIT_INSNS	65536
+#define BPF_COMPLEXITY_LIMIT_INSNS	98304
 #define BPF_COMPLEXITY_LIMIT_STACK	1024
 
 struct bpf_call_arg_meta {
@@ -682,12 +682,13 @@
 	return -EACCES;
 }
 
-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+static bool __is_pointer_value(bool allow_ptr_leaks,
+			       const struct bpf_reg_state *reg)
 {
-	if (env->allow_ptr_leaks)
+	if (allow_ptr_leaks)
 		return false;
 
-	switch (env->cur_state.regs[regno].type) {
+	switch (reg->type) {
 	case UNKNOWN_VALUE:
 	case CONST_IMM:
 		return false;
@@ -696,6 +697,11 @@
 	}
 }
 
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+{
+	return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+}
+
 static int check_ptr_alignment(struct bpf_verifier_env *env,
 			       struct bpf_reg_state *reg, int off, int size)
 {
@@ -1467,6 +1473,65 @@
 	return 0;
 }
 
+static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
+					struct bpf_insn *insn)
+{
+	struct bpf_reg_state *regs = env->cur_state.regs;
+	struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+	u8 opcode = BPF_OP(insn->code);
+	s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
+
+	/* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
+	if (src_reg->imm > 0 && dst_reg->imm) {
+		switch (opcode) {
+		case BPF_ADD:
+			/* dreg += sreg
+			 * where both have zero upper bits. Adding them
+			 * can only result making one more bit non-zero
+			 * in the larger value.
+			 * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
+			 *     0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
+			 */
+			dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
+			dst_reg->imm--;
+			break;
+		case BPF_AND:
+			/* dreg &= sreg
+			 * AND can not extend zero bits only shrink
+			 * Ex.  0x00..00ffffff
+			 *    & 0x0f..ffffffff
+			 *     ----------------
+			 *      0x00..00ffffff
+			 */
+			dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
+			break;
+		case BPF_OR:
+			/* dreg |= sreg
+			 * OR can only extend zero bits
+			 * Ex.  0x00..00ffffff
+			 *    | 0x0f..ffffffff
+			 *     ----------------
+			 *      0x0f..00ffffff
+			 */
+			dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
+			break;
+		case BPF_SUB:
+		case BPF_MUL:
+		case BPF_RSH:
+		case BPF_LSH:
+			/* These may be flushed out later */
+		default:
+			mark_reg_unknown_value(regs, insn->dst_reg);
+		}
+	} else {
+		mark_reg_unknown_value(regs, insn->dst_reg);
+	}
+
+	dst_reg->type = UNKNOWN_VALUE;
+	return 0;
+}
+
 static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
 				struct bpf_insn *insn)
 {
@@ -1475,6 +1540,9 @@
 	struct bpf_reg_state *src_reg = &regs[insn->src_reg];
 	u8 opcode = BPF_OP(insn->code);
 
+	if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
+		return evaluate_reg_imm_alu_unknown(env, insn);
+
 	/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
 	 * Don't care about overflow or negative values, just add them
 	 */
@@ -1530,10 +1598,24 @@
 	}
 
 	/* We don't know anything about what was done to this register, mark it
-	 * as unknown.
+	 * as unknown. Also, if both derived bounds came from signed/unsigned
+	 * mixed compares and one side is unbounded, we cannot really do anything
+	 * with them as boundaries cannot be trusted. Thus, arithmetic of two
+	 * regs of such kind will get invalidated bounds on the dst side.
 	 */
-	if (min_val == BPF_REGISTER_MIN_RANGE &&
-	    max_val == BPF_REGISTER_MAX_RANGE) {
+	if ((min_val == BPF_REGISTER_MIN_RANGE &&
+	     max_val == BPF_REGISTER_MAX_RANGE) ||
+	    (BPF_SRC(insn->code) == BPF_X &&
+	     ((min_val != BPF_REGISTER_MIN_RANGE &&
+	       max_val == BPF_REGISTER_MAX_RANGE) ||
+	      (min_val == BPF_REGISTER_MIN_RANGE &&
+	       max_val != BPF_REGISTER_MAX_RANGE) ||
+	      (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
+	       dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
+	      (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
+	       dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
+	     regs[insn->dst_reg].value_from_signed !=
+	     regs[insn->src_reg].value_from_signed)) {
 		reset_reg_range_values(regs, insn->dst_reg);
 		return;
 	}
@@ -1542,10 +1624,12 @@
 	 * do our normal operations to the register, we need to set the values
 	 * to the min/max since they are undefined.
 	 */
-	if (min_val == BPF_REGISTER_MIN_RANGE)
-		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-	if (max_val == BPF_REGISTER_MAX_RANGE)
-		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+	if (opcode != BPF_SUB) {
+		if (min_val == BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		if (max_val == BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+	}
 
 	switch (opcode) {
 	case BPF_ADD:
@@ -1555,10 +1639,17 @@
 			dst_reg->max_value += max_val;
 		break;
 	case BPF_SUB:
+		/* If one of our values was at the end of our ranges, then the
+		 * _opposite_ value in the dst_reg goes to the end of our range.
+		 */
+		if (min_val == BPF_REGISTER_MIN_RANGE)
+			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+		if (max_val == BPF_REGISTER_MAX_RANGE)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
-			dst_reg->min_value -= min_val;
+			dst_reg->min_value -= max_val;
 		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
-			dst_reg->max_value -= max_val;
+			dst_reg->max_value -= min_val;
 		break;
 	case BPF_MUL:
 		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
@@ -1808,6 +1899,7 @@
 		 * register as unknown.
 		 */
 		if (env->allow_ptr_leaks &&
+		    BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
 		    (dst_reg->type == PTR_TO_MAP_VALUE ||
 		     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
 			dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1876,38 +1968,63 @@
 			    struct bpf_reg_state *false_reg, u64 val,
 			    u8 opcode)
 {
+	bool value_from_signed = true;
+	bool is_range = true;
+
 	switch (opcode) {
 	case BPF_JEQ:
 		/* If this is false then we know nothing Jon Snow, but if it is
 		 * true then we know for sure.
 		 */
 		true_reg->max_value = true_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JNE:
 		/* If this is true we know nothing Jon Snow, but if it is false
 		 * we know the value for sure;
 		 */
 		false_reg->max_value = false_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JGT:
-		/* Unsigned comparison, the minimum value is 0. */
-		false_reg->min_value = 0;
+		value_from_signed = false;
+		/* fallthrough */
 	case BPF_JSGT:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGT) {
+			/* Unsigned comparison, the minimum value is 0. */
+			false_reg->min_value = 0;
+		}
 		/* If this is false then we know the maximum val is val,
 		 * otherwise we know the min val is val+1.
 		 */
 		false_reg->max_value = val;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->min_value = val + 1;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	case BPF_JGE:
-		/* Unsigned comparison, the minimum value is 0. */
-		false_reg->min_value = 0;
+		value_from_signed = false;
+		/* fallthrough */
 	case BPF_JSGE:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGE) {
+			/* Unsigned comparison, the minimum value is 0. */
+			false_reg->min_value = 0;
+		}
 		/* If this is false then we know the maximum value is val - 1,
 		 * otherwise we know the minimum value is val.
 		 */
 		false_reg->max_value = val - 1;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->min_value = val;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	default:
 		break;
@@ -1915,6 +2032,12 @@
 
 	check_reg_overflow(false_reg);
 	check_reg_overflow(true_reg);
+	if (is_range) {
+		if (__is_pointer_value(false, false_reg))
+			reset_reg_range_values(false_reg, 0);
+		if (__is_pointer_value(false, true_reg))
+			reset_reg_range_values(true_reg, 0);
+	}
 }
 
 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
@@ -1924,39 +2047,64 @@
 				struct bpf_reg_state *false_reg, u64 val,
 				u8 opcode)
 {
+	bool value_from_signed = true;
+	bool is_range = true;
+
 	switch (opcode) {
 	case BPF_JEQ:
 		/* If this is false then we know nothing Jon Snow, but if it is
 		 * true then we know for sure.
 		 */
 		true_reg->max_value = true_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JNE:
 		/* If this is true we know nothing Jon Snow, but if it is false
 		 * we know the value for sure;
 		 */
 		false_reg->max_value = false_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JGT:
-		/* Unsigned comparison, the minimum value is 0. */
-		true_reg->min_value = 0;
+		value_from_signed = false;
+		/* fallthrough */
 	case BPF_JSGT:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGT) {
+			/* Unsigned comparison, the minimum value is 0. */
+			true_reg->min_value = 0;
+		}
 		/*
 		 * If this is false, then the val is <= the register, if it is
 		 * true the register <= to the val.
 		 */
 		false_reg->min_value = val;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->max_value = val - 1;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	case BPF_JGE:
-		/* Unsigned comparison, the minimum value is 0. */
-		true_reg->min_value = 0;
+		value_from_signed = false;
+		/* fallthrough */
 	case BPF_JSGE:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGE) {
+			/* Unsigned comparison, the minimum value is 0. */
+			true_reg->min_value = 0;
+		}
 		/* If this is false then constant < register, if it is true then
 		 * the register < constant.
 		 */
 		false_reg->min_value = val + 1;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->max_value = val;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	default:
 		break;
@@ -1964,6 +2112,12 @@
 
 	check_reg_overflow(false_reg);
 	check_reg_overflow(true_reg);
+	if (is_range) {
+		if (__is_pointer_value(false, false_reg))
+			reset_reg_range_values(false_reg, 0);
+		if (__is_pointer_value(false, true_reg))
+			reset_reg_range_values(true_reg, 0);
+	}
 }
 
 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
@@ -2390,6 +2544,7 @@
 				env->explored_states[t + 1] = STATE_LIST_MARK;
 		} else {
 			/* conditional jump with two edges */
+			env->explored_states[t] = STATE_LIST_MARK;
 			ret = push_insn(t, t + 1, FALLTHROUGH, env);
 			if (ret == 1)
 				goto peek_stack;
@@ -2548,6 +2703,12 @@
 		     rcur->type != NOT_INIT))
 			continue;
 
+		/* Don't care about the reg->id in this case. */
+		if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
+		    rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
+		    rold->map_ptr == rcur->map_ptr)
+			continue;
+
 		if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
 		    compare_ptrs_to_packet(rold, rcur))
 			continue;
@@ -2682,6 +2843,9 @@
 			goto process_bpf_exit;
 		}
 
+		if (need_resched())
+			cond_resched();
+
 		if (log_level && do_print_state) {
 			verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
 			print_verifier_state(&env->cur_state);
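
Of the verifier changes above, the BPF_SUB bounds update is the easiest to sanity-check: subtracting an operand in [min_val, max_val] from a destination in [dst_min, dst_max] soundly yields [dst_min - max_val, dst_max - min_val], which is what the hunk now computes (the old code subtracted min from min and max from max). A small standalone check of that interval rule:

#include <assert.h>
#include <stdio.h>

struct range { long long min, max; };

/* Interval subtraction: [a.min - b.max, a.max - b.min]. */
static struct range range_sub(struct range a, struct range b)
{
	struct range r = { a.min - b.max, a.max - b.min };

	return r;
}

int main(void)
{
	struct range dst = { 10, 20 }, src = { 3, 7 };
	struct range r = range_sub(dst, src);

	assert(r.min == 3 && r.max == 17);

	/* Every concrete pair stays inside the computed bounds. */
	for (long long d = dst.min; d <= dst.max; d++)
		for (long long s = src.min; s <= src.max; s++)
			assert(d - s >= r.min && d - s <= r.max);

	/* The old update (min - min, max - max) would claim [7, 13] and miss
	 * e.g. 20 - 3 = 17, which is exactly the hole being closed here. */
	printf("dst - src is within [%lld, %lld]\n", r.min, r.max);
	return 0;
}
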
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7bb21fd..26c624e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3508,11 +3508,11 @@
 	cgrp->subtree_control &= ~disable;
 
 	ret = cgroup_apply_control(cgrp);
-
 	cgroup_finalize_control(cgrp, ret);
+	if (ret)
+		goto out_unlock;
 
 	kernfs_activate(cgrp->kn);
-	ret = 0;
 out_unlock:
 	cgroup_kn_unlock(of->kn);
 	return ret ?: nbytes;
@@ -5744,6 +5744,10 @@
 
 		if (ss->bind)
 			ss->bind(init_css_set.subsys[ssid]);
+
+		mutex_lock(&cgroup_mutex);
+		css_populate_dir(init_css_set.subsys[ssid]);
+		mutex_unlock(&cgroup_mutex);
 	}
 
 	/* init_css_set.subsys[] has been updated, re-hash */
diff --git a/kernel/configs/README.android b/kernel/configs/README.android
deleted file mode 100644
index 2e2d7c0..0000000
--- a/kernel/configs/README.android
+++ /dev/null
@@ -1,15 +0,0 @@
-The android-*.config files in this directory are meant to be used as a base
-for an Android kernel config. All devices should have the options in
-android-base.config enabled. While not mandatory, the options in
-android-recommended.config enable advanced Android features.
-
-Assuming you already have a minimalist defconfig for your device, a possible
-way to enable these options would be:
-
-     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig kernel/configs/android-base.config kernel/configs/android-recommended.config
-
-This will generate a .config that can then be used to save a new defconfig or
-compile a new kernel with Android features enabled.
-
-Because there is no tool to consistently generate these config fragments,
-lets keep them alphabetically sorted instead of random.
diff --git a/kernel/configs/android-base-arm64.cfg b/kernel/configs/android-base-arm64.cfg
deleted file mode 100644
index 43f23d6..0000000
--- a/kernel/configs/android-base-arm64.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-#  KEEP ALPHABETICALLY SORTED
-CONFIG_ARMV8_DEPRECATED=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
-CONFIG_SWP_EMULATION=y
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
deleted file mode 100644
index 80df048..0000000
--- a/kernel/configs/android-base.config
+++ /dev/null
@@ -1,170 +0,0 @@
-#  KEEP ALPHABETICALLY SORTED
-# CONFIG_DEVKMEM is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_FHANDLE is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_NFSD is not set
-# CONFIG_NFS_FS is not set
-# CONFIG_OABI_COMPAT is not set
-# CONFIG_SYSVIPC is not set
-# CONFIG_USELIB is not set
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
-CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ASHMEM=y
-CONFIG_AUDIT=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CGROUP_BPF=y
-CONFIG_DEFAULT_SECURITY_SELINUX=y
-CONFIG_EMBEDDED=y
-CONFIG_FB=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_INET=y
-CONFIG_INET_DIAG_DESTROY=y
-CONFIG_INET_ESP=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_SECURITY=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_NET=y
-CONFIG_NETDEVICES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_SOCKET=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_TARGET_TPROXY=y
-CONFIG_NETFILTER_XT_TARGET_TRACE=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_CLS_U32=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_KEY=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_NAT=y
-CONFIG_NO_HZ=y
-CONFIG_PACKET=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PPP=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_MPPE=y
-CONFIG_PREEMPT=y
-CONFIG_PROFILING=y
-CONFIG_QFMT_V2=y
-CONFIG_QUOTA=y
-CONFIG_QUOTACTL=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QUOTA_TREE=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SECCOMP=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_STAGING=y
-CONFIG_SYNC=y
-CONFIG_TUN=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_UNIX=y
-CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_GADGET=y
-CONFIG_XFRM_USER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
deleted file mode 100644
index 36ec6c1..0000000
--- a/kernel/configs/android-recommended.config
+++ /dev/null
@@ -1,133 +0,0 @@
-#  KEEP ALPHABETICALLY SORTED
-# CONFIG_AIO is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_NF_CONNTRACK_SIP is not set
-# CONFIG_PM_WAKELOCKS_GC is not set
-# CONFIG_VT is not set
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_COMPACTION=y
-CONFIG_CPU_SW_DOMAIN_PAN=y
-CONFIG_DEBUG_RODATA=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
-CONFIG_DRAGONRISE_FF=y
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FUSE_FS=y
-CONFIG_GREENASIA_FF=y
-CONFIG_HIDRAW=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_ACRUX=y
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EMS_FF=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GREENASIA=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_HOLTEK=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_KEYTOUCH=y
-CONFIG_HID_KYE=y
-CONFIG_HID_LCPOWER=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_LOGITECH_DJ=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_ORTEK=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_PICOLCD=y
-CONFIG_HID_PRIMAX=y
-CONFIG_HID_PRODIKEYS=y
-CONFIG_HID_ROCCAT=y
-CONFIG_HID_SAITEK=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SMARTJOYPLUS=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SPEEDLINK=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_TIVO=y
-CONFIG_HID_TOPSEED=y
-CONFIG_HID_TWINHAN=y
-CONFIG_HID_UCLOGIC=y
-CONFIG_HID_WACOM=y
-CONFIG_HID_WALTOP=y
-CONFIG_HID_WIIMOTE=y
-CONFIG_HID_ZEROPLUS=y
-CONFIG_HID_ZYDACRON=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_KEYCHORD=y
-CONFIG_INPUT_KEYRESET=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_TABLET=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
-CONFIG_JOYSTICK_XPAD=y
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KSM=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGITECH_FF=y
-CONFIG_MD=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_MSDOS_FS=y
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_PANTHERLORD_FF=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_POWER_SUPPLY=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-CONFIG_PSTORE_RAM=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_SND=y
-CONFIG_SOUND=y
-CONFIG_SUSPEND_TIME=y
-CONFIG_TABLET_USB_ACECAD=y
-CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
-CONFIG_TABLET_USB_HANWANG=y
-CONFIG_TABLET_USB_KBTAB=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_TASK_XACCT=y
-CONFIG_TIMER_STATS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_UHID=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_USBNET=y
-CONFIG_VFAT_FS=y
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a99cd8d..d3a7411 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,6 +61,7 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
+DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
 
 /* See "Frequency meter" comments, below. */
@@ -1907,6 +1908,7 @@
 	{
 		.name = "memory_pressure",
 		.read_u64 = cpuset_read_u64,
+		.private = FILE_MEMORY_PRESSURE,
 	},
 
 	{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0d4a401..f6e81b5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9894,28 +9894,27 @@
 			goto err_context;
 
 		/*
-		 * Do not allow to attach to a group in a different
-		 * task or CPU context:
+		 * Make sure we're both events for the same CPU;
+		 * grouping events for different CPUs is broken; since
+		 * you can never concurrently schedule them anyhow.
 		 */
-		if (move_group) {
-			/*
-			 * Make sure we're both on the same task, or both
-			 * per-cpu events.
-			 */
-			if (group_leader->ctx->task != ctx->task)
-				goto err_context;
+		if (group_leader->cpu != event->cpu)
+			goto err_context;
 
-			/*
-			 * Make sure we're both events for the same CPU;
-			 * grouping events for different CPUs is broken; since
-			 * you can never concurrently schedule them anyhow.
-			 */
-			if (group_leader->cpu != event->cpu)
-				goto err_context;
-		} else {
-			if (group_leader->ctx != ctx)
-				goto err_context;
-		}
+		/*
+		 * Make sure we're both on the same task, or both
+		 * per-CPU events.
+		 */
+		if (group_leader->ctx->task != ctx->task)
+			goto err_context;
+
+		/*
+		 * Do not allow to attach to a group in a different task
+		 * or CPU context. If we're moving SW events, we'll fix
+		 * this up later, so allow that.
+		 */
+		if (!move_group && group_leader->ctx != ctx)
+			goto err_context;
 
 		/*
 		 * Only a group leader can be exclusive or pinned
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f9ec9ad..a1de021 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1254,8 +1254,6 @@
 
 void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
 {
-	newmm->uprobes_state.xol_area = NULL;
-
 	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
 		set_bit(MMF_HAS_UPROBES, &newmm->flags);
 		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
diff --git a/kernel/fork.c b/kernel/fork.c
index 39c0709..610aded 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -745,6 +745,13 @@
 #endif
 }
 
+static void mm_init_uprobes_state(struct mm_struct *mm)
+{
+#ifdef CONFIG_UPROBES
+	mm->uprobes_state.xol_area = NULL;
+#endif
+}
+
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	struct user_namespace *user_ns)
 {
@@ -766,11 +773,13 @@
 	mm_init_cpumask(mm);
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	RCU_INIT_POINTER(mm->exe_file, NULL);
 	mmu_notifier_mm_init(mm);
 	clear_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	mm->pmd_huge_pte = NULL;
 #endif
+	mm_init_uprobes_state(mm);
 
 	if (current->mm) {
 		mm->flags = current->mm->flags & MMF_INIT_MASK;
diff --git a/kernel/futex.c b/kernel/futex.c
index 4c6b6e6..88bad86 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -668,13 +668,14 @@
 		 * this reference was taken by ihold under the page lock
 		 * pinning the inode in place so i_lock was unnecessary. The
 		 * only way for this check to fail is if the inode was
-		 * truncated in parallel so warn for now if this happens.
+		 * truncated in parallel which is almost certainly an
+		 * application bug. In such a case, just retry.
 		 *
 		 * We are not calling into get_futex_key_refs() in file-backed
 		 * cases, therefore a successful atomic_inc return below will
 		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
 		 */
-		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+		if (!atomic_inc_not_zero(&inode->i_count)) {
 			rcu_read_unlock();
 			put_page(page);
 
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 2f9df37..c51a49c 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@
 }
 EXPORT_SYMBOL(__gcov_merge_icall_topn);
 
+void __gcov_exit(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 6a5c239..46a18e7 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS			9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS			10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS			9
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 077c87f..f30110e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -895,13 +895,15 @@
 
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
-	unsigned long flags;
+	unsigned long flags, trigger, tmp;
 	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return;
 	irq_settings_clr_and_set(desc, clr, set);
 
+	trigger = irqd_get_trigger_type(&desc->irq_data);
+
 	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
 		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
 	if (irq_settings_has_no_balance_set(desc))
@@ -913,7 +915,11 @@
 	if (irq_settings_is_level(desc))
 		irqd_set(&desc->irq_data, IRQD_LEVEL);
 
-	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
+	tmp = irq_settings_get_trigger_mask(desc);
+	if (tmp != IRQ_TYPE_NONE)
+		trigger = tmp;
+
+	irqd_set(&desc->irq_data, trigger);
 
 	irq_put_desc_unlock(desc, flags);
 }
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 1a9abc1..259a22a 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -165,7 +165,7 @@
 	struct irq_data *data = irq_get_irq_data(irq);
 	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
 
-	if (!data || !ipimask || cpu > nr_cpu_ids)
+	if (!data || !ipimask || cpu >= nr_cpu_ids)
 		return INVALID_HWIRQ;
 
 	if (!cpumask_test_cpu(cpu, ipimask))
@@ -195,7 +195,7 @@
 	if (!chip->ipi_send_single && !chip->ipi_send_mask)
 		return -EINVAL;
 
-	if (cpu > nr_cpu_ids)
+	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	if (dest) {
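
Both hunks in ipi.c fix the same off-by-one: valid CPU ids run from 0 to nr_cpu_ids - 1, so the rejection test must be cpu >= nr_cpu_ids. A one-screen illustration of the boundary:

#include <assert.h>
#include <stdbool.h>

#define NR_CPU_IDS 4	/* valid CPU ids are 0 .. NR_CPU_IDS - 1 */

static bool cpu_id_valid(unsigned int cpu)
{
	return cpu < NR_CPU_IDS;	/* i.e. reject cpu >= nr_cpu_ids */
}

int main(void)
{
	assert(cpu_id_valid(0) && cpu_id_valid(NR_CPU_IDS - 1));
	/* The old "cpu > nr_cpu_ids" test would have accepted this value and
	 * indexed one element past the end of the per-cpu data. */
	assert(!cpu_id_valid(NR_CPU_IDS));
	return 0;
}
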
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3cbb0c8..f8f3f4c 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -14,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/kcov.h>
+#include <asm/setup.h>
 
 /*
  * kcov descriptor (one per opened debugfs file).
@@ -68,6 +69,11 @@
 	if (mode == KCOV_MODE_TRACE) {
 		unsigned long *area;
 		unsigned long pos;
+		unsigned long ip = _RET_IP_;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+		ip -= kaslr_offset();
+#endif
 
 		/*
 		 * There is some code that runs in interrupts but for which
@@ -81,7 +87,7 @@
 		/* The first word is number of subsequent PCs. */
 		pos = READ_ONCE(area[0]) + 1;
 		if (likely(pos < t->kcov_size)) {
-			area[pos] = _RET_IP_;
+			area[pos] = ip;
 			WRITE_ONCE(area[0], pos);
 		}
 	}
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index f8c5af5..d3de04b 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -780,6 +780,10 @@
 	else
 		lock_torture_print_module_parms(cxt.cur_ops,
 						"End of test: SUCCESS");
+
+	kfree(cxt.lwsa);
+	kfree(cxt.lrsa);
+
 end:
 	torture_cleanup_end();
 }
@@ -924,6 +928,8 @@
 				       GFP_KERNEL);
 		if (reader_tasks == NULL) {
 			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+			kfree(writer_tasks);
+			writer_tasks = NULL;
 			firsterr = -ENOMEM;
 			goto unwind;
 		}
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index e99d860..10e6d8b 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -110,38 +110,14 @@
 	lock->owner_cpu = -1;
 }
 
-static void __spin_lock_debug(raw_spinlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-
-	for (i = 0; i < loops; i++) {
-		if (arch_spin_trylock(&lock->raw_lock))
-			return;
-		__delay(1);
-	}
-	/* lockup suspected: */
-	spin_bug(lock, "lockup suspected");
-#ifdef CONFIG_SMP
-	trigger_all_cpu_backtrace();
-#endif
-
-	/*
-	 * The trylock above was causing a livelock.  Give the lower level arch
-	 * specific lock code a chance to acquire the lock. We have already
-	 * printed a warning/backtrace at this point. The non-debug arch
-	 * specific code might actually succeed in acquiring the lock.  If it is
-	 * not successful, the end-result is the same - there is no forward
-	 * progress.
-	 */
-	arch_spin_lock(&lock->raw_lock);
-}
-
+/*
+ * We are now relying on the NMI watchdog to detect lockup instead of doing
+ * the detection here with an unfair lock which can cause problems of its own.
+ */
 void do_raw_spin_lock(raw_spinlock_t *lock)
 {
 	debug_spin_lock_before(lock);
-	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
-		__spin_lock_debug(lock);
+	arch_spin_lock(&lock->raw_lock);
 	debug_spin_lock_after(lock);
 }
 
@@ -179,32 +155,6 @@
 
 #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
 
-#if 0		/* __write_lock_debug() can lock up - maybe this can too? */
-static void __read_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_read_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_read_lock(rwlock_t *lock)
 {
 	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
@@ -254,32 +204,6 @@
 	lock->owner_cpu = -1;
 }
 
-#if 0		/* This can cause lockups */
-static void __write_lock_debug(rwlock_t *lock)
-{
-	u64 i;
-	u64 loops = loops_per_jiffy * HZ;
-	int print_once = 1;
-
-	for (;;) {
-		for (i = 0; i < loops; i++) {
-			if (arch_write_trylock(&lock->raw_lock))
-				return;
-			__delay(1);
-		}
-		/* lockup suspected: */
-		if (print_once) {
-			print_once = 0;
-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
-				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
-			dump_stack();
-		}
-	}
-}
-#endif
-
 void do_raw_write_lock(rwlock_t *lock)
 {
 	debug_write_lock_before(lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ccfb9bf..08fd4be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3305,6 +3305,7 @@
 	bool early_notif;
 	u32 old_load;
 	struct related_thread_group *grp;
+	unsigned int flag = 0;
 
 	sched_clock_tick();
 
@@ -3321,10 +3322,12 @@
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
 	sched_freq_tick(cpu);
-	cpufreq_update_util(rq, 0);
 
 	early_notif = early_detection_notify(rq, wallclock);
+	if (early_notif)
+		flag = SCHED_CPUFREQ_WALT | SCHED_CPUFREQ_EARLY_DET;
 
+	cpufreq_update_util(rq, flag);
 	raw_spin_unlock(&rq->lock);
 
 	if (early_notif)
@@ -9620,4 +9623,4 @@
 }
 #endif /* CONFIG_SCHED_WALT */
 
-__read_mostly bool sched_predl;
+__read_mostly bool sched_predl = 1;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c2372f8..2eb966c 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -28,6 +28,7 @@
 struct sugov_tunables {
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
+	unsigned int hispeed_load;
 	unsigned int hispeed_freq;
 	bool pl;
 };
@@ -266,7 +267,7 @@
 }
 
 #define NL_RATIO 75
-#define HISPEED_LOAD 90
+#define DEFAULT_HISPEED_LOAD 90
 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
 			      unsigned long *max)
 {
@@ -280,7 +281,7 @@
 		return;
 
 	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
-					   HISPEED_LOAD,
+					   sg_policy->tunables->hispeed_load,
 					   100));
 
 	if (is_hiload && !is_migration)
@@ -508,6 +509,26 @@
 	return count;
 }
 
+static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->hispeed_load);
+}
+
+static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
+				  const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtouint(buf, 10, &tunables->hispeed_load))
+		return -EINVAL;
+
+	tunables->hispeed_load = min(100U, tunables->hispeed_load);
+
+	return count;
+}
+
 static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
 {
 	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
@@ -559,11 +580,13 @@
 }
 
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
+static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
 static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
+	&hispeed_load.attr,
 	&hispeed_freq.attr,
 	&pl.attr,
 	NULL
@@ -710,6 +733,7 @@
 	}
 
 	tunables->rate_limit_us = LATENCY_MULTIPLIER;
+	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
 	tunables->hispeed_freq = 0;
 	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
 	if (lat)
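
sugov_walt_adjust() now compares CPU utilisation against a tunable percentage of the average capacity instead of the hard-coded 90%: is_hiload is true when cpu_util >= avg_cap * hispeed_load / 100. A standalone sketch of that threshold arithmetic, using the same computation as the kernel's mult_frac():

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Same computation as the kernel's mult_frac(): scale x by numer/denom
 * without overflowing x * numer. */
static unsigned long mult_frac(unsigned long x, unsigned long numer,
			       unsigned long denom)
{
	unsigned long quot = x / denom, rem = x % denom;

	return quot * numer + rem * numer / denom;
}

static bool is_hiload(unsigned long cpu_util, unsigned long avg_cap,
		      unsigned int hispeed_load)
{
	return cpu_util >= mult_frac(avg_cap, hispeed_load, 100);
}

int main(void)
{
	/* With avg_cap 1024 and the default hispeed_load of 90 the threshold
	 * is 921, so a utilisation of 900 is not treated as high load ... */
	assert(!is_hiload(900, 1024, 90));
	/* ... but writing 85 to the new tunable lowers the threshold to 870
	 * and the same utilisation now triggers the hispeed_freq jump. */
	assert(is_hiload(900, 1024, 85));

	printf("90%% of 1024 = %lu\n", mult_frac(1024, 90, 100));
	return 0;
}
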
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 38fa781..d61c570 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5658,10 +5658,10 @@
 
 	for (idx = 0; idx < sge->nr_cap_states; idx++) {
 		if (sge->cap_states[idx].cap >= util)
-			break;
+			return idx;
 	}
 
-	return idx;
+	return (sge->nr_cap_states - 1);
 }
 
 static int find_new_capacity(struct energy_env *eenv,
@@ -6161,15 +6161,15 @@
 	return __task_fits(p, cpu, cpu_util(cpu));
 }
 
-static bool __cpu_overutilized(int cpu, int delta)
+bool __cpu_overutilized(int cpu, unsigned long util)
 {
-	return (capacity_orig_of(cpu) * 1024) <
-	       ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin);
+	return (capacity_orig_of(cpu) * 1024 <
+		util * sysctl_sched_capacity_margin);
 }
 
 bool cpu_overutilized(int cpu)
 {
-	return __cpu_overutilized(cpu, 0);
+	return __cpu_overutilized(cpu, cpu_util(cpu));
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -6996,7 +6996,7 @@
 			if (is_reserved(i))
 				continue;
 
-			if (sched_cpu_high_irqload(cpu))
+			if (sched_cpu_high_irqload(i))
 				continue;
 
 			/*
@@ -7188,7 +7188,9 @@
 		task_util_boosted = 0;
 #endif
 		/* Not enough spare capacity on previous cpu */
-		if (__cpu_overutilized(task_cpu(p), task_util_boosted)) {
+		if (__cpu_overutilized(task_cpu(p),
+				       cpu_util(task_cpu(p)) +
+						task_util_boosted)) {
 			trace_sched_task_util_overutilzed(p, task_cpu(p),
 						task_util(p), target_cpu,
 						target_cpu, 0, need_idle);
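
__cpu_overutilized() now takes the utilisation to test directly, so callers can ask whether a CPU would be overutilized with a task's demand added, rather than passing a delta: the CPU counts as overutilized when capacity_orig * 1024 < util * capacity_margin. A standalone check of that inequality; the margin of 1280 below is assumed purely for illustration, the actual sysctl default is not part of this hunk:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Same form as the patched helper: margin > 1024 leaves headroom. */
static bool cpu_overutilized(unsigned long capacity_orig, unsigned long util,
			     unsigned long margin)
{
	return capacity_orig * 1024 < util * margin;
}

int main(void)
{
	const unsigned long cap = 1024, margin = 1280;	/* assumed margin */
	unsigned long cpu_util = 700, task_util = 200;

	/* The CPU alone is fine: 700 * 1280 < 1024 * 1024 ... */
	assert(!cpu_overutilized(cap, cpu_util, margin));
	/* ... but placing the task there would tip it over, which is the
	 * question callers now ask by passing cpu_util() + task_util. */
	assert(cpu_overutilized(cap, cpu_util + task_util, margin));

	printf("threshold util = %lu\n", cap * 1024 / margin);
	return 0;
}
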
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 1bc4828..35382df 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1724,11 +1724,11 @@
 	unsigned long cpu_capacity;
 	unsigned long best_capacity;
 	unsigned long util, best_cpu_util = ULONG_MAX;
+	unsigned long best_cpu_util_cum = ULONG_MAX;
+	unsigned long util_cum;
+	unsigned long tutil = task_util(task);
 	int best_cpu_idle_idx = INT_MAX;
 	int cpu_idle_idx = -1;
-	long new_util_cum;
-	int max_spare_cap_cpu = -1;
-	long max_spare_cap = -LONG_MAX;
 	bool placement_boost;
 
 	/* Make sure the mask is initialized first */
@@ -1791,55 +1791,55 @@
 			 * double count rt task load.
 			 */
 			util = cpu_util(cpu);
-			if (!cpu_overutilized(cpu)) {
-				if (cpu_isolated(cpu))
+
+			if (__cpu_overutilized(cpu, util + tutil))
+				continue;
+
+			if (cpu_isolated(cpu))
+				continue;
+
+			if (sched_cpu_high_irqload(cpu))
+				continue;
+
+			/* Find the least loaded CPU */
+			if (util > best_cpu_util)
+				continue;
+
+			/*
+			 * If the previous CPU has the same load, keep it as
+			 * best_cpu.
+			 */
+			if (best_cpu_util == util && best_cpu == task_cpu(task))
+				continue;
+
+			/*
+			 * If candidate CPU is the previous CPU, select it.
+			 * Otherwise, if its load is the same as best_cpu's and
+			 * it is in a shallower C-state, select it.  If all of the
+			 * above conditions are equal, select the least cumulative
+			 * window demand CPU.
+			 */
+			if (sysctl_sched_cstate_aware)
+				cpu_idle_idx = idle_get_state_idx(cpu_rq(cpu));
+
+			util_cum = cpu_util_cum(cpu, 0);
+			if (cpu != task_cpu(task) && best_cpu_util == util) {
+				if (best_cpu_idle_idx < cpu_idle_idx)
 					continue;
 
-				if (sched_cpu_high_irqload(cpu))
+				if (best_cpu_idle_idx == cpu_idle_idx &&
+				    best_cpu_util_cum < util_cum)
 					continue;
-
-				new_util_cum = cpu_util_cum(cpu, 0);
-
-				if (!task_in_cum_window_demand(cpu_rq(cpu),
-							       task))
-					new_util_cum += task_util(task);
-
-				trace_sched_cpu_util(task, cpu, task_util(task),
-						     0, new_util_cum, 0);
-
-				if (sysctl_sched_cstate_aware)
-					cpu_idle_idx =
-					     idle_get_state_idx(cpu_rq(cpu));
-
-				if (add_capacity_margin(new_util_cum, cpu) <
-				    capacity_curr_of(cpu)) {
-					if (cpu_idle_idx < best_cpu_idle_idx ||
-					    (best_cpu != task_cpu(task) &&
-					     (best_cpu_idle_idx ==
-					      cpu_idle_idx &&
-					      best_cpu_util > util))) {
-						best_cpu_util = util;
-						best_cpu = cpu;
-						best_cpu_idle_idx =
-						    cpu_idle_idx;
-					}
-				} else {
-					long spare_cap = capacity_of(cpu) -
-							 util;
-
-					if (spare_cap > 0 &&
-					    max_spare_cap < spare_cap) {
-						max_spare_cap_cpu = cpu;
-						max_spare_cap = spare_cap;
-					}
-				}
 			}
+
+			best_cpu_idle_idx = cpu_idle_idx;
+			best_cpu_util_cum = util_cum;
+			best_cpu_util = util;
+			best_cpu = cpu;
 		}
 
 		if (best_cpu != -1) {
 			return best_cpu;
-		} else if (max_spare_cap_cpu != -1) {
-			return max_spare_cap_cpu;
 		} else if (!cpumask_empty(&backup_search_cpu)) {
 			cpumask_copy(&search_cpu, &backup_search_cpu);
 			cpumask_clear(&backup_search_cpu);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0a1e62f..a5b1377 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1457,6 +1457,7 @@
 
 extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
 
+bool __cpu_overutilized(int cpu, unsigned long util);
 bool cpu_overutilized(int cpu);
 
 #endif
@@ -2291,7 +2292,7 @@
 
 #ifdef CONFIG_SCHED_WALT
 	unsigned int exception_flags = SCHED_CPUFREQ_INTERCLUSTER_MIG |
-						SCHED_CPUFREQ_PL;
+				SCHED_CPUFREQ_PL | SCHED_CPUFREQ_EARLY_DET;
 
 	/*
 	 * Skip if we've already reported, but not if this is an inter-cluster
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 217bafe..93643ba 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -838,7 +838,6 @@
 		bg = &per_cpu(cpu_boost_groups, cpu);
 		bg->group[st->idx].boost = 0;
 		bg->group[st->idx].tasks = 0;
-		raw_spin_lock_init(&bg->lock);
 	}
 
 	return 0;
diff --git a/kernel/signal.c b/kernel/signal.c
index deb04d5..e48668c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@
 	 * fresh group stop.  Read comment in do_signal_stop() for details.
 	 */
 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-		sig->flags = SIGNAL_STOP_STOPPED;
+		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
 		return true;
 	}
 	return false;
@@ -845,7 +845,7 @@
 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
 			 * notify its parent. See get_signal_to_deliver().
 			 */
-			signal->flags = why | SIGNAL_STOP_CONTINUED;
+			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
 			signal->group_stop_count = 0;
 			signal->group_exit_code = 0;
 		}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 843fb50..80aa30d 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1557,7 +1557,7 @@
 		base->is_idle = false;
 	} else {
 		if (!is_max_delta)
-			expires = basem + (nextevt - basej) * TICK_NSEC;
+			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
 		/*
 		 * If we expect to sleep more than a tick, mark the base idle.
 		 * Also the tick is stopped so any added timer must forward
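
On 32-bit kernels nextevt, basej and TICK_NSEC are all 32 bits wide, so (nextevt - basej) * TICK_NSEC is evaluated in 32 bits and can wrap before being stored into the 64-bit expires; the added (u64) cast forces the multiplication to happen in 64 bits. A small host-side illustration, with uint32_t standing in for a 32-bit unsigned long:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 10000000u	/* 10 ms tick, i.e. HZ = 100, as an example */

int main(void)
{
	uint32_t basej = 1000, nextevt = 1000 + 500;	/* 500 ticks ahead */
	uint64_t basem = 0;

	/* 32-bit multiply wraps: 500 * 10000000 = 5e9 > UINT32_MAX. */
	uint64_t wrong = basem + (nextevt - basej) * TICK_NSEC;
	/* Widening the first operand keeps the whole product in 64 bits. */
	uint64_t right = basem + (uint64_t)(nextevt - basej) * TICK_NSEC;

	printf("wrong = %" PRIu64 " ns\n", wrong);	/* 705032704 */
	printf("right = %" PRIu64 " ns\n", right);	/* 5000000000 */
	return 0;
}
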
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 5dcb992..41805fb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -203,10 +203,36 @@
 		fmt_cnt++;
 	}
 
-	return __trace_printk(1/* fake ip will not be printed */, fmt,
-			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
-			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
-			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
+/* Horrid workaround for getting va_list handling working with different
+ * argument type combinations generically for 32 and 64 bit archs.
+ */
+#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
+#define __BPF_TP(...)							\
+	__trace_printk(1 /* Fake ip will not be printed. */,		\
+		       fmt, ##__VA_ARGS__)
+
+#define __BPF_ARG1_TP(...)						\
+	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
+	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
+	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
+
+#define __BPF_ARG2_TP(...)						\
+	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
+	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
+	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
+
+#define __BPF_ARG3_TP(...)						\
+	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
+	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
+	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
+	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
+	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
+
+	return __BPF_TP_EMIT();
 }
 
 static const struct bpf_func_proto bpf_trace_printk_proto = {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4f7ea84..6e432ed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -876,6 +876,10 @@
 
 	function_profile_call(trace->func, 0, NULL, NULL);
 
+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 		current->ret_stack[index].subtime = 0;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 66b0714..cddedb5 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7926,4 +7926,4 @@
 }
 
 fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 9daa9b3..0193f58 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1926,6 +1926,10 @@
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
 	create_filter_finish(ps);
 
 	*filterp = filter;
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 0a689bb..305039b 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -221,16 +221,19 @@
 	if (!a)
 		return;
 
-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;
 
 	for (i = 0; i < a->n_pages; i++) {
 		if (!a->pages[i])
 			break;
 		free_page((unsigned long)a->pages[i]);
 	}
+
+	kfree(a->pages);
+
+ free:
+	kfree(a);
 }
 
 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
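The tracing_map change does two things: it frees the a->pages pointer array itself (previously leaked) and routes both the early-exit and the normal path through a single free label. A standalone sketch of that cleanup shape, with invented names and error checks omitted for brevity:

	#include <stdlib.h>

	struct page_array {
		void **pages;
		unsigned int n_pages;
	};

	static void page_array_free(struct page_array *a)
	{
		unsigned int i;

		if (!a)
			return;
		if (!a->pages)
			goto free;

		for (i = 0; i < a->n_pages; i++)
			free(a->pages[i]);	/* free each backing buffer */
		free(a->pages);			/* the pointer array itself leaked before the fix */

	 free:
		free(a);			/* shared exit: always release the descriptor */
	}

	int main(void)
	{
		struct page_array *a = calloc(1, sizeof(*a));

		a->n_pages = 4;
		a->pages = calloc(a->n_pages, sizeof(*a->pages));
		for (unsigned int i = 0; i < a->n_pages; i++)
			a->pages[i] = malloc(64);

		page_array_free(a);
		return 0;
	}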
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 812b8f8..0e5e54f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3737,8 +3737,12 @@
 		return -EINVAL;
 
 	/* creating multiple pwqs breaks ordering guarantee */
-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-		return -EINVAL;
+	if (!list_empty(&wq->pwqs)) {
+		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+			return -EINVAL;
+
+		wq->flags &= ~__WQ_ORDERED;
+	}
 
 	ctx = apply_wqattrs_prepare(wq, attrs);
 	if (!ctx)
@@ -3922,6 +3926,16 @@
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools.  While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;
@@ -4110,13 +4124,14 @@
 	struct pool_workqueue *pwq;
 
 	/* disallow meddling with max_active for ordered workqueues */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	mutex_lock(&wq->mutex);
 
+	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)
@@ -5221,7 +5236,7 @@
 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
 	 * workqueues.
 	 */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return -EINVAL;
 
 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 411b383..2812580 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -145,7 +145,7 @@
 
 config DEBUG_INFO_SPLIT
 	bool "Produce split debuginfo in .dwo files"
-	depends on DEBUG_INFO
+	depends on DEBUG_INFO && !FRV
 	help
 	  Generate debug info into separate .dwo files. This significantly
 	  reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index f344f76..6b2e046 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -131,7 +131,7 @@
 #endif
 	int nbattempts = MAX_NB_ATTEMPTS;
 	size_t repl = 0, ml = 0;
-	u16 delta;
+	u16 delta = 0;
 
 	/* HC4 match finder */
 	lz4hc_insert(hc4, ip);
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 5a0f75a..eead4b3 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -364,11 +364,11 @@
 	}
 
 	miter.consumed = lzeros;
-	sg_miter_stop(&miter);
 
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
+		sg_miter_stop(&miter);
 		pr_info("MPI: mpi too large (%u bits)\n", nbits);
 		return NULL;
 	}
@@ -376,6 +376,8 @@
 	if (nbytes > 0)
 		nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
 
+	sg_miter_stop(&miter);
+
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
 	if (!val)
diff --git a/mm/internal.h b/mm/internal.h
index df6319f..0ee4f54 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -477,6 +477,7 @@
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
 #else
 static inline void try_to_unmap_flush(void)
 {
@@ -484,7 +485,9 @@
 static inline void try_to_unmap_flush_dirty(void)
 {
 }
-
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
 
 extern const struct trace_print_flags pageflag_names[];
diff --git a/mm/madvise.c b/mm/madvise.c
index 279627a..088a5b22 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -21,6 +21,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include "internal.h"
 
 #include <asm/tlb.h>
 
@@ -282,6 +283,7 @@
 		return 0;
 
 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		ptent = *pte;
@@ -329,8 +331,8 @@
 				pte_offset_map_lock(mm, pmd, addr, &ptl);
 				goto out;
 			}
-			put_page(page);
 			unlock_page(page);
+			put_page(page);
 			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 			pte--;
 			addr -= PAGE_SIZE;
@@ -531,6 +533,8 @@
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
 	struct page *p;
+	struct zone *zone;
+
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 	for (; start < end; start += PAGE_SIZE <<
@@ -559,6 +563,11 @@
 		if (ret)
 			return ret;
 	}
+
+	/* Ensure that all poisoned pages are removed from per-cpu lists */
+	for_each_populated_zone(zone)
+		drain_all_pages(zone);
+
 	return 0;
 }
 #endif
diff --git a/mm/memblock.c b/mm/memblock.c
index f1eabcc..3b7d23c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -301,31 +301,27 @@
 }
 
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-
-phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
-					phys_addr_t *addr)
+/**
+ * Discard memory and reserved arrays if they were allocated
+ */
+void __init memblock_discard(void)
 {
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
+	phys_addr_t addr, size;
 
-	*addr = __pa(memblock.reserved.regions);
+	if (memblock.reserved.regions != memblock_reserved_init_regions) {
+		addr = __pa(memblock.reserved.regions);
+		size = PAGE_ALIGN(sizeof(struct memblock_region) *
+				  memblock.reserved.max);
+		__memblock_free_late(addr, size);
+	}
 
-	return PAGE_ALIGN(sizeof(struct memblock_region) *
-			  memblock.reserved.max);
+	if (memblock.memory.regions != memblock_memory_init_regions) {
+		addr = __pa(memblock.memory.regions);
+		size = PAGE_ALIGN(sizeof(struct memblock_region) *
+				  memblock.memory.max);
+		__memblock_free_late(addr, size);
+	}
 }
-
-phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
-					phys_addr_t *addr)
-{
-	if (memblock.memory.regions == memblock_memory_init_regions)
-		return 0;
-
-	*addr = __pa(memblock.memory.regions);
-
-	return PAGE_ALIGN(sizeof(struct memblock_region) *
-			  memblock.memory.max);
-}
-
 #endif
 
 /**
diff --git a/mm/memory.c b/mm/memory.c
index 49d9b42..378ebc0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1124,6 +1124,7 @@
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	pte = start_pte;
+	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = *pte;
@@ -3595,6 +3596,11 @@
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
 
+	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
+					    flags & FAULT_FLAG_INSTRUCTION,
+					    flags & FAULT_FLAG_REMOTE))
+		return VM_FAULT_SIGSEGV;
+
 	/*
 	 * Enable the memcg OOM handling for faults triggered in user
 	 * space.  Kernel faults are handled more gracefully.
@@ -3602,11 +3608,6 @@
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();
 
-	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
-					    flags & FAULT_FLAG_INSTRUCTION,
-					    flags & FAULT_FLAG_REMOTE))
-		return VM_FAULT_SIGSEGV;
-
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
 	else
@@ -3634,8 +3635,18 @@
 	 * further.
 	 */
 	if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
-				&& test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
+				&& test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) {
+
+		/*
+		 * We are going to enforce SIGBUS but the PF path might have
+		 * dropped the mmap_sem already so take it again so that
+		 * we do not break expectations of all arch specific PF paths
+		 * and g-u-p
+		 */
+		if (ret & VM_FAULT_RETRY)
+			down_read(&vma->vm_mm->mmap_sem);
 		ret = VM_FAULT_SIGBUS;
+	}
 
 	return ret;
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ff5657..9547583 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -927,11 +927,6 @@
 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
 	}
 
-	if (vma) {
-		up_read(&current->mm->mmap_sem);
-		vma = NULL;
-	}
-
 	err = 0;
 	if (nmask) {
 		if (mpol_store_user_nodemask(pol)) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 4213d27..eb1f043 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -40,6 +40,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
+#include <linux/ptrace.h>
 
 #include <asm/tlbflush.h>
 
@@ -1665,7 +1666,6 @@
 		const int __user *, nodes,
 		int __user *, status, int, flags)
 {
-	const struct cred *cred = current_cred(), *tcred;
 	struct task_struct *task;
 	struct mm_struct *mm;
 	int err;
@@ -1689,14 +1689,9 @@
 
 	/*
 	 * Check if this process has the right to modify the specified
-	 * process. The right exists if the process has administrative
-	 * capabilities, superuser privileges or the same
-	 * userid as the target process.
+	 * process. Use the regular "ptrace_may_access()" checks.
 	 */
-	tcred = __task_cred(task);
-	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
-	    !capable(CAP_SYS_NICE)) {
+	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
 		rcu_read_unlock();
 		err = -EPERM;
 		goto out;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2c40836..1f2c969 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -74,6 +74,7 @@
 	if (!pte)
 		return 0;
 
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 	do {
 		oldpte = *pte;
diff --git a/mm/mremap.c b/mm/mremap.c
index 30d7d24..1597671 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -142,6 +142,7 @@
 	new_ptl = pte_lockptr(mm, new_pmd);
 	if (new_ptl != old_ptl)
 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+	flush_tlb_batched_pending(vma->vm_mm);
 	arch_enter_lazy_mmu_mode();
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e1e8c63..aa59572 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -146,22 +146,6 @@
 				NULL)
 		count += __free_memory_core(start, end);
 
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
-	{
-		phys_addr_t size;
-
-		/* Free memblock.reserved array if it was allocated */
-		size = get_allocated_memblock_reserved_regions_info(&start);
-		if (size)
-			count += __free_memory_core(start, start + size);
-
-		/* Free memblock.memory array if it was allocated */
-		size = get_allocated_memblock_memory_regions_info(&start);
-		if (size)
-			count += __free_memory_core(start, start + size);
-	}
-#endif
-
 	return count;
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3eb5f68..acf411c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1579,6 +1579,10 @@
 	/* Reinit limits that are based on free pages after the kernel is up */
 	files_maxfiles_init();
 #endif
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+	/* Discard memblock private memory */
+	memblock_discard();
+#endif
 
 	for_each_populated_zone(zone)
 		set_zone_contiguous(zone);
@@ -1865,14 +1869,14 @@
 #endif
 
 	for (page = start_page; page <= end_page;) {
-		/* Make sure we are not inadvertently changing nodes */
-		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
 		if (!pfn_valid_within(page_to_pfn(page))) {
 			page++;
 			continue;
 		}
 
+		/* Make sure we are not inadvertently changing nodes */
+		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
 		if (!PageBuddy(page)) {
 			page++;
 			continue;
@@ -6525,8 +6529,8 @@
 	}
 
 	if (pages && s)
-		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
-			s, pages << (PAGE_SHIFT - 10), start, end);
+		pr_info("Freeing %s memory: %ldK\n",
+			s, pages << (PAGE_SHIFT - 10));
 
 	return pages;
 }
@@ -7416,7 +7420,7 @@
 
 	/* Make sure the range is really isolated. */
 	if (test_pages_isolated(outer_start, end, false)) {
-		pr_info("%s: [%lx, %lx) PFNs busy\n",
+		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
 			__func__, outer_start, end);
 		ret = -EBUSY;
 		goto done;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 65e24fb..fe850b9 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -143,7 +143,7 @@
 		.nr_entries = 0,
 		.entries = entries,
 		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
+		.skip = 2
 	};
 	depot_stack_handle_t handle;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index dfb19f0..4d19dd1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -617,6 +617,13 @@
 	tlb_ubc->flush_required = true;
 
 	/*
+	 * Ensure compiler does not re-order the setting of tlb_flush_batched
+	 * before the PTE is cleared.
+	 */
+	barrier();
+	mm->tlb_flush_batched = true;
+
+	/*
 	 * If the PTE was dirty then it's best to assume it's writable. The
 	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
 	 * before the page is queued for IO.
@@ -643,6 +650,35 @@
 
 	return should_defer;
 }
+
+/*
+ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
+ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
+ * operation such as mprotect or munmap to race between reclaim unmapping
+ * the page and flushing the page. If this race occurs, it potentially allows
+ * access to data via a stale TLB entry. Tracking all mm's that have TLB
+ * batching in flight would be expensive during reclaim so instead track
+ * whether TLB batching occurred in the past and if so then do a flush here
+ * if required. This will cost one additional flush per reclaim cycle paid
+ * by the first operation at risk such as mprotect and munmap.
+ *
+ * This must be called under the PTL so that an access to tlb_flush_batched
+ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
+ * via the PTL.
+ */
+void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	if (mm->tlb_flush_batched) {
+		flush_tlb_mm(mm);
+
+		/*
+		 * Do not allow the compiler to re-order the clearing of
+		 * tlb_flush_batched before the tlb is flushed.
+		 */
+		barrier();
+		mm->tlb_flush_batched = false;
+	}
+}
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
 		struct page *page, bool writable)
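The rmap.c hunks pair the new mm->tlb_flush_batched flag with barrier() so the compiler cannot move the flag store across the PTE clear, or the flag clear across the TLB flush; the page table lock supplies the cross-CPU ordering. A rough userspace sketch of the compiler-barrier idiom only (GCC/Clang inline asm assumed; this is not a lock-free recipe):

	#include <stdio.h>

	#define barrier() __asm__ __volatile__("" ::: "memory")

	static int pte_cleared;
	static int flush_batched;

	static void pretend_clear_pte(void)
	{
		pte_cleared = 1;	/* stands in for clearing the PTE */
		/* Keep the compiler from sinking the flag store above the PTE work. */
		barrier();
		flush_batched = 1;
	}

	static void pretend_flush_pending(void)
	{
		if (flush_batched) {
			printf("flushing TLB for this mm\n");
			/* Keep the compiler from hoisting the clear above the flush. */
			barrier();
			flush_batched = 0;
		}
	}

	int main(void)
	{
		pretend_clear_pte();
		pretend_flush_pending();
		return 0;
	}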
diff --git a/mm/shmem.c b/mm/shmem.c
index 142887f..7a74b6d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1007,7 +1007,11 @@
 			 */
 			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
 				spin_lock(&sbinfo->shrinklist_lock);
-				if (list_empty(&info->shrinklist)) {
+				/*
+				 * _careful to defend against unlocked access to
+				 * ->shrink_list in shmem_unused_huge_shrink()
+				 * ->shrinklist in shmem_unused_huge_shrink()
+				if (list_empty_careful(&info->shrinklist)) {
 					list_add_tail(&info->shrinklist,
 							&sbinfo->shrinklist);
 					sbinfo->shrinklist_len++;
@@ -1774,7 +1778,11 @@
 			 * to shrink under memory pressure.
 			 */
 			spin_lock(&sbinfo->shrinklist_lock);
-			if (list_empty(&info->shrinklist)) {
+			/*
+			 * _careful to defend against unlocked access to
+			 * ->shrinklist in shmem_unused_huge_shrink()
+			 */
+			if (list_empty_careful(&info->shrinklist)) {
 				list_add_tail(&info->shrinklist,
 						&sbinfo->shrinklist);
 				sbinfo->shrinklist_len++;
@@ -3802,7 +3810,7 @@
 	}
 
 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-	if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
+	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
 		shmem_huge = 0; /* just in case it was patched */
@@ -3863,7 +3871,7 @@
 		return -EINVAL;
 
 	shmem_huge = huge;
-	if (shmem_huge < SHMEM_HUGE_DENY)
+	if (shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	return count;
 }
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index fbf251f..4d6b94d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,16 +484,16 @@
 	struct net_device *dev = s->dev;
 	struct sock *sk = s->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	BT_DBG("");
 
 	set_user_nice(current, -15);
 
-	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
+		/* Ensure session->terminate is updated */
+		smp_mb__before_atomic();
 
 		if (atomic_read(&s->terminate))
 			break;
@@ -515,9 +515,8 @@
 				break;
 		netif_wake_queue(dev);
 
-		schedule();
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	/* Cleanup session */
@@ -666,7 +665,7 @@
 	s = __bnep_get_session(req->dst);
 	if (s) {
 		atomic_inc(&s->terminate);
-		wake_up_process(s->task);
+		wake_up_interruptible(sk_sleep(s->sock->sk));
 	} else
 		err = -ENOENT;
 
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 9e59b66..1152ce3 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -280,16 +280,16 @@
 	struct cmtp_session *session = arg;
 	struct sock *sk = session->sock->sk;
 	struct sk_buff *skb;
-	wait_queue_t wait;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	BT_DBG("session %p", session);
 
 	set_user_nice(current, -15);
 
-	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
+		/* Ensure session->terminate is updated */
+		smp_mb__before_atomic();
 
 		if (atomic_read(&session->terminate))
 			break;
@@ -306,9 +306,8 @@
 
 		cmtp_process_transmit(session);
 
-		schedule();
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
-	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 
 	down_write(&cmtp_session_sem);
@@ -393,7 +392,7 @@
 		err = cmtp_attach_device(session);
 		if (err < 0) {
 			atomic_inc(&session->terminate);
-			wake_up_process(session->task);
+			wake_up_interruptible(sk_sleep(session->sock->sk));
 			up_write(&cmtp_session_sem);
 			return err;
 		}
@@ -431,7 +430,11 @@
 
 		/* Stop session thread */
 		atomic_inc(&session->terminate);
-		wake_up_process(session->task);
+
+		/* Ensure session->terminate is updated */
+		smp_mb__after_atomic();
+
+		wake_up_interruptible(sk_sleep(session->sock->sk));
 	} else
 		err = -ENOENT;
 
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0bec458..1fc0764 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -36,6 +36,7 @@
 #define VERSION "1.2"
 
 static DECLARE_RWSEM(hidp_session_sem);
+static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
 static LIST_HEAD(hidp_session_list);
 
 static unsigned char hidp_keycode[256] = {
@@ -1068,12 +1069,12 @@
  * Wake up session thread and notify it to stop. This is asynchronous and
  * returns immediately. Call this whenever a runtime error occurs and you want
  * the session to stop.
- * Note: wake_up_process() performs any necessary memory-barriers for us.
+ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
  */
 static void hidp_session_terminate(struct hidp_session *session)
 {
 	atomic_inc(&session->terminate);
-	wake_up_process(session->task);
+	wake_up_interruptible(&hidp_session_wq);
 }
 
 /*
@@ -1180,7 +1181,9 @@
 	struct sock *ctrl_sk = session->ctrl_sock->sk;
 	struct sock *intr_sk = session->intr_sock->sk;
 	struct sk_buff *skb;
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
+	add_wait_queue(&hidp_session_wq, &wait);
 	for (;;) {
 		/*
 		 * This thread can be woken up two ways:
@@ -1188,12 +1191,10 @@
 		 *    session->terminate flag and wakes this thread up.
 		 *  - Via modifying the socket state of ctrl/intr_sock. This
 		 *    thread is woken up by ->sk_state_changed().
-		 *
-		 * Note: set_current_state() performs any necessary
-		 * memory-barriers for us.
 		 */
-		set_current_state(TASK_INTERRUPTIBLE);
 
+		/* Ensure session->terminate is updated */
+		smp_mb__before_atomic();
 		if (atomic_read(&session->terminate))
 			break;
 
@@ -1227,11 +1228,22 @@
 		hidp_process_transmit(session, &session->ctrl_transmit,
 				      session->ctrl_sock);
 
-		schedule();
+		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 	}
+	remove_wait_queue(&hidp_session_wq, &wait);
 
 	atomic_inc(&session->terminate);
-	set_current_state(TASK_RUNNING);
+
+	/* Ensure session->terminate is updated */
+	smp_mb__after_atomic();
+}
+
+static int hidp_session_wake_function(wait_queue_t *wait,
+				      unsigned int mode,
+				      int sync, void *key)
+{
+	wake_up_interruptible(&hidp_session_wq);
+	return false;
 }
 
 /*
@@ -1244,7 +1256,8 @@
 static int hidp_session_thread(void *arg)
 {
 	struct hidp_session *session = arg;
-	wait_queue_t ctrl_wait, intr_wait;
+	DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
+	DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
 
 	BT_DBG("session %p", session);
 
@@ -1254,8 +1267,6 @@
 	set_user_nice(current, -15);
 	hidp_set_timer(session);
 
-	init_waitqueue_entry(&ctrl_wait, current);
-	init_waitqueue_entry(&intr_wait, current);
 	add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
 	add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
 	/* This memory barrier is paired with wq_has_sleeper(). See
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 577f1c0..ffd09c1 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -58,7 +58,7 @@
 				       u8 code, u8 ident, u16 dlen, void *data);
 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
 			   void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
 
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
@@ -1473,7 +1473,7 @@
 
 			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-				       l2cap_build_conf_req(chan, buf), buf);
+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
 			chan->num_conf_req++;
 		}
 
@@ -2977,12 +2977,15 @@
 	return len;
 }
 
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
 {
 	struct l2cap_conf_opt *opt = *ptr;
 
 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
 
+	if (size < L2CAP_CONF_OPT_SIZE + len)
+		return;
+
 	opt->type = type;
 	opt->len  = len;
 
@@ -3007,7 +3010,7 @@
 	*ptr += L2CAP_CONF_OPT_SIZE + len;
 }
 
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
 {
 	struct l2cap_conf_efs efs;
 
@@ -3035,7 +3038,7 @@
 	}
 
 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
-			   (unsigned long) &efs);
+			   (unsigned long) &efs, size);
 }
 
 static void l2cap_ack_timeout(struct work_struct *work)
@@ -3181,11 +3184,12 @@
 	chan->ack_win = chan->tx_win;
 }
 
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
 {
 	struct l2cap_conf_req *req = data;
 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
 	void *ptr = req->data;
+	void *endptr = data + data_size;
 	u16 size;
 
 	BT_DBG("chan %p", chan);
@@ -3210,7 +3214,7 @@
 
 done:
 	if (chan->imtu != L2CAP_DEFAULT_MTU)
-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
 
 	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
@@ -3229,7 +3233,7 @@
 		rfc.max_pdu_size    = 0;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-				   (unsigned long) &rfc);
+				   (unsigned long) &rfc, endptr - ptr);
 		break;
 
 	case L2CAP_MODE_ERTM:
@@ -3249,21 +3253,21 @@
 				       L2CAP_DEFAULT_TX_WINDOW);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-				   (unsigned long) &rfc);
+				   (unsigned long) &rfc, endptr - ptr);
 
 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
-			l2cap_add_opt_efs(&ptr, chan);
+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
 
 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
-					   chan->tx_win);
+					   chan->tx_win, endptr - ptr);
 
 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
 			if (chan->fcs == L2CAP_FCS_NONE ||
 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
 				chan->fcs = L2CAP_FCS_NONE;
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
-						   chan->fcs);
+						   chan->fcs, endptr - ptr);
 			}
 		break;
 
@@ -3281,17 +3285,17 @@
 		rfc.max_pdu_size = cpu_to_le16(size);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-				   (unsigned long) &rfc);
+				   (unsigned long) &rfc, endptr - ptr);
 
 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
-			l2cap_add_opt_efs(&ptr, chan);
+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
 
 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
 			if (chan->fcs == L2CAP_FCS_NONE ||
 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
 				chan->fcs = L2CAP_FCS_NONE;
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
-						   chan->fcs);
+						   chan->fcs, endptr - ptr);
 			}
 		break;
 	}
@@ -3302,10 +3306,11 @@
 	return ptr - data;
 }
 
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
 {
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
+	void *endptr = data + data_size;
 	void *req = chan->conf_req;
 	int len = chan->conf_len;
 	int type, hint, olen;
@@ -3407,7 +3412,7 @@
 			return -ECONNREFUSED;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-				   (unsigned long) &rfc);
+				   (unsigned long) &rfc, endptr - ptr);
 	}
 
 	if (result == L2CAP_CONF_SUCCESS) {
@@ -3420,7 +3425,7 @@
 			chan->omtu = mtu;
 			set_bit(CONF_MTU_DONE, &chan->conf_state);
 		}
-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
 
 		if (remote_efs) {
 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
@@ -3434,7 +3439,7 @@
 
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
 						   sizeof(efs),
-						   (unsigned long) &efs);
+						   (unsigned long) &efs, endptr - ptr);
 			} else {
 				/* Send PENDING Conf Rsp */
 				result = L2CAP_CONF_PENDING;
@@ -3467,7 +3472,7 @@
 			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					   sizeof(rfc), (unsigned long) &rfc);
+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
 
 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
 				chan->remote_id = efs.id;
@@ -3481,7 +3486,7 @@
 					le32_to_cpu(efs.sdu_itime);
 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
 						   sizeof(efs),
-						   (unsigned long) &efs);
+						   (unsigned long) &efs, endptr - ptr);
 			}
 			break;
 
@@ -3495,7 +3500,7 @@
 			set_bit(CONF_MODE_DONE, &chan->conf_state);
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
-					   (unsigned long) &rfc);
+					   (unsigned long) &rfc, endptr - ptr);
 
 			break;
 
@@ -3517,10 +3522,11 @@
 }
 
 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
-				void *data, u16 *result)
+				void *data, size_t size, u16 *result)
 {
 	struct l2cap_conf_req *req = data;
 	void *ptr = req->data;
+	void *endptr = data + size;
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
@@ -3538,13 +3544,13 @@
 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
 			} else
 				chan->imtu = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
 			chan->flush_to = val;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-					   2, chan->flush_to);
+					   2, chan->flush_to, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_RFC:
@@ -3558,13 +3564,13 @@
 			chan->fcs = 0;
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-					   sizeof(rfc), (unsigned long) &rfc);
+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_EWS:
 			chan->ack_win = min_t(u16, val, chan->ack_win);
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
-					   chan->tx_win);
+					   chan->tx_win, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_EFS:
@@ -3577,7 +3583,7 @@
 				return -ECONNREFUSED;
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
-					   (unsigned long) &efs);
+					   (unsigned long) &efs, endptr - ptr);
 			break;
 
 		case L2CAP_CONF_FCS:
@@ -3682,7 +3688,7 @@
 		return;
 
 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-		       l2cap_build_conf_req(chan, buf), buf);
+		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
 	chan->num_conf_req++;
 }
 
@@ -3890,7 +3896,7 @@
 		u8 buf[128];
 		set_bit(CONF_REQ_SENT, &chan->conf_state);
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-			       l2cap_build_conf_req(chan, buf), buf);
+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
 		chan->num_conf_req++;
 	}
 
@@ -3968,7 +3974,7 @@
 			break;
 
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-			       l2cap_build_conf_req(chan, req), req);
+			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
 		chan->num_conf_req++;
 		break;
 
@@ -4080,7 +4086,7 @@
 	}
 
 	/* Complete config. */
-	len = l2cap_parse_conf_req(chan, rsp);
+	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
 	if (len < 0) {
 		l2cap_send_disconn_req(chan, ECONNRESET);
 		goto unlock;
@@ -4114,7 +4120,7 @@
 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
 		u8 buf[64];
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-			       l2cap_build_conf_req(chan, buf), buf);
+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
 		chan->num_conf_req++;
 	}
 
@@ -4174,7 +4180,7 @@
 			char buf[64];
 
 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-						   buf, &result);
+						   buf, sizeof(buf), &result);
 			if (len < 0) {
 				l2cap_send_disconn_req(chan, ECONNRESET);
 				goto done;
@@ -4204,7 +4210,7 @@
 			/* throw out any old stored conf requests */
 			result = L2CAP_CONF_SUCCESS;
 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
-						   req, &result);
+						   req, sizeof(req), &result);
 			if (len < 0) {
 				l2cap_send_disconn_req(chan, ECONNRESET);
 				goto done;
@@ -4781,7 +4787,7 @@
 			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
 				       L2CAP_CONF_REQ,
-				       l2cap_build_conf_req(chan, buf), buf);
+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
 			chan->num_conf_req++;
 		}
 	}
@@ -7457,7 +7463,7 @@
 				set_bit(CONF_REQ_SENT, &chan->conf_state);
 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
 					       L2CAP_CONF_REQ,
-					       l2cap_build_conf_req(chan, buf),
+					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
 					       buf);
 				chan->num_conf_req++;
 			}
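All of the l2cap_core.c changes follow one pattern: the remaining space in the response buffer (endptr - ptr) is passed down so l2cap_add_conf_opt() can refuse to emit an option that would overrun it. A small standalone sketch of such a bounded type/length/value writer; the 2-byte header mirrors L2CAP_CONF_OPT_SIZE, the rest is invented:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define OPT_HDR_SIZE 2	/* type + len, like L2CAP_CONF_OPT_SIZE */

	/* Append a TLV option only if it fits in the space left in the buffer. */
	static void add_opt(uint8_t **ptr, uint8_t type, uint8_t len,
			    const void *val, size_t space_left)
	{
		if (space_left < OPT_HDR_SIZE + (size_t)len)
			return;		/* would overflow the reply buffer: drop it */

		(*ptr)[0] = type;
		(*ptr)[1] = len;
		memcpy(*ptr + OPT_HDR_SIZE, val, len);
		*ptr += OPT_HDR_SIZE + len;
	}

	int main(void)
	{
		uint8_t buf[8];
		uint8_t *ptr = buf, *end = buf + sizeof(buf);
		uint16_t mtu = 672;
		uint8_t big[16] = { 0 };

		add_opt(&ptr, 0x01, sizeof(mtu), &mtu, (size_t)(end - ptr));	/* fits */
		add_opt(&ptr, 0x04, sizeof(big), big, (size_t)(end - ptr));	/* skipped */

		printf("bytes used: %ld of %zu\n", (long)(ptr - buf), sizeof(buf));
		return 0;
	}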
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index fcaa484..04eea2f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -48,6 +48,9 @@
 		return NETDEV_TX_OK;
 	}
 
+#ifdef CONFIG_NET_SWITCHDEV
+	skb->offload_fwd_mark = 0;
+#endif
 	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 58dfa23..4fa4011 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -351,7 +351,7 @@
 	if (flags & MSG_PEEK) {
 		err = -ENOENT;
 		spin_lock_bh(&sk->sk_receive_queue.lock);
-		if (skb == skb_peek(&sk->sk_receive_queue)) {
+		if (skb->next) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			atomic_dec(&skb->users);
 			err = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 610e5f8..bc129b0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2705,7 +2705,7 @@
 {
 	if (tx_path)
 		return skb->ip_summed != CHECKSUM_PARTIAL &&
-		       skb->ip_summed != CHECKSUM_NONE;
+		       skb->ip_summed != CHECKSUM_UNNECESSARY;
 
 	return skb->ip_summed == CHECKSUM_NONE;
 }
@@ -5367,12 +5367,13 @@
  * Find out if a device is linked to an upper device and return true in case
  * it is. The caller must hold the RTNL lock.
  */
-static bool netdev_has_any_upper_dev(struct net_device *dev)
+bool netdev_has_any_upper_dev(struct net_device *dev)
 {
 	ASSERT_RTNL();
 
 	return !list_empty(&dev->all_adj_list.upper);
 }
+EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
 /**
  * netdev_master_upper_dev_get - Get master upper device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d2..151e047 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -28,6 +28,7 @@
 
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
+	ifr.ifr_name[IFNAMSIZ-1] = 0;
 
 	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
 	if (error)
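The one-line dev_ioctl.c fix forces ifr_name to be NUL-terminated after copy_from_user(), since userspace may pass a name that fills the whole field. A tiny userspace analogue of the same defensive termination (buffer size and names invented):

	#include <stdio.h>
	#include <string.h>

	#define NAME_SIZE 16	/* plays the role of IFNAMSIZ */

	/* 'raw' may come from an untrusted source and need not be terminated. */
	static void handle_name(const char raw[NAME_SIZE])
	{
		char name[NAME_SIZE];

		memcpy(name, raw, NAME_SIZE);
		name[NAME_SIZE - 1] = '\0';	/* the fix: never trust the terminator */
		printf("looking up '%s'\n", name);
	}

	int main(void)
	{
		char evil[NAME_SIZE];

		memset(evil, 'A', sizeof(evil));	/* 16 bytes, no NUL at all */
		handle_name(evil);
		return 0;
	}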
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9c6fd7f..4d26297 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1965,7 +1965,8 @@
 		struct sockaddr *sa;
 		int len;
 
-		len = sizeof(sa_family_t) + dev->addr_len;
+		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+						  sizeof(*sa));
 		sa = kmalloc(len, GFP_KERNEL);
 		if (!sa) {
 			err = -ENOMEM;
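The rtnetlink change sizes the allocation at sizeof(sa_family_t) plus the larger of dev->addr_len and sizeof(*sa), so code that treats the buffer as a struct sockaddr never touches memory beyond a short hardware address. A simplified userspace sketch of that sizing rule (the struct layout here is invented, not the real struct sockaddr):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct fake_sockaddr {			/* simplified stand-in */
		unsigned short family;
		char data[14];
	};

	int main(void)
	{
		size_t addr_len = 4;		/* short hardware address */
		size_t body = addr_len > sizeof(struct fake_sockaddr) ?
			      addr_len : sizeof(struct fake_sockaddr);
		size_t len = sizeof(unsigned short) + body;

		struct fake_sockaddr *sa = malloc(len);
		if (!sa)
			return 1;
		memset(sa, 0, sizeof(*sa));	/* safe: len >= sizeof(*sa) */
		sa->family = 1;
		memcpy(sa->data, "\xaa\xbb\xcc\xdd", addr_len);
		printf("allocated %zu bytes, struct needs %zu\n", len, sizeof(*sa));
		free(sa);
		return 0;
	}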
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 1704948..f227f00 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -1471,9 +1471,12 @@
 	 * singleton values (which always leads to failure).
 	 * These settings can still (later) be overridden via sockopts.
 	 */
-	if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
-	    ccid_get_builtin_ccids(&rx.val, &rx.len))
+	if (ccid_get_builtin_ccids(&tx.val, &tx.len))
 		return -ENOBUFS;
+	if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
+		kfree(tx.val);
+		return -ENOBUFS;
+	}
 
 	if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
 	    !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
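The feat.c change splits the short-circuited allocation test so that when the second ccid_get_builtin_ccids() call fails, the buffer from the first call is freed instead of leaked. The generic shape, as a userspace sketch with invented helper names:

	#include <stdlib.h>
	#include <string.h>
	#include <stdio.h>

	/* Stand-in for ccid_get_builtin_ccids(): allocates a small list. */
	static int get_list(unsigned char **val, unsigned char *len)
	{
		*len = 2;
		*val = malloc(*len);
		if (!*val)
			return -1;
		memcpy(*val, "\x02\x03", *len);
		return 0;
	}

	static int init_lists(void)
	{
		unsigned char *tx_val, *rx_val;
		unsigned char tx_len, rx_len;

		if (get_list(&tx_val, &tx_len))
			return -1;
		if (get_list(&rx_val, &rx_len)) {
			free(tx_val);		/* the fix: don't leak the first list */
			return -1;
		}

		/* ... use both lists ... */
		free(tx_val);
		free(rx_val);
		return 0;
	}

	int main(void)
	{
		printf("init_lists() = %d\n", init_lists());
		return 0;
	}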
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 86b0933..8fc1600 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -637,6 +637,7 @@
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 2ac9d2a..28e8252 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -380,6 +380,7 @@
 		goto drop_and_free;
 
 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	reqsk_put(req);
 	return 0;
 
 drop_and_free:
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9fe25bf..b68168f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -24,6 +24,7 @@
 #include <net/checksum.h>
 
 #include <net/inet_sock.h>
+#include <net/inet_common.h>
 #include <net/sock.h>
 #include <net/xfrm.h>
 
@@ -170,6 +171,15 @@
 
 EXPORT_SYMBOL_GPL(dccp_packet_name);
 
+static void dccp_sk_destruct(struct sock *sk)
+{
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+	dp->dccps_hc_tx_ccid = NULL;
+	inet_sock_destruct(sk);
+}
+
 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
@@ -179,6 +189,7 @@
 	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
 	sk->sk_state		= DCCP_CLOSED;
 	sk->sk_write_space	= dccp_write_space;
+	sk->sk_destruct		= dccp_sk_destruct;
 	icsk->icsk_sync_mss	= dccp_sync_mss;
 	dp->dccps_mss_cache	= 536;
 	dp->dccps_rate_last	= jiffies;
@@ -201,10 +212,7 @@
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
-	/*
-	 * DCCP doesn't use sk_write_queue, just sk_send_head
-	 * for retransmissions
-	 */
+	__skb_queue_purge(&sk->sk_write_queue);
 	if (sk->sk_send_head != NULL) {
 		kfree_skb(sk->sk_send_head);
 		sk->sk_send_head = NULL;
@@ -222,8 +230,7 @@
 		dp->dccps_hc_rx_ackvec = NULL;
 	}
 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
-	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
-	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
+	dp->dccps_hc_rx_ccid = NULL;
 
 	/* clean up feature negotiation state */
 	dccp_feat_list_purge(&dp->dccps_featneg);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 30d875d..f85b08b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -580,19 +580,14 @@
 {
 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
 		net_ieee802154_lowpan(net);
-	int res;
 
 	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
 
-	res = inet_frags_init_net(&ieee802154_lowpan->frags);
-	if (res)
-		return res;
-	res = lowpan_frags_ns_sysctl_register(net);
-	if (res)
-		inet_frags_uninit_net(&ieee802154_lowpan->frags);
-	return res;
+	inet_frags_init_net(&ieee802154_lowpan->frags);
+
+	return lowpan_frags_ns_sysctl_register(net);
 }
 
 static void __net_exit lowpan_frags_exit_net(struct net *net)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ceddf42..2b887c5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1714,6 +1714,13 @@
 	net->ipv4.sysctl_ip_dynaddr = 0;
 	net->ipv4.sysctl_ip_early_demux = 1;
 
+	/* Some igmp sysctl, whose values are always used */
+	net->ipv4.sysctl_igmp_max_memberships = 20;
+	net->ipv4.sysctl_igmp_max_msf = 10;
+	/* IGMP reports for link-local multicast groups are enabled by default */
+	net->ipv4.sysctl_igmp_llm_reports = 1;
+	net->ipv4.sysctl_igmp_qrv = 2;
+
 	return 0;
 }
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 37f4578..c8409ca0 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1320,13 +1320,14 @@
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+	fib_trie_init();
 
 	register_pernet_subsys(&fib_net_ops);
+
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-	fib_trie_init();
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 }
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 7563831..38c1c97 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1044,15 +1044,17 @@
 	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (!fi)
 		goto failure;
-	fib_info_cnt++;
 	if (cfg->fc_mx) {
 		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
-		if (!fi->fib_metrics)
-			goto failure;
+		if (unlikely(!fi->fib_metrics)) {
+			kfree(fi);
+			return ERR_PTR(err);
+		}
 		atomic_set(&fi->fib_metrics->refcnt, 1);
-	} else
+	} else {
 		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
-
+	}
+	fib_info_cnt++;
 	fi->fib_net = net;
 	fi->fib_protocol = cfg->fc_protocol;
 	fi->fib_scope = cfg->fc_scope;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 19930da..08575e3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2974,12 +2974,6 @@
 		goto out_sock;
 	}
 
-	/* Sysctl initialization */
-	net->ipv4.sysctl_igmp_max_memberships = 20;
-	net->ipv4.sysctl_igmp_max_msf = 10;
-	/* IGMP reports for link-local multicast groups are enabled by default */
-	net->ipv4.sysctl_igmp_llm_reports = 1;
-	net->ipv4.sysctl_igmp_qrv = 2;
 	return 0;
 
 out_sock:
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index b5e9317..631c0d0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -234,10 +234,8 @@
 	cond_resched();
 
 	if (read_seqretry(&f->rnd_seqlock, seq) ||
-	    percpu_counter_sum(&nf->mem))
+	    sum_frag_mem_limit(nf))
 		goto evict_again;
-
-	percpu_counter_destroy(&nf->mem);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index bbe7f72..453db95 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -835,8 +835,6 @@
 
 static int __net_init ipv4_frags_init_net(struct net *net)
 {
-	int res;
-
 	/* Fragment cache limits.
 	 *
 	 * The fragment memory accounting code, (tries to) account for
@@ -862,13 +860,9 @@
 
 	net->ipv4.frags.max_dist = 64;
 
-	res = inet_frags_init_net(&net->ipv4.frags);
-	if (res)
-		return res;
-	res = ip4_frags_ns_ctl_register(net);
-	if (res)
-		inet_frags_uninit_net(&net->ipv4.frags);
-	return res;
+	inet_frags_init_net(&net->ipv4.frags);
+
+	return ip4_frags_ns_ctl_register(net);
 }
 
 static void __net_exit ipv4_frags_exit_net(struct net *net)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 2c18bcf..e60f9fa 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -958,10 +958,12 @@
 		csummode = CHECKSUM_PARTIAL;
 
 	cork->length += length;
-	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    ((length > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);
@@ -1277,6 +1279,7 @@
 		return -EINVAL;
 
 	if ((size + skb->len > mtu) &&
+	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
 		if (skb->ip_summed != CHECKSUM_PARTIAL)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 5719d6b..bd7f183 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -609,8 +609,8 @@
 		ip_rt_put(rt);
 		goto tx_dropped;
 	}
-	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
-		      key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+	iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
+		      df, !net_eq(tunnel->net, dev_net(dev)));
 	return;
 tx_error:
 	dev->stats.tx_errors++;
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd82202..146d861 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
 	skb_reserve(nskb, LL_MAX_HEADER);
 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
 				   ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 18c6e79..a4faf30 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1253,7 +1253,7 @@
 	if (mtu)
 		return mtu;
 
-	mtu = dst->dev->mtu;
+	mtu = READ_ONCE(dst->dev->mtu);
 
 	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
 		if (rt->rt_uses_gateway && mtu > 576)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0dc6286..f56a668 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -334,6 +334,7 @@
 	treq = tcp_rsk(req);
 	treq->rcv_isn		= ntohl(th->seq) - 1;
 	treq->snt_isn		= cookie;
+	treq->txhash		= net_tx_rndhash();
 	req->mss		= mss;
 	ireq->ir_num		= ntohs(th->dest);
 	ireq->ir_rmt_port	= th->source;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 51ac77e..ccc484a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1010,7 +1010,7 @@
 		.data		= &init_net.ipv4.sysctl_tcp_notsent_lowat,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_douintvec,
 	},
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	{
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7c90130..4946e8f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2306,6 +2306,10 @@
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
+	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+	 * issue in __tcp_select_window()
+	 */
+	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
 	tcp_init_send_head(sk);
 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
 	__sk_dst_reset(sk);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 0ea66c2..cb8db34 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -83,7 +83,8 @@
 		cwnd_gain:10,	/* current gain for setting cwnd */
 		full_bw_cnt:3,	/* number of rounds without large bw gains */
 		cycle_idx:3,	/* current index in pacing_gain cycle array */
-		unused_b:6;
+		has_seen_rtt:1, /* have we seen an RTT sample yet? */
+		unused_b:5;
 	u32	prior_cwnd;	/* prior cwnd upon entering loss recovery */
 	u32	full_bw;	/* recent bw, to estimate if pipe is full */
 };
@@ -182,6 +183,35 @@
 	return rate >> BW_SCALE;
 }
 
+/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
+static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+	u64 rate = bw;
+
+	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+	return rate;
+}
+
+/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct bbr *bbr = inet_csk_ca(sk);
+	u64 bw;
+	u32 rtt_us;
+
+	if (tp->srtt_us) {		/* any RTT sample yet? */
+		rtt_us = max(tp->srtt_us >> 3, 1U);
+		bbr->has_seen_rtt = 1;
+	} else {			 /* no RTT sample yet */
+		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
+	}
+	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	do_div(bw, rtt_us);
+	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
+}
+
 /* Pace using current bw estimate and a gain factor. In order to help drive the
  * network toward lower queues while maintaining high utilization and low
  * latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -191,12 +221,13 @@
  */
 static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 rate = bw;
+	u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);
 
-	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
-	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
-	if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+	if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
+		bbr_init_pacing_rate_from_rtt(sk);
+	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
 		sk->sk_pacing_rate = rate;
 }
 
@@ -769,7 +800,6 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u64 bw;
 
 	bbr->prior_cwnd = 0;
 	bbr->tso_segs_goal = 0;	 /* default segs per skb until first ACK */
@@ -785,11 +815,8 @@
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
-	/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
-	do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
-	sk->sk_pacing_rate = 0;		/* force an update of sk_pacing_rate */
-	bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+	bbr->has_seen_rtt = 0;
+	bbr_init_pacing_rate_from_rtt(sk);
 
 	bbr->restore_cwnd = 0;
 	bbr->round_start = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3d980d6..491b03a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2561,8 +2561,8 @@
 		return;
 
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
 		tp->snd_cwnd = tp->snd_ssthresh;
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
@@ -3037,8 +3037,7 @@
 			/* delta may not be positive if the socket is locked
 			 * when the retrans timer fires and is rescheduled.
 			 */
-			if (delta > 0)
-				rto = delta;
+			rto = max(delta, 1);
 		}
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
 					  TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6bf011..a835716 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3344,6 +3344,9 @@
 	struct sk_buff *buff;
 	int err;
 
+	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+		return -EHOSTUNREACH; /* Routing failure or similar. */
+
 	tcp_connect_init(sk);
 
 	if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 732060d..d3d3ef6 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -686,7 +686,8 @@
 		goto death;
 	}
 
-	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
 		goto out;
 
 	elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe24424..7c1f5f6 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -818,7 +818,7 @@
 	if (is_udplite)  				 /*     UDP-Lite      */
 		csum = udplite_csum(skb);
 
-	else if (sk->sk_no_check_tx) {   /* UDP csum disabled */
+	else if (sk->sk_no_check_tx && !skb_is_gso(skb)) {   /* UDP csum off */
 
 		skb->ip_summed = CHECKSUM_NONE;
 		goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index b2be1d9..6de016f 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -232,7 +232,7 @@
 	if (uh->check == 0)
 		uh->check = CSUM_MANGLED_0;
 
-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* If there is no outer header we can fake a checksum offload
 	 * due to the fact that we have already done the checksum in
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index fe5305a..371312b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5485,7 +5485,7 @@
 		 * our DAD process, so we don't need
 		 * to do it again
 		 */
-		if (!(ifp->rt->rt6i_node))
+		if (!rcu_access_pointer(ifp->rt->rt6i_node))
 			ip6_ins_rt(ifp->rt);
 		if (ifp->idev->cnf.forwarding)
 			addrconf_join_anycast(ifp);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4345ee3..5da8649 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -148,11 +148,23 @@
 	return fn;
 }
 
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
 {
 	kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void node_free_rcu(struct rcu_head *head)
+{
+	struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
+	kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free(struct fib6_node *fn)
+{
+	call_rcu(&fn->rcu, node_free_rcu);
+}
+
 static void rt6_rcu_free(struct rt6_info *rt)
 {
 	call_rcu(&rt->dst.rcu_head, dst_rcu_free);
@@ -189,6 +201,12 @@
 	}
 }
 
+static void fib6_free_table(struct fib6_table *table)
+{
+	inetpeer_invalidate_tree(&table->tb6_peers);
+	kfree(table);
+}
+
 static void fib6_link_table(struct net *net, struct fib6_table *tb)
 {
 	unsigned int h;
@@ -589,9 +607,9 @@
 
 		if (!in || !ln) {
 			if (in)
-				node_free(in);
+				node_free_immediate(in);
 			if (ln)
-				node_free(ln);
+				node_free_immediate(ln);
 			return ERR_PTR(-ENOMEM);
 		}
 
@@ -862,7 +880,7 @@
 
 		rt->dst.rt6_next = iter;
 		*ins = rt;
-		rt->rt6i_node = fn;
+		rcu_assign_pointer(rt->rt6i_node, fn);
 		atomic_inc(&rt->rt6i_ref);
 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
 		info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
@@ -887,7 +905,7 @@
 			return err;
 
 		*ins = rt;
-		rt->rt6i_node = fn;
+		rcu_assign_pointer(rt->rt6i_node, fn);
 		rt->dst.rt6_next = iter->dst.rt6_next;
 		atomic_inc(&rt->rt6i_ref);
 		inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
@@ -897,6 +915,8 @@
 		}
 		nsiblings = iter->rt6i_nsiblings;
 		fib6_purge_rt(iter, fn, info->nl_net);
+		if (fn->rr_ptr == iter)
+			fn->rr_ptr = NULL;
 		rt6_release(iter);
 
 		if (nsiblings) {
@@ -909,6 +929,8 @@
 				if (rt6_qualify_for_ecmp(iter)) {
 					*ins = iter->dst.rt6_next;
 					fib6_purge_rt(iter, fn, info->nl_net);
+					if (fn->rr_ptr == iter)
+						fn->rr_ptr = NULL;
 					rt6_release(iter);
 					nsiblings--;
 				} else {
@@ -997,7 +1019,7 @@
 			/* Create subtree root node */
 			sfn = node_alloc();
 			if (!sfn)
-				goto st_failure;
+				goto failure;
 
 			sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
 			atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
@@ -1013,12 +1035,12 @@
 
 			if (IS_ERR(sn)) {
 				/* If it is failed, discard just allocated
-				   root, and then (in st_failure) stale node
+				   root, and then (in failure) stale node
 				   in main tree.
 				 */
-				node_free(sfn);
+				node_free_immediate(sfn);
 				err = PTR_ERR(sn);
-				goto st_failure;
+				goto failure;
 			}
 
 			/* Now link new subtree to main tree */
@@ -1032,7 +1054,7 @@
 
 			if (IS_ERR(sn)) {
 				err = PTR_ERR(sn);
-				goto st_failure;
+				goto failure;
 			}
 		}
 
@@ -1074,22 +1096,22 @@
 			atomic_inc(&pn->leaf->rt6i_ref);
 		}
 #endif
-		if (!(rt->dst.flags & DST_NOCACHE))
-			dst_free(&rt->dst);
+		goto failure;
 	}
 	return err;
 
-#ifdef CONFIG_IPV6_SUBTREES
-	/* Subtree creation failed, probably main tree node
-	   is orphan. If it is, shoot it.
+failure:
+	/* fn->leaf could be NULL if fn is an intermediate node and we
+	 * failed to add the new route to it in both subtree creation
+	 * failure and fib6_add_rt2node() failure case.
+	 * In both cases, fib6_repair_tree() should be called to fix
+	 * fn->leaf.
 	 */
-st_failure:
 	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
 		fib6_repair_tree(info->nl_net, fn);
 	if (!(rt->dst.flags & DST_NOCACHE))
 		dst_free(&rt->dst);
 	return err;
-#endif
 }
 
 /*
@@ -1443,8 +1465,9 @@
 
 int fib6_del(struct rt6_info *rt, struct nl_info *info)
 {
+	struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
 	struct net *net = info->nl_net;
-	struct fib6_node *fn = rt->rt6i_node;
 	struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
@@ -1633,7 +1656,9 @@
 			if (res) {
 #if RT6_DEBUG >= 2
 				pr_debug("%s: del failed: rt=%p@%p err=%d\n",
-					 __func__, rt, rt->rt6i_node, res);
+					 __func__, rt,
+					 rcu_access_pointer(rt->rt6i_node),
+					 res);
 #endif
 				continue;
 			}
@@ -1874,15 +1899,22 @@
 
 static void fib6_net_exit(struct net *net)
 {
+	unsigned int i;
+
 	rt6_ifdown(net, NULL);
 	del_timer_sync(&net->ipv6.ip6_fib_timer);
 
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
-	kfree(net->ipv6.fib6_local_tbl);
-#endif
-	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
-	kfree(net->ipv6.fib6_main_tbl);
+	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
+		struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+		struct hlist_node *tmp;
+		struct fib6_table *tb;
+
+		hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
+			hlist_del(&tb->tb6_hlist);
+			fib6_free_table(tb);
+		}
+	}
+
 	kfree(net->ipv6.fib_table_hash);
 	kfree(net->ipv6.rt6_stats);
 }
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c329a15..ae87b9a 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -432,7 +432,9 @@
 		}
 		break;
 	case ICMPV6_PKT_TOOBIG:
-		mtu = be32_to_cpu(info) - offset;
+		mtu = be32_to_cpu(info) - offset - t->tun_hlen;
+		if (t->dev->type == ARPHRD_ETHER)
+			mtu -= ETH_HLEN;
 		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;
 		t->dev->mtu = mtu;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4403260..821aa0b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -671,8 +671,6 @@
 		*prevhdr = NEXTHDR_FRAGMENT;
 		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 		if (!tmp_hdr) {
-			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -791,8 +789,6 @@
 		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
 				 hroom + troom, GFP_ATOMIC);
 		if (!frag) {
-			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-				      IPSTATS_MIB_FRAGFAILS);
 			err = -ENOMEM;
 			goto fail;
 		}
@@ -1384,11 +1380,12 @@
 	 */
 
 	cork->length += length;
-	if ((((length + fragheaderlen) > mtu) ||
-	     (skb && skb_is_gso(skb))) &&
+	if ((skb && skb_is_gso(skb)) ||
+	    (((length + fragheaderlen) > mtu) &&
+	    (skb_queue_len(queue) <= 1) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
-	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen, exthdrlen,
 					  transhdrlen, mtu, flags, fl6);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 986d4ca..b263bf3 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -622,18 +622,12 @@
 
 static int nf_ct_net_init(struct net *net)
 {
-	int res;
-
 	net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
-	res = inet_frags_init_net(&net->nf_frag.frags);
-	if (res)
-		return res;
-	res = nf_ct_frag6_sysctl_register(net);
-	if (res)
-		inet_frags_uninit_net(&net->nf_frag.frags);
-	return res;
+	inet_frags_init_net(&net->nf_frag.frags);
+
+	return nf_ct_frag6_sysctl_register(net);
 }
 
 static void nf_ct_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 1009040..eedee5d 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@
 	fl6.fl6_sport = otcph->dest;
 	fl6.fl6_dport = otcph->source;
 	fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+	fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
 	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
@@ -180,6 +181,8 @@
 
 	skb_dst_set(nskb, dst);
 
+	nskb->mark = fl6.flowi6_mark;
+
 	skb_reserve(nskb, hh_len + dst->header_len);
 	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
 				    ip6_dst_hoplimit(dst));
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index e9065b8..a338bbc 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -78,7 +78,7 @@
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
-	u16 offset = sizeof(struct ipv6hdr);
+	unsigned int offset = sizeof(struct ipv6hdr);
 	unsigned int packet_len = skb_tail_pointer(skb) -
 		skb_network_header(skb);
 	int found_rhdr = 0;
@@ -112,6 +112,8 @@
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
 		offset += ipv6_optlen(exthdr);
+		if (offset > IPV6_MAXPLEN)
+			return -EINVAL;
 		*nexthdr = &exthdr->nexthdr;
 	}
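
The output_core.c change above does two things: it widens the running offset from u16 to unsigned int and rejects extension-header chains whose cumulative length exceeds IPV6_MAXPLEN. With a 16-bit accumulator, a long chain of maximum-size headers silently wraps instead of failing. A self-contained illustration (sum_bounded(), MAXPLEN and the header sizes are stand-ins, not the kernel API):

	#include <stdio.h>
	#include <stdint.h>

	#define MAXPLEN 65535			/* stands in for IPV6_MAXPLEN */

	static int sum_bounded(const unsigned int *lens, int n, unsigned int *out)
	{
		unsigned int off = 40;		/* sizeof(struct ipv6hdr) */

		for (int i = 0; i < n; i++) {
			off += lens[i];
			if (off > MAXPLEN)
				return -1;	/* mirrors the new -EINVAL path */
		}
		*out = off;
		return 0;
	}

	int main(void)
	{
		unsigned int lens[40];
		uint16_t off16 = 40;
		unsigned int off;

		for (int i = 0; i < 40; i++)
			lens[i] = 2048;		/* maximum-size extension headers */

		for (int i = 0; i < 40; i++)
			off16 += lens[i];
		printf("u16 accumulator wrapped to %u\n", (unsigned)off16);

		if (sum_bounded(lens, 40, &off) < 0)
			printf("widened, bounded accumulator rejected the chain\n");
		return 0;
	}
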
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 3815e85..e585c0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -709,19 +709,13 @@
 
 static int __net_init ipv6_frags_init_net(struct net *net)
 {
-	int res;
-
 	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
 	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
 	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
 
-	res = inet_frags_init_net(&net->ipv6.frags);
-	if (res)
-		return res;
-	res = ip6_frags_ns_sysctl_register(net);
-	if (res)
-		inet_frags_uninit_net(&net->ipv6.frags);
-	return res;
+	inet_frags_init_net(&net->ipv6.frags);
+
+	return ip6_frags_ns_sysctl_register(net);
 }
 
 static void __net_exit ipv6_frags_exit_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d8123f6..5acd855 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1261,7 +1261,9 @@
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 {
-	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+	u32 rt_cookie = 0;
+
+	if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
 		return NULL;
 
 	if (rt6_check_expired(rt))
@@ -1329,8 +1331,14 @@
 		if (rt->rt6i_flags & RTF_CACHE) {
 			dst_hold(&rt->dst);
 			ip6_del_rt(rt);
-		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
-			rt->rt6i_node->fn_sernum = -1;
+		} else {
+			struct fib6_node *fn;
+
+			rcu_read_lock();
+			fn = rcu_dereference(rt->rt6i_node);
+			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+				fn->fn_sernum = -1;
+			rcu_read_unlock();
 		}
 	}
 }
@@ -1347,7 +1355,8 @@
 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 {
 	return !(rt->rt6i_flags & RTF_CACHE) &&
-		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+		(rt->rt6i_flags & RTF_PCPU ||
+		 rcu_access_pointer(rt->rt6i_node));
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 97830a6..a67174e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -209,6 +209,7 @@
 	treq->snt_synack.v64	= 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
+	treq->txhash = net_tx_rndhash();
 
 	/*
 	 * We need to lookup the dst_entry to get the correct window size.
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c925fd9..c43ef0c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -915,6 +915,7 @@
 		 */
 		offset = skb_transport_offset(skb);
 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+		csum = skb->csum;
 
 		skb->ip_summed = CHECKSUM_NONE;
 
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a2267f8..e7d378c 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -72,7 +72,7 @@
 		if (uh->check == 0)
 			uh->check = CSUM_MANGLED_0;
 
-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		/* If there is no outer header we can fake a checksum offload
 		 * due to the fact that we have already done the checksum in
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 391c3cb..101ed6c 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2223,7 +2223,7 @@
 {
 	struct sock *sk = sock->sk;
 	struct irda_sock *self = irda_sk(sk);
-	struct irda_device_list list;
+	struct irda_device_list list = { 0 };
 	struct irda_device_info *discoveries;
 	struct irda_ias_set *	ias_opt;	/* IAS get/query params */
 	struct ias_object *	ias_obj;	/* Object in IAS */
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index fecad10..7eb0e8f 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1381,6 +1381,10 @@
 	if (!csk)
 		return -EINVAL;
 
+	/* We must prevent loops or risk deadlock ! */
+	if (csk->sk_family == PF_KCM)
+		return -EOPNOTSUPP;
+
 	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
 	if (!psock)
 		return -ENOMEM;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 2e1050e..94bf810 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -228,7 +228,7 @@
 #define BROADCAST_ONE		1
 #define BROADCAST_REGISTERED	2
 #define BROADCAST_PROMISC_ONLY	4
-static int pfkey_broadcast(struct sk_buff *skb,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
 			   int broadcast_flags, struct sock *one_sk,
 			   struct net *net)
 {
@@ -278,7 +278,7 @@
 	rcu_read_unlock();
 
 	if (one_sk != NULL)
-		err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
 
 	kfree_skb(skb2);
 	kfree_skb(skb);
@@ -311,7 +311,7 @@
 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
 		hdr->sadb_msg_seq = 0;
 		hdr->sadb_msg_errno = rc;
-		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 		pfk->dump.skb = NULL;
 	}
@@ -355,7 +355,7 @@
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
 			     sizeof(uint64_t));
 
-	pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
 
 	return 0;
 }
@@ -1396,7 +1396,7 @@
 
 	xfrm_state_put(x);
 
-	pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+	pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
 
 	return 0;
 }
@@ -1483,7 +1483,7 @@
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->portid;
 
-	pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
 
 	return 0;
 }
@@ -1596,7 +1596,7 @@
 	out_hdr->sadb_msg_reserved = 0;
 	out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
 	out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-	pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
 
 	return 0;
 }
@@ -1701,8 +1701,8 @@
 		return -ENOBUFS;
 	}
 
-	pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
-
+	pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+			sock_net(sk));
 	return 0;
 }
 
@@ -1720,7 +1720,8 @@
 	hdr->sadb_msg_errno = (uint8_t) 0;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 
-	return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+			       sock_net(sk));
 }
 
 static int key_notify_sa_flush(const struct km_event *c)
@@ -1741,7 +1742,7 @@
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	hdr->sadb_msg_reserved = 0;
 
-	pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 
 	return 0;
 }
@@ -1798,7 +1799,7 @@
 	out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
 	if (pfk->dump.skb)
-		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 	pfk->dump.skb = out_skb;
 
@@ -1886,7 +1887,7 @@
 		new_hdr->sadb_msg_errno = 0;
 	}
 
-	pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
 	return 0;
 }
 
@@ -2219,7 +2220,7 @@
 	out_hdr->sadb_msg_errno = 0;
 	out_hdr->sadb_msg_seq = c->seq;
 	out_hdr->sadb_msg_pid = c->portid;
-	pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
 	return 0;
 
 }
@@ -2439,7 +2440,7 @@
 	out_hdr->sadb_msg_errno = 0;
 	out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
 	out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-	pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
 	err = 0;
 
 out:
@@ -2695,7 +2696,7 @@
 	out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
 	if (pfk->dump.skb)
-		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 	pfk->dump.skb = out_skb;
 
@@ -2752,7 +2753,7 @@
 	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	hdr->sadb_msg_reserved = 0;
-	pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 	return 0;
 
 }
@@ -2814,7 +2815,7 @@
 	void *ext_hdrs[SADB_EXT_MAX];
 	int err;
 
-	pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
 			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
 	memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -3036,7 +3037,8 @@
 	out_hdr->sadb_msg_seq = 0;
 	out_hdr->sadb_msg_pid = 0;
 
-	pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			xs_net(x));
 	return 0;
 }
 
@@ -3226,7 +3228,8 @@
 		       xfrm_ctx->ctx_len);
 	}
 
-	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			       xs_net(x));
 }
 
 static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3424,7 +3427,8 @@
 	n_port->sadb_x_nat_t_port_port = sport;
 	n_port->sadb_x_nat_t_port_reserved = 0;
 
-	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+			       xs_net(x));
 }
 
 #ifdef CONFIG_NET_KEY_MIGRATE
@@ -3616,7 +3620,7 @@
 	}
 
 	/* broadcast migrate message to sockets */
-	pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
 
 	return 0;
 
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 02bcf00..008299b 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
+
 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
 	len = off + t->len + var_alloc_len;
 	alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@
 
 	rcu_read_lock();
 	t = rcu_dereference(nf_ct_ext_types[id]);
-	BUG_ON(t == NULL);
+	if (!t) {
+		rcu_read_unlock();
+		return NULL;
+	}
 
 	newoff = ALIGN(old->len, t->align);
 	newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@
 	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
 	update_alloc_size(type);
 	mutex_unlock(&nf_ct_ext_type_mutex);
-	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 5b9c884..dde64c4 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -225,20 +225,21 @@
 		.tuple = tuple,
 		.zone = zone
 	};
-	struct rhlist_head *hl;
+	struct rhlist_head *hl, *h;
 
 	hl = rhltable_lookup(&nf_nat_bysource_table, &key,
 			     nf_nat_bysource_params);
-	if (!hl)
-		return 0;
 
-	ct = container_of(hl, typeof(*ct), nat_bysource);
+	rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
+		nf_ct_invert_tuplepr(result,
+				     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+		result->dst = tuple->dst;
 
-	nf_ct_invert_tuplepr(result,
-			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-	result->dst = tuple->dst;
+		if (in_range(l3proto, l4proto, result, range))
+			return 1;
+	}
 
-	return in_range(l3proto, l4proto, result, range);
+	return 0;
 }
 
 /* For [FUTURE] fragmentation handling, we want the least-used
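
The nf_nat_core.c hunk above replaces "take the first hash match" with a walk over every entry that shares the bucket, returning success only for a candidate whose reply tuple actually falls in the requested range. A simplified model of that selection loop (struct candidate, in_range() and find_in_range() are invented for illustration):

	#include <stdio.h>

	struct candidate {
		int reply_port;
	};

	static int in_range(const struct candidate *c, int lo, int hi)
	{
		return c->reply_port >= lo && c->reply_port <= hi;
	}

	static int find_in_range(const struct candidate *c, int n, int lo, int hi,
				 int *out)
	{
		for (int i = 0; i < n; i++) {	/* walk all bucket entries */
			if (in_range(&c[i], lo, hi)) {
				*out = c[i].reply_port;
				return 1;
			}
		}
		return 0;			/* no usable mapping found */
	}

	int main(void)
	{
		struct candidate bucket[] = { { 8080 }, { 1024 }, { 2000 } };
		int port;

		if (find_in_range(bucket, 3, 1000, 1999, &port))
			printf("reused mapping on port %d\n", port);
		return 0;
	}

With rhltable lookups, several conntrack entries can legitimately share the same source key, so the first entry can no longer be assumed usable.
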
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 1fbe4b6..1cf2874 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1597,14 +1597,6 @@
 	if (sk) {
 		MT_DEBUG("qtaguid: %p->sk_proto=%u "
 			 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
-		/*
-		 * When in TCP_TIME_WAIT the sk is not a "struct sock" but
-		 * "struct inet_timewait_sock" which is missing fields.
-		 */
-		if (!sk_fullsock(sk) || sk->sk_state  == TCP_TIME_WAIT) {
-			sock_gen_put(sk);
-			sk = NULL;
-		}
 	}
 	return sk;
 }
@@ -1697,10 +1689,25 @@
 		 */
 		sk = qtaguid_find_sk(skb, par);
 		/*
-		 * If we got the socket from the find_sk(), we will need to put
-		 * it back, as nf_tproxy_get_sock_v4() got it.
+		 * A TCP_NEW_SYN_RECV sk is not a "struct sock" but a "struct
+		 * request_sock", from which we can get a pointer to a full
+		 * socket to retrieve uid/gid. When in TCP_TIME_WAIT, sk is a
+		 * struct inet_timewait_sock, which is missing fields and holds
+		 * no reference to a full socket, so just ignore the socket.
 		 */
-		got_sock = sk;
+		if (sk && sk->sk_state == TCP_NEW_SYN_RECV) {
+			sock_gen_put(sk);
+			sk = sk_to_full_sk(sk);
+		} else if (sk && (!sk_fullsock(sk) || sk->sk_state == TCP_TIME_WAIT)) {
+			sock_gen_put(sk);
+			sk = NULL;
+		} else {
+			/*
+			 * If we got the socket from the find_sk(), we will need to put
+			 * it back, as nf_tproxy_get_sock_v4() got it.
+			 */
+			got_sock = sk;
+		}
 		if (sk)
 			atomic64_inc(&qtu_events.match_found_sk_in_ct);
 		else
@@ -1710,18 +1717,9 @@
 	}
 	MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
 		 par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
-	if (sk != NULL) {
-		set_sk_callback_lock = true;
-		read_lock_bh(&sk->sk_callback_lock);
-		MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
-			par->hooknum, sk, sk->sk_socket,
-			sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
-		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
-		MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
-			par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
-	}
 
-	if (sk == NULL || sk->sk_socket == NULL) {
+
+	if (sk == NULL) {
 		/*
 		 * Here, the qtaguid_find_sk() using connection tracking
 		 * couldn't find the owner, so for now we just count them
@@ -1734,9 +1732,7 @@
 		 */
 		if (!(info->match & XT_QTAGUID_UID))
 			account_for_uid(skb, sk, 0, par);
-		MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
-			par->hooknum,
-			sk ? sk->sk_socket : NULL);
+		MT_DEBUG("qtaguid[%d]: leaving (sk=NULL)\n", par->hooknum);
 		res = (info->match ^ info->invert) == 0;
 		atomic64_inc(&qtu_events.match_no_sk);
 		goto put_sock_ret_res;
@@ -1744,16 +1740,7 @@
 		res = false;
 		goto put_sock_ret_res;
 	}
-	filp = sk->sk_socket->file;
-	if (filp == NULL) {
-		MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
-		account_for_uid(skb, sk, 0, par);
-		res = ((info->match ^ info->invert) &
-			(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
-		atomic64_inc(&qtu_events.match_no_sk_file);
-		goto put_sock_ret_res;
-	}
-	sock_uid = filp->f_cred->fsuid;
+	sock_uid = sk->sk_uid;
 	/*
 	 * TODO: unhack how to force just accounting.
 	 * For now we only do iface stats when the uid-owner is not requested
@@ -1771,8 +1758,8 @@
 		kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
 		kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
 
-		if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
-		     uid_lte(filp->f_cred->fsuid, uid_max)) ^
+		if ((uid_gte(sk->sk_uid, uid_min) &&
+		     uid_lte(sk->sk_uid, uid_max)) ^
 		    !(info->invert & XT_QTAGUID_UID)) {
 			MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
 				 par->hooknum);
@@ -1783,7 +1770,19 @@
 	if (info->match & XT_QTAGUID_GID) {
 		kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
 		kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
-
+		set_sk_callback_lock = true;
+		read_lock_bh(&sk->sk_callback_lock);
+		MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
+			par->hooknum, sk, sk->sk_socket,
+			sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+		if (!filp) {
+			res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
+			atomic64_inc(&qtu_events.match_no_sk_gid);
+			goto put_sock_ret_res;
+		}
+		MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+			par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
 		if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
 				gid_lte(filp->f_cred->fsgid, gid_max)) ^
 			!(info->invert & XT_QTAGUID_GID)) {
@@ -1955,7 +1954,7 @@
 			   "match_found_sk_in_ct=%llu "
 			   "match_found_no_sk_in_ct=%llu "
 			   "match_no_sk=%llu "
-			   "match_no_sk_file=%llu\n",
+			   "match_no_sk_gid=%llu\n",
 			   (u64)atomic64_read(&qtu_events.sockets_tagged),
 			   (u64)atomic64_read(&qtu_events.sockets_untagged),
 			   (u64)atomic64_read(&qtu_events.counter_set_changes),
@@ -1967,7 +1966,7 @@
 			   (u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
 			   (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
 			   (u64)atomic64_read(&qtu_events.match_no_sk),
-			   (u64)atomic64_read(&qtu_events.match_no_sk_file));
+			   (u64)atomic64_read(&qtu_events.match_no_sk_gid));
 
 		/* Count the following as part of the last item_index. No need
 		 * to lock the sock_tag_list here since it is already locked when
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
index 8178fbd..c705270 100644
--- a/net/netfilter/xt_qtaguid_internal.h
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -289,10 +289,10 @@
 	 */
 	atomic64_t match_no_sk;
 	/*
-	 * The file ptr in the sk_socket wasn't there.
+	 * The file ptr in the sk_socket wasn't there and we couldn't get GID.
 	 * This might happen for traffic while the socket is being closed.
 	 */
-	atomic64_t match_no_sk_file;
+	atomic64_t match_no_sk_gid;
 };
 
 /* Track the set active_set for the given tag. */
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index a52fbaf..ec87467 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -244,7 +244,7 @@
 			transparent = xt_socket_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		sock_gen_put(sk);
@@ -433,7 +433,7 @@
 			transparent = xt_socket_sk_is_transparent(sk);
 
 		if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
-		    transparent)
+		    transparent && sk_fullsock(sk))
 			pskb->mark = sk->sk_mark;
 
 		if (sk != skb->sk)
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 2b0f0ac..5a58f9f 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@
 		}
 		create_info = (struct hci_create_pipe_resp *)skb->data;
 
+		if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+			status = NFC_HCI_ANY_E_NOK;
+			goto exit;
+		}
+
 		/* Save the new created pipe and bind with local gate,
 		 * the description for skb->data[3] is destination gate id
 		 * but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@
 		}
 		delete_info = (struct hci_delete_pipe_noti *)skb->data;
 
+		if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+			status = NFC_HCI_ANY_E_NOK;
+			goto exit;
+		}
+
 		hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
 		hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
 		break;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 4e03f64..05d9f42 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1240,6 +1240,7 @@
 		goto out;
 	}
 
+	OVS_CB(skb)->acts_origlen = acts->orig_len;
 	err = do_execute_actions(dp, skb, key,
 				 acts->actions, acts->actions_len);
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 48386bf..b28e45b 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1088,8 +1088,8 @@
 
 	nla_for_each_nested(a, attr, rem) {
 		int type = nla_type(a);
-		int maxlen = ovs_ct_attr_lens[type].maxlen;
-		int minlen = ovs_ct_attr_lens[type].minlen;
+		int maxlen;
+		int minlen;
 
 		if (type > OVS_CT_ATTR_MAX) {
 			OVS_NLERR(log,
@@ -1097,6 +1097,9 @@
 				  type, OVS_CT_ATTR_MAX);
 			return -EINVAL;
 		}
+
+		maxlen = ovs_ct_attr_lens[type].maxlen;
+		minlen = ovs_ct_attr_lens[type].minlen;
 		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
 			OVS_NLERR(log,
 				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4d67ea8..453f806 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -383,7 +383,7 @@
 }
 
 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
-			      unsigned int hdrlen)
+			      unsigned int hdrlen, int actions_attrlen)
 {
 	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
 		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
@@ -400,7 +400,7 @@
 
 	/* OVS_PACKET_ATTR_ACTIONS */
 	if (upcall_info->actions_len)
-		size += nla_total_size(upcall_info->actions_len);
+		size += nla_total_size(actions_attrlen);
 
 	/* OVS_PACKET_ATTR_MRU */
 	if (upcall_info->mru)
@@ -467,7 +467,8 @@
 	else
 		hlen = skb->len;
 
-	len = upcall_msg_size(upcall_info, hlen - cutlen);
+	len = upcall_msg_size(upcall_info, hlen - cutlen,
+			      OVS_CB(skb)->acts_origlen);
 	user_skb = genlmsg_new(len, GFP_ATOMIC);
 	if (!user_skb) {
 		err = -ENOMEM;
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index ab85c1c..e19ace4 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -100,12 +100,14 @@
  * @input_vport: The original vport packet came in on. This value is cached
  * when a packet is received by OVS.
  * @mru: The maximum received fragement size; 0 if the packet is not
+ * @acts_origlen: The netlink size of the flow actions applied to this skb.
  * @cutlen: The number of bytes from the packet end to be removed.
  * fragmented.
  */
 struct ovs_skb_cb {
 	struct vport		*input_vport;
 	u16			mru;
+	u16			acts_origlen;
 	u32			cutlen;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a563e6..35ba4b6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2151,6 +2151,7 @@
 	struct timespec ts;
 	__u32 ts_status;
 	bool is_drop_n_account = false;
+	bool do_vnet = false;
 
 	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
 	 * We may add members to them until current aligned size without forcing
@@ -2201,8 +2202,10 @@
 		netoff = TPACKET_ALIGN(po->tp_hdrlen +
 				       (maclen < 16 ? 16 : maclen)) +
 				       po->tp_reserve;
-		if (po->has_vnet_hdr)
+		if (po->has_vnet_hdr) {
 			netoff += sizeof(struct virtio_net_hdr);
+			do_vnet = true;
+		}
 		macoff = netoff - maclen;
 	}
 	if (po->tp_version <= TPACKET_V2) {
@@ -2219,8 +2222,10 @@
 					skb_set_owner_r(copy_skb, sk);
 			}
 			snaplen = po->rx_ring.frame_size - macoff;
-			if ((int)snaplen < 0)
+			if ((int)snaplen < 0) {
 				snaplen = 0;
+				do_vnet = false;
+			}
 		}
 	} else if (unlikely(macoff + snaplen >
 			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
@@ -2233,6 +2238,7 @@
 		if (unlikely((int)snaplen < 0)) {
 			snaplen = 0;
 			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+			do_vnet = false;
 		}
 	}
 	spin_lock(&sk->sk_receive_queue.lock);
@@ -2258,7 +2264,7 @@
 	}
 	spin_unlock(&sk->sk_receive_queue.lock);
 
-	if (po->has_vnet_hdr) {
+	if (do_vnet) {
 		if (__packet_rcv_vnet(skb, h.raw + macoff -
 					   sizeof(struct virtio_net_hdr))) {
 			spin_lock(&sk->sk_receive_queue.lock);
@@ -3698,14 +3704,19 @@
 
 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		if (val > INT_MAX)
 			return -EINVAL;
-		po->tp_reserve = val;
-		return 0;
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_reserve = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_LOSS:
 	{
@@ -4322,7 +4333,7 @@
 		register_prot_hook(sk);
 	}
 	spin_unlock(&po->bind_lock);
-	if (closing && (po->tp_version > TPACKET_V2)) {
+	if (pg_vec && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, rb_queue);
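
The PACKET_RESERVE hunk above moves both the pg_vec check and the tp_reserve assignment under lock_sock(), so the ring cannot be mapped between the test and the write. The same check-then-set pattern in user space, guarded by a mutex (struct fake_sock and set_reserve() are made up for the example):

	#include <pthread.h>
	#include <stdio.h>
	#include <errno.h>

	struct fake_sock {
		pthread_mutex_t lock;
		int ring_mapped;		/* stands in for rx/tx pg_vec */
		unsigned int tp_reserve;
	};

	static int set_reserve(struct fake_sock *sk, unsigned int val)
	{
		int ret;

		pthread_mutex_lock(&sk->lock);
		if (sk->ring_mapped) {
			ret = -EBUSY;		/* same error the kernel returns */
		} else {
			sk->tp_reserve = val;	/* check and write under one lock */
			ret = 0;
		}
		pthread_mutex_unlock(&sk->lock);
		return ret;
	}

	int main(void)
	{
		struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

		printf("first set: %d\n", set_reserve(&sk, 128));
		sk.ring_mapped = 1;
		printf("after mapping: %d\n", set_reserve(&sk, 256));
		return 0;
	}

Build with -lpthread; the second call fails with -EBUSY once the ring is marked as mapped, just as the setsockopt() path now does.
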
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 35be79e..57646ef 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -537,6 +537,7 @@
 {
 	int required_headroom, additional_header_length, ckresult;
 	struct rmnet_map_header_s *map_header;
+	int non_linear_skb;
 
 	additional_header_length = 0;
 
@@ -565,9 +566,11 @@
 		rmnet_stats_ul_checksum(ckresult);
 	}
 
+	non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
+			 skb_is_nonlinear(skb);
+
 	if ((!(config->egress_data_format &
-	    RMNET_EGRESS_FORMAT_AGGREGATION)) ||
-	    ((orig_dev->features & NETIF_F_GSO) && skb_is_nonlinear(skb)))
+	    RMNET_EGRESS_FORMAT_AGGREGATION)) || non_linear_skb)
 		map_header = rmnet_map_add_map_header
 		(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
 	else
@@ -589,7 +592,8 @@
 
 	skb->protocol = htons(ETH_P_MAP);
 
-	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) &&
+	    !non_linear_skb) {
 		rmnet_map_aggregate(skb, config);
 		return RMNET_MAP_CONSUMED;
 	}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 378c1c9..5003051 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -41,6 +41,7 @@
 {
 	struct xt_tgchk_param par;
 	struct xt_target *target;
+	struct ipt_entry e = {};
 	int ret = 0;
 
 	target = xt_request_find_target(AF_INET, t->u.user.name,
@@ -49,8 +50,9 @@
 		return PTR_ERR(target);
 
 	t->u.kernel.target = target;
+	memset(&par, 0, sizeof(par));
 	par.table     = table;
-	par.entryinfo = NULL;
+	par.entryinfo = &e;
 	par.target    = target;
 	par.targinfo  = t->data;
 	par.hook_mask = hook;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index daf6624..a74d32e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -277,9 +277,6 @@
 void qdisc_hash_add(struct Qdisc *q)
 {
 	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		struct Qdisc *root = qdisc_dev(q)->qdisc;
-
-		WARN_ON_ONCE(root == &noop_qdisc);
 		ASSERT_RTNL();
 		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index bc5e995..ea8a56f 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -434,6 +434,7 @@
 		qdisc_drop(head, sch, to_free);
 
 		slot_queue_add(slot, skb);
+		qdisc_tree_reduce_backlog(sch, 0, delta);
 		return NET_XMIT_CN;
 	}
 
@@ -465,8 +466,10 @@
 	/* Return Congestion Notification only if we dropped a packet
 	 * from this flow.
 	 */
-	if (qlen != slot->qlen)
+	if (qlen != slot->qlen) {
+		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
 		return NET_XMIT_CN;
+	}
 
 	/* As we dropped a packet, better let upper stack know this */
 	qdisc_tree_reduce_backlog(sch, 1, dropped);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0c09060..ca4a63e 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -512,7 +512,9 @@
 {
 	addr->sa.sa_family = AF_INET6;
 	addr->v6.sin6_port = port;
+	addr->v6.sin6_flowinfo = 0;
 	addr->v6.sin6_addr = *saddr;
+	addr->v6.sin6_scope_id = 0;
 }
 
 /* Compare addresses exactly.
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 048954e..e8f56b7 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -70,7 +70,8 @@
 
 	info = nla_data(attr);
 	list_for_each_entry_rcu(laddr, address_list, list) {
-		memcpy(info, &laddr->a, addrlen);
+		memcpy(info, &laddr->a, sizeof(laddr->a));
+		memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
 		info += addrlen;
 	}
 
@@ -93,7 +94,9 @@
 	info = nla_data(attr);
 	list_for_each_entry(from, &asoc->peer.transport_addr_list,
 			    transports) {
-		memcpy(info, &from->ipaddr, addrlen);
+		memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
+		memset(info + sizeof(from->ipaddr), 0,
+		       addrlen - sizeof(from->ipaddr));
 		info += addrlen;
 	}
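
Both sctp_diag.c hunks above copy only sizeof() of the source address and then explicitly clear the rest of the addrlen-sized netlink slot, so no uninitialized kernel bytes reach user space. A minimal user-space sketch of the same pattern (struct small_addr, SLOT_LEN and fill_slot() are invented; the real slot width is the negotiated addrlen):

	#include <stdio.h>
	#include <string.h>

	struct small_addr {
		unsigned short family;
		unsigned char addr[4];
	};

	#define SLOT_LEN 16			/* stands in for addrlen */

	static void fill_slot(unsigned char *slot, const struct small_addr *a)
	{
		memcpy(slot, a, sizeof(*a));
		memset(slot + sizeof(*a), 0, SLOT_LEN - sizeof(*a));	/* zero the tail */
	}

	int main(void)
	{
		unsigned char slot[SLOT_LEN];
		struct small_addr a = { 2, { 192, 0, 2, 1 } };

		memset(slot, 0xAA, sizeof(slot));	/* simulate stale heap bytes */
		fill_slot(slot, &a);

		for (size_t i = 0; i < sizeof(slot); i++)
			printf("%02x ", slot[i]);
		printf("\n");
		return 0;
	}
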
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9647e31..3ef7252 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4373,8 +4373,7 @@
 	info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
 
 	prim = asoc->peer.primary_path;
-	memcpy(&info->sctpi_p_address, &prim->ipaddr,
-	       sizeof(struct sockaddr_storage));
+	memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
 	info->sctpi_p_state = prim->state;
 	info->sctpi_p_cwnd = prim->cwnd;
 	info->sctpi_p_srtt = prim->srtt;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 84d0fda..d3cfbf2 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -265,7 +265,8 @@
 		sctp_ulpq_clear_pd(ulpq);
 
 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
-		sp->data_ready_signalled = 1;
+		if (!sock_owned_by_user(sk))
+			sp->data_ready_signalled = 1;
 		sk->sk_data_ready(sk);
 	}
 	return 1;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a4bc982..266a30c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -408,6 +408,9 @@
 		dprintk("svc: socket %p(inet %p), busy=%d\n",
 			svsk, sk,
 			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+
+		/* Refer to svc_setup_socket() for details. */
+		rmb();
 		svsk->sk_odata(sk);
 		if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
 			svc_xprt_enqueue(&svsk->sk_xprt);
@@ -424,6 +427,9 @@
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
 			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+
+		/* Refer to svc_setup_socket() for details. */
+		rmb();
 		svsk->sk_owspace(sk);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
@@ -748,8 +754,12 @@
 	dprintk("svc: socket %p TCP (listen) state change %d\n",
 		sk, sk->sk_state);
 
-	if (svsk)
+	if (svsk) {
+		/* Refer to svc_setup_socket() for details. */
+		rmb();
 		svsk->sk_odata(sk);
+	}
+
 	/*
 	 * This callback may called twice when a new connection
 	 * is established as a child socket inherits everything
@@ -782,6 +792,8 @@
 	if (!svsk)
 		printk("svc: socket %p: no user data\n", sk);
 	else {
+		/* Refer to svc_setup_socket() for details. */
+		rmb();
 		svsk->sk_ostate(sk);
 		if (sk->sk_state != TCP_ESTABLISHED) {
 			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1368,12 +1380,18 @@
 		return ERR_PTR(err);
 	}
 
-	inet->sk_user_data = svsk;
 	svsk->sk_sock = sock;
 	svsk->sk_sk = inet;
 	svsk->sk_ostate = inet->sk_state_change;
 	svsk->sk_odata = inet->sk_data_ready;
 	svsk->sk_owspace = inet->sk_write_space;
+	/*
+	 * This barrier is necessary to prevent a race condition with
+	 * svc_data_ready(), svc_listen_data_ready() and the other callbacks
+	 * above, which dereference sk_user_data set below.
+	 */
+	wmb();
+	inet->sk_user_data = svsk;
 
 	/* Initialize the socket */
 	if (sock->type == SOCK_DGRAM)
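
The svcsock.c changes above pair a wmb() before publishing sk_user_data with an rmb() in each callback that reads it, so a callback never sees the pointer before sk_odata/sk_owspace/sk_ostate have been saved. A hedged user-space analogue using C11 release/acquire atomics instead of the kernel barrier primitives (struct svc_like, publisher() and data_ready() are invented names):

	#include <stdatomic.h>
	#include <stdio.h>

	struct svc_like {
		void (*saved_data_ready)(void);
	};

	static void orig_data_ready(void) { printf("original callback\n"); }

	static _Atomic(struct svc_like *) user_data;	/* plays the role of sk_user_data */

	static void publisher(struct svc_like *s)
	{
		s->saved_data_ready = orig_data_ready;	/* initialize fields first */
		atomic_store_explicit(&user_data, s, memory_order_release);
	}

	static void data_ready(void)
	{
		struct svc_like *s =
			atomic_load_explicit(&user_data, memory_order_acquire);

		if (s)		/* only after the acquire load is saved_data_ready valid */
			s->saved_data_ready();
	}

	int main(void)
	{
		static struct svc_like s;

		publisher(&s);
		data_ready();
		return 0;
	}

In the kernel, the acquire side corresponds to the rmb() added to svc_data_ready(), svc_write_space() and the state-change callbacks.
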
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 1fd4647..aedc476 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -258,13 +258,15 @@
 	arg = nlmsg_new(0, GFP_KERNEL);
 	if (!arg) {
 		kfree_skb(msg->rep);
+		msg->rep = NULL;
 		return -ENOMEM;
 	}
 
 	err = __tipc_nl_compat_dumpit(cmd, msg, arg);
-	if (err)
+	if (err) {
 		kfree_skb(msg->rep);
-
+		msg->rep = NULL;
+	}
 	kfree_skb(arg);
 
 	return err;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index cf7063a..c40f3de 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -221,13 +221,7 @@
 
 	union {
 		struct cfg80211_connect_resp_params cr;
-		struct {
-			const u8 *req_ie;
-			const u8 *resp_ie;
-			size_t req_ie_len;
-			size_t resp_ie_len;
-			struct cfg80211_bss *bss;
-		} rm;
+		struct cfg80211_roam_info rm;
 		struct {
 			const u8 *ie;
 			size_t ie_len;
@@ -385,9 +379,7 @@
 			struct net_device *dev, u16 reason,
 			bool wextev);
 void __cfg80211_roamed(struct wireless_dev *wdev,
-		       struct cfg80211_bss *bss,
-		       const u8 *req_ie, size_t req_ie_len,
-		       const u8 *resp_ie, size_t resp_ie_len);
+		       struct cfg80211_roam_info *info);
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
 			      struct wireless_dev *wdev);
 
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 413deff..cad82a4 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -223,17 +223,16 @@
 	(5490 - 5710 @ 160), (30), DFS
 
 country BZ:
-	(2402 - 2482 @ 40), (36)
-	(5170 - 5330 @ 160), (27)
-	(5490 - 5730 @ 160), (36)
-	(5735 - 5835 @ 80), (36)
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5330 @ 160), (23)
+	(5490 - 5730 @ 160), (30)
+	(5735 - 5835 @ 80), (30)
 
 country CA: DFS-FCC
 	(2402 - 2472 @ 40), (30)
 	(5170 - 5250 @ 80), (24), AUTO-BW
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
-	(5490 - 5590 @ 80), (24), DFS
-	(5650 - 5730 @ 80), (24), DFS
+	(5490 - 5730 @ 160), (24), DFS
 	(5735 - 5835 @ 80), (30)
 	# 60 gHz band channels 1-3
 	(57240 - 63720 @ 2160), (40)
@@ -682,7 +681,13 @@
 country IN:
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5330 @ 160), (23)
-	(5735 - 5835 @ 80), (30)
+	(5735 - 5835 @ 80), (33)
+
+country IQ: DFS-ETSI
+	(2402 - 2482 @ 40), (20)
+	(5170 - 5250 @ 80), (23), AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5490 - 5710 @ 160), (30), DFS
 
 country IS: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
@@ -736,7 +741,6 @@
 
 country JP: DFS-JP
 	(2402 - 2482 @ 40), (20)
-	(2474 - 2494 @ 20), (20), NO-OFDM
 	(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
 	(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
 	(5490 - 5710 @ 160), (20), DFS
@@ -758,7 +762,7 @@
 country KN: DFS-FCC
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
-	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
+	(5250 - 5330 @ 80), (30), DFS, AUTO-BW
 	(5490 - 5710 @ 160), (30), DFS
 	(5735 - 5815 @ 80), (30)
 
@@ -1009,7 +1013,7 @@
 	(5170 - 5250 @ 80), (24), AUTO-BW
 	(5250 - 5330 @ 80), (24), DFS, AUTO-BW
 	(5490 - 5650 @ 160), (24), DFS
-	(5735 - 5815 @ 80), (24)
+	(5735 - 5835 @ 80), (24)
 	# 60 gHz band channels 1-3
 	(57240 - 63720 @ 2160), (40)
 
@@ -1089,7 +1093,7 @@
 	(5490 - 5710 @ 160), (30), DFS
 
 country PA:
-	(2402 - 2472 @ 40), (30)
+	(2402 - 2472 @ 40), (36)
 	(5170 - 5250 @ 80), (23), AUT0-BW
 	(5250 - 5330 @ 80), (30), AUTO-BW
 	(5735 - 5835 @ 80), (36)
@@ -1374,9 +1378,9 @@
 
 country TT:
 	(2402 - 2482 @ 40), (20)
-	(5170 - 5330 @ 160), (27)
-	(5490 - 5730 @ 160), (36)
-	(5735 - 5835 @ 80), (36)
+	(5170 - 5330 @ 160), (24)
+	(5490 - 5730 @ 160), (24)
+	(5735 - 5835 @ 80), (30)
 	# 60 gHz band channels 1-3, FCC
 	(57240 - 63720 @ 2160), (40)
 
@@ -1450,7 +1454,7 @@
 country UZ: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
 	(5170 - 5250 @ 80), (23), AUTO-BW
-	(5250 - 5330 @ 80), (20), DFS, AUTO-BW
+	(5250 - 5330 @ 80), (23), DFS, AUTO-BW
 
 country VC: DFS-ETSI
 	(2402 - 2482 @ 40), (20)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ebd9a4b..adf7d03 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13415,14 +13415,14 @@
 }
 
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
-			 struct net_device *netdev, const u8 *bssid,
-			 const u8 *req_ie, size_t req_ie_len,
-			 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
+			 struct net_device *netdev,
+			 struct cfg80211_roam_info *info, gfp_t gfp)
 {
 	struct sk_buff *msg;
 	void *hdr;
+	const u8 *bssid = info->bss ? info->bss->bssid : info->bssid;
 
-	msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
+	msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len, gfp);
 	if (!msg)
 		return;
 
@@ -13435,10 +13435,14 @@
 	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
 	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
-	    (req_ie &&
-	     nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
-	    (resp_ie &&
-	     nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
+	    (info->req_ie &&
+	     nla_put(msg, NL80211_ATTR_REQ_IE, info->req_ie_len,
+		     info->req_ie)) ||
+	    (info->resp_ie &&
+	     nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len,
+		     info->resp_ie)) ||
+	    (info->authorized &&
+	     nla_put_flag(msg, NL80211_ATTR_PORT_AUTHORIZED)))
 		goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2a84d18..c9d4805 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,9 +59,8 @@
 				 struct cfg80211_connect_resp_params *params,
 				 gfp_t gfp);
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
-			 struct net_device *netdev, const u8 *bssid,
-			 const u8 *req_ie, size_t req_ie_len,
-			 const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+			 struct net_device *netdev,
+			 struct cfg80211_roam_info *info, gfp_t gfp);
 void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 			       struct net_device *netdev, u16 reason,
 			       const u8 *ie, size_t ie_len, bool from_ap);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d7e6abc..bb7f5be 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -5,6 +5,7 @@
  *
  * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
  * Copyright (C) 2009   Intel Corporation. All rights reserved.
+ * Copyright 2017	Intel Deutschland GmbH
  */
 
 #include <linux/etherdevice.h>
@@ -893,9 +894,7 @@
 
 /* Consumes bss object one way or another */
 void __cfg80211_roamed(struct wireless_dev *wdev,
-		       struct cfg80211_bss *bss,
-		       const u8 *req_ie, size_t req_ie_len,
-		       const u8 *resp_ie, size_t resp_ie_len)
+		       struct cfg80211_roam_info *info)
 {
 #ifdef CONFIG_CFG80211_WEXT
 	union iwreq_data wrqu;
@@ -913,97 +912,85 @@
 	cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
 	wdev->current_bss = NULL;
 
-	cfg80211_hold_bss(bss_from_pub(bss));
-	wdev->current_bss = bss_from_pub(bss);
+	if (WARN_ON(!info->bss))
+		return;
+
+	cfg80211_hold_bss(bss_from_pub(info->bss));
+	wdev->current_bss = bss_from_pub(info->bss);
 
 	nl80211_send_roamed(wiphy_to_rdev(wdev->wiphy),
-			    wdev->netdev, bss->bssid,
-			    req_ie, req_ie_len, resp_ie, resp_ie_len,
-			    GFP_KERNEL);
+			    wdev->netdev, info, GFP_KERNEL);
 
 #ifdef CONFIG_CFG80211_WEXT
-	if (req_ie) {
+	if (info->req_ie) {
 		memset(&wrqu, 0, sizeof(wrqu));
-		wrqu.data.length = req_ie_len;
+		wrqu.data.length = info->req_ie_len;
 		wireless_send_event(wdev->netdev, IWEVASSOCREQIE,
-				    &wrqu, req_ie);
+				    &wrqu, info->req_ie);
 	}
 
-	if (resp_ie) {
+	if (info->resp_ie) {
 		memset(&wrqu, 0, sizeof(wrqu));
-		wrqu.data.length = resp_ie_len;
+		wrqu.data.length = info->resp_ie_len;
 		wireless_send_event(wdev->netdev, IWEVASSOCRESPIE,
-				    &wrqu, resp_ie);
+				    &wrqu, info->resp_ie);
 	}
 
 	memset(&wrqu, 0, sizeof(wrqu));
 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-	memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
-	memcpy(wdev->wext.prev_bssid, bss->bssid, ETH_ALEN);
+	memcpy(wrqu.ap_addr.sa_data, info->bss->bssid, ETH_ALEN);
+	memcpy(wdev->wext.prev_bssid, info->bss->bssid, ETH_ALEN);
 	wdev->wext.prev_bssid_valid = true;
 	wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
 #endif
 
 	return;
 out:
-	cfg80211_put_bss(wdev->wiphy, bss);
+	cfg80211_put_bss(wdev->wiphy, info->bss);
 }
 
-void cfg80211_roamed(struct net_device *dev,
-		     struct ieee80211_channel *channel,
-		     const u8 *bssid,
-		     const u8 *req_ie, size_t req_ie_len,
-		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
-{
-	struct wireless_dev *wdev = dev->ieee80211_ptr;
-	struct cfg80211_bss *bss;
-
-	bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
-			       wdev->ssid_len,
-			       wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
-	if (WARN_ON(!bss))
-		return;
-
-	cfg80211_roamed_bss(dev, bss, req_ie, req_ie_len, resp_ie,
-			    resp_ie_len, gfp);
-}
-EXPORT_SYMBOL(cfg80211_roamed);
-
-/* Consumes bss object one way or another */
-void cfg80211_roamed_bss(struct net_device *dev,
-			 struct cfg80211_bss *bss, const u8 *req_ie,
-			 size_t req_ie_len, const u8 *resp_ie,
-			 size_t resp_ie_len, gfp_t gfp)
+/* Consumes info->bss object one way or another */
+void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
+		     gfp_t gfp)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 	struct cfg80211_event *ev;
 	unsigned long flags;
 
-	if (WARN_ON(!bss))
+	if (!info->bss) {
+		info->bss = cfg80211_get_bss(wdev->wiphy, info->channel,
+					     info->bssid, wdev->ssid,
+					     wdev->ssid_len,
+					     wdev->conn_bss_type,
+					     IEEE80211_PRIVACY_ANY);
+	}
+
+	if (WARN_ON(!info->bss))
 		return;
 
-	ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+	ev = kzalloc(sizeof(*ev) + info->req_ie_len + info->resp_ie_len, gfp);
 	if (!ev) {
-		cfg80211_put_bss(wdev->wiphy, bss);
+		cfg80211_put_bss(wdev->wiphy, info->bss);
 		return;
 	}
 
 	ev->type = EVENT_ROAMED;
 	ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
-	ev->rm.req_ie_len = req_ie_len;
-	memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len);
-	ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
-	ev->rm.resp_ie_len = resp_ie_len;
-	memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len);
-	ev->rm.bss = bss;
+	ev->rm.req_ie_len = info->req_ie_len;
+	memcpy((void *)ev->rm.req_ie, info->req_ie, info->req_ie_len);
+	ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + info->req_ie_len;
+	ev->rm.resp_ie_len = info->resp_ie_len;
+	memcpy((void *)ev->rm.resp_ie, info->resp_ie, info->resp_ie_len);
+	ev->rm.bss = info->bss;
+	ev->rm.authorized = info->authorized;
 
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
 	queue_work(cfg80211_wq, &rdev->event_work);
 }
-EXPORT_SYMBOL(cfg80211_roamed_bss);
+EXPORT_SYMBOL(cfg80211_roamed);
 
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 			     size_t ie_len, u16 reason, bool from_ap)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 8ac413f..a4ab20a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -953,9 +953,7 @@
 				ev->cr.status == WLAN_STATUS_SUCCESS);
 			break;
 		case EVENT_ROAMED:
-			__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
-					  ev->rm.req_ie_len, ev->rm.resp_ie,
-					  ev->rm.resp_ie_len);
+			__cfg80211_roamed(wdev, &ev->rm);
 			break;
 		case EVENT_DISCONNECTED:
 			__cfg80211_disconnected(wdev->netdev,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e26b515..8ce5711 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3308,9 +3308,15 @@
 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
 	struct xfrm_migrate *mp;
 
+	/* Stage 0 - sanity checks */
 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
 		goto out;
 
+	if (dir >= XFRM_POLICY_MAX) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	/* Stage 1 - find policy */
 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
 		err = -ENOENT;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index a7e27e1..b2bba35 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1695,6 +1695,10 @@
 	struct sk_buff *skb;
 	int err;
 
+	err = verify_policy_dir(dir);
+	if (err)
+		return ERR_PTR(err);
+
 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
@@ -2216,6 +2220,10 @@
 	int n = 0;
 	struct net *net = sock_net(skb->sk);
 
+	err = verify_policy_dir(pi->dir);
+	if (err)
+		return err;
+
 	if (attrs[XFRMA_MIGRATE] == NULL)
 		return -EINVAL;
 
@@ -2331,6 +2339,11 @@
 {
 	struct net *net = &init_net;
 	struct sk_buff *skb;
+	int err;
+
+	err = verify_policy_dir(dir);
+	if (err)
+		return err;
 
 	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
 	if (skb == NULL)
@@ -2985,6 +2998,11 @@
 
 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
 {
+	int err;
+
+	err = verify_policy_dir(dir);
+	if (err)
+		return err;
 
 	switch (c->event) {
 	case XFRM_MSG_NEWPOLICY:
diff --git a/security/commoncap.c b/security/commoncap.c
index 3e44d01..b5aca42 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -58,7 +58,7 @@
 }
 
 /**
- * cap_capable - Determine whether a task has a particular effective capability
+ * __cap_capable - Determine whether a task has a particular effective capability
  * @cred: The credentials to use
  * @ns:  The user namespace in which we need the capability
  * @cap: The capability to check for
@@ -72,18 +72,11 @@
  * cap_has_capability() returns 0 when a task has a capability, but the
  * kernel's capable() and has_capability() returns 1 for this case.
  */
-int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+int __cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
 		int cap, int audit)
 {
 	struct user_namespace *ns = targ_ns;
 
-#ifdef CONFIG_ANDROID_PARANOID_NETWORK
-	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
-		return 0;
-	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
-		return 0;
-#endif
-
 	/* See if cred has the capability in the target user namespace
 	 * by examining the target user namespace and all of the target
 	 * user namespace's parents.
@@ -114,6 +107,27 @@
 	/* We never get here */
 }
 
+int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+		int cap, int audit)
+{
+	int ret = __cap_capable(cred, targ_ns, cap, audit);
+
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (ret != 0 && cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW)) {
+		printk("Process %s granted CAP_NET_RAW from Android group net_raw.\n", current->comm);
+		printk("  Please update the .rc file to explictly set 'capabilities NET_RAW'\n");
+		printk("  Implicit grants are deprecated and will be removed in the future.\n");
+		return 0;
+	}
+	if (ret != 0 && cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN)) {
+		printk("Process %s granted CAP_NET_ADMIN from Android group net_admin.\n", current->comm);
+		printk("  Please update the .rc file to explictly set 'capabilities NET_ADMIN'\n");
+		printk("  Implicit grants are deprecated and will be removed in the future.\n");
+		return 0;
+	}
+#endif
+	return ret;
+}
 /**
  * cap_settime - Determine whether the current process may set the system clock
  * @ts: The time to set
diff --git a/sound/core/control.c b/sound/core/control.c
index fb096cb..995cde4 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1156,7 +1156,7 @@
 		mutex_lock(&ue->card->user_ctl_lock);
 		change = ue->tlv_data_size != size;
 		if (!change)
-			change = memcmp(ue->tlv_data, new_data, size);
+			change = memcmp(ue->tlv_data, new_data, size) != 0;
 		kfree(ue->tlv_data);
 		ue->tlv_data = new_data;
 		ue->tlv_data_size = size;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index f3b1d7f..67c4c68 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1502,16 +1502,11 @@
 static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
 {
 	struct snd_seq_queue_info *info = arg;
-	int result;
 	struct snd_seq_queue *q;
 
-	result = snd_seq_queue_alloc(client->number, info->locked, info->flags);
-	if (result < 0)
-		return result;
-
-	q = queueptr(result);
-	if (q == NULL)
-		return -EINVAL;
+	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
+	if (IS_ERR(q))
+		return PTR_ERR(q);
 
 	info->queue = q->queue;
 	info->locked = q->locked;
@@ -1521,7 +1516,7 @@
 	if (!info->name[0])
 		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
 	strlcpy(q->name, info->name, sizeof(q->name));
-	queuefree(q);
+	snd_use_lock_free(&q->use_lock);
 
 	return 0;
 }
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 450c518..79e0c56 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -184,22 +184,26 @@
 static void queue_use(struct snd_seq_queue *queue, int client, int use);
 
 /* allocate a new queue -
- * return queue index value or negative value for error
+ * return pointer to new queue or ERR_PTR(-errno) for error
+ * The new queue's use_lock is set to 1. It is the caller's responsibility to
+ * call snd_use_lock_free(&q->use_lock).
  */
-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
 {
 	struct snd_seq_queue *q;
 
 	q = queue_new(client, locked);
 	if (q == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	q->info_flags = info_flags;
 	queue_use(q, client, 1);
+	snd_use_lock_use(&q->use_lock);
 	if (queue_list_add(q) < 0) {
+		snd_use_lock_free(&q->use_lock);
 		queue_delete(q);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
-	return q->queue;
+	return q;
 }
 
 /* delete a queue - queue must be owned by the client */
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 30c8111..7190934 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -71,7 +71,7 @@
 
 
 /* create new queue (constructor) */
-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
 
 /* delete queue (destructor) */
 int snd_seq_queue_delete(int client, int queueid);
diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
index f0e4d50..066b5df 100644
--- a/sound/firewire/iso-resources.c
+++ b/sound/firewire/iso-resources.c
@@ -210,9 +210,14 @@
  */
 void fw_iso_resources_free(struct fw_iso_resources *r)
 {
-	struct fw_card *card = fw_parent_device(r->unit)->card;
+	struct fw_card *card;
 	int bandwidth, channel;
 
+	/* Not initialized. */
+	if (r->unit == NULL)
+		return;
+	card = fw_parent_device(r->unit)->card;
+
 	mutex_lock(&r->mutex);
 
 	if (r->allocated) {
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index ffc67fd..58e59cd 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -120,24 +120,24 @@
 	unsigned long flags;
 	struct snd_msndmidi *mpu = mpuv;
 	void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+	u16 head, tail, size;
 
 	spin_lock_irqsave(&mpu->input_lock, flags);
-	while (readw(mpu->dev->MIDQ + JQS_wTail) !=
-	       readw(mpu->dev->MIDQ + JQS_wHead)) {
-		u16 wTmp, val;
-		val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
+	head = readw(mpu->dev->MIDQ + JQS_wHead);
+	tail = readw(mpu->dev->MIDQ + JQS_wTail);
+	size = readw(mpu->dev->MIDQ + JQS_wSize);
+	if (head > size || tail > size)
+		goto out;
+	while (head != tail) {
+		unsigned char val = readw(pwMIDQData + 2 * head);
 
-			if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
-				     &mpu->mode))
-				snd_rawmidi_receive(mpu->substream_input,
-						    (unsigned char *)&val, 1);
-
-		wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
-		if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
-			writew(0,  mpu->dev->MIDQ + JQS_wHead);
-		else
-			writew(wTmp,  mpu->dev->MIDQ + JQS_wHead);
+		if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+			snd_rawmidi_receive(mpu->substream_input, &val, 1);
+		if (++head > size)
+			head = 0;
+		writew(head, mpu->dev->MIDQ + JQS_wHead);
 	}
+ out:
 	spin_unlock_irqrestore(&mpu->input_lock, flags);
 }
 EXPORT_SYMBOL(snd_msndmidi_input_read);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 4c07266..a31ea6c 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -170,23 +170,24 @@
 {
 	struct snd_msnd *chip = dev_id;
 	void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+	u16 head, tail, size;
 
 	/* Send ack to DSP */
 	/* inb(chip->io + HP_RXL); */
 
 	/* Evaluate queued DSP messages */
-	while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
-		u16 wTmp;
-
-		snd_msnd_eval_dsp_msg(chip,
-			readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
-		wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
-		if (wTmp > readw(chip->DSPQ + JQS_wSize))
-			writew(0, chip->DSPQ + JQS_wHead);
-		else
-			writew(wTmp, chip->DSPQ + JQS_wHead);
+	head = readw(chip->DSPQ + JQS_wHead);
+	tail = readw(chip->DSPQ + JQS_wTail);
+	size = readw(chip->DSPQ + JQS_wSize);
+	if (head > size || tail > size)
+		goto out;
+	while (head != tail) {
+		snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+		if (++head > size)
+			head = 0;
+		writew(head, chip->DSPQ + JQS_wHead);
 	}
+ out:
 	/* Send ack to DSP */
 	inb(chip->io + HP_RXL);
 	return IRQ_HANDLED;
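
Both msnd hunks above read head, tail and size from the card's shared memory once, validate them against the queue size, and then wrap the head index locally rather than re-reading device memory on every iteration, which keeps an out-of-range index from hostile or glitched hardware state out of the loop. A stand-alone sketch of that bounded drain loop (QSIZE, queue[] and drain() are invented; the real indices live in the DSP's shared memory window):

	#include <stdio.h>

	#define QSIZE 8

	static unsigned short queue[QSIZE + 1];	/* device-visible data words */

	static void drain(unsigned short head, unsigned short tail,
			  unsigned short size)
	{
		if (head > size || tail > size)	/* reject corrupted indices */
			return;

		while (head != tail) {
			printf("msg 0x%04x\n", queue[head]);
			if (++head > size)	/* wrap locally, not via device memory */
				head = 0;
		}
	}

	int main(void)
	{
		for (int i = 0; i <= QSIZE; i++)
			queue[i] = 0x100 + i;

		drain(6, 2, QSIZE);	/* wraps from the end of the queue back to 0 */
		drain(20, 2, QSIZE);	/* out-of-range head is ignored */
		return 0;
	}
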
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c15c51b..f2e4e99 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -854,6 +854,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bb1aad3..6f337f0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2233,6 +2233,7 @@
 	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+	SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
 	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
 
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 10c2a564..1ac96ef 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3833,6 +3833,9 @@
 		}
 	}
 
+	regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
+		RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
+
 	if (rt5645->pdata.jd_invert) {
 		regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
 			RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index be6290d..bd8f34a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -212,6 +212,7 @@
 			pr_debug("%s Don't close BE\n", __func__);
 			continue;
 		}
+
 		snd_soc_dapm_stream_event(be, dir, event);
 	}
 
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 9e7861a..6c3d62f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -554,6 +554,8 @@
 
 	if (size < sizeof(scale))
 		return -ENOMEM;
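+	/* Advertise the lowest volume step as mute when the quirk flag is set */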
+	if (cval->min_mute)
+		scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
 	scale[2] = cval->dBmin;
 	scale[3] = cval->dBmax;
 	if (copy_to_user(_tlv, scale, sizeof(scale)))
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 3417ef3..2b4b067 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -64,6 +64,7 @@
 	int cached;
 	int cache_val[MAX_CHANNELS];
 	u8 initialized;
+	u8 min_mute;
 	void *private_data;
 };
 
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 04991b0..5d2fc5f 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1873,6 +1873,12 @@
 		if (unitid == 7 && cval->control == UAC_FU_VOLUME)
 			snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
 		break;
+	/* lowest playback value is muted on C-Media devices */
+	case USB_ID(0x0d8c, 0x000c):
+	case USB_ID(0x0d8c, 0x0014):
+		if (strstr(kctl->id.name, "Playback"))
+			cval->min_mute = 1;
+		break;
 	}
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 8279009..dd81574 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -39,6 +39,8 @@
 #define SUBSTREAM_FLAG_DATA_EP_STARTED	0
 #define SUBSTREAM_FLAG_SYNC_EP_STARTED	1
 
+#define MAX_SETALT_TIMEOUT_MS 1000
+
 /* return the estimated delay based on USB frame counters */
 snd_pcm_uframes_t snd_usb_pcm_delay(struct snd_usb_substream *subs,
 				    unsigned int rate)
@@ -508,7 +510,8 @@
 
 	/* close the old interface */
 	if (subs->interface >= 0 && subs->interface != fmt->iface) {
-		err = usb_set_interface(subs->dev, subs->interface, 0);
+		err = usb_set_interface_timeout(subs->dev, subs->interface, 0,
+			MAX_SETALT_TIMEOUT_MS);
 		if (err < 0) {
 			dev_err(&dev->dev,
 				"%d:%d: return to setting 0 failed (%d)\n",
@@ -527,7 +530,8 @@
 		if (err < 0)
 			return -EIO;
 
-		err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+		err = usb_set_interface_timeout(dev, fmt->iface,
+				fmt->altsetting, MAX_SETALT_TIMEOUT_MS);
 		if (err < 0) {
 			dev_err(&dev->dev,
 				"%d:%d: usb_set_interface failed (%d)\n",
@@ -574,7 +578,8 @@
 
 	if (!enable) {
 		if (subs->interface >= 0) {
-			usb_set_interface(subs->dev, subs->interface, 0);
+			usb_set_interface_timeout(subs->dev, subs->interface, 0,
+				MAX_SETALT_TIMEOUT_MS);
 			subs->altset_idx = 0;
 			subs->interface = -1;
 			subs->cur_audiofmt = NULL;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index eb4b9f7..286efc3 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1142,6 +1142,7 @@
 	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
 	case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+	case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
 	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
 	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
@@ -1308,10 +1309,13 @@
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
 
-	/* Zoom R16/24 needs a tiny delay here, otherwise requests like
-	 * get/set frequency return as failed despite actually succeeding.
+	/* Zoom R16/24, Logitech H650e and Jabra 550a need a tiny delay here,
+	 * otherwise requests like get/set frequency return as failed despite
+	 * actually succeeding.
 	 */
-	if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
+	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
+	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
+	     chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(1);
 }
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 801508c..ebc081f 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -45,8 +45,7 @@
 /*  event ring iova base address */
 #define IOVA_BASE 0x1000
 
-#define IOVA_DCBA_BASE 0x2000
-#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_RING_BASE (IOVA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
 #define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
 #define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
 #define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
@@ -83,8 +82,6 @@
 	unsigned int card_num;
 	atomic_t in_use;
 	struct kref kref;
-	unsigned long dcba_iova;
-	size_t dcba_size;
 	wait_queue_head_t disconnect_wq;
 
 	/* interface specific */
@@ -101,9 +98,6 @@
 	struct iommu_domain *domain;
 
 	/* list to keep track of available iova */
-	struct list_head dcba_list;
-	size_t dcba_iova_size;
-	unsigned long curr_dcba_iova;
 	struct list_head xfer_ring_list;
 	size_t xfer_ring_iova_size;
 	unsigned long curr_xfer_ring_iova;
@@ -150,7 +144,6 @@
 
 enum mem_type {
 	MEM_EVENT_RING,
-	MEM_DCBA,
 	MEM_XFER_RING,
 	MEM_XFER_BUF,
 };
@@ -176,6 +169,24 @@
 	USB_QMI_PCM_FORMAT_U32_BE,
 };
 
+static enum usb_audio_device_speed_enum_v01
+get_speed_info(enum usb_device_speed udev_speed)
+{
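+	/* Anything other than LS/FS/HS/SS is reported back as invalid */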
+	switch (udev_speed) {
+	case USB_SPEED_LOW:
+		return USB_AUDIO_DEVICE_SPEED_LOW_V01;
+	case USB_SPEED_FULL:
+		return USB_AUDIO_DEVICE_SPEED_FULL_V01;
+	case USB_SPEED_HIGH:
+		return USB_AUDIO_DEVICE_SPEED_HIGH_V01;
+	case USB_SPEED_SUPER:
+		return USB_AUDIO_DEVICE_SPEED_SUPER_V01;
+	default:
+		pr_err("%s: udev speed %d\n", __func__, udev_speed);
+		return USB_AUDIO_DEVICE_SPEED_INVALID_V01;
+	}
+}
+
 static unsigned long uaudio_get_iova(unsigned long *curr_iova,
 	size_t *curr_iova_size, struct list_head *head, size_t size)
 {
@@ -275,10 +286,6 @@
 		if (uaudio_qdev->er_phys_addr == pa)
 			map = false;
 		break;
-	case MEM_DCBA:
-		va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
-		&uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
-		break;
 	case MEM_XFER_RING:
 		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
 		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
@@ -363,10 +370,7 @@
 		else
 			unmap = false;
 		break;
-	case MEM_DCBA:
-		uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
-		&uaudio_qdev->dcba_iova_size);
-		break;
+
 	case MEM_XFER_RING:
 		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
 		&uaudio_qdev->xfer_ring_iova_size);
@@ -407,10 +411,12 @@
 	int protocol, card_num, pcm_dev_num;
 	void *hdr_ptr;
 	u8 *xfer_buf;
-	u32 len, mult, remainder, xfer_buf_len;
-	unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
-	xfer_buf_va = 0;
-	phys_addr_t xhci_pa, xfer_buf_pa;
+	u32 len, mult, remainder, xfer_buf_len, sg_len, i, total_len = 0;
+	unsigned long va, va_sg, tr_data_va = 0, tr_sync_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa, tr_data_pa = 0, tr_sync_pa = 0;
+	dma_addr_t dma;
+	struct sg_table sgt;
+	struct scatterlist *sg;
 
 	iface = usb_ifnum_to_if(subs->dev, subs->interface);
 	if (!iface) {
@@ -524,13 +530,13 @@
 	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
 	resp->std_as_data_ep_desc_valid = 1;
 
-	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
-	if (!xhci_pa) {
+	tr_data_pa = usb_get_xfer_ring_phys_addr(subs->dev, ep, &dma);
+	if (!tr_data_pa) {
 		pr_err("%s:failed to get data ep ring dma address\n", __func__);
 		goto err;
 	}
 
-	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+	resp->xhci_mem_info.tr_data.pa = dma;
 
 	if (subs->sync_endpoint) {
 		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
@@ -541,19 +547,26 @@
 		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
 		resp->std_as_sync_ep_desc_valid = 1;
 
-		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
-		if (!xhci_pa) {
+		tr_sync_pa = usb_get_xfer_ring_phys_addr(subs->dev, ep, &dma);
+		if (!tr_sync_pa) {
 			pr_err("%s:failed to get sync ep ring dma address\n",
 				__func__);
 			goto err;
 		}
-		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+		resp->xhci_mem_info.tr_sync.pa = dma;
 	}
 
 skip_sync_ep:
 	resp->interrupter_num = uaudio_qdev->intr_num;
 	resp->interrupter_num_valid = 1;
 
+	ret = usb_get_controller_id(subs->dev);
+	if (ret < 0)
+		goto err;
+
+	resp->controller_num = ret;
+	resp->controller_num_valid = 1;
+
 	/*  map xhci data structures PA memory to iova */
 
 	/* event ring */
@@ -563,8 +576,8 @@
 			ret);
 		goto err;
 	}
-	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
-			resp->interrupter_num);
+	xhci_pa = usb_get_sec_event_ring_phys_addr(subs->dev,
+			resp->interrupter_num, &dma);
 	if (!xhci_pa) {
 		pr_err("%s: failed to get sec event ring dma address\n",
 		__func__);
@@ -577,37 +590,20 @@
 
 	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
 						uaudio_qdev->sid);
-	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.pa = dma;
 	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
 	uaudio_qdev->er_phys_addr = xhci_pa;
 
-	/* dcba */
-	xhci_pa = usb_get_dcba_dma_addr(subs->dev);
-	if (!xhci_pa) {
-		pr_err("%s:failed to get dcba dma address\n", __func__);
+	resp->speed_info = get_speed_info(subs->dev->speed);
+	if (resp->speed_info == USB_AUDIO_DEVICE_SPEED_INVALID_V01)
 		goto unmap_er;
-	}
 
-	if (!uadev[card_num].dcba_iova) { /* mappped per usb device */
-		va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
-		if (!va)
-			goto unmap_er;
-
-		uadev[card_num].dcba_iova = va;
-		uadev[card_num].dcba_size = PAGE_SIZE;
-	}
-
-	dcba_va = uadev[card_num].dcba_iova;
-	resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
-						uaudio_qdev->sid);
-	resp->xhci_mem_info.dcba.pa = xhci_pa;
-	resp->xhci_mem_info.dcba.size = PAGE_SIZE;
+	resp->speed_info_valid = 1;
 
 	/* data transfer ring */
-	xhci_pa = resp->xhci_mem_info.tr_data.pa;
-	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	va = uaudio_iommu_map(MEM_XFER_RING, tr_data_pa, PAGE_SIZE);
 	if (!va)
-		goto unmap_dcba;
+		goto unmap_er;
 
 	tr_data_va = va;
 	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
@@ -619,7 +615,7 @@
 		goto skip_sync;
 
 	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
-	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	va = uaudio_iommu_map(MEM_XFER_RING, tr_sync_pa, PAGE_SIZE);
 	if (!va)
 		goto unmap_data;
 
@@ -648,19 +644,33 @@
 	if (!xfer_buf)
 		goto unmap_sync;
 
+	dma_get_sgtable(subs->dev->bus->sysdev, &sgt, xfer_buf, xfer_buf_pa,
+			len);
+
+	va = 0;
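+	/* Map each SG segment; the first IOVA returned is used as the base */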
+	for_each_sg(sgt.sgl, sg, sgt.nents, i) {
+		sg_len = PAGE_ALIGN(sg->offset + sg->length);
+		va_sg = uaudio_iommu_map(MEM_XFER_BUF,
+			page_to_phys(sg_page(sg)), sg_len);
+		if (!va_sg)
+			goto unmap_xfer_buf;
+
+		if (!va)
+			va = va_sg;
+
+		total_len += sg_len;
+	}
+
 	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
 	resp->xhci_mem_info.xfer_buff.size = len;
 
-	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
-	if (!va)
-		goto unmap_sync;
-
-	xfer_buf_va = va;
 	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
 						uaudio_qdev->sid);
 
 	resp->xhci_mem_info_valid = 1;
 
+	sg_free_table(&sgt);
+
 	if (!atomic_read(&uadev[card_num].in_use)) {
 		kref_init(&uadev[card_num].kref);
 		init_waitqueue_head(&uadev[card_num].disconnect_wq);
@@ -686,7 +696,7 @@
 	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
 	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
 	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
-	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_va = va;
 	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
 	uadev[card_num].info[info_idx].xfer_buf_size = len;
 	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
@@ -701,14 +711,13 @@
 	return 0;
 
 unmap_xfer_buf:
-	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+	if (va)
+		uaudio_iommu_unmap(MEM_XFER_BUF, va, total_len);
 unmap_sync:
 	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
 	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
 unmap_data:
 	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
-unmap_dcba:
-	uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
 unmap_er:
 	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
 err:
@@ -754,11 +763,6 @@
 			dev->info[if_idx].intf_num, dev->card_num);
 	}
 
-	/* iommu_unmap dcba iova for a usb device */
-	uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
-
-	dev->dcba_iova = 0;
-	dev->dcba_size = 0;
 	dev->num_intf = 0;
 
 	/* free interface info */
@@ -1228,11 +1232,7 @@
 		goto free_domain;
 	}
 
-	/* initialize dcba, xfer ring and xfer buf iova list */
-	INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
-	uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
-	uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
-
+	/* initialize xfer ring and xfer buf iova list */
 	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
 	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
 	uaudio_qdev->xfer_ring_iova_size =
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
index fef7505..a93665c 100644
--- a/sound/usb/usb_audio_qmi_v01.c
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -633,6 +633,46 @@
 					interrupter_num),
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_device_speed_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1C,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					speed_info),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1D,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					controller_num),
+	},
+	{
 		.data_type      = QMI_EOTI,
 		.is_array       = NO_ARRAY,
 		.is_array       = QMI_COMMON_TLV_TYPE,
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
index 83a966c..9900764 100644
--- a/sound/usb/usb_audio_qmi_v01.h
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,16 @@
 	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
 };
 
+enum usb_audio_device_speed_enum_v01 {
+	USB_AUDIO_DEVICE_SPEED_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEVICE_SPEED_INVALID_V01 = 0,
+	USB_AUDIO_DEVICE_SPEED_LOW_V01 = 1,
+	USB_AUDIO_DEVICE_SPEED_FULL_V01 = 2,
+	USB_AUDIO_DEVICE_SPEED_HIGH_V01 = 3,
+	USB_AUDIO_DEVICE_SPEED_SUPER_V01 = 4,
+	USB_AUDIO_DEVICE_SPEED_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
 struct qmi_uaudio_stream_req_msg_v01 {
 	uint8_t enable;
 	uint32_t usb_token;
@@ -118,8 +128,12 @@
 	struct apps_mem_info_v01 xhci_mem_info;
 	uint8_t interrupter_num_valid;
 	uint8_t interrupter_num;
+	uint8_t speed_info_valid;
+	enum usb_audio_device_speed_enum_v01 speed_info;
+	uint8_t controller_num_valid;
+	uint8_t controller_num;
 };
-#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 202
 extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
 
 struct qmi_uaudio_stream_ind_msg_v01 {
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 6c50d9f..6a6f44d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -163,7 +163,7 @@
 
 	/* A file path -- this is an offline module */
 	if (module && strchr(module, '/'))
-		return machine__findnew_module_map(host_machine, 0, module);
+		return dso__new_map(module);
 
 	if (!module)
 		module = "kernel";
@@ -173,6 +173,7 @@
 		if (strncmp(pos->dso->short_name + 1, module,
 			    pos->dso->short_name_len - 2) == 0 &&
 		    module[pos->dso->short_name_len - 2] == '\0') {
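+			/* Take a reference so callers can uniformly map__put() */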
+			map__get(pos);
 			return pos;
 		}
 	}
@@ -188,15 +189,6 @@
 		return kernel_get_module_map(target);
 }
 
-static void put_target_map(struct map *map, bool user)
-{
-	if (map && user) {
-		/* Only the user map needs to be released */
-		map__put(map);
-	}
-}
-
-
 static int convert_exec_to_group(const char *exec, char **result)
 {
 	char *ptr1, *ptr2, *exec_copy;
@@ -412,7 +404,7 @@
 	}
 
 out:
-	put_target_map(map, uprobes);
+	map__put(map);
 	return ret;
 
 }
@@ -2944,7 +2936,7 @@
 	}
 
 out:
-	put_target_map(map, pev->uprobes);
+	map__put(map);
 	free(syms);
 	return ret;
 
@@ -3437,10 +3429,7 @@
 		return ret;
 
 	/* Get a symbol map */
-	if (user)
-		map = dso__new_map(target);
-	else
-		map = kernel_get_module_map(target);
+	map = get_target_map(target, user);
 	if (!map) {
 		pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
 		return -EINVAL;
@@ -3472,9 +3461,7 @@
         }
 
 end:
-	if (user) {
-		map__put(map);
-	}
+	map__put(map);
 	exit_probe_symbol_maps();
 
 	return ret;
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index a676d3e..b3c48fc 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -305,7 +305,7 @@
 	echo "Running remote perf test $WITH DMA"
 	write_file "" $REMOTE_PERF/run
 	echo -n "  "
-	read_file $LOCAL_PERF/run
+	read_file $REMOTE_PERF/run
 	echo "  Passed"
 
 	_modprobe -r ntb_perf
@@ -326,6 +326,10 @@
 	link_test $LOCAL_TOOL $REMOTE_TOOL
 	link_test $REMOTE_TOOL $LOCAL_TOOL
 
+	# Ensure the link is up on both sides before continuing
+	write_file Y $LOCAL_TOOL/link_event
+	write_file Y $REMOTE_TOOL/link_event
+
 	for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
 		PT=$(basename $PEER_TRANS)
 		write_file $MW_SIZE $LOCAL_TOOL/$PT
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 5b2b4b3..9b4610c 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -285,9 +285,12 @@
 	}
 }
 
-static void set_gs_and_switch_to(unsigned long local, unsigned long remote)
+static void set_gs_and_switch_to(unsigned long local,
+				 unsigned short force_sel,
+				 unsigned long remote)
 {
 	unsigned long base;
+	unsigned short sel_pre_sched, sel_post_sched;
 
 	bool hard_zero = false;
 	if (local == HARD_ZERO) {
@@ -297,6 +300,8 @@
 
 	printf("[RUN]\tARCH_SET_GS(0x%lx)%s, then schedule to 0x%lx\n",
 	       local, hard_zero ? " and clear gs" : "", remote);
+	if (force_sel)
+		printf("\tBefore schedule, set selector to 0x%hx\n", force_sel);
 	if (syscall(SYS_arch_prctl, ARCH_SET_GS, local) != 0)
 		err(1, "ARCH_SET_GS");
 	if (hard_zero)
@@ -307,18 +312,35 @@
 		printf("[FAIL]\tGSBASE wasn't set as expected\n");
 	}
 
+	if (force_sel) {
+		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
+		sel_pre_sched = force_sel;
+		local = read_base(GS);
+
+		/*
+		 * Signal delivery seems to mess up weird selectors.  Put it
+		 * back.
+		 */
+		asm volatile ("mov %0, %%gs" : : "rm" (force_sel));
+	} else {
+		asm volatile ("mov %%gs, %0" : "=rm" (sel_pre_sched));
+	}
+
 	remote_base = remote;
 	ftx = 1;
 	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
 	while (ftx != 0)
 		syscall(SYS_futex, &ftx, FUTEX_WAIT, 1, NULL, NULL, 0);
 
+	asm volatile ("mov %%gs, %0" : "=rm" (sel_post_sched));
 	base = read_base(GS);
-	if (base == local) {
-		printf("[OK]\tGSBASE remained 0x%lx\n", local);
+	if (base == local && sel_pre_sched == sel_post_sched) {
+		printf("[OK]\tGS/BASE remained 0x%hx/0x%lx\n",
+		       sel_pre_sched, local);
 	} else {
 		nerrs++;
-		printf("[FAIL]\tGSBASE changed to 0x%lx\n", base);
+		printf("[FAIL]\tGS/BASE changed from 0x%hx/0x%lx to 0x%hx/0x%lx\n",
+		       sel_pre_sched, local, sel_post_sched, base);
 	}
 }
 
@@ -381,8 +403,15 @@
 
 	for (int local = 0; local < 4; local++) {
 		for (int remote = 0; remote < 4; remote++) {
-			set_gs_and_switch_to(bases_with_hard_zero[local],
-					     bases_with_hard_zero[remote]);
+			for (unsigned short s = 0; s < 5; s++) {
+				unsigned short sel = s;
+				if (s == 4)
+					asm ("mov %%ss, %0" : "=rm" (sel));
+				set_gs_and_switch_to(
+					bases_with_hard_zero[local],
+					sel,
+					bases_with_hard_zero[remote]);
+			}
 		}
 	}