Merge "soundwire: Fix swr device count logic in registration"
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index d9995f1..a25a99e 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -11,24 +11,56 @@
 The kernel configures the translation tables so that translations made
 via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
 the virtual address ignored by the translation hardware. This frees up
-this byte for application use, with the following caveats:
+this byte for application use.
 
-	(1) The kernel requires that all user addresses passed to EL1
-	    are tagged with tag 0x00. This means that any syscall
-	    parameters containing user virtual addresses *must* have
-	    their top byte cleared before trapping to the kernel.
 
-	(2) Non-zero tags are not preserved when delivering signals.
-	    This means that signal handlers in applications making use
-	    of tags cannot rely on the tag information for user virtual
-	    addresses being maintained for fields inside siginfo_t.
-	    One exception to this rule is for signals raised in response
-	    to watchpoint debug exceptions, where the tag information
-	    will be preserved.
+Passing tagged addresses to the kernel
+--------------------------------------
 
-	(3) Special care should be taken when using tagged pointers,
-	    since it is likely that C compilers will not hazard two
-	    virtual addresses differing only in the upper byte.
+All interpretation of userspace memory addresses by the kernel assumes
+an address tag of 0x00.
+
+This includes, but is not limited to, addresses found in:
+
+ - pointer arguments to system calls, including pointers in structures
+   passed to system calls,
+
+ - the stack pointer (sp), e.g. when interpreting it to deliver a
+   signal,
+
+ - the frame pointer (x29) and frame records, e.g. when interpreting
+   them to generate a backtrace or call graph.
+
+Using non-zero address tags in any of these locations may result in an
+error code being returned, a (fatal) signal being raised, or other modes
+of failure.
+
+For these reasons, passing non-zero address tags to the kernel via
+system calls is forbidden, and using a non-zero address tag for sp is
+strongly discouraged.
+
+Programs maintaining a frame pointer and frame records that use non-zero
+address tags may suffer impaired or inaccurate debug and profiling
+visibility.
+
+
+Preserving tags
+---------------
+
+Non-zero tags are not preserved when delivering signals. This means that
+signal handlers in applications making use of tags cannot rely on the
+tag information for user virtual addresses being maintained for fields
+inside siginfo_t. One exception to this rule is for signals raised in
+response to watchpoint debug exceptions, where the tag information will
+be preserved.
 
 The architecture prevents the use of a tagged PC, so the upper byte will
 be set to a sign-extension of bit 55 on exception return.
+
+
+Other considerations
+--------------------
+
+Special care should be taken when using tagged pointers, since it is
+likely that C compilers will not hazard two virtual addresses differing
+only in the upper byte.
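As an illustration of the tag rules above, a user-space application that
stores metadata in the top byte might clear the tag before handing a
pointer to the kernel. This is a minimal sketch, not part of any kernel
or libc interface:

	#include <stdint.h>
	#include <unistd.h>

	/* Clear bits 63:56 so the kernel sees an address tag of 0x00. */
	static inline void *untag_pointer(void *tagged)
	{
		return (void *)((uintptr_t)tagged & ~(0xffUL << 56));
	}

	static ssize_t tagged_write(int fd, void *tagged_buf, size_t len)
	{
		/* System calls must only see untagged user addresses. */
		return write(fd, untag_pointer(tagged_buf), len);
	}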
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index bf93a2a..6451b34 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -89,8 +89,8 @@
 - SDM845
   compatible = "qcom,sdm845"
 
-- SDM830
-  compatible = "qcom,sdm830"
+- SDM670
+  compatible = "qcom,sdm670"
 
 - MSM8952
   compatible = "qcom,msm8952"
@@ -267,10 +267,9 @@
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-qrd"
-compatible = "qcom,sdm830-sim"
-compatible = "qcom,sdm830-rumi"
-compatible = "qcom,sdm830-cdp"
-compatible = "qcom,sdm830-mtp"
+compatible = "qcom,sdm670-rumi"
+compatible = "qcom,sdm670-cdp"
+compatible = "qcom,sdm670-mtp"
 compatible = "qcom,msm8952-rumi"
 compatible = "qcom,msm8952-sim"
 compatible = "qcom,msm8952-qrd"
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
new file mode 100644
index 0000000..a415c8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_memory_dump.txt
@@ -0,0 +1,31 @@
+Qualcomm Technologies, Inc. memory dump driver
+
+The QTI memory dump driver allows client subsystems to register and allocate
+their respective dump regions. At the time of deadlocks or CPU hangs, these
+regions are captured to give a snapshot of the system at the time of the crash.
+
+Required properties:
+
+-compatible: "qcom,mem-dump"
+-memory-region: phandle to the CMA region. The size of the CMA region
+		should be greater than sum of size of all child nodes
+		to account for padding.
+
+If any child nodes exist, the following properties are required:
+
+-qcom,dump-size: The size of memory that needs to be allocated for the
+		 particular node.
+-qcom,dump-id: The ID within the data dump table where this entry needs
+	       to be added.
+
+Example:
+
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xEC>;
+		};
+	};
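Since the binding above requires the CMA region to be larger than the sum
of the child qcom,dump-size values, a client-side sanity check could look
like the following sketch (the function name and the way the region size
is obtained are illustrative assumptions, not the actual driver code):

	#include <linux/of.h>

	/* Sum qcom,dump-size over all children and check that the region,
	 * including padding headroom, can hold them. */
	static bool mem_dump_region_fits(struct device_node *parent,
					 u64 region_size)
	{
		struct device_node *child;
		u64 total = 0;
		u32 size;

		for_each_child_of_node(parent, child) {
			if (of_property_read_u32(child, "qcom,dump-size",
						 &size))
				continue;
			total += size;
		}

		/* Strictly greater, to leave room for padding. */
		return total < region_size;
	}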
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index c467327..0c75cf6 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -32,6 +32,8 @@
 - clocks	    : List of phandle and clock specifier pairs
 - clock-names       : List of clock input name strings sorted in the same
 		      order as the clocks property.
+- qcom,keep-radio-on-during-sleep: Boolean flag indicating whether to suspend to
+				   D3hot instead of turning off the device.
 
 Example:
 	wil6210: qcom,wil6210 {
@@ -56,5 +58,6 @@
 		clocks = <&clock_gcc clk_rf_clk3>,
 			 <&clock_gcc clk_rf_clk3_pin>;
 		clock-names = "rf_clk3_clk", "rf_clk3_pin_clk";
+		qcom,keep-radio-on-during-sleep;
 	};
 
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index c766df8..5cf2cb8 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -132,8 +132,6 @@
 				power collapse feature available or not.
 - qcom,sde-has-mixer-gc:	Boolean property to indicate if mixer has gamma correction
 				feature available or not.
-- qcom,sde-has-cdp:		Boolean property to indicate if cdp feature is
-				available or not.
 - qcom,sde-sspp-clk-ctrl:	Array of offsets describing clk control
 				offsets for dynamic clock gating. 1st value
 				in the array represents offset of the control
@@ -148,10 +146,6 @@
 				control register. Number of offsets defined should
 				match the number of offsets defined in
 				property: qcom,sde-sspp-off.
-- qcom,sde-sspp-danger-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
-				indicating the danger luts on sspp.
-- qcom,sde-sspp-safe-lut:	A 3 cell property, with a format of <linear, tile, nrt>,
-				indicating the safe luts on sspp.
 - qcom,sde-sspp-excl-rect:	Array of u32 values indicating exclusion rectangle
 				support on each sspp.
 - qcom,sde-sspp-smart-dma-priority:	Array of u32 values indicating hw pipe
@@ -240,6 +234,8 @@
 				of (pps, OT limit), where pps is pixel per second and
 				OT limit is the write limit to apply if the given
 				pps is not exceeded.
+- qcom,sde-vbif-memtype-0:	Array of u32 vbif memory type settings, group 0
+- qcom,sde-vbif-memtype-1:	Array of u32 vbif memory type settings, group 1
 - qcom,sde-wb-id:		Array of writeback ids corresponding to the
 				offsets defined in property: qcom,sde-wb-off.
 - qcom,sde-wb-clk-ctrl:		Array of 2 cell property describing clk control
@@ -297,6 +293,35 @@
 - qcom,sde-downscaling-prefill-lines:	A u32 value indicates the latency of downscaling in lines.
 - qcom,sde-max-per-pipe-bw-kbps:	Array of u32 value indicates the max per pipe bandwidth in Kbps.
 - qcom,sde-amortizable-threshold:	This value indicates the min for traffic shaping in lines.
+- qcom,sde-vbif-qos-rt-remap:	This array is used to program vbif qos remapper register
+				priority for realtime clients.
+- qcom,sde-vbif-qos-nrt-remap:	This array is used to program vbif qos remapper register
+				priority for non-realtime clients.
+- qcom,sde-danger-lut:		A 4 cell property, with a format of <linear,
+				tile, nrt, cwb>,
+				indicating the danger luts on sspp.
+- qcom,sde-safe-lut:		A 4 cell property, with a format of <linear,
+				tile, nrt, cwb>,
+				indicating the safe luts on sspp.
+- qcom,sde-qos-lut-linear:	Array of 3 cell property, with a format of
+				<fill level, lut hi, lut lo> in ascending fill level
+				indicating the qos luts for linear format on sspp.
+				Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-macrotile:	Array of 3 cell property, with a format of
+				<fill level, lut hi, lut lo> in ascending fill level
+				indicating the qos luts for macrotile format on sspp.
+				Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-nrt:		Array of 3 cell property, with a format of
+				<fill level, lut hi, lut lo> in ascending fill level
+				indicating the qos luts for nrt (e.g wfd) on sspp.
+				Zero fill level on the last entry identifies the default lut.
+- qcom,sde-qos-lut-cwb:		Array of 3 cell property, with a format of
+				<fill level, lut hi, lut lo> in ascending fill level
+				indicating the qos luts for cwb on sspp.
+				Zero fill level on the last entry identifies the default lut.
+- qcom,sde-cdp-setting:		Array of 2 cell property, with a format of
+				<read enable, write enable> for cdp use cases in
+				order of <real_time>, and <non_real_time>.
 
 Bus Scaling Subnodes:
 - qcom,sde-reg-bus:		Property to provide Bus scaling for register access for
@@ -425,7 +450,6 @@
     qcom,sde-ubwc-static = <0x100>;
     qcom,sde-ubwc-swizzle = <0>;
     qcom,sde-panic-per-pipe;
-    qcom,sde-has-cdp;
     qcom,sde-has-src-split;
     qcom,sde-has-dim-layer;
     qcom,sde-sspp-src-size = <0x100>;
@@ -467,8 +491,35 @@
     qcom,sde-wb-id = <2>;
     qcom,sde-wb-clk-ctrl = <0x2bc 16>;
 
-    qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
-    qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+    qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+            0x00000000>;
+    qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+    qcom,sde-qos-lut-linear =
+            <4 0x00000000 0x00000357>,
+            <5 0x00000000 0x00003357>,
+            <6 0x00000000 0x00023357>,
+            <7 0x00000000 0x00223357>,
+            <8 0x00000000 0x02223357>,
+            <9 0x00000000 0x22223357>,
+            <10 0x00000002 0x22223357>,
+            <11 0x00000022 0x22223357>,
+            <12 0x00000222 0x22223357>,
+            <13 0x00002222 0x22223357>,
+            <14 0x00012222 0x22223357>,
+            <0 0x00112222 0x22223357>;
+    qcom,sde-qos-lut-macrotile =
+            <10 0x00000003 0x44556677>,
+            <11 0x00000033 0x44556677>,
+            <12 0x00000233 0x44556677>,
+            <13 0x00002233 0x44556677>,
+            <14 0x00012233 0x44556677>,
+            <0 0x00112233 0x44556677>;
+    qcom,sde-qos-lut-nrt =
+            <0 0x00000000 0x00000000>;
+    qcom,sde-qos-lut-cwb =
+            <0 0x75300000 0x00000000>;
+
+    qcom,sde-cdp-setting = <1 1>, <1 0>;
 
     qcom,sde-vbif-off = <0 0>;
     qcom,sde-vbif-id = <0 1>;
@@ -478,6 +529,8 @@
         <124416000 4>, <248832000 16>;
     qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
         <124416000 4>, <248832000 16>;
+    qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+    qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
 
     qcom,sde-dram-channels = <2>;
     qcom,sde-num-nrt-paths = <1>;
@@ -500,6 +553,9 @@
         2400000 2400000 2400000 2400000>;
     qcom,sde-amortizable-threshold = <11>;
 
+    qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+    qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
     qcom,sde-sspp-vig-blocks {
         qcom,sde-vig-csc-off = <0x320>;
         qcom,sde-vig-qseed-off = <0x200>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index d0d7fff..59fa6a0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -16,7 +16,7 @@
                         "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
                         "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
                         "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
-                        "qcom,mdss_hdmi_pll_8998"
+                        "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm".
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
new file mode 100644
index 0000000..8598d0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/qti,pdc.txt
@@ -0,0 +1,63 @@
+QTI PDC interrupt controller
+
+PDC is QTI's platform parent interrupt controller that serves as a wakeup source.
+
+Newer QTI SOCs are replacing MPM (MSM sleep Power Manager) with PDC (Power
+Domain Controller) to manage subsystem wakeups and resources during sleep.
+This driver marks the wakeup interrupts in the APSS PDC so that the PDC
+monitors them when the system is asleep, wakes up the APSS when one of these
+interrupts occurs, and replays it to the subsystem interrupt controller once
+that controller becomes operational.
+
+The earlier MPM architecture used an arch-extension of the GIC interrupt
+controller to mark enabled wake-up interrupts and monitor them when the
+system goes to sleep. Since the arch-extensions are no longer available
+on newer kernel versions, this driver is implemented as a hierarchical irq
+domain. The GIC is the parent interrupt controller at the highest level;
+the platform interrupt controller PDC is next in the hierarchy, followed
+by others. This driver only configures the interrupts; it does not handle them.
+
+PDC interrupt configuration involves programming two sets of registers:
+IRQ_ENABLE_BANK    - Enable the irq
+IRQ_i_CFG          - Configure the interrupt i
+
+Properties:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Should contain "qcom,pdc-<target>"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Specifies the base physical address for PDC hardware
+			block for DRV2.
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: Specifies the number of cells needed to encode an interrupt source.
+			Value must be 3.
+			The encoding of these cells are same as described in
+			Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+
+- interrupt-parent:
+	Usage: required
+	Value type: <phandle>
+	Definition: Specifies the interrupt parent necessary for hierarchical domain to operate.
+
+- interrupt-controller:
+	Usage: required
+	Value type: <bool>
+	Definition: Identifies the node as an interrupt controller.
+
+Example:
+
+pdcgic: interrupt-controller@b220000 {
+	compatible = "qcom,pdc-sdm845";
+	reg = <0xb220000 0x30000>;
+	#interrupt-cells = <3>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+};
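The two register sets named above could be programmed along the lines of
the sketch below; the offsets, bank stride and configuration encoding are
illustrative assumptions only, not the actual PDC register layout:

	#include <linux/bitops.h>
	#include <linux/io.h>

	#define PDC_IRQ_ENABLE_BANK(b)	(0x10 + (b) * 4)   /* hypothetical */
	#define PDC_IRQ_CFG(i)		(0x110 + (i) * 4)  /* hypothetical */

	/* Mark interrupt i as a wakeup source: write its IRQ_i_CFG and set
	 * its bit in the corresponding IRQ_ENABLE_BANK register. */
	static void pdc_enable_wake_irq(void __iomem *base, unsigned int i,
					u32 cfg)
	{
		unsigned int bank = i / 32;
		u32 val;

		writel_relaxed(cfg, base + PDC_IRQ_CFG(i));

		val = readl_relaxed(base + PDC_IRQ_ENABLE_BANK(bank));
		writel_relaxed(val | BIT(i % 32),
			       base + PDC_IRQ_ENABLE_BANK(bank));
	}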
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 2d971b7a..1394fd3 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -74,18 +74,15 @@
                   address size faults are due to a fundamental programming
                   error from which we don't care about recovering anyways.
 
-- qcom,skip-init : Disable resetting configuration for all context banks
-                  during device reset.  This is useful for targets where
-                  some context banks are dedicated to other execution
-                  environments outside of Linux and those other EEs are
-                  programming their own stream match tables, SCTLR, etc.
-                  Without setting this option we will trample on their
-                  configuration.
-
 - qcom,dynamic  : Allow dynamic domains to be attached. This is only
 		  useful if the upstream hardware is capable of switching
 		  between multiple domains within a single context bank.
 
+- qcom,use-3-lvl-tables:
+		  Some hardware configurations may not be optimized for using
+		  a four level page table configuration. Set to use a three
+		  level page table instead.
+
 - clocks        : List of clocks to be used during SMMU register access. See
                   Documentation/devicetree/bindings/clock/clock-bindings.txt
                   for information about the format. For each clock specified
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
index da54fb1..176f9e1 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
@@ -169,8 +169,15 @@
                           sleep configuration defined for each pin or pin group.
 - qcom,hw-strobe-gpio	: phandle to specify GPIO for hardware strobing. This is used when there is no
 			  pinctrl support or PMIC GPIOs are used.
-- qcom,hw-strobe-sel	: Boolean property to enable hardware strobe. If not defined, software strobe
-			  will be used.
+- qcom,strobe-sel	: Property to select strobe type. If not defined,
+			  software strobe will be used. Allowed options are:
+			  0 - SW strobe
+			  1 - HW strobe
+			  2 - LPG strobe
+			  LPG strobe is supported only for LED3.
+			  If LPG strobe is specified, then strobe control is
+			  configured for active high and level triggered. Also
+			  qcom,hw-strobe-option should be set to 1 or 2.
 - qcom,hw-strobe-edge-trigger	: Boolean property to select trigger type. If defined, hw-strobe is set to
 				  be edge triggered. Otherwise, it is level triggered.
 - qcom,hw-strobe-active-low	: Boolean property to select strobe signal polarity. If defined, hw-strobe
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
index a61bab3..62a51cf 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -104,6 +104,17 @@
   Please refer Documentation/devicetree/bindings/arm/msm/msm_bus.txt
   for the properties above.
 
+- vdd-corners
+  Usage: required
+  Value type: <u32>
+  Definition: List of vdd corners to map for ahb level.
+
+- vdd-corner-ahb-mapping
+  Usage: required
+  Value type: <string>
+  Definition: List of ahb level strings corresponding to the vdd-corners.
+  Supported strings: suspend, svs, nominal, turbo
+
 - client-id-based
   Usage: required
   Value type: <empty>
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 0295e1b..937ccb9 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -81,6 +81,32 @@
 				limits.
 - qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
 				  priority for rotator clients.
+- qcom,mdss-rot-cdp-setting:	Integer array of size two, indicating whether client
+				driven prefetch is available. Index 0 indicates whether
+				CDP is enabled for read and index 1 whether it is
+				enabled for write operations.
+- qcom,mdss-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the rotator sspp and writeback
+				client.
+- qcom,mdss-rot-danger-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the danger lut settings for
+				the rotator sspp and writeback client.
+- qcom,mdss-rot-safe-lut:	A two cell property with the format of <rd_lut,
+				wr_lut> indicating the safe lut settings for the
+				rotator sspp and writeback client.
+- qcom,mdss-inline-rot-qos-lut:	A 4 cell property with the format of <rd_lut_0,
+				rd_lut_1, wr_lut_0, wr_lut_1> indicating the qos
+				lut settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-danger-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the danger lut
+				settings for the inline rotator sspp and
+				writeback client.
+- qcom,mdss-inline-rot-safe-lut: A two cell property with the format of
+				<rd_lut, wr_lut> indicating the safe lut
+				settings for the inline rotator sspp and
+				writeback client.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
@@ -146,9 +172,19 @@
 		/* VBIF QoS remapper settings*/
 		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
 
+		qcom,mdss-rot-cdp-setting = <1 1>;
+
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0>;
+
+		qcom,mdss-inline-rot-qos-lut = <0x0 0x0 0x00112233 0x44556677>;
+		qcom,mdss-inline-rot-danger-lut = <0x0 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0 0x0000ff00>;
+
 		qcom,mdss-sbuf-headroom = <20>;
 		cache-slice-names = "rotator";
 		cache-slices = <&llcc 4>;
diff --git a/Documentation/devicetree/bindings/nfc/nq-nci.txt b/Documentation/devicetree/bindings/nfc/nq-nci.txt
new file mode 100644
index 0000000..b85e070
--- /dev/null
+++ b/Documentation/devicetree/bindings/nfc/nq-nci.txt
@@ -0,0 +1,49 @@
+Qualcomm Technologies, Inc NQxxxx NFC NCI device
+
+Near Field Communication (NFC) device is based on NFC Controller Interface (NCI)
+
+Required properties:
+
+- compatible: "qcom,nq-nci"
+- reg: NCI i2c slave address.
+- qcom,nq-ven: GPIO used for hardware reset.
+- qcom,nq-irq: GPIO used for the read interrupt.
+- qcom,nq-firm: GPIO used for firmware download.
+- qcom,nq-clkreq: GPIO used for the clock request.
+- interrupt-parent: Should be the phandle of the interrupt controller
+                    that services interrupts for this device.
+- interrupts: NFC read interrupt, gpio-clk-req interrupt.
+
+
+Recommended properties:
+
+- interrupt-names: names of interrupts, should include "nfc_irq", used for reference
+
+
+Optional properties:
+
+- pinctrl-names, pinctrl-0, pinctrl-1: references to the pinctrl settings
+- clocks, clock-names: must contain the NQxxxx's core clock.
+- qcom,nq-esepwr: gpio to control power of secure element
+
+Example:
+
+	nq-nci@2b {
+		compatible = "qcom,nq-nci";
+		reg = <0x2b>;
+		qcom,nq-irq = <&tlmm 29 0x00>;
+		qcom,nq-ven = <&tlmm 30 0x00>;
+		qcom,nq-firm = <&tlmm 93 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		qcom,clk-src = "BBCLK2";
+		interrupt-parent = <&tlmm>;
+		interrupts = <29 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active","nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_disable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+		qcom,clk-gpio = <&pm8916_gpios 2 0>;
+		clocks = <&clock_rpm clk_bb_clk2_pin>;
+		clock-names = "ref_clk";
+	};
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
index d7edafc..4a69e03 100644
--- a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -67,6 +67,7 @@
 - qcom,complete-ramdump: Boolean. If set, complete ramdump i.e. region between start address of
 			first segment to end address of last segment will be collected without
 			leaving any hole in between.
+- qcom,ignore-ssr-failure: Boolean. If set, SSR failures are not considered fatal.
 
 Example:
 	qcom,venus@fdce0000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
similarity index 95%
rename from Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
rename to Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
index 0fe8a1b..0eb1043f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdm830-pinctrl
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdm670-pinctrl
@@ -1,12 +1,12 @@
-Qualcomm Technologies, Inc. SDM830 TLMM block
+Qualcomm Technologies, Inc. SDM670 TLMM block
 
 This binding describes the Top Level Mode Multiplexer block found in the
-SDM830 platform.
+SDM670 platform.
 
 - compatible:
 	Usage: required
 	Value type: <string>
-	Definition: must be "qcom,sdm830-pinctrl"
+	Definition: must be "qcom,sdm670-pinctrl"
 
 - reg:
 	Usage: required
@@ -135,9 +135,9 @@
 
 Example:
 
-	tlmm: pinctrl@03800000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03800000 0xc00000>;
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,sdm670-pinctrl";
+		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
 		#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 12d32ec..0123682 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -145,22 +145,30 @@
 
 - qcom,fg-esr-timer-charging
 	Usage:      optional
-	Value type: <u32>
+	Value type: <prop-encoded-array>
 	Definition: Number of cycles between ESR pulses while the battery is
-		    charging.
+		    charging. Array of 2 elements if specified.
+		    Element 0 - Retry value for timer
+		    Element 1 - Maximum value for timer
 
 - qcom,fg-esr-timer-awake
 	Usage:      optional
-	Value type: <u32>
+	Value type: <prop-encoded-array>
 	Definition: Number of cycles between ESR pulses while the system is
-		    awake and the battery is discharging.
+		    awake and the battery is discharging. Array of 2 elements
+		    if specified.
+		    Element 0 - Retry value for timer
+		    Element 1 - Maximum value for timer
 
 - qcom,fg-esr-timer-asleep
 	Usage:      optional
-	Value type: <u32>
+	Value type: <prop-encoded-array>
 	Definition: Number of cycles between ESR pulses while the system is
 		    asleep and the battery is discharging. This option requires
-		    qcom,fg-esr-timer-awake to be defined.
+		    qcom,fg-esr-timer-awake to be defined. Array of 2 elements
+		    if specified.
+		    Element 0 - Retry value for timer
+		    Element 1 - Maximum value for timer
 
 - qcom,fg-esr-pulse-thresh-ma
 	Usage:      optional
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index e1f194f3..441d771 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -35,11 +35,6 @@
 		addition battery properties will be faked such that the device
 		assumes normal operation.
 
-- qcom,external-vconn
-  Usage:      optional
-  Value type: <empty>
-  Definition: Boolean flag which indicates VCONN is sourced externally.
-
 - qcom,fcc-max-ua
   Usage:      optional
   Value type: <u32>
@@ -181,6 +176,12 @@
   Definition: Specifies the maximum charger buck/boost switching frequency in
 		 KHz. It overrides the max frequency defined for the charger.
 
+- qcom,otg-deglitch-time-ms
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies the deglitch interval for OTG detection.
+		If the value is not present, 50 msec is used as the default.
+
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
 =============================================
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index e0ab31f..3a09b28 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -2256,8 +2256,8 @@
 - qcom,wcn-btfm : Property to specify if WCN BT/FM chip is used for the target
 - qcom,msm-mbhc-usbc-audio-supported : Property to specify if analog audio feature is
 				       enabled or not.
-- qcom,usbc-analog-en1_gpio : EN1 GPIO to enable USB type-C analog audio
-- qcom,usbc-analog-en2_n_gpio : EN2 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en1-gpio : EN1 GPIO to enable USB type-C analog audio
+- qcom,usbc-analog-en2-gpio : EN2 GPIO to enable USB type-C analog audio
 - qcom,usbc-analog-force_detect_gpio : Force detect GPIO to enable USB type-C analog audio
 
 Example:
@@ -2333,8 +2333,8 @@
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
 					  "SpkrRight", "SpkrLeft";
 		qcom,msm-mbhc-usbc-audio-supported = <1>;
-		qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
-		qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
+		qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
 		qcom,usbc-analog-force_detect_gpio = <&wcd_usbc_analog_f_gpio>;
 	};
 
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 958194b..4901fa0 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -70,6 +70,7 @@
 			  2: 38.4 MHz
 			  3: 52 MHz
 			  Defaults to 26 MHz if not specified.
+- extcon:       phandle to external connector (Refer to Documentation/devicetree/bindings/extcon/extcon-gpio.txt for more details).
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 53b872c..db86cda 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -308,6 +308,12 @@
 .. kernel-doc:: drivers/gpu/drm/drm_color_mgmt.c
    :export:
 
+Explicit Fencing Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+   :doc: explicit fencing properties
+
 Existing KMS Properties
 -----------------------
 
diff --git a/Makefile b/Makefile
index b9aba93..29030c6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 29
+SUBLEVEL = 31
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 9e46d6e..fa47df6a 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI_ASM_SOCKET_H */
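SO_COOKIE is a read-only option that returns the kernel-assigned 64-bit
socket cookie through getsockopt(). A minimal user-space sketch, assuming
the running kernel and headers expose the option:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/socket.h>

	static int print_socket_cookie(int fd)
	{
		uint64_t cookie = 0;
		socklen_t len = sizeof(cookie);

		/* Read-only: there is no corresponding setsockopt(). */
		if (getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len) < 0)
			return -1;

		printf("socket cookie: %llu\n", (unsigned long long)cookie);
		return 0;
	}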
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ffb93f49..4f95577 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1188,8 +1188,10 @@
 	if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
 		return -EFAULT;
 
-	err = 0;
-	err |= put_user(status, ustatus);
+	err = put_user(status, ustatus);
+	if (ret < 0)
+		return err ? err : ret;
+
 	err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
 	err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
 	err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index c51fc65..5a53fcf 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
 			};
 
 			adc0: adc@f8018000 {
+				atmel,adc-vref = <3300>;
+				atmel,adc-channels-used = <0xfe>;
 				pinctrl-0 = <
 					&pinctrl_adc0_adtrg
-					&pinctrl_adc0_ad0
 					&pinctrl_adc0_ad1
 					&pinctrl_adc0_ad2
 					&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
 					&pinctrl_adc0_ad5
 					&pinctrl_adc0_ad6
 					&pinctrl_adc0_ad7
-					&pinctrl_adc0_ad8
-					&pinctrl_adc0_ad9
 					>;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 5bb8fd5..d71da30 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -12,23 +12,6 @@
 	model = "Freescale i.MX6 SoloX SDB RevB Board";
 };
 
-&cpu0 {
-	operating-points = <
-		/* kHz    uV */
-		996000  1250000
-		792000  1175000
-		396000  1175000
-		198000  1175000
-		>;
-	fsl,soc-operating-points = <
-		/* ARM kHz      SOC uV */
-		996000	1250000
-		792000	1175000
-		396000	1175000
-		198000  1175000
-	>;
-};
-
 &i2c1 {
 	clock-frequency = <100000>;
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index ba1da74..961adc9 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -142,6 +142,7 @@
 		compatible = "qcom,dummycc";
 		clock-output-names = "gcc_clocks";
 		#clock-cells = <1>;
+		#reset-cells = <1>;
 	};
 
 	clock_cpu: qcom,clock-a7@17810008 {
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 4e361a8..b4bfa55 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -569,6 +569,7 @@
 			regulator-name = "+3VS,vdd_pnl";
 			regulator-min-microvolt = <3300000>;
 			regulator-max-microvolt = <3300000>;
+			regulator-boot-on;
 			gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
 			enable-active-high;
 		};
diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig
index fc0d3b0..40289a8 100644
--- a/arch/arm/configs/sdxpoorwills-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-perf_defconfig
@@ -270,6 +270,8 @@
 CONFIG_MSM_SMP2P=y
 CONFIG_MSM_SMP2P_TEST=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_PWM=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
 CONFIG_ANDROID=y
diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig
index 9d12771..d91f5f6 100644
--- a/arch/arm/configs/sdxpoorwills_defconfig
+++ b/arch/arm/configs/sdxpoorwills_defconfig
@@ -263,6 +263,8 @@
 CONFIG_MSM_BOOT_STATS=y
 CONFIG_TRACER_PKT=y
 CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_PWM=y
 CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
index 4917c2f..e74ab0f 100644
--- a/arch/arm/include/asm/kvm_coproc.h
+++ b/arch/arm/include/asm/kvm_coproc.h
@@ -31,7 +31,8 @@
 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 464748b..ed23196 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -18,13 +18,18 @@
 };
 #endif
 
+struct mod_plt_sec {
+	struct elf32_shdr	*plt;
+	int			plt_count;
+};
+
 struct mod_arch_specific {
 #ifdef CONFIG_ARM_UNWIND
 	struct unwind_table *unwind[ARM_SEC_MAX];
 #endif
 #ifdef CONFIG_ARM_MODULE_PLTS
-	struct elf32_shdr   *plt;
-	int		    plt_count;
+	struct mod_plt_sec	core;
+	struct mod_plt_sec	init;
 #endif
 };
 
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index d060641..9edea10 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -24,6 +24,7 @@
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
+unsigned long arch_get_cpu_efficiency(int cpu);
 
 #ifdef CONFIG_CPU_FREQ
 #define arch_scale_freq_capacity cpufreq_scale_freq_capacity
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 3a5cba9..3d0c2e4 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -31,9 +31,17 @@
 	u32	lit[PLT_ENT_COUNT];
 };
 
+static bool in_init(const struct module *mod, unsigned long loc)
+{
+	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
+}
+
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
 {
-	struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
+	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
+							  &mod->arch.init;
+
+	struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
 	int idx = 0;
 
 	/*
@@ -41,9 +49,9 @@
 	 * relocations are sorted, this will be the last entry we allocated.
 	 * (if one exists).
 	 */
-	if (mod->arch.plt_count > 0) {
-		plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
-		idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
+	if (pltsec->plt_count > 0) {
+		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
+		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
 
 		if (plt->lit[idx] == val)
 			return (u32)&plt->ldr[idx];
@@ -53,8 +61,8 @@
 			plt++;
 	}
 
-	mod->arch.plt_count++;
-	BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
+	pltsec->plt_count++;
+	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
 
 	if (!idx)
 		/* Populate a new set of entries */
@@ -129,7 +137,7 @@
 
 /* Count how many PLT entries we may need */
 static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
-			       const Elf32_Rel *rel, int num)
+			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
 {
 	unsigned int ret = 0;
 	const Elf32_Sym *s;
@@ -144,13 +152,17 @@
 		case R_ARM_THM_JUMP24:
 			/*
 			 * We only have to consider branch targets that resolve
-			 * to undefined symbols. This is not simply a heuristic,
-			 * it is a fundamental limitation, since the PLT itself
-			 * is part of the module, and needs to be within range
-			 * as well, so modules can never grow beyond that limit.
+			 * to symbols that are defined in a different section.
+			 * This is not simply a heuristic, it is a fundamental
+			 * limitation, since there is no guaranteed way to emit
+			 * PLT entries sufficiently close to the branch if the
+			 * section size exceeds the range of a branch
+			 * instruction. So ignore relocations against defined
+			 * symbols if they live in the same section as the
+			 * relocation target.
 			 */
 			s = syms + ELF32_R_SYM(rel[i].r_info);
-			if (s->st_shndx != SHN_UNDEF)
+			if (s->st_shndx == dstidx)
 				break;
 
 			/*
@@ -161,7 +173,12 @@
 			 * So we need to support them, but there is no need to
 			 * take them into consideration when trying to optimize
 			 * this code. So let's only check for duplicates when
-			 * the addend is zero.
+			 * the addend is zero. (Note that calls into the core
+			 * module via init PLT entries could involve section
+			 * relative symbol references with non-zero addends, for
+			 * which we may end up emitting duplicates, but the init
+			 * PLT is released along with the rest of the .init
+			 * region as soon as module loading completes.)
 			 */
 			if (!is_zero_addend_relocation(base, rel + i) ||
 			    !duplicate_rel(base, rel, i))
@@ -174,7 +191,8 @@
 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
 			      char *secstrings, struct module *mod)
 {
-	unsigned long plts = 0;
+	unsigned long core_plts = 0;
+	unsigned long init_plts = 0;
 	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
 	Elf32_Sym *syms = NULL;
 
@@ -184,13 +202,15 @@
 	 */
 	for (s = sechdrs; s < sechdrs_end; ++s) {
 		if (strcmp(".plt", secstrings + s->sh_name) == 0)
-			mod->arch.plt = s;
+			mod->arch.core.plt = s;
+		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
+			mod->arch.init.plt = s;
 		else if (s->sh_type == SHT_SYMTAB)
 			syms = (Elf32_Sym *)s->sh_addr;
 	}
 
-	if (!mod->arch.plt) {
-		pr_err("%s: module PLT section missing\n", mod->name);
+	if (!mod->arch.core.plt || !mod->arch.init.plt) {
+		pr_err("%s: module PLT section(s) missing\n", mod->name);
 		return -ENOEXEC;
 	}
 	if (!syms) {
@@ -213,16 +233,29 @@
 		/* sort by type and symbol index */
 		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
 
-		plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
+		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
+			core_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
+		else
+			init_plts += count_plts(syms, dstsec->sh_addr, rels,
+						numrels, s->sh_info);
 	}
 
-	mod->arch.plt->sh_type = SHT_NOBITS;
-	mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
-	mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
-	mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
-					  sizeof(struct plt_entries));
-	mod->arch.plt_count = 0;
+	mod->arch.core.plt->sh_type = SHT_NOBITS;
+	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.core.plt_count = 0;
 
-	pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
+	mod->arch.init.plt->sh_type = SHT_NOBITS;
+	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
+	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
+					       sizeof(struct plt_entries));
+	mod->arch.init.plt_count = 0;
+
+	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
+		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
 	return 0;
 }
diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
index 05881e2..eacb5c6 100644
--- a/arch/arm/kernel/module.lds
+++ b/arch/arm/kernel/module.lds
@@ -1,3 +1,4 @@
 SECTIONS {
 	.plt : { BYTE(0) }
+	.init.plt : { BYTE(0) }
 }
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index bd884da..2b6c530 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -196,6 +196,14 @@
 	return 0;
 }
 
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+	return per_cpu(cpu_efficiency, cpu);
+}
+EXPORT_SYMBOL(arch_get_cpu_efficiency);
+
 #ifdef CONFIG_OF
 struct cpu_efficiency {
 	const char *compatible;
@@ -272,6 +280,7 @@
 	for_each_possible_cpu(cpu) {
 		const u32 *rate;
 		int len;
+		u32 efficiency;
 
 		/* too early to use cpu->of_node */
 		cn = of_get_cpu_node(cpu, NULL);
@@ -280,12 +289,26 @@
 			continue;
 		}
 
-		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
-			if (of_device_is_compatible(cn, cpu_eff->compatible))
-				break;
+		/*
+		 * The CPU efficiency value passed from the device tree
+		 * overrides the value defined in the table_efficiency[]
+		 */
+		if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
 
-		if (cpu_eff->compatible == NULL)
-			continue;
+			for (cpu_eff = table_efficiency;
+					cpu_eff->compatible; cpu_eff++)
+
+				if (of_device_is_compatible(cn,
+						cpu_eff->compatible))
+					break;
+
+			if (cpu_eff->compatible == NULL)
+				continue;
+
+			efficiency = cpu_eff->efficiency;
+		}
+
+		per_cpu(cpu_efficiency, cpu) = efficiency;
 
 		rate = of_get_property(cn, "clock-frequency", &len);
 		if (!rate || len != 4) {
@@ -294,7 +317,7 @@
 			continue;
 		}
 
-		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+		capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
 
 		/* Save min capacity of the system */
 		if (capacity < min_capacity)
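The newly exported arch_get_cpu_efficiency() lets other kernel code reuse
the per-CPU efficiency value parsed above. A short sketch of a consumer,
mirroring the capacity formula used in parse_dt_topology() (the helper
name and frequency argument are illustrative):

	#include <linux/types.h>
	#include <asm/topology.h>

	/* capacity ~ (clock-frequency >> 20) * efficiency, as above */
	static unsigned long estimate_raw_capacity(int cpu, u32 rate_hz)
	{
		return (rate_hz >> 20) * arch_get_cpu_efficiency(cpu);
	}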
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3e5e419..c3ed6bd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -93,12 +93,6 @@
 	return 1;
 }
 
-int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-	kvm_inject_undefined(vcpu);
-	return 1;
-}
-
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	/*
@@ -514,12 +508,7 @@
 	return 1;
 }
 
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -533,9 +522,38 @@
 	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
 	params.CRm = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_64bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *table, size_t num)
 {
@@ -546,12 +564,7 @@
 			table[i].reset(vcpu, &table[i]);
 }
 
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
 {
 	struct coproc_params params;
 
@@ -565,9 +578,37 @@
 	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
 	params.Rt2 = 0;
 
+	return params;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
 	return emulate_cp15(vcpu, &params);
 }
 
+/**
+ * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params = decode_32bit_hsr(vcpu);
+
+	/* raz_wi cp14 */
+	pm_fake(vcpu, &params, NULL);
+
+	/* handled */
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+	return 1;
+}
+
 /******************************************************************************
  * Userspace API
  *****************************************************************************/
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4e40d19..066b6d4 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -83,9 +83,9 @@
 	[HSR_EC_WFI]		= kvm_handle_wfx,
 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
-	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_MR]	= kvm_handle_cp14_32,
 	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[HSR_EC_CP14_64]	= kvm_handle_cp14_access,
+	[HSR_EC_CP14_64]	= kvm_handle_cp14_64,
 	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access,
 	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id,
 	[HSR_EC_HVC]		= handle_hvc,
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 3023bb5..8679405 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 92678b7..624a510 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -48,7 +48,9 @@
 	write_sysreg(HSTR_T(15), HSTR);
 	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
 	val = read_sysreg(HDCR);
-	write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+	val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
+	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
+	write_sysreg(val, HDCR);
 }
 
 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 8dea616..5049777 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -147,10 +147,10 @@
 
 	@ Configure caches (if implemented)
 	teq     r8, #0
-	stmneia	r12, {r0-r6, lr}	@ v7m_invalidate_l1 touches r0-r6
+	stmneia	sp, {r0-r6, lr}		@ v7m_invalidate_l1 touches r0-r6
 	blne	v7m_invalidate_l1
 	teq     r8, #0			@ re-evalutae condition
-	ldmneia	r12, {r0-r6, lr}
+	ldmneia	sp, {r0-r6, lr}
 
 	@ Configure the System Control Register to ensure 8-byte stack alignment
 	@ Note the STKALIGN bit is either RW or RAO.
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 445aeb6..dae2f9f 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -128,13 +128,13 @@
 	  This enables support for the SDM845 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
-config ARCH_SDM830
-	bool "Enable Support for Qualcomm Technologies Inc. SDM830"
+config ARCH_SDM670
+	bool "Enable Support for Qualcomm Technologies Inc. SDM670"
 	depends on ARCH_QCOM
 	select COMMON_CLK_QCOM
 	select QCOM_GDSC
 	help
-	  This enables support for the SDM830 chipset. If you do not
+	  This enables support for the SDM670 chipset. If you do not
 	  wish to build a kernel that runs on this chipset, say 'N' here.
 
 config ARCH_ROCKCHIP
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
index 17839db..509a2ed 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
@@ -772,6 +772,7 @@
 			clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
+			reset-names = "reset";
 			bus-width = <0x8>;
 			vmmc-supply = <&ldo19>;
 			pinctrl-names = "default";
@@ -795,6 +796,7 @@
 			clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
+			reset-names = "reset";
 			vqmmc-supply = <&ldo7>;
 			vmmc-supply = <&ldo10>;
 			bus-width = <0x4>;
@@ -813,6 +815,7 @@
 			clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
 			clock-names = "ciu", "biu";
 			resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
+			reset-names = "reset";
 			bus-width = <0x4>;
 			broken-cd;
 			pinctrl-names = "default", "idle";
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index cd13516..7ad029a 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -23,10 +23,9 @@
 sdm845-mtp-overlay.dtbo-base := sdm845.dtb
 endif
 
-dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
-	sdm830-rumi.dtb \
-	sdm830-mtp.dtb \
-	sdm830-cdp.dtb
+dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \
+	sdm670-mtp.dtb \
+	sdm670-cdp.dtb
 
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index 6a3e8b4..0c2ae5f 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -20,6 +20,7 @@
 		reg = <0x5040000 0x10000>;
 		#iommu-cells = <1>;
 		qcom,dynamic;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <2>;
 		qcom,regulator-names = "vdd";
 		vdd-supply = <&gpu_cx_gdsc>;
@@ -62,7 +63,7 @@
 			<0x150c2000 0x20>;
 		reg-names = "base", "tcu-base";
 		#iommu-cells = <2>;
-		qcom,skip-init;
+		qcom,use-3-lvl-tables;
 		#global-interrupts = <1>;
 		#size-cells = <1>;
 		#address-cells = <1>;
@@ -328,9 +329,19 @@
 	apps_iommu_test_device {
 		compatible = "iommu-debug-test";
 		/*
-		 * This SID belongs to QUP1-GSI. We can't use a fake SID for
+		 * This SID belongs to TSIF. We can't use a fake SID for
 		 * the apps_smmu device.
 		 */
-		iommus = <&apps_smmu 0x16 0>;
+		iommus = <&apps_smmu 0x20 0>;
+	};
+
+	apps_iommu_coherent_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * This SID belongs to QUP1-DMA. We can't use a fake SID for
+		 * the apps_smmu device.
+		 */
+		iommus = <&apps_smmu 0x3 0>;
+		dma-coherent;
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 886e792..660dac5 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -211,7 +211,7 @@
 			compatible = "qcom,qpnp-pdphy";
 			reg = <0x1700 0x100>;
 			vdd-pdphy-supply = <&pm8998_l24>;
-			vbus-supply = <&smb2_vbus>;
+			vbus-supply = <&ext_5v_boost>;
 			vconn-supply = <&smb2_vconn>;
 			interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
 				     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
@@ -270,8 +270,9 @@
 			io-channels = <&pmi8998_rradc 0>;
 			io-channel-names = "rradc_batt_id";
 			qcom,rradc-base = <0x4500>;
-			qcom,fg-esr-timer-awake = <96>;
-			qcom,fg-esr-timer-asleep = <256>;
+			qcom,fg-esr-timer-awake = <96 96>;
+			qcom,fg-esr-timer-asleep = <256 256>;
+			qcom,fg-esr-timer-charging = <0 96>;
 			qcom,cycle-counter-en;
 			status = "okay";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-cdp.dts
index 4b3fa93..7e5947b 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dts
@@ -10,14 +10,14 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 CDP";
+	compatible = "qcom,sdm670-cdp", "qcom,sdm670", "qcom,cdp";
+	qcom,board-id = <1 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
similarity index 78%
rename from arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
rename to arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
index c7bbef0..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-cdp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
new file mode 100644
index 0000000..61ef7ff
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670-ion.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@22 { /* ADSP HEAP */
+			reg = <22>;
+			memory-region = <&adsp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@13 { /* SPSS HEAP */
+			reg = <13>;
+			memory-region = <&sp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
+
+		qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
similarity index 70%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-mtp.dts
index 4b3fa93..1de40b7 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dts
@@ -10,14 +10,14 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+
+/dts-v1/;
+
+#include "sdm670.dtsi"
+#include "sdm670-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670 MTP";
+	compatible = "qcom,sdm670-mtp", "qcom,sdm670", "qcom,mtp";
+	qcom,board-id = <8 0>;
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
similarity index 78%
copy from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
index b2d607d..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
similarity index 94%
rename from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
rename to arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 4b3fa93..09ce9d2 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -12,7 +12,7 @@
 
 &soc {
 	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
+		compatible = "qcom,sdm670-pinctrl";
 		reg = <0x03400000 0xc00000>;
 		interrupts = <0 208 0>;
 		gpio-controller;
diff --git a/arch/arm64/boot/dts/qcom/sdm830-rumi.dts b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
similarity index 65%
rename from arch/arm64/boot/dts/qcom/sdm830-rumi.dts
rename to arch/arm64/boot/dts/qcom/sdm670-rumi.dts
index 2485051..6201488 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dts
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,12 +14,16 @@
 /dts-v1/;
 /memreserve/ 0x90000000 0x00000100;
 
-#include "sdm830.dtsi"
-#include "sdm830-rumi.dtsi"
-
+#include "sdm670.dtsi"
+#include "sdm670-rumi.dtsi"
 / {
-	model = "Qualcomm Technologies, Inc. SDM830 RUMI";
-	compatible = "qcom,sdm830-rumi", "qcom,sdm830", "qcom,rumi";
+	model = "Qualcomm Technologies, Inc. SDM670 RUMI";
+	compatible = "qcom,sdm670-rumi", "qcom,sdm670", "qcom,rumi";
 	qcom,board-id = <15 0>;
 };
 
+&soc {
+	wdog: qcom,wdt@17980000{
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
similarity index 77%
copy from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
copy to arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
index b2d607d..6ea92ee 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
new file mode 100644
index 0000000..5969c1d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -0,0 +1,656 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM670";
+	compatible = "qcom,sdm670";
+	qcom,msm-id = <336 0x0>;
+	interrupt-parent = <&intc>;
+
+	aliases { };
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+				L3_0: l3-cache {
+					compatible = "arm,arch-cache";
+					cache-size = <0x100000>;
+					cache-level = <3>;
+				};
+			};
+			L1_I_0: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_0: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU1: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_100>;
+			L2_100: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_100: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_100: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_200>;
+			L2_200: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_200: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_200: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_300>;
+			L2_300: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_300: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_300: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_400>;
+			L2_400: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_400: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_400: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU5: cpu@500 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x500>;
+			enable-method = "psci";
+			efficiency = <1024>;
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_500>;
+			L2_500: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x20000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_500: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+			L1_D_500: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x9000>;
+			};
+		};
+
+		CPU6: cpu@600 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x600>;
+			enable-method = "psci";
+			efficiency = <1740>;
+			cache-size = <0x10000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_600>;
+			L2_600: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x40000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_600: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_600: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+		};
+
+		CPU7: cpu@700 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x700>;
+			enable-method = "psci";
+			efficiency = <1740>;
+			cache-size = <0x10000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_700>;
+			L2_700: l2-cache {
+				compatible = "arm,arch-cache";
+				cache-size = <0x40000>;
+				cache-level = <2>;
+				next-level-cache = <&L3_0>;
+			};
+			L1_I_700: l1-icache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+			L1_D_700: l1-dcache {
+				compatible = "arm,arch-cache";
+				qcom,dump-size = <0x12000>;
+			};
+		};
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+
+				core1 {
+					cpu = <&CPU1>;
+				};
+
+				core2 {
+					cpu = <&CPU2>;
+				};
+
+				core3 {
+					cpu = <&CPU3>;
+				};
+
+				core4 {
+					cpu = <&CPU4>;
+				};
+
+				core5 {
+					cpu = <&CPU5>;
+				};
+			};
+			cluster1 {
+				core0 {
+					cpu = <&CPU6>;
+				};
+
+				core1 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+	};
+
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+
+	soc: soc { };
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		removed_regions: removed_regions@85700000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x85700000 0 0x3800000>;
+		};
+
+		pil_camera_mem: camera_region@8ab00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8ab00000 0 0x500000>;
+		};
+
+		pil_modem_mem: modem_region@8b000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8b000000 0 0x7e00000>;
+		};
+
+		pil_video_mem: pil_video_region@92e00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x92e00000 0 0x500000>;
+		};
+
+		pil_cdsp_mem: cdsp_regions@93300000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93300000 0 0x600000>;
+		};
+
+		pil_mba_mem: pil_mba_region@93900000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93900000 0 0x200000>;
+		};
+
+		pil_adsp_mem: pil_adsp_region@93b00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x93b00000 0 0x1e00000>;
+		};
+
+		pil_ipa_fw_mem: pil_ipa_fw_region@95900000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95900000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95910000 0 0x5000>;
+		};
+
+		pil_gpu_mem: pil_gpu_region@95915000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95915000 0 0x1000>;
+		};
+
+		adsp_mem: adsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0xc00000>;
+		};
+
+		qseecom_mem: qseecom_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+		};
+
+		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		secure_display_memory: secure_display_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x5c00000>;
+		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x2000000>;
+			linux,cma-default;
+		};
+	};
+};
+
+#include "sdm670-ion.dtsi"
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	intc: interrupt-controller@17a00000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x20000>;
+		reg = <0x17a00000 0x10000>,     /* GICD */
+		      <0x17a60000 0x100000>;    /* GICR * 8 */
+		interrupts = <1 9 4>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 1 0xf08>,
+			     <1 2 0xf08>,
+			     <1 3 0xf08>,
+			     <1 0 0xf08>;
+		clock-frequency = <19200000>;
+	};
+
+	qcom,sps {
+		compatible = "qcom,msm_sps_4k";
+		qcom,pipe-attr-ee;
+	};
+
+	timer@17c90000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0x17c90000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@17ca0000 {
+			frame-number = <0>;
+			interrupts = <0 7 0x4>,
+				     <0 6 0x4>;
+			reg = <0x17ca0000 0x1000>,
+			      <0x17cb0000 0x1000>;
+		};
+
+		frame@17cc0000 {
+			frame-number = <1>;
+			interrupts = <0 8 0x4>;
+			reg = <0x17cc0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cd0000 {
+			frame-number = <2>;
+			interrupts = <0 9 0x4>;
+			reg = <0x17cd0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17ce0000 {
+			frame-number = <3>;
+			interrupts = <0 10 0x4>;
+			reg = <0x17ce0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cf0000 {
+			frame-number = <4>;
+			interrupts = <0 11 0x4>;
+			reg = <0x17cf0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d00000 {
+			frame-number = <5>;
+			interrupts = <0 12 0x4>;
+			reg = <0x17d00000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d10000 {
+			frame-number = <6>;
+			interrupts = <0 13 0x4>;
+			reg = <0x17d10000 0x1000>;
+			status = "disabled";
+		};
+	};
+
+	restart@10ac000 {
+		compatible = "qcom,pshold";
+		reg = <0xC264000 0x4>,
+		      <0x1fd3000 0x4>;
+		reg-names = "pshold-base", "tcsr-boot-misc-detect";
+	};
+
+	clock_cpucc: qcom,cpucc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "cpucc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	wdog: qcom,wdt@17980000{
+		compatible = "qcom,msm-watchdog";
+		reg = <0x17980000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+	};
+
+	qcom,msm-imem@146bf000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x146bf000 0x1000>;
+		ranges = <0x0 0x146bf000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		kaslr_offset@6d0 {
+			compatible = "qcom,msm-imem-kaslr_offset";
+			reg = <0x6d0 12>;
+		};
+	};
+
+	cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,l1_i_cache0 {
+			qcom,dump-node = <&L1_I_0>;
+			qcom,dump-id = <0x60>;
+		};
+		qcom,l1_i_cache1 {
+			qcom,dump-node = <&L1_I_100>;
+			qcom,dump-id = <0x61>;
+		};
+		qcom,l1_i_cache2 {
+			qcom,dump-node = <&L1_I_200>;
+			qcom,dump-id = <0x62>;
+		};
+		qcom,l1_i_cache3 {
+			qcom,dump-node = <&L1_I_300>;
+			qcom,dump-id = <0x63>;
+		};
+		qcom,l1_i_cache100 {
+			qcom,dump-node = <&L1_I_400>;
+			qcom,dump-id = <0x64>;
+		};
+		qcom,l1_i_cache101 {
+			qcom,dump-node = <&L1_I_500>;
+			qcom,dump-id = <0x65>;
+		};
+		qcom,l1_i_cache102 {
+			qcom,dump-node = <&L1_I_600>;
+			qcom,dump-id = <0x66>;
+		};
+		qcom,l1_i_cache103 {
+			qcom,dump-node = <&L1_I_700>;
+			qcom,dump-id = <0x67>;
+		};
+		qcom,l1_d_cache0 {
+			qcom,dump-node = <&L1_D_0>;
+			qcom,dump-id = <0x80>;
+		};
+		qcom,l1_d_cache1 {
+			qcom,dump-node = <&L1_D_100>;
+			qcom,dump-id = <0x81>;
+		};
+		qcom,l1_d_cache2 {
+			qcom,dump-node = <&L1_D_200>;
+			qcom,dump-id = <0x82>;
+		};
+		qcom,l1_d_cache3 {
+			qcom,dump-node = <&L1_D_300>;
+			qcom,dump-id = <0x83>;
+		};
+		qcom,l1_d_cache100 {
+			qcom,dump-node = <&L1_D_400>;
+			qcom,dump-id = <0x84>;
+		};
+		qcom,l1_d_cache101 {
+			qcom,dump-node = <&L1_D_500>;
+			qcom,dump-id = <0x85>;
+		};
+		qcom,l1_d_cache102 {
+			qcom,dump-node = <&L1_D_600>;
+			qcom,dump-id = <0x86>;
+		};
+		qcom,l1_d_cache103 {
+			qcom,dump-node = <&L1_D_700>;
+			qcom,dump-id = <0x87>;
+		};
+	};
+
+	kryo3xx-erp {
+		compatible = "arm,arm64-kryo3xx-cpu-erp";
+		interrupts = <1 6 4>,
+			     <1 7 4>,
+			     <0 34 4>,
+			     <0 35 4>;
+
+		interrupt-names = "l1-l2-faultirq",
+				  "l1-l2-errirq",
+				  "l3-scu-errirq",
+				  "l3-scu-faultirq";
+	};
+
+	qcom,chd_sliver {
+		compatible = "qcom,core-hang-detect";
+		label = "silver";
+		qcom,threshold-arr = <0x17e00058 0x17e10058 0x17e20058
+					0x17e30058 0x17e40058 0x17e50058>;
+		qcom,config-arr = <0x17e00060 0x17e10060 0x17e20060
+					0x17e30060 0x17e40060 0x17e50060>;
+	};
+
+	qcom,chd_gold {
+		compatible = "qcom,core-hang-detect";
+		label = "gold";
+		qcom,threshold-arr = <0x17e60058 0x17e70058>;
+		qcom,config-arr = <0x17e60060 0x17e70060>;
+	};
+
+	qcom,ghd {
+		compatible = "qcom,gladiator-hang-detect-v2";
+		qcom,threshold-arr = <0x1799041c 0x17990420>;
+		qcom,config-reg = <0x17990434>;
+	};
+
+	qcom,msm-gladiator-v3@17900000 {
+		compatible = "qcom,msm-gladiator-v3";
+		reg = <0x17900000 0xd080>;
+		reg-names = "gladiator_base";
+		interrupts = <0 17 0>;
+	};
+
+	dcc: dcc_v2@10a2000 {
+		compatible = "qcom,dcc_v2";
+		reg = <0x10a2000 0x1000>,
+		      <0x10ae000 0x2000>;
+		reg-names = "dcc-base", "dcc-ram-base";
+	};
+
+};
+
+#include "sdm670-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sdm830-cdp.dts b/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
deleted file mode 100644
index dab4a9d..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-cdp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "sdm830.dtsi"
-#include "sdm830-cdp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 CDP";
-	compatible = "qcom,sdm830-cdp", "qcom,sdm830", "qcom,cdp";
-	qcom,board-id = <1 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dts b/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
deleted file mode 100644
index 5da16e6..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dts
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-
-#include "sdm830.dtsi"
-#include "sdm830-mtp.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM bat v1 MTP";
-	compatible = "qcom,sdm830-mtp", "qcom,sdm830", "qcom,mtp";
-	qcom,board-id = <8 0>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
deleted file mode 100644
index 2bc5f3f..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-rumi.dtsi
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845-rumi.dtsi file.
- */
-
- #include "sdm845-rumi.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830-sim.dts b/arch/arm64/boot/dts/qcom/sdm830-sim.dts
deleted file mode 100644
index 57cd155..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-sim.dts
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-/dts-v1/;
-/memreserve/ 0x90000000 0x00000100;
-
-#include "sdm830.dtsi"
-#include "sdm830-sim.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM830 SIM";
-	compatible = "qcom,sdm830-sim", "qcom,sdm830", "qcom,sim";
-	qcom,board-id = <16 0>;
-};
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
deleted file mode 100644
index 85e8075..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830-sim.dtsi
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845-sim.dtsi file.
- */
-
- #include "sdm845-sim.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/sdm830.dtsi b/arch/arm64/boot/dts/qcom/sdm830.dtsi
deleted file mode 100644
index 81ae913..0000000
--- a/arch/arm64/boot/dts/qcom/sdm830.dtsi
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * As a general rule, only version-specific property overrides should be placed
- * inside this file. Common device definitions should be placed inside the
- * sdm845.dtsi file.
- */
-
- #include "sdm845.dtsi"
-
-/ {
-	model = "Qualcomm Technologies, Inc. SDM830";
-	compatible = "qcom,sdm830";
-	qcom,msm-id = <328 0x0>;
-
-};
-
-&soc {
-	qcom,llcc@1300000 {
-		status = "disabled";
-	};
-
-	qcom,spss@1880000 {
-		status = "disabled";
-	};
-
-	qcom,glink-mailbox-xprt-spss@1885008 {
-		status = "disabled";
-	};
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
index 122299c..4b7a680 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts
@@ -22,12 +22,12 @@
 	qcom,board-id = <1 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb>;
 };
 
 &dsi_sharp_4k_dsc_video {
@@ -35,7 +35,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
index 55e615c..fcf6ad1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts
@@ -22,12 +22,12 @@
 	qcom,board-id = <8 1>;
 };
 
-&dsi_dual_nt35597_truly_video_display {
+&dsi_dual_nt35597_truly_cmd_display {
 	/delete-property/ qcom,dsi-display-active;
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb>;
 };
 
 &dsi_sharp_4k_dsc_video {
@@ -35,7 +35,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index fcc09a0..709c89d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -78,6 +78,9 @@
 		qcom,hph-en0-gpio = <&tavil_hph_en0>;
 		qcom,hph-en1-gpio = <&tavil_hph_en1>;
 		qcom,tavil-mclk-clk-freq = <9600000>;
+
+		qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>;
+
 		asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
 				<&loopback>, <&compress>, <&hostless>,
 				<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -136,6 +139,18 @@
 				<&wsa881x_0213>, <&wsa881x_0214>;
 		qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
 					  "SpkrLeft", "SpkrRight";
+
+		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+	};
+
+	wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 {
+		compatible = "qcom,msm-cdc-pinctrl";
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
 	};
 
 	wcd9xxx_intc: wcd9xxx-irq {
@@ -169,6 +184,14 @@
 	qocm,wcd-dsp-glink {
 		compatible = "qcom,wcd-dsp-glink";
 	};
+
+	qcom,wcd-dsp-mgr {
+		compatible = "qcom,wcd-dsp-mgr";
+		qcom,wdsp-components = <&wcd934x_cdc 0>,
+				       <&wcd_spi_0 1>,
+				       <&glink_spi_xprt_wdsp 2>;
+		qcom,img-filename = "cpe_9340";
+	};
 };
 
 &slim_aud {
@@ -229,5 +252,13 @@
 		qcom,cdc-mad-dmic-rate = <600000>;
 
 		qcom,wdsp-cmpnt-dev-name = "tavil_codec";
+
+		wcd_spi_0: wcd_spi {
+			compatible = "qcom,wcd-spi-v2";
+			qcom,master-bus-num = <0>;
+			qcom,chip-select = <0>;
+			qcom,max-frequency = <9600000>;
+			qcom,mem-base-addr = <0x100000>;
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
index 1702e80..e26f888 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi
@@ -25,12 +25,14 @@
 			<0x1380000 0x40000>,
 			<0x1740000 0x40000>,
 			<0x1620000 0x40000>,
+			<0x1620000 0x40000>,
 			<0x1620000 0x40000>;
 
 		reg-names = "aggre1_noc-base", "aggre2_noc-base",
 			"config_noc-base", "dc_noc-base",
 			"gladiator_noc-base", "mc_virt-base", "mem_noc-base",
-			"mmss_noc-base", "system_noc-base", "ipa_virt-base";
+			"mmss_noc-base", "system_noc-base", "ipa_virt-base",
+			"camnoc_virt-base";
 
 		mbox-names = "apps_rsc", "disp_rsc";
 		mboxes = <&apps_rsc 0 &disp_rsc 0>;
@@ -368,6 +370,15 @@
 			clocks = <>;
 		};
 
+		fab_camnoc_virt: fab-camnoc_virt {
+			cell-id = <MSM_BUS_FAB_CAMNOC_VIRT>;
+			label = "fab-camnoc_virt";
+			qcom,fab-dev;
+			qcom,base-name = "camnoc_virt-base";
+			qcom,bypass-qos-prg;
+			clocks = <>;
+		};
+
 		fab_config_noc: fab-config_noc {
 			cell-id = <MSM_BUS_FAB_CONFIG_NOC>;
 			label = "fab-config_noc";
@@ -654,6 +665,33 @@
 			qcom,bus-dev = <&fab_aggre2_noc>;
 		};
 
+		mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP>;
+			label = "mas-qxm-camnoc-hf0-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
+		mas_qxm_camnoc_hf1_uncomp: mas-qxm-camnoc-hf1-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP>;
+			label = "mas-qxm-camnoc-hf1-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
+		mas_qxm_camnoc_sf_uncomp: mas-qxm-camnoc-sf-uncomp {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_SF_UNCOMP>;
+			label = "mas-qxm-camnoc-sf-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,connections = <&slv_qns_camnoc_uncomp>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+		};
+
 		mas_qhm_spdm: mas-qhm-spdm {
 			cell-id = <MSM_BUS_MASTER_SPDM>;
 			label = "mas-qhm-spdm";
@@ -900,12 +938,23 @@
 			qcom,bus-dev = <&fab_mmss_noc>;
 		};
 
-		mas_qxm_camnoc_hf: mas-qxm-camnoc-hf {
-			cell-id = <MSM_BUS_MASTER_CAMNOC_HF>;
-			label = "mas-qxm-camnoc-hf";
+		mas_qxm_camnoc_hf0: mas-qxm-camnoc-hf0 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF0>;
+			label = "mas-qxm-camnoc-hf0";
 			qcom,buswidth = <32>;
-			qcom,agg-ports = <2>;
-			qcom,qport = <1 2>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <1>;
+			qcom,connections = <&slv_qns_mem_noc_hf>;
+			qcom,bus-dev = <&fab_mmss_noc>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
+		mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 {
+			cell-id = <MSM_BUS_MASTER_CAMNOC_HF1>;
+			label = "mas-qxm-camnoc-hf1";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,qport = <2>;
 			qcom,connections = <&slv_qns_mem_noc_hf>;
 			qcom,bus-dev = <&fab_mmss_noc>;
 			qcom,bcms = <&bcm_mm1>;
@@ -1073,6 +1122,15 @@
 			qcom,bcms = <&bcm_sn4>;
 		};
 
+		mas_alc: mas-alc {
+			cell-id = <MSM_BUS_MASTER_ALC>;
+			label = "mas-alc";
+			qcom,buswidth = <1>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_mc_virt>;
+			qcom,bcms = <&bcm_alc>;
+		};
+
 		mas_llcc_mc_display: mas-llcc-mc_display {
 			cell-id = <MSM_BUS_MASTER_LLCC_DISPLAY>;
 			label = "mas-llcc-mc_display";
@@ -1184,6 +1242,15 @@
 			qcom,bcms = <&bcm_sn11>;
 		};
 
+		slv_qns_camnoc_uncomp:slv-qns-camnoc-uncomp {
+			cell-id = <MSM_BUS_SLAVE_CAMNOC_UNCOMP>;
+			label = "slv-qns-camnoc-uncomp";
+			qcom,buswidth = <32>;
+			qcom,agg-ports = <1>;
+			qcom,bus-dev = <&fab_camnoc_virt>;
+			qcom,bcms = <&bcm_mm1>;
+		};
+
 		slv_qhs_a1_noc_cfg:slv-qhs-a1-noc-cfg {
 			cell-id = <MSM_BUS_SLAVE_A1NOC_CFG>;
 			label = "slv-qhs-a1-noc-cfg";
@@ -1635,7 +1702,7 @@
 			qcom,buswidth = <4>;
 			qcom,agg-ports = <4>;
 			qcom,bus-dev = <&fab_mc_virt>;
-			qcom,bcms = <&bcm_mc0>;
+			qcom,bcms = <&bcm_mc0>, <&bcm_acv>;
 		};
 
 		slv_qhs_mdsp_ms_mpu_cfg:slv-qhs-mdsp-ms-mpu-cfg {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-cdp.dtsi
@@ -73,7 +73,7 @@
 	};
 };
 
-&cci {
+&cam_cci {
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
index 922e990..a715025 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-mtp.dtsi
@@ -73,7 +73,7 @@
 	};
 };
 
-&cci {
+&cam_cci {
 	actuator_rear: qcom,actuator@0 {
 		cell-index = <0>;
 		reg = <0x0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
index 4c642e3..91b8738 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera.dtsi
@@ -17,7 +17,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac65000 {
+	cam_csiphy0: qcom,csiphy@ac65000 {
 		cell-index = <0>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0x0ac65000 0x1000>;
@@ -53,7 +53,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac66000{
+	cam_csiphy1: qcom,csiphy@ac66000{
 		cell-index = <1>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac66000 0x1000>;
@@ -90,7 +90,7 @@
 		status = "ok";
 	};
 
-	qcom,csiphy@ac67000 {
+	cam_csiphy2: qcom,csiphy@ac67000 {
 		cell-index = <2>;
 		compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
 		reg = <0xac67000 0x1000>;
@@ -126,7 +126,7 @@
 		status = "ok";
 	};
 
-	cci: qcom,cci@ac4a000 {
+	cam_cci: qcom,cci@ac4a000 {
 		cell-index = <0>;
 		compatible = "qcom,cci";
 		reg = <0xac4a000 0x4000>;
@@ -343,17 +343,17 @@
 		clock-names = "gcc_ahb_clk",
 			"gcc_axi_clk",
 			"soc_ahb_clk",
-			"cpas_ahb_clk",
 			"slow_ahb_clk_src",
+			"cpas_ahb_clk",
 			"camnoc_axi_clk";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 			<&clock_gcc GCC_CAMERA_AXI_CLK>,
 			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
-			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		src-clock-name = "slow_ahb_clk_src";
-		clock-rates = <0 0 0 0 80000000 0>;
+		clock-rates = <0 0 0 80000000 0 0>;
 		qcom,msm-bus,name = "cam_ahb";
 		qcom,msm-bus,num-cases = <4>;
 		qcom,msm-bus,num-paths = <1>;
@@ -366,6 +366,21 @@
 			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
 			<MSM_BUS_MASTER_AMPSS_M0
 			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		vdd-corners = <RPMH_REGULATOR_LEVEL_OFF
+			RPMH_REGULATOR_LEVEL_RETENTION
+			RPMH_REGULATOR_LEVEL_MIN_SVS
+			RPMH_REGULATOR_LEVEL_LOW_SVS
+			RPMH_REGULATOR_LEVEL_SVS
+			RPMH_REGULATOR_LEVEL_SVS_L1
+			RPMH_REGULATOR_LEVEL_NOM
+			RPMH_REGULATOR_LEVEL_NOM_L1
+			RPMH_REGULATOR_LEVEL_NOM_L2
+			RPMH_REGULATOR_LEVEL_TURBO
+			RPMH_REGULATOR_LEVEL_TURBO_L1>;
+		vdd-corner-ahb-mapping = "suspend", "suspend",
+			"svs", "svs", "svs", "svs",
+			"nominal", "nominal", "nominal",
+			"turbo", "turbo";
 		client-id-based;
 		client-names =
 			"csiphy0", "csiphy1", "csiphy2", "cci0",
@@ -389,10 +404,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
 					qcom,msm-bus,name = "cam_hf_1_camnoc";
@@ -400,10 +415,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 			qcom,axi-port2 {
@@ -414,21 +429,21 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
-					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus,name = "cam_hf_2_camnoc";
 					qcom,msm-bus-vector-dyn-vote;
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_HF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 			qcom,axi-port3 {
@@ -439,10 +454,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF
+					MSM_BUS_SLAVE_EBI_CH0 0 0>;
 				};
 				qcom,axi-port-camnoc {
 					qcom,msm-bus,name = "cam_sf_1_camnoc";
@@ -450,10 +465,10 @@
 					qcom,msm-bus,num-cases = <2>;
 					qcom,msm-bus,num-paths = <1>;
 					qcom,msm-bus,vectors-KBps =
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>,
-						<MSM_BUS_MASTER_CAMNOC_SF
-						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>,
+					<MSM_BUS_MASTER_CAMNOC_SF_UNCOMP
+					MSM_BUS_SLAVE_CAMNOC_UNCOMP 0 0>;
 				};
 			};
 		};
@@ -464,7 +479,7 @@
 		cell-index = <0>;
 		label = "cam-cdm-intf";
 		num-hw-cdm = <1>;
-		cdm-client-names = "ife",
+		cdm-client-names = "vfe",
 			"jpeg-dma",
 			"jpeg",
 			"fd";
@@ -493,7 +508,7 @@
 			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
 		clock-rates = <0 0 0 0 0>;
-		cdm-client-names = "vfe";
+		cdm-client-names = "ife";
 		status = "ok";
 	};
 
@@ -503,7 +518,7 @@
 		status = "ok";
 	};
 
-	qcom,csid0@acb3000 {
+	cam_csid0: qcom,csid0@acb3000 {
 		cell-index = <0>;
 		compatible = "qcom,csid170";
 		reg-names = "csid";
@@ -545,7 +560,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe0@acaf000 {
+	cam_vfe0: qcom,vfe0@acaf000 {
 		cell-index = <0>;
 		compatible = "qcom,vfe170";
 		reg-names = "ife";
@@ -582,7 +597,7 @@
 		status = "ok";
 	};
 
-	qcom,csid1@acba000 {
+	cam_csid1: qcom,csid1@acba000 {
 		cell-index = <1>;
 		compatible = "qcom,csid170";
 		reg-names = "csid";
@@ -624,7 +639,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe1@acb6000 {
+	cam_vfe1: qcom,vfe1@acb6000 {
 		cell-index = <1>;
 		compatible = "qcom,vfe170";
 		reg-names = "ife";
@@ -661,7 +676,7 @@
 		status = "ok";
 	};
 
-	qcom,csid-lite@acc8000 {
+	cam_csid_lite: qcom,csid-lite@acc8000 {
 		cell-index = <2>;
 		compatible = "qcom,csid-lite170";
 		reg-names = "csid-lite";
@@ -700,7 +715,7 @@
 		status = "ok";
 	};
 
-	qcom,vfe-lite@acc4000 {
+	cam_vfe_lite: qcom,vfe-lite@acc4000 {
 		cell-index = <2>;
 		compatible = "qcom,vfe-lite170";
 		reg-names = "ife-lite";
@@ -743,7 +758,7 @@
 		status = "ok";
 	};
 
-	qcom,a5@ac00000 {
+	cam_a5: qcom,a5@ac00000 {
 		cell-index = <0>;
 		compatible = "qcom,cam_a5";
 		reg = <0xac00000 0x6000>,
@@ -757,33 +772,29 @@
 		camss-vdd-supply = <&titan_top_gdsc>;
 		clock-names = "gcc_cam_ahb_clk",
 			"gcc_cam_axi_clk",
+			"soc_fast_ahb",
 			"soc_ahb_clk",
 			"cpas_ahb_clk",
 			"camnoc_axi_clk",
 			"icp_apb_clk",
-			"icp_atb_clk",
 			"icp_clk",
-			"icp_clk_src",
-			"icp_cti_clk",
-			"icp_ts_clk";
+			"icp_clk_src";
 		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
 				<&clock_gcc GCC_CAMERA_AXI_CLK>,
+				<&clock_camcc CAM_CC_FAST_AHB_CLK_SRC>,
 				<&clock_camcc CAM_CC_SOC_AHB_CLK>,
 				<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
 				<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
 				<&clock_camcc CAM_CC_ICP_APB_CLK>,
-				<&clock_camcc CAM_CC_ICP_ATB_CLK>,
 				<&clock_camcc CAM_CC_ICP_CLK>,
-				<&clock_camcc CAM_CC_ICP_CLK_SRC>,
-				<&clock_camcc CAM_CC_ICP_CTI_CLK>,
-				<&clock_camcc CAM_CC_ICP_TS_CLK>;
+				<&clock_camcc CAM_CC_ICP_CLK_SRC>;
 
-		clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+		clock-rates = <0 0 400000000 0 0 0 0 0 600000000>;
 		fw_name = "CAMERA_ICP.elf";
 		status = "ok";
 	};
 
-	qcom,ipe0 {
+	cam_ipe0: qcom,ipe0 {
 		cell-index = <0>;
 		compatible = "qcom,cam_ipe";
 		regulator-names = "ipe0-vdd";
@@ -799,11 +810,11 @@
 				<&clock_camcc CAM_CC_IPE_0_CLK>,
 				<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 
-	qcom,ipe1 {
+	cam_ipe1: qcom,ipe1 {
 		cell-index = <1>;
 		compatible = "qcom,cam_ipe";
 		regulator-names = "ipe1-vdd";
@@ -819,11 +830,11 @@
 				<&clock_camcc CAM_CC_IPE_1_CLK>,
 				<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 
-	qcom,bps {
+	cam_bps: qcom,bps {
 		cell-index = <0>;
 		compatible = "qcom,cam_bps";
 		regulator-names = "bps-vdd";
@@ -839,7 +850,7 @@
 				<&clock_camcc CAM_CC_BPS_CLK>,
 				<&clock_camcc CAM_CC_BPS_CLK_SRC>;
 
-		clock-rates = <80000000 400000000 0 0 600000000>;
+		clock-rates = <0 0 0 0 600000000>;
 		status = "ok";
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
index fff9160..ef964ae 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts
@@ -13,8 +13,13 @@
 /dts-v1/;
 /plugin/;
 
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "sdm845-cdp.dtsi"
+#include "sdm845-qupv3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 CDP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 1fdf740..8ace432 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -155,6 +155,8 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -237,7 +239,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -247,7 +249,29 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -291,6 +315,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
@@ -504,3 +546,7 @@
 &wil6210 {
 	status = "ok";
 };
+
+&ext_5v_boost {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index 97573ea..e32ec6e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -780,7 +780,8 @@
 	};
 
 	tpdm_lpass: tpdm@6844000 {
-		compatible = "qcom,coresight-tpdm";
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b968>;
 		reg = <0x6844000 0x1000>;
 		reg-names = "tpdm-base";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index 1b3f2a6..1ce68e1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -65,7 +65,7 @@
 
 		qcom,gpu-quirk-hfi-use-reg;
 
-		qcom,idle-timeout = <100000000>; //msecs
+		qcom,idle-timeout = <80>; //msecs
 		qcom,no-nap;
 
 		qcom,highest-bank-bit = <15>;
@@ -219,6 +219,7 @@
 
 		qcom,secure_align_mask = <0xfff>;
 		qcom,global_pt;
+		qcom,hyp_secure_alloc;
 
 		gfx3d_user: gfx3d_user {
 			compatible = "qcom,smmu-kgsl-cb";
@@ -237,7 +238,7 @@
 		label = "kgsl-gmu";
 		compatible = "qcom,gpu-gmu";
 
-		reg = <0x506a000 0x26000>, <0xb200000 0x300000>;
+		reg = <0x506a000 0x30000>, <0xb200000 0x300000>;
 		reg-names = "kgsl_gmu_reg", "kgsl_gmu_pdc_reg";
 
 		interrupts = <0 304 0>, <0 305 0>;
@@ -277,7 +278,7 @@
 
 			qcom,gmu-pwrlevel@1 {
 				reg = <1>;
-				qcom,gmu-freq = <19200000>;
+				qcom,gmu-freq = <200000000>;
 			};
 
 			qcom,gmu-pwrlevel@2 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
index 79fa580..548bd49 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts
@@ -13,8 +13,13 @@
 /dts-v1/;
 /plugin/;
 
-#include "sdm845.dtsi"
+#include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
 #include "sdm845-mtp.dtsi"
+#include "sdm845-qupv3.dtsi"
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 v1 MTP";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 508b645..ab266ef 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -89,7 +89,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
 };
@@ -99,9 +99,31 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
+};
+
+&dsi_nt35597_truly_dsc_video {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+};
+
+&dsi_nt35597_truly_dsc_cmd {
+	qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+	qcom,mdss-dsi-bl-min-level = <1>;
+	qcom,mdss-dsi-bl-max-level = <4095>;
+	qcom,mdss-dsi-mode-sel-gpio-state = "single_port";
+	qcom,panel-mode-gpio = <&tlmm 52 0>;
+	qcom,platform-reset-gpio = <&tlmm 6 0>;
+	qcom,platform-te-gpio = <&tlmm 10 0>;
 };
 
 &dsi_sim_vid {
@@ -166,6 +188,17 @@
 	status = "ok";
 };
 
+&extcon_storage_cd {
+	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+	debounce-ms = <200>;
+	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&storage_cd>;
+
+	status = "ok";
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -188,6 +221,8 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -208,6 +243,8 @@
 				50000000 100000000 200000000>;
 	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
 
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -262,6 +299,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
@@ -281,6 +336,10 @@
 	status = "okay";
 };
 
+&ext_5v_boost {
+	status = "ok";
+};
+
 &usb_qmp_phy {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
index da5d6fa..c7a4d7d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pcie.dtsi
@@ -39,44 +39,44 @@
 				36 37>;
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0xffffffff>;
-		interrupt-map = <0 0 0 0 &intc 0 141 0
-				0 0 0 1 &intc 0 149 0
-				0 0 0 2 &intc 0 150 0
-				0 0 0 3 &intc 0 151 0
-				0 0 0 4 &intc 0 152 0
-				0 0 0 5 &intc 0 140 0
-				0 0 0 6 &intc 0 672 0
-				0 0 0 7 &intc 0 673 0
-				0 0 0 8 &intc 0 674 0
-				0 0 0 9 &intc 0 675 0
-				0 0 0 10 &intc 0 676 0
-				0 0 0 11 &intc 0 677 0
-				0 0 0 12 &intc 0 678 0
-				0 0 0 13 &intc 0 679 0
-				0 0 0 14 &intc 0 680 0
-				0 0 0 15 &intc 0 681 0
-				0 0 0 16 &intc 0 682 0
-				0 0 0 17 &intc 0 683 0
-				0 0 0 18 &intc 0 684 0
-				0 0 0 19 &intc 0 685 0
-				0 0 0 20 &intc 0 686 0
-				0 0 0 21 &intc 0 687 0
-				0 0 0 22 &intc 0 688 0
-				0 0 0 23 &intc 0 689 0
-				0 0 0 24 &intc 0 690 0
-				0 0 0 25 &intc 0 691 0
-				0 0 0 26 &intc 0 692 0
-				0 0 0 27 &intc 0 693 0
-				0 0 0 28 &intc 0 694 0
-				0 0 0 29 &intc 0 695 0
-				0 0 0 30 &intc 0 696 0
-				0 0 0 31 &intc 0 697 0
-				0 0 0 32 &intc 0 698 0
-				0 0 0 33 &intc 0 699 0
-				0 0 0 34 &intc 0 700 0
-				0 0 0 35 &intc 0 701 0
-				0 0 0 36 &intc 0 702 0
-				0 0 0 37 &intc 0 703 0>;
+		interrupt-map = <0 0 0 0 &pdc 0 141 0
+				0 0 0 1 &pdc 0 149 0
+				0 0 0 2 &pdc 0 150 0
+				0 0 0 3 &pdc 0 151 0
+				0 0 0 4 &pdc 0 152 0
+				0 0 0 5 &pdc 0 140 0
+				0 0 0 6 &pdc 0 672 0
+				0 0 0 7 &pdc 0 673 0
+				0 0 0 8 &pdc 0 674 0
+				0 0 0 9 &pdc 0 675 0
+				0 0 0 10 &pdc 0 676 0
+				0 0 0 11 &pdc 0 677 0
+				0 0 0 12 &pdc 0 678 0
+				0 0 0 13 &pdc 0 679 0
+				0 0 0 14 &pdc 0 680 0
+				0 0 0 15 &pdc 0 681 0
+				0 0 0 16 &pdc 0 682 0
+				0 0 0 17 &pdc 0 683 0
+				0 0 0 18 &pdc 0 684 0
+				0 0 0 19 &pdc 0 685 0
+				0 0 0 20 &pdc 0 686 0
+				0 0 0 21 &pdc 0 687 0
+				0 0 0 22 &pdc 0 688 0
+				0 0 0 23 &pdc 0 689 0
+				0 0 0 24 &pdc 0 690 0
+				0 0 0 25 &pdc 0 691 0
+				0 0 0 26 &pdc 0 692 0
+				0 0 0 27 &pdc 0 693 0
+				0 0 0 28 &pdc 0 694 0
+				0 0 0 29 &pdc 0 695 0
+				0 0 0 30 &pdc 0 696 0
+				0 0 0 31 &pdc 0 697 0
+				0 0 0 32 &pdc 0 698 0
+				0 0 0 33 &pdc 0 699 0
+				0 0 0 34 &pdc 0 700 0
+				0 0 0 35 &pdc 0 701 0
+				0 0 0 36 &pdc 0 702 0
+				0 0 0 37 &pdc 0 703 0>;
 
 		interrupt-names = "int_msi", "int_a", "int_b", "int_c",
 				"int_d", "int_global_int",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 1744574..9946a25 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -19,6 +19,7 @@
 		#gpio-cells = <2>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+		interrupt-parent = <&pdc>;
 
 		ufs_dev_reset_assert: ufs_dev_reset_assert {
 			config {
@@ -298,6 +299,63 @@
 			};
 		};
 
+		/* USB C analog configuration */
+		wcd_usbc_analog_en1 {
+			wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+				mux {
+					pins = "gpio49";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio49";
+					drive-strength = <2>;
+					bias-pull-down;
+					output-low;
+				};
+			};
+
+			wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+				mux {
+					pins = "gpio49";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio49";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+		};
+
+		wcd_usbc_analog_en2 {
+			wcd_usbc_analog_en2_idle: wcd_usbc_ana_en2_idle {
+				mux {
+					pins = "gpio51";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio51";
+					drive-strength = <2>;
+					bias-pull-down;
+					output-low;
+				};
+			};
+
+			wcd_usbc_analog_en2_active: wcd_usbc_ana_en2_active {
+				mux {
+					pins = "gpio51";
+					function = "gpio";
+				};
+				config {
+					pins = "gpio51";
+					drive-strength = <2>;
+					bias-disable;
+					output-high;
+				};
+			};
+		};
+
 		pri_aux_pcm_clk {
 			pri_aux_pcm_clk_sleep: pri_aux_pcm_clk_sleep {
 				mux {
@@ -1555,6 +1613,68 @@
 			};
 		};
 
+		nfc {
+			nfc_int_active: nfc_int_active {
+				/* active state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_int_suspend: nfc_int_suspend {
+				/* sleep state */
+				mux {
+					/* GPIO 63 NFC Read Interrupt */
+					pins = "gpio63";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio63";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_active: nfc_enable_active {
+				/* active state */
+				mux {
+					/* 12: NFC ENABLE, 116: ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-pull-up;
+				};
+			};
+
+			nfc_enable_suspend: nfc_enable_suspend {
+				/* sleep state */
+				mux {
+					/* 12: NFC ENABLE, 116: ESE Enable */
+					pins = "gpio12", "gpio62", "gpio116";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio12", "gpio62", "gpio116";
+					drive-strength = <2>; /* 2 MA */
+					bias-disable;
+				};
+			};
+		};
+
 		qupv3_se3_spi_pins: qupv3_se3_spi_pins {
 			qupv3_se3_spi_active: qupv3_se3_spi_active {
 				mux {
@@ -2680,6 +2800,14 @@
 };
 
 &pm8998_gpios {
+	gpio@d400 {
+		qcom,mode = <0>;
+		qcom,vin-sel = <1>;
+		qcom,src-sel = <0>;
+		qcom,master-en = <1>;
+		status = "okay";
+	};
+
 	key_home {
 		key_home_default: key_home_default {
 			pins = "gpio5";
@@ -2768,4 +2896,13 @@
 			power-source = <0>;
 		};
 	};
+
+	usb2_ext_5v_boost {
+		usb2_ext_5v_boost_default: usb2_ext_5v_boost_default {
+			pins = "gpio10";
+			function = "normal";
+			output-low;
+			power-source = <0>;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 70e749b..6806145 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -145,4 +145,12 @@
 		reg = <0xC300000 0x1000>, <0xC3F0004 0x4>;
 		reg-names = "phys_addr_base", "offset_addr";
 	};
+
+	pdc: interrupt-controller@b220000 {
+		compatible = "qcom,pdc-sdm845";
+		reg = <0xb220000 0x400>;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&intc>;
+		interrupt-controller;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index b51996d..c2fbed5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -14,6 +14,27 @@
 #include <dt-bindings/gpio/gpio.h>
 
 /{
+	bluetooth: bt_wcn3990 {
+		compatible = "qca,wcn3990";
+		qca,bt-vdd-io-supply = <&pm8998_s3>;
+		qca,bt-vdd-xtal-supply = <&pm8998_s5>;
+		qca,bt-vdd-core-supply = <&pm8998_l7>;
+		qca,bt-vdd-pa-supply = <&pm8998_l17>;
+		qca,bt-vdd-ldo-supply = <&pm8998_l25>;
+
+		qca,bt-vdd-io-voltage-level = <1352000 1352000>;
+		qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
+		qca,bt-vdd-core-voltage-level = <1800000 1800000>;
+		qca,bt-vdd-pa-voltage-level = <1304000 1304000>;
+		qca,bt-vdd-ldo-voltage-level = <3312000 3312000>;
+
+		qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-core-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-pa-current-level = <1>; /* LPM/PFM */
+		qca,bt-vdd-ldo-current-level = <1>; /* LPM/PFM */
+	};
+
 	qrd_batterydata: qcom,battery-data {
 		qcom,batt-id-range-pct = <15>;
 		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
@@ -39,6 +60,24 @@
 
 &qupv3_se3_i2c {
 	status = "ok";
+	nq@28 {
+		compatible = "qcom,nq-nci";
+		reg = <0x28>;
+		qcom,nq-irq = <&tlmm 63 0x00>;
+		qcom,nq-ven = <&tlmm 12 0x00>;
+		qcom,nq-firm = <&tlmm 62 0x00>;
+		qcom,nq-clkreq = <&pm8998_gpios 21 0x00>;
+		qcom,nq-esepwr = <&tlmm 116 0x00>;
+		interrupt-parent = <&tlmm>;
+		qcom,clk-src = "BBCLK3";
+		interrupts = <63 0>;
+		interrupt-names = "nfc_irq";
+		pinctrl-names = "nfc_active", "nfc_suspend";
+		pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+		pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+		clocks = <&clock_rpmh RPMH_LN_BB_CLK3>;
+		clock-names = "ref_clk";
+	};
 };
 
 &qupv3_se10_i2c {
@@ -66,6 +105,41 @@
 		qcom,wsa-max-devs = <1>;
 		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
 		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+
+		qcom,msm-mbhc-usbc-audio-supported = <1>;
+
+		qcom,usbc-analog-en2-gpio = <&tlmm 51 0>;
+		pinctrl-names = "aud_active", "aud_sleep";
+		pinctrl-0 = <&wcd_usbc_analog_en2_active>;
+		pinctrl-1 = <&wcd_usbc_analog_en2_idle>;
+	};
+};
+
+&wcd934x_cdc {
+	wcd: wcd_pinctrl@5 {
+		us_euro_sw_wcd_active: us_euro_sw_wcd_active {
+			mux {
+				pins = "gpio1";
+			};
+
+			config {
+				pins = "gpio1";
+				/delete-property/ output-high;
+				bias-high-impedance;
+			};
+		};
+
+		us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep {
+			mux {
+				pins = "gpio1";
+			};
+
+			config {
+				pins = "gpio1";
+				/delete-property/ output-low;
+				bias-high-impedance;
+			};
+		};
 	};
 };
 
@@ -94,6 +168,17 @@
 	status = "ok";
 };
 
+&extcon_storage_cd {
+	gpio = <&tlmm 126 GPIO_ACTIVE_LOW>;
+	debounce-ms = <200>;
+	irq-flags = <IRQ_TYPE_EDGE_BOTH>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&storage_cd>;
+
+	status = "ok";
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qmp-v3";
 
@@ -116,6 +201,30 @@
 	qcom,vddp-ref-clk-supply = <&pm8998_l2>;
 	qcom,vddp-ref-clk-max-microamp = <100>;
 
+	extcon = <&extcon_storage_cd>;
+
+	status = "ok";
+};
+
+&sdhc_2 {
+	vdd-supply = <&pm8998_l21>;
+	qcom,vdd-voltage-level = <2950000 2960000>;
+	qcom,vdd-current-level = <200 800000>;
+
+	vdd-io-supply = <&pm8998_l13>;
+	qcom,vdd-io-voltage-level = <1808000 2960000>;
+	qcom,vdd-io-current-level = <200 22000>;
+
+	pinctrl-names = "active", "sleep";
+	pinctrl-0 = <&sdc2_clk_on  &sdc2_cmd_on &sdc2_data_on>;
+	pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+
+	qcom,clk-rates = <400000 20000000 25000000
+				50000000 100000000 200000000>;
+	qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
+
+	extcon = <&extcon_storage_cd>;
+
 	status = "ok";
 };
 
@@ -130,7 +239,7 @@
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &dsi_sharp_4k_dsc_video_display>;
+	connectors = <&sde_rscc &sde_wb>;
 };
 
 &dsi_sharp_4k_dsc_video {
@@ -138,7 +247,7 @@
 	qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
 	qcom,mdss-dsi-bl-min-level = <1>;
 	qcom,mdss-dsi-bl-max-level = <4095>;
-	qcom,mdss-dsi-panel-mode-gpio-state = "dual_port";
+	qcom,mdss-dsi-mode-sel-gpio-state = "dual_port";
 	qcom,panel-mode-gpio = <&tlmm 52 0>;
 	qcom,platform-te-gpio = <&tlmm 10 0>;
 	qcom,platform-reset-gpio = <&tlmm 6 0>;
@@ -152,3 +261,7 @@
 &wil6210 {
 	status = "ok";
 };
+
+&ext_5v_boost {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
index 0fb455f..1fa6e26 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qupv3.dtsi
@@ -42,7 +42,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se6_4uart_active>;
 		pinctrl-1 = <&qupv3_se6_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 607 0>,
+		interrupts-extended = <&pdc GIC_SPI 607 0>,
 				<&tlmm 48 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
@@ -60,7 +60,7 @@
 		pinctrl-names = "default", "sleep";
 		pinctrl-0 = <&qupv3_se7_4uart_active>;
 		pinctrl-1 = <&qupv3_se7_4uart_sleep>;
-		interrupts-extended = <&intc GIC_SPI 608 0>,
+		interrupts-extended = <&pdc GIC_SPI 608 0>,
 				<&tlmm 96 0>;
 		status = "disabled";
 		qcom,wakeup-byte = <0xFD>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 7ae63af..7befe3b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 
 /* Stub regulators */
@@ -36,7 +37,7 @@
 			reg = <0x3500 0x100>;
 			regulator-name = "pm8998_s12";
 			regulator-min-microvolt = <568000>;
-			regulator-max-microvolt = <1056000>;
+			regulator-max-microvolt = <1136000>;
 			qcom,enable-time = <500>;
 			regulator-always-on;
 		};
@@ -114,9 +115,9 @@
 				regulator-max-microvolt = <19>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <19 19>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <19 19 19>;
 				qcom,cpr-corners = <19>;
 
 				qcom,cpr-corner-fmax-map = <6 12 17 19>;
@@ -137,6 +138,11 @@
 					<568000  568000  568000  568000  568000
 					 568000  568000  568000  568000  584000
 					 584000  584000  632000  632000  632000
+					 632000  672000  712000  712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
 					 632000  672000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
@@ -146,13 +152,30 @@
 					 32000  32000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  748800000
 					 825600000  902400000  979200000
 					1056000000 1132800000 1209600000
 					1286400000 1363200000 1440000000
 					1516800000 1593600000 1651200000
-					1708800000>;
+					1708800000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1651200000
+					1708800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000 1670400000
+					1747200000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2594 2795 2576 2761 2469 2673 2198
@@ -185,6 +208,8 @@
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -201,20 +226,41 @@
 			apc0_l3_vreg: regulator {
 				regulator-name = "apc0_l3_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <11>;
+				regulator-max-microvolt = <13>;
 
 				qcom,cpr-fuse-corners = <4>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <11 11>;
-				qcom,cpr-corners = <11>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <11 11 13>;
+				qcom,cpr-corners =
+					/* Speed bin 0 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 1 */
+					<11 11 11 11 11 11 11 11>,
+					/* Speed bin 2 */
+					<13 13 13 13 13 13 13 13>;
 
-				qcom,cpr-corner-fmax-map = <4 7 9 11>;
+				qcom,cpr-corner-fmax-map =
+					/* Speed bin 0 */
+					<4 7 9 11>,
+					/* Speed bin 1 */
+					<4 7 9 11>,
+					/* Speed bin 2 */
+					<4 7 9 13>;
 
 				qcom,cpr-voltage-ceiling =
+					/* Speed bin 0 */
 					<872000  872000  872000  872000  872000
 					 872000  872000  872000  928000  996000
-					 996000>;
+					 996000>,
+					/* Speed bin 1 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000>,
+					/* Speed bin 2 */
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000  996000
+					 996000  996000  996000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -224,18 +270,43 @@
 					/* Speed bin 1 */
 					<568000  568000  568000  568000  568000
 					 584000  584000  632000  672000  712000
-					 712000>;
+					 712000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000  712000
+					 712000  712000  712000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
+					/* Speed bin 0 */
 					<32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  40000
-					 40000>;
+					 40000>,
+					/* Speed bin 1 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  40000
+					 40000  40000  40000>;
 
 				qcom,corner-frequencies =
+					/* Speed bin 0 */
 					<300000000  422400000  499200000
 					 576000000  652800000  729600000
 					 806400000  883200000  960000000
-					1036800000 1094400000>;
+					1036800000 1094400000>,
+					/* Speed bin 1 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1094400000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1209600000
+					1305600000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -262,12 +333,14 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <11>;
+				qcom,cpr-aging-ref-corner = <11 11 13>;
 				qcom,cpr-aging-ro-scaling-factor = <1620>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -320,7 +393,7 @@
 		qcom,cpr-panic-reg-name-list =
 			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
 
-		qcom,cpr-aging-ref-voltage = <1056000>;
+		qcom,cpr-aging-ref-voltage = <1136000>;
 		vdd-supply = <&pm8998_s12>;
 
 		thread@0 {
@@ -333,23 +406,27 @@
 			apc1_perfcl_vreg: regulator {
 				regulator-name = "apc1_perfcl_corner";
 				regulator-min-microvolt = <1>;
-				regulator-max-microvolt = <26>;
+				regulator-max-microvolt = <27>;
 
 				qcom,cpr-fuse-corners = <3>;
-				qcom,cpr-fuse-combos = <16>;
-				qcom,cpr-speed-bins = <2>;
-				qcom,cpr-speed-bin-corners = <22 24>;
+				qcom,cpr-fuse-combos = <24>;
+				qcom,cpr-speed-bins = <3>;
+				qcom,cpr-speed-bin-corners = <22 24 25>;
 				qcom,cpr-corners =
 					/* Speed bin 0 */
 					<22 22 22 22 22 22 22 22>,
 					/* Speed bin 1 */
-					<24 24 24 24 24 24 24 24>;
+					<24 24 24 24 24 24 24 24>,
+					/* Speed bin 2 */
+					<25 25 25 25 25 25 25 25>;
 
 				qcom,cpr-corner-fmax-map =
 					/* Speed bin 0 */
 					<10 17 22>,
 					/* Speed bin 1 */
-					<10 17 24>;
+					<10 17 24>,
+					/* Speed bin 2 */
+					<10 17 25>;
 
 				qcom,cpr-voltage-ceiling =
 					/* Speed bin 0 */
@@ -357,13 +434,20 @@
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000>,
+					1136000 1136000>,
 					/* Speed bin 1 */
 					<828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
-					1056000 1056000 1056000 1056000>;
+					1136000 1136000 1136000 1136000>,
+					/* Speed bin 2 */
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1136000 1136000 1136000 1136000
+					1136000>;
 
 				qcom,cpr-voltage-floor =
 					/* Speed bin 0 */
@@ -377,7 +461,14 @@
 					 568000  568000  568000  568000  568000
 					 584000  584000  632000  632000  632000
 					 632000  632000  672000  712000  712000
-					 772000  772000  772000  772000>;
+					 772000  772000  772000  772000>,
+					/* Speed bin 2 */
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000  772000  772000
+					 772000>;
 
 				qcom,cpr-floor-to-ceiling-max-range =
 					/* Speed bin 0 */
@@ -391,7 +482,13 @@
 					 32000  32000  32000  32000  32000
 					 32000  32000  32000  32000  32000
 					 32000  32000  40000  40000  40000
-					 40000  40000  40000  40000>;
+					 40000  40000  40000  40000>,
+					/* Speed bin 2 */
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000  40000  40000  40000>;
 
 				qcom,corner-frequencies =
 					/* Speed bin 0 */
@@ -411,7 +508,17 @@
 					1267200000 1344000000 1420800000
 					1497600000 1574400000 1651200000
 					1728000000 1804800000 1881600000
-					1958400000 2035200000 2092800000>;
+					1958400000 2035200000 2092800000>,
+					/* Speed bin 2 */
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000 2035200000 2112000000
+					2208000000>;
 
 				qcom,cpr-ro-scaling-factor =
 					<2857 3056 2828 2952 2699 2796 2447
@@ -442,6 +549,15 @@
 					<     0      0      0>,
 					<     0      0      0>,
 					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
 					<     0      0      0>;
 
 				qcom,cpr-closed-loop-voltage-fuse-adjustment =
@@ -462,6 +578,15 @@
 					<     0      0      0>,
 					<     0      0      0>,
 					<     0      0      0>,
+					<     0      0      0>,
+					/* Speed bin 2 */
+					<100000 100000 100000>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
+					<     0      0      0>,
 					<     0      0      0>;
 
 				qcom,allow-voltage-interpolation;
@@ -469,12 +594,14 @@
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
 
 				qcom,cpr-aging-max-voltage-adjustment = <15000>;
-				qcom,cpr-aging-ref-corner = <22 24>;
+				qcom,cpr-aging-ref-corner = <22 24 25>;
 				qcom,cpr-aging-ro-scaling-factor = <1700>;
 				qcom,allow-aging-voltage-adjustment =
 					/* Speed bin 0 */
 					<0 1 1 1 1 1 1 1>,
 					/* Speed bin 1 */
+					<0 1 1 1 1 1 1 1>,
+					/* Speed bin 2 */
 					<0 1 1 1 1 1 1 1>;
 				qcom,allow-aging-open-loop-voltage-adjustment =
 					<1>;
@@ -1134,8 +1261,12 @@
 		pm8005_s1_level: regulator-s1-level {
 			regulator-name = "pm8005_s1_level";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
-			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+			regulator-min-microvolt
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+			regulator-max-microvolt
+				= <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,init-voltage-level
+				= <RPMH_REGULATOR_LEVEL_MIN_SVS>;
 		};
 	};
 
@@ -1164,13 +1295,21 @@
 			qcom,init-voltage = <600000>;
 		};
 	};
+
+	ext_5v_boost: ext_5v_boost {
+		status = "disabled";
+		compatible = "regulator-fixed";
+		regulator-name = "ext_5v_boost";
+		gpio = <&pmi8998_gpios 10 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+
+		regulator-enable-ramp-delay = <1600>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_ext_5v_boost_default>;
+	};
 };
 
 &pmi8998_charger {
-	smb2_vbus: qcom,smb2-vbus {
-		regulator-name = "smb2-vbus";
-	};
-
 	smb2_vconn: qcom,smb2-vconn {
 		regulator-name = "smb2-vconn";
 	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 255c0b3..1500bb5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -248,10 +248,10 @@
 		label = "dsi_nt35597_truly_dsc_cmd_display";
 		qcom,display-type = "primary";
 
-		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
-		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -272,10 +272,10 @@
 		label = "dsi_nt35597_truly_dsc_video_display";
 		qcom,display-type = "primary";
 
-		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
-		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
-		clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
-			<&mdss_dsi0_pll PCLK_MUX_0_CLK>;
+		qcom,dsi-ctrl = <&mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy1>;
+		clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
+			<&mdss_dsi1_pll PCLK_MUX_1_CLK>;
 		clock-names = "src_byte_clk", "src_pixel_clk";
 
 		pinctrl-names = "panel_active", "panel_suspend";
@@ -368,10 +368,106 @@
 		cell-index = <0>;
 		label = "wb_display";
 	};
+
+	sde_dp: qcom,dp_display@0 {
+		cell-index = <0>;
+		compatible = "qcom,dp-display";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+
+		reg =	<0xae90000 0xa84>,
+			<0x88eaa00 0x200>,
+			<0x88ea200 0x200>,
+			<0x88ea600 0x200>,
+			<0xaf02000 0x1a0>,
+			<0x780000 0x621c>,
+			<0x88ea030 0x10>,
+			<0x0aee1000 0x034>;
+		reg-names = "dp_ctrl", "dp_phy", "dp_ln_tx0", "dp_ln_tx1",
+			"dp_mmss_cc", "qfprom_physical", "dp_pll",
+			"hdcp_physical";
+
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <12 0>;
+
+		clocks =  <&clock_dispcc DISP_CC_MDSS_DP_AUX_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>,
+			 <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
+			 <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>;
+		clock-names = "core_aux_clk", "core_usb_ref_clk_src",
+			"core_usb_ref_clk", "core_usb_cfg_ahb_clk",
+			"core_usb_pipe_clk", "ctrl_link_clk",
+			"ctrl_link_iface_clk", "ctrl_crypto_clk",
+			"ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent";
+
+		qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
+
+		qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03];
+
+		qcom,core-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,core-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1200000>;
+				qcom,supply-max-voltage = <1200000>;
+				qcom,supply-enable-load = <21800>;
+				qcom,supply-disable-load = <4>;
+			};
+		};
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <880000>;
+				qcom,supply-max-voltage = <880000>;
+				qcom,supply-enable-load = <36000>;
+				qcom,supply-disable-load = <32>;
+			};
+		};
+	};
+};
+
+&sde_dp {
+	pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+	pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+	pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+	qcom,aux-en-gpio = <&tlmm 43 0>;
+	qcom,aux-sel-gpio = <&tlmm 51 0>;
+	qcom,usbplug-cc-gpio = <&tlmm 38 0>;
 };
 
 &mdss_mdp {
-	connectors = <&sde_rscc &sde_wb &dsi_dual_nt35597_truly_cmd_display>;
+	connectors = <&sde_rscc &sde_wb &sde_dp>;
 };
 
 &dsi_dual_nt35597_truly_video {
@@ -396,7 +492,8 @@
 	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,display-topology = <2 2 2>;
+	qcom,display-topology = <1 1 1>,
+				<2 2 1>;
 	qcom,default-topology-index = <0>;
 };
 
@@ -404,7 +501,8 @@
 	qcom,mdss-dsi-panel-phy-timings = [00 15 05 05 20 1f 05 05 03 03 04 00];
 	qcom,mdss-dsi-t-clk-post = <0x0b>;
 	qcom,mdss-dsi-t-clk-pre = <0x23>;
-	qcom,display-topology = <2 2 2>;
+	qcom,display-topology = <1 1 1>,
+				<2 2 1>;
 	qcom,default-topology-index = <0>;
 };
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
index 168f2a9..b9eac3c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-pll.dtsi
@@ -64,4 +64,46 @@
 			};
 		};
 	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+		compatible = "qcom,mdss_dp_pll_10nm";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0x088ea000 0x200>,
+		      <0x088eaa00 0x200>,
+		      <0x088ea200 0x200>,
+		      <0x088ea600 0x200>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "ln_tx0_base",
+			"ln_tx1_base", "gdsc_base";
+
+		gdsc-supply = <&mdss_core_gdsc>;
+
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+		clock-names = "iface_clk", "ref_clk_src", "ref_clk",
+			"cfg_ahb_clk", "pipe_clk";
+		clock-rate = <0>;
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+
+		};
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index e92bfd9..08c7cf0 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -35,7 +35,7 @@
 		sde-vdd-supply = <&mdss_core_gdsc>;
 
 		/* interrupt config */
-		interrupt-parent = <&intc>;
+		interrupt-parent = <&pdc>;
 		interrupts = <0 83 0>;
 		interrupt-controller;
 		#interrupt-cells = <1>;
@@ -86,8 +86,6 @@
 		qcom,sde-dither-version = <0x00010000>;
 		qcom,sde-dither-size = <0x20>;
 
-		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
-
 		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
 					"dma", "dma", "dma", "dma";
 
@@ -136,6 +134,39 @@
 		qcom,sde-vbif-off = <0>;
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
+		qcom,sde-vbif-memtype-0 = <3 3 3 3 3 3 3 3>;
+		qcom,sde-vbif-memtype-1 = <3 3 3 3 3 3>;
+
+		qcom,sde-vbif-qos-rt-remap = <3 3 4 4 5 5 6 6>;
+		qcom,sde-vbif-qos-nrt-remap = <3 3 3 3 3 3 3 3>;
+
+		qcom,sde-danger-lut = <0x0000000f 0x0000ffff 0x00000000
+			0x00000000>;
+		qcom,sde-safe-lut = <0xfffc 0xff00 0xffff 0xffff>;
+		qcom,sde-qos-lut-linear =
+			<4 0x00000000 0x00000357>,
+			<5 0x00000000 0x00003357>,
+			<6 0x00000000 0x00023357>,
+			<7 0x00000000 0x00223357>,
+			<8 0x00000000 0x02223357>,
+			<9 0x00000000 0x22223357>,
+			<10 0x00000002 0x22223357>,
+			<11 0x00000022 0x22223357>,
+			<12 0x00000222 0x22223357>,
+			<13 0x00002222 0x22223357>,
+			<14 0x00012222 0x22223357>,
+			<0 0x00112222 0x22223357>;
+		qcom,sde-qos-lut-macrotile =
+			<10 0x00000003 0x44556677>,
+			<11 0x00000033 0x44556677>,
+			<12 0x00000233 0x44556677>,
+			<13 0x00002233 0x44556677>,
+			<14 0x00012233 0x44556677>,
+			<0 0x00112233 0x44556677>;
+		qcom,sde-qos-lut-nrt =
+			<0 0x00000000 0x00000000>;
+		qcom,sde-qos-lut-cwb =
+			<0 0x75300000 0x00000000>;
 
 		qcom,sde-inline-rotator = <&mdss_rotator 0>;
 
@@ -261,7 +292,19 @@
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
-		qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+		/* Offline rotator QoS setting */
+		qcom,mdss-rot-vbif-qos-setting = <3 3 3 3 3 3 3 3>;
+		qcom,mdss-rot-cdp-setting = <1 1>;
+		qcom,mdss-rot-qos-lut = <0x0 0x0 0x0 0x0>;
+		qcom,mdss-rot-danger-lut = <0x0 0x0>;
+		qcom,mdss-rot-safe-lut = <0x0000ffff 0x0000ffff>;
+
+		/* Inline rotator QoS setting */
+		/* Default register values for RD: qos/danger/safe */
+		qcom,mdss-inline-rot-qos-lut = <0x44556677 0x00112233
+							0x44556677 0x00112233>;
+		qcom,mdss-inline-rot-danger-lut = <0x0055aaff 0x0000ffff>;
+		qcom,mdss-inline-rot-safe-lut = <0x0000f000 0x0000ff00>;
 
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 6fb6fb8..3870d8f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -67,7 +67,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a600000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 133 0>;
 			usb-phy = <&qusb_phy0>, <&usb_qmp_dp_phy>;
 			tx-fifo-resize;
@@ -80,7 +79,6 @@
 		qcom,usbbam@a704000 {
 			compatible = "qcom,usb-bam-msm";
 			reg = <0xa704000 0x17000>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 132 0>;
 
 			qcom,bam-type = <0>;
@@ -361,7 +359,6 @@
 		dwc3@a600000 {
 			compatible = "snps,dwc3";
 			reg = <0x0a800000 0xcd00>;
-			interrupt-parent = <&intc>;
 			interrupts = <0 138 0>;
 			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
 			tx-fifo-resize;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 4fe9282..af12224 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -27,6 +27,10 @@
 		qcom,max-secure-instances = <5>;
 		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
 
+		/* LLCC Info */
+		cache-slice-names = "vidsc0", "vidsc1";
+		cache-slices = <&llcc 2>, <&llcc 3>;
+
 		/* Supply */
 		venus-supply = <&venus_gdsc>;
 		venus-core0-supply = <&vcodec0_gdsc>;
@@ -91,6 +95,14 @@
 			qcom,bus-governor = "performance";
 			qcom,bus-range-kbps = <1000 1000>;
 		};
+		venus_bus_llcc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-llcc";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <17000 125700>;
+		};
 
 		/* MMUs */
 		non_secure_cb {
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index ad451ce..6284361 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -30,7 +30,7 @@
 	model = "Qualcomm Technologies, Inc. SDM845";
 	compatible = "qcom,sdm845";
 	qcom,msm-id = <321 0x0>;
-	interrupt-parent = <&intc>;
+	interrupt-parent = <&pdc>;
 
 	aliases {
 		ufshc1 = &ufshc_mem; /* Embedded UFS slot */
@@ -459,14 +459,35 @@
 		compatible = "simple-bus";
 	};
 
+	firmware: firmware {
+		android {
+			compatible = "android,firmware";
+			fstab {
+				compatible = "android,fstab";
+				vendor {
+					compatible = "android,vendor";
+					dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor";
+					type = "ext4";
+					mnt_flags = "ro,barrier=1,discard";
+					fsmgr_flags = "wait,slotselect";
+				};
+			};
+		};
+	};
+
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
 
-		removed_regions: removed_regions@85800000 {
+		removed_region1: removed_region1@85700000 {
 			no-map;
-			reg = <0 0x85800000 0 0x3700000>;
+			reg = <0 0x85700000 0 0x800000>;
+		};
+
+		removed_region2: removed_region2@85fc0000 {
+			no-map;
+			reg = <0 0x85fc0000 0 0x2f40000>;
 		};
 
 		pil_camera_mem: camera_region@8ab00000 {
@@ -567,6 +588,12 @@
 			size = <0 0x5c00000>;
 		};
 
+		dump_mem: mem_dump_region {
+			compatible = "shared-dma-pool";
+			reusable;
+			size = <0 0x2400000>;
+		};
+
 		/* global autoconfigured region for contiguous allocations */
 		linux,cma {
 			compatible = "shared-dma-pool";
@@ -600,6 +627,7 @@
 		reg = <0x17a00000 0x10000>,     /* GICD */
 		      <0x17a60000 0x100000>;    /* GICR * 8 */
 		interrupts = <1 9 4>;
+		interrupt-parent = <&intc>;
 	};
 
 	timer {
@@ -748,62 +776,6 @@
 		};
 	};
 
-	msm_cpufreq: qcom,msm-cpufreq {
-		compatible = "qcom,msm-cpufreq";
-		clock-names = "cpu0_clk", "cpu4_clk";
-		clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
-			 <&clock_cpucc CPU4_PERFCL_CLK>;
-
-		qcom,governor-per-policy;
-
-		qcom,cpufreq-table-0 =
-			<  300000 >,
-			<  422400 >,
-			<  499200 >,
-			<  576000 >,
-			<  652800 >,
-			<  748800 >,
-			<  825600 >,
-			<  902400 >,
-			<  979200 >,
-			< 1056000 >,
-			< 1132800 >,
-			< 1209600 >,
-			< 1286400 >,
-			< 1363200 >,
-			< 1440000 >,
-			< 1516800 >,
-			< 1593600 >,
-			< 1651200 >,
-			< 1708800 >;
-
-		qcom,cpufreq-table-4 =
-			<  300000 >,
-			<  422400 >,
-			<  499200 >,
-			<  576000 >,
-			<  652800 >,
-			<  729600 >,
-			<  806400 >,
-			<  883200 >,
-			<  960000 >,
-			< 1036800 >,
-			< 1113600 >,
-			< 1190400 >,
-			< 1267200 >,
-			< 1344000 >,
-			< 1420800 >,
-			< 1497600 >,
-			< 1574400 >,
-			< 1651200 >,
-			< 1728000 >,
-			< 1804800 >,
-			< 1881600 >,
-			< 1958400 >,
-			< 2035200 >,
-			< 2092800 >;
-	};
-
 	cpubw: qcom,cpubw {
 		compatible = "qcom,devbw";
 		governor = "performance";
@@ -955,7 +927,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	l3_cpu4: qcom,l3-cpu4 {
@@ -975,7 +949,9 @@
 			< 883200 >,
 			< 960000 >,
 			< 1036800 >,
-			< 1094400 >;
+			< 1094400 >,
+			< 1209600 >,
+			< 1305600 >;
 	};
 
 	devfreq_l3lat_0: qcom,cpu0-l3lat-mon {
@@ -1035,8 +1011,8 @@
 			cpu-to-dev-map-0 =
 				< 1708800  762 >;
 			cpu-to-dev-map-4 =
-				< 2035200  762 >,
-				< 2092800 2597 >;
+				< 1881600  762 >,
+				< 2208000 2597 >;
 		};
 	};
 
@@ -1072,6 +1048,20 @@
 		reg-names = "cc_base";
 		vdd_cx-supply = <&pm8998_s9_level>;
 		vdd_mx-supply = <&pm8998_s6_level>;
+		qcom,cam_cc_csi0phytimer_clk_src-opp-handle = <&cam_csiphy0>;
+		qcom,cam_cc_csi1phytimer_clk_src-opp-handle = <&cam_csiphy1>;
+		qcom,cam_cc_csi2phytimer_clk_src-opp-handle = <&cam_csiphy2>;
+		qcom,cam_cc_cci_clk_src-opp-handle = <&cam_cci>;
+		qcom,cam_cc_ife_0_csid_clk_src-opp-handle = <&cam_csid0>;
+		qcom,cam_cc_ife_0_clk_src-opp-handle = <&cam_vfe0>;
+		qcom,cam_cc_ife_1_csid_clk_src-opp-handle = <&cam_csid1>;
+		qcom,cam_cc_ife_1_clk_src-opp-handle = <&cam_vfe1>;
+		qcom,cam_cc_ife_lite_csid_clk_src-opp-handle = <&cam_csid_lite>;
+		qcom,cam_cc_ife_lite_clk_src-opp-handle = <&cam_vfe_lite>;
+		qcom,cam_cc_icp_clk_src-opp-handle = <&cam_a5>;
+		qcom,cam_cc_ipe_0_clk_src-opp-handle = <&cam_ipe0>;
+		qcom,cam_cc_ipe_1_clk_src-opp-handle = <&cam_ipe1>;
+		qcom,cam_cc_bps_clk_src-opp-handle = <&cam_bps>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -1134,6 +1124,9 @@
 		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
 		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
 
+		l3-dev0 = <&l3_cpu0>;
+		l3-dev4 = <&l3_cpu4>;
+
 		qcom,l3-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1158,6 +1151,21 @@
 			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
 			<  1094400000 0x402c0a39 0x00002e2e 0x3 11 >;
 
+		qcom,l3-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x2 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x3 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x3 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x3 12 >,
+			<  1305600000 0x40340c44 0x00003636 0x3 13 >;
+
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1198,6 +1206,27 @@
 			<  1651200000 0x403c1156 0x00004545 0x3 18 >,
 			<  1708800000 0x40441259 0x00004747 0x3 19 >;
 
+		qcom,pwrcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003232 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x2 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x2 14 >,
+			<  1440000000 0x40340e4b 0x00003c3c 0x2 15 >,
+			<  1516800000 0x403c0f4f 0x00003f3f 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004242 0x2 17 >,
+			<  1670400000 0x40441157 0x00004646 0x3 18 >,
+			<  1747200000 0x4044125b 0x00004949 0x3 19 >;
+
 		qcom,perfcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
@@ -1248,6 +1277,33 @@
 			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
 			<  2092800000 0x4054176d 0x00005757 0x3 24 >;
 
+		qcom,perfcl-speedbin2-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072e 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x2 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x2 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x2 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x2 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x3 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x3 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x3 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >,
+			<  2035200000 0x404c166a 0x00005555 0x3 23 >,
+			<  2112000000 0x4054176e 0x00005858 0x3 24 >,
+			<  2208000000 0x40541873 0x00005c5c 0x3 25 >;
+
 		qcom,l3-min-cpr-vc-bin0 = <7>;
 		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
 		qcom,perfcl-min-cpr-vc-bin0 = <7>;
@@ -1712,7 +1768,7 @@
 	qcom,ssc@5c00000 {
 		compatible = "qcom,pil-tz-generic";
 		reg = <0x5c00000 0x4000>;
-		interrupts = <0 377 1>;
+		interrupts = <0 494 1>;
 
 		vdd_cx-supply = <&pm8998_l27_level>;
 		vdd_px-supply = <&pm8998_lvs2>;
@@ -2082,19 +2138,19 @@
 		};
 		qcom,llcc1_d_cache {
 			qcom,dump-node = <&LLCC_1>;
-			qcom,dump-id = <0x121>;
+			qcom,dump-id = <0x140>;
 		};
 		qcom,llcc2_d_cache {
 			qcom,dump-node = <&LLCC_2>;
-			qcom,dump-id = <0x122>;
+			qcom,dump-id = <0x141>;
 		};
 		qcom,llcc3_d_cache {
 			qcom,dump-node = <&LLCC_3>;
-			qcom,dump-id = <0x123>;
+			qcom,dump-id = <0x142>;
 		};
 		qcom,llcc4_d_cache {
 			qcom,dump-node = <&LLCC_4>;
-			qcom,dump-id = <0x124>;
+			qcom,dump-id = <0x143>;
 		};
 		qcom,l1_tlb_dump0 {
 			qcom,dump-node = <&L1_TLB_0>;
@@ -3186,44 +3242,48 @@
 			};
 		};
 
-		gpu0-step {
+		gpu-virt-max-step {
 			polling-delay-passive = <10>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 11>;
+			polling-delay = <100>;
 			thermal-governor = "step_wise";
 			trips {
-				gpu0_trip: gpu0-trip {
+				gpu_trip0: gpu-trip0 {
 					temperature = <95000>;
 					hysteresis = <0>;
 					type = "passive";
 				};
 			};
 			cooling-maps {
-				gpu0_cdev {
-					trip = <&gpu0_trip>;
+				gpu_cdev0 {
+					trip = <&gpu_trip0>;
 					cooling-device =
-						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+						<&msm_gpu 0 THERMAL_NO_LIMIT>;
 				};
 			};
 		};
 
-		gpu1-step {
-			polling-delay-passive = <10>;
-			polling-delay = <0>;
-			thermal-sensors = <&tsens0 12>;
-			thermal-governor = "step_wise";
+		silver-virt-max-usr {
+			polling-delay-passive = <100>;
+			polling-delay = <100>;
+			thermal-governor = "user_space";
 			trips {
-				gpu1_trip: gpu1-trip {
-					temperature = <95000>;
+				silver-trip {
+					temperature = <120000>;
 					hysteresis = <0>;
 					type = "passive";
 				};
 			};
-			cooling-maps {
-				gpu1_cdev {
-					trip = <&gpu1_trip>;
-					cooling-device =
-						<&msm_gpu 1 THERMAL_NO_LIMIT>;
+		};
+
+		gold-virt-max-usr {
+			polling-delay-passive = <100>;
+			polling-delay = <100>;
+			thermal-governor = "user_space";
+			trips {
+				gold-trip {
+					temperature = <120000>;
+					hysteresis = <0>;
+					type = "passive";
 				};
 			};
 		};
@@ -3912,6 +3972,56 @@
 		#thermal-sensor-cells = <1>;
 	};
 
+	mem_dump {
+		compatible = "qcom,mem-dump";
+		memory-region = <&dump_mem>;
+
+		rpmh_dump {
+			qcom,dump-size = <0x2000000>;
+			qcom,dump-id = <0xec>;
+		};
+
+		rpm_sw_dump {
+			qcom,dump-size = <0x28000>;
+			qcom,dump-id = <0xea>;
+		};
+
+		pmic_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xe4>;
+		};
+
+		tmc_etf_dump {
+			qcom,dump-size = <0x10000>;
+			qcom,dump-id = <0xf0>;
+		};
+
+		tmc_etf_swao_dump {
+			qcom,dump-size = <0x8400>;
+			qcom,dump-id = <0xf1>;
+		};
+
+		tmc_etr_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x100>;
+		};
+
+		tmc_etf_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x101>;
+		};
+
+		tmc_etf_swao_reg_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0x102>;
+		};
+
+		misc_data_dump {
+			qcom,dump-size = <0x1000>;
+			qcom,dump-id = <0xe8>;
+		};
+	};
+
 	gpi_dma0: qcom,gpi-dma@0x800000 {
 		#dma-cells = <6>;
 		compatible = "qcom,gpi-dma";
@@ -4075,10 +4185,12 @@
 };
 
 &vcodec0_gdsc {
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
 &vcodec1_gdsc {
+	qcom,support-hw-trigger;
 	status = "ok";
 };
 
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 19a6db8..f10047f 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -9,6 +9,8 @@
 CONFIG_SCHED_WALT=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -47,7 +49,7 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM830=y
+CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
@@ -222,6 +224,7 @@
 CONFIG_CFG80211=y
 CONFIG_CFG80211_INTERNAL_REGDB=y
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -296,7 +299,7 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_SDM670=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -321,6 +324,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -547,6 +551,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 04a0d3e..4cd202c 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -12,6 +12,8 @@
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
@@ -52,7 +54,7 @@
 # CONFIG_IOSCHED_DEADLINE is not set
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SDM845=y
-CONFIG_ARCH_SDM830=y
+CONFIG_ARCH_SDM670=y
 CONFIG_PCI=y
 CONFIG_PCI_MSM=y
 CONFIG_SCHED_MC=y
@@ -232,6 +234,7 @@
 CONFIG_CFG80211_INTERNAL_REGDB=y
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
 CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
 CONFIG_IPC_ROUTER=y
 CONFIG_IPC_ROUTER_SECURITY=y
 CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
@@ -305,7 +308,7 @@
 CONFIG_SPMI=y
 CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_SDM830=y
+CONFIG_PINCTRL_SDM670=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -330,6 +333,7 @@
 CONFIG_THERMAL_TSENS=y
 CONFIG_MSM_BCL_PERIPHERAL_CTL=y
 CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
 CONFIG_MFD_I2C_PMIC=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_WCD934X_CODEC=y
@@ -620,6 +624,7 @@
 CONFIG_CORESIGHT_DUMMY=y
 CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
 CONFIG_CRYPTO_XCBC=y
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 4e0497f..0fe7e43 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -42,25 +42,35 @@
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
 
-#define __smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) }; 			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u8 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u16 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u32 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("stlr %1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u64 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	}								\
 } while (0)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 91b26d2..ae852ad 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -46,7 +46,7 @@
 	"	swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"	\
 		__nops(3)						\
 	"	" #nop_lse)						\
-	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)			\
+	: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)	\
 	: "r" (x)							\
 	: cl);								\
 									\
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 73fee2c..21934d1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -98,11 +98,12 @@
  */
 #define __range_ok(addr, size)						\
 ({									\
+	unsigned long __addr = (unsigned long __force)(addr);		\
 	unsigned long flag, roksum;					\
 	__chk_user_ptr(addr);						\
 	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
 		: "=&r" (flag), "=&r" (roksum)				\
-		: "1" (addr), "Ir" (size),				\
+		: "1" (__addr), "Ir" (size),				\
 		  "r" (current_thread_info()->addr_limit)		\
 		: "cc");						\
 	flag;								\
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index bdb35b9..29d2ad8 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -306,7 +306,8 @@
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
 	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),		\
+	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
+	  "i" (-EFAULT),					\
 	  "i" (__SWP_LL_SC_LOOPS)				\
 	: "memory");						\
 	uaccess_disable();					\
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index aaf42ae..14c4e3b 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,6 +2,8 @@
 # Makefile for Kernel-based Virtual Machine module, HYP part
 #
 
+ccflags-y += -fno-stack-protector
+
 KVM=../../../../virt/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 9c4b57a..d8199e1 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -252,8 +252,9 @@
 	 */
 	off = offsetof(struct bpf_array, ptrs);
 	emit_a64_mov_i64(tmp, off, ctx);
-	emit(A64_LDR64(tmp, r2, tmp), ctx);
-	emit(A64_LDR64(prg, tmp, r3), ctx);
+	emit(A64_ADD(1, tmp, r2, tmp), ctx);
+	emit(A64_LSL(1, prg, r3, 3), ctx);
+	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
 	/* goto *(prog->bpf_func + prologue_size); */
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 1fd147f..5f10f9b 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI__ASM_AVR32_SOCKET_H */
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index afbc98f0..ed960d3 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -90,5 +90,7 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 0018fad..9790d13 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -99,4 +99,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 5fe42fc..ad25676 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 07238b3..3db3812 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
 
 #define segment_eq(a, b)	((a).seg == (b).seg)
 
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
-				((addr) > PAGE_OFFSET &&		\
-				 (addr) < LINCORE_BASE))
-
 static inline int __access_ok(unsigned long addr, unsigned long size)
 {
-	return __kernel_ok || !__user_bad(addr, size);
+	/*
+	 * Allow access to the user mapped memory area, but not the system area
+	 * before it. The check extends to the top of the address space when
+	 * kernel access is allowed (there's no real reason to user copy to the
+	 * system area in any case).
+	 */
+	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+		   size <= get_fs().seg - addr))
+		return true;
+	/*
+	 * Explicitly allow NULL pointers here. Parts of the kernel such
+	 * as readv/writev use access_ok to validate pointers, but want
+	 * to allow NULL pointers for various reasons. NULL pointers are
+	 * safe to allow through because the first page is not mappable on
+	 * Meta.
+	 */
+	if (!addr)
+		return true;
+	/* Allow access to core code memory area... */
+	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+	    size <= LINCORE_CODE_LIMIT + 1 - addr)
+		return true;
+	/* ... but no other areas. */
+	return false;
 }
 
 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
@@ -186,8 +194,13 @@
 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
 					     long count);
 
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	if (!access_ok(VERIFY_READ, src, 1))
+		return -EFAULT;
+	return __strncpy_from_user(dst, src, count);
+}
 /*
  * Return the size of a string (including the ending 0)
  *
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5a4f2eb..5e844f6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1368,6 +1368,7 @@
 	select WEAK_ORDERING
 	select WEAK_REORDERING_BEYOND_LLSC
 	select MIPS_PGD_C0_CONTEXT
+	select MIPS_L1_CACHE_SHIFT_6
 	select GPIOLIB
 	help
 		The Loongson 3 processor implements the MIPS64R2 instruction
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2027240a..2f106d0 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -108,4 +108,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5129f23..69f9618 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 9c935d7..b96a193 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -89,4 +89,6 @@
 
 #define SO_CNX_ADVICE		0x402E
 
+#define SO_COOKIE		0x4032
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b9e3f0a..0012f03 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -70,8 +70,9 @@
  * switch_mm is the entry point called from the architecture independent
  * code in kernel/sched/core.c
  */
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+				      struct mm_struct *next,
+				      struct task_struct *tsk)
 {
 	/* Mark this context has been used on the new CPU */
 	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
@@ -110,6 +111,18 @@
 	switch_mmu_context(prev, next, tsk);
 }
 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 /*
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 1672e33..e78550f 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -97,4 +97,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif	/* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a5dd493..6ef8f0b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -724,7 +724,7 @@
  */
 #define MAX_WAIT_FOR_RECOVERY 300
 
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
 {
 	struct pci_bus *frozen_bus;
 	struct eeh_dev *edev, *tmp;
@@ -736,7 +736,7 @@
 	if (!frozen_bus) {
 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
-		return;
+		return false;
 	}
 
 	eeh_pe_update_time_stamp(pe);
@@ -870,7 +870,7 @@
 	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 
-	return;
+	return false;
 
 excess_failures:
 	/*
@@ -915,8 +915,12 @@
 			pci_lock_rescan_remove();
 			pci_hp_remove_devices(frozen_bus);
 			pci_unlock_rescan_remove();
+
+			/* The passed PE should no longer be used */
+			return true;
 		}
 	}
+	return false;
 }
 
 static void eeh_handle_special_event(void)
@@ -982,7 +986,14 @@
 		 */
 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
-			eeh_handle_normal_event(pe);
+			/*
+			 * eeh_handle_normal_event() can make the PE stale if it
+			 * determines that the PE cannot possibly be recovered.
+			 * Don't modify the PE state if that's the case.
+			 */
+			if (eeh_handle_normal_event(pe))
+				continue;
+
 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		} else {
 			pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 38a1f96..ca03eb2 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
@@ -799,8 +805,14 @@
 	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 	beq+	1f
 
+#ifdef CONFIG_RELOCATABLE
+	ld	r15,PACATOC(r13)
+	ld	r14,interrupt_base_book3e@got(r15)
+	ld	r15,__end_interrupts@got(r15)
+#else
 	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
 	cmpld	cr0,r10,r14
 	cmpld	cr1,r10,r15
 	blt+	cr0,1f
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5e7ece0..ea236bf 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -205,6 +205,8 @@
 {
 	int index;
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/*
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 49a680d..c716473 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -839,6 +839,25 @@
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	/*
+	 * If we are in a transaction and FP is off then we can't have
+	 * used FP inside that transaction. Hence the checkpointed
+	 * state is the same as the live state. We need to copy the
+	 * live state to the checkpointed state so that when the
+	 * transaction is restored, the checkpointed state is correct
+	 * and the aborted transaction sees the correct state. We use
+	 * ckpt_regs.msr here as that's what tm_reclaim will use to
+	 * determine if it's going to write the checkpointed state or
+	 * not. So either this will write the checkpointed registers,
+	 * or reclaim will. Similarly for VMX.
+	 */
+	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+		memcpy(&thr->ckfp_state, &thr->fp_state,
+		       sizeof(struct thread_fp_state));
+	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+		memcpy(&thr->ckvr_state, &thr->vr_state,
+		       sizeof(struct thread_vr_state));
+
 	giveup_all(container_of(thr, struct task_struct, thread));
 
 	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 023a462..43021f8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -302,8 +302,6 @@
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
-	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
@@ -737,6 +735,8 @@
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 7de7124..fd59680 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -81,7 +81,7 @@
 	gfp_t gfp_mask = GFP_USER;
 	struct page *new_page;
 
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return NULL;
 
 	if (PageHighMem(page))
@@ -100,7 +100,7 @@
 	LIST_HEAD(cma_migrate_pages);
 
 	/* Ignore huge pages for now */
-	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
+	if (PageCompound(page))
 		return -EBUSY;
 
 	lru_add_drain();
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e84d8fb..378c37a 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+		ret = hash_page(ea,
+				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+				0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 423e450..72ae2cd 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -288,7 +288,6 @@
 	if (rc)
 		return rc;
 
-	of_node_put(dn); /* Must decrement the refcount */
 	return 0;
 }
 
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 41b51c2..04fe908 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -96,4 +96,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 408b4f4..5982544 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -427,6 +427,20 @@
 }
 
 /*
+ * Initialize final note (needed for /proc/vmcore code)
+ */
+static void *nt_final(void *ptr)
+{
+	Elf64_Nhdr *note;
+
+	note = (Elf64_Nhdr *) ptr;
+	note->n_namesz = 0;
+	note->n_descsz = 0;
+	note->n_type = 0;
+	return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
+}
+
+/*
  * Initialize ELF header (new kernel)
  */
 static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
@@ -513,6 +527,7 @@
 		if (sa->prefix != 0)
 			ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
 	ptr = nt_vmcoreinfo(ptr);
+	ptr = nt_final(ptr);
 	memset(phdr, 0, sizeof(*phdr));
 	phdr->p_type = PT_NOTE;
 	phdr->p_offset = notes_offset;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 49a3073..c438168 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -321,6 +321,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -606,6 +607,7 @@
 	lg	%r14,__LC_VDSO_PER_CPU
 	lmg	%r0,%r10,__PT_R0(%r11)
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
 	stpt	__LC_EXIT_TIMER
 	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
 	lmg	%r11,%r15,__PT_R11(%r11)
@@ -1135,15 +1137,23 @@
 	br	%r14
 
 .Lcleanup_sysc_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_sysc_restore_insn:
+	.quad	.Lsysc_exit_timer
 	.quad	.Lsysc_done - 4
 
 .Lcleanup_io_tif:
@@ -1151,15 +1161,20 @@
 	br	%r14
 
 .Lcleanup_io_restore:
+	# check if stpt has been executed
 	clg	%r9,BASED(.Lcleanup_io_restore_insn)
-	je	0f
+	jh	0f
+	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
+	je	1f
 	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
 	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
 	mvc	0(64,%r11),__PT_R8(%r9)
 	lmg	%r0,%r7,__PT_R0(%r9)
-0:	lmg	%r8,%r9,__LC_RETURN_PSW
+1:	lmg	%r8,%r9,__LC_RETURN_PSW
 	br	%r14
 .Lcleanup_io_restore_insn:
+	.quad	.Lio_exit_timer
 	.quad	.Lio_done - 4
 
 .Lcleanup_idle:
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index ce6f569..cf19072 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 /*
  * In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1..be0cc1b 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@
  */
 extern unsigned char boot_cpu_id;
 
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 
 extern int serial_console;
 static inline int con_is_present(void)
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 31aede3..de15f0a 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -86,6 +86,8 @@
 
 #define SO_CNX_ADVICE		0x0037
 
+#define SO_COOKIE		0x003b
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT	0x5002
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 6bcff69..cec54dc 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -130,18 +130,17 @@
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return parent + 8UL;
 
+	trace.func = self_addr;
+	trace.depth = current->curr_ret_stack + 1;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace))
+		return parent + 8UL;
+
 	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
 				     frame_pointer, NULL) == -EBUSY)
 		return parent + 8UL;
 
-	trace.func = self_addr;
-
-	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		current->curr_ret_stack--;
-		return parent + 8UL;
-	}
-
 	return return_hooker;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb82871..3b7092d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@
 
 
 	/* Saves us work later. */
-	memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
 	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
 	i += 1;
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 48bae81..6f6e789 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -14,7 +14,7 @@
 static char *initrd __initdata = NULL;
 static int load_initrd(char *filename, void *buf, int size);
 
-static int __init read_initrd(void)
+int __init read_initrd(void)
 {
 	void *area;
 	long long size;
@@ -46,8 +46,6 @@
 	return 0;
 }
 
-__uml_postsetup(read_initrd);
-
 static int __init uml_initrd_setup(char *line, int *add)
 {
 	initrd = line;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index e8175a8..26b47de 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -336,11 +336,17 @@
 	return start_uml();
 }
 
+int __init __weak read_initrd(void)
+{
+	return 0;
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	stack_protections((unsigned long) &init_thread_info);
 	setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
 	mem_total_pages(physmem_size, iomem_size, highmem);
+	read_initrd();
 
 	paging_init();
 	strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 34d9e15..4669b3a 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -94,7 +94,7 @@
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
 	for obj in $(filter %.o,$^); do \
-		readelf -S $$obj | grep -qF .rel.local && { \
+		${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
 			echo "error: $$obj has data relocations!" >&2; \
 			exit 1; \
 		} || true; \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9bd7ff5..70c9cc3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -257,6 +257,7 @@
 #endif
 
 int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index faf3687..a300aa1 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -315,10 +315,10 @@
 #define __get_user_asm_u64(x, ptr, retval, errret)			\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
-	asm volatile(ASM_STAC "\n"					\
+	asm volatile("\n"					\
 		     "1:	movl %2,%%eax\n"			\
 		     "2:	movl %3,%%edx\n"			\
-		     "3: " ASM_CLAC "\n"				\
+		     "3:\n"				\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	mov %4,%0\n"				\
 		     "	xorl %%eax,%%eax\n"				\
@@ -327,7 +327,7 @@
 		     ".previous\n"					\
 		     _ASM_EXTABLE(1b, 4b)				\
 		     _ASM_EXTABLE(2b, 4b)				\
-		     : "=r" (retval), "=A"(x)				\
+		     : "=r" (retval), "=&A"(x)				\
 		     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1),	\
 		       "i" (errret), "0" (retval));			\
 })
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 22cda29..8ca5f8a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,16 +598,14 @@
 	}
 }
 
-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD) {
 		/* ErrCodeExt[20:16] */
 		u8 xec = (m->status >> 16) & 0x1f;
 
 		return (xec == 0x0 || xec == 0x8);
-	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
+	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
 		 *
@@ -628,6 +626,7 @@
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
 DEFINE_PER_CPU(unsigned, mce_poll_count);
 
@@ -691,7 +690,7 @@
 
 		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
 
-		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+		if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
 			if (m.status & MCI_STATUS_ADDRV)
 				m.severity = severity;
 
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 2f2b8c7..6f0ab305 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -101,6 +101,7 @@
  * Boot time FPU feature detection code:
  */
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
 
 static void __init fpu__init_system_mxcsr(void)
 {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 43c1528..81bba3c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1735,6 +1735,7 @@
 {
 	struct kvm_arch *ka = &kvm->arch;
 	struct pvclock_vcpu_time_info hv_clock;
+	u64 ret;
 
 	spin_lock(&ka->pvclock_gtod_sync_lock);
 	if (!ka->use_master_clock) {
@@ -1746,10 +1747,17 @@
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
 
+	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
+	get_cpu();
+
 	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
 			   &hv_clock.tsc_shift,
 			   &hv_clock.tsc_to_system_mul);
-	return __pvclock_read_cycles(&hv_clock, rdtsc());
+	ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+
+	put_cpu();
+
+	return ret;
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -3231,11 +3239,14 @@
 	}
 }
 
+#define XSAVE_MXCSR_OFFSET 24
+
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
 					struct kvm_xsave *guest_xsave)
 {
 	u64 xstate_bv =
 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+	u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
 
 	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
 		/*
@@ -3243,11 +3254,13 @@
 		 * CPUID leaf 0xD, index 0, EDX:EAX.  This is for compatibility
 		 * with old userspace.
 		 */
-		if (xstate_bv & ~kvm_supported_xcr0())
+		if (xstate_bv & ~kvm_supported_xcr0() ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		load_xsave(vcpu, (u8 *)guest_xsave->region);
 	} else {
-		if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+		if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+			mxcsr & ~mxcsr_feature_mask)
 			return -EINVAL;
 		memcpy(&vcpu->arch.guest_fpu.state.fxsave,
 			guest_xsave->region, sizeof(struct fxregs_state));
@@ -4750,16 +4763,20 @@
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 {
-	/* TODO: String I/O for in kernel device */
-	int r;
+	int r = 0, i;
 
-	if (vcpu->arch.pio.in)
-		r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
-				    vcpu->arch.pio.size, pd);
-	else
-		r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
-				     vcpu->arch.pio.port, vcpu->arch.pio.size,
-				     pd);
+	for (i = 0; i < vcpu->arch.pio.count; i++) {
+		if (vcpu->arch.pio.in)
+			r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+					    vcpu->arch.pio.size, pd);
+		else
+			r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+					     vcpu->arch.pio.port, vcpu->arch.pio.size,
+					     pd);
+		if (r)
+			break;
+		pd += vcpu->arch.pio.size;
+	}
 	return r;
 }
 
@@ -4797,6 +4814,8 @@
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
+	memset(vcpu->arch.pio_data, 0, size * count);
+
 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
 	if (ret) {
 data_avail:
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 81435d9..fc7ca28 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -101,4 +101,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif	/* _XTENSA_SOCKET_H */
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index f7d0018..93110d7 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -221,6 +221,44 @@
 	return 0;
 }
 
+static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	u8 *buffer, *alignbuffer;
+	unsigned long absize;
+	int ret;
+
+	absize = keylen + alignmask;
+	buffer = kmalloc(absize, GFP_ATOMIC);
+	if (!buffer)
+		return -ENOMEM;
+
+	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
+	memcpy(alignbuffer, key, keylen);
+	ret = cipher->setkey(tfm, alignbuffer, keylen);
+	kzfree(buffer);
+	return ret;
+}
+
+static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+			   unsigned int keylen)
+{
+	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
+	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+
+	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if ((unsigned long)key & alignmask)
+		return skcipher_setkey_unaligned(tfm, key, keylen);
+
+	return cipher->setkey(tfm, key, keylen);
+}
+
 static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
@@ -241,7 +279,7 @@
 	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
 		return crypto_init_skcipher_ops_ablkcipher(tfm);
 
-	skcipher->setkey = alg->setkey;
+	skcipher->setkey = skcipher_setkey;
 	skcipher->encrypt = alg->encrypt;
 	skcipher->decrypt = alg->decrypt;
 	skcipher->ivsize = alg->ivsize;
diff --git a/drivers/Makefile b/drivers/Makefile
index 990f63c..d0abb5a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
+obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SCSI)		+= scsi/
 obj-y				+= nvme/
 obj-$(CONFIG_ATA)		+= ata/
@@ -131,7 +132,6 @@
 obj-$(CONFIG_INFINIBAND)	+= infiniband/
 obj-$(CONFIG_SGI_SN)		+= sn/
 obj-y				+= firmware/
-obj-$(CONFIG_CRYPTO)		+= crypto/
 obj-$(CONFIG_SUPERH)		+= sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y				+= clocksource/
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5a8c1..e19f530 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -113,7 +113,7 @@
 
 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
 static struct acpi_device *lid_device;
-static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN;
+static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD;
 
 static unsigned long lid_report_interval __read_mostly = 500;
 module_param(lid_report_interval, ulong, 0644);
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index e5ce81c..e25787a 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -26,7 +26,7 @@
 	struct nfit_spa *nfit_spa;
 
 	/* We only care about memory errors */
-	if (!(mce->status & MCACOD))
+	if (!mce_is_memory_error(mce))
 		return NOTIFY_DONE;
 
 	/*
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4256d9b..b0beb52 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -206,6 +206,7 @@
 	buf->data = dbuf;
 	buf->allocated_size = size;
 	init_completion(&buf->completion);
+	INIT_LIST_HEAD(&buf->list);
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	INIT_LIST_HEAD(&buf->pending_list);
 #endif
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 3f2ce31..9102df7 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1381,24 +1381,14 @@
 			goto bail;
 	}
 
-	PERF(fl->profile, fl->perf.invargs,
-	if (!fl->sctx->smmu.coherent) {
+	if (!fl->sctx->smmu.coherent)
 		inv_args_pre(ctx);
-		if (mode == FASTRPC_MODE_SERIAL)
-			inv_args(ctx);
-	}
-	PERF_END);
-
 	PERF(fl->profile, fl->perf.link,
 	VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
 	PERF_END);
 
 	if (err)
 		goto bail;
-	PERF(fl->profile, fl->perf.invargs,
-	if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
-		inv_args(ctx);
-	PERF_END);
  wait:
 	if (kernel)
 		wait_for_completion(&ctx->work);
@@ -1408,6 +1398,12 @@
 		if (err)
 			goto bail;
 	}
+
+	PERF(fl->profile, fl->perf.invargs,
+	if (!fl->sctx->smmu.coherent)
+		inv_args(ctx);
+	PERF_END);
+
 	VERIFY(err, 0 == (err = ctx->retval));
 	if (err)
 		goto bail;
@@ -1790,11 +1786,9 @@
 		link->port_state = FASTRPC_LINK_DISCONNECTED;
 		break;
 	case GLINK_REMOTE_DISCONNECTED:
-		if (me->channel[cid].chan &&
-			link->link_state == FASTRPC_LINK_STATE_UP) {
+		if (me->channel[cid].chan) {
 			fastrpc_glink_close(me->channel[cid].chan, cid);
 			me->channel[cid].chan = 0;
-			link->port_state = FASTRPC_LINK_DISCONNECTED;
 		}
 		break;
 	default:
@@ -1960,10 +1954,9 @@
 	if (err)
 		goto bail;
 
-	if (link->port_state == FASTRPC_LINK_CONNECTED ||
-		link->port_state == FASTRPC_LINK_CONNECTING) {
+	VERIFY(err, (link->port_state == FASTRPC_LINK_DISCONNECTED));
+	if (err)
 		goto bail;
-	}
 
 	link->port_state = FASTRPC_LINK_CONNECTING;
 	cfg->priv = (void *)(uintptr_t)cid;
@@ -2113,7 +2106,9 @@
 	fl->ssrcount = me->channel[cid].ssrcount;
 	if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
 	    (me->channel[cid].chan == 0)) {
-		fastrpc_glink_register(cid, me);
+		VERIFY(err, 0 == fastrpc_glink_register(cid, me));
+		if (err)
+			goto bail;
 		VERIFY(err, 0 == fastrpc_glink_open(cid));
 		if (err)
 			goto bail;
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629..73f2fe8 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c4094c4..34ef474 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@
 	} else if (!strcmp(str, "auto")) {
 		parport_nr[0] = LP_PARPORT_AUTO;
 	} else if (!strcmp(str, "none")) {
-		parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		if (parport_ptr < LP_NO)
+			parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+		else
+			printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+			       str);
 	} else if (!strcmp(str, "reset")) {
 		reset = 1;
 	}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 7e4a9d1..6e0cbe0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -340,6 +340,11 @@
 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 {
 	size_t size = vma->vm_end - vma->vm_start;
+	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+	/* It's illegal to wrap around the end of the physical address space. */
+	if (offset + (phys_addr_t)size < offset)
+		return -EINVAL;
 
 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
 		return -EINVAL;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7..a7de8ae 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@
 
 	rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
@@ -387,7 +387,7 @@
 	for (i = 0; i < bytes_to_write; i++) {
 		rc = wait_for_bulk_out_ready(dev);
 		if (rc <= 0) {
-			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+			DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
 			       rc);
 			DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 			if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@
 	rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
 
 	if (rc <= 0) {
-		DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+		DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
 		DEBUGP(2, dev, "<- cm4040_write (failed)\n");
 		if (rc == -ERESTARTSYS)
 			return rc;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a7c870a..fa0f668 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -111,8 +111,7 @@
 
 	memcpy_fromio(buf, priv->rsp, 6);
 	expected = be32_to_cpup((__be32 *) &buf[2]);
-
-	if (expected > count)
+	if (expected > count || expected < 6)
 		return -EIO;
 
 	memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index e3a9155..c642877 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -49,9 +49,10 @@
  */
 #define TPM_I2C_MAX_BUF_SIZE           32
 #define TPM_I2C_RETRY_COUNT            32
-#define TPM_I2C_BUS_DELAY              1       /* msec */
-#define TPM_I2C_RETRY_DELAY_SHORT      2       /* msec */
-#define TPM_I2C_RETRY_DELAY_LONG       10      /* msec */
+#define TPM_I2C_BUS_DELAY              1000      	/* usec */
+#define TPM_I2C_RETRY_DELAY_SHORT      (2 * 1000)	/* usec */
+#define TPM_I2C_RETRY_DELAY_LONG       (10 * 1000) 	/* usec */
+#define TPM_I2C_DELAY_RANGE            300		/* usec */
 
 #define OF_IS_TPM2 ((void *)1)
 #define I2C_IS_TPM2 1
@@ -123,7 +124,9 @@
 	/* this causes the current command to be aborted */
 	for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
 		status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
-		msleep(TPM_I2C_BUS_DELAY);
+		if (status < 0)
+			usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+				     + TPM_I2C_DELAY_RANGE);
 	}
 	return status;
 }
@@ -160,7 +163,8 @@
 			burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
 			break;
 		}
-		msleep(TPM_I2C_BUS_DELAY);
+		usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
+			     + TPM_I2C_DELAY_RANGE);
 	} while (time_before(jiffies, stop));
 
 	return burst_count;
@@ -203,13 +207,17 @@
 			return 0;
 
 		/* use polling to wait for the event */
-		ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
+		ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
 		stop = jiffies + timeout;
 		do {
 			if (time_before(jiffies, ten_msec))
-				msleep(TPM_I2C_RETRY_DELAY_SHORT);
+				usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
+					     TPM_I2C_RETRY_DELAY_SHORT
+					     + TPM_I2C_DELAY_RANGE);
 			else
-				msleep(TPM_I2C_RETRY_DELAY_LONG);
+				usleep_range(TPM_I2C_RETRY_DELAY_LONG,
+					     TPM_I2C_RETRY_DELAY_LONG
+					     + TPM_I2C_DELAY_RANGE);
 			status_valid = i2c_nuvoton_check_status(chip, mask,
 								value);
 			if (status_valid)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index a1ce060..4d24ec3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -160,8 +160,10 @@
 	u32 value;
 
 	/* wait for burstcount */
-	/* which timeout value, spec has 2 answers (c & d) */
-	stop = jiffies + chip->timeout_d;
+	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+		stop = jiffies + chip->timeout_a;
+	else
+		stop = jiffies + chip->timeout_d;
 	do {
 		rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
 		if (rc < 0)
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
index dbaad9c..3b97b14 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi.c
@@ -48,8 +48,8 @@
 	struct tpm_tis_data priv;
 	struct spi_device *spi_device;
 
-	u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
-	u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
+	u8 tx_buf[4];
+	u8 rx_buf[4];
 };
 
 static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,120 +57,96 @@
 	return container_of(data, struct tpm_tis_spi_phy, priv);
 }
 
-static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
-				  u16 len, u8 *result)
+static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
+				u8 *buffer, u8 direction)
 {
 	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
+	int ret = 0;
+	int i;
 	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = 0x80 | (len - 1);
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr	      & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
+	struct spi_transfer spi_xfer;
+	u8 transfer_len;
 
 	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
 
-	memset(phy->tx_buf, 0, len);
+	while (len) {
+		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
 
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
+		phy->tx_buf[0] = direction | (transfer_len - 1);
+		phy->tx_buf[1] = 0xd4;
+		phy->tx_buf[2] = addr >> 8;
+		phy->tx_buf[3] = addr;
+
+		memset(&spi_xfer, 0, sizeof(spi_xfer));
+		spi_xfer.tx_buf = phy->tx_buf;
+		spi_xfer.rx_buf = phy->rx_buf;
+		spi_xfer.len = 4;
+		spi_xfer.cs_change = 1;
+
 		spi_message_init(&m);
 		spi_message_add_tail(&spi_xfer, &m);
 		ret = spi_sync_locked(phy->spi_device, &m);
 		if (ret < 0)
 			goto exit;
+
+		if ((phy->rx_buf[3] & 0x01) == 0) {
+			// handle SPI wait states
+			phy->tx_buf[0] = 0;
+
+			for (i = 0; i < TPM_RETRY; i++) {
+				spi_xfer.len = 1;
+				spi_message_init(&m);
+				spi_message_add_tail(&spi_xfer, &m);
+				ret = spi_sync_locked(phy->spi_device, &m);
+				if (ret < 0)
+					goto exit;
+				if (phy->rx_buf[0] & 0x01)
+					break;
+			}
+
+			if (i == TPM_RETRY) {
+				ret = -ETIMEDOUT;
+				goto exit;
+			}
+		}
+
+		spi_xfer.cs_change = 0;
+		spi_xfer.len = transfer_len;
+		spi_xfer.delay_usecs = 5;
+
+		if (direction) {
+			spi_xfer.tx_buf = NULL;
+			spi_xfer.rx_buf = buffer;
+		} else {
+			spi_xfer.tx_buf = buffer;
+			spi_xfer.rx_buf = NULL;
+		}
+
+		spi_message_init(&m);
+		spi_message_add_tail(&spi_xfer, &m);
+		ret = spi_sync_locked(phy->spi_device, &m);
+		if (ret < 0)
+			goto exit;
+
+		len -= transfer_len;
+		buffer += transfer_len;
 	}
 
-	spi_xfer.cs_change = 0;
-	spi_xfer.len = len;
-	spi_xfer.rx_buf = result;
-
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
 exit:
 	spi_bus_unlock(phy->spi_device->master);
 	return ret;
 }
 
+static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
+				  u16 len, u8 *result)
+{
+	return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+}
+
 static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
 				   u16 len, u8 *value)
 {
-	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
-	int ret, i;
-	struct spi_message m;
-	struct spi_transfer spi_xfer = {
-		.tx_buf = phy->tx_buf,
-		.rx_buf = phy->rx_buf,
-		.len = 4,
-	};
-
-	if (len > MAX_SPI_FRAMESIZE)
-		return -ENOMEM;
-
-	phy->tx_buf[0] = len - 1;
-	phy->tx_buf[1] = 0xd4;
-	phy->tx_buf[2] = (addr >> 8)  & 0xFF;
-	phy->tx_buf[3] = addr         & 0xFF;
-
-	spi_xfer.cs_change = 1;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-
-	spi_bus_lock(phy->spi_device->master);
-	ret = spi_sync_locked(phy->spi_device, &m);
-	if (ret < 0)
-		goto exit;
-
-	memset(phy->tx_buf, 0, len);
-
-	/* According to TCG PTP specification, if there is no TPM present at
-	 * all, then the design has a weak pull-up on MISO. If a TPM is not
-	 * present, a pull-up on MISO means that the SB controller sees a 1,
-	 * and will latch in 0xFF on the read.
-	 */
-	for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
-		spi_xfer.len = 1;
-		spi_message_init(&m);
-		spi_message_add_tail(&spi_xfer, &m);
-		ret = spi_sync_locked(phy->spi_device, &m);
-		if (ret < 0)
-			goto exit;
-	}
-
-	spi_xfer.len = len;
-	spi_xfer.tx_buf = value;
-	spi_xfer.cs_change = 0;
-	spi_xfer.tx_buf = value;
-	spi_message_init(&m);
-	spi_message_add_tail(&spi_xfer, &m);
-	ret = spi_sync_locked(phy->spi_device, &m);
-
-exit:
-	spi_bus_unlock(phy->spi_device->master);
-	return ret;
+	return tpm_tis_spi_transfer(data, addr, len, value, 0);
 }
 
 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1f0c111..89201e2 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -557,7 +557,7 @@
 		pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i],
 				uv[max_lvl + i]);
 		rc = regulator_set_voltage(r[i], uv[new_base + i],
-				uv[max_lvl + i]);
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
 		if (rc)
 			goto set_voltage_fail;
 
@@ -578,11 +578,13 @@
 	return rc;
 
 enable_disable_fail:
-	regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+	regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
 
 set_voltage_fail:
 	for (i--; i >= 0; i--) {
-		regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+		regulator_set_voltage(r[i], uv[cur_base + i],
+		       vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
 		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
 			regulator_disable(r[i]);
 		else if (level == 0)
@@ -693,6 +695,9 @@
 {
 	struct clk_handoff_vdd *v;
 
+	if (vdd->skip_handoff)
+		return 0;
+
 	list_for_each_entry(v, &clk_handoff_vdd_list, list) {
 		if (v->vdd_class == vdd)
 			return 0;
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index adbabea..03d3ab9 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -87,7 +87,7 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -278,6 +278,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -316,7 +317,6 @@
 	{ }
 };
 
-
 static struct clk_rcg2 cam_cc_cci_clk_src = {
 	.cmd_rcgr = 0xb0d8,
 	.mnd_width = 8,
@@ -341,7 +341,7 @@
 	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
 	F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
-	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	{ }
 };
 
@@ -430,6 +430,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
@@ -490,13 +491,22 @@
 	},
 };
 
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
 static struct clk_rcg2 cam_cc_icp_clk_src = {
 	.cmd_rcgr = 0xb088,
 	.mnd_width = 0,
 	.hid_width = 5,
 	.enable_safe_config = true,
 	.parent_map = cam_cc_parent_map_0,
-	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.freq_tbl = ftbl_cam_cc_icp_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "cam_cc_icp_clk_src",
 		.parent_names = cam_cc_parent_names_0,
@@ -513,6 +523,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -544,6 +555,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
 	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
 	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
@@ -655,6 +667,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
 	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
@@ -733,6 +746,7 @@
 };
 
 static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
 	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
 	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
 	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index d15d1bb..fd3617b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -59,7 +59,6 @@
 
 #define FABIA_USER_CTL_LO	0xc
 #define FABIA_USER_CTL_HI	0x10
-#define FABIA_CAL_L_VAL		0x8
 #define FABIA_FRAC_VAL		0x38
 #define FABIA_OPMODE		0x2c
 #define FABIA_PLL_STANDBY	0x0
@@ -463,12 +462,9 @@
 {
 	u32 val, mask;
 
-	if (config->l) {
+	if (config->l)
 		regmap_write(regmap, pll->offset + PLL_L_VAL,
 						config->l);
-		regmap_write(regmap, pll->offset + FABIA_CAL_L_VAL,
-						config->l);
-	}
 
 	if (config->frac)
 		regmap_write(regmap, pll->offset + FABIA_FRAC_VAL,
@@ -627,12 +623,6 @@
 	}
 
 	regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
-	/*
-	 * pll_cal_l_val is set to pll_l_val on MOST targets. Set it
-	 * explicitly here for PLL out-of-reset calibration to work
-	 * without a glitch on ALL of them.
-	 */
-	regmap_write(pll->clkr.regmap, off + FABIA_CAL_L_VAL, l);
 	regmap_write(pll->clkr.regmap, off + FABIA_FRAC_VAL, a);
 
 	/* Latch the PLL input */
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 4efecef..2902f87 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -33,6 +33,8 @@
 #include <linux/regmap.h>
 #include <linux/uaccess.h>
 #include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/slab.h>
 #include <soc/qcom/scm.h>
 #include <dt-bindings/clock/qcom,cpucc-sdm845.h>
 
@@ -42,6 +44,7 @@
 #include "clk-voter.h"
 #include "clk-debug.h"
 
+#define OSM_INIT_RATE			300000000UL
 #define OSM_TABLE_SIZE			40
 #define SINGLE_CORE			1
 #define MAX_CLUSTER_CNT			3
@@ -236,7 +239,6 @@
 	unsigned long pbases[NUM_BASES];
 	spinlock_t lock;
 
-	u32 cpu_reg_mask;
 	u32 num_entries;
 	u32 cluster_num;
 	u32 core_num;
@@ -738,7 +740,6 @@
 
 static struct clk_osm l3_clk = {
 	.cluster_num = 0,
-	.cpu_reg_mask = 0x0,
 	.hw.init = &osm_clks_init[0],
 };
 
@@ -747,7 +748,6 @@
 
 static struct clk_osm pwrcl_clk = {
 	.cluster_num = 1,
-	.cpu_reg_mask = 0x300,
 	.hw.init = &osm_clks_init[1],
 };
 
@@ -804,7 +804,6 @@
 
 static struct clk_osm perfcl_clk = {
 	.cluster_num = 2,
-	.cpu_reg_mask = 0x700,
 	.hw.init = &osm_clks_init[2],
 };
 
@@ -888,67 +887,232 @@
 	const u32 *cell;
 	u64 hwid;
 	static struct clk_osm *cpu_clk_map[NR_CPUS];
+	struct clk_osm *clk_cpu_map[] = {
+		&cpu0_pwrcl_clk,
+		&cpu1_pwrcl_clk,
+		&cpu2_pwrcl_clk,
+		&cpu3_pwrcl_clk,
+		&cpu4_perfcl_clk,
+		&cpu5_perfcl_clk,
+		&cpu6_perfcl_clk,
+		&cpu7_perfcl_clk,
+	};
 
-	if (cpu_clk_map[cpu])
-		return cpu_clk_map[cpu];
+	if (!cpu_clk_map[cpu]) {
+		cpu_node = of_get_cpu_node(cpu, NULL);
+		if (!cpu_node)
+			return NULL;
 
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		goto fail;
-
-	cell = of_get_property(cpu_node, "reg", NULL);
-	if (!cell) {
-		pr_err("%s: missing reg property\n", cpu_node->full_name);
-		goto fail;
-	}
-
-	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
-	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
-		switch (cpu) {
-		case 0:
-			cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
-			break;
-		case 1:
-			cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
-			break;
-		case 2:
-			cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
-			break;
-		case 3:
-			cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
-			break;
-		default:
-			pr_err("unsupported CPU number for power cluster\n");
+		cell = of_get_property(cpu_node, "reg", NULL);
+		if (!cell) {
+			pr_err("%s: missing reg property\n",
+			       cpu_node->full_name);
+			of_node_put(cpu_node);
 			return NULL;
 		}
-		return cpu_clk_map[cpu];
-	}
 
-	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
-		switch (cpu) {
-		case 4:
-			cpu_clk_map[cpu] = &cpu4_perfcl_clk;
-			break;
-		case 5:
-			cpu_clk_map[cpu] = &cpu5_perfcl_clk;
-			break;
-		case 6:
-			cpu_clk_map[cpu] = &cpu6_perfcl_clk;
-			break;
-		case 7:
-			cpu_clk_map[cpu] = &cpu7_perfcl_clk;
-			break;
-		default:
-			pr_err("unsupported CPU number for perf cluster\n");
+		hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+		hwid = (hwid >> 8) & 0xff;
+		of_node_put(cpu_node);
+		if (hwid >= ARRAY_SIZE(clk_cpu_map)) {
+			pr_err("unsupported CPU number - %d (hw_id - %llu)\n",
+			       cpu, hwid);
 			return NULL;
 		}
-		return cpu_clk_map[cpu];
+
+		cpu_clk_map[cpu] = clk_cpu_map[hwid];
 	}
 
-fail:
-	return NULL;
+	return cpu_clk_map[cpu];
 }
 
+static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
+{
+	int cpu;
+	struct clk_hw *parent, *c_parent;
+	struct clk_osm *first;
+	struct clk_osm *c, *n;
+
+	c = logical_cpu_to_clk(policy->cpu);
+	if (!c)
+		return NULL;
+
+	c_parent = clk_hw_get_parent(&c->hw);
+	if (!c_parent)
+		return NULL;
+
+	/*
+	 * Don't put any other CPUs into the policy if we're doing
+	 * per_core_dcvs
+	 */
+	if (to_clk_osm(c_parent)->per_core_dcvs)
+		return c;
+
+	first = c;
+	/* Find CPUs that share the same clock domain */
+	for_each_possible_cpu(cpu) {
+		n = logical_cpu_to_clk(cpu);
+		if (!n)
+			continue;
+
+		parent = clk_hw_get_parent(&n->hw);
+		if (!parent)
+			return NULL;
+		if (parent != c_parent)
+			continue;
+
+		cpumask_set_cpu(cpu, policy->cpus);
+		if (n->core_num == 0)
+			first = n;
+	}
+
+	return first;
+}
+
+static void
+osm_set_index(struct clk_osm *c, unsigned int index, unsigned int num)
+{
+	clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG(num), OSM_BASE);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(c, OSM_BASE);
+}
+
+static int
+osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
+{
+	struct clk_osm *c = policy->driver_data;
+
+	osm_set_index(c, index, c->core_num);
+	return 0;
+}
+
+static unsigned int osm_cpufreq_get(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+	struct clk_osm *c;
+	u32 index;
+
+	if (!policy)
+		return 0;
+
+	c = policy->driver_data;
+	index = clk_osm_read_reg(c, DCVS_PERF_STATE_DESIRED_REG(c->core_num));
+
+	return policy->freq_table[index].frequency;
+}
+
+static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *table;
+	struct clk_osm *c, *parent;
+	struct clk_hw *p_hw;
+	int ret;
+	unsigned int i;
+	unsigned int xo_kHz;
+
+	c = osm_configure_policy(policy);
+	if (!c) {
+		pr_err("no clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+
+	p_hw = clk_hw_get_parent(&c->hw);
+	if (!p_hw) {
+		pr_err("no parent clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+
+	parent = to_clk_osm(p_hw);
+	c->vbases[OSM_BASE] = parent->vbases[OSM_BASE];
+
+	p_hw = clk_hw_get_parent(p_hw);
+	if (!p_hw) {
+		pr_err("no xo clock for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+	xo_kHz = clk_hw_get_rate(p_hw) / 1000;
+
+	table = kcalloc(OSM_TABLE_SIZE + 1, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		u32 data, src, div, lval, core_count;
+
+		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
+		src = (data & GENMASK(31, 30)) >> 30;
+		div = (data & GENMASK(29, 28)) >> 28;
+		lval = data & GENMASK(7, 0);
+		core_count = CORE_COUNT_VAL(data);
+
+		if (!src)
+			table[i].frequency = OSM_INIT_RATE / 1000;
+		else
+			table[i].frequency = xo_kHz * lval;
+		table[i].driver_data = table[i].frequency;
+
+		if (core_count != MAX_CORE_COUNT)
+			table[i].frequency = CPUFREQ_ENTRY_INVALID;
+
+		/* Two identical frequencies in a row mark the end of the table */
+		if (i > 0 && table[i - 1].driver_data == table[i].driver_data) {
+			struct cpufreq_frequency_table *prev = &table[i - 1];
+
+			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
+				prev->flags = CPUFREQ_BOOST_FREQ;
+				prev->frequency = prev->driver_data;
+			}
+
+			break;
+		}
+	}
+	table[i].frequency = CPUFREQ_TABLE_END;
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+		goto err;
+	}
+
+	policy->driver_data = c;
+
+	clk_osm_enable(&parent->hw);
+	udelay(300);
+
+	return 0;
+
+err:
+	kfree(table);
+	return ret;
+}
+
+static int osm_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	kfree(policy->freq_table);
+	policy->freq_table = NULL;
+	return 0;
+}
+
+static struct freq_attr *osm_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	&cpufreq_freq_attr_scaling_boost_freqs,
+	NULL
+};
+
+static struct cpufreq_driver qcom_osm_cpufreq_driver = {
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= osm_cpufreq_target_index,
+	.get		= osm_cpufreq_get,
+	.init		= osm_cpufreq_cpu_init,
+	.exit		= osm_cpufreq_cpu_exit,
+	.name		= "osm-cpufreq",
+	.attr		= osm_cpufreq_attr,
+	.boost_enabled	= true,
+};
+
 static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
 {
 	u64 temp;
@@ -1737,6 +1901,8 @@
 	struct device *cpu_dev;
 	struct clk_osm *c, *parent;
 	struct clk_hw *hw_parent;
+	struct device_node *l3_node_0, *l3_node_4;
+	struct platform_device *l3_dev_0, *l3_dev_4;
 
 	for_each_possible_cpu(cpu) {
 		c = logical_cpu_to_clk(cpu);
@@ -1754,7 +1920,35 @@
 					dev_name(cpu_dev));
 	}
 
-	/*TODO: Figure out which device to tag the L3 table to */
+	l3_node_0 = of_parse_phandle(pdev->dev.of_node, "l3-dev0", 0);
+	if (!l3_node_0) {
+		pr_err("can't find the L3 cluster 0 dt node\n");
+		return;
+	}
+
+	l3_dev_0 = of_find_device_by_node(l3_node_0);
+	if (!l3_dev_0) {
+		pr_err("can't find the L3 cluster 0 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_0->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 0\n");
+
+	l3_node_4 = of_parse_phandle(pdev->dev.of_node, "l3-dev4", 0);
+	if (!l3_node_4) {
+		pr_err("can't find the L3 cluster 1 dt node\n");
+		return;
+	}
+
+	l3_dev_4 = of_find_device_by_node(l3_node_4);
+	if (!l3_dev_4) {
+		pr_err("can't find the L3 cluster 1 dt device\n");
+		return;
+	}
+
+	if (add_opp(&l3_clk, &l3_dev_4->dev))
+		pr_err("Failed to add OPP levels for L3 cluster 1\n");
 }
 
 static u64 clk_osm_get_cpu_cycle_counter(int cpu)
@@ -2886,16 +3080,13 @@
 	return 0;
 }
 
-static unsigned long init_rate = 300000000;
-
 static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
 {
-	int rc = 0, cpu, i;
+	int rc = 0, i;
 	int pvs_ver = 0;
 	u32 pte_efuse, val;
 	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
 	struct clk *ext_xo_clk, *clk;
-	struct clk_osm *c, *parent;
 	struct device *dev = &pdev->dev;
 	struct clk_onecell_data *clk_data;
 	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
@@ -3203,7 +3394,7 @@
 	get_online_cpus();
 
 	/* Set the L3 clock to run off GPLL0 and enable OSM for the domain */
-	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	rc = clk_set_rate(l3_clk.hw.clk, OSM_INIT_RATE);
 	if (rc) {
 		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
 			rc);
@@ -3213,43 +3404,12 @@
 		     "Failed to enable clock for L3\n");
 	udelay(300);
 
-	/* Set CPU clocks to run off GPLL0 and enable OSM for both domains */
-	for_each_online_cpu(cpu) {
-		c = logical_cpu_to_clk(cpu);
-		if (!c) {
-			pr_err("no clock device for CPU=%d\n", cpu);
-			return -EINVAL;
-		}
-
-		parent = to_clk_osm(clk_hw_get_parent(&c->hw));
-		if (!parent->per_core_dcvs) {
-			if (cpu >= 0 && cpu <= 3)
-				c = logical_cpu_to_clk(0);
-			else if (cpu >= 4 && cpu <= 7)
-				c = logical_cpu_to_clk(4);
-			if (!c)
-				return -EINVAL;
-		}
-
-		rc = clk_set_rate(c->hw.clk, init_rate);
-		if (rc) {
-			dev_err(&pdev->dev, "Unable to set init rate on %s, rc=%d\n",
-					clk_hw_get_name(&parent->hw), rc);
-			goto provider_err;
-		}
-		WARN(clk_prepare_enable(c->hw.clk),
-					"Failed to enable OSM for %s\n",
-					clk_hw_get_name(&parent->hw));
-		udelay(300);
+	/* Configure default rate to lowest frequency */
+	for (i = 0; i < MAX_CORE_COUNT; i++) {
+		osm_set_index(&pwrcl_clk, 0, i);
+		osm_set_index(&perfcl_clk, 0, i);
 	}
 
-	/*
-	 * Add always-on votes for the CPU cluster clocks since we do not want
-	 * to re-enable OSM at any point.
-	 */
-	clk_prepare_enable(pwrcl_clk.hw.clk);
-	clk_prepare_enable(perfcl_clk.hw.clk);
-
 	populate_opp_table(pdev);
 	populate_debugfs_dir(&l3_clk);
 	populate_debugfs_dir(&pwrcl_clk);
@@ -3257,18 +3417,24 @@
 
 	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	register_cpu_cycle_counter_cb(&cb);
-	pr_info("OSM driver inited\n");
 	put_online_cpus();
 
+	rc = cpufreq_register_driver(&qcom_osm_cpufreq_driver);
+	if (rc)
+		goto provider_err;
+
+	pr_info("OSM CPUFreq driver inited\n");
 	return 0;
+
 provider_err:
 	if (clk_data)
 		devm_kfree(&pdev->dev, clk_data->clks);
 clk_err:
 	devm_kfree(&pdev->dev, clk_data);
 exit:
-	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
-	panic("Unable to Setup OSM");
+	dev_err(&pdev->dev, "OSM CPUFreq driver failed to initialize, rc=%d\n",
+		rc);
+	panic("Unable to Setup OSM CPUFreq");
 }
 
 static const struct of_device_id match_table[] = {
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index 44c5b81..9ffa555 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -104,14 +104,6 @@
 	"disp_cc_mdss_rot_clk",
 	"disp_cc_mdss_rscc_ahb_clk",
 	"disp_cc_mdss_rscc_vsync_clk",
-	"disp_cc_mdss_spdm_debug_clk",
-	"disp_cc_mdss_spdm_dp_crypto_clk",
-	"disp_cc_mdss_spdm_dp_pixel1_clk",
-	"disp_cc_mdss_spdm_dp_pixel_clk",
-	"disp_cc_mdss_spdm_mdp_clk",
-	"disp_cc_mdss_spdm_pclk0_clk",
-	"disp_cc_mdss_spdm_pclk1_clk",
-	"disp_cc_mdss_spdm_rot_clk",
 	"disp_cc_mdss_vsync_clk",
 	"measure_only_snoc_clk",
 	"measure_only_cnoc_clk",
@@ -254,13 +246,13 @@
 	"gpu_cc_cxo_aon_clk",
 	"gpu_cc_cxo_clk",
 	"gpu_cc_gx_cxo_clk",
+	"gpu_cc_gx_gfx3d_clk",
 	"gpu_cc_gx_gmu_clk",
 	"gpu_cc_gx_qdss_tsctr_clk",
 	"gpu_cc_gx_vsense_clk",
 	"gpu_cc_rbcpr_ahb_clk",
 	"gpu_cc_rbcpr_clk",
 	"gpu_cc_sleep_clk",
-	"gpu_cc_spdm_gx_gfx3d_div_clk",
 	"video_cc_apb_clk",
 	"video_cc_at_clk",
 	"video_cc_qdss_trig_clk",
@@ -433,22 +425,6 @@
 			0x17, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_rscc_vsync_clk", 0x47, 4, DISP_CC,
 			0x18, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_debug_clk", 0x47, 4, DISP_CC,
-			0x20, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_crypto_clk", 0x47, 4, DISP_CC,
-			0x1D, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_pixel1_clk", 0x47, 4, DISP_CC,
-			0x1F, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_dp_pixel_clk", 0x47, 4, DISP_CC,
-			0x1E, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_mdp_clk", 0x47, 4, DISP_CC,
-			0x1B, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_pclk0_clk", 0x47, 4, DISP_CC,
-			0x19, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_pclk1_clk", 0x47, 4, DISP_CC,
-			0x1A, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
-		{ "disp_cc_mdss_spdm_rot_clk", 0x47, 4, DISP_CC,
-			0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
 			0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "measure_only_snoc_clk", 0x7, 4, GCC,
@@ -733,6 +709,8 @@
 			0xA, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_cxo_clk", 0x144, 4, GPU_CC,
 			0xF, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
+		{ "gpu_cc_gx_gfx3d_clk", 0x144, 4, GPU_CC,
+			0xC, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_gmu_clk", 0x144, 4, GPU_CC,
 			0x10, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_gx_qdss_tsctr_clk", 0x144, 4, GPU_CC,
@@ -745,8 +723,6 @@
 			0x1C, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "gpu_cc_sleep_clk", 0x144, 4, GPU_CC,
 			0x17, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
-		{ "gpu_cc_spdm_gx_gfx3d_div_clk", 0x144, 4, GPU_CC,
-			0x1E, 0xFF, 0, 0x3, 0, 1, 0x1568, 0x10FC, 0x1100 },
 		{ "video_cc_apb_clk", 0x48, 4, VIDEO_CC,
 			0x8, 0x3F, 0, 0x7, 0, 1, 0xA4C, 0xA50, 0xA58 },
 		{ "video_cc_at_clk", 0x48, 4, VIDEO_CC,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 2742ab3..4e0711d 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -197,7 +197,7 @@
 };
 
 static struct pll_vco fabia_vco[] = {
-	{ 250000000, 2000000000, 0 },
+	{ 249600000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
 };
 
@@ -790,8 +790,8 @@
 	F(400000, P_BI_TCXO, 12, 1, 4),
 	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
-	F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
-	F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+	F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+	F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
 	F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
 	F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
 	{ }
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index ae9d509..f2fa577 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -105,20 +105,6 @@
 	"core_bi_pll_test_se",
 };
 
-static const struct parent_map gpu_cc_parent_map_2[] = {
-	{ P_BI_TCXO, 0 },
-	{ P_GPLL0_OUT_MAIN, 5 },
-	{ P_GPLL0_OUT_MAIN_DIV, 6 },
-	{ P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gpu_cc_parent_names_2[] = {
-	"bi_tcxo",
-	"gcc_gpu_gpll0_clk_src",
-	"gcc_gpu_gpll0_div_clk_src",
-	"core_bi_pll_test_se",
-};
-
 static struct pll_vco fabia_vco[] = {
 	{ 250000000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
@@ -232,29 +218,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
-	F(19200000, P_BI_TCXO, 1, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
-	.cmd_rcgr = 0x10b0,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gpu_cc_parent_map_2,
-	.freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gpu_cc_rbcpr_clk_src",
-		.parent_names = gpu_cc_parent_names_2,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP2(
-			MIN, 19200000,
-			NOMINAL, 50000000),
-	},
-};
-
 static struct clk_branch gpu_cc_acd_ahb_clk = {
 	.halt_reg = 0x1168,
 	.halt_check = BRANCH_HALT,
@@ -488,37 +451,6 @@
 	},
 };
 
-static struct clk_branch gpu_cc_rbcpr_ahb_clk = {
-	.halt_reg = 0x10f4,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f4,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gpu_cc_rbcpr_clk = {
-	.halt_reg = 0x10f0,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0x10f0,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gpu_cc_rbcpr_clk",
-			.parent_names = (const char *[]){
-				"gpu_cc_rbcpr_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
 	[GPU_CC_ACD_AHB_CLK] = &gpu_cc_acd_ahb_clk.clkr,
 	[GPU_CC_ACD_CXO_CLK] = &gpu_cc_acd_cxo_clk.clkr,
@@ -536,9 +468,6 @@
 	[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
 	[GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
 	[GPU_CC_PLL_TEST_CLK] = &gpu_cc_pll_test_clk.clkr,
-	[GPU_CC_RBCPR_AHB_CLK] = &gpu_cc_rbcpr_ahb_clk.clkr,
-	[GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
-	[GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
 };
 
 static struct clk_regmap *gpu_cc_gfx_sdm845_clocks[] = {
@@ -554,7 +483,6 @@
 	[GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 },
 	[GPUCC_GPU_CC_GMU_BCR] = { 0x111c },
 	[GPUCC_GPU_CC_GX_BCR] = { 0x1008 },
-	[GPUCC_GPU_CC_RBCPR_BCR] = { 0x10ac },
 	[GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 },
 	[GPUCC_GPU_CC_XO_BCR] = { 0x1000 },
 };
@@ -636,6 +564,9 @@
 		return PTR_ERR(vdd_gfx.regulator[0]);
 	}
 
+	/* Avoid turning on the rail during clock registration */
+	vdd_gfx.skip_handoff = true;
+
 	clk_fabia_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
 
 	ret = qcom_cc_really_probe(pdev, &gpu_cc_gfx_sdm845_desc, regmap);
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index d183393..87feee6 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,3 +1,6 @@
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
 obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
new file mode 100644
index 0000000..eb2092a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
@@ -0,0 +1,766 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+#define DP_PHY_REVISION_ID0			0x0000
+#define DP_PHY_REVISION_ID1			0x0004
+#define DP_PHY_REVISION_ID2			0x0008
+#define DP_PHY_REVISION_ID3			0x000C
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_PD_CTL				0x0018
+#define DP_PHY_MODE				0x001C
+
+#define DP_PHY_AUX_CFG0				0x0020
+#define DP_PHY_AUX_CFG1				0x0024
+#define DP_PHY_AUX_CFG2				0x0028
+#define DP_PHY_AUX_CFG3				0x002C
+#define DP_PHY_AUX_CFG4				0x0030
+#define DP_PHY_AUX_CFG5				0x0034
+#define DP_PHY_AUX_CFG6				0x0038
+#define DP_PHY_AUX_CFG7				0x003C
+#define DP_PHY_AUX_CFG8				0x0040
+#define DP_PHY_AUX_CFG9				0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
+#define DP_PHY_AUX_BIST_CFG			0x0050
+
+#define DP_PHY_VCO_DIV				0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
+#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
+
+#define DP_PHY_SPARE0				0x00AC
+#define DP_PHY_STATUS				0x00C0
+
+/* Tx registers */
+#define TXn_BIST_MODE_LANENO			0x0000
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x001C
+
+#define TXn_RESET_TSYNC_EN			0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
+#define TXn_TX_BAND				0x002C
+#define TXn_SLEW_CNTL				0x0030
+#define TXn_INTERFACE_SELECT			0x0034
+
+#define TXn_RES_CODE_LANE_TX			0x003C
+#define TXn_RES_CODE_LANE_RX			0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
+
+#define TXn_DEBUG_BUS_SEL			0x0058
+#define TXn_TRANSCEIVER_BIAS_EN			0x005C
+#define TXn_HIGHZ_DRVR_EN			0x0060
+#define TXn_TX_POL_INV				0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
+
+#define TXn_LANE_MODE_1				0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00C0
+#define TXn_TX_INTERFACE_MODE			0x00C4
+
+#define TXn_VMODE_CTRL1				0x00F0
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1			0x0000
+#define QSERDES_COM_ATB_SEL2			0x0004
+#define QSERDES_COM_FREQ_UPDATE			0x0008
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_SSC_EN_CENTER		0x0010
+#define QSERDES_COM_SSC_ADJ_PER1		0x0014
+#define QSERDES_COM_SSC_ADJ_PER2		0x0018
+#define QSERDES_COM_SSC_PER1			0x001C
+#define QSERDES_COM_SSC_PER2			0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
+#define QSERDES_COM_POST_DIV			0x002C
+#define QSERDES_COM_POST_DIV_MUX		0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
+#define QSERDES_COM_CLK_ENABLE1			0x0038
+#define QSERDES_COM_SYS_CLK_CTRL		0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
+#define QSERDES_COM_PLL_EN			0x0044
+#define QSERDES_COM_PLL_IVCO			0x0048
+#define QSERDES_COM_CMN_IETRIM			0x004C
+#define QSERDES_COM_CMN_IPTRIM			0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0060
+#define QSERDES_COM_CP_CTRL_MODE1		0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
+#define QSERDES_COM_PLL_CNTRL			0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
+#define QSERDES_COM_RESETSM_CNTRL		0x0088
+#define QSERDES_COM_RESETSM_CNTRL2		0x008C
+#define QSERDES_COM_LOCK_CMP_EN			0x0090
+#define QSERDES_COM_LOCK_CMP_CFG		0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0		0x00B0
+#define QSERDES_COM_DEC_START_MODE1		0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
+#define QSERDES_COM_INTEGLOOP_EN		0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
+
+#define QSERDES_COM_CMN_STATUS			0x0124
+#define QSERDES_COM_RESET_SM_STATUS		0x0128
+
+#define QSERDES_COM_CLK_SEL			0x0138
+#define QSERDES_COM_HSCLK_SEL			0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
+
+#define QSERDES_COM_SW_RESET			0x0150
+#define QSERDES_COM_CORE_CLK_EN			0x0154
+#define QSERDES_COM_C_READY_STATUS		0x0158
+#define QSERDES_COM_CMN_CONFIG			0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
+
+#define DP_PHY_PLL_POLL_SLEEP_US		500
+#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
+int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
+{
+	struct mdss_pll_resources *dp_res = context;
+	int rc;
+	u32 auxclk_div;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;	/* bits 0 to 1 */
+
+	if (val == 0) /* mux parent index = 0 */
+		auxclk_div |= 1;
+	else if (val == 1) /* mux parent index = 1 */
+		auxclk_div |= 2;
+	else if (val == 2) /* mux parent index = 2 */
+		auxclk_div |= 0;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	pr_debug("%s: mux=%d auxclk_div=%x\n", __func__, val, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return 0;
+}
+
+int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
+{
+	int rc;
+	u32 auxclk_div = 0;
+	struct mdss_pll_resources *dp_res = context;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable dp_res resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	if (auxclk_div == 1) /* Default divider */
+		*val = 0;
+	else if (auxclk_div == 2)
+		*val = 1;
+	else if (auxclk_div == 0)
+		*val = 2;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("%s: auxclk_div=%d, val=%d\n", __func__, auxclk_div, *val);
+
+	return 0;
+}
+
+static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dp_res = pdb->pll;
+	u32 spare_value = 0;
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	pdb->lane_cnt = spare_value & 0x0F;
+	pdb->orientation = (spare_value & 0xF0) >> 4;
+
+	pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+			__func__, spare_value, pdb->lane_cnt, pdb->orientation);
+
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_9720MHZDIV1000);
+		pdb->hsclk_sel = 0x0c;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x6f;
+		pdb->lock_cmp2_mode0 = 0x08;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x04;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x0f;
+		pdb->lock_cmp2_mode0 = 0x0e;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x00;
+		pdb->dec_start_mode0 = 0x8c;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x00;
+		pdb->div_frac_start3_mode0 = 0x0a;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x1f;
+		pdb->lock_cmp2_mode0 = 0x1c;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x2;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_8100MHZDIV1000);
+		pdb->hsclk_sel = 0x03;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x2f;
+		pdb->lock_cmp2_mode0 = 0x2a;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x0;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	u32 res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+	res = dp_vco_pll_init_db_10nm(pdb, rate);
+	if (res) {
+		pr_err("VCO Init DB failed\n");
+		return res;
+	}
+
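+	/*
+	 * Configure the PHY power-down control; the value depends on the
+	 * lane count and, for fewer than four lanes, on the plug
+	 * orientation (CC1 vs CC2).
+	 */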
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	/* Different for each clock rates */
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	if (pdb->orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	/* TX Lane configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	/* TX-0 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+	/* TX-1 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	/* dependent on the vco frequency */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+	return res;
+}
+
+static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL lock status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool phy_ready = true;
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & (BIT(1))) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		phy_ready = false;
+	}
+
+	return phy_ready;
+}
+
+static int dp_pll_enable_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+	u32 bias_en, drvr_en;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+	wmb(); /* Make sure the PHY register writes are done */
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	wmb();	/* Make sure the PLL register writes are done */
+
+	if (!dp_10nm_pll_lock_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("%s: PLL is locked\n", __func__);
+
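+	/* Transceiver bias and high-Z driver settings depend on lane count */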
+	if (pdb->lane_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return rc;
+}
+
+
+int dp_vco_prepare_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	pr_debug("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
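+	/*
+	 * If a rate was already configured, reprogram it before enabling
+	 * the PLL, as the PHY registers may have lost their state while
+	 * powered down.
+	 */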
+	if ((dp_res->vco_cached_rate != 0)
+		&& (dp_res->vco_cached_rate == vco->rate)) {
+		rc = vco->hw.init->ops->set_rate(hw,
+			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				dp_res->index, rc);
+			mdss_pll_resource_enable(dp_res, false);
+			goto error;
+		}
+	}
+
+	rc = dp_pll_enable_10nm(hw);
+	if (rc) {
+		mdss_pll_resource_enable(dp_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare_10nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	if (!dp_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!dp_res->pll_on &&
+		mdss_pll_resource_enable(dp_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+	dp_res->vco_cached_rate = vco->rate;
+	dp_pll_disable_10nm(hw);
+
+	dp_res->handoff_resources = false;
+	mdss_pll_resource_enable(dp_res, false);
+	dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate_10nm(vco, rate);
+	if (rc)
+		pr_err("%s: Failed to set clk rate\n", __func__);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	vco->rate = rate;
+
+	return 0;
+}
+
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	int rc;
+	u32 div, hsclk_div, link_clk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
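+	/* Decode QSERDES_COM_HSCLK_SEL into the high-speed clock divider */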
+	if (div == 12)
+		hsclk_div = 6; /* Default */
+	else if (div == 4)
+		hsclk_div = 4;
+	else if (div == 0)
+		hsclk_div = 2;
+	else if (div == 3)
+		hsclk_div = 1;
+	else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
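+	/* Derive the link clock divider from the DP_PHY_AUX_CFG2 setting */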
+	div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+	div >>= 2;
+
+	if ((div & 0x3) == 0)
+		link_clk_div = 5;
+	else if ((div & 0x3) == 1)
+		link_clk_div = 10;
+	else if ((div & 0x3) == 2)
+		link_clk_div = 20;
+	else
+		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+	if (link_clk_div == 20) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 6)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 4)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else if (hsclk_div == 2)
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	dp_res->vco_cached_rate = vco->rate = vco_rate;
+	return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+			unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+	*parent_rate = rrate;
+	return rrate;
+}
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
new file mode 100644
index 0000000..e30ef82
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
@@ -0,0 +1,310 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ *		+------------------------------+
+ *		|         DP_VCO_CLK           |
+ *		|                              |
+ *		|    +-------------------+     |
+ *		|    |   (DP PLL/VCO)    |     |
+ *		|    +---------+---------+     |
+ *		|              v               |
+ *		|   +----------+-----------+   |
+ *		|   | hsclk_divsel_clk_src |   |
+ *		|   +----------+-----------+   |
+ *		+------------------------------+
+ *				|
+ *	 +------------<---------v------------>----------+
+ *	 |                                              |
+ * +-----v------------+                                 |
+ * | dp_link_clk_src  |                                 |
+ * |    divsel_ten    |                                 |
+ * +---------+--------+                                 |
+ *	|                                               |
+ *	|                                               |
+ *	v                                               v
+ * Input to DISPCC block                                |
+ * for link clk, crypto clk                             |
+ * and interface clock                                  |
+ *							|
+ *							|
+ *	+--------<------------+-----------------+---<---+
+ *	|                     |                 |
+ * +-------v------+  +--------v-----+  +--------v------+
+ * | vco_divided  |  | vco_divided  |  | vco_divided   |
+ * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
+ * |              |  |              |  |               |
+ * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
+ * +-------+------+  +-----+--------+  +--------+------+
+ *         |	           |		        |
+ *	v------->----------v-------------<------v
+ *                         |
+ *		+----------+---------+
+ *		|   vco_divided_clk  |
+ *		|       _src_mux     |
+ *		+---------+----------+
+ *                        |
+ *                        v
+ *              Input to DISPCC block
+ *              for DP pixel clock
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_10nm_cfg = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x910,
+};
+
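+/*
+ * The pixel clock source mux is not a plain MMIO mux: parent selection is
+ * done by programming the DP PHY VCO divider, so regmap accesses are routed
+ * through dp_mux_set_parent_10nm()/dp_mux_get_parent_10nm().
+ */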
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+	.reg_write = dp_mux_set_parent_10nm,
+	.reg_read = dp_mux_get_parent_10nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_10nm_vco_clk_ops = {
+	.recalc_rate = dp_vco_recalc_rate_10nm,
+	.set_rate = dp_vco_set_rate_10nm,
+	.round_rate = dp_vco_round_rate_10nm,
+	.prepare = dp_vco_prepare_10nm,
+	.unprepare = dp_vco_unprepare_10nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_clk",
+		.parent_names = (const char *[]){ "xo_board" },
+		.num_parents = 1,
+		.ops = &dp_10nm_vco_clk_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+	.div = 10,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_link_clk_divsel_ten",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_two_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+	.div = 4,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_four_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+	.div = 6,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_six_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+				     struct clk_rate_request *req)
+{
+	int ret = 0;
+
+	ret = __clk_mux_determine_rate_closest(hw, req);
+	if (ret)
+		return ret;
+
+	/* Set the new parent of mux if there is a new valid parent */
+	if (hw->clk && req->best_parent_hw->clk)
+		clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+	return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk *div_clk = NULL, *vco_clk = NULL;
+	struct dp_pll_vco_clk *vco = NULL;
+
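+	/*
+	 * Walk up through the currently selected divider to the DP VCO and
+	 * derive the divided rate from the VCO rate (divide by 6, 4 or 2).
+	 */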
+	div_clk = clk_get_parent(hw->clk);
+	if (!div_clk)
+		return 0;
+
+	vco_clk = clk_get_parent(div_clk);
+	if (!vco_clk)
+		return 0;
+
+	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+	if (!vco)
+		return 0;
+
+	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+		return (vco->rate / 6);
+	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		return (vco->rate / 4);
+	else
+		return (vco->rate / 2);
+}
+
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+	.reg = 0x64,
+	.shift = 0,
+	.width = 2,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dp_vco_divided_clk_src_mux",
+			.parent_names =
+				(const char *[]){"dp_vco_divsel_two_clk_src",
+					"dp_vco_divsel_four_clk_src",
+					"dp_vco_divsel_six_clk_src"},
+			.num_parents = 3,
+			.ops = &mux_clk_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dp_pllcc_10nm[] = {
+	[DP_VCO_CLK] = &dp_vco_clk.hw,
+	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP, i = 0;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base ||
+		!pll_res->ln_tx0_base || !pll_res->ln_tx1_base) {
+		pr_err("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		devm_kfree(&pdev->dev, clk_data);
+		return -ENOMEM;
+	}
+	clk_data->clk_num = num_clks;
+
+	pll_res->priv = &dp_pdb;
+	dp_pdb.pll = pll_res;
+
+	/* Set client data for vco, mux and div clocks */
+	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+			pll_res, &dp_pll_10nm_cfg);
+	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
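+	/*
+	 * Start from the generic closest-rate regmap mux ops and override
+	 * determine_rate/recalc_rate so the divided clock follows the DP
+	 * VCO rate selection.
+	 */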
+	mux_clk_ops = clk_regmap_mux_closest_ops;
+	mux_clk_ops.determine_rate = clk_mux_determine_rate;
+	mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+	dp_vco_clk.priv = pll_res;
+
+	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+		clk = devm_clk_register(&pdev->dev,
+				mdss_dp_pllcc_10nm[i]);
+		if (IS_ERR(clk)) {
+			pr_err("clk registration failed for DP: %d\n",
+					pll_res->index);
+			rc = -EINVAL;
+			goto clk_reg_fail;
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node,
+			of_clk_src_onecell_get, clk_data);
+	if (rc) {
+		pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		pr_debug("%s SUCCESS\n", __func__);
+	}
+	return rc;
+clk_reg_fail:
+	devm_kfree(&pdev->dev, clk_data->clks);
+	devm_kfree(&pdev->dev, clk_data);
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
new file mode 100644
index 0000000..c3b5635
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_10NM_H
+#define __MDSS_DP_PLL_10NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
+
+struct dp_pll_db {
+	struct mdss_pll_resources *pll;
+
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+
+	/* COM PHY settings */
+	u32 hsclk_sel;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 vco_tune_map;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+	u32 lock_cmp_en;
+
+	/* PHY vco divider */
+	u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+				unsigned long parent_rate);
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int dp_vco_prepare_10nm(struct clk_hw *hw);
+void dp_vco_unprepare_10nm(struct clk_hw *hw);
+int dp_mux_set_parent_10nm(void *context,
+				unsigned int reg, unsigned int val);
+int dp_mux_get_parent_10nm(void *context,
+				unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_10NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
deleted file mode 100644
index a3ed8a8..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998-util.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/iopoll.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clock-generic.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-int link2xclk_divsel_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	u32 link2xclk_div_tx0, link2xclk_div_tx1;
-	u32 phy_mode;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
-				QSERDES_TX0_OFFSET + TXn_TX_BAND);
-	link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
-				QSERDES_TX1_OFFSET + TXn_TX_BAND);
-
-	link2xclk_div_tx0 &= ~0x07;	/* bits 0 to 2 */
-	link2xclk_div_tx1 &= ~0x07;	/* bits 0 to 2 */
-
-	/* Configure TX band Mux */
-	link2xclk_div_tx0 |= 0x4;
-	link2xclk_div_tx1 |= 0x4;
-
-	/*configure DP PHY MODE */
-	phy_mode = 0x58;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_BAND,
-			link2xclk_div_tx0);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_BAND,
-			link2xclk_div_tx1);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_MODE, phy_mode);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
-			__func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return rc;
-}
-
-int link2xclk_divsel_get_div(struct div_clk *clk)
-{
-	int rc;
-	u32 div = 0, phy_mode;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
-
-	if (phy_mode & 0x48)
-		pr_err("%s: DP PAR Rate not correct\n", __func__);
-
-	if ((phy_mode & 0x3) == 1)
-		div = 10;
-	else if ((phy_mode & 0x3) == 0)
-		div = 5;
-	else
-		pr_err("%s: unsupported div: %d\n", __func__, phy_mode);
-
-	mdss_pll_resource_enable(dp_res, false);
-	pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
-						phy_mode, div);
-
-	return div;
-}
-
-int vco_divided_clk_set_div(struct div_clk *clk, int div)
-{
-	int rc;
-	u32 auxclk_div;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP PLL resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= ~0x03;	/* bits 0 to 1 */
-
-	auxclk_div |= 1; /* Default divider */
-
-	if (div == 4)
-		auxclk_div |= 2;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_VCO_DIV, auxclk_div);
-	/* Make sure the PHY registers writes are done */
-	wmb();
-	pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	return rc;
-}
-
-
-enum handoff vco_divided_clk_handoff(struct clk *c)
-{
-	/*
-	 * Since cont-splash is not enabled, disable handoff
-	 * for vco_divider_clk.
-	 */
-	return HANDOFF_DISABLED_CLK;
-}
-
-int vco_divided_clk_get_div(struct div_clk *clk)
-{
-	int rc;
-	u32 div, auxclk_div;
-	struct mdss_pll_resources *dp_res = clk->priv;
-
-	rc = mdss_pll_resource_enable(dp_res, true);
-	if (rc) {
-		pr_err("Failed to enable dp_res resources\n");
-		return rc;
-	}
-
-	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
-	auxclk_div &= 0x03;
-
-	div = 2; /* Default divider */
-	if (auxclk_div == 2)
-		div = 4;
-
-	mdss_pll_resource_enable(dp_res, false);
-
-	pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
-
-	return div;
-}
-
-int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
-{
-	u32 res = 0;
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_PD_CTL, 0x3d);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYSCLK_EN_SEL, 0x37);
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CLK_ENABLE1, 0x0e);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CLK_SEL, 0x30);
-
-	/* Different for each clock rates */
-	if (rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_8100MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x02);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x2c);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x04);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x69);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x42);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0xbf);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x21);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else if (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_8100MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x06);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x84);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x08);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x69);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x02);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else if (rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000) {
-		pr_debug("%s: VCO rate: %ld\n", __func__,
-				DP_VCO_RATE_10800MHZDIV1000);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_SYS_CLK_CTRL, 0x06);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_HSCLK_SEL, 0x80);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP_EN, 0x08);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DEC_START_MODE0, 0x8c);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_DIV_FRAC_START3_MODE0, 0xa0);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CMN_CONFIG, 0x12);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
-		MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
-	} else {
-		pr_err("%s: unsupported rate: %ld\n", __func__, rate);
-		return -EINVAL;
-	}
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	if ((rate == DP_VCO_HSCLK_RATE_1620MHZDIV1000)
-	    || (rate == DP_VCO_HSCLK_RATE_2700MHZDIV1000)) {
-		MDSS_PLL_REG_W(dp_res->phy_base,
-				DP_PHY_VCO_DIV, 0x1);
-	} else {
-		MDSS_PLL_REG_W(dp_res->phy_base,
-				DP_PHY_VCO_DIV, 0x2);
-	}
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_VCO_TUNE_MAP, 0x00);
-
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BG_TIMER, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BG_TIMER, 0x0a);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_VCO_TUNE_CTRL, 0x00);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CP_CTRL_MODE0, 0x06);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_PLL_IVCO, 0x07);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_CORE_CLK_EN, 0x0f);
-
-	/* Make sure the PLL register writes are done */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_MODE, 0x58);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x1a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x1a);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
-			0x40);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
-			0x40);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
-			0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
-			0x30);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
-			0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
-			0x3d);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
-			0x03);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
-			0x00);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
-			0x00);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_BAND,
-			0x4);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_BAND,
-			0x4);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	return res;
-}
-
-static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
-{
-	u32 status;
-	bool pll_locked;
-
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: C_READY status is not high. Status=%x\n",
-				__func__, status);
-		pll_locked = false;
-	} else if (readl_poll_timeout_atomic((dp_res->pll_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		pll_locked = false;
-	} else {
-		pll_locked = true;
-	}
-
-	return pll_locked;
-}
-
-
-static int dp_pll_enable(struct clk *c)
-{
-	int rc = 0;
-	u32 status;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x05);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x01);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x09);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	MDSS_PLL_REG_W(dp_res->pll_base,
-			QSERDES_COM_RESETSM_CNTRL, 0x20);
-	/* Make sure the PLL register writes are done */
-	wmb();
-	/* poll for PLL ready status */
-	if (readl_poll_timeout_atomic((dp_res->pll_base +
-			QSERDES_COM_C_READY_STATUS),
-			status,
-			((status & BIT(0)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: C_READY status is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-	/* Make sure the PHY register writes are done */
-	wmb();
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	pr_debug("%s: PLL is locked\n", __func__);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
-			0x10);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
-			0x3f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
-			0x10);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
-			0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
-			0x0a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x18);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_LANE_MODE_1,
-			0xf6);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_LANE_MODE_1,
-			0xf6);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x1f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x1f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
-			0x0f);
-	/*
-	 * Make sure all the register writes are completed before
-	 * doing any other operation
-	 */
-	wmb();
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x09);
-	udelay(2000);
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_CFG, 0x19);
-	udelay(2000);
-	/* poll for PHY ready status */
-	if (readl_poll_timeout_atomic((dp_res->phy_base +
-			DP_PHY_STATUS),
-			status,
-			((status & BIT(1)) > 0),
-			DP_PLL_POLL_SLEEP_US,
-			DP_PLL_POLL_TIMEOUT_US)) {
-		pr_err("%s: Lane_mode: Phy_ready is not high. Status=%x\n",
-				__func__, status);
-		rc = -EINVAL;
-		goto lock_err;
-	}
-
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
-			0x2a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
-			0x2a);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
-			0x20);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
-			0x20);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
-			0x11);
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX,
-			0x11);
-	/* Make sure the PHY register writes are done */
-	wmb();
-
-lock_err:
-	return rc;
-}
-
-static int dp_pll_disable(struct clk *c)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_res = vco->priv;
-
-	/* Assert DP PHY power down */
-	MDSS_PLL_REG_W(dp_res->phy_base,
-			DP_PHY_PD_CTL, 0x2);
-	/*
-	 * Make sure all the register writes to disable PLL are
-	 * completed before doing any other operation
-	 */
-	wmb();
-
-	return rc;
-}
-
-
-int dp_vco_prepare(struct clk *c)
-{
-	int rc = 0;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *dp_pll_res = vco->priv;
-
-	DEV_DBG("rate=%ld\n", vco->rate);
-	rc = mdss_pll_resource_enable(dp_pll_res, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll resources\n");
-		goto error;
-	}
-
-	rc = dp_pll_enable(c);
-	if (rc) {
-		mdss_pll_resource_enable(dp_pll_res, false);
-		pr_err("ndx=%d failed to enable dsi pll\n",
-					dp_pll_res->index);
-		goto error;
-	}
-
-	mdss_pll_resource_enable(dp_pll_res, false);
-error:
-	return rc;
-}
-
-void dp_vco_unprepare(struct clk *c)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (!io) {
-		DEV_ERR("Invalid input parameter\n");
-		return;
-	}
-
-	if (!io->pll_on &&
-		mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return;
-	}
-	dp_pll_disable(c);
-
-	io->handoff_resources = false;
-	mdss_pll_resource_enable(io, false);
-	io->pll_on = false;
-}
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-	int rc;
-
-	rc = mdss_pll_resource_enable(io, true);
-	if (rc) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return rc;
-	}
-
-	DEV_DBG("DP lane CLK rate=%ld\n", rate);
-
-	rc = dp_config_vco_rate(vco, rate);
-	if (rc)
-		DEV_ERR("%s: Failed to set clk rate\n", __func__);
-
-	mdss_pll_resource_enable(io, false);
-
-	vco->rate = rate;
-
-	return 0;
-}
-
-unsigned long dp_vco_get_rate(struct clk *c)
-{
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	int rc;
-	u32 div, hsclk_div, link2xclk_div;
-	u64 vco_rate;
-	struct mdss_pll_resources *pll = vco->priv;
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
-		return rc;
-	}
-
-	div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
-	div &= 0x0f;
-
-	if (div == 12)
-		hsclk_div = 5; /* Default */
-	else if (div == 4)
-		hsclk_div = 3;
-	else if (div == 0)
-		hsclk_div = 2;
-	else {
-		pr_debug("unknown divider. forcing to default\n");
-		hsclk_div = 5;
-	}
-
-	div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
-
-	if (div & 0x58)
-		pr_err("%s: DP PAR Rate not correct\n", __func__);
-
-	if ((div & 0x3) == 1)
-		link2xclk_div = 10;
-	else if ((div & 0x3) == 0)
-		link2xclk_div = 5;
-	else
-		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
-
-	if (link2xclk_div == 10) {
-		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	} else {
-		if (hsclk_div == 5)
-			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
-		else if (hsclk_div == 3)
-			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-		else
-			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
-	}
-
-	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
-
-	mdss_pll_resource_enable(pll, false);
-
-	return (unsigned long)vco_rate;
-}
-
-long dp_vco_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-
-	if (rate <= vco->min_rate)
-		rrate = vco->min_rate;
-	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
-		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
-	else
-		rrate = vco->max_rate;
-
-	pr_debug("%s: rrate=%ld\n", __func__, rrate);
-
-	return rrate;
-}
-
-enum handoff dp_vco_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
-	struct mdss_pll_resources *io = vco->priv;
-
-	if (mdss_pll_resource_enable(io, true)) {
-		DEV_ERR("pll resource can't be enabled\n");
-		return ret;
-	}
-
-	if (dp_pll_lock_status(io)) {
-		io->pll_on = true;
-		c->rate = dp_vco_get_rate(c);
-		io->handoff_resources = true;
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		io->handoff_resources = false;
-		mdss_pll_resource_enable(io, false);
-		DEV_DBG("%s: PLL not locked\n", __func__);
-	}
-
-	DEV_DBG("done, ret=%d\n", ret);
-	return ret;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
deleted file mode 100644
index 6a49d15..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-/*
- ***************************************************************************
- ******** Display Port PLL driver block diagram for branch clocks **********
- ***************************************************************************
-
-			+--------------------------+
-			|       DP_VCO_CLK         |
-			|			   |
-			|  +-------------------+   |
-			|  |   (DP PLL/VCO)    |   |
-			|  +---------+---------+   |
-			|	     v		   |
-			| +----------+-----------+ |
-			| | hsclk_divsel_clk_src | |
-			| +----------+-----------+ |
-			+--------------------------+
-				     |
-				     v
-	   +------------<------------|------------>-------------+
-	   |                         |                          |
-+----------v----------+	  +----------v----------+    +----------v----------+
-|   dp_link_2x_clk    |	  | vco_divided_clk_src	|    | vco_divided_clk_src |
-|     divsel_five     |	  |			|    |			   |
-v----------+----------v	  |	divsel_two	|    |	   divsel_four	   |
-	   |		  +----------+----------+    +----------+----------+
-	   |                         |                          |
-	   v			     v				v
-				     |	+---------------------+	|
-  Input to MMSSCC block		     |	|    (aux_clk_ops)    |	|
-  for link clk, crypto clk	     +-->   vco_divided_clk   <-+
-  and interface clock			|	_src_mux      |
-					+----------+----------+
-						   |
-						   v
-					 Input to MMSSCC block
-					 for DP pixel clock
-
- ******************************************************************************
- */
-
-#define pr_fmt(fmt)	"%s: " fmt, __func__
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
-#include "mdss-dp-pll.h"
-#include "mdss-dp-pll-8998.h"
-
-static const struct clk_ops clk_ops_vco_divided_clk_src_c;
-static const struct clk_ops clk_ops_link_2x_clk_div_c;
-static const struct clk_ops clk_ops_gen_mux_dp;
-
-static struct clk_div_ops link2xclk_divsel_ops = {
-	.set_div = link2xclk_divsel_set_div,
-	.get_div = link2xclk_divsel_get_div,
-};
-
-static struct clk_div_ops vco_divided_clk_ops = {
-	.set_div = vco_divided_clk_set_div,
-	.get_div = vco_divided_clk_get_div,
-};
-
-static const struct clk_ops dp_8998_vco_clk_ops = {
-	.set_rate = dp_vco_set_rate,
-	.round_rate = dp_vco_round_rate,
-	.prepare = dp_vco_prepare,
-	.unprepare = dp_vco_unprepare,
-	.handoff = dp_vco_handoff,
-};
-
-static struct clk_mux_ops mdss_mux_ops = {
-	.set_mux_sel = mdss_set_mux_sel,
-	.get_mux_sel = mdss_get_mux_sel,
-};
-
-static struct dp_pll_vco_clk dp_vco_clk = {
-	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
-	.max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
-	.c = {
-		.dbg_name = "dp_vco_clk",
-		.ops = &dp_8998_vco_clk_ops,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dp_vco_clk.c),
-	},
-};
-
-static struct div_clk dp_link_2x_clk_divsel_five = {
-	.data = {
-		.div = 5,
-		.min_div = 5,
-		.max_div = 5,
-	},
-	.ops = &link2xclk_divsel_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "dp_link_2x_clk_divsel_five",
-		.ops = &clk_ops_link_2x_clk_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dp_link_2x_clk_divsel_five.c),
-	},
-};
-
-static struct div_clk vco_divsel_four_clk_src = {
-	.data = {
-		.div = 4,
-		.min_div = 4,
-		.max_div = 4,
-	},
-	.ops = &vco_divided_clk_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "vco_divsel_four_clk_src",
-		.ops = &clk_ops_vco_divided_clk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divsel_four_clk_src.c),
-	},
-};
-
-static struct div_clk vco_divsel_two_clk_src = {
-	.data = {
-		.div = 2,
-		.min_div = 2,
-		.max_div = 2,
-	},
-	.ops = &vco_divided_clk_ops,
-	.c = {
-		.parent = &dp_vco_clk.c,
-		.dbg_name = "vco_divsel_two_clk_src",
-		.ops = &clk_ops_vco_divided_clk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divsel_two_clk_src.c),
-	},
-};
-
-static struct mux_clk vco_divided_clk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&vco_divsel_two_clk_src.c, 0},
-		{&vco_divsel_four_clk_src.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &vco_divsel_two_clk_src.c,
-		.dbg_name = "vco_divided_clk_src_mux",
-		.ops = &clk_ops_gen_mux_dp,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(vco_divided_clk_src_mux.c),
-	}
-};
-
-static struct clk_lookup dp_pllcc_8998[] = {
-	CLK_LIST(dp_vco_clk),
-	CLK_LIST(dp_link_2x_clk_divsel_five),
-	CLK_LIST(vco_divsel_four_clk_src),
-	CLK_LIST(vco_divsel_two_clk_src),
-	CLK_LIST(vco_divided_clk_src_mux),
-};
-
-int dp_pll_clock_register_8998(struct platform_device *pdev,
-				 struct mdss_pll_resources *pll_res)
-{
-	int rc = -ENOTSUPP;
-
-	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
-		DEV_ERR("%s: Invalid input parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Set client data for vco, mux and div clocks */
-	dp_vco_clk.priv = pll_res;
-	vco_divided_clk_src_mux.priv = pll_res;
-	vco_divsel_two_clk_src.priv = pll_res;
-	vco_divsel_four_clk_src.priv = pll_res;
-	dp_link_2x_clk_divsel_five.priv = pll_res;
-
-	clk_ops_link_2x_clk_div_c = clk_ops_div;
-	clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
-
-	/*
-	 * Set the ops for the divider in the pixel clock tree to the
-	 * slave_div to ensure that a set rate on this divider clock will not
-	 * be propagated to it's parent. This is needed ensure that when we set
-	 * the rate for pixel clock, the vco is not reconfigured
-	 */
-	clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
-	clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
-	clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
-
-	clk_ops_gen_mux_dp = clk_ops_gen_mux;
-	clk_ops_gen_mux_dp.get_rate = parent_get_rate;
-
-	/* We can select different clock ops for future versions */
-	dp_vco_clk.c.ops = &dp_8998_vco_clk_ops;
-
-	rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_8998,
-					ARRAY_SIZE(dp_pllcc_8998));
-	if (rc) {
-		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
-		rc = -EPROBE_DEFER;
-	} else {
-		DEV_DBG("%s SUCCESS\n", __func__);
-	}
-
-	return rc;
-}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h b/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
deleted file mode 100644
index 11d5ddc..0000000
--- a/drivers/clk/qcom/mdss/mdss-dp-pll-8998.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __MDSS_DP_PLL_8998_H
-#define __MDSS_DP_PLL_8998_H
-
-#define DP_PHY_REVISION_ID0			0x0000
-#define DP_PHY_REVISION_ID1			0x0004
-#define DP_PHY_REVISION_ID2			0x0008
-#define DP_PHY_REVISION_ID3			0x000C
-
-#define DP_PHY_CFG				0x0010
-#define DP_PHY_PD_CTL				0x0014
-#define DP_PHY_MODE				0x0018
-
-#define DP_PHY_AUX_CFG0				0x001C
-#define DP_PHY_AUX_CFG1				0x0020
-#define DP_PHY_AUX_CFG2				0x0024
-#define DP_PHY_AUX_CFG3				0x0028
-#define DP_PHY_AUX_CFG4				0x002C
-#define DP_PHY_AUX_CFG5				0x0030
-#define DP_PHY_AUX_CFG6				0x0034
-#define DP_PHY_AUX_CFG7				0x0038
-#define DP_PHY_AUX_CFG8				0x003C
-#define DP_PHY_AUX_CFG9				0x0040
-#define DP_PHY_AUX_INTERRUPT_MASK		0x0044
-#define DP_PHY_AUX_INTERRUPT_CLEAR		0x0048
-#define DP_PHY_AUX_BIST_CFG			0x004C
-
-#define DP_PHY_VCO_DIV				0x0064
-#define DP_PHY_TX0_TX1_LANE_CTL			0x0068
-
-#define DP_PHY_TX2_TX3_LANE_CTL			0x0084
-#define DP_PHY_STATUS				0x00BC
-
-/* Tx registers */
-#define QSERDES_TX0_OFFSET			0x0400
-#define QSERDES_TX1_OFFSET			0x0800
-
-#define TXn_BIST_MODE_LANENO			0x0000
-#define TXn_CLKBUF_ENABLE			0x0008
-#define TXn_TX_EMP_POST1_LVL			0x000C
-
-#define TXn_TX_DRV_LVL				0x001C
-
-#define TXn_RESET_TSYNC_EN			0x0024
-#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
-#define TXn_TX_BAND				0x002C
-#define TXn_SLEW_CNTL				0x0030
-#define TXn_INTERFACE_SELECT			0x0034
-
-#define TXn_RES_CODE_LANE_TX			0x003C
-#define TXn_RES_CODE_LANE_RX			0x0040
-#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
-#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
-
-#define TXn_DEBUG_BUS_SEL			0x0058
-#define TXn_TRANSCEIVER_BIAS_EN			0x005C
-#define TXn_HIGHZ_DRVR_EN			0x0060
-#define TXn_TX_POL_INV				0x0064
-#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
-
-#define TXn_LANE_MODE_1				0x008C
-
-#define TXn_TRAN_DRVR_EMP_EN			0x00C0
-#define TXn_TX_INTERFACE_MODE			0x00C4
-
-#define TXn_VMODE_CTRL1				0x00F0
-
-
-/* PLL register offset */
-#define QSERDES_COM_ATB_SEL1			0x0000
-#define QSERDES_COM_ATB_SEL2			0x0004
-#define QSERDES_COM_FREQ_UPDATE			0x0008
-#define QSERDES_COM_BG_TIMER			0x000C
-#define QSERDES_COM_SSC_EN_CENTER		0x0010
-#define QSERDES_COM_SSC_ADJ_PER1		0x0014
-#define QSERDES_COM_SSC_ADJ_PER2		0x0018
-#define QSERDES_COM_SSC_PER1			0x001C
-#define QSERDES_COM_SSC_PER2			0x0020
-#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
-#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
-#define QSERDES_COM_POST_DIV			0x002C
-#define QSERDES_COM_POST_DIV_MUX		0x0030
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
-#define QSERDES_COM_CLK_ENABLE1			0x0038
-#define QSERDES_COM_SYS_CLK_CTRL		0x003C
-#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
-#define QSERDES_COM_PLL_EN			0x0044
-#define QSERDES_COM_PLL_IVCO			0x0048
-#define QSERDES_COM_CMN_IETRIM			0x004C
-#define QSERDES_COM_CMN_IPTRIM			0x0050
-
-#define QSERDES_COM_CP_CTRL_MODE0		0x0060
-#define QSERDES_COM_CP_CTRL_MODE1		0x0064
-#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
-#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
-#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
-#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
-#define QSERDES_COM_PLL_CNTRL			0x0078
-#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
-#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
-#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
-#define QSERDES_COM_RESETSM_CNTRL		0x0088
-#define QSERDES_COM_RESETSM_CNTRL2		0x008C
-#define QSERDES_COM_LOCK_CMP_EN			0x0090
-#define QSERDES_COM_LOCK_CMP_CFG		0x0094
-#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
-#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
-#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
-
-#define QSERDES_COM_DEC_START_MODE0		0x00B0
-#define QSERDES_COM_DEC_START_MODE1		0x00B4
-#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
-#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
-#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
-#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
-#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
-#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
-#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
-#define QSERDES_COM_INTEGLOOP_EN		0x00D4
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
-#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
-#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
-#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
-#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
-#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
-
-#define QSERDES_COM_CMN_STATUS			0x0124
-#define QSERDES_COM_RESET_SM_STATUS		0x0128
-
-#define QSERDES_COM_CLK_SEL			0x0138
-#define QSERDES_COM_HSCLK_SEL			0x013C
-
-#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
-
-#define QSERDES_COM_SW_RESET			0x0150
-#define QSERDES_COM_CORE_CLK_EN			0x0154
-#define QSERDES_COM_C_READY_STATUS		0x0158
-#define QSERDES_COM_CMN_CONFIG			0x015C
-
-#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
-
-#define DP_PLL_POLL_SLEEP_US			500
-#define DP_PLL_POLL_TIMEOUT_US			10000
-
-#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
-#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
-
-#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
-#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
-#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
-
-int dp_vco_set_rate(struct clk *c, unsigned long rate);
-unsigned long dp_vco_get_rate(struct clk *c);
-long dp_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff dp_vco_handoff(struct clk *c);
-enum handoff vco_divided_clk_handoff(struct clk *c);
-int dp_vco_prepare(struct clk *c);
-void dp_vco_unprepare(struct clk *c);
-int hsclk_divsel_set_div(struct div_clk *clk, int div);
-int hsclk_divsel_get_div(struct div_clk *clk);
-int link2xclk_divsel_set_div(struct div_clk *clk, int div);
-int link2xclk_divsel_get_div(struct div_clk *clk);
-int vco_divided_clk_set_div(struct div_clk *clk, int div);
-int vco_divided_clk_get_div(struct div_clk *clk);
-
-#endif /* __MDSS_DP_PLL_8998_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
index 2805ff9..2b1d70e 100644
--- a/drivers/clk/qcom/mdss/mdss-dp-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,21 +15,19 @@
 #define __MDSS_DP_PLL_H
 
 struct dp_pll_vco_clk {
+	struct clk_hw hw;
 	unsigned long	rate;		/* current vco rate */
 	u64		min_rate;	/* min vco rate */
 	u64		max_rate;	/* max vco rate */
 	void		*priv;
-
-	struct clk	c;
 };
 
-static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
 {
-	return container_of(clk, struct dp_pll_vco_clk, c);
+	return container_of(hw, struct dp_pll_vco_clk, hw);
 }
 
-int dp_pll_clock_register_8998(struct platform_device *pdev,
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
 				struct mdss_pll_resources *pll_res);
 
-
 #endif /* __MDSS_DP_PLL_H */
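The header change above moves the DP VCO clock from the legacy msm clock wrapper to the common clock framework: the driver now embeds a struct clk_hw and recovers its own structure with to_dp_vco_hw() via container_of(). A minimal, standalone sketch of that embed-and-recover pattern, with a hypothetical sample_vco type standing in for dp_pll_vco_clk:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct clk_hw { int placeholder; };	/* stands in for the framework type */

struct sample_vco {			/* hypothetical, mirrors dp_pll_vco_clk */
	struct clk_hw hw;		/* embedded framework handle */
	unsigned long rate;
};

static struct sample_vco *to_sample_vco(struct clk_hw *hw)
{
	/* Recover the wrapper from the embedded member, as to_dp_vco_hw() does. */
	return container_of(hw, struct sample_vco, hw);
}

int main(void)
{
	struct sample_vco vco = { .rate = 1620000UL };
	struct clk_hw *hw = &vco.hw;	/* what the clk framework hands back */

	printf("rate=%lu\n", to_sample_vco(hw)->rate);
	return 0;
}

The clk framework only ever hands the embedded clk_hw pointer to the ops callbacks, so every callback begins with this recovery step.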
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index 7f82fda..e292ef8 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -21,6 +21,7 @@
 #include <linux/iopoll.h>
 #include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
+#include "mdss-dp-pll.h"
 
 int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
 {
@@ -126,6 +127,8 @@
 
 	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
 		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
 	else
 		goto err;
 
@@ -151,6 +154,9 @@
 	case MDSS_DSI_PLL_10NM:
 		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
 		break;
+	case MDSS_DP_PLL_10NM:
+		rc = dp_pll_clock_register_10nm(pdev, pll_res);
+		break;
 	case MDSS_UNKNOWN_PLL:
 	default:
 		rc = -EINVAL;
@@ -171,6 +177,7 @@
 	const char *label;
 	struct resource *pll_base_reg;
 	struct resource *phy_base_reg;
+	struct resource *tx0_base_reg, *tx1_base_reg;
 	struct resource *dynamic_pll_base_reg;
 	struct resource *gdsc_base_reg;
 	struct mdss_pll_resources *pll_res;
@@ -272,6 +279,30 @@
 		}
 	}
 
+	tx0_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ln_tx0_base");
+	if (tx0_base_reg) {
+		pll_res->ln_tx0_base = ioremap(tx0_base_reg->start,
+				resource_size(tx0_base_reg));
+		if (!pll_res->ln_tx0_base) {
+			pr_err("Unable to remap Lane TX0 base resources\n");
+			rc = -ENOMEM;
+			goto tx0_io_error;
+		}
+	}
+
+	tx1_base_reg = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "ln_tx1_base");
+	if (tx1_base_reg) {
+		pll_res->ln_tx1_base = ioremap(tx1_base_reg->start,
+				resource_size(tx1_base_reg));
+		if (!pll_res->ln_tx1_base) {
+			pr_err("Unable to remap Lane TX1 base resources\n");
+			rc = -ENOMEM;
+			goto tx1_io_error;
+		}
+	}
+
 	gdsc_base_reg = platform_get_resource_byname(pdev,
 					IORESOURCE_MEM, "gdsc_base");
 	if (!gdsc_base_reg) {
@@ -309,6 +340,12 @@
 	if (pll_res->gdsc_base)
 		iounmap(pll_res->gdsc_base);
 gdsc_io_error:
+	if (pll_res->ln_tx1_base)
+		iounmap(pll_res->ln_tx1_base);
+tx1_io_error:
+	if (pll_res->ln_tx0_base)
+		iounmap(pll_res->ln_tx0_base);
+tx0_io_error:
 	if (pll_res->dyn_pll_base)
 		iounmap(pll_res->dyn_pll_base);
 dyn_pll_io_error:
@@ -347,6 +384,7 @@
 
 static const struct of_device_id mdss_pll_dt_match[] = {
 	{.compatible = "qcom,mdss_dsi_pll_10nm"},
+	{.compatible = "qcom,mdss_dp_pll_10nm"},
 	{}
 };
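The probe additions above follow the usual optional-region pattern: look the lane TX regions up by name, ioremap them only if present, and unwind whatever was already mapped, in reverse order, when a later mapping fails. A hedged sketch of that goto-unwind shape, with hypothetical names and stubbed mapping helpers rather than the driver's real calls:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for ioremap()/iounmap(); hypothetical, for illustration only. */
static void *map_region(const char *name) { (void)name; return malloc(16); }
static void unmap_region(void *p) { free(p); }

struct res { void *pll, *tx0, *tx1; };

static int probe(struct res *r)
{
	int rc = 0;

	r->pll = map_region("pll_base");
	if (!r->pll)
		return -1;

	r->tx0 = map_region("ln_tx0_base");
	if (!r->tx0) {
		rc = -1;
		goto tx0_err;		/* nothing newer to undo yet */
	}

	r->tx1 = map_region("ln_tx1_base");
	if (!r->tx1) {
		rc = -1;
		goto tx1_err;
	}
	return 0;

tx1_err:
	unmap_region(r->tx0);		/* undo in reverse order of setup */
tx0_err:
	unmap_region(r->pll);
	return rc;
}

int main(void)
{
	struct res r = { 0 };
	printf("probe rc=%d\n", probe(&r));
	return 0;
}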
 
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index ee91e11..033462d 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -37,6 +37,7 @@
 
 enum {
 	MDSS_DSI_PLL_10NM,
+	MDSS_DP_PLL_10NM,
 	MDSS_UNKNOWN_PLL,
 };
 
@@ -81,6 +82,8 @@
 	 */
 	void __iomem	*pll_base;
 	void __iomem	*phy_base;
+	void __iomem	*ln_tx0_base;
+	void __iomem	*ln_tx1_base;
 	void __iomem	*gdsc_base;
 	void __iomem	*dyn_pll_base;
 
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8af73ac..d9ebe113 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -4877,15 +4877,12 @@
 	if (handle == NULL)
 		return -ENODEV;
 
-	qce_enable_clk(pce_dev);
-
 	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
 	sps_disconnect(sps_pipe_info);
 
 	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
 	sps_disconnect(sps_pipe_info);
 
-	qce_disable_clk(pce_dev);
 	return 0;
 }
 
@@ -4899,8 +4896,6 @@
 	if (handle == NULL)
 		return -ENODEV;
 
-	qce_enable_clk(pce_dev);
-
 	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
 	sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
 	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
@@ -4923,7 +4918,6 @@
 	if (rc)
 		pr_err("Producer callback registration failed rc = %d\n", rc);
 
-	qce_disable_clk(pce_dev);
 	return rc;
 }
 
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index f184ee1..56fbb94 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -583,44 +583,92 @@
 static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
 				 bool high_bw_req)
 {
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned int control_flag;
 	int ret = 0;
 
-	if (high_bw_req) {
+	if (cp->ce_support.req_bw_before_clk) {
+		if (high_bw_req)
+			control_flag = QCE_BW_REQUEST_FIRST;
+		else
+			control_flag = QCE_CLK_DISABLE_FIRST;
+	} else {
+		if (high_bw_req)
+			control_flag = QCE_CLK_ENABLE_FIRST;
+		else
+			control_flag = QCE_BW_REQUEST_RESET_FIRST;
+	}
+
+	switch (control_flag) {
+	case QCE_CLK_ENABLE_FIRST:
 		ret = qce_enable_clk(pengine->qce);
 		if (ret) {
 			pr_err("%s Unable enable clk\n", __func__);
-			goto clk_err;
+			return;
 		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 		if (ret) {
-			pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			qce_disable_clk(pengine->qce);
-			goto clk_err;
+			pr_err("%s Unable to set high bw\n", __func__);
+			ret = qce_disable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable disable clk\n", __func__);
+			return;
 		}
-	} else {
+		break;
+	case QCE_BW_REQUEST_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set high bw\n", __func__);
+			return;
+		}
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+			if (ret)
+				pr_err("%s Unable to set low bw\n", __func__);
+			return;
+		}
+		break;
+	case QCE_CLK_DISABLE_FIRST:
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable to disable clk\n", __func__);
+			return;
+		}
 		ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 0);
 		if (ret) {
-			pr_err("%s Unable to set to low bandwidth\n",
-						__func__);
-			goto clk_err;
+			pr_err("%s Unable to set low bw\n", __func__);
+			ret = qce_enable_clk(pengine->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n", __func__);
+			return;
+		}
+		break;
+	case QCE_BW_REQUEST_RESET_FIRST:
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set low bw\n", __func__);
+			return;
 		}
 		ret = qce_disable_clk(pengine->qce);
 		if (ret) {
-			pr_err("%s Unable disable clk\n", __func__);
+			pr_err("%s Unable to disable clk\n", __func__);
 			ret = msm_bus_scale_client_update_request(
 				pengine->bus_scale_handle, 1);
 			if (ret)
-				pr_err("%s Unable to set to high bandwidth\n",
-						__func__);
-			goto clk_err;
+				pr_err("%s Unable to set high bw\n", __func__);
+			return;
 		}
+		break;
+	default:
+		return;
 	}
-clk_err:
-	return;
-
 }
 
 static void qcrypto_bw_reaper_timer_callback(unsigned long data)
@@ -4856,12 +4904,36 @@
 	if (!pengine)
 		return -ENOMEM;
 
-	/* open qce */
+	cp->platform_support.bus_scale_table = (struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+	if (!cp->platform_support.bus_scale_table) {
+		dev_err(&pdev->dev, "bus_scale_table is NULL\n");
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	} else {
+		pengine->bus_scale_handle = msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			dev_err(&pdev->dev, "failed to get bus scale handle\n");
+			rc = -ENOMEM;
+			goto exit_kzfree;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 1);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set high bandwidth\n");
+		goto exit_kzfree;
+	}
 	handle = qce_open(pdev, &rc);
 	if (handle == NULL) {
-		kzfree(pengine);
-		platform_set_drvdata(pdev, NULL);
-		return rc;
+		rc = -ENODEV;
+		goto exit_free_pdata;
+	}
+	rc = msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to set low bandwidth\n");
+		goto exit_qce_close;
 	}
 
 	platform_set_drvdata(pdev, pengine);
@@ -4903,7 +4975,7 @@
 			pengine->max_req, GFP_KERNEL);
 	if (pqcrypto_req_control == NULL) {
 		rc = -ENOMEM;
-		goto err;
+		goto exit_unlock_mutex;
 	}
 	qcrypto_init_req_control(pengine, pqcrypto_req_control);
 	if (cp->ce_support.bam)	 {
@@ -4911,15 +4983,7 @@
 		cp->platform_support.shared_ce_resource = 0;
 		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
 		cp->platform_support.sha_hmac = 1;
-
-		cp->platform_support.bus_scale_table =
-			(struct msm_bus_scale_pdata *)
-					msm_bus_cl_get_pdata(pdev);
-		if (!cp->platform_support.bus_scale_table)
-			pr_warn("bus_scale_table is NULL\n");
-
 		pengine->ce_device = cp->ce_support.ce_device;
-
 	} else {
 		platform_support =
 			(struct msm_ce_hw_support *)pdev->dev.platform_data;
@@ -4928,33 +4992,11 @@
 				platform_support->shared_ce_resource;
 		cp->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-		cp->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 		cp->platform_support.sha_hmac = platform_support->sha_hmac;
 	}
 
-	pengine->bus_scale_handle = 0;
-
-	if (cp->platform_support.bus_scale_table != NULL) {
-		pengine->bus_scale_handle =
-			msm_bus_scale_register_client(
-				(struct msm_bus_scale_pdata *)
-					cp->platform_support.bus_scale_table);
-		if (!pengine->bus_scale_handle) {
-			pr_err("%s not able to get bus scale\n",
-				__func__);
-			rc =  -ENOMEM;
-			goto err;
-		}
-		pengine->bw_state = BUS_NO_BANDWIDTH;
-	} else {
-		pengine->bw_state = BUS_HAS_BANDWIDTH;
-	}
-
-	if (cp->total_units != 1) {
-		mutex_unlock(&cp->engine_lock);
-		return 0;
-	}
+	if (cp->total_units != 1)
+		goto exit_unlock_mutex;
 
 	/* register crypto cipher algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
@@ -5243,13 +5285,19 @@
 	}
 	mutex_unlock(&cp->engine_lock);
 
-
 	return 0;
 err:
 	_qcrypto_remove_engine(pengine);
+	kzfree(pqcrypto_req_control);
+exit_unlock_mutex:
 	mutex_unlock(&cp->engine_lock);
+exit_qce_close:
 	if (pengine->qce)
 		qce_close(pengine->qce);
+exit_free_pdata:
+	msm_bus_scale_client_update_request(pengine->bus_scale_handle, 0);
+	platform_set_drvdata(pdev, NULL);
+exit_kzfree:
 	kzfree(pengine);
 	return rc;
 };
@@ -5323,8 +5371,11 @@
 	spin_unlock_irqrestore(&cp->lock, flags);
 	if (ret)
 		return ret;
-	if (qce_pm_table.suspend)
+	if (qce_pm_table.suspend) {
+		qcrypto_ce_set_bus(pengine, true);
 		qce_pm_table.suspend(pengine->qce);
+		qcrypto_ce_set_bus(pengine, false);
+	}
 	return 0;
 }
 
@@ -5345,9 +5396,11 @@
 	spin_lock_irqsave(&cp->lock, flags);
 	if (pengine->bw_state == BUS_SUSPENDED) {
 		spin_unlock_irqrestore(&cp->lock, flags);
-		if (qce_pm_table.resume)
+		if (qce_pm_table.resume) {
+			qcrypto_ce_set_bus(pengine, true);
 			qce_pm_table.resume(pengine->qce);
-
+			qcrypto_ce_set_bus(pengine, false);
+		}
 		spin_lock_irqsave(&cp->lock, flags);
 		pengine->bw_state = BUS_NO_BANDWIDTH;
 		pengine->active_seq++;
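The reworked qcrypto_ce_set_bus() above picks one of four orderings depending on whether the hardware wants the bus vote placed before the clock (req_bw_before_clk) and on whether bandwidth is being raised or dropped; each branch also undoes its first step if the second one fails. A compact sketch of just the ordering selection, with shortened enum names standing in for the QCE_* control flags:

#include <stdbool.h>
#include <stdio.h>

enum order { CLK_ENABLE_FIRST, BW_REQUEST_FIRST,
	     CLK_DISABLE_FIRST, BW_REQUEST_RESET_FIRST };

/* Mirrors the control_flag selection in qcrypto_ce_set_bus(). */
static enum order pick_order(bool req_bw_before_clk, bool high_bw_req)
{
	if (req_bw_before_clk)
		return high_bw_req ? BW_REQUEST_FIRST : CLK_DISABLE_FIRST;
	return high_bw_req ? CLK_ENABLE_FIRST : BW_REQUEST_RESET_FIRST;
}

int main(void)
{
	/* Raising bandwidth on hardware that wants the bus vote first. */
	printf("%d\n", pick_order(true, true));		/* BW_REQUEST_FIRST */
	/* Dropping bandwidth on hardware that wants the clock handled first. */
	printf("%d\n", pick_order(false, false));	/* BW_REQUEST_RESET_FIRST */
	return 0;
}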
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
index a0e52a0..8c533ee 100644
--- a/drivers/devfreq/governor_memlat.h
+++ b/drivers/devfreq/governor_memlat.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,8 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#ifndef _GOVERNOR_BW_HWMON_H
-#define _GOVERNOR_BW_HWMON_H
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
 
 #include <linux/kernel.h>
 #include <linux/devfreq.h>
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index 43d8fef..e8bfff2 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -548,10 +548,6 @@
 					(devfreq->profile),
 					struct msm_adreno_extended_profile,
 					profile);
-	if (devfreq == NULL) {
-		pr_err(TAG "NULL defvreq passed to tz_handler\n");
-		return -EFAULT;
-	}
 
 	switch (event) {
 	case DEVFREQ_GOV_START:
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 2453e07..094548b 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -68,6 +68,8 @@
 	struct fence_cb *cur, *tmp;
 	int ret = 0;
 
+	lockdep_assert_held(fence->lock);
+
 	if (WARN_ON(!fence))
 		return -EINVAL;
 
@@ -159,9 +161,6 @@
 	if (WARN_ON(timeout < 0))
 		return -EINVAL;
 
-	if (timeout == 0)
-		return fence_is_signaled(fence);
-
 	trace_fence_wait_start(fence);
 	ret = fence->ops->wait(fence, intr, timeout);
 	trace_fence_wait_end(fence);
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 723d8af..82f35a4 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -280,18 +280,24 @@
 				      unsigned *pshared_count,
 				      struct fence ***pshared)
 {
-	unsigned shared_count = 0;
-	unsigned retry = 1;
-	struct fence **shared = NULL, *fence_excl = NULL;
-	int ret = 0;
+	struct fence **shared = NULL;
+	struct fence *fence_excl;
+	unsigned int shared_count;
+	int ret = 1;
 
-	while (retry) {
+	do {
 		struct reservation_object_list *fobj;
 		unsigned seq;
+		unsigned int i;
 
-		seq = read_seqcount_begin(&obj->seq);
+		shared_count = i = 0;
 
 		rcu_read_lock();
+		seq = read_seqcount_begin(&obj->seq);
+
+		fence_excl = rcu_dereference(obj->fence_excl);
+		if (fence_excl && !fence_get_rcu(fence_excl))
+			goto unlock;
 
 		fobj = rcu_dereference(obj->fence);
 		if (fobj) {
@@ -309,52 +315,37 @@
 				}
 
 				ret = -ENOMEM;
-				shared_count = 0;
 				break;
 			}
 			shared = nshared;
-			memcpy(shared, fobj->shared, sz);
 			shared_count = fobj->shared_count;
-		} else
-			shared_count = 0;
-		fence_excl = rcu_dereference(obj->fence_excl);
-
-		retry = read_seqcount_retry(&obj->seq, seq);
-		if (retry)
-			goto unlock;
-
-		if (!fence_excl || fence_get_rcu(fence_excl)) {
-			unsigned i;
 
 			for (i = 0; i < shared_count; ++i) {
-				if (fence_get_rcu(shared[i]))
-					continue;
-
-				/* uh oh, refcount failed, abort and retry */
-				while (i--)
-					fence_put(shared[i]);
-
-				if (fence_excl) {
-					fence_put(fence_excl);
-					fence_excl = NULL;
-				}
-
-				retry = 1;
-				break;
+				shared[i] = rcu_dereference(fobj->shared[i]);
+				if (!fence_get_rcu(shared[i]))
+					break;
 			}
-		} else
-			retry = 1;
+		}
 
+		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
+			while (i--)
+				fence_put(shared[i]);
+			fence_put(fence_excl);
+			goto unlock;
+		}
+
+		ret = 0;
 unlock:
 		rcu_read_unlock();
-	}
-	*pshared_count = shared_count;
-	if (shared_count)
-		*pshared = shared;
-	else {
-		*pshared = NULL;
+	} while (ret);
+
+	if (!shared_count) {
 		kfree(shared);
+		shared = NULL;
 	}
+
+	*pshared_count = shared_count;
+	*pshared = shared;
 	*pfence_excl = fence_excl;
 
 	return ret;
@@ -379,10 +370,7 @@
 {
 	struct fence *fence;
 	unsigned seq, shared_count, i = 0;
-	long ret = timeout;
-
-	if (!timeout)
-		return reservation_object_test_signaled_rcu(obj, wait_all);
+	long ret = timeout ? timeout : 1;
 
 retry:
 	fence = NULL;
@@ -397,9 +385,6 @@
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *lfence = rcu_dereference(fobj->shared[i]);
 
@@ -422,9 +407,6 @@
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl &&
 		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
 			if (!fence_get_rcu(fence_excl))
@@ -439,6 +421,11 @@
 
 	rcu_read_unlock();
 	if (fence) {
+		if (read_seqcount_retry(&obj->seq, seq)) {
+			fence_put(fence);
+			goto retry;
+		}
+
 		ret = fence_wait_timeout(fence, intr, ret);
 		fence_put(fence);
 		if (ret > 0 && wait_all && (i + 1 < shared_count))
@@ -484,12 +471,13 @@
 					  bool test_all)
 {
 	unsigned seq, shared_count;
-	int ret = true;
+	int ret;
 
+	rcu_read_lock();
 retry:
+	ret = true;
 	shared_count = 0;
 	seq = read_seqcount_begin(&obj->seq);
-	rcu_read_lock();
 
 	if (test_all) {
 		unsigned i;
@@ -500,46 +488,35 @@
 		if (fobj)
 			shared_count = fobj->shared_count;
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		for (i = 0; i < shared_count; ++i) {
 			struct fence *fence = rcu_dereference(fobj->shared[i]);
 
 			ret = reservation_object_test_signaled_single(fence);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
 			else if (!ret)
 				break;
 		}
 
-		/*
-		 * There could be a read_seqcount_retry here, but nothing cares
-		 * about whether it's the old or newer fence pointers that are
-		 * signaled. That race could still have happened after checking
-		 * read_seqcount_retry. If you care, use ww_mutex_lock.
-		 */
+		if (read_seqcount_retry(&obj->seq, seq))
+			goto retry;
 	}
 
 	if (!shared_count) {
 		struct fence *fence_excl = rcu_dereference(obj->fence_excl);
 
-		if (read_seqcount_retry(&obj->seq, seq))
-			goto unlock_retry;
-
 		if (fence_excl) {
 			ret = reservation_object_test_signaled_single(
 								fence_excl);
 			if (ret < 0)
-				goto unlock_retry;
+				goto retry;
+
+			if (read_seqcount_retry(&obj->seq, seq))
+				goto retry;
 		}
 	}
 
 	rcu_read_unlock();
 	return ret;
-
-unlock_retry:
-	rcu_read_unlock();
-	goto retry;
 }
 EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
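The reservation.c rework above leans on one pattern throughout: sample the seqcount inside rcu_read_lock(), copy out and reference everything needed, then redo the whole pass if read_seqcount_retry() reports that a writer intervened. A toy, userspace-flavoured sketch of that read-and-retry loop around a plain sequence counter (illustrative only, not the kernel's seqcount API):

#include <stdatomic.h>
#include <stdio.h>

/* Toy sequence counter: a writer would bump it to odd before updating and to
 * even afterwards; readers retry if the value changed across their read. */
static atomic_uint seq;
static int shared_value;

static unsigned read_begin(void)
{
	unsigned s;
	do {
		s = atomic_load(&seq);
	} while (s & 1);		/* writer in progress, wait it out */
	return s;
}

static int read_retry(unsigned s)
{
	return atomic_load(&seq) != s;	/* nonzero means: redo the read */
}

static int read_value(void)
{
	unsigned s;
	int v;

	do {
		s = read_begin();	/* like read_seqcount_begin() */
		v = shared_value;	/* copy out everything we need */
	} while (read_retry(s));	/* like read_seqcount_retry() */

	return v;
}

int main(void)
{
	shared_value = 42;
	printf("%d\n", read_value());
	return 0;
}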
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 454d3b3..0cb8d9d 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -324,8 +324,8 @@
 	}
 
 	sync_file = sync_file_create(&pt->base);
+	fence_put(&pt->base);
 	if (!sync_file) {
-		fence_put(&pt->base);
 		err = -ENOMEM;
 		goto err;
 	}
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 8a9cf92..5a9166a 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -67,9 +67,10 @@
  * sync_file_create() - creates a sync file
  * @fence:	fence to add to the sync_fence
  *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence. The sync_file can be released with
- * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ * Creates a sync_file containing @fence. This function acquires an additional
+ * reference to @fence for the newly-created &sync_file, if it succeeds. The
+ * sync_file can be released with fput(sync_file->file). Returns the
+ * sync_file or NULL in case of error.
  */
 struct sync_file *sync_file_create(struct fence *fence)
 {
@@ -79,7 +80,7 @@
 	if (!sync_file)
 		return NULL;
 
-	sync_file->fence = fence;
+	sync_file->fence = fence_get(fence);
 
 	snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
 		 fence->ops->get_driver_name(fence),
@@ -90,13 +91,6 @@
 }
 EXPORT_SYMBOL(sync_file_create);
 
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd:		fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
 static struct sync_file *sync_file_fdget(int fd)
 {
 	struct file *file = fget(fd);
@@ -311,10 +305,9 @@
 
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!poll_does_not_wait(wait) &&
-	    !test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
 		if (fence_add_callback(sync_file->fence, &sync_file->cb,
-				       fence_check_cb_func) < 0)
+					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
 	}
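With sync_file_create() now taking its own reference on the fence, a creator drops its reference unconditionally once the call returns, exactly as the sw_sync hunk above does; the sync_file keeps the fence alive on its own from that point. A minimal refcounting sketch of that contract, with hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

/* Minimal refcounted fence stand-in; hypothetical, for illustration only. */
struct fence { int refcount; };

static struct fence *fence_get(struct fence *f) { f->refcount++; return f; }
static void fence_put(struct fence *f)
{
	if (--f->refcount == 0)
		free(f);
}

struct sync_file { struct fence *fence; };

/* Mirrors the new contract: the sync_file takes its own reference. */
static struct sync_file *sync_file_create(struct fence *f)
{
	struct sync_file *s = malloc(sizeof(*s));
	if (!s)
		return NULL;
	s->fence = fence_get(f);
	return s;
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	if (!f)
		return 1;
	f->refcount = 1;			/* creator's reference */

	struct sync_file *s = sync_file_create(f);
	fence_put(f);				/* creator drops its ref either way */
	if (!s)
		return 1;

	printf("fence refcount now %d\n", s->fence->refcount);	/* 1, owned by s */
	return 0;
}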
 
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 4403f86..a8ec359 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -103,7 +103,7 @@
 
 struct erp_drvdata {
 	struct regmap *llcc_map;
-	phys_addr_t *llcc_banks;
+	u32 *llcc_banks;
 	u32 ecc_irq;
 	u32 num_banks;
 	u32 b_off;
@@ -353,12 +353,13 @@
 	struct erp_drvdata *drv;
 	struct edac_device_ctl_info *edev_ctl;
 	struct device *dev = &pdev->dev;
-	u32 *banks;
-	u32 i;
 
 	/* Allocate edac control info */
 	edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
-			NULL, 1, 1, NULL, 0, edac_device_alloc_index());
+			NULL, 0, 1, NULL, 0, edac_device_alloc_index());
+
+	if (!edev_ctl)
+		return -ENOMEM;
 
 	edev_ctl->dev = dev;
 	edev_ctl->mod_name = dev_name(dev);
@@ -404,20 +405,15 @@
 	drv->num_banks >>= LLCC_LB_CNT_SHIFT;
 
 	drv->llcc_banks = devm_kzalloc(&pdev->dev,
-		sizeof(phys_addr_t) * drv->num_banks, GFP_KERNEL);
+		sizeof(u32) * drv->num_banks, GFP_KERNEL);
 
-	if (!drv->num_banks) {
+	if (!drv->llcc_banks) {
 		dev_err(dev, "Cannot allocate memory for llcc_banks\n");
 		return -ENOMEM;
 	}
 
-	banks = devm_kzalloc(&pdev->dev,
-		sizeof(u32) * drv->num_banks, GFP_KERNEL);
-	if (!banks)
-		return -ENOMEM;
-
 	rc = of_property_read_u32_array(dev->parent->of_node,
-			"qcom,llcc-banks-off", banks, drv->num_banks);
+			"qcom,llcc-banks-off", drv->llcc_banks, drv->num_banks);
 	if (rc) {
 		dev_err(dev, "Cannot read llcc-banks-off property\n");
 		return -EINVAL;
@@ -430,9 +426,6 @@
 		return -EINVAL;
 	}
 
-	for (i = 0; i < drv->num_banks; i++)
-		drv->llcc_banks[i] = banks[i];
-
 	platform_set_drvdata(pdev, edev_ctl);
 
 	rc = edac_device_add_device(edev_ctl);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index b98ede7..6f9c9ac 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -208,9 +208,11 @@
  * OMAP's debounce time is in 31us steps
  *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
  * so we need to convert and round up to the closest unit.
+ *
+ * Return: 0 on success, negative error otherwise.
  */
-static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
-				    unsigned debounce)
+static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
+				   unsigned debounce)
 {
 	void __iomem		*reg;
 	u32			val;
@@ -218,11 +220,12 @@
 	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
-		return;
+		return -ENOTSUPP;
 
 	if (enable) {
 		debounce = DIV_ROUND_UP(debounce, 31) - 1;
-		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
+			return -EINVAL;
 	}
 
 	l = BIT(offset);
@@ -255,6 +258,8 @@
 		bank->context.debounce = debounce;
 		bank->context.debounce_en = val;
 	}
+
+	return 0;
 }
 
 /**
@@ -964,14 +969,20 @@
 {
 	struct gpio_bank *bank;
 	unsigned long flags;
+	int ret;
 
 	bank = gpiochip_get_data(chip);
 
 	raw_spin_lock_irqsave(&bank->lock, flags);
-	omap2_set_gpio_debounce(bank, offset, debounce);
+	ret = omap2_set_gpio_debounce(bank, offset, debounce);
 	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
-	return 0;
+	if (ret)
+		dev_info(chip->parent,
+			 "Could not set line %u debounce to %u microseconds (%d)",
+			 offset, debounce, ret);
+
+	return ret;
 }
 
 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
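The debounce change above stops silently masking out-of-range requests: the 31 us-per-step conversion still uses DIV_ROUND_UP(), but a value that no longer fits the 8-bit DEBOUNCETIME field is now rejected instead of being truncated. A small standalone check of that conversion, using the same constants:

#include <stdio.h>

#define DEBOUNCE_MASK 0xff			/* OMAP4_GPIO_DEBOUNCINGTIME_MASK */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Convert a nonzero debounce time in microseconds to the register field,
 * returning -1 (instead of clamping) when it does not fit. */
static int debounce_to_field(unsigned int usec)
{
	unsigned int field = DIV_ROUND_UP(usec, 31) - 1;

	if ((field & DEBOUNCE_MASK) != field)
		return -1;			/* driver returns -EINVAL here */
	return (int)field;
}

int main(void)
{
	printf("%d\n", debounce_to_field(31));		/* 0   -> 31 us */
	printf("%d\n", debounce_to_field(7936));	/* 255 -> max programmable */
	printf("%d\n", debounce_to_field(8000));	/* -1, out of range */
	return 0;
}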
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 6898aa0..2f936a7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -13,6 +13,7 @@
 	select I2C
 	select I2C_ALGOBIT
 	select DMA_SHARED_BUFFER
+	select SYNC_FILE
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 882404c..42448c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1173,23 +1173,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1297,14 +1284,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1319,7 +1306,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1358,7 +1345,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
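The dce_v10_0 change above, repeated for the other DCE generations below, folds a chain of 20.12 fixed-point divides into a single 64-bit integer division: dmif_size divided by the drain time (mc_latency + 512) / disp_clk becomes dmif_size * disp_clk / (mc_latency + 512). A quick numeric cross-check of the two forms with made-up values; the fx_* helpers are local stand-ins for dfixed_const()/dfixed_div()/dfixed_trunc(), and the small difference in the results is the precision the fixed-point intermediate throws away:

#include <stdint.h>
#include <stdio.h>

/* 20.12 fixed-point helpers, mimicking the dfixed_*() macros. */
static uint32_t fx(uint32_t v) { return v << 12; }
static uint32_t fx_div(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a << 12) / b);
}
static uint32_t fx_trunc(uint32_t a) { return a >> 12; }

int main(void)
{
	uint32_t dmif_size = 12288, disp_clk = 148500, mc_latency = 4000;

	/* Old form: dmif_size / ((mc_latency + 512) / disp_clk), in 20.12 math. */
	uint32_t drain = fx_div(fx(mc_latency + 512), fx(disp_clk));
	uint32_t old_bw = fx_trunc(fx_div(fx(dmif_size), drain));

	/* New form: one 64-bit integer division, as in the patch. */
	uint64_t new_bw = (uint64_t)dmif_size * disp_clk / (mc_latency + 512);

	printf("old=%u new=%llu\n", old_bw, (unsigned long long)new_bw);
	return 0;
}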
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 64a1df6..904dabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1140,23 +1140,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1264,14 +1251,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce10_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1286,7 +1273,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1325,7 +1312,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index fde6ee1..6d02bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -932,23 +932,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1057,18 +1044,18 @@
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce6_wm_params wm_low, wm_high;
 	u32 dram_channels;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 priority_a_mark = 0, priority_b_mark = 0;
 	u32 priority_a_cnt = PRIORITY_OFF;
 	u32 priority_b_cnt = PRIORITY_OFF;
-	u32 tmp, arb_control3;
+	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
 	fixed20_12 a, b, c;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 		priority_a_cnt = 0;
 		priority_b_cnt = 0;
 
@@ -1087,7 +1074,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1114,7 +1101,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1175,6 +1162,8 @@
 		c.full = dfixed_div(c, a);
 		priority_b_mark = dfixed_trunc(c);
 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+
+		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
 	}
 
 	/* select wm A */
@@ -1204,6 +1193,9 @@
 	/* save values for DPM */
 	amdgpu_crtc->line_time = line_time;
 	amdgpu_crtc->wm_high = latency_watermark_a;
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
 }
 
 /* watermark setup */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 7d9ffde..b1fb601 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1094,23 +1094,10 @@
 	a.full = dfixed_const(available_bandwidth);
 	b.full = dfixed_const(wm->num_heads);
 	a.full = dfixed_div(a, b);
+	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+	tmp = min(dfixed_trunc(a), tmp);
 
-	b.full = dfixed_const(mc_latency + 512);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(b, c);
-
-	c.full = dfixed_const(dmif_size);
-	b.full = dfixed_div(c, b);
-
-	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
-	b.full = dfixed_const(1000);
-	c.full = dfixed_const(wm->disp_clk);
-	b.full = dfixed_div(c, b);
-	c.full = dfixed_const(wm->bytes_per_pixel);
-	b.full = dfixed_mul(b, c);
-
-	lb_fill_bw = min(tmp, dfixed_trunc(b));
+	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
 
 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
 	b.full = dfixed_const(1000);
@@ -1218,14 +1205,14 @@
 {
 	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
 	struct dce8_wm_params wm_low, wm_high;
-	u32 pixel_period;
+	u32 active_time;
 	u32 line_time = 0;
 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
 	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
 
 	if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		pixel_period = 1000000 / (u32)mode->clock;
-		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
 
 		/* watermark for high clocks */
 		if (adev->pm.dpm_enabled) {
@@ -1240,7 +1227,7 @@
 
 		wm_high.disp_clk = mode->clock;
 		wm_high.src_width = mode->crtc_hdisplay;
-		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_high.active_time = active_time;
 		wm_high.blank_time = line_time - wm_high.active_time;
 		wm_high.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1279,7 +1266,7 @@
 
 		wm_low.disp_clk = mode->clock;
 		wm_low.src_width = mode->crtc_hdisplay;
-		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+		wm_low.active_time = active_time;
 		wm_low.blank_time = line_time - wm_low.active_time;
 		wm_low.interlaced = false;
 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4e19bde..99011621 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
+#include <linux/sync_file.h>
 
 #include "drm_crtc_internal.h"
 
@@ -292,6 +293,23 @@
 }
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
+static void set_out_fence_for_crtc(struct drm_atomic_state *state,
+				   struct drm_crtc *crtc, s32 __user *fence_ptr)
+{
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
+}
+
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+					  struct drm_crtc *crtc)
+{
+	s32 __user *fence_ptr;
+
+	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
+	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
+
+	return fence_ptr;
+}
+
 /**
  * drm_atomic_set_mode_for_crtc - set mode for CRTC
  * @state: the CRTC whose incoming state to update
@@ -496,6 +514,16 @@
 					&replaced);
 		state->color_mgmt_changed |= replaced;
 		return ret;
+	} else if (property == config->prop_out_fence_ptr) {
+		s32 __user *fence_ptr = u64_to_user_ptr(val);
+
+		if (!fence_ptr)
+			return 0;
+
+		if (put_user(-1, fence_ptr))
+			return -EFAULT;
+
+		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
 	} else if (crtc->funcs->atomic_set_property)
 		return crtc->funcs->atomic_set_property(crtc, state, property, val);
 	else
@@ -538,6 +566,8 @@
 		*val = (state->ctm) ? state->ctm->base.id : 0;
 	else if (property == config->gamma_lut_property)
 		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
+	else if (property == config->prop_out_fence_ptr)
+		*val = 0;
 	else if (crtc->funcs->atomic_get_property)
 		return crtc->funcs->atomic_get_property(crtc, state, property, val);
 	else
@@ -693,6 +723,17 @@
 		drm_atomic_set_fb_for_plane(state, fb);
 		if (fb)
 			drm_framebuffer_unreference(fb);
+	} else if (property == config->prop_in_fence_fd) {
+		if (state->fence)
+			return -EINVAL;
+
+		if (U642I64(val) == -1)
+			return 0;
+
+		state->fence = sync_file_get_fence(val);
+		if (!state->fence)
+			return -EINVAL;
+
 	} else if (property == config->prop_crtc_id) {
 		struct drm_crtc *crtc = drm_crtc_find(dev, val);
 		return drm_atomic_set_crtc_for_plane(state, crtc);
@@ -752,6 +793,8 @@
 
 	if (property == config->prop_fb_id) {
 		*val = (state->fb) ? state->fb->base.id : 0;
+	} else if (property == config->prop_in_fence_fd) {
+		*val = -1;
 	} else if (property == config->prop_crtc_id) {
 		*val = (state->crtc) ? state->crtc->base.id : 0;
 	} else if (property == config->prop_crtc_x) {
@@ -1152,6 +1195,36 @@
 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
 
 /**
+ * drm_atomic_set_fence_for_plane - set fence for plane
+ * @plane_state: atomic state object for the plane
+ * @fence: fence to use for the plane
+ *
+ * Helper to setup the plane_state fence in case it is not set yet.
+ * By using this, drivers don't need to worry about whether the user chose
+ * implicit or explicit fencing.
+ *
+ * This function will not set the fence to the state if it was set
+ * via explicit fencing interfaces on the atomic ioctl. It will instead
+ * drop the reference to the fence, as we are not storing it
+ * anywhere.
+ *
+ * Otherwise, if plane_state->fence is not set, this function
+ * just sets it to the received implicit fence.
+ */
+void
+drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+			       struct fence *fence)
+{
+	if (plane_state->fence) {
+		fence_put(fence);
+		return;
+	}
+
+	plane_state->fence = fence;
+}
+EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);
+
+/**
  * drm_atomic_set_crtc_for_connector - set crtc for connector
  * @conn_state: atomic state object for the connector
  * @crtc: crtc to use for the connector
@@ -1467,11 +1540,9 @@
  */
 
 static struct drm_pending_vblank_event *create_vblank_event(
-		struct drm_device *dev, struct drm_file *file_priv,
-		struct fence *fence, uint64_t user_data)
+		struct drm_device *dev, uint64_t user_data)
 {
 	struct drm_pending_vblank_event *e = NULL;
-	int ret;
 
 	e = kzalloc(sizeof *e, GFP_KERNEL);
 	if (!e)
@@ -1481,17 +1552,6 @@
 	e->event.base.length = sizeof(e->event);
 	e->event.user_data = user_data;
 
-	if (file_priv) {
-		ret = drm_event_reserve_init(dev, file_priv, &e->base,
-					     &e->event.base);
-		if (ret) {
-			kfree(e);
-			return NULL;
-		}
-	}
-
-	e->base.fence = fence;
-
 	return e;
 }
 
@@ -1596,6 +1656,206 @@
 }
 EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 
+/**
+ * DOC: explicit fencing properties
+ *
+ * Explicit fencing allows userspace to control the buffer synchronization
+ * between devices. A fence or a group of fences is transferred to/from
+ * userspace using Sync File fds and there are two DRM properties for that.
+ * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
+ * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
+ *
+ * By contrast, with implicit fencing the kernel keeps track of any
+ * ongoing rendering, and automatically ensures that the atomic update waits
+ * for any pending rendering to complete. For shared buffers represented with
+ * a struct &dma_buf this is tracked in &reservation_object structures.
+ * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
+ * whereas explicit fencing is what Android wants.
+ *
+ * "IN_FENCE_FD”:
+ *	Use this property to pass a fence that DRM should wait on before
+ *	proceeding with the Atomic Commit request and show the framebuffer for
+ *	the plane on the screen. The fence can be either a normal fence or a
+ *	merged one, the sync_file framework will handle both cases and use a
+ *	fence_array if a merged fence is received. Passing -1 here means no
+ *	fences to wait on.
+ *
+ *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
+ *	it will only check if the Sync File is a valid one.
+ *
+ *	On the driver side the fence is stored on the @fence parameter of
+ *	struct &drm_plane_state. Drivers which also support implicit fencing
+ *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
+ *	to make sure there's consistent behaviour between drivers in precedence
+ *	of implicit vs. explicit fencing.
+ *
+ * "OUT_FENCE_PTR”:
+ *	Use this property to pass a file descriptor pointer to DRM. Once the
+ *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
+ *	the file descriptor number of a Sync File. This Sync File contains the
+ *	CRTC fence that will be signaled when all framebuffers present on the
+ *	Atomic Commit request for that given CRTC are scanned out on the
+ *	screen.
+ *
+ *	The Atomic Commit request fails if an invalid pointer is passed. If the
+ *	Atomic Commit request fails for any other reason the out fence fd
+ *	returned will be -1. On an Atomic Commit with the
+ *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
+ *
+ *	Note that out-fences don't have a special interface to drivers and are
+ *	internally represented by a struct &drm_pending_vblank_event in struct
+ *	&drm_crtc_state, which is also used by the nonblocking atomic commit
+ *	helpers and for the DRM event handling for existing userspace.
+ */
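A minimal userspace sketch of the two properties above (illustrative only, not part of this patch). It uses the libdrm atomic API; plane_id, crtc_id and the two property ids are assumed to have been looked up with drmModeObjectGetProperties(), and the usual FB_ID/CRTC_ID/mode properties are omitted for brevity:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int commit_with_fences(int fd, uint32_t plane_id, uint32_t crtc_id,
			      uint32_t in_fence_fd_prop,
			      uint32_t out_fence_ptr_prop,
			      int in_fence_fd, int32_t *out_fence_fd)
{
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	int ret;

	if (!req)
		return -ENOMEM;

	*out_fence_fd = -1;

	/* sync_file fd the kernel waits on before scanning out (-1: none) */
	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
				 (uint64_t)(int64_t)in_fence_fd);
	/* the kernel writes a sync_file fd for this CRTC into *out_fence_fd */
	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
				 (uint64_t)(uintptr_t)out_fence_fd);

	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
	drmModeAtomicFree(req);
	return ret;
}

On success the returned out fence fd refers to a sync_file that signals once the committed framebuffers are on the screen; the caller owns the fd and should close() it when done with it.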
+
+struct drm_out_fence_state {
+	s32 __user *out_fence_ptr;
+	struct sync_file *sync_file;
+	int fd;
+};
+
+static int setup_out_fence(struct drm_out_fence_state *fence_state,
+			   struct fence *fence)
+{
+	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fence_state->fd < 0)
+		return fence_state->fd;
+
+	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
+		return -EFAULT;
+
+	fence_state->sync_file = sync_file_create(fence);
+	if (!fence_state->sync_file)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int prepare_crtc_signaling(struct drm_device *dev,
+				  struct drm_atomic_state *state,
+				  struct drm_mode_atomic *arg,
+				  struct drm_file *file_priv,
+				  struct drm_out_fence_state **fence_state,
+				  unsigned int *num_fences)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i, ret;
+
+	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
+		return 0;
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		s32 __user *fence_ptr;
+
+		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
+			struct drm_pending_vblank_event *e;
+
+			e = create_vblank_event(dev, arg->user_data);
+			if (!e)
+				return -ENOMEM;
+
+			crtc_state->event = e;
+		}
+
+		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+			struct drm_pending_vblank_event *e = crtc_state->event;
+
+			if (!file_priv)
+				continue;
+
+			ret = drm_event_reserve_init(dev, file_priv, &e->base,
+						     &e->event.base);
+			if (ret) {
+				kfree(e);
+				crtc_state->event = NULL;
+				return ret;
+			}
+		}
+
+		if (fence_ptr) {
+			struct fence *fence;
+			struct drm_out_fence_state *f;
+
+			f = krealloc(*fence_state, sizeof(**fence_state) *
+				     (*num_fences + 1), GFP_KERNEL);
+			if (!f)
+				return -ENOMEM;
+
+			memset(&f[*num_fences], 0, sizeof(*f));
+
+			f[*num_fences].out_fence_ptr = fence_ptr;
+			*fence_state = f;
+
+			fence = drm_crtc_create_fence(crtc);
+			if (!fence)
+				return -ENOMEM;
+
+			ret = setup_out_fence(&f[(*num_fences)++], fence);
+			if (ret) {
+				fence_put(fence);
+				return ret;
+			}
+
+			crtc_state->event->base.fence = fence;
+		}
+	}
+
+	return 0;
+}
+
+static void complete_crtc_signaling(struct drm_device *dev,
+				    struct drm_atomic_state *state,
+				    struct drm_out_fence_state *fence_state,
+				    unsigned int num_fences,
+				    bool install_fds)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	int i;
+
+	if (install_fds) {
+		for (i = 0; i < num_fences; i++)
+			fd_install(fence_state[i].fd,
+				   fence_state[i].sync_file->file);
+
+		kfree(fence_state);
+		return;
+	}
+
+	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+		struct drm_pending_vblank_event *event = crtc_state->event;
+		/*
+		 * Free the allocated event. drm_atomic_helper_setup_commit
+		 * can allocate an event too, so only free it if it's ours
+		 * to prevent a double free in drm_atomic_state_clear.
+		 */
+		if (event && (event->base.fence || event->base.file_priv)) {
+			drm_event_cancel_free(dev, &event->base);
+			crtc_state->event = NULL;
+		}
+	}
+
+	if (!fence_state)
+		return;
+
+	for (i = 0; i < num_fences; i++) {
+		if (fence_state[i].sync_file)
+			fput(fence_state[i].sync_file->file);
+		if (fence_state[i].fd >= 0)
+			put_unused_fd(fence_state[i].fd);
+
+		/* If this fails, log an error to the user */
+		if (fence_state[i].out_fence_ptr &&
+		    put_user(-1, fence_state[i].out_fence_ptr))
+			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
+	}
+
+	kfree(fence_state);
+}
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1608,11 +1868,10 @@
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_plane *plane;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
+	struct drm_out_fence_state *fence_state = NULL;
 	unsigned plane_mask;
 	int ret = 0;
-	unsigned int i, j;
+	unsigned int i, j, num_fences = 0;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -1727,20 +1986,10 @@
 		drm_mode_object_unreference(obj);
 	}
 
-	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *e;
-
-			e = create_vblank_event(dev, file_priv, NULL,
-						arg->user_data);
-			if (!e) {
-				ret = -ENOMEM;
-				goto out;
-			}
-
-			crtc_state->event = e;
-		}
-	}
+	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
+				     &num_fences);
+	if (ret)
+		goto out;
 
 	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
 		/*
@@ -1757,20 +2006,7 @@
 out:
 	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
-	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-		/*
-		 * Free the allocated event. drm_atomic_helper_setup_commit
-		 * can allocate an event too, so only free it if it's ours
-		 * to prevent a double free in drm_atomic_state_clear.
-		 */
-		for_each_crtc_in_state(state, crtc, crtc_state, i) {
-			struct drm_pending_vblank_event *event = crtc_state->event;
-			if (event && (event->base.fence || event->base.file_priv)) {
-				drm_event_cancel_free(dev, &event->base);
-				crtc_state->event = NULL;
-			}
-		}
-	}
+	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);
 
 	if (ret == -EDEADLK) {
 		drm_atomic_state_clear(state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 50acd79..f34b4e8 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3166,6 +3166,9 @@
 {
 	if (state->fb)
 		drm_framebuffer_unreference(state->fb);
+
+	if (state->fence)
+		fence_put(state->fence);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2d7bedf..79b3d52 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/fence.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
@@ -141,6 +142,54 @@
 	}
 }
 
+static const struct fence_ops drm_crtc_fence_ops;
+
+static struct drm_crtc *fence_to_crtc(struct fence *fence)
+{
+	BUG_ON(fence->ops != &drm_crtc_fence_ops);
+	return container_of(fence->lock, struct drm_crtc, fence_lock);
+}
+
+static const char *drm_crtc_fence_get_driver_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->dev->driver->name;
+}
+
+static const char *drm_crtc_fence_get_timeline_name(struct fence *fence)
+{
+	struct drm_crtc *crtc = fence_to_crtc(fence);
+
+	return crtc->timeline_name;
+}
+
+static bool drm_crtc_fence_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static const struct fence_ops drm_crtc_fence_ops = {
+	.get_driver_name = drm_crtc_fence_get_driver_name,
+	.get_timeline_name = drm_crtc_fence_get_timeline_name,
+	.enable_signaling = drm_crtc_fence_enable_signaling,
+	.wait = fence_default_wait,
+};
+
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc)
+{
+	struct fence *fence;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (!fence)
+		return NULL;
+
+	fence_init(fence, &drm_crtc_fence_ops, &crtc->fence_lock,
+		       crtc->fence_context, ++crtc->fence_seqno);
+
+	return fence;
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -198,6 +247,11 @@
 		return -ENOMEM;
 	}
 
+	crtc->fence_context = fence_context_alloc(1);
+	spin_lock_init(&crtc->fence_lock);
+	snprintf(crtc->timeline_name, sizeof(crtc->timeline_name),
+		 "CRTC:%d-%s", crtc->base.id, crtc->name);
+
 	crtc->base.properties = &crtc->properties;
 
 	list_add_tail(&crtc->head, &config->crtc_list);
@@ -213,6 +267,8 @@
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&crtc->base, config->prop_active, 0);
 		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
+		drm_object_attach_property(&crtc->base,
+					   config->prop_out_fence_ptr, 0);
 	}
 
 	return 0;
@@ -365,6 +421,18 @@
 		return -ENOMEM;
 	dev->mode_config.prop_fb_id = prop;
 
+	prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC,
+			"IN_FENCE_FD", -1, INT_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_in_fence_fd = prop;
+
+	prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC,
+			"OUT_FENCE_PTR", 0, U64_MAX);
+	if (!prop)
+		return -ENOMEM;
+	dev->mode_config.prop_out_fence_ptr = prop;
+
 	prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
 			"CRTC_ID", DRM_MODE_OBJECT_CRTC);
 	if (!prop)
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c48ba02..df2b51a 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -41,6 +41,8 @@
 			    const struct drm_display_mode *mode,
 			    const struct drm_framebuffer *fb);
 
+struct fence *drm_crtc_create_fence(struct drm_crtc *crtc);
+
 void drm_fb_release(struct drm_file *file_priv);
 
 /* dumb buffer support IOCTLs */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0bc0afb..4e5ba7e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -76,6 +76,8 @@
 #define EDID_QUIRK_FORCE_12BPC			(1 << 9)
 /* Force 6bpc */
 #define EDID_QUIRK_FORCE_6BPC			(1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC			(1 << 11)
 
 struct detailed_mode_closure {
 	struct drm_connector *connector;
@@ -90,6 +92,14 @@
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+/* Enum storing luminance types for HDR blocks in EDID */
+enum luminance_value {
+	NO_LUMINANCE_DATA = 3,
+	MAXIMUM_LUMINANCE = 4,
+	FRAME_AVERAGE_LUMINANCE = 5,
+	MINIMUM_LUMINANCE = 6
+};
+
 static const struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -118,6 +128,9 @@
 	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
 	  EDID_QUIRK_DETAILED_IN_CM },
 
+	/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+	{ "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
 	/* LG Philips LCD LP154W01-A5 */
 	{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
 	{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -997,6 +1010,221 @@
 		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
 	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+	/* 65 - 1280x720@24Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 66 - 1280x720@25Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+		   3740, 3960, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 67 - 1280x720@30Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+		   3080, 3300, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 68 - 1280x720@50Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 69 - 1280x720@60Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 70 - 1280x720@100Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+		   1760, 1980, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 71 - 1280x720@120Hz */
+	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+		   1430, 1650, 0, 720, 725, 730, 750, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 72 - 1920x1080@24Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 73 - 1920x1080@25Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 74 - 1920x1080@30Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 75 - 1920x1080@50Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 76 - 1920x1080@60Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	  .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 77 - 1920x1080@100Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 78 - 1920x1080@120Hz */
+	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	 .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 79 - 1680x720@24Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+		3080, 3300, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 80 - 1680x720@25Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+		2948, 3168, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 81 - 1680x720@30Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+		2420, 2640, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 82 - 1680x720@50Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 83 - 1680x720@60Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+		1980, 2200, 0, 720, 725, 730, 750, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 84 - 1680x720@100Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 85 - 1680x720@120Hz */
+	{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+		1780, 2000, 0, 720, 725, 730, 825, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 86 - 2560x1080@24Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+		3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 87 - 2560x1080@25Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+		3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 88 - 2560x1080@30Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+		3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 89 - 2560x1080@50Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 90 - 2560x1080@60Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+		2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 91 - 2560x1080@100Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+		2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 92 - 2560x1080@120Hz */
+	{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+		3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+	/* 93 - 3840x2160p@24Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,},
+	/* 94 - 3840x2160p@25Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 95 - 3840x2160p@30Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 96 - 3840x2160p@50Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 97 - 3840x2160p@60Hz 16:9 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+	/* 98 - 4096x2160p@24Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 99 - 4096x2160p@25Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 100 - 4096x2160p@30Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 101 - 4096x2160p@50Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+		5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 102 - 4096x2160p@60Hz 256:135 */
+	{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+		4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+	/* 103 - 3840x2160p@24Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+		5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 104 - 3840x2160p@25Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 105 - 3840x2160p@30Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 106 - 3840x2160p@50Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+		4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+	/* 107 - 3840x2160p@60Hz 64:27 */
+	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+		4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+		DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+	.vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
 };
 
 /*
@@ -2514,12 +2742,15 @@
 
 	return closure.modes;
 }
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define EXTENDED_TAG  0x07
 #define VIDEO_CAPABILITY_BLOCK	0x07
+#define Y420_VIDEO_DATA_BLOCK	0x0E
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
@@ -3168,6 +3399,21 @@
 	return hdmi_id == HDMI_IEEE_OUI;
 }
 
+static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (cea_db_tag(db) != VENDOR_BLOCK)
+		return false;
+
+	if (cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
 #define for_each_cea_db(cea, i, start, end) \
 	for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
@@ -3287,6 +3533,227 @@
 }
 
 static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
+			"scdc %s, "
+			"rr %s, "
+			"3D flags 0x%x, "
+			"scramble %s\n",
+			connector->max_tmds_char,
+			connector->scdc_present ? "available" : "not available",
+			connector->rr_capable ? "capable" : "not capable",
+			connector->flags_3d,
+			connector->supports_scramble ?
+				"supported" : "not supported");
+}
+
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA Video Capability Data Block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+	/*
+	 * Check if the sink specifies underscan
+	 * support for:
+	 * BIT 5: preferred video format
+	 * BIT 3: IT video format
+	 * BIT 1: CE video format
+	 */
+
+	connector->pt_scan_info =
+		(db[2] & (BIT(4) | BIT(5))) >> 4;
+	connector->it_scan_info =
+		(db[2] & (BIT(3) | BIT(2))) >> 2;
+	connector->ce_scan_info =
+		db[2] & (BIT(1) | BIT(0));
+
+	DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+			  (int) connector->pt_scan_info,
+			  (int) connector->it_scan_info,
+			  (int) connector->ce_scan_info);
+}
+
+static bool drm_edid_is_luminance_value_present(
+u32 block_length, enum luminance_value value)
+{
+	return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+
+	u8 len = 0;
+
+	if (!db)
+		return;
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses all extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+				db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static u8 *
+drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid)
+		return NULL;
+
+	cea = drm_find_cea_extension(edid);
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((cea_db_tag(db) == EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * add_YCbCr420VDB_modes - add the modes found in the YCbCr420 VDB block
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses the YCbCr420 VDB block and adds the modes to @connector.
+ */
+static int
+add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
+{
+
+	const u8 *db = NULL;
+	u32 i = 0;
+	u32 modes = 0;
+	u32 video_format = 0;
+	u8 len = 0;
+
+	/* Find the YCbCr420 VDB */
+	db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
+	/* Offset to byte 3 */
+	if (db) {
+		len = db[0] & 0x1F;
+		db += 2;
+		for (i = 0; i < len - 1; i++) {
+			struct drm_display_mode *mode;
+
+			video_format = *(db + i) & 0x7F;
+			mode = drm_display_mode_from_vic_index(connector,
+					db, len-1, i);
+			if (mode) {
+				DRM_DEBUG_KMS("Adding mode for vic = %d\n",
+				video_format);
+				drm_mode_probed_add(connector, mode);
+				modes++;
+			}
+		}
+	}
+	return modes;
+}
+
+static void
 monitor_name(struct detailed_timing *t, void *data)
 {
 	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@ -3410,6 +3877,9 @@
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
 					drm_parse_hdmi_vsdb_audio(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
 				break;
 			default:
 				break;
@@ -3840,6 +4310,37 @@
 	}
 }
 
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == VENDOR_BLOCK) {
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db)) {
+					drm_parse_hdmi_vsdb_video(
+						connector, db);
+					drm_parse_hdmi_vsdb_audio(
+						connector, db);
+				}
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_hf_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
+			}
+		}
+	}
+}
+
 static void drm_add_display_info(struct drm_connector *connector,
 				 struct edid *edid)
 {
@@ -3877,6 +4378,11 @@
 			  connector->name, info->bpc);
 	}
 
+	/* Extract audio and video latency fields for the sink */
+	drm_hdmi_extract_vsdbs_info(connector, edid);
+	/* Extract info from extended tag blocks */
+	drm_hdmi_extract_extended_blk_info(connector, edid);
+
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
 		return;
@@ -4091,6 +4597,7 @@
 	num_modes += add_cea_modes(connector, edid);
 	num_modes += add_alternate_cea_modes(connector, edid);
 	num_modes += add_displayid_detailed_modes(connector, edid);
+	num_modes += add_YCbCr420VDB_modes(connector, edid);
 	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
 		num_modes += add_inferred_modes(connector, edid);
 
@@ -4105,6 +4612,9 @@
 	if (quirks & EDID_QUIRK_FORCE_8BPC)
 		connector->display_info.bpc = 8;
 
+	if (quirks & EDID_QUIRK_FORCE_10BPC)
+		connector->display_info.bpc = 10;
+
 	if (quirks & EDID_QUIRK_FORCE_12BPC)
 		connector->display_info.bpc = 12;
 
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 1fd6eac..52629b6 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -18,13 +18,16 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/reservation.h>
 
 #define DEFAULT_FBDEFIO_DELAY_MS 50
 
@@ -265,6 +268,38 @@
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
+/**
+ * drm_fb_cma_prepare_fb() - Prepare CMA framebuffer
+ * @plane: Which plane
+ * @state: Plane state to attach the fence to
+ *
+ * This should be put into the prepare_fb hook of struct &drm_plane_helper_funcs.
+ *
+ * This function checks if the plane FB has a dma-buf attached, extracts
+ * the exclusive fence and attaches it to the plane state for the atomic
+ * helper to wait on.
+ *
+ * There is no need for cleanup_fb for CMA based framebuffer drivers.
+ */
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state)
+{
+	struct dma_buf *dma_buf;
+	struct fence *fence;
+
+	if ((plane->state->fb == state->fb) || !state->fb)
+		return 0;
+
+	dma_buf = drm_fb_cma_get_gem_obj(state->fb, 0)->base.dma_buf;
+	if (dma_buf) {
+		fence = reservation_object_get_excl_rcu(dma_buf->resv);
+		drm_atomic_set_fence_for_plane(state, fence);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb);
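For illustration (not part of this patch), a CMA-based driver would typically wire this helper into its plane helper funcs so the atomic helpers wait on the fence attached above; the check/update callbacks named here are hypothetical driver functions:

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.prepare_fb	= drm_fb_cma_prepare_fb,
	.atomic_check	= example_plane_atomic_check,	/* hypothetical */
	.atomic_update	= example_plane_atomic_update,	/* hypothetical */
};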
+
 #ifdef CONFIG_DEBUG_FS
 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f5815e1..fe00bea 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -663,6 +663,10 @@
 		list_del(&p->pending_link);
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	if (p->fence)
+		fence_put(p->fence);
+
 	kfree(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 249c0ae..3957ef8 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -137,6 +137,7 @@
 
 	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
+		drm_object_attach_property(&plane->base, config->prop_in_fence_fd, -1);
 		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
 		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index fd7c912..79e9d36 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -774,20 +774,23 @@
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
 			    drm_mode_duplicate(dev, scan);
+			DRM_DEBUG_KMS("Using mode from DDC\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? do we need this? */
-	if (mode_dev->vbt_mode)
+	if (dev_priv->lfp_lvds_vbt_mode) {
 		mode_dev->panel_fixed_mode =
-		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-	if (!mode_dev->panel_fixed_mode)
-		if (dev_priv->lfp_lvds_vbt_mode)
-			mode_dev->panel_fixed_mode =
-				drm_mode_duplicate(dev,
-					dev_priv->lfp_lvds_vbt_mode);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using mode from VBT\n");
+			goto out;
+		}
+	}
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
@@ -804,6 +807,7 @@
 		if (mode_dev->panel_fixed_mode) {
 			mode_dev->panel_fixed_mode->type |=
 			    DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using pre-programmed mode\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f46aac1..c75f4bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -420,6 +420,11 @@
 		return 0;
 	}
 
+	if (intel_vgpu_active(dev_priv)) {
+		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+		return 0;
+	}
+
 #ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
 		DRM_INFO("DMAR active, disabling use of stolen memory\n");
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4112bef..9ded825 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,10 +1,20 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
 ccflags-y += -Idrivers/gpu/drm/msm/sde
 ccflags-y += -Idrivers/media/platform/msm/sde/rotator
 
 msm_drm-y := \
+	dp/dp_usbpd.o \
+	dp/dp_parser.o \
+	dp/dp_power.o \
+	dp/dp_catalog.o \
+	dp/dp_aux.o \
+	dp/dp_panel.o \
+	dp/dp_link.o \
+	dp/dp_ctrl.o \
+	dp/dp_display.o \
+	dp/dp_drm.o \
 	hdmi/hdmi.o \
 	hdmi/hdmi_audio.o \
 	hdmi/hdmi_bridge.o \
@@ -51,6 +61,7 @@
 	sde/sde_hw_reg_dma_v1_color_proc.o \
 	sde/sde_hw_color_proc_v4.o \
 	sde/sde_hw_ad4.o \
+	sde_edid_parser.o
 
 msm_drm-$(CONFIG_DRM_SDE_RSC) += sde_rsc.o \
 	sde_rsc_hw.o \
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index 30f477e..70581e2 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -20,14 +20,8 @@
 
 #define DP_AUX_ENUM_STR(x)		#x
 
-struct aux_buf {
-	u8 *start;      /* buffer start addr */
-	u8 *end;	/* buffer end addr */
-	u8 *data;       /* data pou32er */
-	u32 size;       /* size of buffer */
-	u32 len;	/* dara length */
-	u8 trans_num;   /* transaction number */
-	enum aux_tx_mode tx_mode;
+enum {
+	DP_AUX_DATA_INDEX_WRITE = BIT(31),
 };
 
 struct dp_aux_private {
@@ -38,14 +32,12 @@
 	struct mutex mutex;
 	struct completion comp;
 
-	struct aux_cmd *cmds;
-	struct aux_buf txp;
-	struct aux_buf rxp;
-
 	u32 aux_error_num;
+	bool cmd_busy;
+	bool native;
+	bool read;
 
-	u8 txbuf[256];
-	u8 rxbuf[256];
+	struct drm_dp_aux drm_aux;
 };
 
 static char *dp_aux_get_error(u32 aux_error)
@@ -68,158 +60,104 @@
 	}
 }
 
-static void dp_aux_buf_init(struct aux_buf *buf, u8 *data, u32 size)
+static u32 dp_aux_write(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
 {
-	buf->start     = data;
-	buf->size      = size;
-	buf->data      = buf->start;
-	buf->end       = buf->start + buf->size;
-	buf->len       = 0;
-	buf->trans_num = 0;
-	buf->tx_mode   = AUX_NATIVE;
-}
+	u32 data[4], reg, len;
+	u8 *msgdata = msg->buffer;
+	int const aux_cmd_fifo_len = 128;
+	int i = 0;
 
-static void dp_aux_buf_set(struct dp_aux_private *aux)
-{
-	init_completion(&aux->comp);
-	mutex_init(&aux->mutex);
-
-	dp_aux_buf_init(&aux->txp, aux->txbuf, sizeof(aux->txbuf));
-	dp_aux_buf_init(&aux->rxp, aux->rxbuf, sizeof(aux->rxbuf));
-}
-
-static void dp_aux_buf_reset(struct aux_buf *buf)
-{
-	buf->data      = buf->start;
-	buf->len       = 0;
-	buf->trans_num = 0;
-	buf->tx_mode   = AUX_NATIVE;
-
-	memset(buf->start, 0x0, 256);
-}
-
-static void dp_aux_buf_push(struct aux_buf *buf, u32 len)
-{
-	buf->data += len;
-	buf->len  += len;
-}
-
-static u32 dp_aux_buf_trailing(struct aux_buf *buf)
-{
-	return (u32)(buf->end - buf->data);
-}
-
-static u32 dp_aux_add_cmd(struct aux_buf *buf, struct aux_cmd *cmd)
-{
-	u8 data;
-	u8 *bp, *cp;
-	u32 i, len;
-
-	if (cmd->ex_mode == AUX_READ)
+	if (aux->read)
 		len = 4;
 	else
-		len = cmd->len + 4;
-
-	if (dp_aux_buf_trailing(buf) < len) {
-		pr_err("buf trailing error\n");
-		return 0;
-	}
+		len = msg->size + 4;
 
 	/*
 	 * cmd fifo only has depth of 144 bytes
 	 * limit buf length to 128 bytes here
 	 */
-	if ((buf->len + len) > 128) {
+	if (len > aux_cmd_fifo_len) {
 		pr_err("buf len error\n");
 		return 0;
 	}
 
-	bp = buf->data;
-	data = cmd->addr >> 16;
-	data &= 0x0f;  /* 4 addr bits */
+	/* Pack cmd and write to HW */
+	data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+	if (aux->read)
+		data[0] |=  BIT(4); /* R/W */
 
-	if (cmd->ex_mode == AUX_READ)
-		data |=  BIT(4);
+	data[1] = (msg->address >> 8) & 0xff;	/* addr[15:8] */
+	data[2] = msg->address & 0xff;		/* addr[7:0] */
+	data[3] = (msg->size - 1) & 0xff;	/* len[7:0] */
 
-	*bp++ = data;
-	*bp++ = cmd->addr >> 8;
-	*bp++ = cmd->addr;
-	*bp++ = cmd->len - 1;
-
-	if (cmd->ex_mode == AUX_WRITE) {
-		cp = cmd->buf;
-
-		for (i = 0; i < cmd->len; i++)
-			*bp++ = *cp++;
-	}
-
-	dp_aux_buf_push(buf, len);
-
-	buf->tx_mode = cmd->tx_mode;
-
-	buf->trans_num++;
-
-	return cmd->len - 1;
-}
-
-static u32 dp_aux_cmd_fifo_tx(struct dp_aux_private *aux)
-{
-	u8 *dp;
-	u32 data, len, cnt;
-	struct aux_buf *tp = &aux->txp;
-
-	len = tp->len;
-	if (len == 0) {
-		pr_err("invalid len\n");
-		return 0;
-	}
-
-	cnt = 0;
-	dp = tp->start;
-
-	while (cnt < len) {
-		data = *dp;
-		data <<= 8;
-		data &= 0x00ff00;
-		if (cnt == 0)
-			data |= BIT(31);
-
-		aux->catalog->data = data;
+	for (i = 0; i < len; i++) {
+		reg = (i < 4) ? data[i] : msgdata[i - 4];
+		reg = ((reg) << 8) & 0x0000ff00; /* index = 0, write */
+		if (i == 0)
+			reg |= DP_AUX_DATA_INDEX_WRITE;
+		aux->catalog->data = reg;
 		aux->catalog->write_data(aux->catalog);
-
-		cnt++;
-		dp++;
 	}
 
-	data = (tp->trans_num - 1);
-	if (tp->tx_mode == AUX_I2C) {
-		data |= BIT(8); /* I2C */
-		data |= BIT(10); /* NO SEND ADDR */
-		data |= BIT(11); /* NO SEND STOP */
-	}
+	reg = 0; /* Transaction number == 1 */
+	if (!aux->native) /* i2c */
+		reg |= (BIT(8) | BIT(10) | BIT(11));
 
-	data |= BIT(9); /* GO */
-	aux->catalog->data = data;
+	reg |= BIT(9);
+	aux->catalog->data = reg;
 	aux->catalog->write_trans(aux->catalog);
 
-	return tp->len;
+	return len;
 }
 
-static u32 dp_cmd_fifo_rx(struct dp_aux_private *aux, u32 len)
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
+{
+	u32 ret = 0, len = 0, timeout;
+	int const aux_timeout_ms = HZ/4;
+
+	reinit_completion(&aux->comp);
+
+	len = dp_aux_write(aux, msg);
+	if (len == 0) {
+		pr_err("DP AUX write failed\n");
+		return -EINVAL;
+	}
+
+	timeout = wait_for_completion_timeout(&aux->comp, aux_timeout_ms);
+	if (!timeout) {
+		pr_err("aux write timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	pr_debug("aux status %s\n",
+		dp_aux_get_error(aux->aux_error_num));
+
+	if (aux->aux_error_num == DP_AUX_ERR_NONE)
+		ret = len;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+		struct drm_dp_aux_msg *msg)
 {
 	u32 data;
 	u8 *dp;
 	u32 i;
-	struct aux_buf *rp = &aux->rxp;
+	u32 len = msg->size;
 
 	data = 0;
-	data |= BIT(31); /* INDEX_WRITE */
+	data |= DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
 	data |= BIT(0);  /* read */
 
 	aux->catalog->data = data;
 	aux->catalog->write_data(aux->catalog);
 
-	dp = rp->data;
+	dp = msg->buffer;
 
 	/* discard first byte */
 	data = aux->catalog->read_data(aux->catalog);
@@ -228,14 +166,11 @@
 		data = aux->catalog->read_data(aux->catalog);
 		*dp++ = (u8)((data >> 8) & 0xff);
 	}
-
-	rp->len = len;
-	return len;
 }
 
 static void dp_aux_native_handler(struct dp_aux_private *aux)
 {
-	u32 isr = aux->catalog->isr1;
+	u32 isr = aux->catalog->isr;
 
 	if (isr & DP_INTR_AUX_I2C_DONE)
 		aux->aux_error_num = DP_AUX_ERR_NONE;
@@ -251,7 +186,7 @@
 
 static void dp_aux_i2c_handler(struct dp_aux_private *aux)
 {
-	u32 isr = aux->catalog->isr1;
+	u32 isr = aux->catalog->isr;
 
 	if (isr & DP_INTR_AUX_I2C_DONE) {
 		if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
@@ -285,217 +220,81 @@
 
 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
-	aux->catalog->get_irq(aux->catalog);
+	aux->catalog->get_irq(aux->catalog, aux->cmd_busy);
 
-	if (aux->cmds->tx_mode == AUX_NATIVE)
+	if (!aux->cmd_busy)
+		return;
+
+	if (aux->native)
 		dp_aux_native_handler(aux);
 	else
 		dp_aux_i2c_handler(aux);
 }
 
-
-
-static int dp_aux_write(struct dp_aux_private *aux)
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It calls the catalog reset() op to reset the AUX channel if the
+ * transaction times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *drm_aux,
+		struct drm_dp_aux_msg *msg)
 {
-	struct aux_cmd *cm;
-	struct aux_buf *tp;
-	u32 len, ret, timeout;
+	ssize_t ret;
+	int const aux_cmd_native_max = 16;
+	int const aux_cmd_i2c_max = 128;
+	struct dp_aux_private *aux = container_of(drm_aux,
+		struct dp_aux_private, drm_aux);
 
 	mutex_lock(&aux->mutex);
 
-	tp = &aux->txp;
-	dp_aux_buf_reset(tp);
+	aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+	aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+	aux->cmd_busy = true;
 
-	cm = aux->cmds;
-	while (cm) {
-		ret = dp_aux_add_cmd(tp, cm);
-		if (ret <= 0)
-			break;
-
-		if (!cm->next)
-			break;
-		cm++;
+	/* Ignore address only message */
+	if ((msg->size == 0) || (msg->buffer == NULL)) {
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+		ret = msg->size;
+		goto unlock_exit;
 	}
 
-	reinit_completion(&aux->comp);
-
-	len = dp_aux_cmd_fifo_tx(aux);
-
-	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
-	if (!timeout)
-		pr_err("aux write timeout\n");
-
-	pr_debug("aux status %s\n",
-		dp_aux_get_error(aux->aux_error_num));
-
-	if (aux->aux_error_num == DP_AUX_ERR_NONE)
-		ret = len;
-	else
-		ret = aux->aux_error_num;
-
-	mutex_unlock(&aux->mutex);
-	return  ret;
-}
-
-static int dp_aux_read(struct dp_aux_private *aux)
-{
-	struct aux_cmd *cm;
-	struct aux_buf *tp, *rp;
-	u32 len, ret, timeout;
-
-	mutex_lock(&aux->mutex);
-
-	tp = &aux->txp;
-	rp = &aux->rxp;
-
-	dp_aux_buf_reset(tp);
-	dp_aux_buf_reset(rp);
-
-	cm = aux->cmds;
-	len = 0;
-
-	while (cm) {
-		ret = dp_aux_add_cmd(tp, cm);
-		len += cm->len;
-
-		if (ret <= 0)
-			break;
-
-		if (!cm->next)
-			break;
-		cm++;
+	/* msg sanity check */
+	if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+		(msg->size > aux_cmd_i2c_max)) {
+		pr_err("%s: invalid msg: size(%zu), request(%x)\n",
+			__func__, msg->size, msg->request);
+		ret = -EINVAL;
+		goto unlock_exit;
 	}
 
-	reinit_completion(&aux->comp);
+	ret = dp_aux_cmd_fifo_tx(aux, msg);
+	if (ret < 0) {
+		aux->catalog->reset(aux->catalog); /* reset aux */
+		goto unlock_exit;
+	}
 
-	dp_aux_cmd_fifo_tx(aux);
+	if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+		if (aux->read)
+			dp_aux_cmd_fifo_rx(aux, msg);
 
-	timeout = wait_for_completion_timeout(&aux->comp, HZ/4);
-	if (!timeout)
-		pr_err("aux read timeout\n");
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+	} else {
+		/* Reply defer to retry */
+		msg->reply = aux->native ?
+			DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+	}
 
-	pr_debug("aux status %s\n",
-		dp_aux_get_error(aux->aux_error_num));
+	/* Return requested size for success or retry */
+	ret = msg->size;
 
-	if (aux->aux_error_num == DP_AUX_ERR_NONE)
-		ret = dp_cmd_fifo_rx(aux, len);
-	else
-		ret = aux->aux_error_num;
-
-	aux->cmds->buf = rp->data;
-
+unlock_exit:
+	aux->cmd_busy = false;
 	mutex_unlock(&aux->mutex);
-
 	return ret;
 }
 
-static int dp_aux_write_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
-				enum aux_tx_mode mode, u8 *buf)
-{
-	struct aux_cmd cmd = {0};
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !len) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	cmd.ex_mode = AUX_WRITE;
-	cmd.tx_mode = mode;
-	cmd.addr    = addr;
-	cmd.len     = len;
-	cmd.buf     = buf;
-
-	aux->cmds = &cmd;
-
-	return dp_aux_write(aux);
-}
-
-static int dp_aux_read_ex(struct dp_aux *dp_aux, u32 addr, u32 len,
-				enum aux_tx_mode mode, u8 **buf)
-{
-	int rc = 0;
-	struct aux_cmd cmd = {0};
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !len) {
-		pr_err("invalid input\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	cmd.ex_mode = AUX_READ;
-	cmd.tx_mode = mode;
-	cmd.addr    = addr;
-	cmd.len     = len;
-
-	aux->cmds = &cmd;
-
-	rc = dp_aux_read(aux);
-	if (rc <= 0) {
-		rc = -EINVAL;
-		goto end;
-	}
-
-	*buf = cmd.buf;
-end:
-	return rc;
-}
-
-static int dp_aux_process(struct dp_aux *dp_aux, struct aux_cmd *cmds)
-{
-	struct dp_aux_private *aux;
-
-	if (!dp_aux || !cmds) {
-		pr_err("invalid input\n");
-		return -EINVAL;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	aux->cmds = cmds;
-
-	if (cmds->ex_mode == AUX_READ)
-		return dp_aux_read(aux);
-	else
-		return dp_aux_write(aux);
-}
-
-static bool dp_aux_ready(struct dp_aux *dp_aux)
-{
-	u8 data = 0;
-	int count, ret;
-	struct dp_aux_private *aux;
-
-	if (!dp_aux) {
-		pr_err("invalid input\n");
-		goto error;
-	}
-
-	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
-
-	for (count = 5; count; count--) {
-		ret = dp_aux_write_ex(dp_aux, 0x50, 1, AUX_I2C, &data);
-		if (ret >= 0)
-			break;
-
-		msleep(100);
-	}
-
-	if (count <= 0) {
-		pr_err("aux chan NOT ready\n");
-		goto error;
-	}
-
-	return true;
-error:
-	return false;
-}
-
 static void dp_aux_init(struct dp_aux *dp_aux, u32 *aux_cfg)
 {
 	struct dp_aux_private *aux;
@@ -526,6 +325,45 @@
 	aux->catalog->enable(aux->catalog, false);
 }
 
+static int dp_aux_register(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+	int ret = 0;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+	aux->drm_aux.name = "sde_dp_aux";
+	aux->drm_aux.dev = aux->dev;
+	aux->drm_aux.transfer = dp_aux_transfer;
+	ret = drm_dp_aux_register(&aux->drm_aux);
+	if (ret) {
+		pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
+		goto exit;
+	}
+	dp_aux->drm_aux = &aux->drm_aux;
+exit:
+	return ret;
+}
+
+static void dp_aux_deregister(struct dp_aux *dp_aux)
+{
+	struct dp_aux_private *aux;
+
+	if (!dp_aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+	drm_dp_aux_unregister(&aux->drm_aux);
+}
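Illustrative usage (not part of this patch): once the channel is registered, the generic DRM DP helpers are routed through dp_aux_transfer() above, so a caller in the display driver could, for example, read the DPCD revision as follows (the surrounding probe function is hypothetical):

static void example_dp_aux_probe(struct dp_aux *dp_aux)
{
	u8 rev = 0;
	ssize_t len;

	if (dp_aux->drm_aux_register(dp_aux)) {
		pr_err("drm aux registration failed\n");
		return;
	}

	/* routed through dp_aux_transfer() as a native AUX read */
	len = drm_dp_dpcd_readb(dp_aux->drm_aux, DP_DPCD_REV, &rev);
	if (len < 0)
		pr_err("DPCD read failed\n");
	else
		pr_debug("DPCD rev: 0x%x\n", rev);
}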
+
 struct dp_aux *dp_aux_get(struct device *dev, struct dp_catalog_aux *catalog)
 {
 	int rc = 0;
@@ -544,21 +382,19 @@
 		goto error;
 	}
 
+	init_completion(&aux->comp);
+	aux->cmd_busy = false;
+	mutex_init(&aux->mutex);
+
 	aux->dev = dev;
-
-	dp_aux_buf_set(aux);
-
 	aux->catalog = catalog;
-
 	dp_aux = &aux->dp_aux;
 
-	dp_aux->process = dp_aux_process;
-	dp_aux->read    = dp_aux_read_ex;
-	dp_aux->write   = dp_aux_write_ex;
-	dp_aux->ready   = dp_aux_ready;
 	dp_aux->isr     = dp_aux_isr;
 	dp_aux->init    = dp_aux_init;
 	dp_aux->deinit  = dp_aux_deinit;
+	dp_aux->drm_aux_register = dp_aux_register;
+	dp_aux->drm_aux_deregister = dp_aux_deregister;
 
 	return dp_aux;
 error:
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index 0603c15..f08c12b 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -16,6 +16,7 @@
 #define _DP_AUX_H_
 
 #include "dp_catalog.h"
+#include "drm_dp_helper.h"
 
 enum dp_aux_error {
 	DP_AUX_ERR_NONE	= 0,
@@ -26,32 +27,10 @@
 	DP_AUX_ERR_NACK_DEFER	= -5,
 };
 
-enum aux_tx_mode {
-	AUX_NATIVE,
-	AUX_I2C,
-};
-
-enum aux_exe_mode {
-	AUX_WRITE,
-	AUX_READ,
-};
-
-struct aux_cmd {
-	enum aux_exe_mode ex_mode;
-	enum aux_tx_mode tx_mode;
-	u32 addr;
-	u32 len;
-	u8 *buf;
-	bool next;
-};
-
 struct dp_aux {
-	int (*process)(struct dp_aux *aux, struct aux_cmd *cmd);
-	int (*write)(struct dp_aux *aux, u32 addr, u32 len,
-			enum aux_tx_mode mode, u8 *buf);
-	int (*read)(struct dp_aux *aux, u32 addr, u32 len,
-			enum aux_tx_mode mode, u8 **buf);
-	bool (*ready)(struct dp_aux *aux);
+	struct drm_dp_aux *drm_aux;
+	int (*drm_aux_register)(struct dp_aux *aux);
+	void (*drm_aux_deregister)(struct dp_aux *aux);
 	void (*isr)(struct dp_aux *aux);
 	void (*init)(struct dp_aux *aux, u32 *aux_cfg);
 	void (*deinit)(struct dp_aux *aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644
index 0000000..9361b52
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -0,0 +1,962 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/delay.h>
+
+#include "dp_catalog.h"
+
+/* DP_TX Registers */
+#define DP_HW_VERSION				(0x00000000)
+#define DP_SW_RESET				(0x00000010)
+#define DP_PHY_CTRL				(0x00000014)
+#define DP_CLK_CTRL				(0x00000018)
+#define DP_CLK_ACTIVE				(0x0000001C)
+#define DP_INTR_STATUS				(0x00000020)
+#define DP_INTR_STATUS2				(0x00000024)
+#define DP_INTR_STATUS3				(0x00000028)
+
+#define DP_DP_HPD_CTRL				(0x00000200)
+#define DP_DP_HPD_INT_STATUS			(0x00000204)
+#define DP_DP_HPD_INT_ACK			(0x00000208)
+#define DP_DP_HPD_INT_MASK			(0x0000020C)
+#define DP_DP_HPD_REFTIMER			(0x00000218)
+#define DP_DP_HPD_EVENT_TIME_0			(0x0000021C)
+#define DP_DP_HPD_EVENT_TIME_1			(0x00000220)
+#define DP_AUX_CTRL				(0x00000230)
+#define DP_AUX_DATA				(0x00000234)
+#define DP_AUX_TRANS_CTRL			(0x00000238)
+#define DP_TIMEOUT_COUNT			(0x0000023C)
+#define DP_AUX_LIMITS				(0x00000240)
+#define DP_AUX_STATUS				(0x00000244)
+
+#define DP_DPCD_CP_IRQ				(0x201)
+#define DP_DPCD_RXSTATUS			(0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM			(0x000002A0)
+
+#define DP_MAINLINK_CTRL			(0x00000400)
+#define DP_STATE_CTRL				(0x00000404)
+#define DP_CONFIGURATION_CTRL			(0x00000408)
+#define DP_SOFTWARE_MVID			(0x00000410)
+#define DP_SOFTWARE_NVID			(0x00000418)
+#define DP_TOTAL_HOR_VER			(0x0000041C)
+#define DP_START_HOR_VER_FROM_SYNC		(0x00000420)
+#define DP_HSYNC_VSYNC_WIDTH_POLARITY		(0x00000424)
+#define DP_ACTIVE_HOR_VER			(0x00000428)
+#define DP_MISC1_MISC0				(0x0000042C)
+#define DP_VALID_BOUNDARY			(0x00000430)
+#define DP_VALID_BOUNDARY_2			(0x00000434)
+#define DP_LOGICAL2PHYSCIAL_LANE_MAPPING	(0x00000438)
+
+#define DP_MAINLINK_READY			(0x00000440)
+#define DP_MAINLINK_LEVELS			(0x00000444)
+#define DP_TU					(0x0000044C)
+
+#define DP_HBR2_COMPLIANCE_SCRAMBLER_RESET	(0x00000454)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG0	(0x000004C0)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG1	(0x000004C4)
+#define DP_TEST_80BIT_CUSTOM_PATTERN_REG2	(0x000004C8)
+
+#define MMSS_DP_MISC1_MISC0			(0x0000042C)
+#define MMSS_DP_AUDIO_TIMING_GEN		(0x00000480)
+#define MMSS_DP_AUDIO_TIMING_RBR_32		(0x00000484)
+#define MMSS_DP_AUDIO_TIMING_HBR_32		(0x00000488)
+#define MMSS_DP_AUDIO_TIMING_RBR_44		(0x0000048C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44		(0x00000490)
+#define MMSS_DP_AUDIO_TIMING_RBR_48		(0x00000494)
+#define MMSS_DP_AUDIO_TIMING_HBR_48		(0x00000498)
+
+#define MMSS_DP_PSR_CRC_RG			(0x00000554)
+#define MMSS_DP_PSR_CRC_B			(0x00000558)
+
+#define MMSS_DP_AUDIO_CFG			(0x00000600)
+#define MMSS_DP_AUDIO_STATUS			(0x00000604)
+#define MMSS_DP_AUDIO_PKT_CTRL			(0x00000608)
+#define MMSS_DP_AUDIO_PKT_CTRL2			(0x0000060C)
+#define MMSS_DP_AUDIO_ACR_CTRL			(0x00000610)
+#define MMSS_DP_AUDIO_CTRL_RESET		(0x00000614)
+
+#define MMSS_DP_SDP_CFG				(0x00000628)
+#define MMSS_DP_SDP_CFG2			(0x0000062C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0		(0x00000630)
+#define MMSS_DP_AUDIO_TIMESTAMP_1		(0x00000634)
+
+#define MMSS_DP_AUDIO_STREAM_0			(0x00000640)
+#define MMSS_DP_AUDIO_STREAM_1			(0x00000644)
+
+#define MMSS_DP_EXTENSION_0			(0x00000650)
+#define MMSS_DP_EXTENSION_1			(0x00000654)
+#define MMSS_DP_EXTENSION_2			(0x00000658)
+#define MMSS_DP_EXTENSION_3			(0x0000065C)
+#define MMSS_DP_EXTENSION_4			(0x00000660)
+#define MMSS_DP_EXTENSION_5			(0x00000664)
+#define MMSS_DP_EXTENSION_6			(0x00000668)
+#define MMSS_DP_EXTENSION_7			(0x0000066C)
+#define MMSS_DP_EXTENSION_8			(0x00000670)
+#define MMSS_DP_EXTENSION_9			(0x00000674)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0		(0x00000678)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1		(0x0000067C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2		(0x00000680)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3		(0x00000684)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4		(0x00000688)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5		(0x0000068C)
+#define MMSS_DP_AUDIO_ISRC_0			(0x00000690)
+#define MMSS_DP_AUDIO_ISRC_1			(0x00000694)
+#define MMSS_DP_AUDIO_ISRC_2			(0x00000698)
+#define MMSS_DP_AUDIO_ISRC_3			(0x0000069C)
+#define MMSS_DP_AUDIO_ISRC_4			(0x000006A0)
+#define MMSS_DP_AUDIO_ISRC_5			(0x000006A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0		(0x000006A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1		(0x000006AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2		(0x000006B0)
+
+#define MMSS_DP_GENERIC0_0			(0x00000700)
+#define MMSS_DP_GENERIC0_1			(0x00000704)
+#define MMSS_DP_GENERIC0_2			(0x00000708)
+#define MMSS_DP_GENERIC0_3			(0x0000070C)
+#define MMSS_DP_GENERIC0_4			(0x00000710)
+#define MMSS_DP_GENERIC0_5			(0x00000714)
+#define MMSS_DP_GENERIC0_6			(0x00000718)
+#define MMSS_DP_GENERIC0_7			(0x0000071C)
+#define MMSS_DP_GENERIC0_8			(0x00000720)
+#define MMSS_DP_GENERIC0_9			(0x00000724)
+#define MMSS_DP_GENERIC1_0			(0x00000728)
+#define MMSS_DP_GENERIC1_1			(0x0000072C)
+#define MMSS_DP_GENERIC1_2			(0x00000730)
+#define MMSS_DP_GENERIC1_3			(0x00000734)
+#define MMSS_DP_GENERIC1_4			(0x00000738)
+#define MMSS_DP_GENERIC1_5			(0x0000073C)
+#define MMSS_DP_GENERIC1_6			(0x00000740)
+#define MMSS_DP_GENERIC1_7			(0x00000744)
+#define MMSS_DP_GENERIC1_8			(0x00000748)
+#define MMSS_DP_GENERIC1_9			(0x0000074C)
+
+#define MMSS_DP_TIMING_ENGINE_EN		(0x00000A10)
+#define MMSS_DP_ASYNC_FIFO_CONFIG		(0x00000A88)
+
+/* DP PHY Register offsets */
+#define DP_PHY_REVISION_ID0                     (0x00000000)
+#define DP_PHY_REVISION_ID1                     (0x00000004)
+#define DP_PHY_REVISION_ID2                     (0x00000008)
+#define DP_PHY_REVISION_ID3                     (0x0000000C)
+
+#define DP_PHY_CFG                              (0x00000010)
+#define DP_PHY_PD_CTL                           (0x00000018)
+#define DP_PHY_MODE                             (0x0000001C)
+
+#define DP_PHY_AUX_CFG0                         (0x00000020)
+#define DP_PHY_AUX_CFG1                         (0x00000024)
+#define DP_PHY_AUX_CFG2                         (0x00000028)
+#define DP_PHY_AUX_CFG3                         (0x0000002C)
+#define DP_PHY_AUX_CFG4                         (0x00000030)
+#define DP_PHY_AUX_CFG5                         (0x00000034)
+#define DP_PHY_AUX_CFG6                         (0x00000038)
+#define DP_PHY_AUX_CFG7                         (0x0000003C)
+#define DP_PHY_AUX_CFG8                         (0x00000040)
+#define DP_PHY_AUX_CFG9                         (0x00000044)
+#define DP_PHY_AUX_INTERRUPT_MASK               (0x00000048)
+#define DP_PHY_AUX_INTERRUPT_CLEAR              (0x0000004C)
+
+#define DP_PHY_SPARE0				(0x00AC)
+
+#define TXn_TX_EMP_POST1_LVL			(0x000C)
+#define TXn_TX_DRV_LVL				(0x001C)
+
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		(0x004)
+
+/* DP MMSS_CC registers */
+#define MMSS_DP_LINK_CMD_RCGR			(0x0138)
+#define MMSS_DP_LINK_CFG_RCGR			(0x013C)
+#define MMSS_DP_PIXEL_M				(0x0174)
+#define MMSS_DP_PIXEL_N				(0x0178)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL                                   (0x0A0)
+#define DP_HDCP_STATUS                                 (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x298)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x29C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x750)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x75C)
+#define DP_HDCP_SHA_STATUS                             (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3                          (0x2A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x2A8)
+#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
+
+#define dp_read(offset) readl_relaxed((offset))
+#define dp_write(offset, data) writel_relaxed((data), (offset))
+
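+/*
+ * Resolve the dp_catalog_private instance that embeds the given sub-module
+ * struct (aux/ctrl/audio/panel): recover the dp_catalog from the member and
+ * then the private wrapper. The result is assigned to the local variable
+ * 'catalog' declared by the caller.
+ */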
+#define dp_catalog_get_priv(x) { \
+	struct dp_catalog *dp_catalog; \
+	dp_catalog = container_of(x, struct dp_catalog, x); \
+	catalog = container_of(dp_catalog, struct dp_catalog_private, \
+				dp_catalog); \
+}
+
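+/*
+ * Each interrupt source occupies three consecutive bits in the DP_INTR_STATUS
+ * registers: status at bit n, ack at bit n + 1 and mask at bit n + 2, so the
+ * mask bitmap is simply the status bitmap shifted left by two.
+ */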
+#define DP_INTERRUPT_STATUS1 \
+	(DP_INTR_AUX_I2C_DONE| \
+	DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+	DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+	DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+	DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTR_MASK1		(DP_INTERRUPT_STATUS1 << 2)
+
+#define DP_INTERRUPT_STATUS2 \
+	(DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+	DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTR_MASK2		(DP_INTERRUPT_STATUS2 << 2)
+
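+/*
+ * PHY drive settings used by dp_catalog_ctrl_update_vx_px(), indexed as
+ * [voltage swing level][pre-emphasis level]; 0xFF marks an unsupported
+ * combination.
+ */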
+static u8 const vm_pre_emphasis[4][4] = {
+	{0x00, 0x0B, 0x12, 0xFF},       /* pe0, 0 db */
+	{0x00, 0x0A, 0x12, 0xFF},       /* pe1, 3.5 db */
+	{0x00, 0x0C, 0xFF, 0xFF},       /* pe2, 6.0 db */
+	{0xFF, 0xFF, 0xFF, 0xFF}        /* pe3, 9.5 db */
+};
+
+/* voltage swing, 0.2v and 1.0v are not supported */
+static u8 const vm_voltage_swing[4][4] = {
+	{0x07, 0x0F, 0x14, 0xFF}, /* sw0, 0.4v  */
+	{0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6v  */
+	{0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8v  */
+	{0xFF, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2v, optional */
+};
+
+struct dp_catalog_private {
+	struct device *dev;
+	struct dp_io *io;
+	struct dp_catalog dp_catalog;
+};
+
+/* aux related catalog functions */
+static u32 dp_catalog_aux_read_data(struct dp_catalog_aux *aux)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	return dp_read(base + DP_AUX_DATA);
+end:
+	return 0;
+}
+
+static int dp_catalog_aux_write_data(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_DATA, aux->data);
+end:
+	return rc;
+}
+
+static int dp_catalog_aux_write_trans(struct dp_catalog_aux *aux)
+{
+	int rc = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_AUX_TRANS_CTRL, aux->data);
+end:
+	return rc;
+}
+
+static void dp_catalog_aux_reset(struct dp_catalog_aux *aux)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	aux_ctrl |= BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	aux_ctrl &= ~BIT(1);
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_enable(struct dp_catalog_aux *aux, bool enable)
+{
+	u32 aux_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	aux_ctrl = dp_read(base + DP_AUX_CTRL);
+
+	if (enable) {
+		dp_write(base + DP_TIMEOUT_COUNT, 0xffff);
+		dp_write(base + DP_AUX_LIMITS, 0xffff);
+		aux_ctrl |= BIT(0);
+	} else {
+		aux_ctrl &= ~BIT(0);
+	}
+
+	dp_write(base + DP_AUX_CTRL, aux_ctrl);
+}
+
+static void dp_catalog_aux_setup(struct dp_catalog_aux *aux, u32 *aux_cfg)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!aux || !aux_cfg) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x02);
+	wmb(); /* make sure PD programming happened */
+	dp_write(catalog->io->phy_io.base + DP_PHY_PD_CTL, 0x7d);
+
+	/* Turn on BIAS current for PHY/PLL */
+	dp_write(catalog->io->dp_pll_io.base +
+		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+
+	/* DP AUX CFG register programming */
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG0, aux_cfg[0]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG1, aux_cfg[1]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG2, aux_cfg[2]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG3, aux_cfg[3]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG4, aux_cfg[4]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG5, aux_cfg[5]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG6, aux_cfg[6]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG7, aux_cfg[7]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG8, aux_cfg[8]);
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_CFG9, aux_cfg[9]);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_AUX_INTERRUPT_MASK, 0x1F);
+}
+
+static void dp_catalog_aux_get_irq(struct dp_catalog_aux *aux, bool cmd_busy)
+{
+	u32 ack;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!aux) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(aux);
+	base = catalog->io->ctrl_io.base;
+
+	if (cmd_busy)
+		dp_write(base + DP_AUX_TRANS_CTRL, 0x0);
+
+	aux->isr = dp_read(base + DP_INTR_STATUS);
+	aux->isr &= ~DP_INTR_MASK1;
+	ack = aux->isr & DP_INTERRUPT_STATUS1;
+	ack <<= 1;
+	ack |= DP_INTR_MASK1;
+	dp_write(base + DP_INTR_STATUS, ack);
+}
+
+/* controller related catalog functions */
+static void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_VALID_BOUNDARY, ctrl->valid_boundary);
+	dp_write(base + DP_TU, ctrl->dp_tu);
+	dp_write(base + DP_VALID_BOUNDARY_2, ctrl->valid_boundary2);
+}
+
+static void dp_catalog_ctrl_state_ctrl(struct dp_catalog_ctrl *ctrl, u32 state)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_STATE_CTRL, state);
+}
+
+static void dp_catalog_ctrl_config_ctrl(struct dp_catalog_ctrl *ctrl, u32 cfg)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_CONFIGURATION_CTRL, cfg);
+	dp_write(base + DP_MAINLINK_LEVELS, 0xa08);
+	dp_write(base + MMSS_DP_ASYNC_FIFO_CONFIG, 0x1);
+}
+
+static void dp_catalog_ctrl_lane_mapping(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_LOGICAL2PHYSICAL_LANE_MAPPING, 0xe4);
+}
+
+static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	u32 mainlink_ctrl;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	mainlink_ctrl = dp_read(base + DP_MAINLINK_CTRL);
+
+	if (enable) {
+		mainlink_ctrl |= BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink is turned off before reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000002);
+		wmb(); /* make sure mainlink entered reset */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000000);
+		wmb(); /* make sure mainlink reset done */
+		dp_write(base + DP_MAINLINK_CTRL, 0x02000001);
+		wmb(); /* make sure mainlink turned on */
+	} else {
+		mainlink_ctrl &= ~BIT(0);
+		dp_write(base + DP_MAINLINK_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_config_misc(struct dp_catalog_ctrl *ctrl,
+					u32 cc, u32 tb)
+{
+	u32 misc_val = cc;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	misc_val |= (tb << 5);
+	misc_val |= BIT(0); /* Configure clock to synchronous mode */
+
+	pr_debug("misc settings = 0x%x\n", misc_val);
+	dp_write(base + DP_MISC1_MISC0, misc_val);
+}
+
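+/*
+ * Program the MSA software MVID/NVID from the DP pixel clock RCG M/N counters
+ * in the MMSS_CC block. The N counter is read back in inverted form, so the
+ * code complements it and adds M back to recover NVID; MVID is derived from
+ * the M counter.
+ */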
+static void dp_catalog_ctrl_config_msa(struct dp_catalog_ctrl *ctrl)
+{
+	u32 pixel_m, pixel_n;
+	u32 mvid, nvid;
+	struct dp_catalog_private *catalog;
+	void __iomem *base_cc, *base_ctrl;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base_cc = catalog->io->dp_cc_io.base;
+	base_ctrl = catalog->io->ctrl_io.base;
+
+	pixel_m = dp_read(base_cc + MMSS_DP_PIXEL_M);
+	pixel_n = dp_read(base_cc + MMSS_DP_PIXEL_N);
+	pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
+
+	mvid = (pixel_m & 0xFFFF) * 5;
+	nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
+	pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+	dp_write(base_ctrl + DP_SOFTWARE_MVID, mvid);
+	dp_write(base_ctrl + DP_SOFTWARE_NVID, nvid);
+}
+
+static void dp_catalog_ctrl_set_pattern(struct dp_catalog_ctrl *ctrl,
+					u32 pattern)
+{
+	int bit, cnt = 10;
+	u32 data;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	bit = 1;
+	bit <<= (pattern - 1);
+	pr_debug("bit=%d train=%d\n", bit, pattern);
+	dp_write(base + DP_STATE_CTRL, bit);
+
+	bit = 8;
+	bit <<= (pattern - 1);
+
+	while (cnt--) {
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & bit)
+			break;
+	}
+
+	if (cnt < 0)
+		pr_err("set link_train=%d failed\n", pattern);
+}
+
+static void dp_catalog_ctrl_reset(struct dp_catalog_ctrl *ctrl)
+{
+	u32 sw_reset;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	sw_reset = dp_read(base + DP_SW_RESET);
+
+	sw_reset |= BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+	usleep_range(1000, 1010); /* h/w recommended delay */
+
+	sw_reset &= ~BIT(0);
+	dp_write(base + DP_SW_RESET, sw_reset);
+}
+
+static bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog_ctrl *ctrl)
+{
+	u32 data;
+	int cnt = 10;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	while (--cnt) {
+		/* DP_MAINLINK_READY */
+		data = dp_read(base + DP_MAINLINK_READY);
+		if (data & BIT(0))
+			return true;
+
+		usleep_range(1000, 1010); /* 1ms wait before next reg read */
+	}
+	pr_err("mainlink not ready\n");
+end:
+	return false;
+}
+
+static void dp_catalog_ctrl_enable_irq(struct dp_catalog_ctrl *ctrl,
+						bool enable)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (enable) {
+		dp_write(base + DP_INTR_STATUS, DP_INTR_MASK1);
+		dp_write(base + DP_INTR_STATUS2, DP_INTR_MASK2);
+	} else {
+		dp_write(base + DP_INTR_STATUS, 0x00);
+		dp_write(base + DP_INTR_STATUS2, 0x00);
+	}
+}
+
+static void dp_catalog_ctrl_hpd_config(struct dp_catalog_ctrl *ctrl, bool en)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	if (en) {
+		u32 reftimer = dp_read(base + DP_DP_HPD_REFTIMER);
+
+		dp_write(base + DP_DP_HPD_INT_ACK, 0xF);
+		dp_write(base + DP_DP_HPD_INT_MASK, 0xF);
+
+		/* Enabling REFTIMER */
+		reftimer |= BIT(16);
+		dp_write(base + DP_DP_HPD_REFTIMER, reftimer);
+		/* Enable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x1);
+	} else {
+		/* Disable HPD */
+		dp_write(base + DP_DP_HPD_CTRL, 0x0);
+	}
+}
+
+static void dp_catalog_ctrl_get_interrupt(struct dp_catalog_ctrl *ctrl)
+{
+	u32 ack = 0;
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	ctrl->isr = dp_read(base + DP_INTR_STATUS2);
+	ctrl->isr &= ~DP_INTR_MASK2;
+	ack = ctrl->isr & DP_INTERRUPT_STATUS2;
+	ack <<= 1;
+	ack |= DP_INTR_MASK2;
+	dp_write(base + DP_INTR_STATUS2, ack);
+}
+
+static void dp_catalog_ctrl_phy_reset(struct dp_catalog_ctrl *ctrl)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_PHY_CTRL, 0x5); /* bit 0 & 2 */
+	usleep_range(1000, 1010); /* h/w recommended delay */
+	dp_write(base + DP_PHY_CTRL, 0x0);
+	wmb(); /* make sure PHY reset done */
+}
+
+static void dp_catalog_ctrl_phy_lane_cfg(struct dp_catalog_ctrl *ctrl,
+		bool flipped, u8 ln_cnt)
+{
+	u32 info = 0x0;
+	struct dp_catalog_private *catalog;
+	u8 orientation = BIT(!!flipped);
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+
+	info |= (ln_cnt & 0x0F);
+	info |= ((orientation & 0x0F) << 4);
+	pr_debug("Shared Info = 0x%x\n", info);
+
+	dp_write(catalog->io->phy_io.base + DP_PHY_SPARE0, info);
+}
+
+static void dp_catalog_ctrl_update_vx_px(struct dp_catalog_ctrl *ctrl,
+		u8 v_level, u8 p_level)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base0, *base1;
+	u8 value0, value1;
+
+	if (!ctrl) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	dp_catalog_get_priv(ctrl);
+	base0 = catalog->io->ln_tx0_io.base;
+	base1 = catalog->io->ln_tx1_io.base;
+
+	pr_debug("v=%d p=%d\n", v_level, p_level);
+
+	value0 = vm_voltage_swing[v_level][p_level];
+	value1 = vm_pre_emphasis[v_level][p_level];
+
+	/* program default setting first */
+	dp_write(base0 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base1 + TXn_TX_DRV_LVL, 0x2A);
+	dp_write(base0 + TXn_TX_EMP_POST1_LVL, 0x20);
+	dp_write(base1 + TXn_TX_EMP_POST1_LVL, 0x20);
+
+	/* Enable MUX to use Cursor values from these registers */
+	value0 |= BIT(5);
+	value1 |= BIT(5);
+
+	/* Configure host and panel only if both values are allowed */
+	if (value0 != 0xFF && value1 != 0xFF) {
+		dp_write(base0 + TXn_TX_DRV_LVL, value0);
+		dp_write(base1 + TXn_TX_DRV_LVL, value0);
+		dp_write(base0 + TXn_TX_EMP_POST1_LVL, value1);
+		dp_write(base1 + TXn_TX_EMP_POST1_LVL, value1);
+
+		pr_debug("host PHY settings: value0=0x%x value1=0x%x",
+						value0, value1);
+	}
+}
+
+/* panel related catalog functions */
+static int dp_catalog_panel_timing_cfg(struct dp_catalog_panel *panel)
+{
+	struct dp_catalog_private *catalog;
+	void __iomem *base;
+
+	if (!panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	dp_catalog_get_priv(panel);
+	base = catalog->io->ctrl_io.base;
+
+	dp_write(base + DP_TOTAL_HOR_VER, panel->total);
+	dp_write(base + DP_START_HOR_VER_FROM_SYNC, panel->sync_start);
+	dp_write(base + DP_HSYNC_VSYNC_WIDTH_POLARITY, panel->width_blanking);
+	dp_write(base + DP_ACTIVE_HOR_VER, panel->dp_active);
+end:
+	return 0;
+}
+
+/* audio related catalog functions */
+static int dp_catalog_audio_acr_ctrl(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_stream_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_timestamp_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_infoframe_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_copy_mgmt_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_isrc_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+static int dp_catalog_audio_setup_sdp(struct dp_catalog_audio *audio)
+{
+	return 0;
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+	int rc = 0;
+	struct dp_catalog *dp_catalog;
+	struct dp_catalog_private *catalog;
+	struct dp_catalog_aux aux = {
+		.read_data     = dp_catalog_aux_read_data,
+		.write_data    = dp_catalog_aux_write_data,
+		.write_trans   = dp_catalog_aux_write_trans,
+		.reset         = dp_catalog_aux_reset,
+		.enable        = dp_catalog_aux_enable,
+		.setup         = dp_catalog_aux_setup,
+		.get_irq       = dp_catalog_aux_get_irq,
+	};
+	struct dp_catalog_ctrl ctrl = {
+		.state_ctrl     = dp_catalog_ctrl_state_ctrl,
+		.config_ctrl    = dp_catalog_ctrl_config_ctrl,
+		.lane_mapping   = dp_catalog_ctrl_lane_mapping,
+		.mainlink_ctrl  = dp_catalog_ctrl_mainlink_ctrl,
+		.config_misc    = dp_catalog_ctrl_config_misc,
+		.config_msa     = dp_catalog_ctrl_config_msa,
+		.set_pattern    = dp_catalog_ctrl_set_pattern,
+		.reset          = dp_catalog_ctrl_reset,
+		.mainlink_ready = dp_catalog_ctrl_mainlink_ready,
+		.enable_irq     = dp_catalog_ctrl_enable_irq,
+		.hpd_config     = dp_catalog_ctrl_hpd_config,
+		.phy_reset      = dp_catalog_ctrl_phy_reset,
+		.phy_lane_cfg   = dp_catalog_ctrl_phy_lane_cfg,
+		.update_vx_px   = dp_catalog_ctrl_update_vx_px,
+		.get_interrupt  = dp_catalog_ctrl_get_interrupt,
+		.update_transfer_unit = dp_catalog_ctrl_update_transfer_unit,
+	};
+	struct dp_catalog_audio audio = {
+		.acr_ctrl      = dp_catalog_audio_acr_ctrl,
+		.stream_sdp    = dp_catalog_audio_stream_sdp,
+		.timestamp_sdp = dp_catalog_audio_timestamp_sdp,
+		.infoframe_sdp = dp_catalog_audio_infoframe_sdp,
+		.copy_mgmt_sdp = dp_catalog_audio_copy_mgmt_sdp,
+		.isrc_sdp      = dp_catalog_audio_isrc_sdp,
+		.setup_sdp     = dp_catalog_audio_setup_sdp,
+	};
+	struct dp_catalog_panel panel = {
+		.timing_cfg = dp_catalog_panel_timing_cfg,
+	};
+
+	if (!io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	catalog  = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+	if (!catalog) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	catalog->dev = dev;
+	catalog->io = io;
+
+	dp_catalog = &catalog->dp_catalog;
+
+	dp_catalog->aux   = aux;
+	dp_catalog->ctrl  = ctrl;
+	dp_catalog->audio = audio;
+	dp_catalog->panel = panel;
+
+	return dp_catalog;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_catalog_put(struct dp_catalog *dp_catalog)
+{
+	struct dp_catalog_private *catalog;
+
+	if (!dp_catalog)
+		return;
+
+	catalog = container_of(dp_catalog, struct dp_catalog_private,
+				dp_catalog);
+
+	devm_kfree(catalog->dev, catalog);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644
index 0000000..ce88569
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD		BIT(0)
+#define DP_INTR_AUX_I2C_DONE	BIT(3)
+#define DP_INTR_WRONG_ADDR	BIT(6)
+#define DP_INTR_TIMEOUT		BIT(9)
+#define DP_INTR_NACK_DEFER	BIT(12)
+#define DP_INTR_WRONG_DATA_CNT	BIT(15)
+#define DP_INTR_I2C_NACK	BIT(18)
+#define DP_INTR_I2C_DEFER	BIT(21)
+#define DP_INTR_PLL_UNLOCKED	BIT(24)
+#define DP_INTR_AUX_ERROR	BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO		BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT	BIT(3)
+#define DP_INTR_FRAME_END		BIT(6)
+#define DP_INTR_CRC_UPDATED		BIT(9)
+
+struct dp_catalog_aux {
+	u32 data;
+	u32 isr;
+
+	u32 (*read_data)(struct dp_catalog_aux *aux);
+	int (*write_data)(struct dp_catalog_aux *aux);
+	int (*write_trans)(struct dp_catalog_aux *aux);
+	void (*reset)(struct dp_catalog_aux *aux);
+	void (*enable)(struct dp_catalog_aux *aux, bool enable);
+	void (*setup)(struct dp_catalog_aux *aux, u32 *aux_cfg);
+	void (*get_irq)(struct dp_catalog_aux *aux, bool cmd_busy);
+};
+
+struct dp_catalog_ctrl {
+	u32 dp_tu;
+	u32 valid_boundary;
+	u32 valid_boundary2;
+	u32 isr;
+
+	void (*state_ctrl)(struct dp_catalog_ctrl *ctrl, u32 state);
+	void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u32 config);
+	void (*lane_mapping)(struct dp_catalog_ctrl *ctrl);
+	void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*config_misc)(struct dp_catalog_ctrl *ctrl, u32 cc, u32 tb);
+	void (*config_msa)(struct dp_catalog_ctrl *ctrl);
+	void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
+	void (*reset)(struct dp_catalog_ctrl *ctrl);
+	bool (*mainlink_ready)(struct dp_catalog_ctrl *ctrl);
+	void (*enable_irq)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*hpd_config)(struct dp_catalog_ctrl *ctrl, bool enable);
+	void (*phy_reset)(struct dp_catalog_ctrl *ctrl);
+	void (*phy_lane_cfg)(struct dp_catalog_ctrl *ctrl, bool flipped,
+				u8 lane_cnt);
+	void (*update_vx_px)(struct dp_catalog_ctrl *ctrl, u8 v_level,
+				u8 p_level);
+	void (*get_interrupt)(struct dp_catalog_ctrl *ctrl);
+	void (*update_transfer_unit)(struct dp_catalog_ctrl *ctrl);
+};
+
+struct dp_catalog_audio {
+	u32 data;
+
+	int (*acr_ctrl)(struct dp_catalog_audio *audio);
+	int (*stream_sdp)(struct dp_catalog_audio *audio);
+	int (*timestamp_sdp)(struct dp_catalog_audio *audio);
+	int (*infoframe_sdp)(struct dp_catalog_audio *audio);
+	int (*copy_mgmt_sdp)(struct dp_catalog_audio *audio);
+	int (*isrc_sdp)(struct dp_catalog_audio *audio);
+	int (*setup_sdp)(struct dp_catalog_audio *audio);
+};
+
+struct dp_catalog_panel {
+	u32 total;
+	u32 sync_start;
+	u32 width_blanking;
+	u32 dp_active;
+
+	int (*timing_cfg)(struct dp_catalog_panel *panel);
+};
+
+struct dp_catalog {
+	struct dp_catalog_aux aux;
+	struct dp_catalog_ctrl ctrl;
+	struct dp_catalog_audio audio;
+	struct dp_catalog_panel panel;
+};
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+void dp_catalog_put(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644
index 0000000..888c511
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+
+#include "dp_ctrl.h"
+
+#define DP_KHZ_TO_HZ 1000
+#define DP_CRYPTO_CLK_RATE_KHZ 180000
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
+
+/* dp state ctrl */
+#define ST_TRAIN_PATTERN_1		BIT(0)
+#define ST_TRAIN_PATTERN_2		BIT(1)
+#define ST_TRAIN_PATTERN_3		BIT(2)
+#define ST_TRAIN_PATTERN_4		BIT(3)
+#define ST_SYMBOL_ERR_RATE_MEASUREMENT	BIT(4)
+#define ST_PRBS7			BIT(5)
+#define ST_CUSTOM_80_BIT_PATTERN	BIT(6)
+#define ST_SEND_VIDEO			BIT(7)
+#define ST_PUSH_IDLE			BIT(8)
+
+struct dp_vc_tu_mapping_table {
+	u32 vic;
+	u8 lanes;
+	u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+	u8 bpp;
+	u8 valid_boundary_link;
+	u16 delay_start_link;
+	bool boundary_moderation_en;
+	u8 valid_lower_boundary_link;
+	u8 upper_boundary_count;
+	u8 lower_boundary_count;
+	u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+	struct dp_ctrl dp_ctrl;
+
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_panel *panel;
+	struct dp_link *link;
+	struct dp_power *power;
+	struct dp_parser *parser;
+	struct dp_catalog_ctrl *catalog;
+
+	struct completion idle_comp;
+	struct completion video_comp;
+	struct completion irq_comp;
+
+	bool hpd_irq_on;
+	bool power_on;
+	bool sink_info_read;
+	bool cont_splash;
+	bool psm_enabled;
+	bool initialized;
+	bool orientation;
+
+	u32 pixel_rate;
+	u32 vic;
+};
+
+enum notification_status {
+	NOTIFY_UNKNOWN,
+	NOTIFY_CONNECT,
+	NOTIFY_DISCONNECT,
+	NOTIFY_CONNECT_IRQ_HPD,
+	NOTIFY_DISCONNECT_IRQ_HPD,
+};
+
+static void dp_ctrl_idle_patterns_sent(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("idle_patterns_sent\n");
+	complete(&ctrl->idle_comp);
+}
+
+static void dp_ctrl_video_ready(struct dp_ctrl_private *ctrl)
+{
+	pr_debug("dp_video_ready\n");
+	complete(&ctrl->video_comp);
+}
+
+static void dp_ctrl_state_ctrl(struct dp_ctrl_private *ctrl, u32 state)
+{
+	ctrl->catalog->state_ctrl(ctrl->catalog, state);
+}
+
+static void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+	int const idle_pattern_completion_timeout_ms = 3 * HZ / 100;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	drm_dp_link_power_down(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
+
+	reinit_completion(&ctrl->idle_comp);
+	dp_ctrl_state_ctrl(ctrl, ST_PUSH_IDLE);
+
+	if (!wait_for_completion_timeout(&ctrl->idle_comp,
+			idle_pattern_completion_timeout_ms))
+		pr_warn("PUSH_IDLE pattern timed out\n");
+
+	pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+	u32 config = 0, tbd;
+	u8 *dpcd = ctrl->panel->dpcd;
+
+	config |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK  */
+	config |= (0 << 11); /* RGB */
+
+	/* Scrambler reset enable */
+	if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+		config |= (1 << 10);
+
+	tbd = ctrl->link->get_test_bits_depth(ctrl->link,
+			ctrl->panel->pinfo.bpp);
+	config |= tbd << 8;
+
+	/* Num of Lanes */
+	config |= ((ctrl->link->lane_count - 1) << 4);
+
+	if (drm_dp_enhanced_frame_cap(dpcd))
+		config |= 0x40;
+
+	config |= 0x04; /* progressive video */
+
+	config |= 0x03;	/* sycn clock & static Mvid */
+
+	ctrl->catalog->config_ctrl(ctrl->catalog, config);
+}
+
+/**
+ * dp_ctrl_configure_source_params() - configures DP transmitter source params
+ * @ctrl: Display Port Driver data
+ *
+ * Configures the DP transmitter source params including details such as lane
+ * configuration, output format and sink/panel timing information.
+ */
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+	u32 cc, tb;
+
+	ctrl->catalog->lane_mapping(ctrl->catalog);
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	dp_ctrl_config_ctrl(ctrl);
+
+	tb = ctrl->link->get_test_bits_depth(ctrl->link,
+		ctrl->panel->pinfo.bpp);
+	cc = ctrl->link->get_colorimetry_config(ctrl->link);
+	ctrl->catalog->config_misc(ctrl->catalog, cc, tb);
+
+	ctrl->catalog->config_msa(ctrl->catalog);
+
+	ctrl->panel->timing_cfg(ctrl->panel);
+}
+
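+/*
+ * Helper for the TU calculation: depending on how the resulting valid value
+ * and the valid boundary compare against the reference, return either the sum
+ * or the absolute difference of value1 and value2 and flag whether the extra
+ * requested bytes should be treated as negative.
+ */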
+static void dp_ctrl_get_extra_req_bytes(u64 result_valid,
+					int valid_bdary_link,
+					u64 value1, u64 value2,
+					bool *negative, u64 *result,
+					u64 compare)
+{
+	*negative = false;
+	if (result_valid >= compare) {
+		if (valid_bdary_link
+				>= compare)
+			*result = value1 + value2;
+		else {
+			if (value1 < value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		}
+	} else {
+		if (valid_bdary_link
+				>= compare) {
+			if (value1 >= value2)
+				*negative = true;
+			*result = (value1 >= value2) ?
+				(value1 - value2) : (value2 - value1);
+		} else {
+			*result = value1 + value2;
+			*negative = true;
+		}
+	}
+}
+
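+/* Round x up (roundup_u64) or down (rounddown_u64) to a multiple of y. */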
+static u64 roundup_u64(u64 x, u64 y)
+{
+	x += (y - 1);
+	return (div64_ul(x, y) * y);
+}
+
+static u64 rounddown_u64(u64 x, u64 y)
+{
+	u64 rem;
+
+	div64_u64_rem(x, y, &rem);
+	return (x - rem);
+}
+
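+/*
+ * Compute the transfer unit (TU) parameters for the main link. A first pass
+ * scans TU sizes 32..64 for the valid boundary with the smallest rounding
+ * error against the pclk/lclk bandwidth ratio; if that result is not good
+ * enough, a brute-force search additionally evaluates boundary moderation
+ * (alternating upper/lower boundary counts) within the available h_blank.
+ */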
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+		struct dp_vc_tu_mapping_table *tu_table)
+{
+	u32 const multiplier = 1000000;
+	u64 pclk, lclk;
+	u8 bpp, ln_cnt, link_rate;
+	int run_idx = 0;
+	u32 lwidth, h_blank;
+	u32 fifo_empty = 0;
+	u32 ratio_scale = 1001;
+	u64 temp, ratio, original_ratio;
+	u64 temp2, reminder;
+	u64 temp3, temp4, result = 0;
+
+	u64 err = multiplier;
+	u64 n_err = 0, n_n_err = 0;
+	bool n_err_neg, nn_err_neg;
+	u8 hblank_margin = 16;
+
+	u8 tu_size, tu_size_desired = 0, tu_size_minus1;
+	int valid_boundary_link;
+	u64 resulting_valid;
+	u64 total_valid;
+	u64 effective_valid;
+	u64 effective_valid_recorded;
+	int n_tus;
+	int n_tus_per_lane;
+	int paired_tus;
+	int remainder_tus;
+	int remainder_tus_upper, remainder_tus_lower;
+	int extra_bytes;
+	int filler_size;
+	int delay_start_link;
+	int boundary_moderation_en = 0;
+	int upper_bdry_cnt = 0;
+	int lower_bdry_cnt = 0;
+	int i_upper_bdry_cnt = 0;
+	int i_lower_bdry_cnt = 0;
+	int valid_lower_boundary_link = 0;
+	int even_distribution_bf = 0;
+	int even_distribution_legacy = 0;
+	int even_distribution = 0;
+	int min_hblank = 0;
+	int extra_pclk_cycles;
+	u8 extra_pclk_cycle_delay = 4;
+	int extra_pclk_cycles_in_link_clk;
+	u64 ratio_by_tu;
+	u64 average_valid2;
+	u64 extra_buffer_margin;
+	int new_valid_boundary_link;
+
+	u64 resulting_valid_tmp;
+	u64 ratio_by_tu_tmp;
+	int n_tus_tmp;
+	int extra_pclk_cycles_tmp;
+	int extra_pclk_cycles_in_lclk_tmp;
+	int extra_req_bytes_new_tmp;
+	int filler_size_tmp;
+	int lower_filler_size_tmp;
+	int delay_start_link_tmp;
+	int min_hblank_tmp = 0;
+	bool extra_req_bytes_is_neg = false;
+	struct dp_panel_info *pinfo = &ctrl->panel->pinfo;
+
+	u8 dp_brute_force = 1;
+	u64 brute_force_threshold = 10;
+	u64 diff_abs;
+
+	link_rate = ctrl->link->link_rate;
+	ln_cnt =  ctrl->link->lane_count;
+
+	bpp = pinfo->bpp;
+	lwidth = pinfo->h_active;
+	h_blank = pinfo->h_back_porch + pinfo->h_front_porch +
+				pinfo->h_sync_width;
+	pclk = pinfo->pixel_clk_khz * 1000;
+
+	boundary_moderation_en = 0;
+	upper_bdry_cnt = 0;
+	lower_bdry_cnt = 0;
+	i_upper_bdry_cnt = 0;
+	i_lower_bdry_cnt = 0;
+	valid_lower_boundary_link = 0;
+	even_distribution_bf = 0;
+	even_distribution_legacy = 0;
+	even_distribution = 0;
+	min_hblank = 0;
+
+	lclk = drm_dp_bw_code_to_link_rate(link_rate) * DP_KHZ_TO_HZ;
+
+	pr_debug("pclk=%lld, active_width=%d, h_blank=%d\n",
+						pclk, lwidth, h_blank);
+	pr_debug("lclk = %lld, ln_cnt = %d\n", lclk, ln_cnt);
+	ratio = div64_u64_rem(pclk * bpp * multiplier,
+				8 * ln_cnt * lclk, &reminder);
+	ratio = div64_u64((pclk * bpp * multiplier), (8 * ln_cnt * lclk));
+	original_ratio = ratio;
+
+	extra_buffer_margin = roundup_u64(div64_u64(extra_pclk_cycle_delay
+				* lclk * multiplier, pclk), multiplier);
+	extra_buffer_margin = div64_u64(extra_buffer_margin, multiplier);
+
+	/* To deal with cases where lines are not distributable */
+	if (((lwidth % ln_cnt) != 0) && ratio < multiplier) {
+		ratio = ratio * ratio_scale;
+		ratio = ratio < (1000 * multiplier)
+				? ratio : (1000 * multiplier);
+	}
+	pr_debug("ratio = %lld\n", ratio);
+
+	for (tu_size = 32; tu_size <= 64; tu_size++) {
+		temp = ratio * tu_size;
+		temp2 = ((temp / multiplier) + 1) * multiplier;
+		n_err = roundup_u64(temp, multiplier) - temp;
+
+		if (n_err < err) {
+			err = n_err;
+			tu_size_desired = tu_size;
+		}
+	}
+	pr_debug("Info: tu_size_desired = %d\n", tu_size_desired);
+
+	tu_size_minus1 = tu_size_desired - 1;
+
+	valid_boundary_link = roundup_u64(ratio * tu_size_desired, multiplier);
+	valid_boundary_link /= multiplier;
+	n_tus = rounddown((lwidth * bpp * multiplier)
+			/ (8 * valid_boundary_link), multiplier) / multiplier;
+	even_distribution_legacy = n_tus % ln_cnt == 0 ? 1 : 0;
+	pr_debug("Info: n_symbol_per_tu=%d, number_of_tus=%d\n",
+					valid_boundary_link, n_tus);
+
+	extra_bytes = roundup_u64((n_tus + 1)
+			* ((valid_boundary_link * multiplier)
+			- (original_ratio * tu_size_desired)), multiplier);
+	extra_bytes /= multiplier;
+	extra_pclk_cycles = roundup(extra_bytes * 8 * multiplier / bpp,
+			multiplier);
+	extra_pclk_cycles /= multiplier;
+	extra_pclk_cycles_in_link_clk = roundup_u64(div64_u64(extra_pclk_cycles
+				* lclk * multiplier, pclk), multiplier);
+	extra_pclk_cycles_in_link_clk /= multiplier;
+	filler_size = roundup_u64((tu_size_desired - valid_boundary_link)
+						* multiplier, multiplier);
+	filler_size /= multiplier;
+	ratio_by_tu = div64_u64(ratio * tu_size_desired, multiplier);
+
+	pr_debug("extra_pclk_cycles_in_link_clk=%d, extra_bytes=%d\n",
+				extra_pclk_cycles_in_link_clk, extra_bytes);
+	pr_debug("extra_pclk_cycles_in_link_clk=%d\n",
+				extra_pclk_cycles_in_link_clk);
+	pr_debug("filler_size=%d, extra_buffer_margin=%lld\n",
+				filler_size, extra_buffer_margin);
+
+	delay_start_link = ((extra_bytes > extra_pclk_cycles_in_link_clk)
+			? extra_bytes
+			: extra_pclk_cycles_in_link_clk)
+				+ filler_size + extra_buffer_margin;
+	resulting_valid = valid_boundary_link;
+	pr_debug("Info: delay_start_link=%d, filler_size=%d\n",
+				delay_start_link, filler_size);
+	pr_debug("valid_boundary_link=%d ratio_by_tu=%lld\n",
+				valid_boundary_link, ratio_by_tu);
+
+	diff_abs = (resulting_valid >= ratio_by_tu)
+				? (resulting_valid - ratio_by_tu)
+				: (ratio_by_tu - resulting_valid);
+
+	if (err != 0 && ((diff_abs > brute_force_threshold)
+			|| (even_distribution_legacy == 0)
+			|| (dp_brute_force == 1))) {
+		err = multiplier;
+		for (tu_size = 32; tu_size <= 64; tu_size++) {
+			for (i_upper_bdry_cnt = 1; i_upper_bdry_cnt <= 15;
+						i_upper_bdry_cnt++) {
+				for (i_lower_bdry_cnt = 1;
+					i_lower_bdry_cnt <= 15;
+					i_lower_bdry_cnt++) {
+					new_valid_boundary_link =
+						roundup_u64(ratio
+						* tu_size, multiplier);
+					average_valid2 = (i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					n_tus = rounddown_u64(div64_u64(lwidth
+						* multiplier * multiplier
+						* (bpp / 8), average_valid2),
+							multiplier);
+					n_tus /= multiplier;
+					n_tus_per_lane
+						= rounddown(n_tus
+							* multiplier
+							/ ln_cnt, multiplier);
+					n_tus_per_lane /= multiplier;
+					paired_tus =
+						rounddown((n_tus_per_lane)
+							* multiplier
+							/ (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt),
+							multiplier);
+					paired_tus /= multiplier;
+					remainder_tus = n_tus_per_lane
+							- paired_tus
+						* (i_upper_bdry_cnt
+							+ i_lower_bdry_cnt);
+					if ((remainder_tus
+						- i_upper_bdry_cnt) > 0) {
+						remainder_tus_upper
+							= i_upper_bdry_cnt;
+						remainder_tus_lower =
+							remainder_tus
+							- i_upper_bdry_cnt;
+					} else {
+						remainder_tus_upper
+							= remainder_tus;
+						remainder_tus_lower = 0;
+					}
+					total_valid = paired_tus
+						* (i_upper_bdry_cnt
+						* new_valid_boundary_link
+							+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier))
+						+ (remainder_tus_upper
+						* new_valid_boundary_link)
+						+ (remainder_tus_lower
+						* (new_valid_boundary_link
+							- multiplier));
+					n_err_neg = nn_err_neg = false;
+					effective_valid
+						= div_u64(total_valid,
+							n_tus_per_lane);
+					n_n_err = (effective_valid
+							>= (ratio * tu_size))
+						? (effective_valid
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- effective_valid);
+					if (effective_valid < (ratio * tu_size))
+						nn_err_neg = true;
+					n_err = (average_valid2
+						>= (ratio * tu_size))
+						? (average_valid2
+							- (ratio * tu_size))
+						: ((ratio * tu_size)
+							- average_valid2);
+					if (average_valid2 < (ratio * tu_size))
+						n_err_neg = true;
+					even_distribution =
+						n_tus % ln_cnt == 0 ? 1 : 0;
+					diff_abs =
+						resulting_valid >= ratio_by_tu
+						? (resulting_valid
+							- ratio_by_tu)
+						: (ratio_by_tu
+							- resulting_valid);
+
+					resulting_valid_tmp = div64_u64(
+						(i_upper_bdry_cnt
+						* new_valid_boundary_link
+						+ i_lower_bdry_cnt
+						* (new_valid_boundary_link
+							- multiplier)),
+						(i_upper_bdry_cnt
+							+ i_lower_bdry_cnt));
+					ratio_by_tu_tmp =
+						original_ratio * tu_size;
+					ratio_by_tu_tmp /= multiplier;
+					n_tus_tmp = rounddown_u64(
+						div64_u64(lwidth
+						* multiplier * multiplier
+						* bpp / 8,
+						resulting_valid_tmp),
+						multiplier);
+					n_tus_tmp /= multiplier;
+
+					temp3 = (resulting_valid_tmp
+						>= (original_ratio * tu_size))
+						? (resulting_valid_tmp
+						- original_ratio * tu_size)
+						: (original_ratio * tu_size)
+						- resulting_valid_tmp;
+					temp3 = (n_tus_tmp + 1) * temp3;
+					temp4 = (new_valid_boundary_link
+						>= (original_ratio * tu_size))
+						? (new_valid_boundary_link
+							- original_ratio
+							* tu_size)
+						: (original_ratio * tu_size)
+						- new_valid_boundary_link;
+					temp4 = (i_upper_bdry_cnt
+							* ln_cnt * temp4);
+
+					temp3 = roundup_u64(temp3, multiplier);
+					temp4 = roundup_u64(temp4, multiplier);
+					dp_ctrl_get_extra_req_bytes
+						(resulting_valid_tmp,
+						new_valid_boundary_link,
+						temp3, temp4,
+						&extra_req_bytes_is_neg,
+						&result,
+						(original_ratio * tu_size));
+					extra_req_bytes_new_tmp
+						= div64_ul(result, multiplier);
+					if ((extra_req_bytes_is_neg)
+						&& (extra_req_bytes_new_tmp
+							> 1))
+						extra_req_bytes_new_tmp
+						= extra_req_bytes_new_tmp - 1;
+					if (extra_req_bytes_new_tmp == 0)
+						extra_req_bytes_new_tmp = 1;
+					extra_pclk_cycles_tmp =
+						(u64)(extra_req_bytes_new_tmp
+						      * 8 * multiplier) / bpp;
+					extra_pclk_cycles_tmp /= multiplier;
+
+					if (extra_pclk_cycles_tmp <= 0)
+						extra_pclk_cycles_tmp = 1;
+					extra_pclk_cycles_in_lclk_tmp =
+						roundup_u64(div64_u64(
+							extra_pclk_cycles_tmp
+							* lclk * multiplier,
+							pclk), multiplier);
+					extra_pclk_cycles_in_lclk_tmp
+						/= multiplier;
+					filler_size_tmp = roundup_u64(
+						(tu_size * multiplier *
+						new_valid_boundary_link),
+						multiplier);
+					filler_size_tmp /= multiplier;
+					lower_filler_size_tmp =
+						filler_size_tmp + 1;
+					if (extra_req_bytes_is_neg)
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_pclk_cycles_in_lclk_tmp
+						: extra_req_bytes_new_tmp);
+					else
+						temp3 = (extra_req_bytes_new_tmp
+						> extra_pclk_cycles_in_lclk_tmp
+						? extra_req_bytes_new_tmp :
+						extra_pclk_cycles_in_lclk_tmp);
+
+					temp4 = lower_filler_size_tmp
+						+ extra_buffer_margin;
+					if (extra_req_bytes_is_neg)
+						delay_start_link_tmp
+							= (temp3 >= temp4)
+							? (temp3 - temp4)
+							: (temp4 - temp3);
+					else
+						delay_start_link_tmp
+							= temp3 + temp4;
+
+					min_hblank_tmp = (int)div64_u64(
+						roundup_u64(
+						div64_u64(delay_start_link_tmp
+						* pclk * multiplier, lclk),
+						multiplier), multiplier)
+						+ hblank_margin;
+
+					if (((even_distribution == 1)
+						|| ((even_distribution_bf == 0)
+						&& (even_distribution_legacy
+								== 0)))
+						&& !n_err_neg && !nn_err_neg
+						&& n_n_err < err
+						&& (n_n_err < diff_abs
+						|| (dp_brute_force == 1))
+						&& (new_valid_boundary_link
+									- 1) > 0
+						&& (h_blank >=
+							(u32)min_hblank_tmp)) {
+						upper_bdry_cnt =
+							i_upper_bdry_cnt;
+						lower_bdry_cnt =
+							i_lower_bdry_cnt;
+						err = n_n_err;
+						boundary_moderation_en = 1;
+						tu_size_desired = tu_size;
+						valid_boundary_link =
+							new_valid_boundary_link;
+						effective_valid_recorded
+							= effective_valid;
+						delay_start_link
+							= delay_start_link_tmp;
+						filler_size = filler_size_tmp;
+						min_hblank = min_hblank_tmp;
+						n_tus = n_tus_tmp;
+						even_distribution_bf = 1;
+
+						pr_debug("upper_bdry_cnt=%d, lower_boundary_cnt=%d, err=%lld, tu_size_desired=%d, valid_boundary_link=%d, effective_valid=%lld\n",
+							upper_bdry_cnt,
+							lower_bdry_cnt, err,
+							tu_size_desired,
+							valid_boundary_link,
+							effective_valid);
+					}
+				}
+			}
+		}
+
+		if (boundary_moderation_en == 1) {
+			resulting_valid = (u64)(upper_bdry_cnt
+					* valid_boundary_link + lower_bdry_cnt
+					* (valid_boundary_link - 1))
+					/ (upper_bdry_cnt + lower_bdry_cnt);
+			ratio_by_tu = original_ratio * tu_size_desired;
+			valid_lower_boundary_link =
+				(valid_boundary_link / multiplier) - 1;
+
+			tu_size_minus1 = tu_size_desired - 1;
+			even_distribution_bf = 1;
+			valid_boundary_link /= multiplier;
+			pr_debug("Info: Boundary_moderation enabled\n");
+		}
+	}
+
+	min_hblank = ((int) roundup_u64(div64_u64(delay_start_link * pclk
+			* multiplier, lclk), multiplier))
+			/ multiplier + hblank_margin;
+	if (h_blank < (u32)min_hblank) {
+		pr_debug(" WARNING: run_idx=%d Programmed h_blank %d is smaller than the min_hblank %d supported.\n",
+					run_idx, h_blank, min_hblank);
+	}
+
+	if (fifo_empty)	{
+		tu_size_minus1 = 31;
+		valid_boundary_link = 32;
+		delay_start_link = 0;
+		boundary_moderation_en = 0;
+	}
+
+	pr_debug("tu_size_minus1=%d valid_boundary_link=%d delay_start_link=%d boundary_moderation_en=%d\n upper_boundary_cnt=%d lower_boundary_cnt=%d valid_lower_boundary_link=%d min_hblank=%d\n",
+		tu_size_minus1, valid_boundary_link, delay_start_link,
+		boundary_moderation_en, upper_bdry_cnt, lower_bdry_cnt,
+		valid_lower_boundary_link, min_hblank);
+
+	tu_table->valid_boundary_link = valid_boundary_link;
+	tu_table->delay_start_link = delay_start_link;
+	tu_table->boundary_moderation_en = boundary_moderation_en;
+	tu_table->valid_lower_boundary_link = valid_lower_boundary_link;
+	tu_table->upper_boundary_count = upper_bdry_cnt;
+	tu_table->lower_boundary_count = lower_bdry_cnt;
+	tu_table->tu_size_minus1 = tu_size_minus1;
+}
+
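+/*
+ * Pack the computed TU parameters into the DP_TU and DP_VALID_BOUNDARY(_2)
+ * register layouts and push them to the hardware via the catalog.
+ */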
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+	u32 dp_tu = 0x0;
+	u32 valid_boundary = 0x0;
+	u32 valid_boundary2 = 0x0;
+	struct dp_vc_tu_mapping_table tu_calc_table;
+
+	dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
+	dp_tu |= tu_calc_table.tu_size_minus1;
+	valid_boundary |= tu_calc_table.valid_boundary_link;
+	valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+	valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+	valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+	valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+	if (tu_calc_table.boundary_moderation_en)
+		valid_boundary2 |= BIT(0);
+
+	pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+			dp_tu, valid_boundary, valid_boundary2);
+
+	ctrl->catalog->dp_tu = dp_tu;
+	ctrl->catalog->valid_boundary = valid_boundary;
+	ctrl->catalog->valid_boundary2 = valid_boundary2;
+
+	ctrl->catalog->update_transfer_unit(ctrl->catalog);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		return ret;
+
+	ret = wait_for_completion_timeout(&ctrl->video_comp, HZ / 2);
+	if (ret <= 0) {
+		pr_err("wait for video ready timed out\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
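+/*
+ * Program the sink's TRAINING_LANEx_SET registers (DPCD 0x103..0x106) with
+ * the requested voltage swing and pre-emphasis, flagging when the maximum
+ * supported level has been reached.
+ */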
+static int dp_ctrl_update_sink_vx_px(struct dp_ctrl_private *ctrl,
+		u32 voltage_level, u32 pre_emphasis_level)
+{
+	int i;
+	u8 buf[4];
+	u32 max_level_reached = 0;
+
+	if (voltage_level == DP_LINK_VOLTAGE_MAX) {
+		pr_debug("max. voltage swing level reached %d\n",
+				voltage_level);
+		max_level_reached |= BIT(2);
+	}
+
+	if (pre_emphasis_level == DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("max. pre-emphasis level reached %d\n",
+				pre_emphasis_level);
+		max_level_reached  |= BIT(5);
+	}
+
+	pr_debug("max_level_reached = 0x%x\n", max_level_reached);
+
+	pre_emphasis_level <<= 3;
+
+	for (i = 0; i < 4; i++)
+		buf[i] = voltage_level | pre_emphasis_level | max_level_reached;
+
+	pr_debug("p|v=0x%x\n", voltage_level | pre_emphasis_level);
+	return drm_dp_dpcd_write(ctrl->aux->drm_aux, 0x103, buf, 4);
+}
+
+static void dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+	struct dp_link *link = ctrl->link;
+
+	pr_debug("v=%d p=%d\n", link->v_level, link->p_level);
+
+	ctrl->catalog->update_vx_px(ctrl->catalog,
+			link->v_level, link->p_level);
+
+	dp_ctrl_update_sink_vx_px(ctrl, link->v_level, link->p_level);
+}
+
+static void dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+		u8 pattern)
+{
+	u8 buf[4];
+
+	pr_debug("pattern=%x\n", pattern);
+
+	buf[0] = pattern;
+	drm_dp_dpcd_write(ctrl->aux->drm_aux, DP_TRAINING_PATTERN_SET, buf, 1);
+}
+
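+/*
+ * Link training phase 1 (clock recovery): transmit training pattern 1 and
+ * adjust voltage swing/pre-emphasis until the sink reports clock recovery,
+ * giving up after five tries at the same voltage level or once the maximum
+ * swing level is reached.
+ */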
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl)
+{
+	int tries, old_v_level, ret = 0, len = 0;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	int const maximum_retries = 5;
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ctrl->catalog->set_pattern(ctrl->catalog, 0x01);
+	dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+		DP_RECOVERED_CLOCK_OUT_EN); /* train_1 */
+	dp_ctrl_update_vx_px(ctrl);
+
+	tries = 0;
+	old_v_level = ctrl->link->v_level;
+	while (1) {
+		drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
+
+		len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+			link_status);
+		if (len < DP_LINK_STATUS_SIZE) {
+			pr_err("DP link status read failed\n");
+			ret = -1;
+			break;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status,
+			ctrl->link->lane_count)) {
+			ret = 0;
+			break;
+		}
+
+		if (ctrl->link->v_level == DP_LINK_VOLTAGE_MAX) {
+			ret = -1;
+			break;	/* quit */
+		}
+
+		if (old_v_level == ctrl->link->v_level) {
+			tries++;
+			if (tries >= maximum_retries) {
+				ret = -1;
+				break;	/* quit */
+			}
+		} else {
+			tries = 0;
+			old_v_level = ctrl->link->v_level;
+		}
+
+		ctrl->link->adjust_levels(ctrl->link, link_status);
+		dp_ctrl_update_vx_px(ctrl);
+	}
+
+	return ret;
+}
+
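+/*
+ * Step the link rate down one notch (8.1 -> 5.4 -> 2.7 -> 1.62 Gbps) so the
+ * caller can retry training at a lower rate.
+ */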
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (!ctrl)
+		return -EINVAL;
+
+	switch (ctrl->link->link_rate) {
+	case DP_LINK_RATE_810:
+		ctrl->link->link_rate = DP_LINK_BW_5_4;
+		break;
+	case DP_LINK_BW_5_4:
+		ctrl->link->link_rate = DP_LINK_BW_2_7;
+		break;
+	case DP_LINK_BW_2_7:
+		ctrl->link->link_rate = DP_LINK_BW_1_62;
+		break;
+	case DP_LINK_BW_1_62:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	pr_debug("new rate=%d\n", ctrl->link->link_rate);
+
+	return ret;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+	dp_ctrl_train_pattern_set(ctrl, 0);
+	drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+}
+
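+/*
+ * Link training phase 2 (channel equalization): transmit training pattern 2,
+ * or pattern 3 when the sink supports TPS3, adjusting drive levels until
+ * equalization succeeds or the retry limit is exceeded.
+ */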
+static int dp_ctrl_link_training_2(struct dp_ctrl_private *ctrl)
+{
+	int tries = 0, ret = 0, len = 0;
+	char pattern;
+	int const maximum_retries = 5;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+		pattern = DP_TRAINING_PATTERN_3;
+	else
+		pattern = DP_TRAINING_PATTERN_2;
+
+	dp_ctrl_update_vx_px(ctrl);
+	ctrl->catalog->set_pattern(ctrl->catalog, pattern);
+	dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+
+	do  {
+		drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+
+		len = drm_dp_dpcd_read_link_status(ctrl->aux->drm_aux,
+			link_status);
+		if (len < DP_LINK_STATUS_SIZE) {
+			pr_err("DP link status read failed\n");
+			ret = -1;
+			break;
+		}
+
+		if (drm_dp_channel_eq_ok(link_status, ctrl->link->lane_count)) {
+			ret = 0;
+			break;
+		}
+
+		if (tries > maximum_retries) {
+			ret = -1;
+			break;
+		}
+		tries++;
+
+		ctrl->link->adjust_levels(ctrl->link, link_status);
+		dp_ctrl_update_vx_px(ctrl);
+	} while (1);
+
+	return ret;
+}
+
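+/*
+ * Full link training sequence: configure the link through the DRM DP helpers,
+ * run clock recovery and channel equalization, and return -EAGAIN when a
+ * failing phase can be retried at a lower link rate. The training pattern is
+ * always cleared before returning.
+ */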
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+	struct drm_dp_link dp_link;
+
+	ctrl->link->p_level = 0;
+	ctrl->link->v_level = 0;
+
+	dp_ctrl_config_ctrl(ctrl);
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	dp_link.num_lanes = ctrl->link->lane_count;
+	dp_link.rate = ctrl->link->link_rate;
+	dp_link.capabilities = ctrl->panel->dp_link.capabilities;
+	drm_dp_link_configure(ctrl->aux->drm_aux, &dp_link);
+
+	ret = dp_ctrl_link_train_1(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 1 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 1 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+	ret = dp_ctrl_link_training_2(ctrl);
+	if (ret < 0) {
+		if (!dp_ctrl_link_rate_down_shift(ctrl)) {
+			pr_debug("retry with lower rate\n");
+
+			dp_ctrl_clear_training_pattern(ctrl);
+			return -EAGAIN;
+		}
+
+		pr_err("Training 2 failed\n");
+		ret = -EINVAL;
+		goto clear;
+	}
+
+	pr_debug("Training 2 completed successfully\n");
+
+	dp_ctrl_state_ctrl(ctrl, 0);
+	/* Make sure to clear the current pattern before starting a new one */
+	wmb();
+
+clear:
+	dp_ctrl_clear_training_pattern(ctrl);
+	return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl, bool train)
+{
+	bool mainlink_ready = false;
+	int ret = 0;
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, true);
+
+	drm_dp_link_power_up(ctrl->aux->drm_aux, &ctrl->panel->dp_link);
+
+	if (ctrl->link->phy_pattern_requested(ctrl->link))
+		goto end;
+
+	if (!train)
+		goto send_video;
+
+	/*
+	 * As part of previous calls, DP controller state might have
+	 * transitioned to PUSH_IDLE. In order to start transmitting a link
+	 * training pattern, we first have to do a DP software reset.
+	 */
+	ctrl->catalog->reset(ctrl->catalog);
+
+	ret = dp_ctrl_link_train(ctrl);
+	if (ret)
+		goto end;
+
+send_video:
+	/*
+	 * Set up transfer unit values and set controller state to send
+	 * video.
+	 */
+	dp_ctrl_setup_tr_unit(ctrl);
+	ctrl->catalog->state_ctrl(ctrl->catalog, ST_SEND_VIDEO);
+
+	dp_ctrl_wait4video_ready(ctrl);
+	mainlink_ready = ctrl->catalog->mainlink_ready(ctrl->catalog);
+	pr_debug("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+end:
+	return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+		char *name, u32 rate)
+{
+	u32 num = ctrl->parser->mp[DP_CTRL_PM].num_clk;
+	struct dss_clk *cfg = ctrl->parser->mp[DP_CTRL_PM].clk_config;
+
+	while (num && strcmp(cfg->clk_name, name)) {
+		num--;
+		cfg++;
+	}
+
+	if (num)
+		cfg->rate = rate;
+	else
+		pr_err("%s clock could not be set with rate %d\n", name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	ctrl->power->set_pixel_clk_parent(ctrl->power);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk",
+		drm_dp_bw_code_to_link_rate(ctrl->link->link_rate));
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ);
+
+	dp_ctrl_set_clock_rate(ctrl, "ctrl_pixel_clk", ctrl->pixel_rate);
+
+	ret = ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, true);
+	if (ret) {
+		pr_err("Unable to start link clocks\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int dp_ctrl_disable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+	return ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, false);
+}
+
+static int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+	struct dp_ctrl_private *ctrl;
+	struct dp_catalog_ctrl *catalog;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return -EINVAL;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->initialized) {
+		pr_debug("host init done already\n");
+		return 0;
+	}
+
+	ctrl->orientation = flip;
+	catalog = ctrl->catalog;
+
+	catalog->reset(ctrl->catalog);
+	catalog->phy_reset(ctrl->catalog);
+	catalog->enable_irq(ctrl->catalog, true);
+
+	ctrl->initialized = true;
+
+	return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @dp_ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+static void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		pr_err("Invalid input data\n");
+		return;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (!ctrl->initialized) {
+		pr_debug("host deinit done already\n");
+		return;
+	}
+
+	ctrl->catalog->enable_irq(ctrl->catalog, false);
+	ctrl->catalog->reset(ctrl->catalog);
+
+	/* Make sure DP is disabled before clk disable */
+	wmb();
+
+	dp_ctrl_disable_mainlink_clocks(ctrl);
+
+	ctrl->initialized = false;
+	pr_debug("Host deinitialized successfully\n");
+}
+
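+
+/*
+ * Bring up the main link from HPD-IRQ (link maintenance) context. The
+ * loop keeps retrying while dp_ctrl_setup_main_link() returns -EAGAIN,
+ * i.e. after link training has shifted down to a lower link rate.
+ */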
+static int dp_ctrl_on_irq(struct dp_ctrl_private *ctrl, bool lt_needed)
+{
+	int ret = 0;
+
+	do {
+		if (ret == -EAGAIN)
+			ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+		ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+		if (lt_needed) {
+			/*
+			 * Disable and re-enable the mainlink clock since the
+			 * link clock might have been adjusted as part of the
+			 * link maintenance.
+			 */
+			if (!ctrl->link->phy_pattern_requested(
+					ctrl->link))
+				dp_ctrl_disable_mainlink_clocks(ctrl);
+
+			ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+			if (ret)
+				continue;
+		}
+
+		dp_ctrl_configure_source_params(ctrl);
+
+		reinit_completion(&ctrl->idle_comp);
+
+		ctrl->power_on = true;
+
+		if (ctrl->psm_enabled) {
+			ret = ctrl->link->send_psm_request(ctrl->link, false);
+			if (ret) {
+				pr_err("failed to exit low power mode, rc=%d\n",
+					ret);
+				continue;
+			}
+		}
+
+		ret = dp_ctrl_setup_main_link(ctrl, lt_needed);
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+
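+
+/*
+ * Bring up the main link on hot plug. Link rate, lane count and pixel
+ * clock are taken from the panel, the link clocks are enabled and link
+ * training is retried until it stops asking for a lower rate. With
+ * continuous splash active, the setup is skipped and only training runs.
+ */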
+static int dp_ctrl_on_hpd(struct dp_ctrl_private *ctrl)
+{
+	int ret = 0;
+
+	if (ctrl->cont_splash)
+		goto link_training;
+
+	ctrl->power->clk_enable(ctrl->power, DP_CORE_PM, true);
+	ctrl->catalog->hpd_config(ctrl->catalog, true);
+
+	ctrl->link->link_rate  = ctrl->panel->get_link_rate(ctrl->panel);
+	ctrl->link->lane_count = ctrl->panel->dp_link.num_lanes;
+	ctrl->pixel_rate = ctrl->panel->pinfo.pixel_clk_khz;
+
+	pr_debug("link_rate=%d, lane_count=%d, pixel_rate=%d\n",
+		ctrl->link->link_rate, ctrl->link->lane_count,
+		ctrl->pixel_rate);
+
+	ctrl->catalog->phy_lane_cfg(ctrl->catalog,
+			ctrl->orientation, ctrl->link->lane_count);
+
+	ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+	if (ret)
+		goto exit;
+
+	reinit_completion(&ctrl->idle_comp);
+
+	dp_ctrl_configure_source_params(ctrl);
+
+	if (ctrl->psm_enabled)
+		ret = ctrl->link->send_psm_request(ctrl->link, false);
+link_training:
+	ctrl->power_on = true;
+
+	while (-EAGAIN == dp_ctrl_setup_main_link(ctrl, true))
+		pr_debug("MAIN LINK TRAINING RETRY\n");
+
+	ctrl->cont_splash = 0;
+
+	ctrl->power_on = true;
+	pr_debug("End-\n");
+
+exit:
+	return ret;
+}
+
+static int dp_ctrl_off_irq(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("ctrl already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	/* Make sure DP mainlink and audio engines are disabled */
+	wmb();
+
+	complete_all(&ctrl->irq_comp);
+	pr_debug("end\n");
+
+	return 0;
+}
+
+static int dp_ctrl_off_hpd(struct dp_ctrl_private *ctrl)
+{
+	if (!ctrl->power_on) {
+		pr_debug("panel already powered off\n");
+		return 0;
+	}
+
+	ctrl->catalog->mainlink_ctrl(ctrl->catalog, false);
+
+	ctrl->power_on = false;
+	ctrl->sink_info_read = false;
+
+	pr_debug("DP off done\n");
+
+	return 0;
+}
+
+static int dp_ctrl_on(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_on_irq(ctrl, false);
+	else
+		rc = dp_ctrl_on_hpd(ctrl);
+end:
+	return rc;
+}
+
+static int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	if (ctrl->hpd_irq_on)
+		rc = dp_ctrl_off_irq(ctrl);
+	else
+		rc = dp_ctrl_off_hpd(ctrl);
+end:
+	return rc;
+}
+
+static void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	ctrl->catalog->get_interrupt(ctrl->catalog);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_READY_FOR_VIDEO)
+		dp_ctrl_video_ready(ctrl);
+
+	if (ctrl->catalog->isr & DP_CTRL_INTR_IDLE_PATTERN_SENT)
+		dp_ctrl_idle_patterns_sent(ctrl);
+}
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in)
+{
+	int rc = 0;
+	struct dp_ctrl_private *ctrl;
+	struct dp_ctrl *dp_ctrl;
+
+	if (!in->dev || !in->panel || !in->aux ||
+	    !in->link || !in->catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	ctrl = devm_kzalloc(in->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	init_completion(&ctrl->idle_comp);
+	init_completion(&ctrl->video_comp);
+	init_completion(&ctrl->irq_comp);
+
+	/* in parameters */
+	ctrl->parser   = in->parser;
+	ctrl->panel    = in->panel;
+	ctrl->power    = in->power;
+	ctrl->aux      = in->aux;
+	ctrl->link     = in->link;
+	ctrl->catalog  = in->catalog;
+
+	dp_ctrl = &ctrl->dp_ctrl;
+
+	/* out parameters */
+	dp_ctrl->init      = dp_ctrl_host_init;
+	dp_ctrl->deinit    = dp_ctrl_host_deinit;
+	dp_ctrl->on        = dp_ctrl_on;
+	dp_ctrl->off       = dp_ctrl_off;
+	dp_ctrl->push_idle = dp_ctrl_push_idle;
+	dp_ctrl->isr       = dp_ctrl_isr;
+
+	return dp_ctrl;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+	struct dp_ctrl_private *ctrl;
+
+	if (!dp_ctrl)
+		return;
+
+	ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+	devm_kfree(ctrl->dev, ctrl);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644
index 0000000..5efe505
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+	int (*init)(struct dp_ctrl *dp_ctrl, bool flip);
+	void (*deinit)(struct dp_ctrl *dp_ctrl);
+	int (*on)(struct dp_ctrl *dp_ctrl);
+	int (*off)(struct dp_ctrl *dp_ctrl);
+	void (*push_idle)(struct dp_ctrl *dp_ctrl);
+	void (*isr)(struct dp_ctrl *dp_ctrl);
+};
+
+struct dp_ctrl_in {
+	struct device *dev;
+	struct dp_panel *panel;
+	struct dp_aux *aux;
+	struct dp_link *link;
+	struct dp_parser *parser;
+	struct dp_power *power;
+	struct dp_catalog_ctrl *catalog;
+};
+
+struct dp_ctrl *dp_ctrl_get(struct dp_ctrl_in *in);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644
index 0000000..d3f6bca
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+
+#include "msm_drv.h"
+#include "dp_usbpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+
+static struct dp_display *g_dp_display;
+
+struct dp_display_private {
+	char *name;
+	int irq;
+
+	struct platform_device *pdev;
+	struct dentry *root;
+	struct mutex lock;
+
+	struct dp_usbpd   *usbpd;
+	struct dp_parser  *parser;
+	struct dp_power   *power;
+	struct dp_catalog *catalog;
+	struct dp_aux     *aux;
+	struct dp_link    *link;
+	struct dp_panel   *panel;
+	struct dp_ctrl    *ctrl;
+
+	struct dp_usbpd_cb usbpd_cb;
+	struct dp_display_mode mode;
+	struct dp_display dp_display;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+	{.compatible = "qcom,dp-display"},
+	{}
+};
+
+static irqreturn_t dp_display_irq(int irq, void *dev_id)
+{
+	struct dp_display_private *dp = dev_id;
+
+	if (!dp) {
+		pr_err("invalid data\n");
+		return IRQ_NONE;
+	}
+
+	/* DP controller isr */
+	dp->ctrl->isr(dp->ctrl);
+
+	/* DP aux isr */
+	dp->aux->isr(dp->aux);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t debugfs_dp_info_read(struct file *file, char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	struct dp_display_private *dp = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dp)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", dp->name);
+	len += snprintf(buf + len, (SZ_4K - len),
+			"\tResolution = %dx%d\n",
+			dp->panel->pinfo.h_active,
+			dp->panel->pinfo.v_active);
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+static const struct file_operations dp_debug_fops = {
+	.open = simple_open,
+	.read = debugfs_dp_info_read,
+};
+
+static int dp_display_debugfs_init(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct dentry *dir, *file;
+
+	dir = debugfs_create_dir(dp->name, NULL);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = PTR_ERR(dir);
+		pr_err("[%s] debugfs create dir failed, rc = %d\n",
+		       dp->name, rc);
+		goto error;
+	}
+
+	file = debugfs_create_file("dp_debug", 0444, dir, dp, &dp_debug_fops);
+	if (IS_ERR_OR_NULL(file)) {
+		rc = PTR_ERR(file);
+		pr_err("[%s] debugfs create file failed, rc=%d\n",
+		       dp->name, rc);
+		goto error_remove_dir;
+	}
+
+	dp->root = dir;
+	return rc;
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+static int dp_display_debugfs_deinit(struct dp_display_private *dp)
+{
+	debugfs_remove(dp->root);
+	return 0;
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+		void *data)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+	struct drm_device *drm;
+	struct msm_drm_private *priv;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	drm = dev_get_drvdata(master);
+	dp = platform_get_drvdata(pdev);
+	if (!drm || !dp) {
+		pr_err("invalid param(s), drm %pK, dp %pK\n",
+				drm, dp);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp->dp_display.drm_dev = drm;
+	priv = drm->dev_private;
+
+	mutex_lock(&dp->lock);
+
+	rc = dp_display_debugfs_init(dp);
+	if (rc) {
+		pr_err("[%s]Debugfs init failed, rc=%d\n", dp->name, rc);
+		goto end;
+	}
+
+	rc = dp->parser->parse(dp->parser);
+	if (rc) {
+		pr_err("device tree parsing failed\n");
+		goto end;
+	}
+
+	rc = dp->aux->drm_aux_register(dp->aux);
+	if (rc) {
+		pr_err("DRM DP AUX register failed\n");
+		goto end;
+	}
+
+	rc = dp->panel->sde_edid_register(dp->panel);
+	if (rc) {
+		pr_err("DRM DP EDID register failed\n");
+		goto end;
+	}
+
+	rc = dp->power->power_client_init(dp->power, &priv->phandle);
+	if (rc) {
+		pr_err("Power client create failed\n");
+		goto end;
+	}
+end:
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct dp_display_private *dp;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	dp = platform_get_drvdata(pdev);
+	if (!dp) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	mutex_lock(&dp->lock);
+
+	(void)dp->power->power_client_deinit(dp->power);
+
+	(void)dp->panel->sde_edid_deregister(dp->panel);
+
+	(void)dp->aux->drm_aux_deregister(dp->aux);
+
+	(void)dp_display_debugfs_deinit(dp);
+
+	mutex_unlock(&dp->lock);
+}
+
+static const struct component_ops dp_display_comp_ops = {
+	.bind = dp_display_bind,
+	.unbind = dp_display_unbind,
+};
+
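+
+/* On connect: read the sink DPCD capabilities, then fetch EDID over AUX. */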
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+	int rc;
+
+	rc = dp->panel->read_dpcd(dp->panel);
+	if (rc)
+		goto end;
+
+	sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc,
+		(void **)&dp->panel->edid_ctrl);
+
+	return 0;
+end:
+	return rc;
+}
+
+static int dp_display_process_hpd_low(struct dp_display_private *dp)
+{
+	dp->dp_display.is_connected = false;
+	return 0;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+	int rc = 0;
+	bool flip = false;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->orientation == ORIENTATION_CC2)
+		flip = true;
+
+	dp->power->init(dp->power, flip);
+	dp->ctrl->init(dp->ctrl, flip);
+	dp->aux->init(dp->aux, dp->parser->aux_cfg);
+	enable_irq(dp->irq);
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+	dp->dp_display.is_connected = true;
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+	dp->dp_display.is_connected = false;
+	disable_irq(dp->irq);
+	mutex_unlock(&dp->lock);
+
+end:
+	return rc;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dev) {
+		pr_err("invalid dev\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp = dev_get_drvdata(dev);
+	if (!dp) {
+		pr_err("no driver data found\n");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	mutex_lock(&dp->lock);
+
+	if (dp->usbpd->hpd_irq) {
+		if (!dp->link->process_request(dp->link))
+			goto end;
+	}
+
+	if (dp->usbpd->hpd_high)
+		dp_display_process_hpd_high(dp);
+	else
+		dp_display_process_hpd_low(dp);
+
+	mutex_unlock(&dp->lock);
+end:
+	return rc;
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+	int rc = 0;
+	struct device *dev = &dp->pdev->dev;
+	struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+	struct dp_ctrl_in ctrl_in = {
+		.dev = dev,
+	};
+
+	cb->configure  = dp_display_usbpd_configure_cb;
+	cb->disconnect = dp_display_usbpd_disconnect_cb;
+	cb->attention  = dp_display_usbpd_attention_cb;
+
+	dp->usbpd = dp_usbpd_get(dev, cb);
+	if (IS_ERR(dp->usbpd)) {
+		rc = PTR_ERR(dp->usbpd);
+		pr_err("failed to initialize usbpd, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->parser = dp_parser_get(dp->pdev);
+	if (IS_ERR(dp->parser)) {
+		rc = PTR_ERR(dp->parser);
+		pr_err("failed to initialize parser, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+	if (IS_ERR(dp->catalog)) {
+		rc = PTR_ERR(dp->catalog);
+		pr_err("failed to initialize catalog, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->power = dp_power_get(dp->parser);
+	if (IS_ERR(dp->power)) {
+		rc = PTR_ERR(dp->power);
+		pr_err("failed to initialize power, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->aux = dp_aux_get(dev, &dp->catalog->aux);
+	if (IS_ERR(dp->aux)) {
+		rc = PTR_ERR(dp->aux);
+		pr_err("failed to initialize aux, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->panel = dp_panel_get(dev, dp->aux, &dp->catalog->panel);
+	if (IS_ERR(dp->panel)) {
+		rc = PTR_ERR(dp->panel);
+		pr_err("failed to initialize panel, rc = %d\n", rc);
+		goto err;
+	}
+
+	dp->link = dp_link_get(dev, dp->aux);
+	if (IS_ERR(dp->link)) {
+		rc = PTR_ERR(dp->link);
+		pr_err("failed to initialize link, rc = %d\n", rc);
+		goto err;
+	}
+
+	ctrl_in.link = dp->link;
+	ctrl_in.panel = dp->panel;
+	ctrl_in.aux = dp->aux;
+	ctrl_in.power = dp->power;
+	ctrl_in.catalog = &dp->catalog->ctrl;
+	ctrl_in.parser = dp->parser;
+
+	dp->ctrl = dp_ctrl_get(&ctrl_in);
+	if (IS_ERR(dp->ctrl)) {
+		rc = PTR_ERR(dp->ctrl);
+		pr_err("failed to initialize ctrl, rc = %d\n", rc);
+		goto err;
+	}
+err:
+	return rc;
+}
+
+static int dp_display_set_mode(struct dp_display *dp_display,
+		struct dp_display_mode *mode)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->panel->pinfo = mode->timing;
+	dp->panel->init_info(dp->panel);
+error:
+	return rc;
+}
+
+static int dp_display_prepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_enable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+	dp->ctrl->on(dp->ctrl);
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_post_enable(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_pre_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->ctrl->off(dp->ctrl);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_display_disable(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	mutex_lock(&dp->lock);
+
+	dp->aux->deinit(dp->aux);
+	dp->ctrl->deinit(dp->ctrl);
+	dp->power->deinit(dp->power);
+
+	mutex_unlock(&dp->lock);
+error:
+	return rc;
+}
+
+static int dp_request_irq(struct dp_display *dp_display)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!dp_display) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+	dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+	if (!dp->irq) {
+		pr_err("failed to get irq\n");
+		return -EINVAL;
+	}
+
+	rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq,
+		IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+	if (rc < 0) {
+		pr_err("failed to request IRQ%u: %d\n",
+				dp->irq, rc);
+		return rc;
+	}
+	disable_irq(dp->irq);
+
+	return 0;
+}
+
+static int dp_display_unprepare(struct dp_display *dp)
+{
+	return 0;
+}
+
+static int dp_display_validate_mode(struct dp_display *dp,
+	struct dp_display_mode *mode)
+{
+	return 0;
+}
+
+static int dp_display_get_modes(struct dp_display *dp)
+{
+	int ret = 0;
+	struct dp_display_private *dp_display;
+
+	dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+	ret = _sde_edid_update_modes(dp->connector,
+		dp_display->panel->edid_ctrl);
+
+	return ret;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dp_display_private *dp;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	mutex_init(&dp->lock);
+	dp->pdev = pdev;
+	dp->name = "drm_dp";
+
+	rc = dp_init_sub_modules(dp);
+	if (rc) {
+		devm_kfree(&pdev->dev, dp);
+		return -EPROBE_DEFER;
+	}
+
+	platform_set_drvdata(pdev, dp);
+
+	g_dp_display = &dp->dp_display;
+
+	g_dp_display->enable        = dp_display_enable;
+	g_dp_display->post_enable   = dp_display_post_enable;
+	g_dp_display->pre_disable   = dp_display_pre_disable;
+	g_dp_display->disable       = dp_display_disable;
+	g_dp_display->set_mode      = dp_display_set_mode;
+	g_dp_display->validate_mode = dp_display_validate_mode;
+	g_dp_display->get_modes     = dp_display_get_modes;
+	g_dp_display->prepare       = dp_display_prepare;
+	g_dp_display->unprepare     = dp_display_unprepare;
+	g_dp_display->request_irq   = dp_request_irq;
+
+	rc = component_add(&pdev->dev, &dp_display_comp_ops);
+	if (rc)
+		pr_err("component add failed, rc=%d\n", rc);
+
+	return rc;
+}
+
+int dp_display_get_displays(void **displays, int count)
+{
+	if (!displays) {
+		pr_err("invalid data\n");
+		return -EINVAL;
+	}
+
+	if (count != 1) {
+		pr_err("invalid number of displays\n");
+		return -EINVAL;
+	}
+
+	displays[0] = g_dp_display;
+	return count;
+}
+
+int dp_display_get_num_of_displays(void)
+{
+	return 1;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+	dp_ctrl_put(dp->ctrl);
+	dp_link_put(dp->link);
+	dp_panel_put(dp->panel);
+	dp_aux_put(dp->aux);
+	dp_power_put(dp->power);
+	dp_catalog_put(dp->catalog);
+	dp_parser_put(dp->parser);
+	dp_usbpd_put(dp->usbpd);
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+	struct dp_display_private *dp;
+
+	if (!pdev)
+		return -EINVAL;
+
+	dp = platform_get_drvdata(pdev);
+
+	dp_display_deinit_sub_modules(dp);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, dp);
+
+	return 0;
+}
+
+static struct platform_driver dp_display_driver = {
+	.probe  = dp_display_probe,
+	.remove = dp_display_remove,
+	.driver = {
+		.name = "msm-dp-display",
+		.of_match_table = dp_dt_match,
+	},
+};
+
+static int __init dp_display_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&dp_display_driver);
+	if (ret) {
+		pr_err("driver register failed");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(dp_display_init);
+
+static void __exit dp_display_cleanup(void)
+{
+	platform_driver_unregister(&dp_display_driver);
+}
+module_exit(dp_display_cleanup);
+
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644
index 0000000..877287a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include <drm/drmP.h>
+
+#include "dp_panel.h"
+
+struct dp_display_mode {
+	struct dp_panel_info timing;
+	u32 capabilities;
+};
+
+struct dp_display {
+	struct drm_device *drm_dev;
+	struct dp_bridge *bridge;
+	struct drm_connector *connector;
+	bool is_connected;
+
+	int (*enable)(struct dp_display *dp_display);
+	int (*post_enable)(struct dp_display *dp_display);
+
+	int (*pre_disable)(struct dp_display *dp_display);
+	int (*disable)(struct dp_display *dp_display);
+
+	int (*set_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*validate_mode)(struct dp_display *dp_display,
+			struct dp_display_mode *mode);
+	int (*get_modes)(struct dp_display *dp_display);
+	int (*prepare)(struct dp_display *dp_display);
+	int (*unprepare)(struct dp_display *dp_display);
+	int (*request_irq)(struct dp_display *dp_display);
+};
+
+int dp_display_get_num_of_displays(void);
+int dp_display_get_displays(void **displays, int count);
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644
index 0000000..78c04c4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp]: %s: " fmt, __func__
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dp_drm.h"
+
+#define to_dp_bridge(x)     container_of((x), struct dp_bridge, base)
+
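+
+/*
+ * Translate drm_display_mode timings into the dp_display_mode layout:
+ * porches and sync widths are derived from the sync start/end and total
+ * values, and bpp is the connector bpc times three color components.
+ */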
+static void convert_to_dp_mode(const struct drm_display_mode *drm_mode,
+			struct dp_display_mode *dp_mode, struct dp_display *dp)
+{
+	const u32 num_components = 3;
+
+	memset(dp_mode, 0, sizeof(*dp_mode));
+
+	dp_mode->timing.h_active = drm_mode->hdisplay;
+	dp_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+	dp_mode->timing.h_sync_width = drm_mode->htotal -
+			(drm_mode->hsync_start + dp_mode->timing.h_back_porch);
+	dp_mode->timing.h_front_porch = drm_mode->hsync_start -
+					 drm_mode->hdisplay;
+	dp_mode->timing.h_skew = drm_mode->hskew;
+
+	dp_mode->timing.v_active = drm_mode->vdisplay;
+	dp_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+	dp_mode->timing.v_sync_width = drm_mode->vtotal -
+		(drm_mode->vsync_start + dp_mode->timing.v_back_porch);
+
+	dp_mode->timing.v_front_porch = drm_mode->vsync_start -
+					 drm_mode->vdisplay;
+	dp_mode->timing.bpp = dp->connector->display_info.bpc * num_components;
+
+	dp_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+	dp_mode->timing.pixel_clk_khz = drm_mode->clock;
+
+	dp_mode->timing.v_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+	dp_mode->timing.h_active_low =
+		!!(drm_mode->flags & DRM_MODE_FLAG_NHSYNC);
+}
+
+static void convert_to_drm_mode(const struct dp_display_mode *dp_mode,
+				struct drm_display_mode *drm_mode)
+{
+	u32 flags = 0;
+
+	memset(drm_mode, 0, sizeof(*drm_mode));
+
+	drm_mode->hdisplay = dp_mode->timing.h_active;
+	drm_mode->hsync_start = drm_mode->hdisplay +
+				dp_mode->timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+			      dp_mode->timing.h_sync_width;
+	drm_mode->htotal = drm_mode->hsync_end + dp_mode->timing.h_back_porch;
+	drm_mode->hskew = dp_mode->timing.h_skew;
+
+	drm_mode->vdisplay = dp_mode->timing.v_active;
+	drm_mode->vsync_start = drm_mode->vdisplay +
+				dp_mode->timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+			      dp_mode->timing.v_sync_width;
+	drm_mode->vtotal = drm_mode->vsync_end + dp_mode->timing.v_back_porch;
+
+	drm_mode->vrefresh = dp_mode->timing.refresh_rate;
+	drm_mode->clock = dp_mode->timing.pixel_clk_khz;
+
+	if (dp_mode->timing.h_active_low)
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PHSYNC;
+
+	if (dp_mode->timing.v_active_low)
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_PVSYNC;
+
+	drm_mode->flags = flags;
+
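+	/* 0x48 = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED */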
+	drm_mode->type = 0x48;
+	drm_mode_set_name(drm_mode);
+}
+
+static int dp_bridge_attach(struct drm_bridge *dp_bridge)
+{
+	struct dp_bridge *bridge = to_dp_bridge(dp_bridge);
+
+	if (!dp_bridge) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	pr_debug("[%d] attached\n", bridge->id);
+
+	return 0;
+}
+
+static void dp_bridge_pre_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	/* By this point mode should have been validated through mode_fixup */
+	rc = dp->set_mode(dp, &bridge->dp_mode);
+	if (rc) {
+		pr_err("[%d] failed to perform a mode set, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->prepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display prepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->enable(dp);
+	if (rc) {
+		pr_err("[%d] DP display enable failed, rc=%d\n",
+		       bridge->id, rc);
+		dp->unprepare(dp);
+	}
+}
+
+static void dp_bridge_enable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->post_enable(dp);
+	if (rc)
+		pr_err("[%d] DP display post enable failed, rc=%d\n",
+		       bridge->id, rc);
+}
+
+static void dp_bridge_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->pre_disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display pre disable failed, rc=%d\n",
+		       bridge->id, rc);
+	}
+}
+
+static void dp_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	rc = dp->disable(dp);
+	if (rc) {
+		pr_err("[%d] DP display disable failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+
+	rc = dp->unprepare(dp);
+	if (rc) {
+		pr_err("[%d] DP display unprepare failed, rc=%d\n",
+		       bridge->id, rc);
+		return;
+	}
+}
+
+static void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	memset(&bridge->dp_mode, 0x0, sizeof(struct dp_display_mode));
+	convert_to_dp_mode(adjusted_mode, &bridge->dp_mode, dp);
+}
+
+static bool dp_bridge_mode_fixup(struct drm_bridge *drm_bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	int rc = 0;
+	bool ret = true;
+	struct dp_display_mode dp_mode;
+	struct dp_bridge *bridge;
+	struct dp_display *dp;
+
+	if (!drm_bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		ret = false;
+		goto end;
+	}
+
+	bridge = to_dp_bridge(drm_bridge);
+	dp = bridge->display;
+
+	convert_to_dp_mode(mode, &dp_mode, dp);
+
+	rc = dp->validate_mode(dp, &dp_mode);
+	if (rc) {
+		pr_err("[%d] mode is not valid, rc=%d\n", bridge->id, rc);
+		ret = false;
+	} else {
+		convert_to_drm_mode(&dp_mode, adjusted_mode);
+	}
+end:
+	return ret;
+}
+
+static const struct drm_bridge_funcs dp_bridge_ops = {
+	.attach       = dp_bridge_attach,
+	.mode_fixup   = dp_bridge_mode_fixup,
+	.pre_enable   = dp_bridge_pre_enable,
+	.enable       = dp_bridge_enable,
+	.disable      = dp_bridge_disable,
+	.post_disable = dp_bridge_post_disable,
+	.mode_set     = dp_bridge_mode_set,
+};
+
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct dp_display *dp_display = display;
+
+	if (!info || !dp_display)
+		return -EINVAL;
+
+	dp_display->connector = connector;
+	return 0;
+}
+
+int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
+	struct msm_display_topology *topology, u32 max_mixer_width)
+{
+	const u32 dual_lm = 2;
+	const u32 single_lm = 1;
+	const u32 single_intf = 1;
+	const u32 no_enc = 0;
+
+	if (!drm_mode || !topology || !max_mixer_width) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ?
+							dual_lm : single_lm;
+	topology->num_enc = no_enc;
+	topology->num_intf = single_intf;
+
+	return 0;
+}
+
+int dp_connector_get_info(struct msm_display_info *info, void *data)
+{
+	struct dp_display *display = data;
+
+	if (!info || !display) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_DisplayPort;
+
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+	info->is_connected = display->is_connected;
+	info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
+	info->capabilities = MSM_DISPLAY_CAP_VID_MODE | MSM_DISPLAY_CAP_EDID |
+		MSM_DISPLAY_CAP_HOT_PLUG;
+
+	return 0;
+}
+
+enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!conn || !display)
+		return status;
+
+	/* get display dp_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = dp_connector_get_info(&info, display);
+	if (rc) {
+		pr_err("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+					      connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	conn->display_info.width_mm = info.width_mm;
+	conn->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	int rc = 0;
+	struct dp_display *dp;
+
+	if (!connector || !display)
+		return -EINVAL;
+
+	dp = display;
+	/* pluggable case assumes EDID is read on HPD connect */
+	if (dp->is_connected) {
+		rc = dp->get_modes(dp);
+		if (!rc)
+			pr_err("failed to get DP sink modes, rc=%d\n", rc);
+	} else {
+		pr_err("No sink connected\n");
+	}
+
+	return 0;
+}
+
+int dp_drm_bridge_init(void *data, struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct dp_bridge *bridge;
+	struct drm_device *dev;
+	struct dp_display *display = data;
+	struct msm_drm_private *priv = NULL;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	dev = display->drm_dev;
+	bridge->display = display;
+	bridge->base.funcs = &dp_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	priv = dev->dev_private;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		pr_err("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	rc = display->request_irq(display);
+	if (rc) {
+		pr_err("request_irq failed, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	display->bridge = bridge;
+
+	return 0;
+error_free_bridge:
+	kfree(bridge);
+error:
+	return rc;
+}
+
+void dp_drm_bridge_deinit(void *data)
+{
+	struct dp_display *display = data;
+	struct dp_bridge *bridge = display->bridge;
+
+	if (bridge && bridge->base.encoder)
+		bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
+
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_OK;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644
index 0000000..bef3758
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct dp_bridge {
+	struct drm_bridge base;
+	u32 id;
+
+	struct dp_display *display;
+	struct dp_display_mode dp_mode;
+};
+
+/**
+ * dp_connector_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dp_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display);
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display);
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dp_connector_get_modes(struct drm_connector *connector,
+		void *display);
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display);
+
+/**
+ * dp_connector_get_topology - retrieve current topology for the mode selected
+ * @drm_mode: Display mode set for the display
+ * @topology: Out parameter. Topology for the mode.
+ * @max_mixer_width: max width supported by HW layer mixer
+ * Returns: zero on success
+ */
+int dp_connector_get_topology(const struct drm_display_mode *drm_mode,
+		struct msm_display_topology *topology,
+		u32 max_mixer_width);
+
+int dp_connector_get_info(struct msm_display_info *info, void *display);
+
+int dp_drm_bridge_init(void *display,
+	struct drm_encoder *encoder);
+
+void dp_drm_bridge_deinit(void *display);
+#endif /* _DP_DRM_H_ */
+
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644
index 0000000..741acfca
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -0,0 +1,1612 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_LINK_ENUM_STR(x)		#x
+
+enum dp_lane_count {
+	DP_LANE_COUNT_1	= 1,
+	DP_LANE_COUNT_2	= 2,
+	DP_LANE_COUNT_4	= 4,
+};
+
+enum phy_test_pattern {
+	PHY_TEST_PATTERN_NONE,
+	PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING,
+	PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT,
+	PHY_TEST_PATTERN_PRBS7,
+	PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN,
+	PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN,
+};
+
+enum dynamic_range {
+	DP_DYNAMIC_RANGE_RGB_VESA = 0x00,
+	DP_DYNAMIC_RANGE_RGB_CEA = 0x01,
+	DP_DYNAMIC_RANGE_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum test_video_pattern {
+	DP_TEST_VIDEO_PATTERN_NONE = 0x00,
+	DP_TEST_VIDEO_PATTERN_COLOR_RAMPS = 0x01,
+	DP_TEST_VIDEO_PATTERN_BW_VERT_LINES = 0x02,
+	DP_TEST_VIDEO_PATTERN_COLOR_SQUARE = 0x03,
+};
+
+enum test_bit_depth {
+	DP_TEST_BIT_DEPTH_6 = 0x00,
+	DP_TEST_BIT_DEPTH_8 = 0x01,
+	DP_TEST_BIT_DEPTH_10 = 0x02,
+	DP_TEST_BIT_DEPTH_UNKNOWN = 0xFFFFFFFF,
+};
+
+enum dp_link_response {
+	TEST_ACK			= 0x1,
+	TEST_NACK			= 0x2,
+	TEST_EDID_CHECKSUM_WRITE	= 0x4,
+};
+
+enum audio_sample_rate {
+	AUDIO_SAMPLE_RATE_32_KHZ	= 0x00,
+	AUDIO_SAMPLE_RATE_44_1_KHZ	= 0x01,
+	AUDIO_SAMPLE_RATE_48_KHZ	= 0x02,
+	AUDIO_SAMPLE_RATE_88_2_KHZ	= 0x03,
+	AUDIO_SAMPLE_RATE_96_KHZ	= 0x04,
+	AUDIO_SAMPLE_RATE_176_4_KHZ	= 0x05,
+	AUDIO_SAMPLE_RATE_192_KHZ	= 0x06,
+};
+
+enum audio_pattern_type {
+	AUDIO_TEST_PATTERN_OPERATOR_DEFINED	= 0x00,
+	AUDIO_TEST_PATTERN_SAWTOOTH		= 0x01,
+};
+
+struct dp_link_request {
+	u32 test_requested;
+	u32 test_link_rate;
+	u32 test_lane_count;
+	u32 phy_test_pattern_sel;
+	u32 test_video_pattern;
+	u32 test_bit_depth;
+	u32 test_dyn_range;
+	u32 test_h_total;
+	u32 test_v_total;
+	u32 test_h_start;
+	u32 test_v_start;
+	u32 test_hsync_pol;
+	u32 test_hsync_width;
+	u32 test_vsync_pol;
+	u32 test_vsync_width;
+	u32 test_h_width;
+	u32 test_v_height;
+	u32 test_rr_d;
+	u32 test_rr_n;
+	u32 test_audio_sampling_rate;
+	u32 test_audio_channel_count;
+	u32 test_audio_pattern_type;
+	u32 test_audio_period_ch_1;
+	u32 test_audio_period_ch_2;
+	u32 test_audio_period_ch_3;
+	u32 test_audio_period_ch_4;
+	u32 test_audio_period_ch_5;
+	u32 test_audio_period_ch_6;
+	u32 test_audio_period_ch_7;
+	u32 test_audio_period_ch_8;
+	u32 response;
+};
+
+struct dp_link_sink_count {
+	u32 count;
+	bool cp_ready;
+};
+
+struct dp_link_private {
+	struct device *dev;
+	struct dp_aux *aux;
+	struct dp_link dp_link;
+
+	struct dp_link_request request;
+	struct dp_link_sink_count sink_count;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(enum test_bit_depth tbd)
+{
+	u32 bpp;
+
+	/*
+	 * Few simplistic rules and assumptions made here:
+	 *    1. Bit depth is per color component
+	 *    2. If bit depth is unknown return 0
+	 *    3. Assume 3 color components
+	 */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		bpp = 18;
+		break;
+	case DP_TEST_BIT_DEPTH_8:
+		bpp = 24;
+		break;
+	case DP_TEST_BIT_DEPTH_10:
+		bpp = 30;
+		break;
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		bpp = 0;
+	}
+
+	return bpp;
+}
+
+static char *dp_link_get_phy_test_pattern(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_NONE);
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING);
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+		return DP_LINK_ENUM_STR(
+			PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT);
+	case PHY_TEST_PATTERN_PRBS7:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_PRBS7);
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN);
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return DP_LINK_ENUM_STR(PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_test_pattern(u32 pattern)
+{
+	switch (pattern) {
+	case AUDIO_TEST_PATTERN_OPERATOR_DEFINED:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_OPERATOR_DEFINED);
+	case AUDIO_TEST_PATTERN_SAWTOOTH:
+		return DP_LINK_ENUM_STR(AUDIO_TEST_PATTERN_SAWTOOTH);
+	default:
+		return "unknown";
+	}
+}
+
+static char *dp_link_get_audio_sample_rate(u32 rate)
+{
+	switch (rate) {
+	case AUDIO_SAMPLE_RATE_32_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_32_KHZ);
+	case AUDIO_SAMPLE_RATE_44_1_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_44_1_KHZ);
+	case AUDIO_SAMPLE_RATE_48_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_48_KHZ);
+	case AUDIO_SAMPLE_RATE_88_2_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_88_2_KHZ);
+	case AUDIO_SAMPLE_RATE_96_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_96_KHZ);
+	case AUDIO_SAMPLE_RATE_176_4_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_176_4_KHZ);
+	case AUDIO_SAMPLE_RATE_192_KHZ:
+		return DP_LINK_ENUM_STR(AUDIO_SAMPLE_RATE_192_KHZ);
+	default:
+		return "unknown";
+	}
+}
+
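+
+/*
+ * Read one TEST_AUDIO_PERIOD_CH_x DPCD register and return the period
+ * held in bits 3:0, or -EINVAL on a read failure or out-of-range value.
+ */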
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+	int ret = 0;
+	u8 bp;
+	u8 data;
+	u32 const param_len = 0x1;
+	u32 const max_audio_period = 0xA;
+
+	/* TEST_AUDIO_PERIOD_CH_XX */
+	if (drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp,
+		param_len) < param_len) {
+		pr_err("failed to read test_audio_period (0x%x)\n", addr);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	data = bp;
+
+	/* Period - Bits 3:0 */
+	data = data & 0xF;
+	if ((int)data > max_audio_period) {
+		pr_err("invalid test_audio_period_ch_1 = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	ret = data;
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+	int ret = 0;
+	int const test_audio_period_ch_1_addr = 0x273;
+	int const test_audio_period_ch_2_addr = 0x274;
+	int const test_audio_period_ch_3_addr = 0x275;
+	int const test_audio_period_ch_4_addr = 0x276;
+	int const test_audio_period_ch_5_addr = 0x277;
+	int const test_audio_period_ch_6_addr = 0x278;
+	int const test_audio_period_ch_7_addr = 0x279;
+	int const test_audio_period_ch_8_addr = 0x27A;
+	struct dp_link_request *req = &link->request;
+
+	/* TEST_AUDIO_PERIOD_CH_1 (Byte 0x273) */
+	ret = dp_link_get_period(link, test_audio_period_ch_1_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_1 = ret;
+	pr_debug("test_audio_period_ch_1 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_2 (Byte 0x274) */
+	ret = dp_link_get_period(link, test_audio_period_ch_2_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_2 = ret;
+	pr_debug("test_audio_period_ch_2 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+	ret = dp_link_get_period(link, test_audio_period_ch_3_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_3 = ret;
+	pr_debug("test_audio_period_ch_3 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_4 (Byte 0x276) */
+	ret = dp_link_get_period(link, test_audio_period_ch_4_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_4 = ret;
+	pr_debug("test_audio_period_ch_4 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_5 (Byte 0x277) */
+	ret = dp_link_get_period(link, test_audio_period_ch_5_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_5 = ret;
+	pr_debug("test_audio_period_ch_5 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_6 (Byte 0x278) */
+	ret = dp_link_get_period(link, test_audio_period_ch_6_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_6 = ret;
+	pr_debug("test_audio_period_ch_6 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_7 (Byte 0x279) */
+	ret = dp_link_get_period(link, test_audio_period_ch_7_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_7 = ret;
+	pr_debug("test_audio_period_ch_7 = 0x%x\n", ret);
+
+	/* TEST_AUDIO_PERIOD_CH_8 (Byte 0x27A) */
+	ret = dp_link_get_period(link, test_audio_period_ch_8_addr);
+	if (ret == -EINVAL)
+		goto exit;
+
+	req->test_audio_period_ch_8 = ret;
+	pr_debug("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_pattern_type_addr = 0x272;
+	int const max_audio_pattern_type = 0x1;
+
+	/* Read the requested audio pattern type (Byte 0x272). */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+		test_audio_pattern_type_addr, &bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = bp;
+
+	/* Audio Pattern Type - Bits 7:0 */
+	if ((int)data > max_audio_pattern_type) {
+		pr_err("invalid audio pattern type = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_pattern_type = data;
+	pr_debug("audio pattern type = %s\n",
+			dp_link_get_audio_test_pattern(data));
+exit:
+	return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const test_audio_mode_addr = 0x271;
+	int const max_audio_sampling_rate = 0x6;
+	int const max_audio_channel_count = 0x8;
+	int sampling_rate = 0x0;
+	int channel_count = 0x0;
+
+	/* Read the requested audio mode (Byte 0x271). */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_audio_mode_addr,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read link audio mode data\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = bp;
+
+	/* Sampling Rate - Bits 3:0 */
+	sampling_rate = data & 0xF;
+	if (sampling_rate > max_audio_sampling_rate) {
+		pr_err("sampling rate (0x%x) greater than max (0x%x)\n",
+				sampling_rate, max_audio_sampling_rate);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Channel Count - Bits 7:4 */
+	channel_count = ((data & 0xF0) >> 4) + 1;
+	if (channel_count > max_audio_channel_count) {
+		pr_err("channel_count (0x%x) greater than max (0x%x)\n",
+				channel_count, max_audio_channel_count);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_audio_sampling_rate = sampling_rate;
+	link->request.test_audio_channel_count = channel_count;
+	pr_debug("sampling_rate = %s, channel_count = 0x%x\n",
+		dp_link_get_audio_sample_rate(sampling_rate), channel_count);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_parse_audio_pattern_params() - parse audio pattern params from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the audio link pattern parameters.
+ */
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+
+	ret = dp_link_parse_audio_mode(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_pattern_type(link);
+	if (ret)
+		goto exit;
+
+	ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_video_pattern_valid() - validates the video pattern
+ * @pattern: video pattern requested by the sink
+ *
+ * Returns true if the requested video pattern is supported.
+ */
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+	switch (pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_video_pattern_to_string(u32 test_video_pattern)
+{
+	switch (test_video_pattern) {
+	case DP_TEST_VIDEO_PATTERN_NONE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_NONE);
+	case DP_TEST_VIDEO_PATTERN_COLOR_RAMPS:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_RAMPS);
+	case DP_TEST_VIDEO_PATTERN_BW_VERT_LINES:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_BW_VERT_LINES);
+	case DP_TEST_VIDEO_PATTERN_COLOR_SQUARE:
+		return DP_LINK_ENUM_STR(DP_TEST_VIDEO_PATTERN_COLOR_SQUARE);
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_dynamic_range_valid() - validates the dynamic range
+ * @dr: the dynamic range value to be checked
+ *
+ * Returns true if the dynamic range value is supported.
+ */
+static bool dp_link_is_dynamic_range_valid(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_dynamic_range_to_string(u32 dr)
+{
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_VESA);
+	case DP_DYNAMIC_RANGE_RGB_CEA:
+		return DP_LINK_ENUM_STR(DP_DYNAMIC_RANGE_RGB_CEA);
+	case DP_DYNAMIC_RANGE_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+	/* DP_TEST_BIT_DEPTH_UNKNOWN is treated as invalid */
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+	case DP_TEST_BIT_DEPTH_8:
+	case DP_TEST_BIT_DEPTH_10:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static char *dp_link_bit_depth_to_string(u32 tbd)
+{
+	switch (tbd) {
+	case DP_TEST_BIT_DEPTH_6:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_6);
+	case DP_TEST_BIT_DEPTH_8:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_8);
+	case DP_TEST_BIT_DEPTH_10:
+		return DP_LINK_ENUM_STR(DP_TEST_BIT_DEPTH_10);
+	case DP_TEST_BIT_DEPTH_UNKNOWN:
+	default:
+		return "unknown";
+	}
+}
+
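+
+/* Read a 16-bit timing parameter (MSB first) from the given DPCD address. */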
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+	int const addr, int const len, u32 *val)
+{
+	u8 bp[2];
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/* Read the two timing parameter bytes at the given DPCD address. */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val = bp[1] | (bp[0] << 8);
+
+	return 0;
+}
+
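+
+/*
+ * Like dp_link_parse_timing_params1(), but bit 7 of the first byte is a
+ * polarity flag and the remaining 15 bits form the value.
+ */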
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+	int const addr, int const len, u32 *val1, u32 *val2)
+{
+	u8 bp[2];
+	int rlen;
+
+	if (len < 2)
+		return -EINVAL;
+
+	/* Read the two timing parameter bytes at the given DPCD address. */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, bp, len);
+	if (rlen < len) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+
+	*val1 = (bp[0] & BIT(7)) >> 7;
+	*val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+	return 0;
+}
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+	int const addr, u32 *val)
+{
+	u8 bp;
+	u32 len = 1;
+	int rlen;
+
+	/* Read one timing parameter byte at the given DPCD address. */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr, &bp, len);
+	if (rlen < 1) {
+		pr_err("failed to read 0x%x\n", addr);
+		return -EINVAL;
+	}
+	*val = bp;
+
+	return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parse video pattern params from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video link pattern and the link
+ * bit depth requested by the sink, and if the values parsed are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+	int ret = 0;
+	int rlen;
+	u8 bp;
+	u8 data;
+	u32 dyn_range;
+	int const param_len = 0x1;
+	int const test_video_pattern_addr = 0x221;
+	int const test_misc_addr = 0x232;
+
+	/* Read the requested video link pattern (Byte 0x221). */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_video_pattern_addr,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read link video pattern\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = bp;
+
+	if (!dp_link_is_video_pattern_valid(data)) {
+		pr_err("invalid link video pattern = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_video_pattern = data;
+	pr_debug("link video pattern = 0x%x (%s)\n",
+		link->request.test_video_pattern,
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	/* Read the requested color bit depth and dynamic range (Byte 0x232) */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, test_misc_addr,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read link bit depth\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = bp;
+
+	/* Dynamic Range */
+	dyn_range = (data & BIT(3)) >> 3;
+	if (!dp_link_is_dynamic_range_valid(dyn_range)) {
+		pr_err("invalid link dynamic range = 0x%x", dyn_range);
+		ret = -EINVAL;
+		goto exit;
+	}
+	link->request.test_dyn_range = dyn_range;
+	pr_debug("link dynamic range = 0x%x (%s)\n",
+		link->request.test_dyn_range,
+		dp_link_dynamic_range_to_string(
+			link->request.test_dyn_range));
+
+	/* Color bit depth */
+	data &= (BIT(5) | BIT(6) | BIT(7));
+	data >>= 5;
+	if (!dp_link_is_bit_depth_valid(data)) {
+		pr_err("invalid link bit depth = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_bit_depth = data;
+	pr_debug("link bit depth = 0x%x (%s)\n",
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_string(link->request.test_bit_depth));
+
+	/* resolution timing params */
+	ret = dp_link_parse_timing_params1(link, 0x222, 2,
+			&link->request.test_h_total);
+	if (ret) {
+		pr_err("failed to parse test_h_total (0x222)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_TOTAL = %d\n", link->request.test_h_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x224, 2,
+			&link->request.test_v_total);
+	if (ret) {
+		pr_err("failed to parse test_v_total (0x224)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_TOTAL = %d\n", link->request.test_v_total);
+
+	ret = dp_link_parse_timing_params1(link, 0x226, 2,
+			&link->request.test_h_start);
+	if (ret) {
+		pr_err("failed to parse test_h_start (0x226)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_START = %d\n", link->request.test_h_start);
+
+	ret = dp_link_parse_timing_params1(link, 0x228, 2,
+			&link->request.test_v_start);
+	if (ret) {
+		pr_err("failed to parse test_v_start (0x228)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_START = %d\n", link->request.test_v_start);
+
+	ret = dp_link_parse_timing_params2(link, 0x22A, 2,
+			&link->request.test_hsync_pol,
+			&link->request.test_hsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22A)\n");
+		goto exit;
+	}
+	pr_debug("TEST_HSYNC_POL = %d\n", link->request.test_hsync_pol);
+	pr_debug("TEST_HSYNC_WIDTH = %d\n", link->request.test_hsync_width);
+
+	ret = dp_link_parse_timing_params2(link, 0x22C, 2,
+			&link->request.test_vsync_pol,
+			&link->request.test_vsync_width);
+	if (ret) {
+		pr_err("failed to parse (0x22C)\n");
+		goto exit;
+	}
+	pr_debug("TEST_VSYNC_POL = %d\n", link->request.test_vsync_pol);
+	pr_debug("TEST_VSYNC_WIDTH = %d\n", link->request.test_vsync_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x22E, 2,
+			&link->request.test_h_width);
+	if (ret) {
+		pr_err("failed to parse test_h_width (0x22E)\n");
+		goto exit;
+	}
+	pr_debug("TEST_H_WIDTH = %d\n", link->request.test_h_width);
+
+	ret = dp_link_parse_timing_params1(link, 0x230, 2,
+			&link->request.test_v_height);
+	if (ret) {
+		pr_err("failed to parse test_v_height (0x230)\n");
+		goto exit;
+	}
+	pr_debug("TEST_V_HEIGHT = %d\n", link->request.test_v_height);
+
+	ret = dp_link_parse_timing_params3(link, 0x233,
+		&link->request.test_rr_d);
+	if (ret) {
+		pr_err("failed to parse test_rr_d (0x233)\n");
+		goto exit;
+	}
+	link->request.test_rr_d &= BIT(0);
+	pr_debug("TEST_REFRESH_DENOMINATOR = %d\n", link->request.test_rr_d);
+
+	ret = dp_link_parse_timing_params3(link, 0x234,
+		&link->request.test_rr_n);
+	if (ret) {
+		pr_err("failed to parse test_rr_n (0x234)\n");
+		goto exit;
+	}
+	pr_debug("TEST_REFRESH_NUMERATOR = %d\n", link->request.test_rr_n);
+exit:
+	return ret;
+}
+
+/**
+ * dp_link_is_link_rate_valid() - validates the link rate
+ * @link_rate: link rate requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static bool dp_link_is_link_rate_valid(u32 link_rate)
+{
+	return ((link_rate == DP_LINK_BW_1_62) ||
+		(link_rate == DP_LINK_BW_2_7) ||
+		(link_rate == DP_LINK_BW_5_4) ||
+		(link_rate == DP_LINK_RATE_810));
+}
+
+/**
+ * dp_link_is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static bool dp_link_is_lane_count_valid(u32 lane_count)
+{
+	return (lane_count == DP_LANE_COUNT_1) ||
+		(lane_count == DP_LANE_COUNT_2) ||
+		(lane_count == DP_LANE_COUNT_4);
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if the parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int ret = 0;
+	int rlen;
+	int const param_len = 0x1;
+
+	/* Read the requested link rate (Byte 0x219). */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LINK_RATE,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read link rate\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
+
+	if (!dp_link_is_link_rate_valid(data)) {
+		pr_err("invalid link rate = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_link_rate = data;
+	pr_debug("link rate = 0x%x\n", link->request.test_link_rate);
+
+	/* Read the requested lane count (Byte 0x220). */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_LANE_COUNT,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read lane count\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+	data = *bp;
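+	/* only bits 4:0 of TEST_LANE_COUNT carry the requested lane count */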
+	data &= 0x1F;
+
+	if (!dp_link_is_lane_count_valid(data)) {
+		pr_err("invalid lane count = 0x%x\n", data);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	link->request.test_lane_count = data;
+	pr_debug("lane count = 0x%x\n", link->request.test_lane_count);
+exit:
+	return ret;
+}
+
+static bool dp_link_is_phy_test_pattern_supported(u32 phy_test_pattern_sel)
+{
+	switch (phy_test_pattern_sel) {
+	case PHY_TEST_PATTERN_NONE:
+	case PHY_TEST_PATTERN_D10_2_NO_SCRAMBLING:
+	case PHY_TEST_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT:
+	case PHY_TEST_PATTERN_PRBS7:
+	case PHY_TEST_PATTERN_80_BIT_CUSTOM_PATTERN:
+	case PHY_TEST_PATTERN_HBR2_CTS_EYE_PATTERN:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test pattern parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+	int const phy_test_pattern_addr = 0x248;
+	int ret = 0;
+
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, phy_test_pattern_addr,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read phy link pattern\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	link->request.phy_test_pattern_sel = data;
+
+	pr_debug("phy_test_pattern_sel = %s\n",
+			dp_link_get_phy_test_pattern(data));
+
+	if (!dp_link_is_phy_test_pattern_supported(data))
+		ret = -EINVAL;
+end:
+	return ret;
+}
+
+static char *dp_link_get_test_name(u32 test_requested)
+{
+	switch (test_requested) {
+	case TEST_LINK_TRAINING: return DP_LINK_ENUM_STR(TEST_LINK_TRAINING);
+	case TEST_VIDEO_PATTERN: return DP_LINK_ENUM_STR(TEST_VIDEO_PATTERN);
+	case PHY_TEST_PATTERN:	 return DP_LINK_ENUM_STR(PHY_TEST_PATTERN);
+	case TEST_EDID_READ:	 return DP_LINK_ENUM_STR(TEST_EDID_READ);
+	case TEST_AUDIO_PATTERN: return DP_LINK_ENUM_STR(TEST_AUDIO_PATTERN);
+	default:		 return "unknown";
+	}
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test request bitmask from the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+	return (link == TEST_VIDEO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_VIDEO_PATTERN)) ||
+		(link == TEST_AUDIO_PATTERN) ||
+		(link == (TEST_AUDIO_PATTERN | TEST_AUDIO_DISABLED_VIDEO));
+}
+
+/**
+ * dp_link_is_test_supported() - checks if the requested test is supported
+ * @test_requested: test requested by the sink
+ *
+ * Returns true if the requested test is supported.
+ */
+static bool dp_link_is_test_supported(u32 test_requested)
+{
+	return (test_requested == TEST_LINK_TRAINING) ||
+		(test_requested == TEST_EDID_READ) ||
+		(test_requested == PHY_TEST_PATTERN) ||
+		dp_link_is_video_audio_test_requested(test_requested);
+}
+
+/**
+ * dp_link_parse_request() - parses test request parameters from the sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+	int ret = 0;
+	u8 *bp;
+	u8 data;
+	int rlen;
+	u32 const param_len = 0x1;
+	u8 buf[4];
+
+	/*
+	 * Read the device service IRQ vector (Byte 0x201) to determine
+	 * whether an automated test has been requested by the sink.
+	 */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux,
+		DP_DEVICE_SERVICE_IRQ_VECTOR, &bp, param_len);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("device service irq vector = 0x%x\n", data);
+
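+	/* BIT(1) of the IRQ vector is the automated test request bit */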
+	if (!(data & BIT(1))) {
+		pr_debug("no link requested\n");
+		goto end;
+	}
+
+	/*
+	 * Read the test request byte (Byte 0x218) to determine what type
+	 * of automated test has been requested by the sink.
+	 */
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_TEST_REQUEST,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("aux read failed\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	if (!dp_link_is_test_supported(data)) {
+		pr_debug("link 0x%x not supported\n", data);
+		goto end;
+	}
+
+	pr_debug("%s (0x%x) requested\n", dp_link_get_test_name(data), data);
+	link->request.test_requested = data;
+
+	if (link->request.test_requested == PHY_TEST_PATTERN) {
+		ret = dp_link_parse_phy_test_params(link);
+		if (ret)
+			goto end;
+		ret = dp_link_parse_link_training_params(link);
+	}
+
+	if (link->request.test_requested == TEST_LINK_TRAINING)
+		ret = dp_link_parse_link_training_params(link);
+
+	if (dp_link_is_video_audio_test_requested(
+			link->request.test_requested)) {
+		ret = dp_link_parse_video_pattern_params(link);
+		if (ret)
+			goto end;
+
+		ret = dp_link_parse_audio_pattern_params(link);
+	}
+end:
+	/* clear the test request IRQ */
+	buf[0] = 1;
+	drm_dp_dpcd_write(link->aux->drm_aux, DP_TEST_REQUEST, buf, 1);
+
+	/*
+	 * Send a TEST_ACK if all test parameters are valid, otherwise send
+	 * a TEST_NACK.
+	 */
+	if (ret)
+		link->request.response = TEST_NACK;
+	else
+		link->request.response = TEST_ACK;
+
+	return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static void dp_link_parse_sink_count(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int rlen;
+	int const param_len = 0x1;
+
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, DP_SINK_COUNT,
+			&bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed to read sink count\n");
+		return;
+	}
+
+	data = *bp;
+
+	/* sink count: BIT 7 is bit 6 of the count, BITs 5:0 are the low bits */
+	link->sink_count.count = ((data & BIT(7)) >> 1) | (data & 0x3F);
+	/* BIT 6*/
+	link->sink_count.cp_ready = data & BIT(6);
+
+	pr_debug("sink_count = 0x%x, cp_ready = 0x%x\n",
+		link->sink_count.count, link->sink_count.cp_ready);
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+	int len = 0;
+
+	dp_link_parse_sink_count(link);
+	dp_link_parse_request(link);
+	len = drm_dp_dpcd_read_link_status(link->aux->drm_aux,
+		link->link_status);
+	if (len < DP_LINK_STATUS_SIZE)
+		pr_err("DP link status read failed\n");
+}
+
+static bool dp_link_is_link_training_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested == TEST_LINK_TRAINING);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and
+ * link rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_link_training_requested(link))
+		return -EINVAL;
+
+	pr_debug("%s link rate = 0x%x, lane count = 0x%x\n",
+			dp_link_get_test_name(TEST_LINK_TRAINING),
+			link->request.test_link_rate,
+			link->request.test_lane_count);
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	return 0;
+}
+
+static bool dp_link_phy_pattern_requested(struct dp_link *dp_link)
+{
+	struct dp_link_private *link = container_of(dp_link,
+			struct dp_link_private, dp_link);
+
+	return (link->request.test_requested == PHY_TEST_PATTERN);
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+	u8 *bp;
+	u8 data;
+	int const param_len = 0x1;
+	int const addr1 = 0x206;
+	int const addr2 = 0x207;
+	int ret = 0;
+	u32 v0, p0, v1, p1, v2, p2, v3, p3;
+	int rlen;
+
+	pr_debug("\n");
+
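+	/*
+	 * ADJUST_REQUEST_LANE0_1/LANE2_3 pack two lanes per byte: each lane
+	 * uses two bits for voltage swing followed by two bits for
+	 * pre-emphasis.
+	 */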
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr1, &bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 0/1\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 0/1 (Byte 0x206): 0x%x\n", data);
+
+	v0 = data & 0x3;
+	data = data >> 2;
+	p0 = data & 0x3;
+	data = data >> 2;
+
+	v1 = data & 0x3;
+	data = data >> 2;
+	p1 = data & 0x3;
+	data = data >> 2;
+
+	rlen = drm_dp_dpcd_read(link->aux->drm_aux, addr2, &bp, param_len);
+	if (rlen < param_len) {
+		pr_err("failed reading lanes 2/3\n");
+		ret = -EINVAL;
+		goto end;
+	}
+
+	data = *bp;
+
+	pr_debug("lanes 2/3 (Byte 0x207): 0x%x\n", data);
+
+	v2 = data & 0x3;
+	data = data >> 2;
+	p2 = data & 0x3;
+	data = data >> 2;
+
+	v3 = data & 0x3;
+	data = data >> 2;
+	p3 = data & 0x3;
+	data = data >> 2;
+
+	pr_debug("vx: 0=%d, 1=%d, 2=%d, 3=%d\n", v0, v1, v2, v3);
+	pr_debug("px: 0=%d, 1=%d, 2=%d, 3=%d\n", p0, p1, p2, p3);
+
+	/*
+	 * Update the voltage and pre-emphasis levels as per DPCD request
+	 * vector.
+	 */
+	pr_debug("Current: v_level = 0x%x, p_level = 0x%x\n",
+			link->dp_link.v_level, link->dp_link.p_level);
+	pr_debug("Requested: v_level = 0x%x, p_level = 0x%x\n", v0, p0);
+	link->dp_link.v_level = v0;
+	link->dp_link.p_level = p0;
+
+	pr_debug("Success\n");
+end:
+	return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy test requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy test pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy test pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+		struct dp_link_private *link)
+{
+	u32 test_link_rate = 0, test_lane_count = 0;
+
+	if (!dp_link_phy_pattern_requested(&link->dp_link))
+		return -EINVAL;
+
+	test_link_rate = link->request.test_link_rate;
+	test_lane_count = link->request.test_lane_count;
+
+	if (!dp_link_is_link_rate_valid(test_link_rate) ||
+		!dp_link_is_lane_count_valid(test_lane_count)) {
+		pr_err("Invalid params: link rate = 0x%x, lane count = 0x%x\n",
+				test_link_rate, test_lane_count);
+		return -EINVAL;
+	}
+
+	pr_debug("start\n");
+
+	link->dp_link.lane_count = link->request.test_lane_count;
+	link->dp_link.link_rate = link->request.test_link_rate;
+
+	dp_link_parse_vx_px(link);
+
+	pr_debug("end\n");
+
+	return 0;
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
+	if (!(link->link_status[2] & BIT(7)) || /* link status updated */
+		(drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.lane_count) &&
+	     drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.lane_count)))
+		return -EINVAL;
+
+	pr_debug("channel_eq_done = %d, clock_recovery_done = %d\n",
+			drm_dp_channel_eq_ok(link->link_status,
+			link->dp_link.lane_count),
+			drm_dp_clock_recovery_ok(link->link_status,
+			link->dp_link.lane_count));
+
+	return 0;
+}
+
+static bool dp_link_is_ds_port_status_changed(struct dp_link_private *link)
+{
+	return (link->link_status[2] & BIT(6)); /* port status changed */
+}
+
+/**
+ * dp_link_process_ds_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+	if (!dp_link_is_ds_port_status_changed(link))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_VIDEO_PATTERN)
+		&& !(link->request.test_requested & TEST_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+	return (link->request.test_requested & TEST_AUDIO_PATTERN);
+}
+
+/**
+ * dp_link_process_video_pattern_request() - process new video pattern request
+ * @link: Display Port link module's data
+ *
+ * This function will handle a new video pattern request that is initiated by
+ * the sink. This is achieved by first sending a disconnect notification to
+ * the sink followed by a subsequent connect notification to the user modules,
+ * where it is expected that the user modules would draw the required test
+ * pattern.
+ */
+static int dp_link_process_video_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_video_pattern_requested(link))
+		goto end;
+
+	pr_debug("%s: bit depth=%d(%d bpp) pattern=%s\n",
+		dp_link_get_test_name(TEST_VIDEO_PATTERN),
+		link->request.test_bit_depth,
+		dp_link_bit_depth_to_bpp(link->request.test_bit_depth),
+		dp_link_video_pattern_to_string(
+			link->request.test_video_pattern));
+
+	return 0;
+end:
+	return -EINVAL;
+}
+
+/**
+ * dp_link_process_audio_pattern_request() - process new audio pattern request
+ * @link: Display Port link module data
+ *
+ * This function will handle a new audio pattern request that is initiated by
+ * the sink. This is achieved by sending the necessary secondary data packets
+ * to the sink. It is expected that any simultaneous requests for video
+ * patterns will be handled before the audio pattern is sent to the sink.
+ */
+static int dp_link_process_audio_pattern_request(struct dp_link_private *link)
+{
+	if (!dp_link_is_audio_pattern_requested(link))
+		return -EINVAL;
+
+	pr_debug("sampling_rate=%s, channel_count=%d, pattern_type=%s\n",
+		dp_link_get_audio_sample_rate(
+			link->request.test_audio_sampling_rate),
+		link->request.test_audio_channel_count,
+		dp_link_get_audio_test_pattern(
+			link->request.test_audio_pattern_type));
+
+	pr_debug("audio_period: ch1=0x%x, ch2=0x%x, ch3=0x%x, ch4=0x%x\n",
+		link->request.test_audio_period_ch_1,
+		link->request.test_audio_period_ch_2,
+		link->request.test_audio_period_ch_3,
+		link->request.test_audio_period_ch_4);
+
+	pr_debug("audio_period: ch5=0x%x, ch6=0x%x, ch7=0x%x, ch8=0x%x\n",
+		link->request.test_audio_period_ch_5,
+		link->request.test_audio_period_ch_6,
+		link->request.test_audio_period_ch_7,
+		link->request.test_audio_period_ch_8);
+
+	return 0;
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+	link->request = (const struct dp_link_request){ 0 };
+	link->request.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+
+	link->dp_link.test_requested = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+static int dp_link_process_request(struct dp_link *dp_link)
+{
+	int ret = 0;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	pr_debug("start\n");
+
+	dp_link_reset_data(link);
+
+	dp_link_parse_sink_status_field(link);
+
+	ret = dp_link_process_link_training_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_LINK_TRAINING;
+		goto exit;
+	}
+
+	ret = dp_link_process_phy_test_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= PHY_TEST_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_link_status_update(link);
+	if (!ret) {
+		dp_link->test_requested |= LINK_STATUS_UPDATED;
+		goto exit;
+	}
+
+	ret = dp_link_process_ds_port_status_change(link);
+	if (!ret) {
+		dp_link->test_requested |= DS_PORT_STATUS_CHANGED;
+		goto exit;
+	}
+
+	ret = dp_link_process_video_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_VIDEO_PATTERN;
+		goto exit;
+	}
+
+	ret = dp_link_process_audio_pattern_request(link);
+	if (!ret) {
+		dp_link->test_requested |= TEST_AUDIO_PATTERN;
+		goto exit;
+	}
+
+	pr_debug("done\n");
+exit:
+	return ret;
+}
+
+static int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+	u32 cc;
+	enum dynamic_range dr;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* unless a video pattern CTS test is ongoing, use RGB_VESA */
+	if (dp_link_is_video_pattern_requested(link))
+		dr = link->request.test_dyn_range;
+	else
+		dr = DP_DYNAMIC_RANGE_RGB_VESA;
+
+	/* Only RGB_VESA and RGB_CEA are supported for now */
+	switch (dr) {
+	case DP_DYNAMIC_RANGE_RGB_CEA:
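+		/* MISC0 bit 3 set selects the CEA (limited) dynamic range */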
+		cc = BIT(3);
+		break;
+	case DP_DYNAMIC_RANGE_RGB_VESA:
+	default:
+		cc = 0;
+	}
+
+	return cc;
+}
+
+static int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+{
+	int i;
+	int max = 0;
+	u8 data;
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	/* use the max level across lanes */
+	for (i = 0; i < dp_link->lane_count; i++) {
+		data = drm_dp_get_adjust_request_voltage(link_status, i);
+		pr_debug("lane=%d req_voltage_swing=%d\n", i, data);
+		if (max < data)
+			max = data;
+	}
+
+	dp_link->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+
+	/* use the max level across lanes */
+	max = 0;
+	for (i = 0; i < dp_link->lane_count; i++) {
+		data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
+		pr_debug("lane=%d req_pre_emphasis=%d\n", i, data);
+		if (max < data)
+			max = data;
+	}
+
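+	/*
+	 * drm_dp_get_adjust_request_pre_emphasis() returns the level already
+	 * shifted into the TRAINING_LANEx_SET field position, so shift it
+	 * back down to get a plain level index.
+	 */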
+	dp_link->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+	/*
+	 * Adjust the voltage swing and pre-emphasis level combination to within
+	 * the allowable range.
+	 */
+	if (dp_link->v_level > DP_LINK_VOLTAGE_MAX) {
+		pr_debug("Requested vSwingLevel=%d, change to %d\n",
+				dp_link->v_level, DP_LINK_VOLTAGE_MAX);
+		dp_link->v_level = DP_LINK_VOLTAGE_MAX;
+	}
+
+	if (dp_link->p_level > DP_LINK_PRE_EMPHASIS_MAX) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_MAX);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_MAX;
+	}
+
+	if ((dp_link->p_level > DP_LINK_PRE_EMPHASIS_LEVEL_1)
+			&& (dp_link->v_level == DP_LINK_VOLTAGE_LEVEL_2)) {
+		pr_debug("Requested preEmphasisLevel=%d, change to %d\n",
+				dp_link->p_level, DP_LINK_PRE_EMPHASIS_LEVEL_1);
+		dp_link->p_level = DP_LINK_PRE_EMPHASIS_LEVEL_1;
+	}
+
+	pr_debug("v_level=%d, p_level=%d\n",
+		dp_link->v_level, dp_link->p_level);
+
+	return 0;
+}
+
+static int dp_link_send_psm_request(struct dp_link *dp_link, bool req)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	return 0;
+}
+
+static u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+	enum test_bit_depth tbd;
+
+	/*
+	 * A few simplistic rules and assumptions are made here:
+	 *    1. Test bit depth is bit depth per color component
+	 *    2. Assume 3 color components
+	 */
+	switch (bpp) {
+	case 18:
+		tbd = DP_TEST_BIT_DEPTH_6;
+		break;
+	case 24:
+		tbd = DP_TEST_BIT_DEPTH_8;
+		break;
+	case 30:
+		tbd = DP_TEST_BIT_DEPTH_10;
+		break;
+	default:
+		tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+		break;
+	}
+
+	return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux)
+{
+	int rc = 0;
+	struct dp_link_private *link;
+	struct dp_link *dp_link;
+
+	if (!dev || !aux) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+	if (!link) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	link->dev   = dev;
+	link->aux   = aux;
+
+	dp_link = &link->dp_link;
+
+	dp_link->process_request        = dp_link_process_request;
+	dp_link->get_test_bits_depth    = dp_link_get_test_bits_depth;
+	dp_link->get_colorimetry_config = dp_link_get_colorimetry_config;
+	dp_link->adjust_levels          = dp_link_adjust_levels;
+	dp_link->send_psm_request       = dp_link_send_psm_request;
+	dp_link->phy_pattern_requested  = dp_link_phy_pattern_requested;
+
+	return dp_link;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_link_put(struct dp_link *dp_link)
+{
+	struct dp_link_private *link;
+
+	if (!dp_link)
+		return;
+
+	link = container_of(dp_link, struct dp_link_private, dp_link);
+
+	devm_kfree(link->dev, link);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644
index 0000000..26249d6
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+enum dp_link_voltage_level {
+	DP_LINK_VOLTAGE_LEVEL_0	= 0,
+	DP_LINK_VOLTAGE_LEVEL_1	= 1,
+	DP_LINK_VOLTAGE_LEVEL_2	= 2,
+	DP_LINK_VOLTAGE_MAX	= DP_LINK_VOLTAGE_LEVEL_2,
+};
+
+enum dp_link_preemphasis_level {
+	DP_LINK_PRE_EMPHASIS_LEVEL_0	= 0,
+	DP_LINK_PRE_EMPHASIS_LEVEL_1	= 1,
+	DP_LINK_PRE_EMPHASIS_LEVEL_2	= 2,
+	DP_LINK_PRE_EMPHASIS_MAX	= DP_LINK_PRE_EMPHASIS_LEVEL_2,
+};
+
+enum test_type {
+	UNKNOWN_TEST		  = 0,
+	TEST_LINK_TRAINING	  = 0x01,
+	TEST_VIDEO_PATTERN	  = 0x02,
+	PHY_TEST_PATTERN	  = 0x08,
+	TEST_EDID_READ		  = 0x04,
+	TEST_AUDIO_PATTERN	  = 0x20,
+	TEST_AUDIO_DISABLED_VIDEO = 0x40,
+};
+
+enum status_update {
+	LINK_STATUS_UPDATED    = 0x100,
+	DS_PORT_STATUS_CHANGED = 0x200,
+};
+
+struct dp_link {
+	u32 test_requested;
+
+	u32 lane_count;
+	u32 link_rate;
+	u32 v_level;
+	u32 p_level;
+
+	u32 (*get_test_bits_depth)(struct dp_link *dp_link, u32 bpp);
+	int (*process_request)(struct dp_link *dp_link);
+	int (*get_colorimetry_config)(struct dp_link *dp_link);
+	int (*adjust_levels)(struct dp_link *dp_link, u8 *link_status);
+	int (*send_psm_request)(struct dp_link *dp_link, bool req);
+	bool (*phy_pattern_requested)(struct dp_link *dp_link);
+};
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: DP aux module used for DPCD transactions
+ *
+ * return: a pointer to dp_link struct on success, error pointer on failure
+ */
+struct dp_link *dp_link_get(struct device *dev, struct dp_aux *aux);
+
+/**
+ * dp_link_put() - releases the dp test module's resources
+ *
+ * @dp_link: an instance of dp_link module
+ *
+ */
+void dp_link_put(struct dp_link *dp_link);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644
index 0000000..fed1dbb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"[drm-dp] %s: " fmt, __func__
+
+#include "dp_panel.h"
+
+enum {
+	DP_LINK_RATE_MULTIPLIER = 27000000,
+};
+
+struct dp_panel_private {
+	struct device *dev;
+	struct dp_panel dp_panel;
+	struct dp_aux *aux;
+	struct dp_catalog_panel *catalog;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+	int rlen, rc = 0;
+	struct dp_panel_private *panel;
+	struct drm_dp_link *dp_link;
+	u8 major = 0, minor = 0;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	dp_link = &dp_panel->dp_link;
+
+	rlen = drm_dp_dpcd_read(panel->aux->drm_aux, DP_DPCD_REV,
+		dp_panel->dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+	if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+		pr_err("dpcd read failed, rlen=%d\n", rlen);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_link->revision = dp_panel->dpcd[DP_DPCD_REV];
+
+	major = (dp_link->revision >> 4) & 0x0f;
+	minor = dp_link->revision & 0x0f;
+	pr_debug("version: %d.%d\n", major, minor);
+
+	dp_link->rate =
+		drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
+	pr_debug("link_rate=%d\n", dp_link->rate);
+
+	dp_link->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
+			DP_MAX_LANE_COUNT_MASK;
+	pr_debug("lane_count=%d\n", dp_link->num_lanes);
+
+	if (dp_panel->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+		dp_link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+end:
+	return rc;
+}
+
+static int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	u32 data, total_ver, total_hor;
+	struct dp_catalog_panel *catalog;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+	catalog = panel->catalog;
+	pinfo = &panel->dp_panel.pinfo;
+
+	pr_debug("width=%d hporch= %d %d %d\n",
+		pinfo->h_active, pinfo->h_back_porch,
+		pinfo->h_front_porch, pinfo->h_sync_width);
+
+	pr_debug("height=%d vporch= %d %d %d\n",
+		pinfo->v_active, pinfo->v_back_porch,
+		pinfo->v_front_porch, pinfo->v_sync_width);
+
+	total_hor = pinfo->h_active + pinfo->h_back_porch +
+		pinfo->h_front_porch + pinfo->h_sync_width;
+
+	total_ver = pinfo->v_active + pinfo->v_back_porch +
+			pinfo->v_front_porch + pinfo->v_sync_width;
+
+	data = total_ver;
+	data <<= 16;
+	data |= total_hor;
+
+	catalog->total = data;
+
+	data = (pinfo->v_back_porch + pinfo->v_sync_width);
+	data <<= 16;
+	data |= (pinfo->h_back_porch + pinfo->h_sync_width);
+
+	catalog->sync_start = data;
+
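+	/* vsync width/polarity go in the upper half word, hsync in the lower */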
+	data = pinfo->v_sync_width;
+	data <<= 16;
+	data |= (pinfo->v_active_low << 31);
+	data |= pinfo->h_sync_width;
+	data |= (pinfo->h_active_low << 15);
+
+	catalog->width_blanking = data;
+
+	data = pinfo->v_active;
+	data <<= 16;
+	data |= pinfo->h_active;
+
+	catalog->dp_active = data;
+
+	panel->catalog->timing_cfg(catalog);
+end:
+	return rc;
+}
+
+static int dp_panel_edid_register(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	dp_panel->edid_ctrl = sde_edid_init();
+	if (!dp_panel->edid_ctrl) {
+		pr_err("sde edid init for DP failed\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+end:
+	return rc;
+}
+
+static void dp_panel_edid_deregister(struct dp_panel *dp_panel)
+{
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	sde_edid_deinit((void **)&dp_panel->edid_ctrl);
+}
+
+static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+end:
+	return rc;
+}
+
+static u32 dp_panel_get_link_rate(struct dp_panel *dp_panel)
+{
+	const u32 encoding_factx10 = 8;
+	const u32 ln_to_link_ratio = 10;
+	u32 min_link_rate, remainder = 0;
+	u32 calc_link_rate = 0, lane_cnt, max_rate = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel_info *pinfo;
+
+	if (!dp_panel) {
+		pr_err("invalid input\n");
+		goto end;
+	}
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	lane_cnt = dp_panel->dp_link.num_lanes;
+	max_rate = drm_dp_link_rate_to_bw_code(dp_panel->dp_link.rate);
+	pinfo = &dp_panel->pinfo;
+
+	/*
+	 * The max pixel clock supported is 675 MHz. The
+	 * current calculations below will make sure
+	 * the min_link_rate is within 32 bit limits.
+	 * Any changes in this section of code should
+	 * consider this limitation.
+	 */
+	min_link_rate = (u32)div_u64(pinfo->pixel_clk_khz * 1000,
+				(lane_cnt * encoding_factx10));
+	min_link_rate /= ln_to_link_ratio;
+	min_link_rate = (min_link_rate * pinfo->bpp);
+	min_link_rate = (u32)div_u64_rem(min_link_rate * 10,
+				DP_LINK_RATE_MULTIPLIER, &remainder);
+
+	/*
+	 * To avoid any fractional values,
+	 * increment the min_link_rate
+	 */
+	if (remainder)
+		min_link_rate += 1;
+	pr_debug("min_link_rate = %d\n", min_link_rate);
+
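+	/*
+	 * min_link_rate is now in the same units as the DP_LINK_BW_* codes
+	 * (multiples of 270 Mbps per lane, including 8b/10b overhead), so it
+	 * can be compared against them directly.
+	 */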
+	if (min_link_rate <= DP_LINK_BW_1_62)
+		calc_link_rate = DP_LINK_BW_1_62;
+	else if (min_link_rate <= DP_LINK_BW_2_7)
+		calc_link_rate = DP_LINK_BW_2_7;
+	else if (min_link_rate <= DP_LINK_BW_5_4)
+		calc_link_rate = DP_LINK_BW_5_4;
+	else if (min_link_rate <= DP_LINK_RATE_810)
+		calc_link_rate = DP_LINK_RATE_810;
+	else {
+		/* Cap the link rate to the max supported rate */
+		pr_debug("link_rate = %d is unsupported\n", min_link_rate);
+		calc_link_rate = DP_LINK_RATE_810;
+	}
+
+	if (calc_link_rate > max_rate)
+		calc_link_rate = max_rate;
+
+	pr_debug("calc_link_rate = 0x%x\n", calc_link_rate);
+end:
+	return calc_link_rate;
+}
+
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog)
+{
+	int rc = 0;
+	struct dp_panel_private *panel;
+	struct dp_panel *dp_panel;
+
+	if (!dev || !aux || !catalog) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+	if (!panel) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	panel->dev = dev;
+	panel->aux = aux;
+	panel->catalog = catalog;
+
+	dp_panel = &panel->dp_panel;
+
+	dp_panel->sde_edid_register = dp_panel_edid_register;
+	dp_panel->sde_edid_deregister = dp_panel_edid_deregister;
+	dp_panel->init_info = dp_panel_init_panel_info;
+	dp_panel->timing_cfg = dp_panel_timing_cfg;
+	dp_panel->read_dpcd = dp_panel_read_dpcd;
+	dp_panel->get_link_rate = dp_panel_get_link_rate;
+
+	return dp_panel;
+error:
+	return ERR_PTR(rc);
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+	struct dp_panel_private *panel;
+
+	if (!dp_panel)
+		return;
+
+	panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+	devm_kfree(panel->dev, panel);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644
index 0000000..5852c70
--- /dev/null
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include "dp_aux.h"
+#include "sde_edid_parser.h"
+
+#define DP_LINK_RATE_810	30	/* 8.10G = 270M * 30 */
+
+struct dp_panel_info {
+	u32 h_active;
+	u32 v_active;
+	u32 h_back_porch;
+	u32 h_front_porch;
+	u32 h_sync_width;
+	u32 h_active_low;
+	u32 v_back_porch;
+	u32 v_front_porch;
+	u32 v_sync_width;
+	u32 v_active_low;
+	u32 h_skew;
+	u32 refresh_rate;
+	u32 pixel_clk_khz;
+	u32 bpp;
+};
+
+struct dp_panel {
+	/* dpcd raw data */
+	u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+	struct drm_dp_link dp_link;
+
+	struct sde_edid_ctrl *edid_ctrl;
+	struct dp_panel_info pinfo;
+
+	u32 vic;
+
+	int (*sde_edid_register)(struct dp_panel *dp_panel);
+	void (*sde_edid_deregister)(struct dp_panel *dp_panel);
+	int (*init_info)(struct dp_panel *dp_panel);
+	int (*timing_cfg)(struct dp_panel *dp_panel);
+	int (*read_dpcd)(struct dp_panel *dp_panel);
+	u32 (*get_link_rate)(struct dp_panel *dp_panel);
+};
+
+struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux,
+				struct dp_catalog_panel *catalog);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
index 54d4a10..60c8966 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.c
+++ b/drivers/gpu/drm/msm/dp/dp_power.c
@@ -17,6 +17,8 @@
 #include <linux/clk.h>
 #include "dp_power.h"
 
+#define DP_CLIENT_NAME_SIZE	20
+
 struct dp_power_private {
 	struct dp_parser *parser;
 	struct platform_device *pdev;
@@ -24,6 +26,8 @@
 	struct clk *pixel_parent;
 
 	struct dp_power dp_power;
+	struct sde_power_client *dp_core_client;
+	struct sde_power_handle *phandle;
 
 	bool core_clks_on;
 	bool link_clks_on;
@@ -58,6 +62,25 @@
 	return rc;
 }
 
+static void dp_power_regulator_deinit(struct dp_power_private *power)
+{
+	int rc = 0, i = 0;
+	struct platform_device *pdev;
+	struct dp_parser *parser;
+
+	parser = power->parser;
+	pdev = power->pdev;
+
+	for (i = DP_CORE_PM; (i < DP_MAX_PM); i++) {
+		rc = msm_dss_config_vreg(&pdev->dev,
+			parser->mp[i].vreg_config,
+			parser->mp[i].num_vreg, 0);
+		if (rc)
+			pr_err("failed to deinit vregs for %s\n",
+				dp_parser_pm_name(i));
+	}
+}
+
 static int dp_power_regulator_ctrl(struct dp_power_private *power, bool enable)
 {
 	int rc = 0, i = 0, j = 0;
@@ -387,6 +410,67 @@
 	return 0;
 }
 
+static int dp_power_client_init(struct dp_power *dp_power,
+		struct sde_power_handle *phandle)
+{
+	int rc = 0;
+	struct dp_power_private *power;
+	char dp_client_name[DP_CLIENT_NAME_SIZE];
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return -EINVAL;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	rc = dp_power_regulator_init(power);
+	if (rc) {
+		pr_err("failed to init regulators\n");
+		goto error_power;
+	}
+
+	rc = dp_power_clk_init(power, true);
+	if (rc) {
+		pr_err("failed to init clocks\n");
+		goto error_clk;
+	}
+
+	power->phandle = phandle;
+	snprintf(dp_client_name, DP_CLIENT_NAME_SIZE, "dp_core_client");
+	power->dp_core_client = sde_power_client_create(phandle,
+			dp_client_name);
+	if (IS_ERR_OR_NULL(power->dp_core_client)) {
+		pr_err("[%s] client creation failed for DP\n", dp_client_name);
+		rc = -EINVAL;
+		goto error_client;
+	}
+	return 0;
+
+error_client:
+	dp_power_clk_init(power, false);
+error_clk:
+	dp_power_regulator_deinit(power);
+error_power:
+	return rc;
+}
+
+static void dp_power_client_deinit(struct dp_power *dp_power)
+{
+	struct dp_power_private *power;
+
+	if (!dp_power) {
+		pr_err("invalid power data\n");
+		return;
+	}
+
+	power = container_of(dp_power, struct dp_power_private, dp_power);
+
+	sde_power_client_destroy(power->phandle, power->dp_core_client);
+	dp_power_clk_init(power, false);
+	dp_power_regulator_deinit(power);
+}
+
 static int dp_power_set_pixel_clk_parent(struct dp_power *dp_power)
 {
 	int rc = 0;
@@ -437,6 +521,13 @@
 		goto err_gpio;
 	}
 
+	rc = sde_power_resource_enable(power->phandle,
+		power->dp_core_client, true);
+	if (rc) {
+		pr_err("Power resource enable failed\n");
+		goto err_sde_power;
+	}
+
 	rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
 	if (rc) {
 		pr_err("failed to enable DP core clocks\n");
@@ -446,6 +537,8 @@
 	return 0;
 
 err_clk:
+	sde_power_resource_enable(power->phandle, power->dp_core_client, false);
+err_sde_power:
 	dp_power_config_gpios(power, flip, false);
 err_gpio:
 	dp_power_pinctrl_set(power, false);
@@ -469,6 +562,12 @@
 	power = container_of(dp_power, struct dp_power_private, dp_power);
 
 	dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+	rc = sde_power_resource_enable(power->phandle,
+			power->dp_core_client, false);
+	if (rc) {
+		pr_err("Power resource disable failed, rc=%d\n", rc);
+		goto exit;
+	}
 	dp_power_config_gpios(power, false, false);
 	dp_power_pinctrl_set(power, false);
 	dp_power_regulator_ctrl(power, false);
@@ -503,18 +602,8 @@
 	dp_power->deinit = dp_power_deinit;
 	dp_power->clk_enable = dp_power_clk_enable;
 	dp_power->set_pixel_clk_parent = dp_power_set_pixel_clk_parent;
-
-	rc = dp_power_regulator_init(power);
-	if (rc) {
-		pr_err("failed to init regulators\n");
-		goto error;
-	}
-
-	rc = dp_power_clk_init(power, true);
-	if (rc) {
-		pr_err("failed to init clocks\n");
-		goto error;
-	}
+	dp_power->power_client_init = dp_power_client_init;
+	dp_power->power_client_deinit = dp_power_client_deinit;
 
 	return dp_power;
 error:
@@ -526,6 +615,5 @@
 	struct dp_power_private *power = container_of(dp_power,
 			struct dp_power_private, dp_power);
 
-	(void)dp_power_clk_init(power, false);
 	devm_kfree(&power->pdev->dev, power);
 }
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
index 3d71695..e6e9900 100644
--- a/drivers/gpu/drm/msm/dp/dp_power.h
+++ b/drivers/gpu/drm/msm/dp/dp_power.h
@@ -16,6 +16,7 @@
 #define _DP_POWER_H_
 
 #include "dp_parser.h"
+#include "sde_power_handle.h"
 
 /**
  * sruct dp_power - DisplayPort's power related data
@@ -31,6 +32,9 @@
 	int (*clk_enable)(struct dp_power *power, enum dp_pm_type pm_type,
 				bool enable);
 	int (*set_pixel_clk_parent)(struct dp_power *power);
+	int (*power_client_init)(struct dp_power *power,
+				struct sde_power_handle *phandle);
+	void (*power_client_deinit)(struct dp_power *power);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
index 5dcdf46..2d7b174 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -142,7 +142,8 @@
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id);
+				     u32 vc_id,
+				     struct dsi_rect *roi);
 void dsi_ctrl_hw_cmn_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
 void dsi_ctrl_hw_cmn_soft_reset(struct dsi_ctrl_hw *ctrl);
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index f187ad1..da7a7c0 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -879,14 +879,12 @@
 	int rc = 0;
 	struct mipi_dsi_packet packet;
 	struct dsi_ctrl_cmd_dma_fifo_info cmd;
+	struct dsi_ctrl_cmd_dma_info cmd_mem;
 	u32 hw_flags = 0;
 	u32 length = 0;
 	u8 *buffer = NULL;
-
-	if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
-		pr_err("Memory DMA is not supported, use FIFO\n");
-		goto error;
-	}
+	u32 cnt = 0;
+	u8 *cmdbuf;
 
 	rc = mipi_dsi_create_packet(&packet, msg);
 	if (rc) {
@@ -894,7 +892,32 @@
 		goto error;
 	}
 
-	if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+				&packet,
+				&buffer,
+				&length);
+
+		if (rc) {
+			pr_err("[%s] failed to copy message, rc=%d\n",
+					dsi_ctrl->name, rc);
+			goto error;
+		}
+
+		cmd_mem.offset = dsi_ctrl->cmd_buffer_iova;
+		cmd_mem.length = length;
+		cmd_mem.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+			true : false;
+		cmd_mem.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+			true : false;
+		cmd_mem.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+			true : false;
+
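+		/* copy the packed command payload into the mapped DMA buffer */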
+		cmdbuf = (u8 *)(dsi_ctrl->vaddr);
+		for (cnt = 0; cnt < length; cnt++)
+			cmdbuf[cnt] = buffer[cnt];
+
+	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
 		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
 					       &packet,
 					       &buffer,
@@ -920,10 +943,15 @@
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
 		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
 
-	if (flags & DSI_CTRL_CMD_FIFO_STORE)
+	if (flags & DSI_CTRL_CMD_FETCH_MEMORY) {
+		dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw,
+						&cmd_mem,
+						hw_flags);
+	} else if (flags & DSI_CTRL_CMD_FIFO_STORE) {
 		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
 						      &cmd,
 						      hw_flags);
+	}
 
 	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
 		u32 retry = 10;
@@ -1558,7 +1586,6 @@
 
 int dsi_ctrl_setup(struct dsi_ctrl *dsi_ctrl)
 {
-	struct dsi_mode_info video_timing;
 	int rc = 0;
 
 	if (!dsi_ctrl) {
@@ -1568,12 +1595,6 @@
 
 	mutex_lock(&dsi_ctrl->ctrl_lock);
 
-	/* replace video mode width with actual roi width */
-	memcpy(&video_timing, &dsi_ctrl->host_config.video_timing,
-			sizeof(video_timing));
-	video_timing.h_active = dsi_ctrl->roi.w;
-	video_timing.v_active = dsi_ctrl->roi.h;
-
 	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.lane_map);
 
@@ -1586,9 +1607,10 @@
 					&dsi_ctrl->host_config.u.cmd_engine);
 
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
-				&video_timing,
-				video_timing.h_active * 3,
-				0x0);
+				&dsi_ctrl->host_config.video_timing,
+				dsi_ctrl->host_config.video_timing.h_active * 3,
+				0x0,
+				&dsi_ctrl->roi);
 		dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
@@ -1690,7 +1712,8 @@
 		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
 				&dsi_ctrl->host_config.video_timing,
 				dsi_ctrl->host_config.video_timing.h_active * 3,
-				0x0);
+				0x0,
+				NULL);
 	} else {
 		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
 					&dsi_ctrl->host_config.common_config,
@@ -2176,14 +2199,14 @@
 }
 
 /**
-  * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
-  * @dsi_ctrl:		DSI controller handle.
-  * @enable:		enable/disable ULPS.
-  *
-  * ULPS can be enabled/disabled after DSI host engine is turned on.
-  *
-  * Return: error code.
-  */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:		DSI controller handle.
+ * @enable:		enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
 int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
 {
 	int rc = 0;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
index f89cb68..7f36fde 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -33,12 +33,15 @@
  * @DSI_CTRL_CMD_DEFER_TRIGGER:    Defer the command trigger to later.
  * @DSI_CTRL_CMD_FIFO_STORE:       Use FIFO for command transfer in place of
  *				   reading data from memory.
+ * @DSI_CTRL_CMD_FETCH_MEMORY:     Fetch command from memory through AXI bus
+ *				   and transfer it.
  */
 #define DSI_CTRL_CMD_READ             0x1
 #define DSI_CTRL_CMD_BROADCAST        0x2
 #define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
 #define DSI_CTRL_CMD_DEFER_TRIGGER    0x8
 #define DSI_CTRL_CMD_FIFO_STORE       0x10
+#define DSI_CTRL_CMD_FETCH_MEMORY     0x20
 
 /**
  * enum dsi_power_state - defines power states for dsi controller.
@@ -188,6 +191,8 @@
  * @roi:                 Partial update region of interest.
  *                       Origin is top left of this CTRL.
  * @tx_cmd_buf:          Tx command buffer.
+ * @cmd_buffer_iova:     cmd buffer mapped address.
+ * @vaddr:		 CPU virtual address of cmd buffer.
  * @cmd_buffer_size:     Size of command buffer.
  * @debugfs_root:        Root for debugfs entries.
  */
@@ -221,6 +226,8 @@
 	/* Command tx and rx */
 	struct drm_gem_object *tx_cmd_buf;
 	u32 cmd_buffer_size;
+	u32 cmd_buffer_iova;
+	void *vaddr;
 
 	/* Debug Information */
 	struct dentry *debugfs_root;
@@ -377,14 +384,14 @@
 int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
 
 /**
-  * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
-  * @dsi_ctrl:		DSI controller handle.
-  * @enable:		enable/disable ULPS.
-  *
-  * ULPS can be enabled/disabled after DSI host engine is turned on.
-  *
-  * Return: error code.
-  */
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl:		DSI controller handle.
+ * @enable:		enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
 int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
index 859d707..bb72807 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -319,7 +319,8 @@
 	void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
 				 struct dsi_mode_info *mode,
 				 u32 h_stride,
-				 u32 vc_id);
+				 u32 vc_id,
+				 struct dsi_rect *roi);
 
 	/**
 	 * ctrl_en() - enable DSI controller engine
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
index 48c2370..a024c43 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c
@@ -19,6 +19,7 @@
 #include "dsi_ctrl_hw.h"
 #include "dsi_ctrl_reg.h"
 #include "dsi_hw.h"
+#include "dsi_panel.h"
 
 #define MMSS_MISC_CLAMP_REG_OFF           0x0014
 #define DSI_CTRL_DYNAMIC_FORCE_ON         (0x23F|BIT(8)|BIT(9)|BIT(11)|BIT(21))
@@ -234,21 +235,36 @@
 void dsi_ctrl_hw_cmn_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
 				     struct dsi_mode_info *mode,
 				     u32 h_stride,
-				     u32 vc_id)
+				     u32 vc_id,
+				     struct dsi_rect *roi)
 {
-	u32 reg = 0;
 	u32 width_final, stride_final;
+	u32 height_final;
+	u32 stream_total = 0, stream_ctrl = 0;
+	u32 reg_ctrl = 0, reg_ctrl2 = 0;
+
+	if (roi && (!roi->w || !roi->h))
+		return;
 
 	if (mode->dsc_enabled && mode->dsc) {
+		u32 reg = 0;
 		u32 offset = 0;
-		u32 reg_ctrl, reg_ctrl2;
+		int pic_width, this_frame_slices, intf_ip_w;
+		struct msm_display_dsc_info dsc;
+
+		memcpy(&dsc, mode->dsc, sizeof(dsc));
+		pic_width = roi ? roi->w : mode->h_active;
+		this_frame_slices = pic_width / dsc.slice_width;
+		intf_ip_w = this_frame_slices * dsc.slice_width;
+		dsi_dsc_pclk_param_calc(&dsc, intf_ip_w);
 
 		if (vc_id != 0)
 			offset = 16;
 		reg_ctrl = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL);
 		reg_ctrl2 = DSI_R32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2);
-		width_final = mode->dsc->pclk_per_line;
-		stride_final = mode->dsc->bytes_per_pkt;
+		width_final = dsc.pclk_per_line;
+		stride_final = dsc.bytes_per_pkt;
+		height_final = roi ? roi->h : mode->v_active;
 
 		reg = 0x39 << 8;
 		/*
@@ -258,34 +274,45 @@
 		 * 2 == 4 pkt
 		 * 3 pkt is not support
 		 */
-		if (mode->dsc->pkt_per_line == 4)
-			reg |= (mode->dsc->pkt_per_line - 2) << 6;
+		if (dsc.pkt_per_line == 4)
+			reg |= (dsc.pkt_per_line - 2) << 6;
 		else
-			reg |= (mode->dsc->pkt_per_line - 1) << 6;
-		reg |= mode->dsc->eol_byte_num << 4;
+			reg |= (dsc.pkt_per_line - 1) << 6;
+		reg |= dsc.eol_byte_num << 4;
 		reg |= 1;
 
 		reg_ctrl &= ~(0xFFFF << offset);
 		reg_ctrl |= (reg << offset);
 		reg_ctrl2 &= ~(0xFFFF << offset);
-		reg_ctrl2 |= (mode->dsc->bytes_in_slice << offset);
+		reg_ctrl2 |= (dsc.bytes_in_slice << offset);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
 		DSI_W32(ctrl, DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
+
+		pr_debug("ctrl %d reg_ctrl 0x%x reg_ctrl2 0x%x\n", ctrl->index,
+				reg_ctrl, reg_ctrl2);
+	} else if (roi) {
+		width_final = roi->w;
+		stride_final = roi->w * 3;
+		height_final = roi->h;
 	} else {
 		width_final = mode->h_active;
 		stride_final = h_stride;
+		height_final = mode->v_active;
 	}
 
-	reg = (stride_final + 1) << 16;
-	reg |= (vc_id & 0x3) << 8;
-	reg |= 0x39; /* packet data type */
+	stream_ctrl = (stride_final + 1) << 16;
+	stream_ctrl |= (vc_id & 0x3) << 8;
+	stream_ctrl |= 0x39; /* packet data type */
 
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, stream_ctrl);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, stream_ctrl);
 
-	reg = (mode->v_active << 16) | width_final;
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
-	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+	stream_total = (height_final << 16) | width_final;
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, stream_total);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, stream_total);
+
+	pr_debug("ctrl %d stream_ctrl 0x%x stream_total 0x%x\n", ctrl->index,
+			stream_ctrl, stream_total);
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c2cf2cb..3dd4950 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -16,6 +16,7 @@
 
 #include <linux/list.h>
 #include <linux/of.h>
+#include <linux/err.h>
 
 #include "msm_drv.h"
 #include "dsi_display.h"
@@ -27,10 +28,14 @@
 #include "dsi_pwr.h"
 
 #define to_dsi_display(x) container_of(x, struct dsi_display, host)
+#define INT_BASE_10 10
 
 static DEFINE_MUTEX(dsi_display_list_lock);
 static LIST_HEAD(dsi_display_list);
-
+static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
+static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
+static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY];
+static struct device_node *default_active_node;
 static const struct of_device_id dsi_display_dt_match[] = {
 	{.compatible = "qcom,dsi-display"},
 	{}
@@ -553,6 +558,184 @@
 	return rc;
 }
 
+static int dsi_display_parse_cmdline_topology(unsigned int display_type)
+{
+	char *str = NULL;
+	int top_index = -1;
+
+	if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
+		pr_err("display_type=%d not supported\n", display_type);
+		return -EINVAL;
+	}
+	if (display_type == DSI_PRIMARY)
+		str = strnstr(dsi_display_primary,
+			":config", strlen(dsi_display_primary));
+	else
+		str = strnstr(dsi_display_secondary,
+			":config", strlen(dsi_display_secondary));
+	if (!str)
+		return -EINVAL;
+
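+	/*
+	 * The boot parameter is expected to be of the form
+	 * "<display name>:config<topology index>".
+	 */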
+	if (kstrtoint(str + strlen(":config"), INT_BASE_10, &top_index))
+		return -EINVAL;
+
+	return top_index;
+}
+
+/**
+ * dsi_display_name_compare()- compare whether DSI display name matches.
+ * @node:	Pointer to device node structure
+ * @display_name: Name of display to validate
+ * @index:	Index of the boot display entry to check against
+ *
+ * Return:	returns a bool specifying whether given display is active
+ */
+static bool dsi_display_name_compare(struct device_node *node,
+			const char *display_name, int index)
+{
+	if (index >= MAX_DSI_ACTIVE_DISPLAY) {
+		pr_err("Invalid Index\n");
+		return false;
+	}
+
+	if (boot_displays[index].boot_disp_en) {
+		if (!(strcmp(&boot_displays[index].name[0], display_name))) {
+			boot_displays[index].node = node;
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * dsi_display_parse_boot_display_selection()- Parse DSI boot display name
+ *
+ * Return:	returns error status
+ */
+static int dsi_display_parse_boot_display_selection(void)
+{
+	char *pos = NULL;
+	char disp_buf[MAX_CMDLINE_PARAM_LEN] = {'\0'};
+	int i, j, num_displays;
+
+	if (strlen(dsi_display_primary) == 0)
+		return -EINVAL;
+
+	if ((strlen(dsi_display_secondary) > 0))
+		num_displays = MAX_DSI_ACTIVE_DISPLAY;
+	else {
+		/*
+		 * Initialize secondary dsi variables
+		 * for the scenario where dsi_display1
+		 * is null but dsi_display0 is valid
+		 */
+
+		/* Max number of displays will be one, i.e. only the primary */
+		num_displays = 1;
+		boot_displays[DSI_SECONDARY].is_primary = false;
+		boot_displays[DSI_SECONDARY].name[0] = '\0';
+	}
+
+	for (i = 0; i < num_displays; i++) {
+		boot_displays[i].is_primary = false;
+		if (i == DSI_PRIMARY) {
+			strlcpy(disp_buf, &dsi_display_primary[0],
+				sizeof(dsi_display_primary));
+			pos = strnstr(disp_buf, ":",
+				sizeof(dsi_display_primary));
+		} else {
+			strlcpy(disp_buf, &dsi_display_secondary[0],
+				sizeof(dsi_display_secondary));
+			pos = strnstr(disp_buf, ":",
+				sizeof(dsi_display_secondary));
+		}
+		/* Use ':' as a delimiter to retrieve the display name */
+		if (!pos) {
+			pr_debug("display name [%s] is not valid\n", disp_buf);
+			continue;
+		}
+
+		for (j = 0; (disp_buf + j) < pos; j++)
+			boot_displays[i].name[j] = *(disp_buf + j);
+		boot_displays[i].name[j] = '\0';
+
+		if (i == DSI_PRIMARY) {
+			boot_displays[i].is_primary = true;
+			/* Currently, secondary DSI display is not supported */
+			boot_displays[i].boot_disp_en = true;
+		}
+	}
+	return 0;
+}
+
+/**
+ * validate_dsi_display_selection()- validate boot DSI display selection
+ *
+ * Return:	returns true when both displays have unique configurations
+ */
+static bool validate_dsi_display_selection(void)
+{
+	int i, j;
+	int rc = 0;
+	int phy_count = 0;
+	int ctrl_count = 0;
+	int index = 0;
+	bool ctrl_flags[MAX_DSI_ACTIVE_DISPLAY] = {false, false};
+	bool phy_flags[MAX_DSI_ACTIVE_DISPLAY] = {false, false};
+	struct device_node *node, *ctrl_node, *phy_node;
+
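+	/*
+	 * Record which DSI controller and PHY instances (by "cell-index")
+	 * each boot display claims; a configuration is valid only if no
+	 * instance is claimed by both displays.
+	 */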
+	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
+		node = boot_displays[i].node;
+		ctrl_count = of_count_phandle_with_args(node, "qcom,dsi-ctrl",
+								NULL);
+
+		for (j = 0; j < ctrl_count; j++) {
+			ctrl_node = of_parse_phandle(node, "qcom,dsi-ctrl", j);
+			rc = of_property_read_u32(ctrl_node, "cell-index",
+					&index);
+			of_node_put(ctrl_node);
+			if (rc) {
+				pr_err("cell index not set for ctrl_nodes\n");
+				return false;
+			}
+			if (ctrl_flags[index])
+				return false;
+			ctrl_flags[index] = true;
+		}
+
+		phy_count = of_count_phandle_with_args(node, "qcom,dsi-phy",
+								NULL);
+		for (j = 0; j < phy_count; j++) {
+			phy_node = of_parse_phandle(node, "qcom,dsi-phy", j);
+			rc = of_property_read_u32(phy_node, "cell-index",
+					&index);
+			of_node_put(phy_node);
+			if (rc) {
+				pr_err("cell index not set for phy_nodes\n");
+				return false;
+			}
+			if (phy_flags[index])
+				return false;
+			phy_flags[index] = true;
+		}
+	}
+	return true;
+}
+
+struct device_node *dsi_display_get_boot_display(int index)
+{
+	pr_debug("index = %d\n", index);
+
+	if (boot_displays[index].node)
+		return boot_displays[index].node;
+	else if ((index == (MAX_DSI_ACTIVE_DISPLAY - 1))
+			&& (default_active_node))
+		return default_active_node;
+	else
+		return NULL;
+}
+
 static int dsi_display_phy_power_on(struct dsi_display *display)
 {
 	int rc = 0;
@@ -1004,9 +1187,9 @@
 	int i;
 
 	m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
-		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FETCH_MEMORY);
 	flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
-		 DSI_CTRL_CMD_FIFO_STORE);
+		 DSI_CTRL_CMD_FETCH_MEMORY);
 
 	/*
 	 * 1. Setup commands in FIFO
@@ -1101,8 +1284,8 @@
 				 const struct mipi_dsi_msg *msg)
 {
 	struct dsi_display *display = to_dsi_display(host);
-
-	int rc = 0;
+	struct dsi_display_ctrl *display_ctrl;
+	int rc = 0, cnt = 0;
 
 	if (!host || !msg) {
 		pr_err("Invalid params\n");
@@ -1131,6 +1314,44 @@
 		goto error_disable_clks;
 	}
 
+	if (display->tx_cmd_buf == NULL) {
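+		/*
+		 * First command transfer: allocate a single 4K uncached GEM
+		 * buffer whose iova and kernel mapping are shared with every
+		 * DSI controller, so commands can be fetched from memory
+		 * (DSI_CTRL_CMD_FETCH_MEMORY) instead of the command FIFO.
+		 */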
+		mutex_lock(&display->drm_dev->struct_mutex);
+		display->tx_cmd_buf = msm_gem_new(display->drm_dev,
+				SZ_4K,
+				MSM_BO_UNCACHED);
+		mutex_unlock(&display->drm_dev->struct_mutex);
+
+		display->cmd_buffer_size = SZ_4K;
+
+		if (IS_ERR_OR_NULL(display->tx_cmd_buf)) {
+			pr_err("failed to allocate gem tx cmd buffer\n");
+			display->tx_cmd_buf = NULL;
+			rc = -ENOMEM;
+			goto error_disable_cmd_engine;
+		}
+		rc = msm_gem_get_iova(display->tx_cmd_buf, 0,
+					&(display->cmd_buffer_iova));
+		if (rc) {
+			pr_err("failed to get the iova rc %d\n", rc);
+			goto free_gem;
+		}
+
+		display->vaddr =
+			(void *) msm_gem_get_vaddr(display->tx_cmd_buf);
+
+		if (IS_ERR_OR_NULL(display->vaddr)) {
+			pr_err("failed to get va rc %d\n", rc);
+			rc = -EINVAL;
+			goto put_iova;
+		}
+
+		for (cnt = 0; cnt < display->ctrl_count; cnt++) {
+			display_ctrl = &display->ctrl[cnt];
+			display_ctrl->ctrl->cmd_buffer_size = SZ_4K;
+			display_ctrl->ctrl->cmd_buffer_iova =
+						display->cmd_buffer_iova;
+			display_ctrl->ctrl->vaddr = display->vaddr;
+		}
+	}
+
 	if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
 		rc = dsi_display_broadcast_cmd(display, msg);
 		if (rc) {
@@ -1143,22 +1364,28 @@
 				msg->ctrl : 0;
 
 		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
-					  DSI_CTRL_CMD_FIFO_STORE);
+					  DSI_CTRL_CMD_FETCH_MEMORY);
 		if (rc) {
 			pr_err("[%s] cmd transfer failed, rc=%d\n",
 			       display->name, rc);
 			goto error_disable_cmd_engine;
 		}
 	}
+
 error_disable_cmd_engine:
 	(void)dsi_display_cmd_engine_disable(display);
 error_disable_clks:
 	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
 			DSI_ALL_CLKS, DSI_CLK_OFF);
 	if (rc) {
-		pr_err("[%s] failed to enable all DSI clocks, rc=%d\n",
+		pr_err("[%s] failed to disable all DSI clocks, rc=%d\n",
 		       display->name, rc);
 	}
+	return rc;
+put_iova:
+	msm_gem_put_iova(display->tx_cmd_buf, 0);
+free_gem:
+	msm_gem_free_object(display->tx_cmd_buf);
 error:
 	return rc;
 }
@@ -1754,7 +1981,8 @@
 		}
 	}
 
-	display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of);
+	display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of,
+						display->cmdline_topology);
 	if (IS_ERR_OR_NULL(display->panel)) {
 		rc = PTR_ERR(display->panel);
 		pr_err("failed to get panel, rc=%d\n", rc);
@@ -2419,6 +2647,7 @@
 		goto error_panel_deinit;
 	}
 
+	pr_info("Successfully bound display panel '%s'\n", display->name);
 	display->drm_dev = drm;
 	goto error;
 
@@ -2516,6 +2745,9 @@
 {
 	int rc = 0;
 	struct dsi_display *display;
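+	/*
+	 * These statics persist across probe calls for the individual DSI
+	 * display devices, tracking whether a command-line selected boot
+	 * display has already been matched and registered.
+	 */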
+	static bool display_from_cmdline, boot_displays_parsed;
+	static bool comp_add_success;
+	static struct device_node *primary_np, *secondary_np;
 
 	if (!pdev || !pdev->dev.of_node) {
 		pr_err("pdev not found\n");
@@ -2528,9 +2760,66 @@
 
 	display->name = of_get_property(pdev->dev.of_node, "label", NULL);
 
-	display->is_active = of_property_read_bool(pdev->dev.of_node,
-						"qcom,dsi-display-active");
+	if (!boot_displays_parsed) {
+		boot_displays[DSI_PRIMARY].boot_disp_en = false;
+		boot_displays[DSI_SECONDARY].boot_disp_en = false;
+		if (dsi_display_parse_boot_display_selection())
+			pr_debug("Display Boot param not valid/available\n");
 
+		boot_displays_parsed = true;
+	}
+
+	/* Initialize cmdline_topology to use default topology */
+	display->cmdline_topology = -1;
+	if ((!display_from_cmdline) &&
+			(boot_displays[DSI_PRIMARY].boot_disp_en)) {
+		display->is_active = dsi_display_name_compare(pdev->dev.of_node,
+						display->name, DSI_PRIMARY);
+		if (display->is_active) {
+			if (comp_add_success) {
+				(void)_dsi_display_dev_deinit(main_display);
+				component_del(&main_display->pdev->dev,
+					      &dsi_display_comp_ops);
+				comp_add_success = false;
+				default_active_node = NULL;
+				pr_debug("removed the existing comp ops\n");
+			}
+			/*
+			 * Need to add component for
+			 * the secondary DSI display
+			 * when more than one DSI display
+			 * is supported.
+			 */
+			pr_debug("cmdline primary dsi: %s\n",
+						display->name);
+			display_from_cmdline = true;
+			display->cmdline_topology =
+				dsi_display_parse_cmdline_topology(DSI_PRIMARY);
+			primary_np = pdev->dev.of_node;
+		}
+	}
+
+	if (boot_displays[DSI_SECONDARY].boot_disp_en) {
+		if (!secondary_np) {
+			if (dsi_display_name_compare(pdev->dev.of_node,
+				display->name, DSI_SECONDARY)) {
+				pr_debug("cmdline secondary dsi: %s\n",
+							display->name);
+				secondary_np = pdev->dev.of_node;
+				if (primary_np) {
+					if (validate_dsi_display_selection()) {
+					display->is_active = true;
+					display->cmdline_topology =
+					dsi_display_parse_cmdline_topology
+							(DSI_SECONDARY);
+					} else {
+						boot_displays[DSI_SECONDARY]
+							.boot_disp_en = false;
+					}
+				}
+			}
+		}
+	}
 	display->display_type = of_get_property(pdev->dev.of_node,
 						"qcom,display-type", NULL);
 	if (!display->display_type)
@@ -2543,6 +2832,10 @@
 	list_add(&display->list, &dsi_display_list);
 	mutex_unlock(&dsi_display_list_lock);
 
+	if (!display_from_cmdline)
+		display->is_active = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-display-active");
+
 	if (display->is_active) {
 		main_display = display;
 		rc = _dsi_display_dev_init(display);
@@ -2554,6 +2847,11 @@
 		rc = component_add(&pdev->dev, &dsi_display_comp_ops);
 		if (rc)
 			pr_err("component add failed, rc=%d\n", rc);
+
+		comp_add_success = true;
+		pr_debug("Component_add success: %s\n", display->name);
+		if (!display_from_cmdline)
+			default_active_node = pdev->dev.of_node;
 	}
 	return rc;
 }
@@ -2736,6 +3034,7 @@
 		goto error;
 	}
 
+	memset(info, 0, sizeof(struct msm_display_info));
 	info->intf_type = DRM_MODE_CONNECTOR_DSI;
 	timing = &display->panel->mode.timing;
 
@@ -3405,6 +3704,13 @@
 	dsi_ctrl_drv_unregister();
 	dsi_phy_drv_unregister();
 }
-
+module_param_string(dsi_display0, dsi_display_primary, MAX_CMDLINE_PARAM_LEN,
+								0600);
+MODULE_PARM_DESC(dsi_display0,
+	"msm_drm.dsi_display0=<display node>:<configX> where <display node> is the primary dsi display node name and <configX> selects index X from the display topology list");
+module_param_string(dsi_display1, dsi_display_secondary, MAX_CMDLINE_PARAM_LEN,
+								0600);
+MODULE_PARM_DESC(dsi_display1,
+	"msm_drm.dsi_display1=<display node>:<configX> where <display node> is the secondary dsi display node name and <configX> selects index X from the display topology list");
 module_init(dsi_display_register);
 module_exit(dsi_display_unregister);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index d2bc7d8..9aa3113 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -30,6 +30,7 @@
 
 #define MAX_DSI_CTRLS_PER_DISPLAY             2
 #define DSI_CLIENT_NAME_SIZE		20
+#define MAX_CMDLINE_PARAM_LEN	 512
 /*
  * DSI Validate Mode modifiers
  * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
@@ -37,6 +38,18 @@
 #define DSI_VALIDATE_FLAG_ALLOW_ADJUST	0x1
 
 /**
+ * enum dsi_display_selection_type - enumerates DSI display selection types
+ * @DSI_PRIMARY:    primary DSI display selected from module parameter
+ * @DSI_SECONDARY:  Secondary DSI display selected from module parameter
+ * @MAX_DSI_ACTIVE_DISPLAY: Maximum active displays that can be selected
+ */
+enum dsi_display_selection_type {
+	DSI_PRIMARY = 0,
+	DSI_SECONDARY,
+	MAX_DSI_ACTIVE_DISPLAY,
+};
+
+/**
  * enum dsi_display_type - enumerates DSI display types
  * @DSI_DISPLAY_SINGLE:       A panel connected on a single DSI interface.
  * @DSI_DISPLAY_EXT_BRIDGE:   A bridge is connected between panel and DSI host.
@@ -78,6 +91,22 @@
 
 	bool phy_enabled;
 };
+/**
+ * struct dsi_display_boot_param - defines DSI boot display selection
+ * @name: Name of the DSI display selected as a boot parameter.
+ * @boot_disp_en: bool indicating dtsi availability of the display node.
+ * @is_primary: bool indicating whether this display is the primary display.
+ * @length: length of the DSI display name.
+ * @node: device tree node of the selected boot display.
+ * @cmdline_topology: Display topology shared from the kernel command line.
+ */
+struct dsi_display_boot_param {
+	char name[MAX_CMDLINE_PARAM_LEN];
+	bool boot_disp_en;
+	bool is_primary;
+	int length;
+	struct device_node *node;
+	int cmdline_topology;
+};
 
 /**
  * struct dsi_display_clk_info - dsi display clock source information
@@ -113,6 +142,7 @@
  * @config:           DSI host configuration information.
  * @lane_map:         Lane mapping between DSI host and Panel.
  * @num_of_modes:     Number of modes supported by display.
+ * @cmdline_topology: Display topology shared from kernel command line.
  * @is_tpg_enabled:   TPG state.
  * @ulps_enabled:     ulps state.
  * @clamp_enabled:    clamp state.
@@ -151,10 +181,15 @@
 	struct dsi_host_config config;
 	struct dsi_lane_map lane_map;
 	u32 num_of_modes;
+	int cmdline_topology;
 	bool is_tpg_enabled;
 	bool ulps_enabled;
 	bool clamp_enabled;
 	bool phy_idle_power_off;
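+	/* buffer used for fetch-from-memory DSI command transfers */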
+	struct drm_gem_object *tx_cmd_buf;
+	u32 cmd_buffer_size;
+	u32 cmd_buffer_iova;
+	void *vaddr;
 
 	struct mipi_dsi_host host;
 	struct dsi_bridge    *bridge;
@@ -189,8 +224,16 @@
 		u32 max_display_count);
 
 /**
+ * dsi_display_get_boot_display()- get boot display device tree node
+ * @index:	index of display selection
+ *
+ * Return:	returns the display node pointer
+ */
+struct device_node *dsi_display_get_boot_display(int index);
+
+/**
  * dsi_display_get_display_by_name()- finds display by name
- * @index:      name of the display.
+ * @name:	name of the display.
  *
  * Return: handle to the display or error code.
  */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 37ed411..4e09cfb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -20,6 +20,7 @@
 #include "msm_kms.h"
 #include "sde_connector.h"
 #include "dsi_drm.h"
+#include "sde_trace.h"
 
 #define to_dsi_bridge(x)     container_of((x), struct dsi_bridge, base)
 #define to_dsi_state(x)      container_of((x), struct dsi_connector_state, base)
@@ -134,19 +135,24 @@
 		return;
 	}
 
+	SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
 	rc = dsi_display_prepare(c_bridge->display);
 	if (rc) {
 		pr_err("[%d] DSI display prepare failed, rc=%d\n",
 		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_pre_enable");
 		return;
 	}
 
+	SDE_ATRACE_BEGIN("dsi_display_enable");
 	rc = dsi_display_enable(c_bridge->display);
 	if (rc) {
 		pr_err("[%d] DSI display enable failed, rc=%d\n",
 		       c_bridge->id, rc);
 		(void)dsi_display_unprepare(c_bridge->display);
 	}
+	SDE_ATRACE_END("dsi_display_enable");
+	SDE_ATRACE_END("dsi_bridge_pre_enable");
 }
 
 static void dsi_bridge_enable(struct drm_bridge *bridge)
@@ -197,19 +203,25 @@
 		return;
 	}
 
+	SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
+	SDE_ATRACE_BEGIN("dsi_display_disable");
 	rc = dsi_display_disable(c_bridge->display);
 	if (rc) {
 		pr_err("[%d] DSI display disable failed, rc=%d\n",
 		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_display_disable");
 		return;
 	}
+	SDE_ATRACE_END("dsi_display_disable");
 
 	rc = dsi_display_unprepare(c_bridge->display);
 	if (rc) {
 		pr_err("[%d] DSI display unprepare failed, rc=%d\n",
 		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_post_disable");
 		return;
 	}
+	SDE_ATRACE_END("dsi_bridge_post_disable");
 }
 
 static void dsi_bridge_mode_set(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index dcb787b..b8bf7a8 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -22,9 +22,6 @@
 #include "dsi_panel.h"
 #include "dsi_ctrl_hw.h"
 
-#define MAX_CMDLINE_PARAM_LEN 256
-static char display_config[MAX_CMDLINE_PARAM_LEN];
-
 /**
  * topology is currently defined by a set of following 3 values:
  * 1. num of layer mixers
@@ -32,7 +29,6 @@
  * 3. num of interfaces
  */
 #define TOPOLOGY_SET_LEN 3
-#define INT_BASE_10 10
 #define MAX_TOPOLOGY 5
 
 #define DSI_PANEL_DEFAULT_LABEL  "Default dsi panel"
@@ -2078,31 +2074,9 @@
 	return 0;
 }
 
-static int dsi_get_cmdline_top_override(void)
-{
-	char *str = display_config;
-	int top_index = -1;
-
-	/*
-	 * This module need to be updated with needed cmd line argument parsing
-	 * for other dsi parameters.
-	 */
-	if (strlcat(str, "\0", sizeof(str)) > sizeof(str))
-		return -EINVAL;
-
-	str = strnstr(display_config, "config", strlen(display_config));
-	if (!str)
-		return -EINVAL;
-
-	if (kstrtol(str + strlen("config"), INT_BASE_10,
-				(unsigned long *)&top_index))
-		return -EINVAL;
-
-	return top_index;
-}
-
 static int dsi_panel_parse_topology(struct dsi_panel *panel,
-		struct device_node *of_node)
+				struct device_node *of_node,
+				int topology_override)
 {
 	struct msm_display_topology *topology;
 	u32 top_count, top_sel, *array = NULL;
@@ -2143,12 +2117,13 @@
 		top->num_intf = array[i * TOPOLOGY_SET_LEN + 2];
 	};
 
-	top_sel = dsi_get_cmdline_top_override();
-	if (top_sel >= 0 && top_sel < top_count) {
-		pr_info("overidden topology: lm: %d comp_enc:%d intf: %d\n",
-			topology[top_sel].num_lm,
-			topology[top_sel].num_enc,
-			topology[top_sel].num_intf);
+	if (topology_override >= 0 && topology_override < top_count) {
+		pr_info("override topology: cfg:%d lm:%d comp_enc:%d intf:%d\n",
+			topology_override,
+			topology[topology_override].num_lm,
+			topology[topology_override].num_enc,
+			topology[topology_override].num_intf);
+		top_sel = topology_override;
 		goto parse_done;
 	}
 
@@ -2266,7 +2241,8 @@
 }
 
 struct dsi_panel *dsi_panel_get(struct device *parent,
-				struct device_node *of_node)
+				struct device_node *of_node,
+				int topology_override)
 {
 	struct dsi_panel *panel;
 	const char *data;
@@ -2323,7 +2299,7 @@
 				    DSI_V_TOTAL(&panel->mode.timing) *
 				    panel->mode.timing.refresh_rate) / 1000;
 
-	rc = dsi_panel_parse_topology(panel, of_node);
+	rc = dsi_panel_parse_topology(panel, of_node, topology_override);
 	if (rc) {
 		pr_err("failed to parse panel topology, rc=%d\n", rc);
 		goto error;
@@ -2970,6 +2946,3 @@
 	mutex_unlock(&panel->panel_lock);
 	return rc;
 }
-
-module_param_string(display_param, display_config, MAX_CMDLINE_PARAM_LEN, 0600);
-MODULE_PARM_DESC(display_param, "format: configx - x indexes the selected topology from the display topology list. Index 0 corresponds to the first topology in the list");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 4c9fbbe..3569b5b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -205,7 +205,8 @@
 }
 
 struct dsi_panel *dsi_panel_get(struct device *parent,
-				struct device_node *of_node);
+				struct device_node *of_node,
+				int topology_override);
 void dsi_panel_put(struct dsi_panel *panel);
 
 int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
@@ -249,4 +250,6 @@
 int dsi_panel_send_roi_dcs(struct dsi_panel *panel, int ctrl_idx,
 		struct dsi_rect *roi);
 
+void dsi_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc, int intf_width);
+
 #endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index ff6802e..efeea31 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -20,6 +20,7 @@
 #include "msm_kms.h"
 #include "msm_gem.h"
 #include "msm_fence.h"
+#include "sde_trace.h"
 
 struct msm_commit {
 	struct drm_device *dev;
@@ -96,6 +97,7 @@
 	struct drm_crtc_state *old_crtc_state;
 	int i;
 
+	SDE_ATRACE_BEGIN("msm_disable");
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
 		const struct drm_encoder_helper_funcs *funcs;
 		struct drm_encoder *encoder;
@@ -177,6 +179,7 @@
 		else
 			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	}
+	SDE_ATRACE_END("msm_disable");
 }
 
 static void
@@ -286,6 +289,7 @@
 	int bridge_enable_count = 0;
 	int i;
 
+	SDE_ATRACE_BEGIN("msm_enable");
 	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		const struct drm_crtc_helper_funcs *funcs;
 
@@ -352,8 +356,10 @@
 	}
 
 	/* If no bridges were pre_enabled, skip iterating over them again */
-	if (bridge_enable_count == 0)
+	if (bridge_enable_count == 0) {
+		SDE_ATRACE_END("msm_enable");
 		return;
+	}
 
 	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
 		struct drm_encoder *encoder;
@@ -373,6 +379,7 @@
 
 		drm_bridge_enable(encoder->bridge);
 	}
+	SDE_ATRACE_END("msm_enable");
 }
 
 /* The (potentially) asynchronous part of the commit.  At this point
@@ -430,7 +437,9 @@
 
 	commit = container_of(work, struct msm_commit, commit_work);
 
+	SDE_ATRACE_BEGIN("complete_commit");
 	complete_commit(commit);
+	SDE_ATRACE_END("complete_commit");
 }
 
 static struct msm_commit *commit_init(struct drm_atomic_state *state)
@@ -512,9 +521,12 @@
 	struct drm_plane_state *plane_state;
 	int i, ret;
 
+	SDE_ATRACE_BEGIN("atomic_commit");
 	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
+	if (ret) {
+		SDE_ATRACE_END("atomic_commit");
 		return ret;
+	}
 
 	c = commit_init(state);
 	if (!c) {
@@ -592,14 +604,17 @@
 			commit_destroy(c);
 			goto error;
 		}
+		SDE_ATRACE_END("atomic_commit");
 		return 0;
 	}
 
 	complete_commit(c);
 
+	SDE_ATRACE_END("atomic_commit");
 	return 0;
 
 error:
 	drm_atomic_helper_cleanup_planes(dev, state);
+	SDE_ATRACE_END("atomic_commit");
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a3a9142..962087c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -45,6 +45,7 @@
 #include "msm_gpu.h"
 #include "msm_kms.h"
 #include "sde_wb.h"
+#include "dsi_display.h"
 
 /*
  * MSM driver version:
@@ -58,12 +59,68 @@
 
 #define TEARDOWN_DEADLOCK_RETRY_MAX 5
 
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
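+	/*
+	 * Re-detect each HPD-capable connector and emit a KOBJ_CHANGE uevent
+	 * of the form "name=<connector> status=<status>" on the primary drm
+	 * device; called when no fbdev helper is present to handle hotplug.
+	 */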
+	struct drm_connector *connector;
+	char *event_string;
+	char const *connector_name;
+	char *envp[2];
+
+	if (!dev) {
+		DRM_ERROR("hotplug_event failed, invalid input\n");
+		return;
+	}
+
+	if (!dev->mode_config.poll_enabled)
+		return;
+
+	event_string = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!event_string) {
+		DRM_ERROR("failed to allocate event string\n");
+		return;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, dev) {
+		/* Only handle HPD capable connectors. */
+		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+			continue;
+
+		connector->status = connector->funcs->detect(connector, false);
+
+		if (connector->name)
+			connector_name = connector->name;
+		else
+			connector_name = "unknown";
+
+		snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+			connector_name,
+			drm_get_connector_status_name(connector->status));
+		DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+		envp[0] = event_string;
+		envp[1] = NULL;
+		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+				envp);
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+	kfree(event_string);
+}
+
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
-	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_private *priv = NULL;
+
+	if (!dev) {
+		DRM_ERROR("output_poll_changed failed, invalid input\n");
+		return;
+	}
+
+	priv = dev->dev_private;
 
 	if (priv->fbdev)
 		drm_fb_helper_hotplug_event(priv->fbdev);
+	else
+		msm_drm_helper_hotplug_event(dev);
 }
 
 int msm_atomic_check(struct drm_device *dev,
@@ -1764,15 +1821,26 @@
 				  struct component_match **matchptr)
 {
 	struct device *mdp_dev = NULL;
+	struct device_node *node;
+	const char *name;
 	int ret;
 
 	if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) {
 		struct device_node *np = dev->of_node;
 		unsigned int i;
 
-		for (i = 0; ; i++) {
-			struct device_node *node;
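+		/*
+		 * Add the boot-selected DSI displays (chosen via the
+		 * dsi_display0/dsi_display1 module parameters) as components
+		 * first, then fall through to the generic "connectors" list.
+		 */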
+		for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
+			node = dsi_display_get_boot_display(i);
 
+			if (node != NULL) {
+				name = of_get_property(node, "label", NULL);
+				component_match_add(dev, matchptr, compare_of,
+						node);
+				pr_debug("Added component = %s\n", name);
+			}
+		}
+
+		for (i = 0; ; i++) {
 			node = of_parse_phandle(np, "connectors", i);
 			if (!node)
 				break;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f596989..2cd9aa1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -110,6 +110,8 @@
 	PLANE_PROP_ROT_DST_Y,
 	PLANE_PROP_ROT_DST_W,
 	PLANE_PROP_ROT_DST_H,
+	PLANE_PROP_PREFILL_SIZE,
+	PLANE_PROP_PREFILL_TIME,
 
 	/* enum/bitmask properties */
 	PLANE_PROP_ROTATION,
diff --git a/drivers/gpu/drm/msm/msm_evtlog.c b/drivers/gpu/drm/msm/msm_evtlog.c
deleted file mode 100644
index dbe9b88..0000000
--- a/drivers/gpu/drm/msm/msm_evtlog.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	"msm_evtlog:[%s] " fmt, __func__
-
-#include "msm_evtlog.h"
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <asm-generic/current.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-
-#include "sde_trace.h"
-
-#define SIZE_MASK(x) (x - 1)
-
-static int msm_evtlog_debugfs_dump(struct seq_file *s, void *data)
-{
-	struct msm_evtlog *log = s->private;
-	unsigned long cnt;	/* # of samples since clear */
-	unsigned long n;	/* # of samples to print, also head index */
-	unsigned long i;
-	struct timespec timespec;
-
-	/**
-	 * Prints in chronological order, oldest -> newest
-	 * Note due to lock-less design, the first few printed entries
-	 * may be corrupted by new writer not oldest.
-	 * This is a tradeoff for speed of sampling
-	 */
-	cnt = atomic_read(&log->cnt);
-	if (!cnt)
-		return 0;
-
-	n = cnt & SIZE_MASK(log->size);
-
-	/**
-	 * If not full, print from first log
-	 * (which is index 1 since atomic_inc_return is prefix operator)
-	 */
-	i = (cnt < log->size) ? 0 : n;
-
-	seq_puts(s, "time_ns, pid, func, line, val1, val2, msg\n");
-	do {
-		i = (i + 1) & SIZE_MASK(log->size);
-		timespec = ktime_to_timespec(log->events[i].ktime);
-		seq_printf(s, "[%5lu.%06lu], %d, %s, %d, %llu, %llu, %s\n",
-				timespec.tv_sec,
-				timespec.tv_nsec / 1000,
-				log->events[i].pid,
-				log->events[i].func,
-				log->events[i].line,
-				log->events[i].val1,
-				log->events[i].val2,
-				log->events[i].msg);
-	} while (i != n);
-
-	return 0;
-}
-
-static int msm_evtlog_debugfs_open_dump(struct inode *inode, struct file *file)
-{
-	return single_open(file, msm_evtlog_debugfs_dump, inode->i_private);
-}
-
-static ssize_t msm_evtlog_debugfs_write(
-		struct file *file,
-		const char __user *user_buf,
-		size_t size,
-		loff_t *ppos)
-{
-	struct seq_file *s = file->private_data;
-	struct msm_evtlog *log = s->private;
-	char buf[64];
-	int buf_size;
-
-	buf_size = min(size, (sizeof(buf) - 1));
-	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
-		return -EFAULT;
-	buf[buf_size] = 0;
-
-	if (strcmp(buf, "0") == 0)
-		atomic_set(&log->cnt, 0);
-
-	return size;
-
-}
-
-static const struct file_operations msm_evtlog_fops = {
-	.open =		msm_evtlog_debugfs_open_dump,
-	.read =		seq_read,
-	.write =	msm_evtlog_debugfs_write,
-	.llseek =	seq_lseek,
-	.release =	single_release,
-};
-
-int msm_evtlog_init(
-		struct msm_evtlog *log,
-		int size,
-		struct dentry *parent)
-{
-	if (!log || size < 1) {
-		pr_err("Invalid params\n");
-		return -EINVAL;
-	}
-
-	memset(log, 0, sizeof(*log));
-	log->size = roundup_pow_of_two(size);
-	log->events = kcalloc(log->size, sizeof(struct msm_evtlog_evt),
-			GFP_KERNEL);
-
-	if (!log->events) {
-		pr_err("Insufficient memory\n");
-		return -ENOMEM;
-	}
-
-	atomic_set(&log->cnt, 0);
-
-	log->dentry = debugfs_create_file("evtlog", 0644, parent,
-			log, &msm_evtlog_fops);
-
-	if (IS_ERR_OR_NULL(log->dentry)) {
-		int rc = PTR_ERR(log->dentry);
-
-		pr_err("debugfs create file failed, rc=%d\n", rc);
-		kfree(log->events);
-		return rc;
-	}
-
-	return 0;
-}
-
-void msm_evtlog_destroy(struct msm_evtlog *log)
-{
-	debugfs_remove(log->dentry);
-
-	/* Caller needs to make sure that log sampling has stopped */
-	kfree(log->events);
-
-}
-
-void msm_evtlog_sample(
-		struct msm_evtlog *log,
-		const char *func,
-		const char *msg,
-		uint64_t val1,
-		uint64_t val2,
-		uint32_t line)
-{
-	unsigned long i;
-
-	/**
-	 * Since array sized with pow of 2, roll to 0 when cnt overflows
-	 * mod the value with the size to get current idx into array
-	 */
-	i = (unsigned long)(atomic_inc_return(&log->cnt)) &
-			SIZE_MASK(log->size);
-	log->events[i].ktime = ktime_get();
-	log->events[i].func = func;
-	log->events[i].msg = msg;
-	log->events[i].val1 = val1;
-	log->events[i].val2 = val2;
-	log->events[i].line = line;
-	log->events[i].pid = current->pid;
-
-	trace_sde_evtlog(func, line, val1, val2);
-}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3061099..acd7af5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -341,7 +341,8 @@
 			if (obj->import_attach && mmu->funcs->map_dma_buf) {
 				ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
 						obj->import_attach->dmabuf,
-						DMA_BIDIRECTIONAL);
+						DMA_BIDIRECTIONAL,
+						msm_obj->flags);
 				if (ret) {
 					DRM_ERROR("Unable to map dma buf\n");
 					return ret;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2cf170d..19c7726 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -23,6 +23,7 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_KEEPATTRS     0x20000000     /* keep h/w bus attributes */
 
 struct msm_gem_object {
 	struct drm_gem_object base;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index ee93339..fbf7e7b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -43,7 +43,7 @@
 	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
 		enum dma_data_direction dir);
 	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir);
+			struct dma_buf *dma_buf, int dir, u32 flags);
 	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
 			struct dma_buf *dma_buf, int dir);
 	void (*destroy)(struct msm_mmu *mmu);
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index c279d01..4d45898 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -25,6 +25,7 @@
 #include <soc/qcom/secure_buffer.h>
 
 #include "msm_drv.h"
+#include "msm_gem.h"
 #include "msm_mmu.h"
 
 #ifndef SZ_4G
@@ -220,14 +221,18 @@
 }
 
 static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
-			struct dma_buf *dma_buf, int dir)
+			struct dma_buf *dma_buf, int dir, u32 flags)
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+	unsigned long attrs = 0x0;
 	int ret;
 
-	ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
-			dma_buf);
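+	/* MSM_BO_KEEPATTRS requests preserving the upstream bus attributes */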
+	if (flags & MSM_BO_KEEPATTRS)
+		attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+	ret = msm_dma_map_sg_attrs(client->dev, sgt->sgl, sgt->nents, dir,
+			dma_buf, attrs);
 	if (ret != sgt->nents) {
 		DRM_ERROR("dma map sg failed\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 71e64e4..497d0db 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -282,7 +282,7 @@
  * Returns: Pointer to associated private display structure
  */
 #define sde_connector_get_panel(C) \
-	((C) ? to_sde_connector((C))->panel : 0)
+	((C) ? to_sde_connector((C))->panel : NULL)
 
 /**
  * sde_connector_get_encoder - get sde connector's private encoder pointer
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index 5adef2d..1b40161 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -428,6 +428,103 @@
 	sde_kms->irq_obj.total_irqs = 0;
 }
 
+static void sde_core_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
+static void sde_core_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != NULL);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	/* memory barrier */
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	/* memory barrier */
+	smp_mb__after_atomic();
+}
+
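+/*
+ * Software irq_chip for SDE core interrupts: mask/unmask only toggle the
+ * hwirq's bit in irq_controller.enabled_mask; hwirqs are mapped onto this
+ * chip with handle_level_irq through a 32-entry linear irq domain.
+ */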
+static struct irq_chip sde_core_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_core_irq_mask,
+	.irq_unmask = sde_core_irq_unmask,
+};
+
+static int sde_core_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != NULL);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_core_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+static const struct irq_domain_ops sde_core_irqdomain_ops = {
+	.map = sde_core_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_core_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
 irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
 {
 	/*
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 64f4160..c775f8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -38,6 +38,20 @@
 void sde_core_irq_uninstall(struct sde_kms *sde_kms);
 
 /**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms:		SDE handle
+ * @return:		0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
  * sde_core_irq - core IRQ handler
  * @sde_kms:		SDE handle
  * @return:		interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 7671649..fd79016 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -42,6 +42,23 @@
 	SDE_PERF_MODE_MAX
 };
 
+/**
+ * enum sde_perf_vote_mode: perf vote mode.
+ * @APPS_RSC_MODE:	It combines the vote for all displays and votes it
+ *                      through APPS rsc. This is default mode when display
+ *                      rsc is not available.
+ * @DISP_RSC_MODE:	It combines the vote for all displays and votes it
+ *                      through display rsc. This is default configuration
+ *                      when display rsc is available.
+ * @DISP_RSC_PRIMARY_MODE:	The primary display votes through display rsc
+ *                      while all other displays vote through apps rsc.
+ */
+enum sde_perf_vote_mode {
+	APPS_RSC_MODE,
+	DISP_RSC_MODE,
+	DISP_RSC_PRIMARY_MODE,
+};
+
 static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -87,13 +104,14 @@
 	return intf_connected;
 }
 
-static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
+		struct drm_crtc *crtc,
 		struct drm_crtc_state *state,
 		struct sde_core_perf_params *perf)
 {
 	struct sde_crtc_state *sde_cstate;
 
-	if (!crtc || !state || !perf) {
+	if (!kms || !kms->catalog || !crtc || !state || !perf) {
 		SDE_ERROR("invalid parameters\n");
 		return;
 	}
@@ -107,6 +125,20 @@
 	perf->core_clk_rate =
 			sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
 
+	if (!sde_cstate->bw_control) {
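+		/*
+		 * Displays without bandwidth control are given the catalog
+		 * maximums; the "minimum" and "fixed" perf modes below zero
+		 * the votes or apply the debugfs-provided fixed values.
+		 */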
+		perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL;
+		perf->max_per_pipe_ib = perf->bw_ctl;
+		perf->core_clk_rate = kms->perf.max_core_clk_rate;
+	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) {
+		perf->bw_ctl = 0;
+		perf->max_per_pipe_ib = 0;
+		perf->core_clk_rate = 0;
+	} else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) {
+		perf->bw_ctl = kms->perf.fix_core_ab_vote;
+		perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
+		perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+	}
+
 	SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n",
 			crtc->base.id, perf->core_clk_rate,
 			perf->max_per_pipe_ib, perf->bw_ctl);
@@ -140,9 +172,8 @@
 
 	sde_cstate = to_sde_crtc_state(state);
 
-	/* swap state and obtain new values */
-	sde_cstate->cur_perf = sde_cstate->new_perf;
-	_sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+	/* obtain new values */
+	_sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf);
 
 	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
 	curr_client_type = sde_crtc_get_client_type(crtc);
@@ -169,12 +200,12 @@
 
 	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
 
-	if (!threshold) {
-		sde_cstate->new_perf = sde_cstate->cur_perf;
+	if (!sde_cstate->bw_control) {
+		SDE_DEBUG("bypass bandwidth check\n");
+	} else if (!threshold) {
 		SDE_ERROR("no bandwidth limits specified\n");
 		return -E2BIG;
 	} else if (bw > threshold) {
-		sde_cstate->new_perf = sde_cstate->cur_perf;
 		SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
 		return -E2BIG;
 	}
@@ -182,12 +213,38 @@
 	return 0;
 }
 
+static inline bool _is_crtc_client_type_matches(struct drm_crtc *tmp_crtc,
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (!tmp_crtc)
+		return false;
+	else if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+							perf->sde_rsc_available)
+		return curr_client_type == sde_crtc_get_client_type(tmp_crtc);
+	else
+		return true;
+}
+
+static inline enum sde_crtc_client_type _get_sde_client_type(
+	enum sde_crtc_client_type curr_client_type,
+	struct sde_core_perf *perf)
+{
+	if (perf->bw_vote_mode == DISP_RSC_PRIMARY_MODE &&
+						perf->sde_rsc_available)
+		return curr_client_type;
+	else if (perf->bw_vote_mode != APPS_RSC_MODE && perf->sde_rsc_available)
+		return RT_RSC_CLIENT;
+	else
+		return RT_CLIENT;
+}
+
 static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
 		struct drm_crtc *crtc)
 {
 	u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota;
 	struct sde_core_perf_params perf = {0};
-	enum sde_crtc_client_type curr_client_type
+	enum sde_crtc_client_type client_vote, curr_client_type
 					= sde_crtc_get_client_type(crtc);
 	struct drm_crtc *tmp_crtc;
 	struct sde_crtc_state *sde_cstate;
@@ -195,7 +252,8 @@
 
 	drm_for_each_crtc(tmp_crtc, crtc->dev) {
 		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
-		    (curr_client_type == sde_crtc_get_client_type(tmp_crtc))) {
+		    _is_crtc_client_type_matches(tmp_crtc, curr_client_type,
+								&kms->perf)) {
 			sde_cstate = to_sde_crtc_state(tmp_crtc->state);
 
 			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
@@ -217,7 +275,8 @@
 		bus_ib_quota = kms->perf.fix_core_ib_vote;
 	}
 
-	switch (curr_client_type) {
+	client_vote = _get_sde_client_type(curr_client_type, &kms->perf);
+	switch (client_vote) {
 	case NRT_CLIENT:
 		sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
 				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
@@ -246,6 +305,32 @@
 		SDE_ERROR("invalid client type:%d\n", curr_client_type);
 		break;
 	}
+
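+	/*
+	 * If the vote mode was switched at runtime (debugfs), drop the quota
+	 * previously held on the path that is no longer in use: zero the
+	 * apps-RSC data bus quota when moving to display RSC, or the display
+	 * RSC vote when moving back to apps RSC.
+	 */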
+	if (kms->perf.bw_vote_mode_updated) {
+		switch (kms->perf.bw_vote_mode) {
+		case DISP_RSC_MODE:
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0);
+			sde_power_data_bus_set_quota(&priv->phandle,
+				kms->core_client,
+				SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0);
+			kms->perf.bw_vote_mode_updated = false;
+			break;
+
+		case APPS_RSC_MODE:
+			sde_cstate = to_sde_crtc_state(crtc->state);
+			if (sde_cstate->rsc_client) {
+				sde_rsc_client_vote(sde_cstate->rsc_client,
+									0, 0);
+				kms->perf.bw_vote_mode_updated = false;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
 }
 
 /**
@@ -259,6 +344,7 @@
 void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
 {
 	struct drm_crtc *tmp_crtc;
+	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *sde_cstate;
 	struct sde_kms *kms;
 
@@ -273,6 +359,7 @@
 		return;
 	}
 
+	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
 	/* only do this for command mode rt client (non-rsc client) */
@@ -295,8 +382,7 @@
 	/* Release the bandwidth */
 	if (kms->perf.enable_bw_release) {
 		trace_sde_cmd_release_bw(crtc->base.id);
-		sde_cstate->cur_perf.bw_ctl = 0;
-		sde_cstate->new_perf.bw_ctl = 0;
+		sde_crtc->cur_perf.bw_ctl = 0;
 		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
 		_sde_core_perf_crtc_update_bus(kms, crtc);
 	}
@@ -349,25 +435,31 @@
 	}
 	priv = kms->dev->dev_private;
 
+	/* wake vote update is not required with display rsc */
+	if (kms->perf.bw_vote_mode == DISP_RSC_MODE && stop_req)
+		return;
+
 	sde_crtc = to_sde_crtc(crtc);
 	sde_cstate = to_sde_crtc_state(crtc->state);
 
 	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
 			crtc->base.id, stop_req, kms->perf.core_clk_rate);
 
-	old = &sde_cstate->cur_perf;
+	old = &sde_crtc->cur_perf;
 	new = &sde_cstate->new_perf;
 
 	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
 		/*
 		 * cases for bus bandwidth update.
-		 * 1. new bandwidth vote or writeback output vote
-		 *    are higher than current vote for update request.
-		 * 2. new bandwidth vote or writeback output vote are
-		 *    lower than current vote at end of commit or stop.
+		 * 1. new bandwidth vote - "ab or ib vote" is higher
+		 *    than current vote for update request.
+		 * 2. new bandwidth vote - "ab or ib vote" is lower
+		 *    than current vote at end of commit or stop.
 		 */
-		if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
-		    (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl) ||
+			  (new->max_per_pipe_ib > old->max_per_pipe_ib))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl) ||
+			  (new->max_per_pipe_ib < old->max_per_pipe_ib)))) {
 			SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
 				crtc->base.id, params_changed, new->bw_ctl,
 				old->bw_ctl);
@@ -376,6 +468,22 @@
 			update_bus = 1;
 		}
 
+		/* display rsc override during solver mode */
+		if (kms->perf.bw_vote_mode == DISP_RSC_MODE &&
+				get_sde_rsc_current_state(SDE_RSC_INDEX) ==
+							    SDE_RSC_CMD_STATE) {
+			/* update new bandwidth in all cases */
+			if (params_changed && ((new->bw_ctl != old->bw_ctl) ||
+			      (new->max_per_pipe_ib != old->max_per_pipe_ib))) {
+				old->bw_ctl = new->bw_ctl;
+				old->max_per_pipe_ib = new->max_per_pipe_ib;
+				update_bus = 1;
+			/* reducing the bw vote is not required in solver mode */
+			} else if (!params_changed) {
+				update_bus = 0;
+			}
+		}
+
 		if ((params_changed &&
 				(new->core_clk_rate > old->core_clk_rate)) ||
 				(!params_changed &&
@@ -390,6 +498,9 @@
 		update_bus = 1;
 		update_clk = 1;
 	}
+	trace_sde_perf_crtc_update(crtc->base.id, new->bw_ctl,
+				new->core_clk_rate, stop_req,
+				update_bus, update_clk);
 
 	if (update_bus)
 		_sde_core_perf_crtc_update_bus(kms, crtc);
@@ -535,6 +646,10 @@
 			(u32 *)&catalog->perf.max_bw_high);
 	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
 			(u32 *)perf, &sde_core_perf_mode_fops);
+	debugfs_create_u32("bw_vote_mode", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode);
+	debugfs_create_bool("bw_vote_mode_updated", 0600, perf->debugfs_root,
+			&perf->bw_vote_mode_updated);
 	debugfs_create_u64("fix_core_clk_rate", 0644, perf->debugfs_root,
 			&perf->fix_core_clk_rate);
 	debugfs_create_u64("fix_core_ib_vote", 0644, perf->debugfs_root,
@@ -566,7 +681,6 @@
 	sde_core_perf_debugfs_destroy(perf);
 	perf->max_core_clk_rate = 0;
 	perf->core_clk = NULL;
-	mutex_destroy(&perf->perf_lock);
 	perf->clk_name = NULL;
 	perf->phandle = NULL;
 	perf->catalog = NULL;
@@ -590,7 +704,12 @@
 	perf->phandle = phandle;
 	perf->pclient = pclient;
 	perf->clk_name = clk_name;
-	mutex_init(&perf->perf_lock);
+	perf->sde_rsc_available = is_sde_rsc_available(SDE_RSC_INDEX);
+	/* set default mode */
+	if (perf->sde_rsc_available)
+		perf->bw_vote_mode = DISP_RSC_MODE;
+	else
+		perf->bw_vote_mode = APPS_RSC_MODE;
 
 	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
 	if (!perf->core_clk) {
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
index 31851be..4a1bdad 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -22,8 +22,6 @@
 #include "sde_power_handle.h"
 
 #define	SDE_PERF_DEFAULT_MAX_CORE_CLK_RATE	320000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA	2000000000
-#define	SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA	2000000000
 
 /**
  * struct sde_core_perf_params - definition of performance parameters
@@ -53,7 +51,6 @@
  * struct sde_core_perf - definition of core performance context
  * @dev: Pointer to drm device
  * @debugfs_root: top level debug folder
- * @perf_lock: serialization lock for this context
  * @catalog: Pointer to catalog configuration
  * @phandle: Pointer to power handler
  * @pclient: Pointer to power client
@@ -66,11 +63,13 @@
  * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
  * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
  * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ * @bw_vote_mode: apps rsc vs display rsc bandwidth vote mode
+ * @sde_rsc_available: is display rsc available
+ * @bw_vote_mode_updated: bandwidth vote mode update
  */
 struct sde_core_perf {
 	struct drm_device *dev;
 	struct dentry *debugfs_root;
-	struct mutex perf_lock;
 	struct sde_mdss_cfg *catalog;
 	struct sde_power_handle *phandle;
 	struct sde_power_client *pclient;
@@ -83,6 +82,9 @@
 	u64 fix_core_clk_rate;
 	u64 fix_core_ib_vote;
 	u64 fix_core_ab_vote;
+	u32 bw_vote_mode;
+	bool sde_rsc_available;
+	bool bw_vote_mode_updated;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a457938..075864b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -36,6 +36,7 @@
 #include "sde_connector.h"
 #include "sde_power_handle.h"
 #include "sde_core_perf.h"
+#include "sde_trace.h"
 
 struct sde_crtc_irq_info {
 	struct sde_irq_callback irq;
@@ -381,7 +382,7 @@
 	if (rp->ops.get)
 		val = rp->ops.get(NULL, type, -1);
 	if (IS_ERR_OR_NULL(val)) {
-		SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
+		SDE_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
 				crtc->base.id, rp->sequence_id, type);
 		return NULL;
 	}
@@ -612,12 +613,15 @@
 	int i;
 
 	if (!dim_layer->rect.w || !dim_layer->rect.h) {
-		SDE_DEBUG("empty dim layer\n");
+		SDE_DEBUG("empty dim_layer\n");
 		return;
 	}
 
 	cstate = to_sde_crtc_state(crtc->state);
 
+	SDE_DEBUG("dim_layer - flags:%d, stage:%d\n",
+			dim_layer->flags, dim_layer->stage);
+
 	split_dim_layer.stage = dim_layer->stage;
 	split_dim_layer.color_fill = dim_layer->color_fill;
 
@@ -651,9 +655,13 @@
 		} else {
 			split_dim_layer.rect.x =
 					split_dim_layer.rect.x -
-					cstate->lm_bounds[i].w;
+						cstate->lm_bounds[i].x;
 		}
 
+		SDE_DEBUG("split_dim_layer - LM:%d, rect:{%d,%d,%d,%d}\n",
+			i, split_dim_layer.rect.x, split_dim_layer.rect.y,
+			split_dim_layer.rect.w, split_dim_layer.rect.h);
+
 		lm = mixer[i].hw_lm;
 		mixer[i].mixer_op_mode |= 1 << split_dim_layer.stage;
 		lm->ops.setup_dim_layer(lm, &split_dim_layer);
@@ -858,9 +866,24 @@
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	for (i = 0; i < sde_crtc->num_mixers; i++) {
-		if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
-			disp_bitmask |= BIT(i);
+	/* pingpong split: one ROI, one LM, two physical displays */
+	if (crtc_state->is_ppsplit) {
+		u32 lm_split_width = crtc_state->lm_bounds[0].w / 2;
+		struct sde_rect *roi = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi))
+			disp_bitmask = 0;
+		else if ((u32)roi->x + (u32)roi->w <= lm_split_width)
+			disp_bitmask = BIT(0);		/* left only */
+		else if (roi->x >= lm_split_width)
+			disp_bitmask = BIT(1);		/* right only */
+		else
+			disp_bitmask = BIT(0) | BIT(1); /* left and right */
+	} else {
+		for (i = 0; i < sde_crtc->num_mixers; i++) {
+			if (!sde_kms_rect_is_null(&crtc_state->lm_roi[i]))
+				disp_bitmask |= BIT(i);
+		}
 	}
 
 	SDE_DEBUG("affected displays 0x%x\n", disp_bitmask);
@@ -881,9 +904,6 @@
 	sde_crtc = to_sde_crtc(crtc);
 	crtc_state = to_sde_crtc_state(state);
 
-	if (sde_crtc->num_mixers == 1)
-		return 0;
-
 	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
 		SDE_ERROR("%s: unsupported number of mixers: %d\n",
 				sde_crtc->name, sde_crtc->num_mixers);
@@ -891,9 +911,41 @@
 	}
 
 	/*
-	 * On certain HW, ROIs must be centered on the split between LMs,
-	 * and be of equal width.
+	 * If using pingpong split: one ROI, one LM, two physical displays
+	 * then the ROI must be centered on the panel split boundary and
+	 * be of equal width across the split.
 	 */
+	if (crtc_state->is_ppsplit) {
+		u16 panel_split_width;
+		u32 display_mask;
+
+		roi[0] = &crtc_state->lm_roi[0];
+
+		if (sde_kms_rect_is_null(roi[0]))
+			return 0;
+
+		display_mask = _sde_crtc_get_displays_affected(crtc, state);
+		if (display_mask != (BIT(0) | BIT(1)))
+			return 0;
+
+		panel_split_width = crtc_state->lm_bounds[0].w / 2;
+		if (roi[0]->x + roi[0]->w / 2 != panel_split_width) {
+			SDE_ERROR("%s: roi x %d w %d split %d\n",
+					sde_crtc->name, roi[0]->x, roi[0]->w,
+					panel_split_width);
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * On certain HW, if using 2 LM, ROIs must be split evenly between the
+	 * LMs and be of equal width.
+	 */
+	if (sde_crtc->num_mixers == 1)
+		return 0;
+
 	roi[0] = &crtc_state->lm_roi[0];
 	roi[1] = &crtc_state->lm_roi[1];
 
@@ -1060,12 +1112,11 @@
 	struct sde_hw_stage_cfg *stage_cfg;
 	struct sde_rect plane_crtc_roi;
 
-	u32 flush_mask = 0;
+	u32 flush_mask, flush_sbuf, flush_tmp;
 	uint32_t lm_idx = LEFT_MIXER, stage_idx;
 	bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
 	int zpos_cnt[CRTC_DUAL_MIXERS][SDE_STAGE_MAX + 1] = { {0} };
 	int i;
-	bool sbuf_mode = false;
 	u32 prefill = 0;
 
 	if (!sde_crtc || !mixer) {
@@ -1077,6 +1128,10 @@
 	lm = mixer->hw_lm;
 	stage_cfg = &sde_crtc->stage_cfg;
 	cstate = to_sde_crtc_state(crtc->state);
+	flush_sbuf = 0x0;
+
+	cstate->sbuf_cfg.rot_op_mode = SDE_CTL_ROT_OP_MODE_OFFLINE;
+	cstate->sbuf_prefill_line = 0;
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		state = plane->state;
@@ -1092,10 +1147,16 @@
 		fb = state->fb;
 
 		if (sde_plane_is_sbuf_mode(plane, &prefill))
-			sbuf_mode = true;
+			cstate->sbuf_cfg.rot_op_mode =
+					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
+		if (prefill > cstate->sbuf_prefill_line)
+			cstate->sbuf_prefill_line = prefill;
 
-		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
+		sde_plane_get_ctl_flush(plane, ctl, &flush_mask, &flush_tmp);
 
+		/* persist rotator flush bit(s) for one more commit */
+		flush_mask |= cstate->sbuf_flush_mask | flush_tmp;
+		flush_sbuf |= flush_tmp;
 
 		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
 				crtc->base.id,
@@ -1111,7 +1172,8 @@
 				state->src_x >> 16, state->src_y >> 16,
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_x, state->crtc_y,
-				state->crtc_w, state->crtc_h);
+				state->crtc_w, state->crtc_h,
+				cstate->sbuf_cfg.rot_op_mode);
 
 		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
 			struct sde_rect intersect;
@@ -1156,6 +1218,8 @@
 		}
 	}
 
+	cstate->sbuf_flush_mask = flush_sbuf;
+
 	if (lm && lm->ops.setup_dim_layer) {
 		cstate = to_sde_crtc_state(crtc->state);
 		for (i = 0; i < cstate->num_dim_layers; i++)
@@ -1163,24 +1227,75 @@
 					mixer, &cstate->dim_layer[i]);
 	}
 
-	if (ctl->ops.setup_sbuf_cfg) {
-		cstate = to_sde_crtc_state(crtc->state);
-		if (!sbuf_mode) {
-			cstate->sbuf_cfg.rot_op_mode =
-					SDE_CTL_ROT_OP_MODE_OFFLINE;
-			cstate->sbuf_prefill_line = 0;
-		} else {
-			cstate->sbuf_cfg.rot_op_mode =
-					SDE_CTL_ROT_OP_MODE_INLINE_SYNC;
-			cstate->sbuf_prefill_line = prefill;
-		}
-
+	if (ctl->ops.setup_sbuf_cfg)
 		ctl->ops.setup_sbuf_cfg(ctl, &cstate->sbuf_cfg);
-	}
 
 	_sde_crtc_program_lm_output_roi(crtc);
 }
 
+static void _sde_crtc_swap_mixers_for_right_partial_update(
+		struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_encoder *drm_enc;
+	bool is_right_only;
+	bool encoder_in_dsc_merge = false;
+
+	if (!crtc || !crtc->state)
+		return;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	if (sde_crtc->num_mixers != CRTC_DUAL_MIXERS)
+		return;
+
+	drm_for_each_encoder(drm_enc, crtc->dev) {
+		if (drm_enc->crtc == crtc &&
+				sde_encoder_is_dsc_merge(drm_enc)) {
+			encoder_in_dsc_merge = true;
+			break;
+		}
+	}
+
+	/**
+	 * For right-only partial update with DSC merge, we swap LM0 & LM1.
+	 * This is due to two reasons:
+	 * - On 8996, there is a DSC HW requirement that in DSC Merge Mode,
+	 *   the left DSC must be used, right DSC cannot be used alone.
+	 *   For right-only partial update, this means swap layer mixers to map
+	 *   Left LM to Right INTF. On later HW this was relaxed.
+	 * - In DSC Merge mode, the physical encoder has already registered
+	 *   PP0 as the master, to switch to right-only we would have to
+	 *   reprogram to be driven by PP1 instead.
+	 * To support both cases, we prefer to support the mixer swap solution.
+	 */
+	if (!encoder_in_dsc_merge)
+		return;
+
+	is_right_only = sde_kms_rect_is_null(&cstate->lm_roi[0]) &&
+			!sde_kms_rect_is_null(&cstate->lm_roi[1]);
+
+	if (is_right_only && !sde_crtc->mixers_swapped) {
+		/* right-only update swap mixers */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = true;
+	} else if (!is_right_only && sde_crtc->mixers_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_crtc->mixers[0], sde_crtc->mixers[1]);
+		sde_crtc->mixers_swapped = false;
+	}
+
+	SDE_DEBUG("%s: right_only %d swapped %d, mix0->lm%d, mix1->lm%d\n",
+			sde_crtc->name, is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+	SDE_EVT32(DRMID(crtc), is_right_only, sde_crtc->mixers_swapped,
+			sde_crtc->mixers[0].hw_lm->idx - LM_0,
+			sde_crtc->mixers[1].hw_lm->idx - LM_0);
+}
+
 /**
  * _sde_crtc_blend_setup - configure crtc mixers
  * @crtc: Pointer to drm crtc structure
@@ -1226,6 +1341,8 @@
 			lm->ops.clear_dim_layer(lm);
 	}
 
+	_sde_crtc_swap_mixers_for_right_partial_update(crtc);
+
 	/* initialize stage cfg */
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
 
@@ -1374,6 +1491,7 @@
 	struct sde_crtc_state *cstate;
 	struct sde_kms *sde_kms;
 	unsigned long flags;
+	bool disable_inprogress = false;
 
 	if (!work) {
 		SDE_ERROR("invalid work handle\n");
@@ -1399,6 +1517,9 @@
 
 	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
 			ktime_to_ns(fevent->ts));
+	disable_inprogress = fevent->event &
+					SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+	fevent->event &= ~SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
 
 	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
 			(fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) ||
@@ -1412,9 +1533,6 @@
 					atomic_read(&sde_crtc->frame_pending));
 			SDE_EVT32(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE1);
-
-			/* don't propagate unexpected frame done events */
-			return;
 		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
 			/* release bandwidth and other resources */
 			SDE_DEBUG("crtc%d ts:%lld last pending\n",
@@ -1422,13 +1540,15 @@
 					ktime_to_ns(fevent->ts));
 			SDE_EVT32(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE2);
-			sde_core_perf_crtc_release_bw(crtc);
+			if (!disable_inprogress)
+				sde_core_perf_crtc_release_bw(crtc);
 		} else {
 			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
 							SDE_EVTLOG_FUNC_CASE3);
 		}
 
-		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE &&
+							!disable_inprogress)
 			sde_core_perf_crtc_update(crtc, 0, false);
 	} else {
 		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
@@ -1464,7 +1584,7 @@
 	pipe_id = drm_crtc_index(crtc);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
-	SDE_EVT32_VERBOSE(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc), event);
 
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
@@ -1483,7 +1603,11 @@
 	fevent->event = event;
 	fevent->crtc = crtc;
 	fevent->ts = ktime_get();
-	kthread_queue_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+	if (event & SDE_ENCODER_FRAME_EVENT_DURING_DISABLE)
+		sde_crtc_frame_event_work(&fevent->work);
+	else
+		kthread_queue_work(&priv->disp_thread[pipe_id].worker,
+								&fevent->work);
 }
 
 void sde_crtc_complete_commit(struct drm_crtc *crtc,
@@ -1534,26 +1658,28 @@
 {
 	struct sde_drm_dim_layer_v1 dim_layer_v1;
 	struct sde_drm_dim_layer_cfg *user_cfg;
+	struct sde_hw_dim_layer *dim_layer;
 	u32 count, i;
 
 	if (!cstate) {
 		SDE_ERROR("invalid cstate\n");
 		return;
 	}
+	dim_layer = cstate->dim_layer;
 
 	if (!usr_ptr) {
-		SDE_DEBUG("dim layer data removed\n");
+		SDE_DEBUG("dim_layer data removed\n");
 		return;
 	}
 
 	if (copy_from_user(&dim_layer_v1, usr_ptr, sizeof(dim_layer_v1))) {
-		SDE_ERROR("failed to copy dim layer data\n");
+		SDE_ERROR("failed to copy dim_layer data\n");
 		return;
 	}
 
 	count = dim_layer_v1.num_layers;
-	if (!count || (count > SDE_MAX_DIM_LAYERS)) {
-		SDE_ERROR("invalid number of Dim Layers:%d", count);
+	if (count > SDE_MAX_DIM_LAYERS) {
+		SDE_ERROR("invalid number of dim_layers:%d", count);
 		return;
 	}
 
@@ -1561,22 +1687,31 @@
 	cstate->num_dim_layers = count;
 	for (i = 0; i < count; i++) {
 		user_cfg = &dim_layer_v1.layer_cfg[i];
-		cstate->dim_layer[i].flags = user_cfg->flags;
-		cstate->dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].rect.x = user_cfg->rect.x1;
-		cstate->dim_layer[i].rect.y = user_cfg->rect.y1;
-		cstate->dim_layer[i].rect.w = user_cfg->rect.x2 -
-						user_cfg->rect.x1 + 1;
-		cstate->dim_layer[i].rect.h = user_cfg->rect.y2 -
-						user_cfg->rect.y1 + 1;
+		dim_layer[i].flags = user_cfg->flags;
+		dim_layer[i].stage = user_cfg->stage + SDE_STAGE_0;
 
-		cstate->dim_layer[i].color_fill = (struct sde_mdss_color) {
+		dim_layer[i].rect.x = user_cfg->rect.x1;
+		dim_layer[i].rect.y = user_cfg->rect.y1;
+		dim_layer[i].rect.w = user_cfg->rect.x2 - user_cfg->rect.x1;
+		dim_layer[i].rect.h = user_cfg->rect.y2 - user_cfg->rect.y1;
+
+		dim_layer[i].color_fill = (struct sde_mdss_color) {
 				user_cfg->color_fill.color_0,
 				user_cfg->color_fill.color_1,
 				user_cfg->color_fill.color_2,
 				user_cfg->color_fill.color_3,
 		};
+
+		SDE_DEBUG("dim_layer[%d] - flags:%d, stage:%d\n",
+				i, dim_layer[i].flags, dim_layer[i].stage);
+		SDE_DEBUG(" rect:{%d,%d,%d,%d}, color:{%d,%d,%d,%d}\n",
+				dim_layer[i].rect.x, dim_layer[i].rect.y,
+				dim_layer[i].rect.w, dim_layer[i].rect.h,
+				dim_layer[i].color_fill.color_0,
+				dim_layer[i].color_fill.color_1,
+				dim_layer[i].color_fill.color_2,
+				dim_layer[i].color_fill.color_3);
 	}
 }
 
@@ -1612,6 +1747,7 @@
 	 * if its fence has timed out. Call input fence wait multiple times if
 	 * fence wait is interrupted due to interrupt call.
 	 */
+	SDE_ATRACE_BEGIN("plane_wait_input_fence");
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		do {
 			kt_wait = ktime_sub(kt_end, ktime_get());
@@ -1623,6 +1759,7 @@
 			rc = sde_plane_wait_input_fence(plane, wait_ms);
 		} while (wait_ms && rc == -ERESTARTSYS);
 	}
+	SDE_ATRACE_END("plane_wait_input_fence");
 }
 
 static void _sde_crtc_setup_mixer_for_encoder(
@@ -1700,6 +1837,23 @@
 	mutex_unlock(&sde_crtc->crtc_lock);
 }
 
+static void _sde_crtc_setup_is_ppsplit(struct drm_crtc_state *state)
+{
+	int i;
+	struct sde_crtc_state *cstate;
+
+	cstate = to_sde_crtc_state(state);
+
+	cstate->is_ppsplit = false;
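+	/* set is_ppsplit if any connector uses the PPSPLIT topology */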
+	for (i = 0; i < cstate->num_connectors; i++) {
+		struct drm_connector *conn = cstate->connectors[i];
+
+		if (sde_connector_get_topology_name(conn) ==
+				SDE_RM_TOPOLOGY_PPSPLIT)
+			cstate->is_ppsplit = true;
+	}
+}
+
 static void _sde_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -1764,6 +1918,7 @@
 
 	if (!sde_crtc->num_mixers) {
 		_sde_crtc_setup_mixers(crtc);
+		_sde_crtc_setup_is_ppsplit(crtc->state);
 		_sde_crtc_setup_lm_bounds(crtc, crtc->state);
 	}
 
@@ -1931,6 +2086,7 @@
 	if (unlikely(!sde_crtc->num_mixers))
 		return;
 
+	SDE_ATRACE_BEGIN("crtc_commit");
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		struct sde_encoder_kickoff_params params = { 0 };
 
@@ -1952,7 +2108,7 @@
 		SDE_ERROR("crtc%d invalid frame pending\n",
 				crtc->base.id);
 		SDE_EVT32(DRMID(crtc), 0);
-		return;
+		goto end;
 	} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
 		/* acquire bandwidth and other resources */
 		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
@@ -1968,6 +2124,9 @@
 
 		sde_encoder_kickoff(encoder);
 	}
+end:
+	SDE_ATRACE_END("crtc_commit");
+	return;
 }
 
 /**
@@ -1991,7 +2150,13 @@
 	dev = crtc->dev;
 
 	if (enable) {
-		if (_sde_crtc_power_enable(sde_crtc, true))
+		int ret;
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
+		ret = _sde_crtc_power_enable(sde_crtc, true);
+		mutex_lock(&sde_crtc->crtc_lock);
+		if (ret)
 			return;
 
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
@@ -2012,7 +2177,11 @@
 
 			sde_encoder_register_vblank_callback(enc, NULL, NULL);
 		}
+
+		/* drop lock since power crtc cb may try to re-acquire lock */
+		mutex_unlock(&sde_crtc->crtc_lock);
 		_sde_crtc_power_enable(sde_crtc, false);
+		mutex_lock(&sde_crtc->crtc_lock);
 	}
 }
 
@@ -2260,8 +2429,7 @@
 
 	if (atomic_read(&sde_crtc->frame_pending)) {
 		/* release bandwidth and other resources */
-		SDE_ERROR("crtc%d invalid frame pending\n",
-				crtc->base.id);
+		SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
 		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
 							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
@@ -2285,6 +2453,9 @@
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 
+	/* disable clk & bw control until clk & bw properties are set */
+	cstate->bw_control = false;
+
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	list_for_each_entry(node, &sde_crtc->user_event_list, list) {
 		ret = 0;
@@ -2486,6 +2657,7 @@
 
 	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
 
+	_sde_crtc_setup_is_ppsplit(state);
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
 	 /* get plane state for all drm planes associated with crtc state */
@@ -2508,9 +2680,10 @@
 		/* check dim layer stage with every plane */
 		for (i = 0; i < cstate->num_dim_layers; i++) {
 			if (pstates[cnt].stage == cstate->dim_layer[i].stage) {
-				SDE_ERROR("plane%d/dimlayer in same stage:%d\n",
-						plane->base.id,
-						cstate->dim_layer[i].stage);
+				SDE_ERROR(
+					"plane:%d/dim_layer:%i-same stage:%d\n",
+					plane->base.id, i,
+					cstate->dim_layer[i].stage);
 				rc = -EINVAL;
 				goto end;
 			}
@@ -2755,19 +2928,19 @@
 			CRTC_PROP_CORE_CLK);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"core_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_CORE_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ab", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_AB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_AB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"mem_ib", 0x0, 0, U64_MAX,
-			SDE_PERF_DEFAULT_MAX_BUS_IB_QUOTA,
+			catalog->perf.max_bw_high * 1000ULL,
 			CRTC_PROP_MEM_IB);
 	msm_property_install_range(&sde_crtc->property_info,
 			"rot_prefill_bw", 0, 0, U64_MAX,
@@ -2781,16 +2954,18 @@
 	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
 		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
 
-	if (catalog->has_dim_layer) {
-		msm_property_install_volatile_range(&sde_crtc->property_info,
-			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
-	}
-
 	msm_property_install_volatile_range(&sde_crtc->property_info,
 		"sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1);
 
 	sde_kms_info_reset(info);
 
+	if (catalog->has_dim_layer) {
+		msm_property_install_volatile_range(&sde_crtc->property_info,
+			"dim_layer_v1", 0x0, 0, ~0, 0, CRTC_PROP_DIM_LAYER_V1);
+		sde_kms_info_add_keyint(info, "dim_layer_v1_max_layers",
+				SDE_MAX_DIM_LAYERS);
+	}
+
 	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
 	sde_kms_info_add_keyint(info, "max_linewidth",
 			catalog->max_mixer_width);
@@ -2891,6 +3066,13 @@
 			case CRTC_PROP_ROI_V1:
 				ret = _sde_crtc_set_roi_v1(state, (void *)val);
 				break;
+			case CRTC_PROP_CORE_CLK:
+			case CRTC_PROP_CORE_AB:
+			case CRTC_PROP_CORE_IB:
+			case CRTC_PROP_MEM_AB:
+			case CRTC_PROP_MEM_IB:
+				cstate->bw_control = true;
+				break;
 			default:
 				/* nothing to do */
 				break;
@@ -2990,6 +3172,7 @@
 	struct drm_display_mode *mode;
 	struct drm_framebuffer *fb;
 	struct drm_plane_state *state;
+	struct sde_crtc_state *cstate;
 
 	int i, out_width;
 
@@ -2998,6 +3181,7 @@
 
 	sde_crtc = s->private;
 	crtc = &sde_crtc->base;
+	cstate = to_sde_crtc_state(crtc->state);
 
 	mutex_lock(&sde_crtc->crtc_lock);
 	mode = &crtc->state->adjusted_mode;
@@ -3022,6 +3206,23 @@
 
 	seq_puts(s, "\n");
 
+	for (i = 0; i < cstate->num_dim_layers; i++) {
+		struct sde_hw_dim_layer *dim_layer = &cstate->dim_layer[i];
+
+		seq_printf(s, "\tdim_layer[%d] stage:%d flags:%d\n",
+				i, dim_layer->stage, dim_layer->flags);
+		seq_printf(s, "\tdst_x:%d dst_y:%d dst_w:%d dst_h:%d\n",
+				dim_layer->rect.x, dim_layer->rect.y,
+				dim_layer->rect.w, dim_layer->rect.h);
+		seq_printf(s,
+			"\tcolor_0:%d color_1:%d color_2:%d color_3:%d\n",
+				dim_layer->color_fill.color_0,
+				dim_layer->color_fill.color_1,
+				dim_layer->color_fill.color_2,
+				dim_layer->color_fill.color_3);
+		seq_puts(s, "\n");
+	}
+
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		pstate = to_sde_plane_state(plane->state);
 		state = plane->state;
@@ -3221,16 +3422,18 @@
 static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
 {
 	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
 	struct sde_crtc_res *res;
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
 	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
-	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
-	seq_printf(s, "core_clk_rate: %llu\n", cstate->cur_perf.core_clk_rate);
+	seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+	seq_printf(s, "core_clk_rate: %llu\n",
+			sde_crtc->cur_perf.core_clk_rate);
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
-			cstate->cur_perf.max_per_pipe_ib);
+			sde_crtc->cur_perf.max_per_pipe_ib);
 
 	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
 	list_for_each_entry(res, &cstate->rp.res_list, list)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 6a22115..a622d9c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -108,6 +108,8 @@
  * @name          : ASCII description of this crtc
  * @num_ctls      : Number of ctl paths in use
  * @num_mixers    : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for a right-only
+ *                  partial update, especially in the case of DSC merge.
  * @mixers        : List of active mixers
  * @event         : Pointer to last received drm vblank event. If there is a
  *                  pending vblank event, this will be non-null.
@@ -139,6 +141,7 @@
  * @event_lock    : Spinlock around event handling code
  * @misr_enable   : boolean entry indicates misr enable/disable status.
  * @power_event   : registered power event handle
+ * @cur_perf      : current performance committed to clock/bandwidth driver
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -147,6 +150,7 @@
 	/* HW Resources reserved for the crtc */
 	u32 num_ctls;
 	u32 num_mixers;
+	bool mixers_swapped;
 	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
 
 	struct drm_pending_vblank_event *event;
@@ -190,6 +194,8 @@
 	bool misr_enable;
 
 	struct sde_power_event *power_event;
+
+	struct sde_core_perf_params cur_perf;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -251,6 +257,8 @@
  * @num_connectors: Number of associated drm connectors
  * @intf_mode     : Interface mode of the primary connector
  * @rsc_client    : sde rsc client when mode is valid
+ * @is_ppsplit    : Whether current topology requires PPSplit special handling
+ * @bw_control    : true if bw/clk controlled by bw/clk properties
  * @crtc_roi      : Current CRTC ROI. Possibly sub-rectangle of mode.
  *                  Origin top left of CRTC.
  * @lm_bounds     : LM boundaries based on current mode full resolution, no ROI.
@@ -263,10 +271,10 @@
  * @property_blobs: Reference pointers for blob properties
  * @num_dim_layers: Number of dim layers
  * @dim_layer: Dim layer configs
- * @cur_perf: current performance state
- * @new_perf: new performance state
+ * @new_perf: new performance state being requested
  * @sbuf_cfg: stream buffer configuration
  * @sbuf_prefill_line: number of line for inline rotator prefetch
+ * @sbuf_flush_mask: flush mask for inline rotator
  */
 struct sde_crtc_state {
 	struct drm_crtc_state base;
@@ -276,7 +284,9 @@
 	enum sde_intf_mode intf_mode;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_update;
+	bool bw_control;
 
+	bool is_ppsplit;
 	struct sde_rect crtc_roi;
 	struct sde_rect lm_bounds[CRTC_DUAL_MIXERS];
 	struct sde_rect lm_roi[CRTC_DUAL_MIXERS];
@@ -288,10 +298,10 @@
 	uint32_t num_dim_layers;
 	struct sde_hw_dim_layer dim_layer[SDE_MAX_DIM_LAYERS];
 
-	struct sde_core_perf_params cur_perf;
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
-	u64 sbuf_prefill_line;
+	u32 sbuf_prefill_line;
+	u32 sbuf_flush_mask;
 
 	struct sde_crtc_respool rp;
 };
@@ -426,10 +436,14 @@
  */
 static inline u32 sde_crtc_get_inline_prefill(struct drm_crtc *crtc)
 {
+	struct sde_crtc_state *cstate;
+
 	if (!crtc || !crtc->state)
 		return 0;
 
-	return to_sde_crtc_state(crtc->state)->sbuf_prefill_line;
+	cstate = to_sde_crtc_state(crtc->state);
+	return cstate->sbuf_cfg.rot_op_mode != SDE_CTL_ROT_OP_MODE_OFFLINE ?
+		cstate->sbuf_prefill_line : 0;
 }
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index f11ba51..5ccd385 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -35,6 +35,7 @@
 #include "sde_power_handle.h"
 #include "sde_hw_dsc.h"
 #include "sde_crtc.h"
+#include "sde_trace.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -123,8 +124,11 @@
  * @cur_master:		Pointer to the current master in this mode. Optimization
  *			Only valid after enable. Cleared as disable.
  * @hw_pp		Handle to the pingpong blocks used for the display. No.
- *                      pingpong blocks can be different than num_phys_encs.
+ *			pingpong blocks can be different than num_phys_encs.
  * @hw_dsc:		Array of DSC block handles used for the display.
+ * @intfs_swapped:	Whether the phys_enc interfaces have been swapped for
+ *			right-only partial update cases, such as pingpong
+ *			split, where the virtual pingpong generates no IRQs
  * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
  * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
@@ -139,7 +143,6 @@
  *				Bit0 = phys_encs[0] etc.
  * @crtc_frame_event_cb:	callback handler for frame event
  * @crtc_frame_event_cb_data:	callback handler private data
- * @crtc_frame_event:		callback event
  * @frame_done_timeout:		frame done timeout in Hz
  * @frame_done_timer:		watchdog timer for frame done event
  * @rsc_client:			rsc client pointer
@@ -155,6 +158,9 @@
  * @topology:                   topology of the display
  * @mode_set_complete:          flag to indicate modeset completion
  * @rsc_cfg:			rsc configuration
+ * @cur_conn_roi:		current connector ROI
+ * @prv_conn_roi:		previous connector ROI, used to skip
+ *				reprogramming when unchanged
+ * @disable_inprogress:		set while sde encoder disable is in progress
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;
@@ -169,6 +175,8 @@
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 
+	bool intfs_swapped;
+
 	void (*crtc_vblank_cb)(void *);
 	void *crtc_vblank_cb_data;
 
@@ -177,7 +185,6 @@
 	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
 	void (*crtc_frame_event_cb)(void *, u32 event);
 	void *crtc_frame_event_cb_data;
-	u32 crtc_frame_event;
 
 	atomic_t frame_done_timeout;
 	struct timer_list frame_done_timer;
@@ -195,17 +202,52 @@
 	bool mode_set_complete;
 
 	struct sde_encoder_rsc_config rsc_cfg;
+	struct sde_rect cur_conn_roi;
+	struct sde_rect prv_conn_roi;
+	bool disable_inprogress;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
 
-inline bool _sde_is_dsc_enabled(struct sde_encoder_virt *sde_enc)
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
 {
-	struct msm_compression_info *comp_info = &sde_enc->disp_info.comp_info;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_compression_info *comp_info;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	comp_info = &sde_enc->disp_info.comp_info;
 
 	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
 }
 
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
+{
+	enum sde_rm_topology_name topology;
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+
+	if (!drm_enc)
+		return false;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	if (!sde_enc->cur_master)
+		return false;
+
+	drm_conn = sde_enc->cur_master->connector;
+	if (!drm_conn)
+		return false;
+
+	topology = sde_connector_get_topology_name(drm_conn);
+	if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
+		return true;
+
+	return false;
+}
+
 static inline int _sde_encoder_power_enable(struct sde_encoder_virt *sde_enc,
 								bool enable)
 {
@@ -320,7 +362,22 @@
 
 	sde_enc = to_sde_encoder_virt(phys_enc->parent);
 	hw_mdptop = phys_enc->hw_mdptop;
-	cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+
+	/*
+	 * Disable split modes since the encoder will be operating as the only
+	 * encoder, either for the entire use case (e.g. single DSI), or for
+	 * this frame in a left-only or right-only partial update.
+	 */
+	if (phys_enc->split_role == ENC_ROLE_SOLO) {
+		if (hw_mdptop->ops.setup_split_pipe)
+			hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+		if (hw_mdptop->ops.setup_pp_split)
+			hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+		return;
+	}
+
+	cfg.en = true;
 	cfg.mode = phys_enc->intf_mode;
 	cfg.intf = interface;
 
@@ -334,8 +391,7 @@
 	else
 		cfg.pp_split_slave = INTF_MAX;
 
-	if (phys_enc->split_role != ENC_ROLE_SLAVE) {
-		/* master/solo encoder */
+	if (phys_enc->split_role == ENC_ROLE_MASTER) {
 		SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
 
 		if (hw_mdptop->ops.setup_split_pipe)
@@ -555,8 +611,14 @@
 
 static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
 		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
-		u32 common_mode, bool ich_reset)
+		u32 common_mode, bool ich_reset, bool enable)
 {
+	if (!enable) {
+		if (hw_pp->ops.disable_dsc)
+			hw_pp->ops.disable_dsc(hw_pp);
+		return;
+	}
+
 	if (hw_dsc->ops.dsc_config)
 		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
 
@@ -570,9 +632,27 @@
 		hw_pp->ops.enable_dsc(hw_pp);
 }
 
+static void _sde_encoder_get_connector_roi(
+		struct sde_encoder_virt *sde_enc,
+		struct sde_rect *merged_conn_roi)
+{
+	struct drm_connector *drm_conn;
+	struct sde_connector_state *c_state;
+
+	if (!sde_enc || !merged_conn_roi)
+		return;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
+	if (!drm_conn || !drm_conn->state)
+		return;
+
+	c_state = to_sde_connector_state(drm_conn->state);
+	sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
+}
+
 static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode = 0;
@@ -580,22 +660,18 @@
 	struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
 	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
 
-	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL ||
-						hw_mdp_top == NULL) {
+	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL || !enc_master) {
 		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
 		return -EINVAL;
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
@@ -608,132 +684,208 @@
 		dsc_common_mode = DSC_MODE_VIDEO;
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+		roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
 
 	_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
-			ich_res);
+			ich_res, true);
 
 	return 0;
 }
-static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
-	struct msm_display_dsc_info *dsc =
-		&sde_enc->disp_info.comp_info.dsc_info;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-		l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width * sde_enc->display_num_of_h_tiles;
-	pic_height = dsc->pic_height;
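+	/* a single display in the mask means a half-panel partial update */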
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
-
-	this_frame_slices = pic_width / dsc->slice_width;
-	intf_ip_w = this_frame_slices * dsc->slice_width;
-
-	intf_ip_w /= 2;
-	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
-
-	enc_ip_w = intf_ip_w;
-	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
-
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
 	if (enc_master->intf_mode == INTF_MODE_VIDEO)
 		dsc_common_mode |= DSC_MODE_VIDEO;
 
-	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+	memcpy(&dsc[0], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[0]));
+	memcpy(&dsc[1], &sde_enc->disp_info.comp_info.dsc_info, sizeof(dsc[1]));
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	/*
+	 * Since both DSC blocks use the same picture dimensions, set the
+	 * same dimensions in both DSC structures.
+	 */
+	_sde_encoder_dsc_update_pic_dim(&dsc[0], roi->w, roi->h);
+	_sde_encoder_dsc_update_pic_dim(&dsc[1], roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc[0].slice_width;
+	intf_ip_w = this_frame_slices * dsc[0].slice_width;
+
+	if (!half_panel_partial_update)
+		intf_ip_w /= 2;
+
+	/*
+	 * In this topology, when both interfaces are active they carry the
+	 * same load, so intf_ip_w is the same for both.
+	 */
+	_sde_encoder_dsc_pclk_param_calc(&dsc[0], intf_ip_w);
+	_sde_encoder_dsc_pclk_param_calc(&dsc[1], intf_ip_w);
+
+	/*
+	 * In this topology there is no dsc_merge, so the uncompressed input
+	 * to the encoder and to the interface is the same.
+	 */
+	enc_ip_w = intf_ip_w;
+	_sde_encoder_dsc_initial_line_calc(&dsc[0], enc_ip_w);
+	_sde_encoder_dsc_initial_line_calc(&dsc[1], enc_ip_w);
+
+	/*
+	 * _sde_encoder_dsc_ich_reset_override_needed must be called only
+	 * after the pic dimension has been updated by
+	 * _sde_encoder_dsc_update_pic_dim.
+	 */
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, &dsc[0]);
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+			roi->w, roi->h, dsc_common_mode);
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		bool active = !!((1 << i) & params->affected_displays);
+
+		SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+				dsc_common_mode, i, active);
+		_sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
+				dsc_common_mode, ich_res, active);
+	}
 
 	return 0;
 }
 
-static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
-	int pic_width, pic_height;
 	int this_frame_slices;
 	int intf_ip_w, enc_ip_w;
 	int ich_res, dsc_common_mode;
 
 	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
-	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
-	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
-	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
-	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
-	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	const struct sde_rect *roi = &sde_enc->cur_conn_roi;
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct msm_display_dsc_info *dsc =
 		&sde_enc->disp_info.comp_info.dsc_info;
+	bool half_panel_partial_update;
+	int i;
 
-	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
-					l_hw_pp == NULL || r_hw_pp == NULL) {
-		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
-		return -EINVAL;
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		hw_pp[i] = sde_enc->hw_pp[i];
+		hw_dsc[i] = sde_enc->hw_dsc[i];
+
+		if (!hw_pp[i] || !hw_dsc[i]) {
+			SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+			return -EINVAL;
+		}
 	}
 
-	pic_width = dsc->pic_width;
-	pic_height = dsc->pic_height;
-	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
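+	/* only one display in the mask implies a half-panel partial update */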
+	half_panel_partial_update =
+			hweight_long(params->affected_displays) == 1;
 
-	this_frame_slices = pic_width / dsc->slice_width;
+	dsc_common_mode = 0;
+	if (!half_panel_partial_update)
+		dsc_common_mode |= DSC_MODE_SPLIT_PANEL | DSC_MODE_MULTIPLEX;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
+
+	this_frame_slices = roi->w / dsc->slice_width;
 	intf_ip_w = this_frame_slices * dsc->slice_width;
 	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
 
 	/*
-	 * when using 2 encoders for the same stream, no. of slices
-	 * need to be same on both the encoders.
+	 * dsc merge case: when using 2 encoders for the same stream,
+	 * the number of slices must be the same on both encoders.
 	 */
 	enc_ip_w = intf_ip_w / 2;
 	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
 
-	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
-
-	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
-	if (enc_master->intf_mode == INTF_MODE_VIDEO)
-		dsc_common_mode |= DSC_MODE_VIDEO;
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(
+			half_panel_partial_update, dsc);
 
 	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
-		pic_width, pic_height, dsc_common_mode);
-	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
-			dsc_common_mode);
+			roi->w, roi->h, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
+			dsc_common_mode, i, params->affected_displays);
 
-	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
-			ich_res);
-	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
-			ich_res);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
+			ich_res, true);
+	_sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
+			ich_res, !half_panel_partial_update);
 
 	return 0;
 }
 
-static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc)
+static int _sde_encoder_update_roi(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_connector *drm_conn;
+	struct drm_display_mode *adj_mode;
+	struct sde_rect roi;
+
+	if (!drm_enc || !drm_enc->crtc || !drm_enc->crtc->state)
+		return -EINVAL;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!sde_enc->cur_master)
+		return -EINVAL;
+
+	adj_mode = &sde_enc->base.crtc->state->adjusted_mode;
+	drm_conn = sde_enc->cur_master->connector;
+
+	_sde_encoder_get_connector_roi(sde_enc, &roi);
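+	/* fall back to the full display size when no connector ROI is set */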
+	if (sde_kms_rect_is_null(&roi)) {
+		roi.w = adj_mode->hdisplay;
+		roi.h = adj_mode->vdisplay;
+	}
+
+	memcpy(&sde_enc->prv_conn_roi, &sde_enc->cur_conn_roi,
+			sizeof(sde_enc->prv_conn_roi));
+	memcpy(&sde_enc->cur_conn_roi, &roi, sizeof(sde_enc->cur_conn_roi));
+
+	return 0;
+}
+
+static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
+		struct sde_encoder_kickoff_params *params)
 {
 	enum sde_rm_topology_name topology;
-	struct drm_connector *drm_conn = sde_enc->phys_encs[0]->connector;
+	struct drm_connector *drm_conn;
 	int ret = 0;
 
+	if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
+			!sde_enc->phys_encs[0]->connector)
+		return -EINVAL;
+
+	drm_conn = sde_enc->phys_encs[0]->connector;
+
 	topology = sde_connector_get_topology_name(drm_conn);
 	if (topology == SDE_RM_TOPOLOGY_NONE) {
 		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
@@ -743,15 +895,19 @@
 	SDE_DEBUG_ENC(sde_enc, "\n");
 	SDE_EVT32(DRMID(&sde_enc->base));
 
+	if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
+			&sde_enc->prv_conn_roi))
+		return ret;
+
 	switch (topology) {
 	case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
 		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
-		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
 		break;
 	case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
-		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
+		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc, params);
 		break;
 	default:
 		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
@@ -1217,7 +1373,6 @@
 	struct sde_kms *sde_kms;
 	struct sde_hw_mdp *hw_mdptop;
 	int i = 0;
-	int ret = 0;
 	struct sde_watchdog_te_status te_cfg = { 0 };
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@@ -1252,12 +1407,6 @@
 				sde_enc->cur_master->hw_mdptop,
 				sde_kms->catalog);
 
-	if (_sde_is_dsc_enabled(sde_enc)) {
-		ret = _sde_encoder_dsc_setup(sde_enc);
-		if (ret)
-			SDE_ERROR_ENC(sde_enc, "failed to setup DSC:%d\n", ret);
-	}
-
 	if (hw_mdptop->ops.setup_vsync_sel) {
 		for (i = 0; i < sde_enc->num_phys_encs; i++)
 			te_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
@@ -1308,6 +1457,7 @@
 	SDE_EVT32(DRMID(drm_enc));
 
 	sde_enc->cur_master = NULL;
+	sde_enc->disable_inprogress = false;
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
@@ -1366,6 +1516,7 @@
 
 	priv = drm_enc->dev->dev_private;
 	sde_kms = to_sde_kms(priv->kms);
+	sde_enc->disable_inprogress = true;
 
 	SDE_EVT32(DRMID(drm_enc));
 
@@ -1432,6 +1583,7 @@
 	if (!drm_enc || !phy_enc)
 		return;
 
+	SDE_ATRACE_BEGIN("encoder_vblank_callback");
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
@@ -1440,6 +1592,7 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 
 	atomic_inc(&phy_enc->vsync_cnt);
+	SDE_ATRACE_END("encoder_vblank_callback");
 }
 
 static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
@@ -1448,8 +1601,10 @@
 	if (!phy_enc)
 		return;
 
+	SDE_ATRACE_BEGIN("encoder_underrun_callback");
 	atomic_inc(&phy_enc->underrun_cnt);
 	SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+	SDE_ATRACE_END("encoder_underrun_callback");
 }
 
 void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
@@ -1516,7 +1671,6 @@
 	for (i = 0; i < sde_enc->num_phys_encs; i++)
 		if (sde_enc->phys_encs[i] == ready_phys) {
 			clear_bit(i, sde_enc->frame_busy_mask);
-			sde_enc->crtc_frame_event |= event;
 			SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
 					sde_enc->frame_busy_mask[0]);
 		}
@@ -1528,10 +1682,12 @@
 		sde_encoder_resource_control(drm_enc,
 				SDE_ENC_RC_EVENT_FRAME_DONE);
 
+		if (sde_enc->disable_inprogress)
+			event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+
 		if (sde_enc->crtc_frame_event_cb)
 			sde_enc->crtc_frame_event_cb(
-					sde_enc->crtc_frame_event_cb_data,
-					sde_enc->crtc_frame_event);
+				sde_enc->crtc_frame_event_cb_data, event);
 	}
 }
 
@@ -1713,7 +1869,6 @@
 	}
 
 	pending_flush = 0x0;
-	sde_enc->crtc_frame_event = 0;
 
 	/* update pending counts and trigger kickoff ctl flush atomically */
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
@@ -1763,6 +1918,65 @@
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
+static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
+		struct drm_encoder *drm_enc,
+		unsigned long *affected_displays,
+		int num_active_phys)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *master;
+	enum sde_rm_topology_name topology;
+	bool is_right_only;
+
+	if (!drm_enc || !affected_displays)
+		return;
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	master = sde_enc->cur_master;
+	if (!master || !master->connector)
+		return;
+
+	topology = sde_connector_get_topology_name(master->connector);
+	if (topology != SDE_RM_TOPOLOGY_PPSPLIT)
+		return;
+
+	/*
+	 * For pingpong split, the slave pingpong won't generate IRQs. For
+	 * right-only updates we can't just swap pingpongs or swap the
+	 * master/slave assignment; we actually have to swap the interfaces
+	 * so that the master physical encoder uses a pingpong/interface
+	 * that generates IRQs it can wait on.
+	 */
+	is_right_only = !test_bit(0, affected_displays) &&
+			test_bit(1, affected_displays);
+
+	if (is_right_only && !sde_enc->intfs_swapped) {
+		/* right-only update, swap interfaces */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = true;
+	} else if (!is_right_only && sde_enc->intfs_swapped) {
+		/* left-only or full update, swap back */
+		swap(sde_enc->phys_encs[0]->intf_idx,
+				sde_enc->phys_encs[1]->intf_idx);
+		sde_enc->intfs_swapped = false;
+	}
+
+	SDE_DEBUG_ENC(sde_enc,
+			"right_only %d swapped %d phys0->intf%d, phys1->intf%d\n",
+			is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0);
+	SDE_EVT32(DRMID(drm_enc), is_right_only, sde_enc->intfs_swapped,
+			sde_enc->phys_encs[0]->intf_idx - INTF_0,
+			sde_enc->phys_encs[1]->intf_idx - INTF_0,
+			*affected_displays);
+
+	/* ppsplit always uses the master since the pp slave has no irqs */
+	if (num_active_phys == 1)
+		*affected_displays = BIT(0);
+}
+
 static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
 		struct sde_encoder_kickoff_params *params)
 {
@@ -1785,6 +1999,10 @@
 	SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
 			params->affected_displays, num_active_phys);
 
+	/* for left/right only update, ppsplit master switches interface */
+	_sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
+			&params->affected_displays, num_active_phys);
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		enum sde_enc_split_role prv_role, new_role;
 		bool active;
@@ -1814,6 +2032,9 @@
 		SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
 				phys->hw_pp->idx - PINGPONG_0, prv_role,
 				phys->split_role, active);
+		SDE_EVT32(DRMID(drm_enc), params->affected_displays,
+				phys->hw_pp->idx - PINGPONG_0, prv_role,
+				phys->split_role, active, num_active_phys);
 	}
 }
 
@@ -1892,6 +2113,8 @@
 
 	_sde_encoder_update_master(drm_enc, params);
 
+	_sde_encoder_update_roi(drm_enc);
+
 	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
 		rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
 		if (rc)
@@ -1899,6 +2122,12 @@
 					sde_enc->cur_master->connector->base.id,
 					rc);
 	}
+
+	if (sde_encoder_is_dsc_enabled(drm_enc)) {
+		rc = _sde_encoder_dsc_setup(sde_enc, params);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
+	}
 }
 
 void sde_encoder_kickoff(struct drm_encoder *drm_enc)
@@ -1911,6 +2140,7 @@
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
+	SDE_ATRACE_BEGIN("encoder_kickoff");
 	sde_enc = to_sde_encoder_virt(drm_enc);
 
 	SDE_DEBUG_ENC(sde_enc, "\n");
@@ -1930,6 +2160,7 @@
 		if (phys && phys->ops.handle_post_kickoff)
 			phys->ops.handle_post_kickoff(phys);
 	}
+	SDE_ATRACE_END("encoder_kickoff");
 }
 
 int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
@@ -2357,6 +2588,9 @@
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
 		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
 		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_DP;
 	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
 		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
 		intf_type = INTF_WB;
@@ -2454,6 +2688,7 @@
 	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
 	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
 	struct msm_drm_private *priv;
+	u32 event;
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid parameters\n");
@@ -2471,13 +2706,14 @@
 		return;
 	}
 
-	SDE_EVT32(DRMID(drm_enc), 2, sde_enc->crtc_frame_event);
-	SDE_ERROR_ENC(sde_enc, "frame done timeout, frame_event %d\n",
-			sde_enc->crtc_frame_event);
+	SDE_ERROR_ENC(sde_enc, "frame done timeout\n");
 
-	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
-			sde_enc->crtc_frame_event |
-			SDE_ENCODER_FRAME_EVENT_ERROR);
+	event =	SDE_ENCODER_FRAME_EVENT_ERROR;
+	if (sde_enc->disable_inprogress)
+		event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE;
+
+	SDE_EVT32(DRMID(drm_enc), event);
+	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event);
 }
 
 static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index 7292a12..d3a9bb4 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -27,6 +27,7 @@
 #define SDE_ENCODER_FRAME_EVENT_DONE		BIT(0)
 #define SDE_ENCODER_FRAME_EVENT_ERROR		BIT(1)
 #define SDE_ENCODER_FRAME_EVENT_PANEL_DEAD	BIT(2)
+#define SDE_ENCODER_FRAME_EVENT_DURING_DISABLE	BIT(3)
 
 /**
  * Encoder functions and data types
@@ -149,6 +150,20 @@
 void sde_encoder_virt_restore(struct drm_encoder *encoder);
 
 /**
+ * sde_encoder_is_dsc_enabled - check if encoder is in DSC mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC mode
+ */
+bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc);
+
+/**
+ * sde_encoder_is_dsc_merge - check if encoder is in DSC merge mode
+ * @drm_enc: Pointer to drm encoder object
+ * @Return: true if encoder is in DSC merge mode
+ */
+bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc);
+
+/**
  * sde_encoder_init - initialize virtual encoder object
  * @dev:        Pointer to drm device structure
  * @disp_info:  Pointer to display information structure
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 3d6dc32..c2ef28d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -294,6 +294,7 @@
  * @bypass_irqreg:	Bypass irq register/unregister if non-zero
  * @wbdone_complete:	for wbdone irq synchronization
  * @wb_cfg:		Writeback hardware configuration
+ * @cdp_cfg:		Writeback CDP configuration
  * @intf_cfg:		Interface hardware configuration
  * @wb_roi:		Writeback region-of-interest
  * @wb_fmt:		Writeback pixel format
@@ -315,6 +316,7 @@
 	u32 bypass_irqreg;
 	struct completion wbdone_complete;
 	struct sde_hw_wb_cfg wb_cfg;
+	struct sde_hw_wb_cdp_cfg cdp_cfg;
 	struct sde_hw_intf_cfg intf_cfg;
 	struct sde_rect wb_roi;
 	const struct sde_format *wb_fmt;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 572bd9e..7adab09 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -615,7 +615,8 @@
 			phys_enc->hw_pp->idx - PINGPONG_0);
 	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
 
-	_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		_sde_encoder_phys_cmd_update_intf_cfg(phys_enc);
 	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
 }
 
@@ -699,6 +700,8 @@
 		}
 	}
 
+	if (phys_enc->hw_pp->ops.enable_tearcheck)
+		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
@@ -832,15 +835,28 @@
 		struct sde_encoder_phys *phys_enc,
 		enum sde_enc_split_role role)
 {
-	struct sde_encoder_phys_cmd *cmd_enc =
-		to_sde_encoder_phys_cmd(phys_enc);
-	enum sde_enc_split_role old_role = phys_enc->split_role;
+	struct sde_encoder_phys_cmd *cmd_enc;
+	enum sde_enc_split_role old_role;
+	bool is_ppsplit;
+
+	if (!phys_enc)
+		return;
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	old_role = phys_enc->split_role;
+	is_ppsplit = _sde_encoder_phys_is_ppsplit(phys_enc);
+
+	phys_enc->split_role = role;
 
 	SDE_DEBUG_CMDENC(cmd_enc, "old role %d new role %d\n",
 			old_role, role);
 
-	phys_enc->split_role = role;
-	if (role == ENC_ROLE_SKIP || role == old_role)
+	/*
+	 * ppsplit solo needs to reprogram because the intf may have been
+	 * swapped, without the role changing, across back-to-back
+	 * left-only/right-only commits
+	 */
+	if (!(is_ppsplit && role == ENC_ROLE_SOLO) &&
+			(role == old_role || role == ENC_ROLE_SKIP))
 		return;
 
 	sde_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 5cb84b4..488f5c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -222,7 +222,7 @@
  * @rot_fetch_lines: number of line to prefill, or 0 to disable
  */
 static void programmable_rot_fetch_config(struct sde_encoder_phys *phys_enc,
-		u64 rot_fetch_lines)
+		u32 rot_fetch_lines)
 {
 	struct sde_encoder_phys_vid *vid_enc =
 		to_sde_encoder_phys_vid(phys_enc);
@@ -232,9 +232,12 @@
 	u32 horiz_total = 0;
 	u32 vert_total = 0;
 	u32 rot_fetch_start_vsync_counter = 0;
+	u32 flush_mask = 0;
 	unsigned long lock_flags;
 
-	if (!phys_enc || !vid_enc->hw_intf ||
+	if (!phys_enc || !vid_enc->hw_intf || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.get_bitmask_intf ||
+			!phys_enc->hw_ctl->ops.update_pending_flush ||
 			!vid_enc->hw_intf->ops.setup_rot_start)
 		return;
 
@@ -253,9 +256,14 @@
 	}
 
 	SDE_DEBUG_VIDENC(vid_enc,
-		"rot_fetch_lines %llu rot_fetch_start_vsync_counter %u\n",
+		"rot_fetch_lines %u rot_fetch_start_vsync_counter %u\n",
 		rot_fetch_lines, rot_fetch_start_vsync_counter);
 
+	phys_enc->hw_ctl->ops.get_bitmask_intf(
+			phys_enc->hw_ctl, &flush_mask, vid_enc->hw_intf->idx);
+	phys_enc->hw_ctl->ops.update_pending_flush(
+			phys_enc->hw_ctl, flush_mask);
+
 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
 	vid_enc->hw_intf->ops.setup_rot_start(vid_enc->hw_intf, &f);
 	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 28a2b16..1657b9b 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -21,6 +21,7 @@
 #include "sde_core_irq.h"
 #include "sde_wb.h"
 #include "sde_vbif.h"
+#include "sde_crtc.h"
 
 #define to_sde_encoder_phys_wb(x) \
 	container_of(x, struct sde_encoder_phys_wb, base)
@@ -104,6 +105,48 @@
 }
 
 /**
+ * sde_encoder_phys_wb_set_qos_remap - set QoS remapper for writeback
+ * @phys_enc:	Pointer to physical encoder
+ */
+static void sde_encoder_phys_wb_set_qos_remap(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_wb *hw_wb;
+	struct drm_crtc *crtc;
+	struct sde_vbif_set_qos_params qos_params;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->crtc) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	crtc = phys_enc->parent->crtc;
+
+	if (!wb_enc->hw_wb || !wb_enc->hw_wb->caps) {
+		SDE_ERROR("invalid writeback hardware\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = hw_wb->caps->vbif_idx;
+	qos_params.xin_id = hw_wb->caps->xin_id;
+	qos_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+	qos_params.num = hw_wb->idx - WB_0;
+	qos_params.is_rt = sde_crtc_get_client_type(crtc) != NRT_CLIENT;
+
+	SDE_DEBUG("[qos_remap] wb:%d vbif:%d xin:%d rt:%d\n",
+			qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(phys_enc->sde_kms, &qos_params);
+}
+
+/**
  * sde_encoder_phys_setup_cdm - setup chroma down block
  * @phys_enc:	Pointer to physical encoder
  * @fb:		Pointer to output framebuffer
@@ -205,16 +248,18 @@
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb;
 	struct sde_hw_wb_cfg *wb_cfg;
+	struct sde_hw_wb_cdp_cfg *cdp_cfg;
 	const struct msm_format *format;
 	int ret, mmu_id;
 
-	if (!phys_enc) {
+	if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) {
 		SDE_ERROR("invalid encoder\n");
 		return;
 	}
 
 	hw_wb = wb_enc->hw_wb;
 	wb_cfg = &wb_enc->wb_cfg;
+	cdp_cfg = &wb_enc->cdp_cfg;
 	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
 
 	wb_cfg->intf_mode = phys_enc->intf_mode;
@@ -282,6 +327,21 @@
 	if (hw_wb->ops.setup_outformat)
 		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
 
+	if (hw_wb->ops.setup_cdp) {
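+		/* CDP setup based on catalog NRT settings and output format */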
+		memset(cdp_cfg, 0, sizeof(struct sde_hw_wb_cdp_cfg));
+
+		cdp_cfg->enable = phys_enc->sde_kms->catalog->perf.cdp_cfg
+				[SDE_PERF_CDP_USAGE_NRT].wr_enable;
+		cdp_cfg->ubwc_meta_enable =
+				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format);
+		cdp_cfg->tile_amortize_enable =
+				SDE_FORMAT_IS_UBWC(wb_cfg->dest.format) ||
+				SDE_FORMAT_IS_TILE(wb_cfg->dest.format);
+		cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+		hw_wb->ops.setup_cdp(hw_wb, cdp_cfg);
+	}
+
 	if (hw_wb->ops.setup_outaddress)
 		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
 }
@@ -528,6 +588,8 @@
 
 	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
 
+	sde_encoder_phys_wb_set_qos_remap(phys_enc);
+
 	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
 
 	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 1faa46e2..eb62716 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -79,7 +79,6 @@
 /* maximum XIN halt timeout in usec */
 #define VBIF_XIN_HALT_TIMEOUT		0x4000
 
-#define DEFAULT_CREQ_LUT_NRT		0x0
 #define DEFAULT_PIXEL_RAM_SIZE		(50 * 1024)
 
 /* access property value based on prop_type and hardware index */
@@ -137,7 +136,6 @@
 	QSEED_TYPE,
 	CSC_TYPE,
 	PANIC_PER_PIPE,
-	CDP,
 	SRC_SPLIT,
 	DIM_LAYER,
 	SMART_DMA_REV,
@@ -160,6 +158,13 @@
 	PERF_DOWNSCALING_PREFILL_LINES,
 	PERF_XTRA_PREFILL_LINES,
 	PERF_AMORTIZABLE_THRESHOLD,
+	PERF_DANGER_LUT,
+	PERF_SAFE_LUT,
+	PERF_QOS_LUT_LINEAR,
+	PERF_QOS_LUT_MACROTILE,
+	PERF_QOS_LUT_NRT,
+	PERF_QOS_LUT_CWB,
+	PERF_CDP_SETTING,
 	PERF_PROP_MAX,
 };
 
@@ -170,8 +175,6 @@
 	SSPP_XIN,
 	SSPP_CLK_CTRL,
 	SSPP_CLK_STATUS,
-	SSPP_DANGER,
-	SSPP_SAFE,
 	SSPP_SCALE_SIZE,
 	SSPP_VIG_BLOCKS,
 	SSPP_RGB_BLOCKS,
@@ -285,6 +288,10 @@
 	VBIF_DEFAULT_OT_WR_LIMIT,
 	VBIF_DYNAMIC_OT_RD_LIMIT,
 	VBIF_DYNAMIC_OT_WR_LIMIT,
+	VBIF_QOS_RT_REMAP,
+	VBIF_QOS_NRT_REMAP,
+	VBIF_MEMTYPE_0,
+	VBIF_MEMTYPE_1,
 	VBIF_PROP_MAX,
 };
 
@@ -344,7 +351,6 @@
 	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
 	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
 	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
-	{CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
 	{SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
 	{DIM_LAYER, "qcom,sde-has-dim-layer", false, PROP_TYPE_BOOL},
 	{SMART_DMA_REV, "qcom,sde-smart-dma-rev", false, PROP_TYPE_STRING},
@@ -376,6 +382,18 @@
 			false, PROP_TYPE_U32},
 	{PERF_AMORTIZABLE_THRESHOLD, "qcom,sde-amortizable-threshold",
 			false, PROP_TYPE_U32},
+	{PERF_DANGER_LUT, "qcom,sde-danger-lut", false, PROP_TYPE_U32_ARRAY},
+	{PERF_SAFE_LUT, "qcom,sde-safe-lut", false, PROP_TYPE_U32_ARRAY},
+	{PERF_QOS_LUT_LINEAR, "qcom,sde-qos-lut-linear", false,
+			PROP_TYPE_U32_ARRAY},
+	{PERF_QOS_LUT_MACROTILE, "qcom,sde-qos-lut-macrotile", false,
+			PROP_TYPE_U32_ARRAY},
+	{PERF_QOS_LUT_NRT, "qcom,sde-qos-lut-nrt", false,
+			PROP_TYPE_U32_ARRAY},
+	{PERF_QOS_LUT_CWB, "qcom,sde-qos-lut-cwb", false,
+			PROP_TYPE_U32_ARRAY},
+	{PERF_CDP_SETTING, "qcom,sde-cdp-setting", false,
+			PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type sspp_prop[] = {
@@ -387,8 +405,6 @@
 		PROP_TYPE_BIT_OFFSET_ARRAY},
 	{SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
 		PROP_TYPE_BIT_OFFSET_ARRAY},
-	{SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
-	{SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
 	{SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
 	{SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
 	{SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
@@ -512,6 +528,12 @@
 		PROP_TYPE_U32_ARRAY},
 	{VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
 		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_RT_REMAP, "qcom,sde-vbif-qos-rt-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_QOS_NRT_REMAP, "qcom,sde-vbif-qos-nrt-remap", false,
+		PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_0, "qcom,sde-vbif-memtype-0", false, PROP_TYPE_U32_ARRAY},
+	{VBIF_MEMTYPE_1, "qcom,sde-vbif-memtype-1", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
@@ -786,6 +808,8 @@
 	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
 	sspp->type = SSPP_TYPE_VIG;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
+	if (sde_cfg->vbif_qos_nlvl == 8)
+		set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
 	(*vig_count)++;
 
 	if (!prop_value)
@@ -878,6 +902,8 @@
 	sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
 	sspp->type = SSPP_TYPE_RGB;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
+	if (sde_cfg->vbif_qos_nlvl == 8)
+		set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
 	(*rgb_count)++;
 
 	if (!prop_value)
@@ -948,6 +974,8 @@
 			sspp->id - SSPP_VIG0);
 	sspp->type = SSPP_TYPE_DMA;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
+	if (sde_cfg->vbif_qos_nlvl == 8)
+		set_bit(SDE_SSPP_QOS_8LVL, &sspp->features);
 	(*dma_count)++;
 }
 
@@ -964,7 +992,6 @@
 	struct sde_sspp_cfg *sspp;
 	struct sde_sspp_sub_blks *sblk;
 	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
-	u32 danger_count = 0, safe_count = 0;
 	struct device_node *snp = NULL;
 
 	prop_value = kzalloc(SSPP_PROP_MAX *
@@ -979,16 +1006,6 @@
 	if (rc)
 		goto end;
 
-	rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
-			&prop_count[SSPP_DANGER], &danger_count);
-	if (rc)
-		goto end;
-
-	rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
-			&prop_count[SSPP_SAFE], &safe_count);
-	if (rc)
-		goto end;
-
 	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
 					prop_exists, prop_value);
 	if (rc)
@@ -1049,6 +1066,16 @@
 
 		set_bit(SDE_SSPP_SRC, &sspp->features);
 
+		if (sde_cfg->has_cdp)
+			set_bit(SDE_SSPP_CDP, &sspp->features);
+
+		if (sde_cfg->ts_prefill_rev == 1) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+		} else if (sde_cfg->ts_prefill_rev == 2) {
+			set_bit(SDE_SSPP_TS_PREFILL, &sspp->features);
+			set_bit(SDE_SSPP_TS_PREFILL_REC1, &sspp->features);
+		}
+
 		sblk->smart_dma_priority =
 			PROP_VALUE_ACCESS(prop_value, SSPP_SMART_DMA, i);
 
@@ -1086,19 +1113,6 @@
 		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
 
 		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
-		sblk->danger_lut_linear =
-			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
-		sblk->danger_lut_tile =
-			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
-		sblk->danger_lut_nrt =
-			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
-		sblk->safe_lut_linear =
-			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
-		sblk->safe_lut_tile =
-			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
-		sblk->safe_lut_nrt =
-			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
-		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
 		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
 		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
 
@@ -1121,15 +1135,8 @@
 		}
 
 		SDE_DEBUG(
-			"xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
+			"xin:%d ram:%d clk%d:%x/%d\n",
 			sspp->xin_id,
-			sblk->danger_lut_linear,
-			sblk->danger_lut_tile,
-			sblk->danger_lut_nrt,
-			sblk->safe_lut_linear,
-			sblk->safe_lut_tile,
-			sblk->safe_lut_nrt,
-			sblk->creq_lut_nrt,
 			sblk->pixel_ram_size,
 			sspp->clk_ctrl,
 			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
@@ -1400,6 +1407,8 @@
 			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
 
 		intf->prog_fetch_lines_worst_case =
+				!prop_exists[INTF_PREFETCH] ?
+				sde_cfg->perf.min_prefill_lines :
 				PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
 
 		of_property_read_string_index(np,
@@ -1501,6 +1510,13 @@
 		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
 		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
 
+		if (sde_cfg->has_cdp)
+			set_bit(SDE_WB_CDP, &wb->features);
+
+		set_bit(SDE_WB_QOS, &wb->features);
+		if (sde_cfg->vbif_qos_nlvl == 8)
+			set_bit(SDE_WB_QOS_8LVL, &wb->features);
+
 		if (sde_cfg->has_wb_ubwc)
 			set_bit(SDE_WB_UBWC, &wb->features);
 
@@ -1926,7 +1942,7 @@
 	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[VBIF_PROP_MAX];
-	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	u32 off_count, vbif_len;
 	struct sde_vbif_cfg *vbif;
 
 	if (!sde_cfg) {
@@ -1948,12 +1964,32 @@
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], NULL);
 	if (rc)
 		goto end;
 
 	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
-			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_RT_REMAP], 1,
+			&prop_count[VBIF_QOS_RT_REMAP], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_QOS_NRT_REMAP], 1,
+			&prop_count[VBIF_QOS_NRT_REMAP], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_0], 1,
+			&prop_count[VBIF_MEMTYPE_0], NULL);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_MEMTYPE_1], 1,
+			&prop_count[VBIF_MEMTYPE_1], NULL);
 	if (rc)
 		goto end;
 
@@ -2048,6 +2084,76 @@
 				vbif->dynamic_ot_rd_tbl.count ||
 				vbif->dynamic_ot_wr_tbl.count)
 			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+
+		vbif->qos_rt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_RT_REMAP];
+		SDE_DEBUG("qos_rt_tbl.npriority_lvl=%u\n",
+				vbif->qos_rt_tbl.npriority_lvl);
+		if (vbif->qos_rt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_rt_tbl.priority_lvl = kcalloc(
+				vbif->qos_rt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_rt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_rt_tbl.npriority_lvl) {
+			vbif->qos_rt_tbl.npriority_lvl = 0;
+			vbif->qos_rt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos rt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_rt_tbl.npriority_lvl; j++) {
+			vbif->qos_rt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_RT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_rt_tbl.priority_lvl[j]);
+		}
+
+		vbif->qos_nrt_tbl.npriority_lvl =
+				prop_count[VBIF_QOS_NRT_REMAP];
+		SDE_DEBUG("qos_nrt_tbl.npriority_lvl=%u\n",
+				vbif->qos_nrt_tbl.npriority_lvl);
+
+		if (vbif->qos_nrt_tbl.npriority_lvl == sde_cfg->vbif_qos_nlvl) {
+			vbif->qos_nrt_tbl.priority_lvl = kcalloc(
+				vbif->qos_nrt_tbl.npriority_lvl, sizeof(u32),
+				GFP_KERNEL);
+			if (!vbif->qos_nrt_tbl.priority_lvl) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		} else if (vbif->qos_nrt_tbl.npriority_lvl) {
+			vbif->qos_nrt_tbl.npriority_lvl = 0;
+			vbif->qos_nrt_tbl.priority_lvl = NULL;
+			SDE_ERROR("invalid qos nrt table\n");
+		}
+
+		for (j = 0; j < vbif->qos_nrt_tbl.npriority_lvl; j++) {
+			vbif->qos_nrt_tbl.priority_lvl[j] =
+				PROP_VALUE_ACCESS(prop_value,
+						VBIF_QOS_NRT_REMAP, j);
+			SDE_DEBUG("lvl[%d]=%u\n", j,
+					vbif->qos_nrt_tbl.priority_lvl[j]);
+		}
+
+		if (vbif->qos_rt_tbl.npriority_lvl ||
+				vbif->qos_nrt_tbl.npriority_lvl)
+			set_bit(SDE_VBIF_QOS_REMAP, &vbif->features);
+
+		vbif->memtype_count = prop_count[VBIF_MEMTYPE_0] +
+					prop_count[VBIF_MEMTYPE_1];
+		if (vbif->memtype_count > MAX_XIN_COUNT) {
+			vbif->memtype_count = 0;
+			SDE_ERROR("too many memtype defs, ignoring entries\n");
+			continue;
+		}
+		for (j = 0, k = 0; j < prop_count[VBIF_MEMTYPE_0]; j++)
+			vbif->memtype[k++] = PROP_VALUE_ACCESS(
+					prop_value, VBIF_MEMTYPE_0, j);
+		for (j = 0; j < prop_count[VBIF_MEMTYPE_1]; j++)
+			vbif->memtype[k++] = PROP_VALUE_ACCESS(
+					prop_value, VBIF_MEMTYPE_1, j);
 	}
 
 end:
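
The qos_rt_tbl / qos_nrt_tbl arrays are only accepted when their length matches the
target's vbif_qos_nlvl (4 or 8 levels), and the catalog header documents them as
priority levels in ascending order. A plausible reading, sketched here as a
stand-alone user-space program, is that the array index is the original transaction
priority and the stored value is the level it gets remapped to; the table contents,
the xin id and the stub function are all made-up stand-ins for illustration, not
driver code.

#include <stdio.h>
#include <stdint.h>

struct qos_tbl {			/* mirrors struct sde_vbif_qos_tbl */
	uint32_t npriority_lvl;
	const uint32_t *priority_lvl;
};

/* stands in for the driver's set_qos_remap() vbif op */
static void set_qos_remap(uint32_t xin_id, uint32_t level, uint32_t remap)
{
	printf("xin %u: priority %u -> %u\n", xin_id, level, remap);
}

int main(void)
{
	/* hypothetical 8-entry real-time table for an 8-level target */
	static const uint32_t rt_lvls[8] = { 3, 3, 4, 4, 5, 5, 6, 6 };
	const struct qos_tbl rt = { 8, rt_lvls };
	uint32_t i;

	for (i = 0; i < rt.npriority_lvl; i++)
		set_qos_remap(0, i, rt.priority_lvl[i]);
	return 0;
}
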
@@ -2300,6 +2406,7 @@
 	struct sde_prop_value *prop_value = NULL;
 	bool prop_exists[PERF_PROP_MAX];
 	const char *str = NULL;
+	int j, k;
 
 	if (!cfg) {
 		SDE_ERROR("invalid argument\n");
@@ -2319,6 +2426,41 @@
 	if (rc)
 		goto freeprop;
 
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_DANGER_LUT], 1,
+			&prop_count[PERF_DANGER_LUT], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_SAFE_LUT], 1,
+			&prop_count[PERF_SAFE_LUT], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_LINEAR], 1,
+			&prop_count[PERF_QOS_LUT_LINEAR], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_MACROTILE], 1,
+			&prop_count[PERF_QOS_LUT_MACROTILE], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_NRT], 1,
+			&prop_count[PERF_QOS_LUT_NRT], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_QOS_LUT_CWB], 1,
+			&prop_count[PERF_QOS_LUT_CWB], NULL);
+	if (rc)
+		goto freeprop;
+
+	rc = _validate_dt_entry(np, &sde_perf_prop[PERF_CDP_SETTING], 1,
+			&prop_count[PERF_CDP_SETTING], NULL);
+	if (rc)
+		goto freeprop;
+
 	rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
 			prop_count, prop_exists, prop_value);
 	if (rc)
@@ -2392,6 +2534,93 @@
 					PERF_AMORTIZABLE_THRESHOLD, 0) :
 			DEFAULT_AMORTIZABLE_THRESHOLD;
 
+	if (prop_exists[PERF_DANGER_LUT] && prop_count[PERF_DANGER_LUT] <=
+			SDE_QOS_LUT_USAGE_MAX) {
+		for (j = 0; j < prop_count[PERF_DANGER_LUT]; j++) {
+			cfg->perf.danger_lut_tbl[j] =
+					PROP_VALUE_ACCESS(prop_value,
+						PERF_DANGER_LUT, j);
+			SDE_DEBUG("danger usage:%d lut:0x%x\n",
+					j, cfg->perf.danger_lut_tbl[j]);
+		}
+	}
+
+	if (prop_exists[PERF_SAFE_LUT] && prop_count[PERF_SAFE_LUT] <=
+			SDE_QOS_LUT_USAGE_MAX) {
+		for (j = 0; j < prop_count[PERF_SAFE_LUT]; j++) {
+			cfg->perf.safe_lut_tbl[j] =
+					PROP_VALUE_ACCESS(prop_value,
+						PERF_SAFE_LUT, j);
+			SDE_DEBUG("safe usage:%d lut:0x%x\n",
+					j, cfg->perf.safe_lut_tbl[j]);
+		}
+	}
+
+	for (j = 0; j < SDE_QOS_LUT_USAGE_MAX; j++) {
+		static const u32 prop_key[SDE_QOS_LUT_USAGE_MAX] = {
+			[SDE_QOS_LUT_USAGE_LINEAR] =
+					PERF_QOS_LUT_LINEAR,
+			[SDE_QOS_LUT_USAGE_MACROTILE] =
+					PERF_QOS_LUT_MACROTILE,
+			[SDE_QOS_LUT_USAGE_NRT] =
+					PERF_QOS_LUT_NRT,
+			[SDE_QOS_LUT_USAGE_CWB] =
+					PERF_QOS_LUT_CWB,
+		};
+		const u32 entry_size = 3;
+		int m, count;
+		int key = prop_key[j];
+
+		if (!prop_exists[key])
+			continue;
+
+		count = prop_count[key] / entry_size;
+
+		cfg->perf.qos_lut_tbl[j].entries = kcalloc(count,
+			sizeof(struct sde_qos_lut_entry), GFP_KERNEL);
+		if (!cfg->perf.qos_lut_tbl[j].entries) {
+			rc = -ENOMEM;
+			goto end;
+		}
+
+		for (k = 0, m = 0; k < count; k++, m += entry_size) {
+			u64 lut_hi, lut_lo;
+
+			cfg->perf.qos_lut_tbl[j].entries[k].fl =
+					PROP_VALUE_ACCESS(prop_value, key, m);
+			lut_hi = PROP_VALUE_ACCESS(prop_value, key, m + 1);
+			lut_lo = PROP_VALUE_ACCESS(prop_value, key, m + 2);
+			cfg->perf.qos_lut_tbl[j].entries[k].lut =
+					(lut_hi << 32) | lut_lo;
+			SDE_DEBUG("usage:%d.%d fl:%d lut:0x%llx\n",
+				j, k,
+				cfg->perf.qos_lut_tbl[j].entries[k].fl,
+				cfg->perf.qos_lut_tbl[j].entries[k].lut);
+		}
+		cfg->perf.qos_lut_tbl[j].nentry = count;
+	}
+
+	if (prop_exists[PERF_CDP_SETTING]) {
+		const u32 prop_size = 2;
+		u32 count = prop_count[PERF_CDP_SETTING] / prop_size;
+
+		count = min_t(u32, count, SDE_PERF_CDP_USAGE_MAX);
+
+		for (j = 0; j < count; j++) {
+			cfg->perf.cdp_cfg[j].rd_enable =
+					PROP_VALUE_ACCESS(prop_value,
+					PERF_CDP_SETTING, j * prop_size);
+			cfg->perf.cdp_cfg[j].wr_enable =
+					PROP_VALUE_ACCESS(prop_value,
+					PERF_CDP_SETTING, j * prop_size + 1);
+			SDE_DEBUG("cdp usage:%d rd:%d wr:%d\n",
+				j, cfg->perf.cdp_cfg[j].rd_enable,
+				cfg->perf.cdp_cfg[j].wr_enable);
+		}
+
+		cfg->has_cdp = true;
+	}
+
 freeprop:
 	kfree(prop_value);
 end:
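
Each QoS LUT property is consumed three u32 cells at a time: a fill level followed
by the high and low halves of a 64-bit LUT word, with a trailing entry whose fill
level is 0 serving as the default. The CDP setting is read as (rd_enable, wr_enable)
pairs. A stand-alone sketch of the three-word decode, using the old linear LUT
values purely as sample data:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* per entry: fill level, LUT high word, LUT low word; the sample
	 * values are borrowed from the previously hard-coded linear
	 * mapping and are purely illustrative
	 */
	static const uint32_t raw[] = {
		4, 0x0, 0x1B,		/* fl = 4 -> lut 0x1B */
		0, 0x0, 0x55555B,	/* fl = 0 -> default lut */
	};
	const unsigned int entry_size = 3;
	unsigned int count = sizeof(raw) / sizeof(raw[0]) / entry_size;
	unsigned int k, m;

	for (k = 0, m = 0; k < count; k++, m += entry_size) {
		uint64_t lut = ((uint64_t)raw[m + 1] << 32) | raw[m + 2];

		printf("entry %u: fl=%u lut=0x%llx\n", k, raw[m],
		       (unsigned long long)lut);
	}
	return 0;
}
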
@@ -2510,11 +2739,17 @@
 		/* update msm8998 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 25;
+		sde_cfg->vbif_qos_nlvl = 4;
+		sde_cfg->ts_prefill_rev = 1;
+		sde_cfg->perf.min_prefill_lines = 25;
 		break;
 	case SDE_HW_VER_400:
-		/* update msm8998 and sdm845 target here */
+		/* update sdm845 target here */
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->perf.min_prefill_lines = 24;
 		break;
 	default:
 		sde_cfg->perf.min_prefill_lines = 0xffff;
@@ -2549,8 +2784,13 @@
 	for (i = 0; i < sde_cfg->vbif_count; i++) {
 		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
 		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+		kfree(sde_cfg->vbif[i].qos_rt_tbl.priority_lvl);
+		kfree(sde_cfg->vbif[i].qos_nrt_tbl.priority_lvl);
 	}
 
+	for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
+		kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
+
 	kfree(sde_cfg->dma_formats);
 	kfree(sde_cfg->cursor_formats);
 	kfree(sde_cfg->vig_formats);
@@ -2582,6 +2822,10 @@
 	if (rc)
 		goto end;
 
+	rc = sde_perf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = sde_rot_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -2632,10 +2876,6 @@
 	if (rc)
 		goto end;
 
-	rc = sde_perf_parse_dt(np, sde_cfg);
-	if (rc)
-		goto end;
-
 	return sde_cfg;
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index cfb1b67..beff43c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -60,6 +60,8 @@
 #define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
 #define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
 
+#define MAX_XIN_COUNT 16
+
 /**
  * Supported UBWC feature versions
  */
@@ -79,7 +81,6 @@
  * @SDE_MDP_UBWC_1_0,      This chipsets supports Universal Bandwidth
  *                         compression initial revision
  * @SDE_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
- * @SDE_MDP_CDP,           Client driven prefetch
  * @SDE_MDP_MAX            Maximum value
 
  */
@@ -89,7 +90,6 @@
 	SDE_MDP_BWC,
 	SDE_MDP_UBWC_1_0,
 	SDE_MDP_UBWC_1_5,
-	SDE_MDP_CDP,
 	SDE_MDP_MAX
 };
 
@@ -107,10 +107,14 @@
  * @SDE_SSPP_PCC,            Color correction support
  * @SDE_SSPP_CURSOR,         SSPP can be used as a cursor layer
  * @SDE_SSPP_QOS,            SSPP support QoS control, danger/safe/creq
+ * @SDE_SSPP_QOS_8LVL,       SSPP support 8-level QoS control
  * @SDE_SSPP_EXCL_RECT,      SSPP supports exclusion rect
  * @SDE_SSPP_SMART_DMA_V1,   SmartDMA 1.0 support
  * @SDE_SSPP_SMART_DMA_V2,   SmartDMA 2.0 support
  * @SDE_SSPP_SBUF,           SSPP support inline stream buffer
+ * @SDE_SSPP_TS_PREFILL,     Supports prefill with traffic shaper
+ * @SDE_SSPP_TS_PREFILL_REC1, Supports prefill with traffic shaper multirec
+ * @SDE_SSPP_CDP,            Supports client driven prefetch
  * @SDE_SSPP_MAX             maximum value
  */
 enum {
@@ -126,10 +130,14 @@
 	SDE_SSPP_PCC,
 	SDE_SSPP_CURSOR,
 	SDE_SSPP_QOS,
+	SDE_SSPP_QOS_8LVL,
 	SDE_SSPP_EXCL_RECT,
 	SDE_SSPP_SMART_DMA_V1,
 	SDE_SSPP_SMART_DMA_V2,
 	SDE_SSPP_SBUF,
+	SDE_SSPP_TS_PREFILL,
+	SDE_SSPP_TS_PREFILL_REC1,
+	SDE_SSPP_CDP,
 	SDE_SSPP_MAX
 };
 
@@ -237,6 +245,9 @@
  * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
  * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of out ROI in
  *                          the destination image
+ * @SDE_WB_QOS,             Writeback supports QoS control, danger/safe/creq
+ * @SDE_WB_QOS_8LVL,        Writeback supports 8-level QoS control
+ * @SDE_WB_CDP,             Writeback supports client driven prefetch
  * @SDE_WB_MAX              maximum value
  */
 enum {
@@ -252,16 +263,21 @@
 	SDE_WB_YUV_CONFIG,
 	SDE_WB_PIPE_ALPHA,
 	SDE_WB_XY_ROI_OFFSET,
+	SDE_WB_QOS,
+	SDE_WB_QOS_8LVL,
+	SDE_WB_CDP,
 	SDE_WB_MAX
 };
 
 /**
  * VBIF sub-blocks and features
  * @SDE_VBIF_QOS_OTLIM        VBIF supports OT Limit
+ * @SDE_VBIF_QOS_REMAP        VBIF supports QoS priority remap
  * @SDE_VBIF_MAX              maximum value
  */
 enum {
 	SDE_VBIF_QOS_OTLIM = 0x1,
+	SDE_VBIF_QOS_REMAP,
 	SDE_VBIF_MAX
 };
 
@@ -338,17 +354,41 @@
 };
 
 /**
+ * enum sde_qos_lut_usage - define QoS LUT use cases
+ */
+enum sde_qos_lut_usage {
+	SDE_QOS_LUT_USAGE_LINEAR,
+	SDE_QOS_LUT_USAGE_MACROTILE,
+	SDE_QOS_LUT_USAGE_NRT,
+	SDE_QOS_LUT_USAGE_CWB,
+	SDE_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct sde_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct sde_qos_lut_entry {
+	u32 fl;
+	u64 lut;
+};
+
+/**
+ * struct sde_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entries in this table
+ * @entries: Pointer to table entries
+ */
+struct sde_qos_lut_tbl {
+	u32 nentry;
+	struct sde_qos_lut_entry *entries;
+};
+
+/**
  * struct sde_sspp_sub_blks : SSPP sub-blocks
  * @maxdwnscale: max downscale ratio supported(without DECIMATION)
  * @maxupscale:  maxupscale ratio supported
  * @maxwidth:    max pixelwidth supported by this pipe
- * @danger_lut_linear: LUT to generate danger signals for linear format
- * @safe_lut_linear: LUT to generate safe signals for linear format
- * @danger_lut_tile: LUT to generate danger signals for tile format
- * @safe_lut_tile: LUT to generate safe signals for tile format
- * @danger_lut_nrt: LUT to generate danger signals for non-realtime use case
- * @safe_lut_nrt: LUT to generate safe signals for non-realtime use case
- * @creq_lut_nrt: LUT to generate creq signals for non-realtime use case
  * @creq_vblank: creq priority during vertical blanking
  * @danger_vblank: danger priority during vertical blanking
  * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
@@ -365,13 +405,6 @@
  */
 struct sde_sspp_sub_blks {
 	u32 maxlinewidth;
-	u32 danger_lut_linear;
-	u32 safe_lut_linear;
-	u32 danger_lut_tile;
-	u32 safe_lut_tile;
-	u32 danger_lut_nrt;
-	u32 safe_lut_nrt;
-	u32 creq_lut_nrt;
 	u32 creq_vblank;
 	u32 danger_vblank;
 	u32 pixel_ram_size;
@@ -653,6 +686,16 @@
 };
 
 /**
+ * struct sde_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl      number of priority levels
+ * @priority_lvl       pointer to array of priority levels in ascending order
+ */
+struct sde_vbif_qos_tbl {
+	u32 npriority_lvl;
+	u32 *priority_lvl;
+};
+
+/**
  * struct sde_vbif_cfg - information of VBIF blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -662,6 +705,10 @@
  * @xin_halt_timeout   maximum time (in usec) for xin to halt
  * @dynamic_ot_rd_tbl  dynamic OT read configuration table
  * @dynamic_ot_wr_tbl  dynamic OT write configuration table
+ * @qos_rt_tbl         real-time QoS priority table
+ * @qos_nrt_tbl        non-real-time QoS priority table
+ * @memtype_count      number of defined memtypes
+ * @memtype            array of xin memtype definitions
  */
 struct sde_vbif_cfg {
 	SDE_HW_BLK_INFO;
@@ -670,6 +717,10 @@
 	u32 xin_halt_timeout;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
 	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+	struct sde_vbif_qos_tbl qos_rt_tbl;
+	struct sde_vbif_qos_tbl qos_nrt_tbl;
+	u32 memtype_count;
+	u32 memtype[MAX_XIN_COUNT];
 };
 /**
  * struct sde_reg_dma_cfg - information of lut dma blocks
@@ -686,6 +737,27 @@
 };
 
 /**
+ * Define CDP use cases
+ * @SDE_PERF_CDP_USAGE_RT: real-time use cases
+ * @SDE_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+	SDE_PERF_CDP_USAGE_RT,
+	SDE_PERF_CDP_USAGE_NRT,
+	SDE_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct sde_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct sde_perf_cdp_cfg {
+	bool rd_enable;
+	bool wr_enable;
+};
+
+/**
  * struct sde_perf_cfg - performance control settings
  * @max_bw_low         low threshold of maximum bandwidth (kbps)
  * @max_bw_high        high threshold of maximum bandwidth (kbps)
@@ -702,6 +774,10 @@
  * @downscaling_prefill_lines  downscaling latency in lines
  * @amortizable_theshold minimum y position for traffic shaping prefill
  * @min_prefill_lines  minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg            cdp use case configurations
  */
 struct sde_perf_cfg {
 	u32 max_bw_low;
@@ -719,6 +795,10 @@
 	u32 downscaling_prefill_lines;
 	u32 amortizable_threshold;
 	u32 min_prefill_lines;
+	u32 safe_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+	u32 danger_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+	struct sde_qos_lut_tbl qos_lut_tbl[SDE_QOS_LUT_USAGE_MAX];
+	struct sde_perf_cdp_cfg cdp_cfg[SDE_PERF_CDP_USAGE_MAX];
 };
 
 /**
@@ -736,7 +816,7 @@
  * @csc_type           csc or csc_10bit support.
  * @smart_dma_rev      Supported version of SmartDMA feature.
  * @has_src_split      source split feature status
- * @has_cdp            Client driver prefetch feature status
+ * @has_cdp            Client driven prefetch feature status
  * @has_wb_ubwc        UBWC feature supported on WB
  * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @has_sbuf           indicate if stream buffer is available
@@ -746,6 +826,8 @@
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
  * @wb_formats         Supported formats for wb
+ * @vbif_qos_nlvl      number of vbif QoS priority levels
+ * @ts_prefill_rev     prefill traffic shaper feature revision
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -765,6 +847,8 @@
 	bool has_sbuf;
 	u32 sbuf_headroom;
 	bool has_idle_pc;
+	u32 vbif_qos_nlvl;
+	u32 ts_prefill_rev;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 24f16c6..53a48c8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -395,37 +395,32 @@
 		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 68-71 */
+	/* irq_idx: 72-75 */
 	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
 		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
 		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
-	/* irq_idx: 72-75 */
+	/* irq_idx: 76-79 */
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
 		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 76-79 */
+	/* irq_idx: 80-83 */
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
 		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 80-83 */
+	/* irq_idx: 84-87 */
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
 		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
 	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
 		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
-	/* irq_idx: 84-87 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
 	/* irq_idx: 88-91 */
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
 	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
@@ -986,7 +981,7 @@
 			sde_intr_set[reg_idx].status_off) &
 					sde_irq_map[irq_idx].irq_mask;
 	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[irq_idx].clr_off,
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
 				intr_status);
 
 	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 1f17378..be83afe 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -116,7 +116,7 @@
 	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
 	p->hsync_skew - 1;
 
-	if (ctx->cap->type == INTF_EDP) {
+	if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
 		display_v_start += p->hsync_pulse_width + p->h_back_porch;
 		display_v_end -= p->h_front_porch;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 7780c5b..fedc72c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -71,7 +71,7 @@
 	if (stage == SDE_STAGE_BASE)
 		rc = -EINVAL;
 	else if (stage <= sblk->maxblendstages)
-		rc = sblk->blendstage_base[stage - 1];
+		rc = sblk->blendstage_base[stage - SDE_STAGE_0];
 	else
 		rc = -EINVAL;
 
@@ -198,7 +198,7 @@
 {
 	struct sde_hw_blk_reg_map *c = &ctx->hw;
 	int stage_off;
-	u32 val = 0;
+	u32 val = 0, alpha = 0;
 
 	stage_off = _stage_offset(ctx, dim_layer->stage);
 	if (stage_off < 0) {
@@ -206,13 +206,13 @@
 		return;
 	}
 
-	val = (dim_layer->color_fill.color_1 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_0 & 0xFFF);
+	alpha = dim_layer->color_fill.color_3 & 0xFF;
+	val = ((dim_layer->color_fill.color_1 << 2) & 0xFFF) << 16 |
+			((dim_layer->color_fill.color_0 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_0 + stage_off, val);
 
-	val = 0;
-	val = (dim_layer->color_fill.color_3 & 0xFFF) << 16 |
-			(dim_layer->color_fill.color_2 & 0xFFF);
+	val = (alpha << 4) << 16 |
+			((dim_layer->color_fill.color_2 << 2) & 0xFFF);
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_COLOR_1 + stage_off, val);
 
 	val = dim_layer->rect.h << 16 | dim_layer->rect.w;
@@ -222,9 +222,14 @@
 	SDE_REG_WRITE(c, LM_FG_COLOR_FILL_XY + stage_off, val);
 
 	val = BIT(16); /* enable dim layer */
+	val |= SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
 	if (dim_layer->flags & SDE_DRM_DIM_LAYER_EXCLUSIVE)
 		val |= BIT(17);
+	else
+		val &= ~BIT(17);
 	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
+	val = (alpha << 16) | (0xff - alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, val);
 }
 
 static void sde_hw_lm_setup_misr(struct sde_hw_mixer *ctx,
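
The dim-layer fill now packs the colour components shifted left by two and carries
the constant alpha both in FILL_COLOR_1 and in the per-stage LM_BLEND0_CONST_ALPHA
word. A stand-alone check of the packing, assuming the incoming color_fill
components are 8-bit values (an assumption of this example, not something the hunk
states):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical 8-bit dim-layer colour components */
	uint32_t c0 = 0xFF, c1 = 0x00, c2 = 0x40, c3 = 0x80;
	uint32_t alpha = c3 & 0xFF;
	uint32_t fill0 = ((c1 << 2) & 0xFFF) << 16 | ((c0 << 2) & 0xFFF);
	uint32_t fill1 = (alpha << 4) << 16 | ((c2 << 2) & 0xFFF);
	uint32_t const_alpha = (alpha << 16) | (0xff - alpha);

	/* expected: fill0=0x000003fc fill1=0x08000100 const=0x0080007f */
	printf("0x%08x 0x%08x 0x%08x\n", fill0, fill1, const_alpha);
	return 0;
}
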
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index d15b804..d5f03a6a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -563,6 +563,10 @@
 	case SDE_HW_ROT_CMD_COMMIT:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_COMMIT;
 		break;
+	case SDE_HW_ROT_CMD_START:
+		cmd_type = SDE_ROTATOR_INLINE_CMD_START;
+		priv_handle = data->priv_handle;
+		break;
 	case SDE_HW_ROT_CMD_CLEANUP:
 		cmd_type = SDE_ROTATOR_INLINE_CMD_CLEANUP;
 		priv_handle = data->priv_handle;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index a4f5b49..e490052 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -24,11 +24,13 @@
  * enum sde_hw_rot_cmd_type - type of rotator hardware command
  * @SDE_HW_ROT_CMD_VALDIATE: validate rotator command; do not commit
  * @SDE_HW_ROT_CMD_COMMIT: commit/execute rotator command
+ * @SDE_HW_ROT_CMD_START: mdp is ready to start
  * @SDE_HW_ROT_CMD_CLEANUP: cleanup rotator command after it is done
  */
 enum sde_hw_rot_cmd_type {
 	SDE_HW_ROT_CMD_VALIDATE,
 	SDE_HW_ROT_CMD_COMMIT,
+	SDE_HW_ROT_CMD_START,
 	SDE_HW_ROT_CMD_CLEANUP,
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index a1f5cee..bb9f9c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -70,6 +70,8 @@
 #define SSPP_QOS_CTRL                      0x6C
 #define SSPP_DECIMATION_CONFIG             0xB4
 #define SSPP_SRC_ADDR_SW_STATUS            0x70
+#define SSPP_CREQ_LUT_0                    0x74
+#define SSPP_CREQ_LUT_1                    0x78
 #define SSPP_SW_PIX_EXT_C0_LR              0x100
 #define SSPP_SW_PIX_EXT_C0_TB              0x104
 #define SSPP_SW_PIX_EXT_C0_REQ_PIXELS      0x108
@@ -79,11 +81,17 @@
 #define SSPP_SW_PIX_EXT_C3_LR              0x120
 #define SSPP_SW_PIX_EXT_C3_TB              0x124
 #define SSPP_SW_PIX_EXT_C3_REQ_PIXELS      0x128
+#define SSPP_TRAFFIC_SHAPER                0x130
+#define SSPP_CDP_CNTL                      0x134
 #define SSPP_UBWC_ERROR_STATUS             0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL        0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL   0x154
+#define SSPP_TRAFFIC_SHAPER_REC1           0x158
 #define SSPP_EXCL_REC_SIZE                 0x1B4
 #define SSPP_EXCL_REC_XY                   0x1B8
 #define SSPP_VIG_OP_MODE                   0x0
 #define SSPP_VIG_CSC_10_OP_MODE            0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX        0xFF
 
 /* SSPP_QOS_CTRL */
 #define SSPP_QOS_CTRL_VBLANK_EN            BIT(16)
@@ -186,6 +194,9 @@
 #define VIG_CSC_10_EN          BIT(0)
 #define CSC_10BIT_OFFSET       4
 
+/* traffic shaper clock in Hz */
+#define TS_CLK			19200000
+
 static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
 		int s_id,
 		u32 *idx)
@@ -974,7 +985,13 @@
 	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
 		return;
 
-	SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+	if (ctx->cap && test_bit(SDE_SSPP_QOS_8LVL, &ctx->cap->features)) {
+		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+				cfg->creq_lut >> 32);
+	} else {
+		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+	}
 }
 
 static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
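
With SDE_SSPP_QOS_8LVL set, the 64-bit creq LUT is split across two registers: the
low 32 bits go to SSPP_CREQ_LUT_0 and the value shifted right by 32 goes to
SSPP_CREQ_LUT_1. For example, a creq_lut of 0x0011223344556677 programs 0x44556677
into CREQ_LUT_0 and 0x00112233 into CREQ_LUT_1; targets without the 8-level feature
keep using the single legacy SSPP_CREQ_LUT register, which receives only the low
32 bits.
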
@@ -1041,6 +1058,75 @@
 	status->rd_ptr[1] = val & 0xffff;
 }
 
+static void sde_hw_sspp_setup_ts_prefill(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_ts_cfg *cfg,
+		enum sde_sspp_multirect_index index)
+{
+	u32 idx;
+	u32 ts_offset, ts_prefill_offset;
+	u32 ts_count = 0, ts_bytes = 0;
+	const struct sde_sspp_cfg *cap;
+
+	if (!ctx || !cfg || !ctx->cap)
+		return;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	cap = ctx->cap;
+
+	if (index == SDE_SSPP_RECT_0 &&
+			test_bit(SDE_SSPP_TS_PREFILL, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_PREFILL;
+	} else if (index == SDE_SSPP_RECT_1 &&
+			test_bit(SDE_SSPP_TS_PREFILL_REC1, &cap->features)) {
+		ts_offset = SSPP_TRAFFIC_SHAPER_REC1;
+		ts_prefill_offset = SSPP_TRAFFIC_SHAPER_REC1_PREFILL;
+	} else {
+		return;
+	}
+
+	if (cfg->time) {
+		ts_bytes = mult_frac(TS_CLK * 1000000ULL, cfg->size,
+				cfg->time);
+		if (ts_bytes > SSPP_TRAFFIC_SHAPER_BPC_MAX)
+			ts_bytes = SSPP_TRAFFIC_SHAPER_BPC_MAX;
+	}
+
+	if (ts_bytes) {
+		ts_count = DIV_ROUND_UP_ULL(cfg->size, ts_bytes);
+		ts_bytes |= BIT(31) | BIT(27);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, ts_offset, ts_bytes);
+	SDE_REG_WRITE(&ctx->hw, ts_prefill_offset, ts_count);
+}
+
+static void sde_hw_sspp_setup_cdp(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cdp_cfg *cfg)
+{
+	u32 idx;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->tile_amortize_enable)
+		cdp_cntl |= BIT(2);
+	if (cfg->preload_ahead == SDE_SSPP_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
 static void _setup_layer_ops(struct sde_hw_pipe *c,
 		unsigned long features)
 {
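
The SSPP CDP control word is a plain bit mask: BIT(0) enables CDP, BIT(1) enables
UBWC metadata preload, BIT(2) enables amortization for tile formats and BIT(3)
selects the 64-request preload depth. A configuration asking for enable plus UBWC
metadata plus 64-deep preload therefore writes 0xB to SSPP_CDP_CNTL; the writeback
CDP register in sde_hw_wb.c below reuses the same encoding, minus the amortize bit.
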
@@ -1062,6 +1148,9 @@
 		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
 	}
 
+	if (test_bit(SDE_SSPP_TS_PREFILL, &features))
+		c->ops.setup_ts_prefill = sde_hw_sspp_setup_ts_prefill;
+
 	if (test_bit(SDE_SSPP_CSC, &features) ||
 		test_bit(SDE_SSPP_CSC_10BIT, &features))
 		c->ops.setup_csc = sde_hw_sspp_setup_csc;
@@ -1099,6 +1188,9 @@
 		c->ops.setup_sys_cache = sde_hw_sspp_setup_sys_cache;
 		c->ops.get_sbuf_status = sde_hw_sspp_get_sbuf_status;
 	}
+
+	if (test_bit(SDE_SSPP_CDP, &features))
+		c->ops.setup_cdp = sde_hw_sspp_setup_cdp;
 }
 
 static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index 1b81e54..d52c0e5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -300,7 +300,7 @@
 struct sde_hw_pipe_qos_cfg {
 	u32 danger_lut;
 	u32 safe_lut;
-	u32 creq_lut;
+	u64 creq_lut;
 	u32 creq_vblank;
 	u32 danger_vblank;
 	bool vblank_en;
@@ -308,6 +308,30 @@
 };
 
 /**
+ * enum CDP preload ahead address size
+ */
+enum {
+	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+	SDE_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	SDE_SSPP_CDP_PRELOAD_AHEAD_32,
+ *	SDE_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_pipe_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
  * enum system cache rotation operation mode
  */
 enum {
@@ -343,6 +367,16 @@
 };
 
 /**
+ * struct sde_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct sde_hw_pipe_ts_cfg {
+	u64 size;
+	u64 time;
+};
+
+/**
  * Maximum number of stream buffer plane
  */
 #define SDE_PIPE_SBUF_PLANE_NUM	2
@@ -554,6 +588,24 @@
 	 */
 	void (*get_sbuf_status)(struct sde_hw_pipe *ctx,
 			struct sde_hw_pipe_sbuf_status *status);
+
+	/**
+	 * setup_ts_prefill - setup prefill traffic shaper
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to traffic shaper configuration
+	 * @index: rectangle index in multirect
+	 */
+	void (*setup_ts_prefill)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_ts_cfg *cfg,
+			enum sde_sspp_multirect_index index);
+
+	/**
+	 * setup_cdp - setup client driven prefetch
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to cdp configuration
+	 */
+	void (*setup_cdp)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cdp_cfg *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index bd212e2..19f999e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -216,10 +216,12 @@
 
 	reg = SDE_REG_READ(c, MDP_VSYNC_SEL);
 	for (i = 0; i < cfg->pp_count; i++) {
+		int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
 		if (watchdog_te)
-			reg |= 0xF << pp_offset[cfg->ppnumber[i] - 1];
+			reg |= 0xF << pp_offset[pp_idx];
 		else
-			reg &= ~(0xF << pp_offset[cfg->ppnumber[i] - 1]);
+			reg &= ~(0xF << pp_offset[pp_idx]);
 	}
 
 	SDE_REG_WRITE(c, MDP_VSYNC_SEL, reg);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 9cb4494..faf25c7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -80,12 +80,12 @@
  * struct sde_watchdog_te_status - configure watchdog timer to generate TE
  * @pp_count: number of ping pongs active
  * @frame_rate: Display frame rate
- * @ppnumber: base address of ping pong info
+ * @ppnumber: ping pong index array
  */
 struct sde_watchdog_te_status {
 	u32 pp_count;
 	u32 frame_rate;
-	u32 ppnumber[];
+	u32 ppnumber[PINGPONG_MAX];
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index 048ec47..b5c273a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -31,8 +31,42 @@
 #define VBIF_IN_WR_LIM_CONF2		0x00C8
 #define VBIF_OUT_RD_LIM_CONF0		0x00D0
 #define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
 #define VBIF_XIN_HALT_CTRL0		0x0200
 #define VBIF_XIN_HALT_CTRL1		0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
+
+static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
+		u32 xin_id, u32 value)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg_off;
+	u32 bit_off;
+	u32 reg_val;
+
+	/*
+	 * Assume 4 bits per field and 8 fields per 32-bit register, so
+	 * at most 16 fields across the two registers
+	 */
+	if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+		return;
+
+	c = &vbif->hw;
+
+	if (xin_id >= 8) {
+		xin_id -= 8;
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+	} else {
+		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+	}
+	bit_off = (xin_id & 0x7) * 4;
+	reg_val = SDE_REG_READ(c, reg_off);
+	reg_val &= ~(0x7 << bit_off);
+	reg_val |= (value & 0x7) << bit_off;
+	SDE_REG_WRITE(c, reg_off, reg_val);
+}
 
 static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
 		u32 xin_id, bool rd, u32 limit)
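
Memtype programming uses one 4-bit field per client: xin ids 0-7 land in
VBIF_OUT_AXI_AMEMTYPE_CONF0 and ids 8-15 in CONF1, with the field offset computed
as (xin_id % 8) * 4 and only the low three bits of the field actually rewritten.
For example, xin_id 10 selects CONF1 with bit offset 8, so the read-modify-write
touches bits 10:8 of that register.
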
@@ -104,6 +138,35 @@
 	return (reg_val & BIT(xin_id)) ? true : false;
 }
 
+static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
+		u32 xin_id, u32 level, u32 remap_level)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+	if (!vbif)
+		return;
+
+	c = &vbif->hw;
+
+	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+	reg_shift = (xin_id & 0x7) * 4;
+
+	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+	mask = 0x7 << reg_shift;
+
+	reg_val &= ~mask;
+	reg_val |= (remap_level << reg_shift) & mask;
+
+	reg_val_lvl &= ~mask;
+	reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
 static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
 		unsigned long cap)
 {
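
Each remap level occupies eight bytes of register space, a pair of 32-bit registers
covering xin ids 0-7 and 8-15, with a 4-bit slot per xin id of which the low three
bits are programmed. The addressing can be sanity-checked in isolation (expected
values noted in the comment):

#include <stdio.h>
#include <stdint.h>

#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590

int main(void)
{
	uint32_t xin_id = 10, level = 3;
	uint32_t reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
	uint32_t reg_shift = (xin_id & 0x7) * 4;

	/* expected: rp=0x56c lvl=0x5ac shift=8 */
	printf("rp=0x%x lvl=0x%x shift=%u\n",
	       VBIF_XINL_QOS_RP_REMAP_000 + reg_high,
	       VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_shift);
	return 0;
}
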
@@ -111,6 +174,9 @@
 	ops->get_limit_conf = sde_hw_get_limit_conf;
 	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
 	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
+		ops->set_qos_remap = sde_hw_set_qos_remap;
+	ops->set_mem_type = sde_hw_set_mem_type;
 }
 
 static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
index de7fac0..80a9e5a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,25 @@
 	 */
 	bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
 			u32 xin_id);
+
+	/**
+	 * set_qos_remap - set QoS priority remap
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @level: priority level
+	 * @remap_level: remapped level
+	 */
+	void (*set_qos_remap)(struct sde_hw_vbif *vbif,
+			u32 xin_id, u32 level, u32 remap_level);
+
+	/**
+	 * set_mem_type - set memory type
+	 * @vbif: vbif context driver
+	 * @xin_id: client interface identifier
+	 * @value: memory type value
+	 */
+	void (*set_mem_type)(struct sde_hw_vbif *vbif,
+			u32 xin_id, u32 value);
 };
 
 struct sde_hw_vbif {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 98aff0f..378b904 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -41,13 +41,21 @@
 #define WB_N16_INIT_PHASE_Y_C12		0x06C
 #define WB_OUT_SIZE			0x074
 #define WB_ALPHA_X_VALUE		0x078
+#define WB_DANGER_LUT			0x084
+#define WB_SAFE_LUT			0x088
+#define WB_QOS_CTRL			0x090
+#define WB_CREQ_LUT_0			0x098
+#define WB_CREQ_LUT_1			0x09C
 #define WB_UBWC_STATIC_CTRL		0x144
 #define WB_CSC_BASE			0x260
 #define WB_DST_ADDR_SW_STATUS		0x2B0
-#define WB_CDP_CTRL			0x2B4
+#define WB_CDP_CNTL			0x2B4
 #define WB_OUT_IMAGE_SIZE		0x2C0
 #define WB_OUT_XY			0x2C4
 
+/* WB_QOS_CTRL */
+#define WB_QOS_CTRL_DANGER_SAFE_EN	BIT(0)
+
 static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -88,7 +96,6 @@
 	u32 write_config = 0;
 	u32 opmode = 0;
 	u32 dst_addr_sw = 0;
-	u32 cdp_settings = 0x0;
 
 	chroma_samp = fmt->chroma_sample;
 
@@ -157,18 +164,6 @@
 	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
 	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
 	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
-
-	/* Enable CDP */
-	cdp_settings = BIT(0);
-
-	if (!SDE_FORMAT_IS_LINEAR(fmt))
-		cdp_settings |= BIT(1);
-
-	/* Enable 64 transactions if line mode*/
-	if (data->intf_mode == INTF_MODE_WB_LINE)
-		cdp_settings |= BIT(3);
-
-	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
 }
 
 static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
@@ -185,6 +180,68 @@
 	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
 }
 
+static void sde_hw_wb_setup_danger_safe_lut(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_qos_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	if (!ctx || !cfg)
+		return;
+
+	SDE_REG_WRITE(c, WB_DANGER_LUT, cfg->danger_lut);
+	SDE_REG_WRITE(c, WB_SAFE_LUT, cfg->safe_lut);
+}
+
+static void sde_hw_wb_setup_creq_lut(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_qos_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (ctx->caps && test_bit(SDE_WB_QOS_8LVL, &ctx->caps->features)) {
+		SDE_REG_WRITE(c, WB_CREQ_LUT_0, cfg->creq_lut);
+		SDE_REG_WRITE(c, WB_CREQ_LUT_1, cfg->creq_lut >> 32);
+	}
+}
+
+static void sde_hw_wb_setup_qos_ctrl(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_qos_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 qos_ctrl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= WB_QOS_CTRL_DANGER_SAFE_EN;
+
+	SDE_REG_WRITE(c, WB_QOS_CTRL, qos_ctrl);
+}
+
+static void sde_hw_wb_setup_cdp(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cdp_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 cdp_cntl = 0;
+
+	if (!ctx || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	if (cfg->enable)
+		cdp_cntl |= BIT(0);
+	if (cfg->ubwc_meta_enable)
+		cdp_cntl |= BIT(1);
+	if (cfg->preload_ahead == SDE_WB_CDP_PRELOAD_AHEAD_64)
+		cdp_cntl |= BIT(3);
+
+	SDE_REG_WRITE(c, WB_CDP_CNTL, cdp_cntl);
+}
+
 static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
 	unsigned long features)
 {
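
On the writeback side the QoS control word currently carries a single bit:
danger_safe_en maps to WB_QOS_CTRL_DANGER_SAFE_EN (BIT(0)), so a qos_cfg with it
set writes 0x1 to WB_QOS_CTRL. The 64-bit creq LUT is split across WB_CREQ_LUT_0/1
exactly as on the SSPP side, low word first and then the value shifted right by 32,
and is only written when SDE_WB_QOS_8LVL is present in the catalog.
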
@@ -193,6 +250,16 @@
 
 	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
 		ops->setup_roi = sde_hw_wb_roi;
+
+	if (test_bit(SDE_WB_QOS, &features)) {
+		ops->setup_danger_safe_lut =
+			sde_hw_wb_setup_danger_safe_lut;
+		ops->setup_creq_lut = sde_hw_wb_setup_creq_lut;
+		ops->setup_qos_ctrl = sde_hw_wb_setup_qos_ctrl;
+	}
+
+	if (test_bit(SDE_WB_CDP, &features))
+		ops->setup_cdp = sde_hw_wb_setup_cdp;
 }
 
 struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index 9d17fb3..ca3c386 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -29,6 +29,44 @@
 };
 
 /**
+ * enum CDP preload ahead address size
+ */
+enum {
+	SDE_WB_CDP_PRELOAD_AHEAD_32,
+	SDE_WB_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct sde_hw_wb_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ *	SDE_WB_CDP_PRELOAD_AHEAD_32,
+ *	SDE_WB_CDP_PRELOAD_AHEAD_64
+ */
+struct sde_hw_wb_cdp_cfg {
+	bool enable;
+	bool ubwc_meta_enable;
+	bool tile_amortize_enable;
+	u32 preload_ahead;
+};
+
+/**
+ * struct sde_hw_wb_qos_cfg : Writeback pipe QoS configuration
+ * @danger_lut: LUT to generate danger level based on fill level
+ * @safe_lut: LUT to generate safe level based on fill level
+ * @creq_lut: LUT to generate creq level based on fill level
+ * @danger_safe_en: enable danger safe generation
+ */
+struct sde_hw_wb_qos_cfg {
+	u32 danger_lut;
+	u32 safe_lut;
+	u64 creq_lut;
+	bool danger_safe_en;
+};
+
+/**
  *
  * struct sde_hw_wb_ops : Interface to the wb Hw driver functions
  *  Assumption is these functions will be called after clocks are enabled
@@ -57,6 +95,38 @@
 
 	void (*setup_roi)(struct sde_hw_wb *ctx,
 		struct sde_hw_wb_cfg *wb);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_danger_safe_lut)(struct sde_hw_wb *ctx,
+			struct sde_hw_wb_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_creq_lut)(struct sde_hw_wb *ctx,
+			struct sde_hw_wb_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 */
+	void (*setup_qos_ctrl)(struct sde_hw_wb *ctx,
+			struct sde_hw_wb_qos_cfg *cfg);
+
+	/**
+	 * setup_cdp - setup CDP
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe CDP configuration
+	 */
+	void (*setup_cdp)(struct sde_hw_wb *ctx,
+			struct sde_hw_wb_cdp_cfg *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index e3b658a..eeb7a00 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,90 +49,14 @@
 	return IRQ_HANDLED;
 }
 
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
-	struct sde_kms *sde_kms;
-
-	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
-		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
-		return;
-	}
-	sde_kms = irq_data_get_irq_chip_data(irqd);
-
-	/* memory barrier */
-	smp_mb__before_atomic();
-	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
-	/* memory barrier */
-	smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
-	.name = "sde",
-	.irq_mask = sde_hw_irq_mask,
-	.irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
-		unsigned int irq, irq_hw_number_t hwirq)
-{
-	struct sde_kms *sde_kms;
-	int rc;
-
-	if (!domain || !domain->host_data) {
-		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
-		return -EINVAL;
-	}
-	sde_kms = domain->host_data;
-
-	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
-	rc = irq_set_chip_data(irq, sde_kms);
-
-	return rc;
-}
-
-static const struct irq_domain_ops sde_hw_irqdomain_ops = {
-	.map = sde_hw_irqdomain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
 void sde_irq_preinstall(struct msm_kms *kms)
 {
 	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct device *dev;
-	struct irq_domain *domain;
 
 	if (!sde_kms->dev || !sde_kms->dev->dev) {
 		pr_err("invalid device handles\n");
 		return;
 	}
-	dev = sde_kms->dev->dev;
-
-	domain = irq_domain_add_linear(dev->of_node, 32,
-			&sde_hw_irqdomain_ops, sde_kms);
-	if (!domain) {
-		pr_err("failed to add irq_domain\n");
-		return;
-	}
-
-	sde_kms->irq_controller.enabled_mask = 0;
-	sde_kms->irq_controller.domain = domain;
 
 	sde_core_irq_preinstall(sde_kms);
 }
@@ -162,9 +86,5 @@
 	}
 
 	sde_core_irq_uninstall(sde_kms);
-
-	if (sde_kms->irq_controller.domain) {
-		irq_domain_remove(sde_kms->irq_controller.domain);
-		sde_kms->irq_controller.domain = NULL;
-	}
+	sde_core_irq_domain_fini(sde_kms);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8cc196a..c783ab0 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -25,10 +25,13 @@
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
+#include "msm_gem.h"
 
 #include "dsi_display.h"
 #include "dsi_drm.h"
 #include "sde_wb.h"
+#include "dp_display.h"
+#include "dp_drm.h"
 
 #include "sde_kms.h"
 #include "sde_core_irq.h"
@@ -512,8 +515,28 @@
 			wb_display_get_displays(sde_kms->wb_displays,
 					sde_kms->wb_display_count);
 	}
+
+	/* dp */
+	sde_kms->dp_displays = NULL;
+	sde_kms->dp_display_count = dp_display_get_num_of_displays();
+	if (sde_kms->dp_display_count) {
+		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
+				sizeof(void *), GFP_KERNEL);
+		if (!sde_kms->dp_displays) {
+			SDE_ERROR("failed to allocate dp displays\n");
+			goto exit_deinit_dp;
+		}
+		sde_kms->dp_display_count =
+			dp_display_get_displays(sde_kms->dp_displays,
+					sde_kms->dp_display_count);
+	}
 	return 0;
 
+exit_deinit_dp:
+	kfree(sde_kms->dp_displays);
+	sde_kms->dp_display_count = 0;
+	sde_kms->dp_displays = NULL;
+
 exit_deinit_wb:
 	kfree(sde_kms->wb_displays);
 	sde_kms->wb_display_count = 0;
@@ -579,6 +602,14 @@
 		.soft_reset =   NULL,
 		.get_topology = sde_wb_get_topology
 	};
+	static const struct sde_connector_ops dp_ops = {
+		.post_init  = dp_connector_post_init,
+		.detect     = dp_connector_detect,
+		.get_modes  = dp_connector_get_modes,
+		.mode_valid = dp_connector_mode_valid,
+		.get_info   = dp_connector_get_info,
+		.get_topology   = dp_connector_get_topology,
+	};
 	struct msm_display_info info;
 	struct drm_encoder *encoder;
 	void *display, *connector;
@@ -590,7 +621,8 @@
 		return -EINVAL;
 	}
 
-	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
+				sde_kms->dp_display_count;
 	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
 		max_encoders = ARRAY_SIZE(priv->encoders);
 		SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -679,6 +711,47 @@
 			sde_encoder_destroy(encoder);
 		}
 	}
+	/* dp */
+	for (i = 0; i < sde_kms->dp_display_count &&
+			priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dp_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dp_connector_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dp get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("dp encoder init failed %d\n", i);
+			continue;
+		}
+
+		rc = dp_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					NULL,
+					display,
+					&dp_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DisplayPort);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dp %d connector init failed\n", i);
+			dp_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
 
 	return 0;
 }
@@ -744,6 +817,9 @@
 	priv = dev->dev_private;
 	catalog = sde_kms->catalog;
 
+	ret = sde_core_irq_domain_add(sde_kms);
+	if (ret)
+		goto fail_irq;
 	/*
 	 * Query for underlying display drivers, and create connectors,
 	 * bridges and encoders for them.
@@ -821,6 +897,8 @@
 	return 0;
 fail:
 	_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+	sde_core_irq_domain_fini(sde_kms);
 	return ret;
 }
 
@@ -950,6 +1028,13 @@
 	}
 }
 
+static void sde_kms_set_gem_flags(struct msm_gem_object *msm_obj,
+		uint32_t flags)
+{
+	if (msm_obj)
+		msm_obj->flags |= flags;
+}
+
 struct sde_kms_fbo *sde_kms_fbo_alloc(struct drm_device *dev, u32 width,
 		u32 height, u32 pixel_format, u64 modifier[4], u32 flags)
 {
@@ -1036,10 +1121,13 @@
 			fbo->bo[0] = NULL;
 			goto done;
 		}
+
+		/* insert extra bo flags */
+		sde_kms_set_gem_flags(to_msm_bo(fbo->bo[0]), MSM_BO_KEEPATTRS);
 	} else {
 		mutex_lock(&dev->struct_mutex);
 		fbo->bo[0] = msm_gem_new(dev, fbo->layout.total_size,
-				MSM_BO_SCANOUT | MSM_BO_WC);
+				MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_KEEPATTRS);
 		if (IS_ERR(fbo->bo[0])) {
 			mutex_unlock(&dev->struct_mutex);
 			SDE_ERROR("failed to new gem buffer\n");
@@ -1150,6 +1238,10 @@
 		sde_hw_intr_destroy(sde_kms->hw_intr);
 	sde_kms->hw_intr = NULL;
 
+	if (sde_kms->power_event)
+		sde_power_handle_unregister_event(
+				&priv->phandle, sde_kms->power_event);
+
 	_sde_kms_release_displays(sde_kms);
 
 	/* safe to call these more than once during shutdown */
@@ -1355,6 +1447,16 @@
 	return ptr;
 }
 
+static void sde_kms_handle_power_event(u32 event_type, void *usr)
+{
+	struct sde_kms *sde_kms = usr;
+
+	if (!sde_kms)
+		return;
+
+	if (event_type == SDE_POWER_EVENT_POST_ENABLE)
+		sde_vbif_init_memtypes(sde_kms);
+}
 
 static int sde_kms_hw_init(struct msm_kms *kms)
 {
@@ -1539,6 +1641,14 @@
 		goto perf_err;
 	}
 
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
 	/*
 	 * _sde_kms_drm_obj_init should create the DRM related objects
 	 * i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1564,23 +1674,20 @@
 	 */
 	dev->mode_config.allow_fb_modifiers = true;
 
-	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
-	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
-		rc = PTR_ERR(sde_kms->hw_intr);
-		if (!sde_kms->hw_intr)
-			rc = -EINVAL;
-		SDE_ERROR("hw_intr init failed: %d\n", rc);
-		sde_kms->hw_intr = NULL;
-		goto hw_intr_init_err;
-	}
+	/*
+	 * Handle (re)initializations during power enable
+	 */
+	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
+	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
+			SDE_POWER_EVENT_POST_ENABLE,
+			sde_kms_handle_power_event, sde_kms, "kms");
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 	return 0;
 
-hw_intr_init_err:
-	_sde_kms_drm_obj_destroy(sde_kms);
 drm_obj_init_err:
 	sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
 perf_err:
 power_error:
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 1f56d73..f73cb21 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -159,6 +159,7 @@
 	struct sde_power_client *core_client;
 
 	struct ion_client *iclient;
+	struct sde_power_event *power_event;
 
 	/* directory entry for debugfs */
 	struct dentry *debugfs_danger;
@@ -188,6 +189,8 @@
 	void **dsi_displays;
 	int wb_display_count;
 	void **wb_displays;
+	int dp_display_count;
+	void **dp_displays;
 
 	bool has_danger_ctrl;
 };
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
index dcc0bd5..b77d64d 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -166,7 +166,7 @@
 	r = min((r1->x + r1->w), (r2->x + r2->w));
 	b = min((r1->y + r1->h), (r2->y + r2->h));
 
-	if (r < l || b < t) {
+	if (r <= l || b <= t) {
 		memset(result, 0, sizeof(*result));
 	} else {
 		result->x = l;
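
Tightening the comparison to <= means two rectangles that merely share an edge, for
example one spanning x = 0..100 and another starting at x = 100, now yield an empty
result instead of a degenerate zero-width intersection.
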
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index ad207d6..d63fec1 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -99,6 +99,7 @@
  * @csc_cfg: Decoded user configuration for csc
  * @csc_usr_ptr: Points to csc_cfg if valid user config available
  * @csc_ptr: Points to sde_csc_cfg structure to use for current
+ * @mplane_list: List of multirect planes of the same pipe
  * @catalog: Points to sde catalog structure
  * @sbuf_mode: force stream buffer mode if set
  * @sbuf_writeback: force stream buffer writeback if set
@@ -126,6 +127,7 @@
 	bool is_error;
 	bool is_rt_pipe;
 	bool is_virtual;
+	struct list_head mplane_list;
 	struct sde_mdss_cfg *catalog;
 	u32 sbuf_mode;
 	u32 sbuf_writeback;
@@ -222,93 +224,89 @@
 static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
 		const struct sde_format *fmt, u32 src_width)
 {
-	struct sde_plane *psde;
+	struct sde_plane *psde, *tmp;
+	struct sde_plane_state *pstate;
+	struct sde_plane_rot_state *rstate;
 	u32 fixed_buff_size;
 	u32 total_fl;
+	u32 hflip_bytes;
 
-	if (!plane || !fmt) {
+	if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
 		SDE_ERROR("invalid arguments\n");
 		return 0;
 	}
 
 	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
+	rstate = &pstate->rot;
 	fixed_buff_size = psde->pipe_sblk->pixel_ram_size;
 
+	list_for_each_entry(tmp, &psde->mplane_list, mplane_list) {
+		if (!sde_plane_enabled(tmp->base.state))
+			continue;
+		SDE_DEBUG("plane%d/%d src_width:%d/%d\n",
+				psde->base.base.id, tmp->base.base.id,
+				src_width, tmp->pipe_cfg.src_rect.w);
+		src_width = max_t(u32, src_width, tmp->pipe_cfg.src_rect.w);
+	}
+
+	if ((rstate->out_rotation & DRM_REFLECT_X) &&
+			SDE_FORMAT_IS_LINEAR(fmt))
+		hflip_bytes = (src_width + 32) * fmt->bpp;
+	else
+		hflip_bytes = 0;
+
 	if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
 		if (fmt->chroma_sample == SDE_CHROMA_420) {
 			/* NV12 */
-			total_fl = (fixed_buff_size / 2) /
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
 				((src_width + 32) * fmt->bpp);
 		} else {
 			/* non NV12 */
-			total_fl = (fixed_buff_size) /
-				((src_width + 32) * fmt->bpp);
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
+				((src_width + 32) * fmt->bpp * 2);
 		}
 	} else {
-		total_fl = (fixed_buff_size * 2) /
-			((src_width + 32) * fmt->bpp);
+		if (pstate->multirect_mode == SDE_SSPP_MULTIRECT_PARALLEL) {
+			total_fl = (fixed_buff_size / 2 - hflip_bytes) /
+				((src_width + 32) * fmt->bpp * 2);
+		} else {
+			total_fl = (fixed_buff_size - hflip_bytes) /
+				((src_width + 32) * fmt->bpp * 2);
+		}
 	}
 
-	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u hf:%d fl:%u\n",
 			plane->base.id, psde->pipe - SSPP_VIG0,
 			(char *)&fmt->base.pixel_format,
-			src_width, total_fl);
+			src_width, hflip_bytes, total_fl);
 
 	return total_fl;
 }
 
 /**
- * _sde_plane_get_qos_lut_linear - get linear LUT mapping
+ * _sde_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl:		Pointer to LUT table
  * @total_fl:		fill level
  * Return: LUT setting corresponding to the fill level
  */
-static inline u32 _sde_plane_get_qos_lut_linear(u32 total_fl)
+static u64 _sde_plane_get_qos_lut(const struct sde_qos_lut_tbl *tbl,
+		u32 total_fl)
 {
-	u32 qos_lut;
+	int i;
 
-	if (total_fl <= 4)
-		qos_lut = 0x1B;
-	else if (total_fl <= 5)
-		qos_lut = 0x5B;
-	else if (total_fl <= 6)
-		qos_lut = 0x15B;
-	else if (total_fl <= 7)
-		qos_lut = 0x55B;
-	else if (total_fl <= 8)
-		qos_lut = 0x155B;
-	else if (total_fl <= 9)
-		qos_lut = 0x555B;
-	else if (total_fl <= 10)
-		qos_lut = 0x1555B;
-	else if (total_fl <= 11)
-		qos_lut = 0x5555B;
-	else if (total_fl <= 12)
-		qos_lut = 0x15555B;
-	else
-		qos_lut = 0x55555B;
+	if (!tbl || !tbl->nentry || !tbl->entries)
+		return 0;
 
-	return qos_lut;
-}
+	for (i = 0; i < tbl->nentry; i++)
+		if (total_fl <= tbl->entries[i].fl)
+			return tbl->entries[i].lut;
 
-/**
- * _sde_plane_get_qos_lut_macrotile - get macrotile LUT mapping
- * @total_fl:		fill level
- * Return: LUT setting corresponding to the fill level
- */
-static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
-{
-	u32 qos_lut;
+	/* if last fl is zero, use as default */
+	if (!tbl->entries[i-1].fl)
+		return tbl->entries[i-1].lut;
 
-	if (total_fl <= 10)
-		qos_lut = 0x1AAff;
-	else if (total_fl <= 11)
-		qos_lut = 0x5AAFF;
-	else if (total_fl <= 12)
-		qos_lut = 0x15AAFF;
-	else
-		qos_lut = 0x55AAFF;
-
-	return qos_lut;
+	return 0;
 }
 
 /**
@@ -321,8 +319,8 @@
 {
 	struct sde_plane *psde;
 	const struct sde_format *fmt = NULL;
-	u32 qos_lut;
-	u32 total_fl = 0;
+	u64 qos_lut;
+	u32 total_fl = 0, lut_usage;
 
 	if (!plane || !fb) {
 		SDE_ERROR("invalid arguments plane %d fb %d\n",
@@ -332,7 +330,7 @@
 
 	psde = to_sde_plane(plane);
 
-	if (!psde->pipe_hw || !psde->pipe_sblk) {
+	if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
 		SDE_ERROR("invalid arguments\n");
 		return;
 	} else if (!psde->pipe_hw->ops.setup_creq_lut) {
@@ -340,7 +338,7 @@
 	}
 
 	if (!psde->is_rt_pipe) {
-		qos_lut = psde->pipe_sblk->creq_lut_nrt;
+		lut_usage = SDE_QOS_LUT_USAGE_NRT;
 	} else {
 		fmt = sde_get_sde_format_ext(
 				fb->pixel_format,
@@ -350,19 +348,21 @@
 				psde->pipe_cfg.src_rect.w);
 
 		if (SDE_FORMAT_IS_LINEAR(fmt))
-			qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
+			lut_usage = SDE_QOS_LUT_USAGE_LINEAR;
 		else
-			qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
+			lut_usage = SDE_QOS_LUT_USAGE_MACROTILE;
 	}
 
+	qos_lut = _sde_plane_get_qos_lut(
+			&psde->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
 	psde->pipe_qos_cfg.creq_lut = qos_lut;
 
 	trace_sde_perf_set_qos_luts(psde->pipe - SSPP_VIG0,
 			(fmt) ? fmt->base.pixel_format : 0,
-			psde->is_rt_pipe, total_fl, qos_lut,
-			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
+			psde->is_rt_pipe, total_fl, qos_lut, lut_usage);
 
-	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%x\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
 			plane->base.id,
 			psde->pipe - SSPP_VIG0,
 			fmt ? (char *)&fmt->base.pixel_format : NULL,
@@ -390,7 +390,7 @@
 
 	psde = to_sde_plane(plane);
 
-	if (!psde->pipe_hw || !psde->pipe_sblk) {
+	if (!psde->pipe_hw || !psde->pipe_sblk || !psde->catalog) {
 		SDE_ERROR("invalid arguments\n");
 		return;
 	} else if (!psde->pipe_hw->ops.setup_danger_safe_lut) {
@@ -398,8 +398,10 @@
 	}
 
 	if (!psde->is_rt_pipe) {
-		danger_lut = psde->pipe_sblk->danger_lut_nrt;
-		safe_lut = psde->pipe_sblk->safe_lut_nrt;
+		danger_lut = psde->catalog->perf.danger_lut_tbl
+				[SDE_QOS_LUT_USAGE_NRT];
+		safe_lut = psde->catalog->perf.safe_lut_tbl
+				[SDE_QOS_LUT_USAGE_NRT];
 	} else {
 		fmt = sde_get_sde_format_ext(
 				fb->pixel_format,
@@ -407,11 +409,15 @@
 				drm_format_num_planes(fb->pixel_format));
 
 		if (SDE_FORMAT_IS_LINEAR(fmt)) {
-			danger_lut = psde->pipe_sblk->danger_lut_linear;
-			safe_lut = psde->pipe_sblk->safe_lut_linear;
+			danger_lut = psde->catalog->perf.danger_lut_tbl
+					[SDE_QOS_LUT_USAGE_LINEAR];
+			safe_lut = psde->catalog->perf.safe_lut_tbl
+					[SDE_QOS_LUT_USAGE_LINEAR];
 		} else {
-			danger_lut = psde->pipe_sblk->danger_lut_tile;
-			safe_lut = psde->pipe_sblk->safe_lut_tile;
+			danger_lut = psde->catalog->perf.danger_lut_tbl
+					[SDE_QOS_LUT_USAGE_MACROTILE];
+			safe_lut = psde->catalog->perf.safe_lut_tbl
+					[SDE_QOS_LUT_USAGE_MACROTILE];
 		}
 	}
 
@@ -585,6 +591,99 @@
 	sde_vbif_set_ot_limit(sde_kms, &ot_params);
 }
 
+/**
+ * _sde_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane:		Pointer to drm plane
+ */
+static void _sde_plane_set_qos_remap(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_vbif_set_qos_params qos_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&qos_params, 0, sizeof(qos_params));
+	qos_params.vbif_idx = VBIF_RT;
+	qos_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
+	qos_params.xin_id = psde->pipe_hw->cap->xin_id;
+	qos_params.num = psde->pipe_hw->idx - SSPP_VIG0;
+	qos_params.is_rt = psde->is_rt_pipe;
+
+	SDE_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d\n",
+			plane->base.id, qos_params.num,
+			qos_params.vbif_idx,
+			qos_params.xin_id, qos_params.is_rt);
+
+	sde_vbif_set_qos_remap(sde_kms, &qos_params);
+}
+
+/**
+ * _sde_plane_set_ts_prefill - set prefill with traffic shaper
+ * @plane:	Pointer to drm plane
+ * @pstate:	Pointer to sde plane state
+ */
+static void _sde_plane_set_ts_prefill(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	struct sde_plane *psde;
+	struct sde_hw_pipe_ts_cfg cfg;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	psde = to_sde_plane(plane);
+	if (!psde->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	if (!psde->pipe_hw || !psde->pipe_hw->ops.setup_ts_prefill)
+		return;
+
+	_sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_VBLANK_AMORTIZE);
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.size = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_SIZE);
+	cfg.time = sde_plane_get_property(pstate,
+			PLANE_PROP_PREFILL_TIME);
+
+	SDE_DEBUG("plane%d size:%llu time:%llu\n",
+			plane->base.id, cfg.size, cfg.time);
+	SDE_EVT32(DRMID(plane), cfg.size, cfg.time);
+	psde->pipe_hw->ops.setup_ts_prefill(psde->pipe_hw, &cfg,
+			pstate->multirect_index);
+}
+
 /* helper to update a state's input fence pointer from the property */
 static void _sde_plane_set_input_fence(struct sde_plane *psde,
 		struct sde_plane_state *pstate, uint64_t fd)
@@ -1269,33 +1368,31 @@
 static u32 sde_plane_rot_calc_prefill(struct drm_plane *plane)
 {
 	struct drm_plane_state *state;
-	struct drm_crtc_state *cstate;
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_kms *sde_kms;
 	u32 blocksize = 128;
 	u32 prefill_line = 0;
 
-	if (!plane || !plane->state || !plane->state->fb ||
-			!plane->state->crtc || !plane->state->crtc->state) {
+	if (!plane || !plane->state || !plane->state->fb) {
 		SDE_ERROR("invalid parameters\n");
 		return 0;
 	}
 
 	sde_kms = _sde_plane_get_kms(plane);
 	state = plane->state;
-	cstate = state->crtc->state;
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
-	if (!rstate->rot_hw || !rstate->rot_hw->caps || !rstate->out_src_h ||
-			!sde_kms || !sde_kms->catalog) {
-		SDE_ERROR("invalid parameters\n");
+	if (!sde_kms || !sde_kms->catalog) {
+		SDE_ERROR("invalid kms\n");
 		return 0;
 	}
 
-	sde_format_get_block_size(rstate->out_fb_format, &blocksize,
-			&blocksize);
+	if (rstate->out_fb_format)
+		sde_format_get_block_size(rstate->out_fb_format,
+				&blocksize, &blocksize);
+
 	prefill_line = blocksize + sde_kms->catalog->sbuf_headroom;
 
 	SDE_DEBUG("plane%d prefill:%u\n", plane->base.id, prefill_line);
@@ -1317,7 +1414,7 @@
 	struct sde_plane_rot_state *rstate = pstate ? &pstate->rot : NULL;
 	bool sbuf_mode = rstate ? rstate->out_sbuf : false;
 
-	if (prefill && sbuf_mode)
+	if (prefill)
 		*prefill = sde_plane_rot_calc_prefill(plane);
 
 	return sbuf_mode;
@@ -2041,6 +2138,23 @@
 }
 
 /**
+ * sde_plane_rot_flush - perform final flush-related rotator operations
+ * @plane: Pointer to drm plane
+ * @pstate: Pointer to sde plane state
+ */
+static void sde_plane_rot_flush(struct drm_plane *plane,
+		struct sde_plane_state *pstate)
+{
+	if (!plane || !pstate || !pstate->rot.rot_hw ||
+			!pstate->rot.rot_hw->ops.commit)
+		return;
+
+	pstate->rot.rot_hw->ops.commit(pstate->rot.rot_hw,
+			&pstate->rot.rot_cmd,
+			SDE_HW_ROT_CMD_START);
+}
+
+/**
  * sde_plane_rot_destroy_state - destroy state for rotator stage
  * @plane: Pointer to drm plane
  * @state: Pointer to state to be destroyed
@@ -2331,16 +2445,16 @@
  * sde_plane_get_ctl_flush - get control flush for the given plane
  * @plane: Pointer to drm plane structure
  * @ctl: Pointer to hardware control driver
- * @flush: Pointer to flush control word
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
  */
 void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		u32 *flush)
+		u32 *flush_sspp, u32 *flush_rot)
 {
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
-	u32 bitmask;
 
-	if (!plane || !flush) {
+	if (!plane || !flush_sspp) {
 		SDE_ERROR("invalid parameters\n");
 		return;
 	}
@@ -2348,13 +2462,15 @@
 	pstate = to_sde_plane_state(plane->state);
 	rstate = &pstate->rot;
 
-	bitmask = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
+	*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, sde_plane_pipe(plane));
 
+	if (!flush_rot)
+		return;
+
+	*flush_rot = 0x0;
 	if (sde_plane_is_sbuf_mode(plane, NULL) && rstate->rot_hw &&
 			ctl->ops.get_bitmask_rot)
-		ctl->ops.get_bitmask_rot(ctl, &bitmask, rstate->rot_hw->idx);
-
-	*flush = bitmask;
+		ctl->ops.get_bitmask_rot(ctl, flush_rot, rstate->rot_hw->idx);
 }
 
 static int sde_plane_prepare_fb(struct drm_plane *plane,
@@ -2709,13 +2825,15 @@
 void sde_plane_flush(struct drm_plane *plane)
 {
 	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
 
-	if (!plane) {
+	if (!plane || !plane->state) {
 		SDE_ERROR("invalid plane\n");
 		return;
 	}
 
 	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
 
 	/*
 	 * These updates have to be done immediately before the plane flush
@@ -2736,7 +2854,10 @@
 
 	/* flag h/w flush complete */
 	if (plane->state)
-		to_sde_plane_state(plane->state)->pending = false;
+		pstate->pending = false;
+
+	/* signal inline rotator start */
+	sde_plane_rot_flush(plane, pstate);
 }
 
 static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
@@ -2833,6 +2954,10 @@
 		case PLANE_PROP_BLEND_OP:
 			/* no special action required */
 			break;
+		case PLANE_PROP_PREFILL_SIZE:
+		case PLANE_PROP_PREFILL_TIME:
+			pstate->dirty |= SDE_PLANE_DIRTY_PERF;
+			break;
 		case PLANE_PROP_ROT_DST_X:
 		case PLANE_PROP_ROT_DST_Y:
 		case PLANE_PROP_ROT_DST_W:
@@ -2965,6 +3090,23 @@
 		psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags,
 				pstate->multirect_index);
 
+		if (psde->pipe_hw->ops.setup_cdp) {
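+			/* derive CDP setup from the RT catalog entry and the
+			 * format: meta prefetch for UBWC, amortization for
+			 * UBWC/tile formats
+			 */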
+			struct sde_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+			memset(cdp_cfg, 0, sizeof(struct sde_hw_pipe_cdp_cfg));
+
+			cdp_cfg->enable = psde->catalog->perf.cdp_cfg
+					[SDE_PERF_CDP_USAGE_RT].rd_enable;
+			cdp_cfg->ubwc_meta_enable =
+					SDE_FORMAT_IS_UBWC(fmt);
+			cdp_cfg->tile_amortize_enable =
+					SDE_FORMAT_IS_UBWC(fmt) ||
+					SDE_FORMAT_IS_TILE(fmt);
+			cdp_cfg->preload_ahead = SDE_WB_CDP_PRELOAD_AHEAD_64;
+
+			psde->pipe_hw->ops.setup_cdp(psde->pipe_hw, cdp_cfg);
+		}
+
 		if (psde->pipe_hw->ops.setup_sys_cache) {
 			if (rstate->out_sbuf) {
 				if (rstate->nplane < 2)
@@ -2988,7 +3130,7 @@
 						SDE_PIPE_SC_OP_MODE_OFFLINE;
 				pstate->sc_cfg.rd_en = false;
 				pstate->sc_cfg.rd_scid = 0;
-				pstate->sc_cfg.rd_noallocate = false;
+				pstate->sc_cfg.rd_noallocate = true;
 				pstate->sc_cfg.rd_op_type =
 					SDE_PIPE_SC_RD_OP_TYPE_CACHEABLE;
 			}
@@ -3024,8 +3166,12 @@
 	if (plane->type != DRM_PLANE_TYPE_CURSOR) {
 		_sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
 		_sde_plane_set_ot_limit(plane, crtc);
+		if (pstate->dirty & SDE_PLANE_DIRTY_PERF)
+			_sde_plane_set_ts_prefill(plane, pstate);
 	}
 
+	_sde_plane_set_qos_remap(plane);
+
 	/* clear dirty */
 	pstate->dirty = 0x0;
 
@@ -3210,6 +3356,13 @@
 		msm_property_install_range(&psde->property_info, "color_fill",
 				0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
 
+	msm_property_install_range(&psde->property_info,
+			"prefill_size", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_SIZE);
+	msm_property_install_range(&psde->property_info,
+			"prefill_time", 0x0, 0, ~0, 0,
+			PLANE_PROP_PREFILL_TIME);
+
 	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
 	if (!info) {
 		SDE_ERROR("failed to allocate info memory\n");
@@ -3994,7 +4147,7 @@
 		uint32_t pipe, bool primary_plane,
 		unsigned long possible_crtcs, u32 master_plane_id)
 {
-	struct drm_plane *plane = NULL;
+	struct drm_plane *plane = NULL, *master_plane = NULL;
 	const struct sde_format_extended *format_list;
 	struct sde_format_extended *virt_format_list = NULL;
 	struct sde_plane *psde;
@@ -4038,6 +4191,13 @@
 	psde->pipe = pipe;
 	psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
 	psde->is_virtual = (master_plane_id != 0);
+	INIT_LIST_HEAD(&psde->mplane_list);
+	master_plane = drm_plane_find(dev, master_plane_id);
+	if (master_plane) {
+		struct sde_plane *mpsde = to_sde_plane(master_plane);
+
+		list_add_tail(&psde->mplane_list, &mpsde->mplane_list);
+	}
 
 	/* initialize underlying h/w driver */
 	psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 2056a70..f83a891 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -95,6 +95,7 @@
 #define SDE_PLANE_DIRTY_RECTS	0x1
 #define SDE_PLANE_DIRTY_FORMAT	0x2
 #define SDE_PLANE_DIRTY_SHARPEN	0x4
+#define SDE_PLANE_DIRTY_PERF	0x8
 #define SDE_PLANE_DIRTY_ALL	0xFFFFFFFF
 
 /**
@@ -109,6 +110,7 @@
  * @multirect_index: index of the rectangle of SSPP
  * @multirect_mode: parallel or time multiplex multirect mode
  * @pending:	whether the current update is still pending
+ * @cdp_cfg:	CDP configuration
  */
 struct sde_plane_state {
 	struct drm_plane_state base;
@@ -125,6 +127,8 @@
 	/* @sc_cfg: system_cache configuration */
 	struct sde_hw_pipe_sc_cfg sc_cfg;
 	struct sde_plane_rot_state rot;
+
+	struct sde_hw_pipe_cdp_cfg cdp_cfg;
 };
 
 /**
@@ -168,10 +172,11 @@
  * sde_plane_get_ctl_flush - get control flush mask
  * @plane:   Pointer to DRM plane object
  * @ctl: Pointer to control hardware
- * @flush: Pointer to updated flush mask
+ * @flush_sspp: Pointer to sspp flush control word
+ * @flush_rot: Pointer to rotator flush control word
  */
 void sde_plane_get_ctl_flush(struct drm_plane *plane, struct sde_hw_ctl *ctl,
-		u32 *flush);
+		u32 *flush_sspp, u32 *flush_rot);
 
 /**
  * sde_plane_is_sbuf_mode - return status of stream buffer mode
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 427a93b..6ad2c43 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -175,7 +175,7 @@
 	iter->type = type;
 }
 
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
 {
 	struct list_head *blk_list;
 
@@ -217,6 +217,17 @@
 	return false;
 }
 
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	bool ret;
+
+	mutex_lock(&rm->rm_lock);
+	ret = _sde_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return ret;
+}
+
 static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
 {
 	switch (type) {
@@ -288,6 +299,8 @@
 	sde_hw_mdp_destroy(rm->hw_mdp);
 	rm->hw_mdp = NULL;
 
+	mutex_destroy(&rm->rm_lock);
+
 	return 0;
 }
 
@@ -390,6 +403,9 @@
 
 	/* Clear, setup lists */
 	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
 	INIT_LIST_HEAD(&rm->rsvps);
 	for (type = 0; type < SDE_HW_BLK_MAX; type++)
 		INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -584,7 +600,7 @@
 
 	if (lm_cfg->dspp != DSPP_MAX) {
 		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
-		while (sde_rm_get_hw(rm, &iter)) {
+		while (_sde_rm_get_hw_locked(rm, &iter)) {
 			if (iter.blk->id == lm_cfg->dspp) {
 				*dspp = iter.blk;
 				break;
@@ -605,7 +621,7 @@
 	}
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (iter.blk->id == lm_cfg->pingpong) {
 			*pp = iter.blk;
 			break;
@@ -656,7 +672,7 @@
 	/* Find a primary mixer */
 	sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
 	while (lm_count != reqs->topology->num_lm &&
-			sde_rm_get_hw(rm, &iter_i)) {
+			_sde_rm_get_hw_locked(rm, &iter_i)) {
 		memset(&lm, 0, sizeof(lm));
 		memset(&dspp, 0, sizeof(dspp));
 		memset(&pp, 0, sizeof(pp));
@@ -675,7 +691,7 @@
 		sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
 
 		while (lm_count != reqs->topology->num_lm &&
-				sde_rm_get_hw(rm, &iter_j)) {
+				_sde_rm_get_hw_locked(rm, &iter_j)) {
 			if (iter_i.blk == iter_j.blk)
 				continue;
 
@@ -711,7 +727,7 @@
 		/* reserve a free PINGPONG_SLAVE block */
 		rc = -ENAVAIL;
 		sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
-		while (sde_rm_get_hw(rm, &iter_i)) {
+		while (_sde_rm_get_hw_locked(rm, &iter_i)) {
 			struct sde_pingpong_cfg *pp_cfg =
 				(struct sde_pingpong_cfg *)
 				(iter_i.blk->catalog);
@@ -742,7 +758,7 @@
 	memset(&ctls, 0, sizeof(ctls));
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		unsigned long caps;
 		bool has_split_display, has_ppsplit;
 
@@ -793,7 +809,7 @@
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
 
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
 			continue;
 
@@ -820,7 +836,7 @@
 	struct sde_cdm_cfg *cdm;
 
 	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		bool match = false;
 
 		if (RESERVED_BY_OTHER(iter.blk, rsvp))
@@ -865,7 +881,7 @@
 
 	/* Find the block entry in the rm, and note the reservation */
 	sde_rm_init_hw_iter(&iter, 0, type);
-	while (sde_rm_get_hw(rm, &iter)) {
+	while (_sde_rm_get_hw_locked(rm, &iter)) {
 		if (iter.blk->id != id)
 			continue;
 
@@ -1071,7 +1087,7 @@
  * @rm:	KMS handle
  * @rsvp:	RSVP pointer to release and release resources for
  */
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
 		struct drm_connector *conn)
@@ -1123,16 +1139,18 @@
 		return;
 	}
 
+	mutex_lock(&rm->rm_lock);
+
 	rsvp = _sde_rm_get_rsvp(rm, enc);
 	if (!rsvp) {
 		SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
-		return;
+		goto end;
 	}
 
 	conn = _sde_rm_get_connector(enc);
 	if (!conn) {
 		SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
-		return;
+		goto end;
 	}
 
 	top_ctrl = sde_connector_get_property(conn->state,
@@ -1152,6 +1170,9 @@
 				CONNECTOR_PROP_TOPOLOGY_NAME,
 				SDE_RM_TOPOLOGY_NONE);
 	}
+
+end:
+	mutex_unlock(&rm->rm_lock);
 }
 
 static int _sde_rm_commit_rsvp(
@@ -1219,13 +1240,15 @@
 			crtc_state->crtc->base.id, test_only);
 	SDE_EVT32(enc->base.id, conn_state->connector->base.id);
 
+	mutex_lock(&rm->rm_lock);
+
 	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
 
 	ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
 			conn_state, &reqs);
 	if (ret) {
 		SDE_ERROR("failed to populate hw requirements\n");
-		return ret;
+		goto end;
 	}
 
 	/*
@@ -1240,8 +1263,10 @@
 	 * replace the current with the next.
 	 */
 	rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
-	if (!rsvp_nxt)
-		return -ENOMEM;
+	if (!rsvp_nxt) {
+		ret = -ENOMEM;
+		goto end;
+	}
 
 	rsvp_cur = _sde_rm_get_rsvp(rm, enc);
 
@@ -1293,5 +1318,8 @@
 
 	_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
 
+end:
+	mutex_unlock(&rm->rm_lock);
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 059952a..b4a801a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -72,6 +72,7 @@
  * @hw_mdp: hardware object for mdp_top
  * @lm_max_width: cached layer mixer maximum width
  * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
  */
 struct sde_rm {
 	struct drm_device *dev;
@@ -80,6 +81,7 @@
 	struct sde_hw_mdp *hw_mdp;
 	uint32_t lm_max_width;
 	uint32_t rsvp_next_seq;
+	struct mutex rm_lock;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 2a4e6b5..6962bef 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,15 +24,15 @@
 
 TRACE_EVENT(sde_perf_set_qos_luts,
 	TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
-		u32 lut, bool linear),
-	TP_ARGS(pnum, fmt, rt, fl, lut, linear),
+		u32 lut, u32 lut_usage),
+	TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
 	TP_STRUCT__entry(
 			__field(u32, pnum)
 			__field(u32, fmt)
 			__field(bool, rt)
 			__field(u32, fl)
-			__field(u32, lut)
-			__field(bool, linear)
+			__field(u64, lut)
+			__field(u32, lut_usage)
 	),
 	TP_fast_assign(
 			__entry->pnum = pnum;
@@ -40,12 +40,12 @@
 			__entry->rt = rt;
 			__entry->fl = fl;
 			__entry->lut = lut;
-			__entry->linear = linear;
+			__entry->lut_usage = lut_usage;
 	),
-	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%x lin=%d",
+	TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
 			__entry->pnum, __entry->fmt,
 			__entry->rt, __entry->fl,
-			__entry->lut, __entry->linear)
+			__entry->lut, __entry->lut_usage)
 );
 
 TRACE_EVENT(sde_perf_set_danger_luts,
@@ -159,27 +159,69 @@
 			__get_str(counter_name), __entry->value)
 )
 
+#define SDE_TRACE_EVTLOG_SIZE	15
 TRACE_EVENT(sde_evtlog,
-	TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
-	TP_ARGS(tag, tag_id, value1, value2),
+	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 data[]),
+	TP_ARGS(tag, tag_id, cnt, data),
 	TP_STRUCT__entry(
 			__field(int, pid)
 			__string(evtlog_tag, tag)
 			__field(u32, tag_id)
-			__field(u64, value1)
-			__field(u64, value2)
+			__array(u32, data, SDE_TRACE_EVTLOG_SIZE)
 	),
 	TP_fast_assign(
 			__entry->pid = current->tgid;
 			__assign_str(evtlog_tag, tag);
 			__entry->tag_id = tag_id;
-			__entry->value1 = value1;
-			__entry->value2 = value2;
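+			/* clamp cnt to the trace buffer size, zero-fill tail */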
+			if (cnt > SDE_TRACE_EVTLOG_SIZE)
+				cnt = SDE_TRACE_EVTLOG_SIZE;
+			memcpy(__entry->data, data, cnt * sizeof(u32));
+			memset(&__entry->data[cnt], 0,
+				(SDE_TRACE_EVTLOG_SIZE - cnt) * sizeof(u32));
 	),
-	TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
-			__entry->tag_id, __entry->value1, __entry->value2)
+	TP_printk("%d|%s:%d|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u|%u",
+			__entry->pid, __get_str(evtlog_tag),
+			__entry->tag_id,
+			__entry->data[0], __entry->data[1],
+			__entry->data[2], __entry->data[3],
+			__entry->data[4], __entry->data[5],
+			__entry->data[6], __entry->data[7],
+			__entry->data[8], __entry->data[9],
+			__entry->data[10], __entry->data[11],
+			__entry->data[12], __entry->data[13],
+			__entry->data[14])
 )
 
+TRACE_EVENT(sde_perf_crtc_update,
+	TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate,
+		bool stop_req, u32 update_bus, u32 update_clk),
+	TP_ARGS(crtc, bw_ctl, core_clk_rate,
+		stop_req, update_bus, update_clk),
+	TP_STRUCT__entry(
+			__field(u32, crtc)
+			__field(u64, bw_ctl)
+			__field(u32, core_clk_rate)
+			__field(bool, stop_req)
+			__field(u32, update_bus)
+			__field(u32, update_clk)
+	),
+	TP_fast_assign(
+			__entry->crtc = crtc;
+			__entry->bw_ctl = bw_ctl;
+			__entry->core_clk_rate = core_clk_rate;
+			__entry->stop_req = stop_req;
+			__entry->update_bus = update_bus;
+			__entry->update_clk = update_clk;
+	),
+	 TP_printk("crtc=%d bw=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+			 __entry->crtc,
+			 __entry->bw_ctl,
+			 __entry->core_clk_rate,
+			 __entry->stop_req,
+			 __entry->update_bus,
+			 __entry->update_clk)
+);
+
 #define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
 #define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
 #define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
index c0c8248..e63fe8c 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -210,6 +210,81 @@
 	return;
 }
 
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	const struct sde_vbif_qos_tbl *qos_tbl;
+	int i;
+
+	if (!sde_kms || !params || !sde_kms->hw_mdp) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = sde_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid vbif %d\n", params->vbif_idx);
+		return;
+	}
+
+	if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+		SDE_DEBUG("qos remap not supported\n");
+		return;
+	}
+
+	qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+			&vbif->cap->qos_nrt_tbl;
+
+	if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+		SDE_DEBUG("qos tbl not defined\n");
+		return;
+	}
+
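+	/* force the client clock on while the remap levels are programmed */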
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+		SDE_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+				params->vbif_idx, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+		vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+				qos_tbl->priority_lvl[i]);
+	}
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms)
+{
+	struct sde_hw_vbif *vbif;
+	int i, j;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid argument\n");
+		return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		vbif = sde_kms->hw_vbif[i];
+		if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+			for (j = 0; j < vbif->cap->memtype_count; j++)
+				vbif->ops.set_mem_type(
+						vbif, j, vbif->cap->memtype[j]);
+		}
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
 {
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
index 4b1cb1c..f1da68b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_vbif.h
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -27,6 +27,29 @@
 	u32 clk_ctrl;
 };
 
+struct sde_vbif_set_memtype_params {
+	u32 xin_id;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+	bool is_cacheable;
+};
+
+/**
+ * struct sde_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct sde_vbif_set_qos_params {
+	u32 vbif_idx;
+	u32 xin_id;
+	u32 clk_ctrl;
+	u32 num;
+	bool is_rt;
+};
+
 /**
  * sde_vbif_set_ot_limit - set OT limit for vbif client
  * @sde_kms:	SDE handler
@@ -35,6 +58,20 @@
 void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
 		struct sde_vbif_set_ot_params *params);
 
+/**
+ * sde_vbif_set_qos_remap - set QoS priority level remap
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to QoS configuration parameters
+ */
+void sde_vbif_set_qos_remap(struct sde_kms *sde_kms,
+		struct sde_vbif_set_qos_params *params);
+
+/**
+ * sde_vbif_init_memtypes - initialize xin memory types for vbif
+ * @sde_kms:	SDE handler
+ */
+void sde_vbif_init_memtypes(struct sde_kms *sde_kms);
+
 #ifdef CONFIG_DEBUG_FS
 int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
 void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index b2665be..ceda16e 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -273,6 +273,7 @@
 		return -EINVAL;
 	}
 
+	memset(info, 0, sizeof(struct msm_display_info));
 	info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
 	info->num_of_h_tiles = 1;
 	info->h_tile_instance[0] = sde_wb_get_index(display);
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 699396f..67c664f 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -99,8 +99,7 @@
 	evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
 	evtlog->last++;
 
-	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
-			i > 1 ? log->data[1] : 0);
+	trace_sde_evtlog(name, line, log->data_cnt, log->data);
 exit:
 	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
 }
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
new file mode 100644
index 0000000..12165e8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+	RESERVED,
+	AUDIO_DATA_BLOCK,
+	VIDEO_DATA_BLOCK,
+	VENDOR_SPECIFIC_DATA_BLOCK,
+	SPEAKER_ALLOCATION_DATA_BLOCK,
+	VESA_DTC_DATA_BLOCK,
+	RESERVED2,
+	USE_EXTENDED_TAG
+};
+
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == ext_id)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+	return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+static int
+sde_cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+static int
+sde_cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
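+/* walk CEA data blocks: a one-byte tag/length header plus payload each */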
+#define sde_for_each_cea_db(cea, i, start, end) \
+for ((i) = (start); \
+(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+				(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if (sde_cea_db_tag(db) == blk_id)
+				return db;
+		}
+	}
+	return NULL;
+}
+
+
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+	 *   collection present and no DTD data present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		if ((offset + block_len <= dbc_offset) &&
+		    (in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+				type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+
+	return NULL;
+}
+
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+		edid_ctrl->edid->mfg_id[1];
+
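+	/* the 16-bit mfg ID packs three 5-bit letters, with 'A' encoded as 1 */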
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+		u32 video_format)
+{
+	u8 cea_mode = 0;
+	struct drm_display_mode *mode;
+
+	/* Need to add Y420 support flag to the modes */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		cea_mode = drm_match_cea_mode(mode);
+		if ((cea_mode != 0) && (cea_mode == video_format)) {
+			SDE_EDID_DEBUG("%s found match for %d ", __func__,
+			video_format);
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+		}
+	}
+}
+
+static void sde_edid_parse_Y420CMDB(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+		const u8 *db)
+{
+	u32 offset = 0;
+	u8 len = 0;
+	u8 svd_len = 0;
+	const u8 *svd = NULL;
+	u32 i = 0, j = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: edid_ctrl is NULL\n", __func__);
+		return;
+	}
+
+	if (!db) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	len = db[0] & 0x1f;
+
+	if (len < 7)
+		return;
+	/* bytes 3 to L+1 carry the YCbCr 4:2:0 capability bit map */
+	offset += 2;
+
+	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+
+	if (svd) {
+		/*moving to the next byte as vic info begins there*/
+		++svd;
+		svd_len = svd[0] & 0x1f;
+	}
+
+	for (i = 0; i < svd_len; i++, j++) {
+		video_format = *svd & 0x7F;
+		if (db[offset] & (1 << j))
+			sde_edid_set_y420_support(connector, video_format);
+
+		if (j & 0x80) {
+			j = j/8;
+			offset++;
+			if (offset >= len)
+				break;
+		}
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+static void sde_edid_parse_Y420VDB(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+		const u8 *db)
+{
+	u8 len = db[0] & 0x1f;
+	u32 i = 0;
+	u32 video_format = 0;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	/* Offset to byte 3 */
+	db += 2;
+	for (i = 0; i < len - 1; i++) {
+		video_format = *(db + i) & 0x7F;
+		/*
+		 * mode was already added in get_modes()
+		 * only need to set the Y420 support flag
+		 */
+		sde_edid_set_y420_support(connector, video_format);
+	}
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_mode_format(
+		struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+	const u8 *db = NULL;
+	struct drm_display_mode *mode;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	/* Set YUV mode support flags for YCbcr420VDB */
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_VIDEO_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+	/* Set RGB supported on all modes where YUV is not set */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+	}
+
+
+	db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+			Y420_CAPABILITY_MAP_DATA_BLOCK);
+	if (db)
+		sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+	else
+		SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_extract_audio_data_blocks(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len = 0;
+	u8 adb_max = 0;
+	const u8 *adb = NULL;
+	u32 offset = DBC_START_OFFSET;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	edid_ctrl->adb_size = 0;
+
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+
+	do {
+		len = 0;
+		adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+			&len);
+
+		if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+			adb_max >= MAX_NUMBER_ADB)) {
+			if (!edid_ctrl->adb_size) {
+				SDE_DEBUG("No/Invalid Audio Data Block\n");
+				return;
+			}
+
+			/* stop rather than re-reading the same invalid block */
+			break;
+		}
+
+		memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+			adb + 1, len);
+		offset = (adb - cea) + 1 + len;
+
+		edid_ctrl->adb_size += len;
+		adb_max++;
+	} while (adb);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void _sde_edid_extract_speaker_allocation_data(
+	struct sde_edid_ctrl *edid_ctrl)
+{
+	u8 len;
+	const u8 *sadb = NULL;
+	u8 *cea = NULL;
+
+	if (!edid_ctrl) {
+		SDE_ERROR("invalid edid_ctrl\n");
+		return;
+	}
+	SDE_EDID_DEBUG("%s +", __func__);
+	cea = sde_find_cea_extension(edid_ctrl->edid);
+	if (!cea) {
+		SDE_DEBUG("CEA extension not found\n");
+		return;
+	}
+
+	sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+		SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+		SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+		return;
+	}
+
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+	edid_ctrl->sadb_size = len;
+
+	SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+	struct sde_edid_ctrl *edid_ctrl = NULL;
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl) {
+		SDE_ERROR("edid_ctrl alloc failed\n");
+		return NULL;
+	}
+	memset((edid_ctrl), 0, sizeof(*edid_ctrl));
+	SDE_EDID_DEBUG("%s -\n", __func__);
+	return edid_ctrl;
+}
+
+void sde_free_edid(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	kfree(edid_ctrl->edid);
+	edid_ctrl->edid = NULL;
+}
+
+void sde_edid_deinit(void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	sde_free_edid((void *)&edid_ctrl);
+	kfree(edid_ctrl);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+int _sde_edid_update_modes(struct drm_connector *connector,
+	void *input)
+{
+	int rc = 0;
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (edid_ctrl->edid) {
+		drm_mode_connector_update_edid_property(connector,
+			edid_ctrl->edid);
+
+		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+		sde_edid_set_mode_format(connector, edid_ctrl);
+		SDE_EDID_DEBUG("%s -", __func__);
+		return rc;
+	}
+
+	drm_mode_connector_update_edid_property(connector, NULL);
+	SDE_EDID_DEBUG("%s null edid -", __func__);
+	return rc;
+}
+
+bool sde_detect_hdmi_monitor(void *input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+	return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+void sde_get_edid(struct drm_connector *connector,
+				  struct i2c_adapter *adapter, void **input)
+{
+	struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	edid_ctrl->edid = drm_get_edid(connector, adapter);
+	SDE_EDID_DEBUG("%s +\n", __func__);
+
+	if (!edid_ctrl->edid)
+		SDE_ERROR("EDID read failed\n");
+
+	if (edid_ctrl->edid) {
+		sde_edid_extract_vendor_id(edid_ctrl);
+		_sde_edid_extract_audio_data_blocks(edid_ctrl);
+		_sde_edid_extract_speaker_allocation_data(edid_ctrl);
+	}
+	SDE_EDID_DEBUG("%s -\n", __func__);
+}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
new file mode 100644
index 0000000..1143dc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#define EDID_VENDOR_ID_SIZE     4
+
+#define SDE_CEA_EXT    0x02
+#define SDE_EXTENDED_TAG 0x07
+
+enum extended_data_block_types {
+	VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+	VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+	HDMI_VIDEO_DATA_BLOCK = 0x04,
+	HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+	VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+	INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...)   SDE_DEBUG(fmt, ##args)
+#endif
+
+/*
+ * struct sde_edid_hdr_data - HDR static metadata parsed from the EDID
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+	u32 eotf;
+	bool metadata_type_one;
+	u32 max_luminance;
+	u32 avg_luminance;
+	u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+	u32 max_pclk_in_hz;
+	bool scdc_present;
+	bool scramble_support; /* scramble support for less than 340Mcsc */
+	bool read_req_support;
+	bool osd_disparity;
+	bool dual_view_support;
+	bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+	struct edid *edid;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+	bool hdr_supported;
+	char vendor_id[EDID_VENDOR_ID_SIZE];
+	struct sde_edid_sink_caps sink_caps;
+	struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - allocate and initialize an edid_ctrl structure.
+ *
+ * Return: handle to sde_edid_ctrl for the client.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector:   Handle to the drm_connector.
+ * @adapter:     handle to i2c adapter for DDC read
+ * @edid_ctrl:   Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+		struct i2c_adapter *adapter, void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - check whether the sink is an HDMI monitor.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: true if the EDID identifies an HDMI sink, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate the connector with EDID modes.
+ * @connector:     Handle to the drm_connector.
+ * @edid_ctrl:     Handle to the edid_ctrl structure.
+ *
+ * Return: number of modes added to the connector.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+							void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 1e4f6b1..452a3be 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -333,6 +333,31 @@
 		return -EINVAL;
 	}
 
+	pdbus->ab_rt = ab_quota_rt;
+	pdbus->ib_rt = ib_quota_rt;
+	pdbus->ab_nrt = ab_quota_nrt;
+	pdbus->ib_nrt = ib_quota_nrt;
+
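+	/* floor the cached votes to the minimum enable/disable bus quotas */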
+	if (pdbus->enable) {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);
+	} else {
+		ab_quota_rt = max_t(u64, ab_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_rt = max_t(u64, ib_quota_rt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+		ab_quota_nrt = max_t(u64, ab_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA);
+		ib_quota_nrt = max_t(u64, ib_quota_nrt,
+				SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA);
+	}
+
 	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt)  {
 		new_uc_idx = 0;
 	} else {
@@ -343,7 +368,6 @@
 		u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
 		u32 total_axi_port_cnt = pdbus->axi_port_cnt;
 		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
-		int match_cnt = 0;
 
 		if (!bw_table || !total_axi_port_cnt ||
 		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
@@ -383,20 +407,6 @@
 			}
 		}
 
-		for (i = 0; i < total_axi_port_cnt; i++) {
-			vect = &bw_table->usecase
-				[pdbus->curr_bw_uc_idx].vectors[i];
-			/* avoid performing updates for small changes */
-			if ((ab_quota[i] == vect->ab) &&
-				(ib_quota[i] == vect->ib))
-				match_cnt++;
-		}
-
-		if (match_cnt == total_axi_port_cnt) {
-			pr_debug("skip BW vote\n");
-			return 0;
-		}
-
 		new_uc_idx = (pdbus->curr_bw_uc_idx %
 			(bw_table->num_usecases - 1)) + 1;
 
@@ -571,19 +581,12 @@
 							bool enable)
 {
 	int rc = 0;
-	u64 ab_quota_rt, ab_quota_nrt;
-	u64 ib_quota_rt, ib_quota_nrt;
 
-	ab_quota_rt = ab_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA;
-	ib_quota_rt = ib_quota_nrt = enable ?
-			SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA :
-			SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA;
+	pdbus->enable = enable;
 
 	if (pdbus->data_bus_hdl)
-		rc = _sde_power_data_bus_set_quota(pdbus, ab_quota_rt,
-				ab_quota_nrt, ib_quota_rt, ib_quota_nrt);
+		rc = _sde_power_data_bus_set_quota(pdbus, pdbus->ab_rt,
+				pdbus->ab_nrt, pdbus->ib_rt, pdbus->ib_nrt);
 
 	if (rc)
 		pr_err("failed to set data bus vote rc=%d enable:%d\n",
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 38bf21f..c526b71 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -16,9 +16,9 @@
 
 #define MAX_CLIENT_NAME_LEN 128
 
-#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	6000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA	0
-#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	6000000000
+#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	2000000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
 #include <linux/sde_io_util.h>
@@ -93,6 +93,11 @@
  * @bus_channels: number of memory bus channels
  * @curr_bw_uc_idx: current use case index of data bus
  * @ao_bw_uc_idx: active only use case index of data bus
+ * @ab_rt: realtime ab quota
+ * @ib_rt: realtime ib quota
+ * @ab_nrt: non-realtime ab quota
+ * @ib_nrt: non-realtime ib quota
+ * @enable: true if bus is enabled
  */
 struct sde_power_data_bus_handle {
 	struct msm_bus_scale_pdata *data_bus_scale_table;
@@ -102,6 +107,11 @@
 	u32 bus_channels;
 	u32 curr_bw_uc_idx;
 	u32 ao_bw_uc_idx;
+	u64 ab_rt;
+	u64 ib_rt;
+	u64 ab_nrt;
+	u64 ib_nrt;
+	bool enable;
 };
 
 /*
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index cab7e0f..ac79968 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -31,16 +31,16 @@
 #include "sde_dbg.h"
 
 /* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */
-#define TCS_CASE_EXECUTION_TIME				1064000
+#define SINGLE_TCS_EXECUTION_TIME				1064000
 
 /* this time is ~1ms - only wake tcs in any mode */
-#define RSC_BACKOFF_TIME_NS		 (TCS_CASE_EXECUTION_TIME + 100)
+#define RSC_BACKOFF_TIME_NS		 (SINGLE_TCS_EXECUTION_TIME + 100)
 
 /* this time is ~1ms - only wake TCS in mode-0 */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS	((TCS_CASE_EXECUTION_TIME >> 1) + 100)
+#define RSC_MODE_THRESHOLD_TIME_IN_NS	(SINGLE_TCS_EXECUTION_TIME + 100)
 
 /* this time is ~2ms - sleep+ wake TCS in mode-1 */
-#define RSC_TIME_SLOT_0_NS		((TCS_CASE_EXECUTION_TIME * 2) + 100)
+#define RSC_TIME_SLOT_0_NS		((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER		5
@@ -123,6 +123,7 @@
 void sde_rsc_client_destroy(struct sde_rsc_client *client)
 {
 	struct sde_rsc_priv *rsc;
+	enum sde_rsc_state state;
 
 	if (!client) {
 		pr_debug("invalid client\n");
@@ -138,9 +139,13 @@
 		goto end;
 
 	mutex_lock(&rsc->client_lock);
-	if (client->current_state != SDE_RSC_IDLE_STATE)
+	state = client->current_state;
+	mutex_unlock(&rsc->client_lock);
+
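+	/* request the idle transition without holding client_lock */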
+	if (state != SDE_RSC_IDLE_STATE)
 		sde_rsc_client_state_update(client, SDE_RSC_IDLE_STATE,
 								NULL, -1);
+	mutex_lock(&rsc->client_lock);
 	list_del_init(&client->list);
 	mutex_unlock(&rsc->client_lock);
 
@@ -215,6 +220,39 @@
 }
 EXPORT_SYMBOL(sde_rsc_unregister_event);
 
+bool is_sde_rsc_available(int rsc_index)
+{
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return false;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(is_sde_rsc_available);
+
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return SDE_RSC_IDLE_STATE;
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	return rsc->current_state;
+}
+EXPORT_SYMBOL(get_sde_rsc_current_state);
+
 static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
 {
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 3332a05..e5ae0ad 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -206,7 +206,7 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
 						0xa7e9a920, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
-						0x002079e7, rsc->debug_mode);
+						0x002089e7, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 8894fee..cbacbb6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -130,7 +130,7 @@
 		poll = false;
 	}
 
-	if (list_empty(&therm->alarm.head) && poll)
+	if (poll)
 		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
 	spin_unlock_irqrestore(&therm->lock, flags);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@
 	spin_unlock_irqrestore(&fan->lock, flags);
 
 	/* schedule next fan update, if not at target speed already */
-	if (list_empty(&fan->alarm.head) && target != duty) {
+	if (target != duty) {
 		u16 bump_period = fan->bios.bump_period;
 		u16 slow_down_period = fan->bios.slow_down_period;
 		u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@
 	duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
 	nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
 
-	if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+	if (percent != (duty * 100)) {
 		u64 next_change = (percent * fan->period_us) / 100;
 		if (!duty)
 			next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c0..9a79e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@
 	spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
 
 	/* schedule the next poll in one second */
-	if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+	if (therm->func->temp_get(therm) >= 0)
 		nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 07dc82b..f2a86ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,23 +36,29 @@
 	unsigned long flags;
 	LIST_HEAD(exec);
 
-	/* move any due alarms off the pending list */
+	/* Process pending alarms. */
 	spin_lock_irqsave(&tmr->lock, flags);
 	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
-		if (alarm->timestamp <= nvkm_timer_read(tmr))
-			list_move_tail(&alarm->head, &exec);
+		/* Have we hit the earliest alarm that hasn't gone off? */
+		if (alarm->timestamp > nvkm_timer_read(tmr)) {
+			/* Schedule it.  If we didn't race, we're done. */
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			if (alarm->timestamp > nvkm_timer_read(tmr))
+				break;
+		}
+
+		/* Move to completed list.  We'll drop the lock before
+		 * executing the callback so it can reschedule itself.
+		 */
+		list_move_tail(&alarm->head, &exec);
 	}
 
-	/* reschedule interrupt for next alarm time */
-	if (!list_empty(&tmr->alarms)) {
-		alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
-		tmr->func->alarm_init(tmr, alarm->timestamp);
-	} else {
+	/* Shut down interrupt if no more pending alarms. */
+	if (list_empty(&tmr->alarms))
 		tmr->func->alarm_fini(tmr);
-	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
 
-	/* execute any pending alarm handlers */
+	/* Execute completed callbacks. */
 	list_for_each_entry_safe(alarm, atemp, &exec, head) {
 		list_del_init(&alarm->head);
 		alarm->func(alarm);
@@ -65,24 +71,37 @@
 	struct nvkm_alarm *list;
 	unsigned long flags;
 
-	alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
-	/* append new alarm to list, in soonest-alarm-first order */
+	/* Remove alarm from pending list.
+	 *
+	 * This both protects against the corruption of the list,
+	 * and implements alarm rescheduling/cancellation.
+	 */
 	spin_lock_irqsave(&tmr->lock, flags);
-	if (!nsec) {
-		if (!list_empty(&alarm->head))
-			list_del(&alarm->head);
-	} else {
+	list_del_init(&alarm->head);
+
+	if (nsec) {
+		/* Insert into pending list, ordered earliest to latest. */
+		alarm->timestamp = nvkm_timer_read(tmr) + nsec;
 		list_for_each_entry(list, &tmr->alarms, head) {
 			if (list->timestamp > alarm->timestamp)
 				break;
 		}
+
 		list_add_tail(&alarm->head, &list->head);
+
+		/* Update HW if this is now the earliest alarm. */
+		list = list_first_entry(&tmr->alarms, typeof(*list), head);
+		if (list == alarm) {
+			tmr->func->alarm_init(tmr, alarm->timestamp);
+			/* This shouldn't happen if callers aren't stupid.
+			 *
+			 * Worst case scenario is that it'll take roughly
+			 * 4 seconds for the next alarm to trigger.
+			 */
+			WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+		}
 	}
 	spin_unlock_irqrestore(&tmr->lock, flags);
-
-	/* process pending alarms */
-	nvkm_timer_alarm_trigger(tmr);
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@
 	u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
 
 	if (stat & 0x00000001) {
-		nvkm_timer_alarm_trigger(tmr);
 		nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+		nvkm_timer_alarm_trigger(tmr);
 		stat &= ~0x00000001;
 	}
 
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7ba4508..ea36dc4 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f6ff41a..edee6a5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7416,7 +7416,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -7446,7 +7446,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b6b576..6068b8a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4933,7 +4933,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -4964,7 +4964,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a951881..f2eac6b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3995,7 +3995,7 @@
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
 		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-			tmp = RREG32(DC_HPD5_INT_CONTROL);
+			tmp = RREG32(DC_HPD6_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e0c143b..30bd4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	48
+#define KMS_DRIVER_MINOR	49
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index deb9511..3168567 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-	args->vram_size = rdev->mc.real_vram_size;
-	args->vram_visible = (u64)man->size << PAGE_SHIFT;
+	args->vram_size = (u64)man->size << PAGE_SHIFT;
+	args->vram_visible = rdev->mc.visible_vram_size;
 	args->vram_visible -= rdev->vram_pin_size;
 	args->gart_size = rdev->mc.gtt_size;
 	args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 877af4a..3333e8a 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6330,7 +6330,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -6361,7 +6361,7 @@
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 35cc16f..c18fc31 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1602,7 +1602,14 @@
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool interruptible, bool no_wait)
 {
-	long timeout = no_wait ? 0 : 15 * HZ;
+	long timeout = 15 * HZ;
+
+	if (no_wait) {
+		if (reservation_object_test_signaled_rcu(bo->resv, true))
+			return 0;
+		else
+			return -EBUSY;
+	}
 
 	timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
 						      interruptible, timeout);
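
With this change ttm_bo_wait() treats no_wait as a pure query: it tests whether the reservation object's fences are already signaled instead of issuing a zero-length wait. A small stand-alone sketch of the same caller-visible behaviour, with a toy reservation object standing in for the real fence API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a reservation object: just a "signaled" flag. */
struct toy_resv {
	bool signaled;
};

/*
 * Mirrors the reworked wait: with no_wait the state is only queried,
 * never slept on; otherwise a bounded wait would happen.
 */
static int toy_bo_wait(struct toy_resv *resv, bool no_wait)
{
	int polls = 0;

	if (no_wait)
		return resv->signaled ? 0 : -EBUSY;

	/* bounded poll standing in for the real 15 second fence wait */
	while (!resv->signaled && polls++ < 1000)
		;	/* a real implementation sleeps on the fences here */

	return resv->signaled ? 0 : -EBUSY;
}

int main(void)
{
	struct toy_resv r = { .signaled = false };

	printf("no_wait poll: %d\n", toy_bo_wait(&r, true));	/* -EBUSY */
	r.signaled = true;
	printf("no_wait poll: %d\n", toy_bo_wait(&r, true));	/* 0 */
	return 0;
}
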
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index e5cfd69..58ef5ee 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -114,6 +114,7 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_VBIF_GX_RESET_STATUS           0x215
 #define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
 #define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
 #define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
@@ -782,6 +783,7 @@
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
 #define A6XX_GMU_CM3_ITCM_START			0x1B400
 #define A6XX_GMU_CM3_DTCM_START			0x1C400
+#define A6XX_GMU_NMI_CONTROL_STATUS		0x1CBF0
 #define A6XX_GMU_BOOT_SLUMBER_OPTION		0x1CBF8
 #define A6XX_GMU_GX_VOTE_IDX			0x1CBF9
 #define A6XX_GMU_MX_VOTE_IDX			0x1CBFA
@@ -793,6 +795,22 @@
 #define A6XX_GMU_CM3_BOOT_CONFIG		0x1F801
 #define A6XX_GMU_CM3_FW_BUSY			0x1F81A
 #define A6XX_GMU_CM3_FW_INIT_RESULT		0x1F81C
+#define A6XX_GMU_CM3_CFG			0x1F82D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE	0x1F840
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0	0x1F841
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1	0x1F842
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L	0x1F844
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H	0x1F845
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L	0x1F846
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H	0x1F847
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L	0x1F848
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H	0x1F849
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L	0x1F84A
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H	0x1F84B
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L	0x1F84C
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H	0x1F84D
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L	0x1F84E
+#define A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H	0x1F84F
 #define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL	0x1F8C0
 #define A6XX_GMU_PWR_COL_INTER_FRAME_HYST	0x1F8C1
 #define A6XX_GMU_PWR_COL_SPTPRAC_HYST		0x1F8C2
@@ -802,6 +820,8 @@
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE    0x1F8EC
 #define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
+#define A6XX_GMU_LLM_GLM_SLEEP_CTRL		0x1F957
+#define A6XX_GMU_LLM_GLM_SLEEP_STATUS		0x1F958
 
 /* HFI registers*/
 #define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
@@ -831,6 +851,10 @@
 #define A6XX_GMU_HOST2GMU_INTR_INFO_3		0x1F99E
 #define A6XX_GMU_GENERAL_7			0x1F9CC
 
+/* ISENSE registers */
+#define A6XX_GMU_ISENSE_CTRL			0x1F95D
+#define A6XX_GPU_CS_ENABLE_REG			0x23120
+
 #define A6XX_GMU_AO_INTERRUPT_EN		0x23B03
 #define A6XX_GMU_AO_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_AO_HOST_INTERRUPT_STATUS	0x23B05
@@ -839,6 +863,8 @@
 #define A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL      0x23B0A
 #define A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL       0x23B0B
 #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2	0x23B0D
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK	0x23B0E
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
@@ -851,6 +877,9 @@
 #define A6XX_GMU_AHB_FENCE_RANGE_0		0x23B11
 #define A6XX_GMU_AHB_FENCE_RANGE_1		0x23B12
 
+/* GPUCC registers */
+#define A6XX_GPU_CC_GX_GDSCR                   0x24403
+
 /* GPU RSC sequencer registers */
 #define	A6XX_RSCC_PDC_SEQ_START_ADDR			0x23408
 #define A6XX_RSCC_PDC_MATCH_VALUE_LO			0x23409
@@ -865,6 +894,10 @@
 #define A6XX_RSCC_OVERRIDE_START_ADDR			0x23500
 #define A6XX_RSCC_SEQ_BUSY_DRV0				0x23501
 #define A6XX_RSCC_SEQ_MEM_0_DRV0			0x23580
+#define A6XX_RSCC_TCS0_DRV0_STATUS			0x23746
+#define A6XX_RSCC_TCS1_DRV0_STATUS			0x238AE
+#define A6XX_RSCC_TCS2_DRV0_STATUS			0x23A16
+#define A6XX_RSCC_TCS3_DRV0_STATUS			0x23B7E
 
 /* GPU PDC sequencer registers in AOSS.RPMh domain */
 #define	PDC_GPU_ENABLE_PDC			0x21140
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 2e92335..f581cff 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -56,9 +56,6 @@
 #define DRIVER_VERSION_MAJOR   3
 #define DRIVER_VERSION_MINOR   1
 
-/* Number of times to try hard reset */
-#define NUM_TIMES_RESET_RETRY 5
-
 #define KGSL_LOG_LEVEL_DEFAULT 3
 
 static void adreno_input_work(struct work_struct *work);
@@ -514,8 +511,6 @@
 	.id_table = adreno_input_ids,
 };
 
-static int adreno_soft_reset(struct kgsl_device *device);
-
 /*
  * _soft_reset() - Soft reset GPU
  * @adreno_dev: Pointer to adreno device
@@ -526,7 +521,7 @@
  * all the HW logic, restores GPU registers to default state and
  * flushes out pending VBIF transactions.
  */
-static void _soft_reset(struct adreno_device *adreno_dev)
+static int _soft_reset(struct adreno_device *adreno_dev)
 {
 	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
 	unsigned int reg;
@@ -555,6 +550,8 @@
 
 	if (gpudev->regulator_enable)
 		gpudev->regulator_enable(adreno_dev);
+
+	return 0;
 }
 
 
@@ -1623,7 +1620,7 @@
  * Power up the GPU and initialize it.  If priority is specified then elevate
  * the thread priority for the duration of the start operation
  */
-static int adreno_start(struct kgsl_device *device, int priority)
+int adreno_start(struct kgsl_device *device, int priority)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int nice = task_nice(current);
@@ -1640,38 +1637,6 @@
 	return ret;
 }
 
-/**
- * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
- * @device: Pointer to the device whose VBIF pipe is to be cleared
- */
-static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
-	unsigned int val;
-	unsigned long wait_for_vbif;
-	int ret = 0;
-
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
-	/* wait for the transactions to clear */
-	wait_for_vbif = jiffies + msecs_to_jiffies(100);
-	while (1) {
-		adreno_readreg(adreno_dev,
-			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
-		if ((val & mask) == mask)
-			break;
-		if (time_after(jiffies, wait_for_vbif)) {
-			KGSL_DRV_ERR(device,
-				"Wait limit reached for VBIF XIN Halt\n");
-			ret = -ETIMEDOUT;
-			break;
-		}
-	}
-	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
-	return ret;
-}
-
 static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
 {
 	int i;
@@ -2340,12 +2305,20 @@
  * The GPU hardware is reset but we never pull power so we can skip
  * a lot of the standard adreno_stop/adreno_start sequence
  */
-static int adreno_soft_reset(struct kgsl_device *device)
+int adreno_soft_reset(struct kgsl_device *device)
 {
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	int ret;
 
+	if (gpudev->oob_set) {
+		ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+		if (ret)
+			return ret;
+	}
+
 	kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
 	adreno_set_active_ctxs_null(adreno_dev);
 
@@ -2359,7 +2332,15 @@
 	adreno_perfcounter_save(adreno_dev);
 
 	/* Reset the GPU */
-	_soft_reset(adreno_dev);
+	if (gpudev->soft_reset)
+		ret = gpudev->soft_reset(adreno_dev);
+	else
+		ret = _soft_reset(adreno_dev);
+	if (ret) {
+		if (gpudev->oob_clear)
+			gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+		return ret;
+	}
 
 	/* Set the page table back to the default page table */
 	adreno_ringbuffer_set_global(adreno_dev, 0);
@@ -2401,6 +2382,9 @@
 	/* Restore physical performance counter values after soft reset */
 	adreno_perfcounter_restore(adreno_dev);
 
+	if (gpudev->oob_clear)
+		gpudev->oob_clear(adreno_dev, OOB_CPINIT_CLEAR_MASK);
+
 	return ret;
 }
 
@@ -2979,11 +2963,11 @@
 }
 
 static void adreno_clk_set_options(struct kgsl_device *device, const char *name,
-	struct clk *clk)
+	struct clk *clk, bool on)
 {
 	if (ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options)
 		ADRENO_GPU_DEVICE(ADRENO_DEVICE(device))->clk_set_options(
-			ADRENO_DEVICE(device), name, clk);
+			ADRENO_DEVICE(device), name, clk, on);
 }
 
 static void adreno_iommu_sync(struct kgsl_device *device, bool sync)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 78cecd0..7a6581c 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -159,10 +159,12 @@
 #define KGSL_END_OF_PROFILE_IDENTIFIER	0x2DEFADE2
 #define KGSL_PWRON_FIXUP_IDENTIFIER	0x2AFAFAFA
 
+/* Number of times to try hard reset */
+#define NUM_TIMES_RESET_RETRY 5
+
 /* One cannot wait forever for the core to idle, so set an upper limit to the
  * amount of time to wait for the core to go idle
  */
-
 #define ADRENO_IDLE_TIMEOUT (20 * 1000)
 
 #define ADRENO_UCHE_GMEM_BASE	0x100000
@@ -204,6 +206,7 @@
 #define ADRENO_TIMEOUT_FAULT BIT(2)
 #define ADRENO_IOMMU_PAGE_FAULT BIT(3)
 #define ADRENO_PREEMPT_FAULT BIT(4)
+#define ADRENO_GMU_FAULT BIT(5)
 
 #define ADRENO_SPTP_PC_CTRL 0
 #define ADRENO_PPD_CTRL     1
@@ -499,6 +502,7 @@
  * attached and enabled
  * @ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED - Set if a CACHE_FLUSH_TS irq storm
  * is in progress
+ * @ADRENO_DEVICE_HARD_RESET - Set if soft reset fails and hard reset is needed
  */
 enum adreno_device_flags {
 	ADRENO_DEVICE_PWRON = 0,
@@ -515,6 +519,7 @@
 	ADRENO_DEVICE_GPMU_INITIALIZED = 11,
 	ADRENO_DEVICE_ISDB_ENABLED = 12,
 	ADRENO_DEVICE_CACHE_FLUSH_TS_SUSPENDED = 13,
+	ADRENO_DEVICE_HARD_RESET = 14,
 };
 
 /**
@@ -636,6 +641,7 @@
 	ADRENO_REG_GMU_HOST2GMU_INTR_SET,
 	ADRENO_REG_GMU_HOST2GMU_INTR_CLR,
 	ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
+	ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
 	ADRENO_REG_REGISTER_MAX,
 };
 
@@ -846,7 +852,7 @@
 	void (*preemption_schedule)(struct adreno_device *);
 	void (*enable_64bit)(struct adreno_device *);
 	void (*clk_set_options)(struct adreno_device *,
-				const char *, struct clk *);
+				const char *, struct clk *, bool on);
 	void (*llc_configure_gpu_scid)(struct adreno_device *adreno_dev);
 	void (*llc_configure_gpuhtw_scid)(struct adreno_device *adreno_dev);
 	void (*llc_enable_overrides)(struct adreno_device *adreno_dev);
@@ -864,6 +870,8 @@
 	int (*wait_for_gmu_idle)(struct adreno_device *);
 	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
 				unsigned int fsynr1);
+	int (*reset)(struct kgsl_device *, int fault);
+	int (*soft_reset)(struct adreno_device *);
 };
 
 /**
@@ -952,6 +960,8 @@
 extern int adreno_wake_nice;
 extern unsigned int adreno_wake_timeout;
 
+int adreno_start(struct kgsl_device *device, int priority);
+int adreno_soft_reset(struct kgsl_device *device);
 long adreno_ioctl(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, unsigned long arg);
 
@@ -1707,4 +1717,37 @@
 	kgsl_active_count_put(KGSL_DEVICE(adreno_dev));
 }
 
+/**
+ * adreno_vbif_clear_pending_transactions() - Clear transactions in VBIF pipe
+ * @device: Pointer to the device whose VBIF pipe is to be cleared
+ */
+static inline int adreno_vbif_clear_pending_transactions(
+	struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	unsigned int mask = gpudev->vbif_xin_halt_ctrl0_mask;
+	unsigned int val;
+	unsigned long wait_for_vbif;
+	int ret = 0;
+
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, mask);
+	/* wait for the transactions to clear */
+	wait_for_vbif = jiffies + msecs_to_jiffies(100);
+	while (1) {
+		adreno_readreg(adreno_dev,
+			ADRENO_REG_VBIF_XIN_HALT_CTRL1, &val);
+		if ((val & mask) == mask)
+			break;
+		if (time_after(jiffies, wait_for_vbif)) {
+			KGSL_DRV_ERR(device,
+				"Wait limit reached for VBIF XIN Halt\n");
+			ret = -ETIMEDOUT;
+			break;
+		}
+	}
+	adreno_writereg(adreno_dev, ADRENO_REG_VBIF_XIN_HALT_CTRL0, 0);
+	return ret;
+}
+
 #endif /*__ADRENO_H */
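
adreno_vbif_clear_pending_transactions() is now a static inline in the header: it requests a VBIF XIN halt, polls the ack bits under a deadline, and always releases the halt request before returning. The poll-until-ack-or-timeout shape looks roughly like this sketch (toy register accessors, not the KGSL ones):

#include <errno.h>
#include <stdint.h>

/* Toy register read; a real driver would read the VBIF halt ack register. */
static uint32_t toy_read_ack(void)
{
	static int calls;

	return ++calls > 3 ? 0x3 : 0x0;	/* "acks" after a few polls */
}

/*
 * Same shape as the inlined VBIF halt helper: request the halt, poll
 * the ack bits under a retry budget, then release the halt request.
 */
static int toy_halt_and_wait(uint32_t mask, int max_polls)
{
	int i;

	/* request the halt here (register write omitted in this sketch) */
	for (i = 0; i < max_polls; i++) {
		if ((toy_read_ack() & mask) == mask)
			break;
	}
	/* release the halt request here */

	return (i == max_polls) ? -ETIMEDOUT : 0;
}

int main(void)
{
	return toy_halt_and_wait(0x3, 10);	/* acks on the fourth poll */
}
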
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 6c8b677..13c36e6 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -55,7 +55,7 @@
 	{ adreno_is_a530, a530_vbif },
 	{ adreno_is_a512, a540_vbif },
 	{ adreno_is_a510, a530_vbif },
-	{ adreno_is_a508, a530_vbif },
+	{ adreno_is_a508, a540_vbif },
 	{ adreno_is_a505, a530_vbif },
 	{ adreno_is_a506, a530_vbif },
 };
@@ -1608,11 +1608,15 @@
 }
 
 static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
-	const char *name, struct clk *clk)
+	const char *name, struct clk *clk, bool on)
 {
+
+	if (!adreno_is_a540(adreno_dev) && !adreno_is_a512(adreno_dev) &&
+		!adreno_is_a508(adreno_dev))
+		return;
+
 	/* Handle clock settings for GFX PSCBCs */
-	if (adreno_is_a540(adreno_dev) || adreno_is_a512(adreno_dev) ||
-		adreno_is_a508(adreno_dev)) {
+	if (on) {
 		if (!strcmp(name, "mem_iface_clk")) {
 			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
 			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
@@ -1620,6 +1624,11 @@
 			clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
 			clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
 		}
+	} else {
+		if (!strcmp(name, "core_clk")) {
+			clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+			clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+		}
 	}
 }
 
@@ -3008,6 +3017,8 @@
 				A5XX_VBIF_XIN_HALT_CTRL1),
 	ADRENO_REG_DEFINE(ADRENO_REG_VBIF_VERSION,
 				A5XX_VBIF_VERSION),
+	ADRENO_REG_DEFINE(ADRENO_REG_GPMU_POWER_COUNTER_ENABLE,
+				A5XX_GPMU_POWER_COUNTER_ENABLE),
 };
 
 static const struct adreno_reg_offsets a5xx_reg_offsets = {
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index e157e7b..33854ea 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -29,8 +29,6 @@
 #include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
-#define OOB_REQUEST_TIMEOUT	10 /* ms */
-
 #define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
 		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
 
@@ -789,8 +787,10 @@
 	wmb();
 }
 
-#define GMU_START_TIMEOUT 10	/* ms */
-#define GPU_START_TIMEOUT 100	/* ms */
+#define GMU_START_TIMEOUT	10	/* ms */
+#define GPU_START_TIMEOUT	100	/* ms */
+#define GPU_RESET_TIMEOUT	1	/* ms */
+#define GPU_RESET_TIMEOUT_US	10	/* us */
 
 /*
  * timed_poll_check() - polling *gmu* register at given offset until
@@ -962,7 +962,9 @@
 			GPU_START_TIMEOUT,
 			check_mask)) {
 		ret = -ETIMEDOUT;
-		dev_err(&gmu->pdev->dev, "OOB set timed out\n");
+		dev_err(&gmu->pdev->dev,
+			"OOB set timed out, mask %x\n", set_mask);
+		WARN_ON(true);
 	}
 
 	kgsl_gmu_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, clear_mask);
@@ -1005,6 +1007,7 @@
 #define SPTPRAC_POWEROFF_STATUS_MASK	BIT(2)
 #define SPTPRAC_POWERON_STATUS_MASK	BIT(3)
 #define SPTPRAC_CTRL_TIMEOUT		10 /* ms */
+#define A6XX_RETAIN_FF_ENABLE_ENABLE_MASK BIT(11)
 
 /*
  * a6xx_sptprac_enable() - Power on SPTPRAC
@@ -1045,6 +1048,10 @@
 	if (!gmu->pdev)
 		return;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	kgsl_gmu_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
 			SPTPRAC_POWEROFF_CTRL_MASK);
 
@@ -1073,7 +1080,7 @@
 	ret = regulator_enable(gmu->gx_gdsc);
 	if (ret) {
 		dev_err(&gmu->pdev->dev,
-				"Failed to turn on GPU HM HS\n");
+			"Failed to turn on GPU HM HS\n");
 		return ret;
 	}
 
@@ -1099,6 +1106,10 @@
 	if (!regulator_is_enabled(gmu->gx_gdsc))
 		return 0;
 
+	/* Ensure that retention is on */
+	kgsl_gmu_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
+			A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);
+
 	clk_disable_unprepare(pwr->grp_clks[0]);
 
 	clk_set_rate(pwr->grp_clks[0],
@@ -1119,11 +1130,14 @@
 
 	/* If GMU does not control HM we must */
 	if (gmu->idle_level < GPU_HW_IFPC) {
+
 		ret = a6xx_hm_enable(ADRENO_DEVICE(device));
 		if (ret) {
 			dev_err(&gmu->pdev->dev, "Failed to power on GPU HM\n");
 			return ret;
 		}
+
+
 	}
 
 	/* If GMU does not control SPTPRAC we must */
@@ -1162,19 +1176,6 @@
 }
 
 /*
- * a6xx_hm_sptprac_control() - Turn HM and SPTPRAC on or off
- * @device: Pointer to KGSL device
- * @on: True to turn on or false to turn off
- */
-static int a6xx_hm_sptprac_control(struct kgsl_device *device, bool on)
-{
-	if (on)
-		return a6xx_hm_sptprac_enable(device);
-	else
-		return a6xx_hm_sptprac_disable(device);
-}
-
-/*
  * a6xx_gfx_rail_on() - request GMU to power GPU at given OPP.
  * @device: Pointer to KGSL device
  *
@@ -1201,11 +1202,13 @@
 			OOB_BOOT_SLUMBER_CLEAR_MASK);
 
 	if (ret)
-		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+		dev_err(&gmu->pdev->dev, "Boot OOB timed out\n");
 
 	return ret;
 }
 
+#define GMU_POWER_STATE_SLUMBER 15
+
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -1219,6 +1222,9 @@
 	int perf_idx = gmu->num_gpupwrlevels - pwr->default_pwrlevel - 1;
 	int ret, state;
 
+	/* Disable the power counter so that the GMU is not busy */
+	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
 	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = hfi_notify_slumber(gmu, perf_idx, bus_level);
 		return ret;
@@ -1235,7 +1241,7 @@
 	a6xx_oob_clear(adreno_dev, OOB_BOOT_SLUMBER_CLEAR_MASK);
 
 	if (ret)
-		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
+		dev_err(&gmu->pdev->dev, "Notify slumber OOB timed out\n");
 	else {
 		kgsl_gmu_regread(device,
 			A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
@@ -1281,13 +1287,15 @@
 	kgsl_gmu_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);
 
 	/* Turn on the HM and SPTP head switches */
-	ret = a6xx_hm_sptprac_control(device, true);
+	ret = a6xx_hm_sptprac_enable(device);
+
+	/* Enable the power counter because it was disabled before slumber */
+	kgsl_gmu_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
 
 	return ret;
-
 error_rsc:
 	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
 	return -EINVAL;
 }
 
 static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
@@ -1296,7 +1304,7 @@
 	int val, ret = 0;
 
 	/* Turn off the SPTP and HM head switches */
-	ret = a6xx_hm_sptprac_control(device, false);
+	ret = a6xx_hm_sptprac_disable(device);
 
 	/* RSC sleep sequence */
 	kgsl_gmu_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0, 1);
@@ -1339,7 +1347,12 @@
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	int ret, i;
 
-	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
+	switch (boot_state) {
+	case GMU_COLD_BOOT:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
 
 		/* Turn on TCM retention */
 		kgsl_gmu_regwrite(device, A6XX_GMU_GENERAL_7, 1);
@@ -1347,7 +1360,7 @@
 		if (!test_and_set_bit(GMU_BOOT_INIT_DONE, &gmu->flags)) {
 			_load_gmu_rpmh_ucode(device);
 			/* Turn on the HM and SPTP head switches */
-			ret = a6xx_hm_sptprac_control(device, true);
+			ret = a6xx_hm_sptprac_enable(device);
 			if (ret)
 				return ret;
 		} else {
@@ -1371,10 +1384,19 @@
 					gmu->load_mode);
 			return -EINVAL;
 		}
-	} else {
+		break;
+	case GMU_WARM_BOOT:
 		ret = a6xx_rpmh_power_on_gpu(device);
 		if (ret)
 			return ret;
+		break;
+	case GMU_RESET:
+		/* Turn on the HM and SPTP head switches */
+		ret = a6xx_hm_sptprac_enable(device);
+		if (ret)
+			return ret;
+	default:
+		break;
 	}
 
 	/* Clear init result to make sure we are getting fresh value */
@@ -1394,8 +1416,7 @@
 	if (ret)
 		return ret;
 
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)
-			&& boot_state == GMU_COLD_BOOT) {
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
 		ret = a6xx_gfx_rail_on(device);
 		if (ret) {
 			a6xx_oob_clear(adreno_dev,
@@ -1425,7 +1446,7 @@
 		unsigned int perf_idx, unsigned int bw_idx)
 {
 	struct hfi_dcvs_cmd dcvs_cmd = {
-		.ack_type = ACK_BLOCK,
+		.ack_type = ACK_NONBLOCK,
 		.freq = {
 			.perf_idx = perf_idx,
 			.clkset_opt = OPTION_AT_LEAST,
@@ -1439,10 +1460,6 @@
 	union gpu_perf_vote vote;
 	int ret;
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
-		dcvs_cmd.ack_type = ACK_NONBLOCK;
-
 	kgsl_gmu_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, dcvs_cmd.ack_type);
 
 	vote.fvote = dcvs_cmd.freq;
@@ -1455,7 +1472,7 @@
 		OOB_DCVS_CLEAR_MASK);
 
 	if (ret) {
-		dev_err(&gmu->pdev->dev, "OOB set after GMU booted timed out\n");
+		dev_err(&gmu->pdev->dev, "DCVS OOB timed out\n");
 		goto done;
 	}
 
@@ -1469,43 +1486,6 @@
 	return ret;
 }
 
-/*
- * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
- * @adreno_dev: Pointer to adreno device
- * @mode: requested power mode
- * @arg1: first argument for mode control
- * @arg2: second argument for mode control
- */
-static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
-		unsigned int mode, unsigned int arg1, unsigned int arg2)
-{
-	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct gmu_device *gmu = &device->gmu;
-	int ret;
-
-	switch (mode) {
-	case GMU_FW_START:
-		ret = a6xx_gmu_fw_start(device, arg1);
-		break;
-	case GMU_FW_STOP:
-		ret = a6xx_rpmh_power_off_gpu(device);
-		break;
-	case GMU_DCVS_NOHFI:
-		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
-		break;
-	case GMU_NOTIFY_SLUMBER:
-		ret = a6xx_notify_slumber(device);
-		break;
-	default:
-		dev_err(&gmu->pdev->dev,
-				"unsupported GMU power ctrl mode:%d\n", mode);
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
 static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
 {
 	unsigned int reg;
@@ -1521,10 +1501,17 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
+	unsigned int status, status2;
 
 	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
 			0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
-		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+		kgsl_gmu_regread(device,
+				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &status);
+		kgsl_gmu_regread(device,
+				A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
+		dev_err(&gmu->pdev->dev,
+				"GMU not idling: status=0x%x, status2=0x%x\n",
+				status, status2);
 		return -ETIMEDOUT;
 	}
 
@@ -1585,6 +1572,299 @@
 			ADRENO_FW(adreno_dev, ADRENO_FW_SQE));
 }
 
+#define VBIF_RESET_ACK_TIMEOUT	100
+#define VBIF_RESET_ACK_MASK	0x00f0
+
+static int a6xx_soft_reset(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int reg;
+	unsigned long time;
+	bool vbif_acked = false;
+
+	/*
+	 * For the soft reset case with GMU enabled this part is done
+	 * by the GMU firmware
+	 */
+	if (kgsl_gmu_isenabled(device) &&
+		!test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv))
+		return 0;
+
+
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 1);
+	/*
+	 * Do a dummy read to get a brief read cycle delay for the
+	 * reset to take effect
+	 */
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, &reg);
+	adreno_writereg(adreno_dev, ADRENO_REG_RBBM_SW_RESET_CMD, 0);
+
+	/* Wait for the VBIF reset ack to complete */
+	time = jiffies + msecs_to_jiffies(VBIF_RESET_ACK_TIMEOUT);
+
+	do {
+		kgsl_regread(device, A6XX_RBBM_VBIF_GX_RESET_STATUS, &reg);
+		if ((reg & VBIF_RESET_ACK_MASK) == VBIF_RESET_ACK_MASK) {
+			vbif_acked = true;
+			break;
+		}
+		cpu_relax();
+	} while (!time_after(jiffies, time));
+
+	if (!vbif_acked)
+		return -ETIMEDOUT;
+
+	a6xx_sptprac_enable(adreno_dev);
+
+	return 0;
+}
+
+#define A6XX_STATE_OF_CHILD             (BIT(4) | BIT(5))
+#define A6XX_IDLE_FULL_LLM              BIT(0)
+#define A6XX_WAKEUP_ACK                 BIT(1)
+#define A6XX_IDLE_FULL_ACK              BIT(0)
+#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS   (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+static void a6xx_isense_disable(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return;
+
+	kgsl_gmu_regread(device, A6XX_GPU_CS_ENABLE_REG, &val);
+	if (val) {
+		kgsl_gmu_regwrite(device, A6XX_GPU_CS_ENABLE_REG, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_ISENSE_CTRL, 0);
+	}
+}
+
+static int a6xx_llm_glm_handshake(struct kgsl_device *device)
+{
+	unsigned int val;
+	const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		return 0;
+
+	kgsl_gmu_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
+	if (!(val & A6XX_STATE_OF_CHILD)) {
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
+		kgsl_gmu_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
+				A6XX_IDLE_FULL_LLM);
+		if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
+				A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
+				A6XX_IDLE_FULL_ACK)) {
+			dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int a6xx_complete_rpmh_votes(struct kgsl_device *device)
+{
+	int ret = 0;
+
+	if (!kgsl_gmu_isenabled(device))
+		return ret;
+
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS0_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS1_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS2_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+	ret |= timed_poll_check(device, A6XX_RSCC_TCS3_DRV0_STATUS, BIT(0),
+			GPU_RESET_TIMEOUT, BIT(0));
+
+	return ret;
+}
+
+static int a6xx_gmu_suspend(struct kgsl_device *device)
+{
+	/* Max GX clients on A6xx is 2: GMU and KMD */
+	int ret = 0, max_client_num = 2;
+	struct gmu_device *gmu = &device->gmu;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* Disable ISENSE if it's on (only applicable with the LM feature) */
+	a6xx_isense_disable(device);
+
+	/* LLM-GLM handshake sequence */
+	a6xx_llm_glm_handshake(device);
+
+	/* If SPTP_RAC is on, turn off SPTP_RAC HS */
+	a6xx_sptprac_disable(adreno_dev);
+
+	/* Disconnect GPU from the bus; cleared and reconnected after reset */
+	adreno_vbif_clear_pending_transactions(device);
+	/* Unnecessary: a6xx_soft_reset(adreno_dev); */
+
+	/* Check no outstanding RPMh voting */
+	a6xx_complete_rpmh_votes(device);
+
+	if (gmu->idle_level < GPU_HW_IFPC) {
+		/* HM GDSC is controlled by KGSL */
+		ret = a6xx_hm_disable(ADRENO_DEVICE(device));
+		if (ret)
+			dev_err(&gmu->pdev->dev,
+				"suspend: fail: power off GPU HM\n");
+	} else if (gmu->gx_gdsc) {
+		if (regulator_is_enabled(gmu->gx_gdsc)) {
+			/* Switch gx gdsc control from GMU to CPU:
+			 * force a non-zero reference count in the clk
+			 * driver so the next disable call turns the
+			 * GDSC off.
+			 */
+			ret = regulator_enable(gmu->gx_gdsc);
+			if (ret)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: gx enable\n");
+
+			while (max_client_num) {
+				ret = regulator_disable(gmu->gx_gdsc);
+				if (!regulator_is_enabled(gmu->gx_gdsc))
+					break;
+				max_client_num -= 1;
+			}
+
+			if (!max_client_num)
+				dev_err(&gmu->pdev->dev,
+					"suspend fail: cannot disable gx\n");
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * a6xx_rpmh_gpu_pwrctrl() - GPU power control via RPMh/GMU interface
+ * @adreno_dev: Pointer to adreno device
+ * @mode: requested power mode
+ * @arg1: first argument for mode control
+ * @arg2: second argument for mode control
+ */
+static int a6xx_rpmh_gpu_pwrctrl(struct adreno_device *adreno_dev,
+		unsigned int mode, unsigned int arg1, unsigned int arg2)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+	int ret;
+
+	switch (mode) {
+	case GMU_FW_START:
+		ret = a6xx_gmu_fw_start(device, arg1);
+		break;
+	case GMU_SUSPEND:
+		ret = a6xx_gmu_suspend(device);
+		break;
+	case GMU_FW_STOP:
+		ret = a6xx_rpmh_power_off_gpu(device);
+		break;
+	case GMU_DCVS_NOHFI:
+		ret = a6xx_gmu_dcvs_nohfi(device, arg1, arg2);
+		break;
+	case GMU_NOTIFY_SLUMBER:
+		ret = a6xx_notify_slumber(device);
+		break;
+	default:
+		dev_err(&gmu->pdev->dev,
+				"unsupported GMU power ctrl mode:%d\n", mode);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * a6xx_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @fault: Type of fault. Needed to skip soft reset for MMU fault
+ *
+ * Try to reset the GPU to recover from a fault.  First, try to do a low latency
+ * soft reset.  If the soft reset fails for some reason, then bring out the big
+ * guns and toggle the footswitch.
+ */
+static int a6xx_reset(struct kgsl_device *device, int fault)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int ret = -EINVAL;
+	int i = 0;
+
+	/* Use the regular reset sequence for No GMU */
+	if (!kgsl_gmu_isenabled(device))
+		return adreno_reset(device, fault);
+
+	/* Transition from ACTIVE to RESET state */
+	kgsl_pwrctrl_change_state(device, KGSL_STATE_RESET);
+
+	/* Try soft reset first */
+	if (!(fault & ADRENO_IOMMU_PAGE_FAULT)) {
+		int acked;
+
+		/* NMI */
+		kgsl_gmu_regwrite(device, A6XX_GMU_NMI_CONTROL_STATUS, 0);
+		kgsl_gmu_regwrite(device, A6XX_GMU_CM3_CFG, (1 << 9));
+
+		for (i = 0; i < 10; i++) {
+			kgsl_gmu_regread(device,
+					A6XX_GMU_NMI_CONTROL_STATUS, &acked);
+
+			/* NMI FW ACK received */
+			if (acked == 0x1)
+				break;
+
+			udelay(100);
+		}
+
+		if (acked)
+			ret = adreno_soft_reset(device);
+		if (ret)
+			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+	}
+	if (ret) {
+		/* If soft reset failed/skipped, then pull the power */
+		set_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+		/* since device is officially off now clear start bit */
+		clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
+
+		/* Keep trying to start the device until it works */
+		for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
+			ret = adreno_start(device, 0);
+			if (!ret)
+				break;
+
+			msleep(20);
+		}
+	}
+
+	clear_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv);
+
+	if (ret)
+		return ret;
+
+	if (i != 0)
+		KGSL_DRV_WARN(device, "Device hard reset tried %d times\n", i);
+
+	/*
+	 * If active_cnt is non-zero then the system was active before
+	 * going into a reset - put it back in that state
+	 */
+
+	if (atomic_read(&device->active_cnt))
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
+	else
+		kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
+
+	return ret;
+}
+
 static void a6xx_cp_hw_err_callback(struct adreno_device *adreno_dev, int bit)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
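
a6xx_reset() above first tries an inexpensive recovery (GMU NMI handshake plus soft reset) and only then falls back to a bounded number of full restarts via adreno_start(). A compact sketch of that fall-back pattern, with hypothetical hooks standing in for the real soft and hard reset paths:

#include <stdio.h>

#define RESET_RETRIES	5

/* Hypothetical hooks; a real driver would soft reset or power-cycle here. */
static int try_soft_reset(void) { return -1; }	/* pretend it fails */
static int try_hard_start(int attempt) { return attempt < 2 ? -1 : 0; }

/*
 * Same fall-back shape as a6xx_reset(): cheap soft reset first, then a
 * bounded number of full restart attempts.
 */
static int toy_reset(void)
{
	int ret = try_soft_reset();
	int i;

	if (!ret)
		return 0;

	for (i = 0; i < RESET_RETRIES; i++) {
		ret = try_hard_start(i);
		if (!ret)
			break;
	}

	if (!ret && i)
		printf("recovered after %d hard reset attempts\n", i + 1);
	return ret;
}

int main(void)
{
	return toy_reset();
}
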
@@ -1671,7 +1951,6 @@
 	wmb();
 }
 
-
 /*
  * a6xx_llc_configure_gpu_scid() - Program the sub-cache ID for all GPU blocks
  * @adreno_dev: The adreno device pointer
@@ -2162,17 +2441,59 @@
 		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
 };
 
+static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
+	{ KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
+};
+
 static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
 	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
 };
 
+static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
+	/*
+	 * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
+	 * busy count (see the PWR group above). Mark it as broken
+	 * so it's not re-used.
+	 */
+	{ KGSL_PERFCOUNTER_BROKEN, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
+};
+
 #define A6XX_PERFCOUNTER_GROUP(offset, name) \
 	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
 
 #define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
 	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
 
+#define A6XX_POWER_COUNTER_GROUP(offset, name) \
+	ADRENO_POWER_COUNTER_GROUP(a6xx, offset, name)
+
 static struct adreno_perfcount_group a6xx_perfcounter_groups
 				[KGSL_PERFCOUNTER_GROUP_MAX] = {
 	A6XX_PERFCOUNTER_GROUP(CP, cp),
@@ -2194,8 +2515,11 @@
 	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
 	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
 		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
 };
 
 static struct adreno_perfcounters a6xx_perfcounters = {
@@ -2203,6 +2527,30 @@
 	ARRAY_SIZE(a6xx_perfcounter_groups),
 };
 
+/* Program the GMU power counter to count GPU busy cycles */
+static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+		unsigned int counter)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+	/*
+	 * We have a limited number of power counters. Since we're not using
+	 * total GPU cycle count, return error if requested.
+	 */
+	if (counter == 0)
+		return -EINVAL;
+
+	if (!device->gmu.pdev)
+		return -ENODEV;
+
+	kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0);
+	kgsl_regrmw(device,
+		A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
+	kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
+
+	return 0;
+}
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
@@ -2293,7 +2641,6 @@
 				A6XX_GMU_HOST2GMU_INTR_CLR),
 	ADRENO_REG_DEFINE(ADRENO_REG_GMU_HOST2GMU_INTR_RAW_INFO,
 				A6XX_GMU_HOST2GMU_INTR_RAW_INFO),
-
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
 				A6XX_RBBM_SECVID_TRUST_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
@@ -2325,6 +2672,7 @@
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
 	.perfcounters = &a6xx_perfcounters,
+	.enable_pwr_counters = a6xx_enable_pwr_counters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
@@ -2337,4 +2685,6 @@
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
 	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
 	.iommu_fault_block = a6xx_iommu_fault_block,
+	.reset = a6xx_reset,
+	.soft_reset = a6xx_soft_reset,
 };
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 17ee6e6..54acd73 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -206,8 +206,38 @@
 };
 
 static const unsigned int a6xx_gmu_registers[] = {
-	/* GMU */
+	/* GMU GX */
+	0x1A800, 0x1A800, 0x1A810, 0x1A813, 0x1A816, 0x1A816, 0x1A818, 0x1A81B,
+	0x1A81E, 0x1A81E, 0x1A820, 0x1A823, 0x1A826, 0x1A826, 0x1A828, 0x1A82B,
+	0x1A82E, 0x1A82E, 0x1A830, 0x1A833, 0x1A836, 0x1A836, 0x1A838, 0x1A83B,
+	0x1A83E, 0x1A83E, 0x1A840, 0x1A843, 0x1A846, 0x1A846, 0x1A880, 0x1A884,
+	0x1A900, 0x1A92B, 0x1A940, 0x1A940,
+	/* GMU TCM */
 	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+	/* GMU CX */
+	0x1F400, 0x1F407, 0x1F410, 0x1F412, 0x1F500, 0x1F500, 0x1F507, 0x1F50A,
+	0x1F800, 0x1F804, 0x1F807, 0x1F808, 0x1F80B, 0x1F80C, 0x1F80F, 0x1F81C,
+	0x1F824, 0x1F82A, 0x1F82D, 0x1F830, 0x1F840, 0x1F853, 0x1F887, 0x1F889,
+	0x1F8A0, 0x1F8A2, 0x1F8A4, 0x1F8AF, 0x1F8C0, 0x1F8C3, 0x1F8D0, 0x1F8D0,
+	0x1F8E4, 0x1F8E4, 0x1F8E8, 0x1F8EC, 0x1F900, 0x1F903, 0x1F940, 0x1F940,
+	0x1F942, 0x1F944, 0x1F94C, 0x1F94D, 0x1F94F, 0x1F951, 0x1F954, 0x1F954,
+	0x1F957, 0x1F958, 0x1F95D, 0x1F95D, 0x1F962, 0x1F962, 0x1F964, 0x1F965,
+	0x1F980, 0x1F986, 0x1F990, 0x1F99E, 0x1F9C0, 0x1F9C0, 0x1F9C5, 0x1F9CC,
+	0x1F9E0, 0x1F9E2, 0x1F9F0, 0x1F9F0, 0x1FA00, 0x1FA03,
+	/* GPU RSCC */
+	0x23740, 0x23742, 0x23744, 0x23747, 0x2374C, 0x23787, 0x237EC, 0x237EF,
+	0x237F4, 0x2382F, 0x23894, 0x23897, 0x2389C, 0x238D7, 0x2393C, 0x2393F,
+	0x23944, 0x2397F,
+	/* GMU AO */
+	0x23B00, 0x23B16, 0x23C00, 0x23C00,
+	/* GPU CC */
+	0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440B,
+	0x24415, 0x2441C, 0x2441E, 0x2442D, 0x2443C, 0x2443D, 0x2443F, 0x24440,
+	0x24442, 0x24449, 0x24458, 0x2445A, 0x24540, 0x2455E, 0x24800, 0x24802,
+	0x24C00, 0x24C02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25C00, 0x25C02,
+	0x26000, 0x26002,
+	/* GPU CC ACD */
+	0x26400, 0x26416, 0x26420, 0x26427,
 };
 
 static const struct adreno_vbif_snapshot_registers
@@ -1264,59 +1294,14 @@
 	}
 }
 
-static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
-		u8 *buf, size_t remain, void *priv)
-{
-	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
-	struct kgsl_snapshot_registers *regs = priv;
-	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
-	int count = 0, j, k;
-
-	/* Figure out how many registers we are going to dump */
-	for (j = 0; j < regs->count; j++) {
-		int start = regs->regs[j * 2];
-		int end = regs->regs[j * 2 + 1];
-
-		count += (end - start + 1);
-	}
-
-	if (remain < (count * 8) + sizeof(*header)) {
-		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
-		return 0;
-	}
-
-	for (j = 0; j < regs->count; j++) {
-		unsigned int start = regs->regs[j * 2];
-		unsigned int end = regs->regs[j * 2 + 1];
-
-		for (k = start; k <= end; k++) {
-			unsigned int val;
-
-			kgsl_gmu_regread(device, k, &val);
-			*data++ = k;
-			*data++ = val;
-		}
-	}
-
-	header->count = count;
-
-	/* Return the size of the section */
-	return (count * 8) + sizeof(*header);
-}
-
 static void a6xx_snapshot_gmu(struct kgsl_device *device,
 		struct kgsl_snapshot *snapshot)
 {
-	struct kgsl_snapshot_registers gmu_regs = {
-		.regs = a6xx_gmu_registers,
-		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
-	};
-
 	if (!kgsl_gmu_isenabled(device))
 		return;
 
-	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
-			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+	adreno_snapshot_registers(device, snapshot, a6xx_gmu_registers,
+					ARRAY_SIZE(a6xx_gmu_registers) / 2);
 }
 
 /* a6xx_snapshot_sqe() - Dump SQE data in snapshot */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index b831d0d..e8b1c67 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -2097,7 +2097,12 @@
 	/* Turn off all the timers */
 	del_timer_sync(&dispatcher->timer);
 	del_timer_sync(&dispatcher->fault_timer);
-	del_timer_sync(&adreno_dev->preempt.timer);
+	/*
+	 * Deleting an uninitialized timer blocks forever on builds with
+	 * kernel debugging disabled, so only delete the preempt timer when
+	 * it has actually been initialized.
+	 */
+	if (adreno_is_preemption_enabled(adreno_dev))
+		del_timer_sync(&adreno_dev->preempt.timer);
 
 	mutex_lock(&device->mutex);
 
@@ -2183,7 +2188,11 @@
 		kgsl_process_event_group(device, &hung_rb->events);
 	}
 
-	ret = adreno_reset(device, fault);
+	if (gpudev->reset)
+		ret = gpudev->reset(device, fault);
+	else
+		ret = adreno_reset(device, fault);
+
 	mutex_unlock(&device->mutex);
 	/* if any other fault got in until reset then ignore */
 	atomic_set(&dispatcher->fault, 0);
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index cd95003..0da4da9 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -654,7 +654,7 @@
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct adreno_perfcount_register *reg;
-	unsigned int shift = counter << 3;
+	unsigned int shift = (counter << 3) % (sizeof(unsigned int) * 8);
 
 	if (adreno_is_a530(adreno_dev)) {
 		if (countable > 43)
@@ -662,13 +662,16 @@
 	} else if (adreno_is_a540(adreno_dev)) {
 		if (countable > 47)
 			return;
+	} else if (adreno_is_a6xx(adreno_dev)) {
+		if (countable > 34)
+			return;
 	} else
 		/* return on platforms that have no GPMU */
 		return;
 
 	reg = &counters->groups[group].regs[counter];
 	kgsl_regrmw(device, reg->select, 0xff << shift, countable << shift);
-	kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+	adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
 	reg->value = 0;
 }
 
@@ -684,7 +687,7 @@
 
 	reg = &counters->groups[group].regs[counter];
 	kgsl_regwrite(device, reg->select, countable);
-	kgsl_regwrite(device, A5XX_GPMU_POWER_COUNTER_ENABLE, 1);
+	adreno_writereg(adreno_dev, ADRENO_REG_GPMU_POWER_COUNTER_ENABLE, 1);
 	reg->value = 0;
 }
 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 9d847ae..bff1fda 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -54,21 +54,10 @@
 
 	/* Read always on registers */
 	if (!adreno_is_a3xx(adreno_dev)) {
-		if (kgsl_gmu_isenabled(KGSL_DEVICE(adreno_dev))) {
-			uint32_t val_lo, val_hi;
-
-			adreno_read_gmureg(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO, &val_lo);
-			adreno_read_gmureg(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI, &val_hi);
-
-			time->ticks = (val_lo | ((uint64_t)val_hi << 32));
-		} else {
-			adreno_readreg64(adreno_dev,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
-				ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
-				&time->ticks);
-		}
+		adreno_readreg64(adreno_dev,
+			ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+			ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+			&time->ticks);
 
 		/* Mask hi bits as they may be incorrect on some targets */
 		if (ADRENO_GPUREV(adreno_dev) >= 400 &&
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d836cbb..6a39792 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -257,6 +257,13 @@
 	kgsl_mem_entry_put(entry);
 }
 
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+	if (entry)
+		queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+
 static inline struct kgsl_mem_entry *
 kgsl_mem_entry_create(void)
 {
@@ -266,6 +273,7 @@
 		kref_init(&entry->refcount);
 		/* put this ref in userspace memory alloc and map ioctls */
 		kref_get(&entry->refcount);
+		INIT_WORK(&entry->work, _deferred_put);
 	}
 
 	return entry;
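
The kgsl_mem_entry changes move INIT_WORK() into entry creation so that any final put can simply be queued to the kgsl-mementry workqueue instead of freeing inline. The idea, reduced to a toy refcount and a function pointer standing in for the work item:

#include <stdio.h>

/*
 * Toy work item: initialized once when the object is created, "queued"
 * (here: run immediately) when the last reference is dropped.
 */
struct toy_work {
	void (*func)(struct toy_work *w);
};

struct toy_entry {
	int refcount;
	struct toy_work work;
};

static void toy_deferred_free(struct toy_work *w)
{
	(void)w;
	printf("freeing entry in worker context\n");
}

static void toy_entry_init(struct toy_entry *e)
{
	e->refcount = 1;
	e->work.func = toy_deferred_free;	/* INIT_WORK() equivalent */
}

static void toy_entry_put_deferred(struct toy_entry *e)
{
	if (e && --e->refcount == 0)
		e->work.func(&e->work);		/* queue_work() equivalent */
}

int main(void)
{
	struct toy_entry e;

	toy_entry_init(&e);
	toy_entry_put_deferred(&e);
	return 0;
}
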
@@ -1244,7 +1252,8 @@
 	spin_lock(&private->mem_lock);
 	idr_for_each_entry(&private->mem_idr, entry, id) {
 		if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
-			ret = kgsl_mem_entry_get(entry);
+			if (!entry->pending_free)
+				ret = kgsl_mem_entry_get(entry);
 			break;
 		}
 	}
@@ -1877,7 +1886,7 @@
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -1895,7 +1904,7 @@
 		return -EINVAL;
 
 	ret = gpumem_free_entry(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return ret;
 }
@@ -1932,8 +1941,7 @@
 {
 	struct kgsl_mem_entry *entry = priv;
 
-	INIT_WORK(&entry->work, _deferred_put);
-	queue_work(kgsl_driver.mem_workqueue, &entry->work);
+	kgsl_mem_entry_put_deferred(entry);
 }
 
 static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1997,7 +2005,7 @@
 	else
 		ret = -EINVAL;
 
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 	return ret;
 }
 
@@ -3377,7 +3385,13 @@
 	if (entry == NULL)
 		return -EINVAL;
 
+	if (!kgsl_mem_entry_set_pend(entry)) {
+		kgsl_mem_entry_put(entry);
+		return -EBUSY;
+	}
+
 	if (entry->memdesc.cur_bindings != 0) {
+		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3386,7 +3400,7 @@
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -3446,7 +3460,13 @@
 	if (entry == NULL)
 		return -EINVAL;
 
+	if (!kgsl_mem_entry_set_pend(entry)) {
+		kgsl_mem_entry_put(entry);
+		return -EBUSY;
+	}
+
 	if (entry->bind_tree.rb_node != NULL) {
+		kgsl_mem_entry_unset_pend(entry);
 		kgsl_mem_entry_put(entry);
 		return -EINVAL;
 	}
@@ -3455,7 +3475,7 @@
 
 	/* One put for find_id(), one put for the kgsl_mem_entry_create() */
 	kgsl_mem_entry_put(entry);
-	kgsl_mem_entry_put(entry);
+	kgsl_mem_entry_put_deferred(entry);
 
 	return 0;
 }
@@ -4853,7 +4873,7 @@
 		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
 
 	kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
-		WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+		WQ_MEM_RECLAIM, 0);
 
 	kgsl_events_init();
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index be379e3..ca1f181 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -40,6 +40,8 @@
  * that the KGSL module believes a device is idle (has been inactive	*
  * past its timer) and all system resources are released.  SUSPEND is	*
  * requested by the kernel and will be enforced upon all open devices.	*
+ * RESET indicates that a GPU or GMU hang has occurred and KGSL is	*
+ * taking a snapshot or recovering the GPU from the hang.		*
  */
 
 #define KGSL_STATE_NONE		0x00000000
@@ -49,6 +51,7 @@
 #define KGSL_STATE_SUSPEND	0x00000010
 #define KGSL_STATE_AWARE	0x00000020
 #define KGSL_STATE_SLUMBER	0x00000080
+#define KGSL_STATE_RESET	0x00000100
 
 /**
  * enum kgsl_event_results - result codes passed to an event callback when the
@@ -176,7 +179,7 @@
 		unsigned int prelevel, unsigned int postlevel, bool post);
 	void (*regulator_disable_poll)(struct kgsl_device *device);
 	void (*clk_set_options)(struct kgsl_device *device,
-		const char *name, struct clk *clk);
+		const char *name, struct clk *clk, bool on);
 	void (*gpu_model)(struct kgsl_device *device, char *str,
 		size_t bufsz);
 	void (*stop_fault_timer)(struct kgsl_device *device);
@@ -529,18 +532,49 @@
 		priv->stats[type].max = priv->stats[type].cur;
 }
 
+static inline bool kgsl_is_register_offset(struct kgsl_device *device,
+				unsigned int offsetwords)
+{
+	return ((offsetwords * sizeof(uint32_t)) < device->reg_len);
+}
+
+static inline bool kgsl_is_gmu_offset(struct kgsl_device *device,
+				unsigned int offsetwords)
+{
+	struct gmu_device *gmu = &device->gmu;
+
+	return (gmu->pdev &&
+		(offsetwords >= gmu->gmu2gpu_offset) &&
+		((offsetwords - gmu->gmu2gpu_offset) * sizeof(uint32_t) <
+			gmu->reg_len));
+}
+
 static inline void kgsl_regread(struct kgsl_device *device,
 				unsigned int offsetwords,
 				unsigned int *value)
 {
-	device->ftbl->regread(device, offsetwords, value);
+	if (kgsl_is_register_offset(device, offsetwords))
+		device->ftbl->regread(device, offsetwords, value);
+	else if (device->ftbl->gmu_regread &&
+			kgsl_is_gmu_offset(device, offsetwords))
+		device->ftbl->gmu_regread(device, offsetwords, value);
+	else {
+		WARN(1, "Out of bounds register read: 0x%x\n", offsetwords);
+		*value = 0;
+	}
 }
 
 static inline void kgsl_regwrite(struct kgsl_device *device,
 				 unsigned int offsetwords,
 				 unsigned int value)
 {
-	device->ftbl->regwrite(device, offsetwords, value);
+	if (kgsl_is_register_offset(device, offsetwords))
+		device->ftbl->regwrite(device, offsetwords, value);
+	else if (device->ftbl->gmu_regwrite &&
+			kgsl_is_gmu_offset(device, offsetwords))
+		device->ftbl->gmu_regwrite(device, offsetwords, value);
+	else
+		WARN(1, "Out of bounds register write: 0x%x\n", offsetwords);
 }
 
 static inline void kgsl_gmu_regread(struct kgsl_device *device,
@@ -567,9 +601,9 @@
 {
 	unsigned int val = 0;
 
-	device->ftbl->regread(device, offsetwords, &val);
+	kgsl_regread(device, offsetwords, &val);
 	val &= ~mask;
-	device->ftbl->regwrite(device, offsetwords, val | bits);
+	kgsl_regwrite(device, offsetwords, val | bits);
 }
 
 static inline void kgsl_gmu_regrmw(struct kgsl_device *device,
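
The new kgsl_regread()/kgsl_regwrite() route each access by offset: word offsets inside the GPU register block use the regular accessors, offsets inside the GMU window use the GMU accessors, and anything else triggers a WARN. A toy version of that routing, with made-up block layouts rather than the real register map:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical layout: GPU registers occupy word offsets [0, gpu_len)
 * and GMU registers start at gmu_base for gmu_len words.
 */
struct toy_dev {
	unsigned int gpu_len;	/* in 32-bit words */
	unsigned int gmu_base;
	unsigned int gmu_len;
};

static bool is_gpu_offset(const struct toy_dev *d, unsigned int off)
{
	return off < d->gpu_len;
}

static bool is_gmu_offset(const struct toy_dev *d, unsigned int off)
{
	return off >= d->gmu_base && (off - d->gmu_base) < d->gmu_len;
}

/*
 * Same routing shape as the reworked kgsl_regread(): pick the register
 * file from the offset, warn (here: print) if neither range matches.
 */
static unsigned int toy_regread(const struct toy_dev *d, unsigned int off)
{
	if (is_gpu_offset(d, off))
		return 0xA0;		/* pretend GPU read */
	if (is_gmu_offset(d, off))
		return 0xB0;		/* pretend GMU read */
	printf("out of bounds read: 0x%x\n", off);
	return 0;
}

int main(void)
{
	struct toy_dev d = { .gpu_len = 0x10000, .gmu_base = 0x1A800,
			     .gmu_len = 0x9000 };

	printf("0x%x\n", toy_regread(&d, 0x210));	/* GPU range */
	printf("0x%x\n", toy_regread(&d, 0x1F81C));	/* GMU range */
	printf("0x%x\n", toy_regread(&d, 0x40000));	/* neither */
	return 0;
}
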
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 8de1a7e..f87e4da 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -748,6 +748,7 @@
 {
 	struct gmu_device *gmu = data;
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -756,9 +757,12 @@
 			ADRENO_REG_GMU_AO_HOST_INTERRUPT_CLR, status);
 
 	/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD WAKEUP interrupts */
-	if (status & GMU_INT_WDOG_BITE)
+	if (status & GMU_INT_WDOG_BITE) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU watchdog expired interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & GMU_INT_HOST_AHB_BUS_ERR)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"AHB bus error interrupt received\n");
@@ -775,6 +779,7 @@
 	struct kgsl_hfi *hfi = data;
 	struct gmu_device *gmu = container_of(hfi, struct gmu_device, hfi);
 	struct kgsl_device *device = container_of(gmu, struct kgsl_device, gmu);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	unsigned int status = 0;
 
 	adreno_read_gmureg(ADRENO_DEVICE(device),
@@ -784,9 +789,12 @@
 
 	if (status & HFI_IRQ_MSGQ_MASK)
 		tasklet_hi_schedule(&hfi->tasklet);
-	if (status & HFI_IRQ_CM3_FAULT_MASK)
+	if (status & HFI_IRQ_CM3_FAULT_MASK) {
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"GMU CM3 fault interrupt received\n");
+		adreno_set_gpu_fault(adreno_dev, ADRENO_GMU_FAULT);
+		adreno_dispatcher_schedule(device);
+	}
 	if (status & ~HFI_IRQ_MASK)
 		dev_err_ratelimited(&gmu->pdev->dev,
 				"Unhandled HFI interrupts 0x%lx\n",
@@ -850,14 +858,6 @@
 	}
 
 	if (is_gmu) {
-		if (!devm_request_mem_region(&gmu->pdev->dev, res->start,
-					resource_size(res),
-					res->name)) {
-			dev_err(&gmu->pdev->dev,
-				"GMU regs request mem region failed\n");
-			return -ENOMEM;
-		}
-
 		gmu->reg_phys = res->start;
 		gmu->reg_len = resource_size(res);
 		gmu->reg_virt = devm_ioremap(&gmu->pdev->dev, res->start,
@@ -1253,32 +1253,78 @@
 	return ret;
 }
 
+static int gmu_fast_boot(struct kgsl_device *device)
+{
+	int ret;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+
+	ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
+		GMU_RESET, 0);
+	if (ret)
+		return ret;
+
+	/* FIXME: enable the WD interrupt */
+
+	ret = hfi_start(gmu, GMU_WARM_BOOT);
+	if (ret)
+		return ret;
+
+	ret = gpudev->oob_set(adreno_dev, OOB_CPINIT_SET_MASK,
+			OOB_CPINIT_CHECK_MASK, OOB_CPINIT_CLEAR_MASK);
+
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
+
+	return ret;
+}
+
+static int gmu_suspend(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct gmu_device *gmu = &device->gmu;
+
+	if (!test_bit(GMU_CLK_ON, &gmu->flags))
+		return 0;
+
+	/* Pending message in all queues are abandoned */
+	hfi_stop(gmu);
+	clear_bit(GMU_HFI_ON, &gmu->flags);
+	gmu_irq_disable(device);
+
+	if (gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_SUSPEND, 0, 0))
+		return -EINVAL;
+
+	gmu_disable_clks(gmu);
+	gmu_disable_gdsc(gmu);
+	return 0;
+}
+
 /* To be called to power on both GPU and GMU */
 int gmu_start(struct kgsl_device *device)
 {
-	int ret = 0;
+	int ret = 0, perf_idx;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
 	struct gmu_device *gmu = &device->gmu;
 	int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
 
-	if (!kgsl_gmu_isenabled(device))
-		return 0;
+	switch (device->state) {
+	case KGSL_STATE_INIT:
+	case KGSL_STATE_SUSPEND:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
 
-	if (test_bit(GMU_CLK_ON, &gmu->flags))
-		return 0;
-
-	ret = gmu_enable_gdsc(gmu);
-	if (ret)
-		return ret;
-
-	gmu_enable_clks(gmu);
-
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		/* Convert to RPMh frequency index */
-		int perf_idx = gmu->num_gpupwrlevels -
+		perf_idx = gmu->num_gpupwrlevels -
 				pwr->default_pwrlevel - 1;
 
 		/* Vote for 300MHz DDR for GMU to init */
@@ -1305,8 +1351,16 @@
 		ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
 		if (ret)
 			goto error_gpu;
-	} else {
-		int perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
+
+		msm_bus_scale_client_update_request(gmu->pcl, 0);
+		break;
+
+	case KGSL_STATE_SLUMBER:
+		WARN_ON(test_bit(GMU_CLK_ON, &gmu->flags));
+		gmu_enable_gdsc(gmu);
+		gmu_enable_clks(gmu);
+
+		perf_idx = gmu->num_gpupwrlevels - gmu->wakeup_pwrlevel - 1;
 
 		ret = gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_START,
 				GMU_WARM_BOOT, 0);
@@ -1325,6 +1379,46 @@
 				goto error_gpu;
 			gmu->wakeup_pwrlevel = pwr->default_pwrlevel;
 		}
+		break;
+
+	case KGSL_STATE_RESET:
+		if (test_bit(ADRENO_DEVICE_HARD_RESET, &adreno_dev->priv)) {
+			gmu_suspend(device);
+			gmu_enable_gdsc(gmu);
+			gmu_enable_clks(gmu);
+
+			perf_idx = gmu->num_gpupwrlevels -
+				pwr->active_pwrlevel - 1;
+
+			bus_level =
+				pwr->pwrlevels[pwr->active_pwrlevel].bus_freq;
+			ret = gpudev->rpmh_gpu_pwrctrl(
+				adreno_dev, GMU_FW_START, GMU_RESET, 0);
+			if (ret)
+				goto error_clks;
+
+			gmu_irq_enable(device);
+
+			ret = hfi_start(gmu, GMU_WARM_BOOT);
+			if (ret)
+				goto error_gpu;
+
+			/* Send DCVS level prior to reset */
+			ret = gmu_dcvs_set(gmu, perf_idx, bus_level);
+			if (ret)
+				goto error_gpu;
+
+			ret = gpudev->oob_set(adreno_dev,
+				OOB_CPINIT_SET_MASK,
+				OOB_CPINIT_CHECK_MASK,
+				OOB_CPINIT_CLEAR_MASK);
+
+		} else {
+			gmu_fast_boot(device);
+		}
+		break;
+	default:
+		break;
 	}
 
 	/*
@@ -1332,30 +1426,20 @@
 	 * In v2, this function call shall move ahead
 	 * of hfi_start() to save power.
 	 */
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
+		gpudev->oob_clear(adreno_dev,
+				OOB_BOOT_SLUMBER_CLEAR_MASK);
 
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
-		msm_bus_scale_client_update_request(gmu->pcl, 0);
-		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
-			gpudev->oob_clear(adreno_dev,
-					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
-
-	return 0;
+	return ret;
 
 error_gpu:
 	hfi_stop(gmu);
 	gmu_irq_disable(device);
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND) {
 		if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
 			gpudev->oob_clear(adreno_dev,
 					OOB_BOOT_SLUMBER_CLEAR_MASK);
-	}
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_FW_STOP, 0, 0);
 error_bus:
-	if (device->state == KGSL_STATE_INIT ||
-			device->state == KGSL_STATE_SUSPEND)
 		msm_bus_scale_client_update_request(gmu->pcl, 0);
 error_clks:
 	gmu_disable_clks(gmu);
@@ -1393,8 +1477,7 @@
 
 	if (!idle || (gpudev->wait_for_gmu_idle &&
 			gpudev->wait_for_gmu_idle(adreno_dev))) {
-		dev_err(&gmu->pdev->dev, "Failure to stop GMU");
-		return;
+		dev_err(&gmu->pdev->dev, "Stopping GMU before it is idle\n");
 	}
 
 	/* Pending message in all queues are abandoned */
@@ -1459,8 +1542,6 @@
 
 	if (gmu->reg_virt) {
 		devm_iounmap(&gmu->pdev->dev, gmu->reg_virt);
-		devm_release_mem_region(&gmu->pdev->dev,
-				gmu->reg_phys, gmu->reg_len);
 		gmu->reg_virt = NULL;
 	}
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index 4cfc120..a741beb 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -139,7 +139,7 @@
 enum gmu_pwrctrl_mode {
 	GMU_FW_START,
 	GMU_FW_STOP,
-	GMU_POWER_RESET,
+	GMU_SUSPEND,
 	GMU_DCVS_NOHFI,
 	GMU_NOTIFY_SLUMBER,
 	INVALID_POWER_CTRL
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 7811079..4dd7b8e 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -156,9 +156,6 @@
 		*ab = pwr->bus_ab_mbytes;
 	else
 		*ab = (pwr->bus_percent_ab * max_bw) / 100;
-
-	if (*ab > ib)
-		*ab = ib;
 }
 
 /**
@@ -2052,10 +2049,6 @@
 
 			if (!strcmp(name, "isense_clk"))
 				pwr->isense_clk_indx = i;
-
-			if (device->ftbl->clk_set_options)
-				device->ftbl->clk_set_options(device, name,
-					pwr->grp_clks[i]);
 			break;
 		}
 	}
@@ -2480,6 +2473,22 @@
 	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
 }
 
+static void
+kgsl_pwrctrl_clk_set_options(struct kgsl_device *device, bool on)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	for (i = 0; i < KGSL_MAX_CLKS; i++) {
+		if (pwr->grp_clks[i] == NULL)
+			continue;
+
+		if (device->ftbl->clk_set_options)
+			device->ftbl->clk_set_options(device, clocks[i],
+				pwr->grp_clks[i], on);
+	}
+}
+
 /**
  * _init() - Get the GPU ready to start, but don't turn anything on
  * @device - Pointer to the kgsl_device struct
@@ -2493,6 +2502,8 @@
 		/* Force power on to do the stop */
 		status = kgsl_pwrctrl_enable(device);
 	case KGSL_STATE_ACTIVE:
+		/* fall through */
+	case KGSL_STATE_RESET:
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 		del_timer_sync(&device->idle_timer);
 		kgsl_pwrscale_midframe_timer_cancel(device);
@@ -2527,6 +2538,7 @@
 		device->ftbl->resume(device);
 		/* fall through */
 	case KGSL_STATE_SLUMBER:
+		kgsl_pwrctrl_clk_set_options(device, true);
 		status = device->ftbl->start(device,
 				device->pwrctrl.superfast);
 		device->pwrctrl.superfast = false;
@@ -2563,6 +2575,7 @@
 				device->pwrctrl.interval_timeout);
 		break;
 	case KGSL_STATE_AWARE:
+		kgsl_pwrctrl_clk_set_options(device, true);
 		/* Enable state before turning on irq */
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
@@ -2595,6 +2608,11 @@
 	int status = 0;
 
 	switch (device->state) {
+	case KGSL_STATE_RESET:
+		if (!kgsl_gmu_isenabled(device))
+			break;
+		status = gmu_start(device);
+		break;
 	case KGSL_STATE_INIT:
 		status = kgsl_pwrctrl_enable(device);
 		break;
@@ -2645,6 +2663,7 @@
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
 		/* fallthrough */
 	case KGSL_STATE_SLUMBER:
+	case KGSL_STATE_RESET:
 		break;
 	case KGSL_STATE_AWARE:
 		KGSL_PWR_WARN(device,
@@ -2681,6 +2700,7 @@
 		status = kgsl_pwrctrl_enable(device);
 		device->ftbl->suspend_context(device);
 		device->ftbl->stop(device);
+		kgsl_pwrctrl_clk_set_options(device, false);
 		kgsl_pwrctrl_disable(device);
 		kgsl_pwrscale_sleep(device);
 		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
@@ -2787,6 +2807,8 @@
 		break;
 	case KGSL_STATE_SUSPEND:
 		status = _suspend(device);
+	case KGSL_STATE_RESET:
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_RESET);
 		break;
 	default:
 		KGSL_PWR_INFO(device, "bad state request 0x%x\n", state);
@@ -2838,6 +2860,8 @@
 		return "SUSPEND";
 	case KGSL_STATE_SLUMBER:
 		return "SLUMBER";
+	case KGSL_STATE_RESET:
+		return "RESET";
 	default:
 		break;
 	}
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 7f8ff39..e46f656 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,6 +28,8 @@
 #define UHID_NAME	"uhid"
 #define UHID_BUFSIZE	32
 
+static DEFINE_MUTEX(uhid_open_mutex);
+
 struct uhid_device {
 	struct mutex devlock;
 	bool running;
@@ -142,15 +144,26 @@
 static int uhid_hid_open(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
+	int retval = 0;
 
-	return uhid_queue_event(uhid, UHID_OPEN);
+	mutex_lock(&uhid_open_mutex);
+	if (!hid->open++) {
+		retval = uhid_queue_event(uhid, UHID_OPEN);
+		if (retval)
+			hid->open--;
+	}
+	mutex_unlock(&uhid_open_mutex);
+	return retval;
 }
 
 static void uhid_hid_close(struct hid_device *hid)
 {
 	struct uhid_device *uhid = hid->driver_data;
 
-	uhid_queue_event(uhid, UHID_CLOSE);
+	mutex_lock(&uhid_open_mutex);
+	if (!--hid->open)
+		uhid_queue_event(uhid, UHID_CLOSE);
+	mutex_unlock(&uhid_open_mutex);
 }
 
 static int uhid_hid_parse(struct hid_device *hid)
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0e07a76..c6a922e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1400,37 +1400,38 @@
 {
 	unsigned char *data = wacom->data;
 
-	if (wacom->pen_input)
+	if (wacom->pen_input) {
 		dev_dbg(wacom->pen_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
-	else if (wacom->touch_input)
+
+		if (len == WACOM_PKGLEN_PENABLED ||
+		    data[0] == WACOM_REPORT_PENABLED)
+			return wacom_tpc_pen(wacom);
+	}
+	else if (wacom->touch_input) {
 		dev_dbg(wacom->touch_input->dev.parent,
 			"%s: received report #%d\n", __func__, data[0]);
 
-	switch (len) {
-	case WACOM_PKGLEN_TPC1FG:
-		return wacom_tpc_single_touch(wacom, len);
-
-	case WACOM_PKGLEN_TPC2FG:
-		return wacom_tpc_mt_touch(wacom);
-
-	case WACOM_PKGLEN_PENABLED:
-		return wacom_tpc_pen(wacom);
-
-	default:
-		switch (data[0]) {
-		case WACOM_REPORT_TPC1FG:
-		case WACOM_REPORT_TPCHID:
-		case WACOM_REPORT_TPCST:
-		case WACOM_REPORT_TPC1FGE:
+		switch (len) {
+		case WACOM_PKGLEN_TPC1FG:
 			return wacom_tpc_single_touch(wacom, len);
 
-		case WACOM_REPORT_TPCMT:
-		case WACOM_REPORT_TPCMT2:
-			return wacom_mt_touch(wacom);
+		case WACOM_PKGLEN_TPC2FG:
+			return wacom_tpc_mt_touch(wacom);
 
-		case WACOM_REPORT_PENABLED:
-			return wacom_tpc_pen(wacom);
+		default:
+			switch (data[0]) {
+			case WACOM_REPORT_TPC1FG:
+			case WACOM_REPORT_TPCHID:
+			case WACOM_REPORT_TPCST:
+			case WACOM_REPORT_TPC1FGE:
+				return wacom_tpc_single_touch(wacom, len);
+
+			case WACOM_REPORT_TPCMT:
+			case WACOM_REPORT_TPCMT2:
+				return wacom_mt_touch(wacom);
+
+			}
 		}
 	}
 
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index 8a57ed2..d26e0d0 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -64,7 +64,7 @@
 #define ITCHIN			(0xEF4)
 #define ITTRIGIN		(0xEF8)
 
-#define CTI_MAX_TRIGGERS	(8)
+#define CTI_MAX_TRIGGERS	(32)
 #define CTI_MAX_CHANNELS	(4)
 #define AFFINITY_LEVEL_L2	1
 
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77ee..a2e3dd7 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@
 		    int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmalloc(len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
-			       USB_DIR_IN, value, index, data, len, 2000);
+			       USB_DIR_IN, value, index, dmadata, len, 2000);
+
+	memcpy(data, dmadata, len);
+	kfree(dmadata);
+	return ret;
 }
 
 static int usb_write(struct i2c_adapter *adapter, int cmd,
 		     int value, int index, void *data, int len)
 {
 	struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+	void *dmadata = kmemdup(data, len, GFP_KERNEL);
+	int ret;
+
+	if (!dmadata)
+		return -ENOMEM;
 
 	/* do control transfer */
-	return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+	ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
 			       cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
-			       value, index, data, len, 2000);
+			       value, index, dmadata, len, 2000);
+
+	kfree(dmadata);
+	return ret;
 }
 
 static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index b5beea53..ab646a9 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -217,7 +217,15 @@
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->poll.report_id,
+				     st->poll.index, sizeof(value), &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->poll_interval = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
 
@@ -259,7 +267,16 @@
 	if (ret < 0 || value < 0)
 		ret = -EINVAL;
 
-	return ret;
+	ret = sensor_hub_get_feature(st->hsdev,
+				     st->sensitivity.report_id,
+				     st->sensitivity.index, sizeof(value),
+				     &value);
+	if (ret < 0 || value < 0)
+		return -EINVAL;
+
+	st->raw_hystersis = value;
+
+	return 0;
 }
 EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
 
@@ -355,6 +372,9 @@
 	/* Default unit of measure is milliseconds */
 	if (st->poll.units == 0)
 		st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
+
+	st->poll_interval = -1;
+
 	return 0;
 
 }
@@ -377,6 +397,8 @@
 					HID_USAGE_SENSOR_PROY_POWER_STATE,
 					&st->power_state);
 
+	st->raw_hystersis = -1;
+
 	sensor_hub_input_get_attribute_info(hsdev,
 			HID_FEATURE_REPORT, usage_id,
 			HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index ecf592d..6082934 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -51,6 +51,8 @@
 			st->report_state.report_id,
 			st->report_state.index,
 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+
+		poll_value = hid_sensor_read_poll_value(st);
 	} else {
 		int val;
 
@@ -87,9 +89,7 @@
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
-	if (state)
-		poll_value = hid_sensor_read_poll_value(st);
-	if (poll_value > 0)
+	if (state && poll_value)
 		msleep_interruptible(poll_value * 2);
 
 	return 0;
@@ -127,6 +127,20 @@
 	struct hid_sensor_common *attrb = container_of(work,
 						       struct hid_sensor_common,
 						       work);
+
+	if (attrb->poll_interval >= 0)
+		sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id,
+				       attrb->poll.index,
+				       sizeof(attrb->poll_interval),
+				       &attrb->poll_interval);
+
+	if (attrb->raw_hystersis >= 0)
+		sensor_hub_set_feature(attrb->hsdev,
+				       attrb->sensitivity.report_id,
+				       attrb->sensitivity.index,
+				       sizeof(attrb->raw_hystersis),
+				       &attrb->raw_hystersis);
+
 	_hid_sensor_power_state(attrb, true);
 }
 
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e690dd1..4b0f942 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -184,9 +184,9 @@
 	.address = (chan),					\
 	.scan_type = {						\
 		.sign = 'u',					\
-		.realbits = '8',				\
-		.storagebits = '8',				\
-		.shift = '0',					\
+		.realbits = 8,					\
+		.storagebits = 8,				\
+		.shift = 0,					\
 	},							\
 	.ext_info = ad7303_ext_info,				\
 }
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index e5a533c..f762eb8 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -175,11 +175,12 @@
 	}
 	H6 = sign_extend32(tmp, 7);
 
-	var = ((s32)data->t_fine) - 76800;
-	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
-		* (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
-		+ 2097152) * H2 + 8192) >> 14);
-	var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
+	var = ((s32)data->t_fine) - (s32)76800;
+	var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
+		+ (s32)16384) >> 15) * (((((((var * H6) >> 10)
+		* (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
+		+ (s32)2097152) * H2 + 8192) >> 14);
+	var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
 
 	return var >> 12;
 };
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 5656deb..0204595 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -50,7 +50,6 @@
 #define AS3935_TUNE_CAP		0x08
 #define AS3935_CALIBRATE	0x3D
 
-#define AS3935_WRITE_DATA	BIT(15)
 #define AS3935_READ_DATA	BIT(14)
 #define AS3935_ADDRESS(x)	((x) << 8)
 
@@ -105,7 +104,7 @@
 {
 	u8 *buf = st->buf;
 
-	buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
+	buf[0] = AS3935_ADDRESS(reg) >> 8;
 	buf[1] = val;
 
 	return spi_write(st->spi, buf, 2);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0f58f46..8fd108d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -444,8 +444,8 @@
 	fl6.saddr = src_in->sin6_addr;
 	fl6.flowi6_oif = addr->bound_dev_if;
 
-	dst = ip6_route_output(addr->net, NULL, &fl6);
-	if ((ret = dst->error))
+	ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+	if (ret < 0)
 		goto put;
 
 	rt = (struct rt6_info *)dst;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bd786b7..bb72976 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -751,6 +751,9 @@
 	/* release the cpu */
 	hfi1_put_proc_affinity(fdata->rec_cpu_num);
 
+	/* clean up rcv side */
+	hfi1_user_exp_rcv_free(fdata);
+
 	/*
 	 * Clear any left over, unhandled events so the next process that
 	 * gets this context doesn't get confused.
@@ -790,7 +793,7 @@
 
 	dd->rcd[uctxt->ctxt] = NULL;
 
-	hfi1_user_exp_rcv_free(fdata);
+	hfi1_user_exp_rcv_grp_free(uctxt);
 	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
 
 	uctxt->rcvwait_to = 0;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3b5bc9..34cfd34 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1757,6 +1757,7 @@
 			    !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
 				dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
 					   rcd->ctxt);
+				ret = -ENOMEM;
 				goto bail_rcvegrbuf_phys;
 			}
 
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 83198a8..4bd5b5c 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -2366,8 +2366,11 @@
 		ret = hfi1_rvt_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			/* peer will send again */
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
 		goto send_last;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 64d2652..db0f140 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -250,36 +250,40 @@
 	return ret;
 }
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
+{
+	struct tid_group *grp, *gptr;
+
+	list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
+				 list) {
+		list_del_init(&grp->list);
+		kfree(grp);
+	}
+	hfi1_clear_tids(uctxt);
+}
+
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct tid_group *grp, *gptr;
 
-	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
-		return 0;
 	/*
 	 * The notifier would have been removed when the process'es mm
 	 * was freed.
 	 */
-	if (fd->handler)
+	if (fd->handler) {
 		hfi1_mmu_rb_unregister(fd->handler);
-
-	kfree(fd->invalid_tids);
-
-	if (!uctxt->cnt) {
+	} else {
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
 		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
 			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
-		list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
-					 list) {
-			list_del_init(&grp->list);
-			kfree(grp);
-		}
-		hfi1_clear_tids(uctxt);
 	}
 
+	kfree(fd->invalid_tids);
+	fd->invalid_tids = NULL;
+
 	kfree(fd->entry_to_rb);
+	fd->entry_to_rb = NULL;
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9f..d1d7d3d 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
@@ -70,6 +70,7 @@
 		(tid) |= EXP_TID_SET(field, (value));			\
 	} while (0)
 
+void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
 int hfi1_user_exp_rcv_init(struct file *);
 int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
 int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index be2d02b..1fb31a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1828,7 +1828,7 @@
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
 		klms[i].key = cpu_to_be32(lkey);
-		mr->ibmr.length += sg_dma_len(sg);
+		mr->ibmr.length += sg_dma_len(sg) - sg_offset;
 
 		sg_offset = 0;
 	}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 2097512..f3fe787 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2067,8 +2067,10 @@
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index dd96670..cb9726e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -166,6 +166,7 @@
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
 #define SMR_VALID			(1 << 31)
 #define SMR_MASK_SHIFT			16
+#define SMR_MASK_MASK			0x7FFF
 #define SMR_ID_SHIFT			0
 
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
@@ -335,10 +336,12 @@
 	enum arm_smmu_s2cr_type		type;
 	enum arm_smmu_s2cr_privcfg	privcfg;
 	u8				cbndx;
+	bool				cb_handoff;
 };
 
 #define s2cr_init_val (struct arm_smmu_s2cr){				\
 	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
+	.cb_handoff = false,						\
 }
 
 struct arm_smmu_smr {
@@ -409,8 +412,8 @@
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
 #define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
-#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
 #define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
+#define ARM_SMMU_OPT_3LVL_TABLES	(1 << 4)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -527,8 +530,8 @@
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
 	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
-	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
 	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
+	{ ARM_SMMU_OPT_3LVL_TABLES, "qcom,use-3-lvl-tables" },
 	{ 0, NULL},
 };
 
@@ -546,8 +549,15 @@
 static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
 static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
 
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+				    dma_addr_t iova);
+
 static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
 
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu,
+				struct device *dev);
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
@@ -1567,6 +1577,8 @@
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
+			if (smmu->options & ARM_SMMU_OPT_3LVL_TABLES)
+				ias = min(ias, 39UL);
 		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
@@ -1608,14 +1620,11 @@
 	if (is_iommu_pt_coherent(smmu_domain))
 		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
 
-	/* Dynamic domains must set cbndx through domain attribute */
-	if (!dynamic) {
-		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
-				      smmu->num_context_banks);
-		if (ret < 0)
-			goto out_unlock;
-		cfg->cbndx = ret;
-	}
+	ret = arm_smmu_alloc_cb(domain, smmu, dev);
+	if (ret < 0)
+		goto out_unlock;
+	cfg->cbndx = ret;
+
 	if (smmu->version < ARM_SMMU_V2) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
@@ -2185,6 +2194,23 @@
 	return ret;
 }
 
+static uint64_t arm_smmu_iova_to_pte(struct iommu_domain *domain,
+	      dma_addr_t iova)
+{
+	uint64_t ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return 0;
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->iova_to_pte(ops, iova);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	return ret;
+}
+
 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 			     size_t size)
 {
@@ -2219,14 +2245,18 @@
 	return ret;
 }
 
+#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
 static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			   struct scatterlist *sg, unsigned int nents, int prot)
 {
 	int ret;
-	size_t size;
+	size_t size, batch_size, size_to_unmap = 0;
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	unsigned int idx_start, idx_end;
+	struct scatterlist *sg_start, *sg_end;
+	unsigned long __saved_iova_start;
 
 	if (!ops)
 		return -ENODEV;
@@ -2235,17 +2265,45 @@
 	if (ret)
 		return ret;
 
-	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
-	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	__saved_iova_start = iova;
+	idx_start = idx_end = 0;
+	sg_start = sg_end = sg;
+	while (idx_end < nents) {
+		batch_size = sg_end->length;
+		sg_end = sg_next(sg_end);
+		idx_end++;
+		while ((idx_end < nents) &&
+		       (batch_size + sg_end->length < MAX_MAP_SG_BATCH_SIZE)) {
 
-	if (!ret)
-		arm_smmu_unmap(domain, iova, size);
+			batch_size += sg_end->length;
+			sg_end = sg_next(sg_end);
+			idx_end++;
+		}
 
-	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+		spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+		ret = ops->map_sg(ops, iova, sg_start, idx_end - idx_start,
+				  prot, &size);
+		spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+		/* Returns 0 on error */
+		if (!ret) {
+			size_to_unmap = iova + size - __saved_iova_start;
+			goto out;
+		}
+
+		iova += batch_size;
+		idx_start = idx_end;
+		sg_start = sg_end;
+	}
+
+out:
 	arm_smmu_assign_table(smmu_domain);
 
-	return ret;
+	if (size_to_unmap) {
+		arm_smmu_unmap(domain, __saved_iova_start, size_to_unmap);
+		iova = __saved_iova_start;
+	}
+	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+	return iova - __saved_iova_start;
 }
 
 static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
@@ -2972,6 +3030,7 @@
 	.enable_config_clocks	= arm_smmu_enable_config_clocks,
 	.disable_config_clocks	= arm_smmu_disable_config_clocks,
 	.is_iova_coherent	= arm_smmu_is_iova_coherent,
+	.iova_to_pte = arm_smmu_iova_to_pte,
 };
 
 #define IMPL_DEF1_MICRO_MMU_CTRL	0
@@ -3162,12 +3221,10 @@
 	 * Reset stream mapping groups: Initial values mark all SMRn as
 	 * invalid and all S2CRn as bypass unless overridden.
 	 */
-	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
-		for (i = 0; i < smmu->num_mapping_groups; ++i)
-			arm_smmu_write_sme(smmu, i);
+	for (i = 0; i < smmu->num_mapping_groups; ++i)
+		arm_smmu_write_sme(smmu, i);
 
-		arm_smmu_context_bank_reset(smmu);
-	}
+	arm_smmu_context_bank_reset(smmu);
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -3224,6 +3281,92 @@
 	}
 }
 
+
+/*
+ * Some context banks need to be transferred from the bootloader to HLOS in a way
+ * that allows ongoing traffic. The current expectation is that these context
+ * banks operate in bypass mode.
+ * Additionally, there must be exactly one device in devicetree with stream-ids
+ * overlapping those used by the bootloader.
+ */
+static int arm_smmu_alloc_cb(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu,
+				struct device *dev)
+{
+	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+	u32 i, idx;
+	int cb = -EINVAL;
+	bool dynamic;
+
+	/* Dynamic domains must set cbndx through domain attribute */
+	dynamic = is_dynamic_domain(domain);
+	if (dynamic)
+		return INVALID_CBNDX;
+
+	mutex_lock(&smmu->stream_map_mutex);
+	for_each_cfg_sme(fwspec, i, idx) {
+		if (smmu->s2crs[idx].cb_handoff)
+			cb = smmu->s2crs[idx].cbndx;
+	}
+
+	if (cb < 0) {
+		mutex_unlock(&smmu->stream_map_mutex);
+		return __arm_smmu_alloc_bitmap(smmu->context_map,
+						smmu->num_s2_context_banks,
+						smmu->num_context_banks);
+	}
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		if (smmu->s2crs[i].cbndx == cb) {
+			smmu->s2crs[i].cbndx = 0;
+			smmu->s2crs[i].cb_handoff = false;
+			smmu->s2crs[i].count -= 1;
+		}
+	}
+	mutex_unlock(&smmu->stream_map_mutex);
+
+	return cb;
+}
+
+static int arm_smmu_handoff_cbs(struct arm_smmu_device *smmu)
+{
+	u32 i, raw_smr, raw_s2cr;
+	struct arm_smmu_smr smr;
+	struct arm_smmu_s2cr s2cr;
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		raw_smr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+					ARM_SMMU_GR0_SMR(i));
+		if (!(raw_smr & SMR_VALID))
+			continue;
+
+		smr.mask = (raw_smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+		smr.id = (u16)raw_smr;
+		smr.valid = true;
+
+		raw_s2cr = readl_relaxed(ARM_SMMU_GR0(smmu) +
+					ARM_SMMU_GR0_S2CR(i));
+		s2cr.group = NULL;
+		s2cr.count = 1;
+		s2cr.type = (raw_s2cr >> S2CR_TYPE_SHIFT) & S2CR_TYPE_MASK;
+		s2cr.privcfg = (raw_s2cr >> S2CR_PRIVCFG_SHIFT) &
+				S2CR_PRIVCFG_MASK;
+		s2cr.cbndx = (u8)raw_s2cr;
+		s2cr.cb_handoff = true;
+
+		if (s2cr.type != S2CR_TYPE_TRANS)
+			continue;
+
+		smmu->smrs[i] = smr;
+		smmu->s2crs[i] = s2cr;
+		bitmap_set(smmu->context_map, s2cr.cbndx, 1);
+		dev_dbg(smmu->dev, "Handoff smr: %x s2cr: %x cb: %d\n",
+			raw_smr, raw_s2cr, s2cr.cbndx);
+	}
+
+	return 0;
+}
+
 static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
 {
 	struct device *dev = smmu->dev;
@@ -3483,6 +3626,7 @@
 	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
 		u32 smr;
+		int i;
 
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
 		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
@@ -3497,14 +3641,25 @@
 		 * bits are set, so check each one separately. We can reject
 		 * masters later if they try to claim IDs outside these masks.
 		 */
+		for (i = 0; i < size; i++) {
+			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
+			if (!(smr & SMR_VALID))
+				break;
+		}
+		if (i == size) {
+			dev_err(smmu->dev,
+				"Unable to compute streamid_masks\n");
+			return -ENODEV;
+		}
+
 		smr = smmu->streamid_mask << SMR_ID_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
 		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
 
 		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(i));
+		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(i));
 		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
 
 		/* Zero-initialised to mark as invalid */
@@ -3796,6 +3951,10 @@
 	if (err)
 		goto out_power_off;
 
+	err = arm_smmu_handoff_cbs(smmu);
+	if (err)
+		goto out_power_off;
+
 	err = arm_smmu_parse_impl_def_registers(smmu);
 	if (err)
 		goto out_power_off;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ac3059d..560bb43 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -765,6 +765,51 @@
 	return ERR_PTR(-ENOMEM);
 }
 
+/*
+ * Based on similar code from dma-iommu.c, but modified to use a different
+ * iova allocator
+ */
+static void fast_smmu_reserve_pci_windows(struct device *dev,
+			    struct dma_fast_smmu_mapping *mapping)
+{
+	struct pci_host_bridge *bridge;
+	struct resource_entry *window;
+	phys_addr_t start, end;
+	struct pci_dev *pci_dev;
+	unsigned long flags;
+
+	if (!dev_is_pci(dev))
+		return;
+
+	pci_dev = to_pci_dev(dev);
+	bridge = pci_find_host_bridge(pci_dev->bus);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM &&
+		    resource_type(window->res) != IORESOURCE_IO)
+			continue;
+
+		start = round_down(window->res->start - window->offset,
+				FAST_PAGE_SIZE);
+		end = round_up(window->res->end - window->offset,
+				FAST_PAGE_SIZE);
+		start = max_t(unsigned long, mapping->base, start);
+		end = min_t(unsigned long, mapping->base + mapping->size, end);
+		if (start >= end)
+			continue;
+
+		dev_dbg(dev, "iova allocator reserved 0x%pa-0x%pa\n",
+				&start, &end);
+
+		start = (start - mapping->base) >> FAST_PAGE_SHIFT;
+		end = (end - mapping->base) >> FAST_PAGE_SHIFT;
+		bitmap_set(mapping->bitmap, start, end - start);
+	}
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+
 /**
  * fast_smmu_attach_device
  * @dev: valid struct device pointer
@@ -798,6 +843,8 @@
 	mapping->fast->domain = domain;
 	mapping->fast->dev = dev;
 
+	fast_smmu_reserve_pci_windows(dev, mapping->fast);
+
 	group = dev->iommu_group;
 	if (!group) {
 		dev_err(dev, "No iommu associated with device\n");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b9e50c1..87fcbf7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2049,11 +2049,14 @@
 	if (context_copied(context)) {
 		u16 did_old = context_domain_id(context);
 
-		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
 			iommu->flush.flush_context(iommu, did_old,
 						   (((u16)bus) << 8) | devfn,
 						   DMA_CCMD_MASK_NOBIT,
 						   DMA_CCMD_DEVICE_INVL);
+			iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+						 DMA_TLB_DSI_FLUSH);
+		}
 	}
 
 	pgd = domain->pgd;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f7739ae..ea72b9c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -735,7 +735,8 @@
 		arm_lpae_iopte *table_base = table;
 		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
 		int entry_size = ARM_LPAE_GRANULE(data);
-		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) >>
+				data->pg_shift;
 		int entries = min_t(int, size / entry_size,
 			max_entries - tl_offset);
 		int table_len = entries * sizeof(*table);
@@ -854,6 +855,19 @@
 	return 0;
 }
 
+static uint64_t arm_lpae_iova_get_pte(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+		return pte;
+
+	return 0;
+}
+
 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
 					 unsigned long iova)
 {
@@ -983,6 +997,7 @@
 		.unmap		= arm_lpae_unmap,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
 		.is_iova_coherent = arm_lpae_is_iova_coherent,
+		.iova_to_pte	= arm_lpae_iova_get_pte,
 	};
 
 	return data;
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 1599121..a686ad0 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -149,6 +149,8 @@
 				    unsigned long iova);
 	bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
 				unsigned long iova);
+	uint64_t (*iova_to_pte)(struct io_pgtable_ops *ops,
+		    unsigned long iova);
 
 };
 
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 0c49a64..6bb435b 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -154,6 +154,7 @@
 static LIST_HEAD(iommu_debug_devices);
 static struct dentry *debugfs_tests_dir;
 static u32 iters_per_op = 1;
+static void *test_virt_addr;
 
 struct iommu_debug_device {
 	struct device *dev;
@@ -1207,6 +1208,68 @@
 	return -EIO;
 }
 
+static ssize_t __iommu_debug_dma_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	struct dma_iommu_mapping *dma_mapping;
+	ssize_t retval = -EINVAL;
+	int val;
+
+	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+		pr_err("Invalid format. Expected a hex or decimal integer");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (val) {
+		if (dev->archdata.mapping && dev->archdata.mapping->domain) {
+			pr_err("Already attached.\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		if (WARN(dev->archdata.iommu,
+			"Attachment tracking out of sync with device\n")) {
+			retval = -EINVAL;
+			goto out;
+		}
+
+		dma_mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+				(SZ_1G * 4ULL));
+
+		if (!dma_mapping)
+			goto out;
+
+		if (arm_iommu_attach_device(dev, dma_mapping))
+			goto out_release_mapping;
+		pr_err("Attached\n");
+	} else {
+		if (!dev->archdata.mapping) {
+			pr_err("No mapping. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		if (!dev->archdata.mapping->domain) {
+			pr_err("No domain. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		arm_iommu_detach_device(dev);
+		arm_iommu_release_mapping(dev->archdata.mapping);
+		pr_err("Detached\n");
+	}
+	retval = count;
+	return retval;
+
+out_release_mapping:
+	arm_iommu_release_mapping(dma_mapping);
+out:
+	return retval;
+}
+
 static ssize_t __iommu_debug_attach_write(struct file *file,
 					  const char __user *ubuf,
 					  size_t count, loff_t *offset,
@@ -1260,6 +1323,81 @@
 	return retval;
 }
 
+static ssize_t iommu_debug_dma_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	return __iommu_debug_dma_attach_write(file, ubuf, count, offset);
+
+}
+
+static ssize_t iommu_debug_dma_attach_read(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	char c[2];
+
+	if (*offset)
+		return 0;
+
+	if (!dev->archdata.mapping)
+		c[0] = '0';
+	else
+		c[0] = dev->archdata.mapping->domain ? '1' : '0';
+
+	c[1] = '\n';
+	if (copy_to_user(ubuf, &c, 2)) {
+		pr_err("copy_to_user failed\n");
+		return -EFAULT;
+	}
+	*offset = 1;		/* non-zero means we're done */
+
+	return 2;
+}
+
+static const struct file_operations iommu_debug_dma_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_attach_write,
+	.read	= iommu_debug_dma_attach_read,
+};
+
+static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
+					       char __user *ubuf,
+					       size_t count, loff_t *offset)
+{
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+	int buf_len = sizeof(buf);
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, buf_len);
+
+	if (!test_virt_addr)
+		strlcpy(buf, "FAIL\n", buf_len);
+	else
+		snprintf(buf, buf_len, "0x%pK\n", test_virt_addr);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_test_virt_addr_fops = {
+	.open	= simple_open,
+	.read	= iommu_debug_test_virt_addr_read,
+};
+
 static ssize_t iommu_debug_attach_write(struct file *file,
 					  const char __user *ubuf,
 					  size_t count, loff_t *offset)
@@ -1309,6 +1447,75 @@
 	.read	= iommu_debug_attach_read,
 };
 
+static ssize_t iommu_debug_pte_write(struct file *file,
+				      const char __user *ubuf,
+				      size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	dma_addr_t iova;
+
+	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+		pr_err("Invalid format for iova\n");
+		ddev->iova = 0;
+		return -EINVAL;
+	}
+
+	ddev->iova = iova;
+	pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+	return count;
+}
+
+
+static ssize_t iommu_debug_pte_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	uint64_t pte;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	pte = iommu_iova_to_pte(dev->archdata.mapping->domain,
+			ddev->iova);
+
+	if (!pte)
+		strlcpy(buf, "FAIL\n", sizeof(buf));
+	else
+		snprintf(buf, sizeof(buf), "pte=%016llx\n", pte);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_pte_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_pte_write,
+	.read	= iommu_debug_pte_read,
+};
+
 static ssize_t iommu_debug_atos_write(struct file *file,
 				      const char __user *ubuf,
 				      size_t count, loff_t *offset)
@@ -1370,6 +1577,55 @@
 	.read	= iommu_debug_atos_read,
 };
 
+static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	phys_addr_t phys;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	phys = iommu_iova_to_phys_hard(dev->archdata.mapping->domain,
+			ddev->iova);
+	if (!phys)
+		strlcpy(buf, "FAIL\n", sizeof(buf));
+	else
+		snprintf(buf, sizeof(buf), "%pa\n", &phys);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_atos_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_atos_write,
+	.read	= iommu_debug_dma_atos_read,
+};
+
 static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
 				     size_t count, loff_t *offset)
 {
@@ -1450,6 +1706,159 @@
 	.write	= iommu_debug_map_write,
 };
 
+/*
+ * Performs DMA mapping of a given virtual address and size to an iova address.
+ * User input format: (addr,len,dma attr) where dma attr is:
+ *				0: normal mapping
+ *				1: force coherent mapping
+ *				2: force non-coherent mapping
+ *				3: use system cache
+ */
+static ssize_t iommu_debug_dma_map_write(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *offset)
+{
+	ssize_t retval = -EINVAL;
+	char *comma1, *comma2;
+	char buf[100];
+	unsigned long addr;
+	void *v_addr;
+	dma_addr_t iova;
+	size_t size;
+	unsigned int attr;
+	unsigned long dma_attrs;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	if (count >= sizeof(buf)) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	memset(buf, 0, sizeof(buf));
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	*comma1 = *comma2 = '\0';
+
+	if (kstrtoul(buf, 0, &addr))
+		goto invalid_format;
+	v_addr = (void *)addr;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtouint(comma2 + 1, 0, &attr))
+		goto invalid_format;
+
+	if (v_addr < test_virt_addr || v_addr > (test_virt_addr + SZ_1M - 1))
+		goto invalid_addr;
+
+	if (attr == 0)
+		dma_attrs = 0;
+	else if (attr == 1)
+		dma_attrs = DMA_ATTR_FORCE_COHERENT;
+	else if (attr == 2)
+		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+	else if (attr == 3)
+		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+	else
+		goto invalid_format;
+
+	iova = dma_map_single_attrs(dev, v_addr, size,
+					DMA_TO_DEVICE, dma_attrs);
+
+	if (dma_mapping_error(dev, iova)) {
+		pr_err("Failed to perform dma_map_single\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	retval = count;
+	pr_err("Mapped 0x%p to %pa (len=0x%zx)\n",
+			v_addr, &iova, size);
+	ddev->iova = iova;
+		pr_err("Saved iova=%pa for future PTE commands\n", &iova);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
+	return retval;
+
+invalid_addr:
+	pr_err("Invalid addr given! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n");
+	return retval;
+}
+
+static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
+	     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+	dma_addr_t iova;
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		return -EINVAL;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	iova = ddev->iova;
+	snprintf(buf, sizeof(buf), "%pa\n", &iova);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_map_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_map_write,
+	.read	= iommu_debug_dma_map_read,
+};
+
 static ssize_t iommu_debug_unmap_write(struct file *file,
 				       const char __user *ubuf,
 				       size_t count, loff_t *offset)
@@ -1515,6 +1924,91 @@
 	.write	= iommu_debug_unmap_write,
 };
 
+static ssize_t iommu_debug_dma_unmap_write(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	ssize_t retval = 0;
+	char *comma1, *comma2;
+	char buf[100];
+	size_t size;
+	unsigned int attr;
+	dma_addr_t iova;
+	unsigned long dma_attrs;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	if (count >= sizeof(buf)) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!dev->archdata.mapping) {
+		pr_err("No mapping. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+	if (!dev->archdata.mapping->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	memset(buf, 0, sizeof(buf));
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	*comma1 = *comma2 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtouint(comma2 + 1, 0, &attr))
+		goto invalid_format;
+
+	if (attr == 0)
+		dma_attrs = 0;
+	else if (attr == 1)
+		dma_attrs = DMA_ATTR_FORCE_COHERENT;
+	else if (attr == 2)
+		dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
+	else if (attr == 3)
+		dma_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+	else
+		goto invalid_format;
+
+	dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
+
+	retval = count;
+	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,len, dma attr\n");
+	return retval;
+}
+
+static const struct file_operations iommu_debug_dma_unmap_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_dma_unmap_write,
+};
+
 static ssize_t iommu_debug_config_clocks_write(struct file *file,
 					       const char __user *ubuf,
 					       size_t count, loff_t *offset)
@@ -1624,6 +2118,13 @@
 		goto err_rmdir;
 	}
 
+	if (!debugfs_create_file("test_virt_addr", 0400, dir, ddev,
+				&iommu_debug_test_virt_addr_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/test_virt_addr debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
 	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
 				 &iommu_debug_profiling_fops)) {
 		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
@@ -1666,6 +2167,13 @@
 		goto err_rmdir;
 	}
 
+	if (!debugfs_create_file("dma_attach", 0600, dir, ddev,
+				 &iommu_debug_dma_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_attach debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
 	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
 				 &iommu_debug_attach_fops)) {
 		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
@@ -1687,6 +2195,13 @@
 		goto err_rmdir;
 	}
 
+	if (!debugfs_create_file("dma_atos", 0600, dir, ddev,
+				 &iommu_debug_dma_atos_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_atos debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
 	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
 				 &iommu_debug_map_fops)) {
 		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
@@ -1694,6 +2209,13 @@
 		goto err_rmdir;
 	}
 
+	if (!debugfs_create_file("dma_map", 0600, dir, ddev,
+				 &iommu_debug_dma_map_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_map debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
 	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
 				 &iommu_debug_unmap_fops)) {
 		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
@@ -1701,6 +2223,20 @@
 		goto err_rmdir;
 	}
 
+	if (!debugfs_create_file("dma_unmap", 0200, dir, ddev,
+				 &iommu_debug_dma_unmap_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/dma_unmap debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("pte", 0600, dir, ddev,
+			&iommu_debug_pte_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/pte debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
 	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
 				 &iommu_debug_config_clocks_fops)) {
 		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
@@ -1734,6 +2270,11 @@
 		return -ENODEV;
 	}
 
+	test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
+
+	if (!test_virt_addr)
+		return -ENOMEM;
+
 	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
 				snarf_iommu_devices);
 }
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index e81bb48..6c3f8a2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1282,6 +1282,15 @@
 	return domain->ops->iova_to_phys_hard(domain, iova);
 }
 
+uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+				    dma_addr_t iova)
+{
+	if (unlikely(domain->ops->iova_to_pte == NULL))
+		return 0;
+
+	return domain->ops->iova_to_pte(domain, iova);
+}
+
 bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
 {
 	if (unlikely(domain->ops->is_iova_coherent == NULL))
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 41515bb..ee50a61 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -299,3 +299,5 @@
 config STM32_EXTI
 	bool
 	select IRQ_DOMAIN
+
+source "drivers/irqchip/qcom/Kconfig"
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 987bd89..450059c 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -75,3 +75,4 @@
 obj-$(CONFIG_EZNPS_GIC)			+= irq-eznps.o
 obj-$(CONFIG_ARCH_ASPEED)		+= irq-aspeed-vic.o
 obj-$(CONFIG_STM32_EXTI) 		+= irq-stm32-exti.o
+obj-$(CONFIG_QTI_PDC)			+= qcom/
diff --git a/drivers/irqchip/qcom/Kconfig b/drivers/irqchip/qcom/Kconfig
new file mode 100644
index 0000000..e4a7a88
--- /dev/null
+++ b/drivers/irqchip/qcom/Kconfig
@@ -0,0 +1,15 @@
+config QTI_PDC
+	bool "QTI PDC"
+	depends on ARCH_QCOM
+	select IRQ_DOMAIN
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  QTI Power Domain Controller driver to manage and configure wakeup
+	  IRQs.
+
+config QTI_PDC_SDM845
+	bool "QTI PDC SDM845"
+	select QTI_PDC
+	default y if ARCH_SDM845
+	help
+	  QTI Power Domain Controller for SDM845.
diff --git a/drivers/irqchip/qcom/Makefile b/drivers/irqchip/qcom/Makefile
new file mode 100644
index 0000000..1b7856d
--- /dev/null
+++ b/drivers/irqchip/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QTI_PDC)			+= pdc.o
+obj-$(CONFIG_QTI_PDC_SDM845)		+= pdc-sdm845.o
diff --git a/drivers/irqchip/qcom/pdc-sdm845.c b/drivers/irqchip/qcom/pdc-sdm845.c
new file mode 100644
index 0000000..178cf1f0
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc-sdm845.c
@@ -0,0 +1,139 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/irqchip.h>
+#include "pdc.h"
+
+static struct pdc_pin sdm845_data[] = {
+	{0, 512}, /* rpmh_wake */
+	{1, 513}, /* ee0_apps_hlos_spmi_periph_irq */
+	{2, 514}, /* ee1_apps_trustzone_spmi_periph_irq */
+	{3, 515}, /* secure_wdog_expired */
+	{4, 516}, /* secure_wdog_bark_irq */
+	{5, 517}, /* aop_wdog_expired_irq */
+	{6, 518}, /* qmp_usb3_lfps_rxterm_irq */
+	{7, 519}, /* qmp_usb3_lfps_rxterm_irq */
+	{8, 520}, /* eud_p0_dmse_int_mx */
+	{9, 521}, /* eud_p0_dpse_int_mx */
+	{10, 522}, /* eud_p1_dmse_int_mx */
+	{11, 523}, /* eud_p1_dpse_int_mx */
+	{12, 524}, /* eud_int_mx[1] */
+	{13, 525}, /* ssc_xpu_irq_summary */
+	{14, 526}, /* wd_bite_apps */
+	{15, 527}, /* ssc_vmidmt_irq_summary */
+	{16, 528}, /* q6ss_irq_out_apps_ipc[4] */
+	{17, 529}, /* not-connected */
+	{18, 530}, /* aoss_pmic_arb_mpu_xpu_summary_irq */
+	{19, 531}, /* apps_pdc_irq_in_19 */
+	{20, 532}, /* apps_pdc_irq_in_20 */
+	{21, 533}, /* apps_pdc_irq_in_21 */
+	{22, 534}, /* pdc_apps_epcb_timeout_summary_irq */
+	{23, 535}, /* spmi_protocol_irq */
+	{24, 536}, /* tsense0_tsense_max_min_int */
+	{25, 537}, /* tsense1_tsense_max_min_int */
+	{26, 538}, /* tsense0_upper_lower_intr */
+	{27, 539}, /* tsense1_upper_lower_intr */
+	{28, 540}, /* tsense0_critical_intr */
+	{29, 541}, /* tsense1_critical_intr */
+	{30, 542}, /* core_bi_px_gpio_1 */
+	{31, 543}, /* core_bi_px_gpio_3 */
+	{32, 544}, /* core_bi_px_gpio_5 */
+	{33, 545}, /* core_bi_px_gpio_10 */
+	{34, 546}, /* core_bi_px_gpio_11 */
+	{35, 547}, /* core_bi_px_gpio_20 */
+	{36, 548}, /* core_bi_px_gpio_22 */
+	{37, 549}, /* core_bi_px_gpio_24 */
+	{38, 550}, /* core_bi_px_gpio_26 */
+	{39, 551}, /* core_bi_px_gpio_30 */
+	{41, 553}, /* core_bi_px_gpio_32 */
+	{42, 554}, /* core_bi_px_gpio_34 */
+	{43, 555}, /* core_bi_px_gpio_36 */
+	{44, 556}, /* core_bi_px_gpio_37 */
+	{45, 557}, /* core_bi_px_gpio_38 */
+	{46, 558}, /* core_bi_px_gpio_39 */
+	{47, 559}, /* core_bi_px_gpio_40 */
+	{49, 561}, /* core_bi_px_gpio_43 */
+	{50, 562}, /* core_bi_px_gpio_44 */
+	{51, 563}, /* core_bi_px_gpio_46 */
+	{52, 564}, /* core_bi_px_gpio_48 */
+	{54, 566}, /* core_bi_px_gpio_52 */
+	{55, 567}, /* core_bi_px_gpio_53 */
+	{56, 568}, /* core_bi_px_gpio_54 */
+	{57, 569}, /* core_bi_px_gpio_56 */
+	{58, 570}, /* core_bi_px_gpio_57 */
+	{59, 571}, /* core_bi_px_gpio_58 */
+	{60, 572}, /* core_bi_px_gpio_59 */
+	{61, 573}, /* core_bi_px_gpio_60 */
+	{62, 574}, /* core_bi_px_gpio_61 */
+	{63, 575}, /* core_bi_px_gpio_62 */
+	{64, 576}, /* core_bi_px_gpio_63 */
+	{65, 577}, /* core_bi_px_gpio_64 */
+	{66, 578}, /* core_bi_px_gpio_66 */
+	{67, 579}, /* core_bi_px_gpio_68 */
+	{68, 580}, /* core_bi_px_gpio_71 */
+	{69, 581}, /* core_bi_px_gpio_73 */
+	{70, 582}, /* core_bi_px_gpio_77 */
+	{71, 583}, /* core_bi_px_gpio_78 */
+	{72, 584}, /* core_bi_px_gpio_79 */
+	{73, 585}, /* core_bi_px_gpio_80 */
+	{74, 586}, /* core_bi_px_gpio_84 */
+	{75, 587}, /* core_bi_px_gpio_85 */
+	{76, 588}, /* core_bi_px_gpio_86 */
+	{77, 589}, /* core_bi_px_gpio_88 */
+	{79, 591}, /* core_bi_px_gpio_91 */
+	{80, 592}, /* core_bi_px_gpio_92 */
+	{81, 593}, /* core_bi_px_gpio_95 */
+	{82, 594}, /* core_bi_px_gpio_96 */
+	{83, 595}, /* core_bi_px_gpio_97 */
+	{84, 596}, /* core_bi_px_gpio_101 */
+	{85, 597}, /* core_bi_px_gpio_103 */
+	{86, 598}, /* core_bi_px_gpio_104 */
+	{87, 599}, /* core_bi_px_to_mpm[6] */
+	{88, 600}, /* core_bi_px_to_mpm[0] */
+	{89, 601}, /* core_bi_px_to_mpm[1] */
+	{90, 602}, /* core_bi_px_gpio_115 */
+	{91, 603}, /* core_bi_px_gpio_116 */
+	{92, 604}, /* core_bi_px_gpio_117 */
+	{93, 605}, /* core_bi_px_gpio_118 */
+	{94, 641}, /* core_bi_px_gpio_119 */
+	{95, 642}, /* core_bi_px_gpio_120 */
+	{96, 643}, /* core_bi_px_gpio_121 */
+	{97, 644}, /* core_bi_px_gpio_122 */
+	{98, 645}, /* core_bi_px_gpio_123 */
+	{99, 646}, /* core_bi_px_gpio_124 */
+	{100, 647}, /* core_bi_px_gpio_125 */
+	{101, 648}, /* core_bi_px_to_mpm[5] */
+	{102, 649}, /* core_bi_px_gpio_127 */
+	{103, 650}, /* core_bi_px_gpio_128 */
+	{104, 651}, /* core_bi_px_gpio_129 */
+	{105, 652}, /* core_bi_px_gpio_130 */
+	{106, 653}, /* core_bi_px_gpio_132 */
+	{107, 654}, /* core_bi_px_gpio_133 */
+	{108, 655}, /* core_bi_px_gpio_145 */
+	{119, 666}, /* core_bi_px_to_mpm[2] */
+	{120, 667}, /* core_bi_px_to_mpm[3] */
+	{121, 668}, /* core_bi_px_to_mpm[4] */
+	{122, 669}, /* core_bi_px_gpio_41 */
+	{123, 670}, /* core_bi_px_gpio_89 */
+	{124, 671}, /* core_bi_px_gpio_31 */
+	{125, 672}, /* core_bi_px_gpio_49 */
+	{-1}
+};
+
+static int __init qcom_pdc_gic_init(struct device_node *node,
+		struct device_node *parent)
+{
+	return qcom_pdc_init(node, parent, sdm845_data);
+}
+
+IRQCHIP_DECLARE(pdc_sdm845, "qcom,pdc-sdm845", qcom_pdc_gic_init);
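
The SoC data above is a sparse, sentinel-terminated map from PDC pin number to the GIC SPI that pin is wired to (note the gaps, e.g. pins 40 and 48 are absent). Below is a minimal standalone sketch of how such a table is searched, in illustrative userspace C; the sample entries and the lookup_pdc_pin() helper are assumptions made for the example, not the driver source.

/*
 * Standalone sketch: searching a sentinel-terminated pin map.
 * Names and entries are illustrative only.
 */
#include <stdio.h>

struct pdc_pin {
	int pin;               /* PDC pin number, -1 terminates the table */
	unsigned long hwirq;   /* GIC SPI the pin is wired to */
};

static const struct pdc_pin sdm845_pins[] = {
	{0, 512}, {1, 513}, {8, 520}, {-1}
};

/* Walk entries until the -1 sentinel; return the pin for a given hwirq. */
static int lookup_pdc_pin(const struct pdc_pin *map, unsigned long hwirq)
{
	int i;

	for (i = 0; map[i].pin >= 0; i++)
		if (map[i].hwirq == hwirq)
			return map[i].pin;
	return -1;
}

int main(void)
{
	printf("hwirq 520 -> PDC pin %d\n", lookup_pdc_pin(sdm845_pins, 520));
	return 0;
}
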
diff --git a/drivers/irqchip/qcom/pdc.c b/drivers/irqchip/qcom/pdc.c
new file mode 100644
index 0000000..923552f
--- /dev/null
+++ b/drivers/irqchip/qcom/pdc.c
@@ -0,0 +1,299 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "pdc.h"
+#define CREATE_TRACE_POINTS
+#include "trace/events/pdc.h"
+
+#define MAX_IRQS 126
+#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
+#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
+
+enum pdc_register_offsets {
+	IRQ_ENABLE_BANK = 0x10,
+	IRQ_i_CFG = 0x110,
+};
+
+static DEFINE_SPINLOCK(pdc_lock);
+static void __iomem *pdc_base;
+
+static int get_pdc_pin(irq_hw_number_t hwirq, void *data)
+{
+	int i;
+	struct pdc_pin *pdc_data = (struct pdc_pin *) data;
+
+	for (i = 0; pdc_data[i].pin >= 0; i++) {
+		if (pdc_data[i].hwirq == hwirq)
+			return pdc_data[i].pin;
+	}
+
+	return -EINVAL;
+}
+
+static inline int pdc_enable_intr(struct irq_data *d, bool on)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	unsigned int index, mask;
+	u32 enable, r_enable;
+	unsigned long flags;
+
+	if (pin_out < 0)
+		return 0;
+
+	index = pin_out / 32;
+	mask = pin_out % 32;
+	spin_lock_irqsave(&pdc_lock, flags);
+
+	enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK + (index *
+					sizeof(uint32_t)));
+	if (on)
+		enable = ENABLE_INTR(enable, mask);
+	else
+		enable = CLEAR_INTR(enable, mask);
+
+	writel_relaxed(enable, pdc_base + IRQ_ENABLE_BANK + (index *
+						sizeof(uint32_t)));
+
+	do {
+		r_enable = readl_relaxed(pdc_base + IRQ_ENABLE_BANK +
+					(index * sizeof(uint32_t)));
+		if (r_enable == enable)
+			break;
+		udelay(5);
+	} while (1);
+
+	spin_unlock_irqrestore(&pdc_lock, flags);
+
+	trace_irq_pin_config("enable", (u32)pin_out, (u32)d->hwirq,
+			0, on);
+
+	return 0;
+}
+
+static void qcom_pdc_gic_mask(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_mask_parent(d);
+}
+
+static void qcom_pdc_gic_unmask(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_unmask_parent(d);
+}
+
+static void qcom_pdc_gic_enable(struct irq_data *d)
+{
+	pdc_enable_intr(d, true);
+	irq_chip_enable_parent(d);
+}
+
+static void qcom_pdc_gic_disable(struct irq_data *d)
+{
+	pdc_enable_intr(d, false);
+	irq_chip_disable_parent(d);
+}
+
+/*
+ * GIC does not handle falling edge or active low. To allow falling edge and
+ * active low interrupts to be handled at GIC, PDC has an inverter that inverts
+ * falling edge into a rising edge and active low into an active high.
+ * For the inverter to work, the polarity bit in the IRQ_CONFIG register has to
+ * be set as per the table below.
+ * (polarity, falling edge, rising edge )  ORIG          POL CONV     POLARITY
+ * 3'b0 00  Level sensitive active low    (~~~|_____)   (___|~~~~~)   LOW
+ * 3'b0 01  Rising edge sensitive         (___|~~|__)   (~~~|__|~~)   NOT USED
+ * 3'b0 10  Falling edge sensitive        (~~~|__|~~)   (___|~~|__)   LOW
+ * 3'b0 11  Dual Edge sensitive                                       NOT USED
+ * 3'b1 00  Level sensitive active high   (___|~~~~~)   (___|~~~~~)   HIGH
+ * 3'b1 01  Falling Edge sensitive        (~~~|__|~~)   (~~~|__|~~)   NOT USED
+ * 3'b1 10  Rising edge sensitive         (___|~~|__)   (___|~~|__)   HIGH
+ * 3'b1 11  Dual Edge sensitive                                       HIGH
+ */
+enum pdc_irq_config_bits {
+	POLARITY_LOW = 0,	/* 0 00 */
+	FALLING_EDGE = 2,	/* 0 10 */
+	POLARITY_HIGH = 4,	/* 1 00 */
+	RISING_EDGE = 6,	/* 1 10 */
+	DUAL_EDGE = 7,		/* 1 11 */
+};
+
+static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
+{
+	int pin_out = get_pdc_pin(d->hwirq, d->chip_data);
+	u32 pdc_type = 0, config;
+
+	if (pin_out < 0)
+		goto fwd_to_parent;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		pdc_type = RISING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		pdc_type = FALLING_EDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		pdc_type = DUAL_EDGE;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		pdc_type = POLARITY_HIGH;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		pdc_type = POLARITY_LOW;
+		break;
+	default:
+		pdc_type = POLARITY_HIGH;
+		break;
+	}
+	writel_relaxed(pdc_type, pdc_base + IRQ_i_CFG +
+			(pin_out * sizeof(uint32_t)));
+
+	do {
+		config = readl_relaxed(pdc_base + IRQ_i_CFG +
+				(pin_out * sizeof(uint32_t)));
+		if (config == pdc_type)
+			break;
+		udelay(5);
+	} while (1);
+
+	trace_irq_pin_config("type_config", (u32)pin_out, (u32)d->hwirq,
+			pdc_type, 0);
+
+	/*
+	 * If type is edge triggered, forward that as Rising edge as PDC
+	 * takes care of converting falling edge to rising edge signal
+	 */
+	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+		type = IRQ_TYPE_EDGE_RISING;
+
+	/*
+	 * If type is level, then forward that as level high as PDC
+	 * takes care of converting an active low signal to active high
+	 */
+	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+		type = IRQ_TYPE_LEVEL_HIGH;
+
+fwd_to_parent:
+
+	return irq_chip_set_type_parent(d, type);
+}
+
+static struct irq_chip qcom_pdc_gic_chip = {
+	.name			= "PDC-GIC",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= qcom_pdc_gic_mask,
+	.irq_enable		= qcom_pdc_gic_enable,
+	.irq_unmask		= qcom_pdc_gic_unmask,
+	.irq_disable		= qcom_pdc_gic_disable,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_type		= qcom_pdc_gic_set_type,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+					IRQCHIP_SET_TYPE_MASKED |
+					IRQCHIP_SKIP_SET_WAKE,
+	.irq_set_vcpu_affinity	= irq_chip_set_vcpu_affinity_parent,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int qcom_pdc_translate(struct irq_domain *d,
+	struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type)
+{
+	return d->parent->ops->translate(d->parent, fwspec, hwirq, type);
+}
+
+static int qcom_pdc_alloc(struct irq_domain *domain,
+	unsigned int virq, unsigned int nr_irqs, void *data)
+{
+	struct irq_fwspec *fwspec = data;
+	struct irq_fwspec parent_fwspec;
+	irq_hw_number_t hwirq;
+	int i;
+	unsigned int type;
+	int ret;
+
+	ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+	if (ret)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+			&qcom_pdc_gic_chip, domain->host_data);
+
+	parent_fwspec = *fwspec;
+	parent_fwspec.fwnode = domain->parent->fwnode;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+					    &parent_fwspec);
+}
+
+static const struct irq_domain_ops qcom_pdc_ops = {
+	.translate	= qcom_pdc_translate,
+	.alloc		= qcom_pdc_alloc,
+	.free		= irq_domain_free_irqs_common,
+};
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data)
+{
+	struct irq_domain *parent_domain;
+	int ret;
+	struct irq_domain *pdc_domain;
+
+	pdc_base = of_iomap(node, 0);
+	if (!pdc_base) {
+		pr_err("%s(): unable to map PDC registers\n", node->full_name);
+		return -ENXIO;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("unable to obtain PDC parent domain\n");
+		ret = -ENXIO;
+		goto failure;
+	}
+
+	pdc_domain = irq_domain_add_hierarchy(parent_domain, 0, MAX_IRQS,
+			node, &qcom_pdc_ops, data);
+	if (!pdc_domain) {
+		pr_err("GIC domain add failed\n");
+		ret = -ENOMEM;
+		goto failure;
+	}
+
+	pdc_domain->name = "qcom,pdc";
+
+	return 0;
+
+failure:
+	iounmap(pdc_base);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_pdc_init);
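
pdc_enable_intr() above converts a PDC pin number into a 32-bit enable bank plus a bit within that bank before doing a read-modify-write and polling the register until the write sticks. A standalone sketch of just that arithmetic follows (illustrative userspace C; the example pin value is arbitrary).

/* Standalone sketch: bank/bit math used when enabling a PDC pin. */
#include <stdint.h>
#include <stdio.h>

#define IRQ_ENABLE_BANK 0x10

int main(void)
{
	int pin = 45;                    /* example PDC pin */
	unsigned int index = pin / 32;   /* which 32-bit enable bank */
	unsigned int bit = pin % 32;     /* bit within that bank */
	uint32_t offset = IRQ_ENABLE_BANK + index * sizeof(uint32_t);

	printf("pin %d -> bank offset 0x%x, bit %u (mask 0x%08x)\n",
	       pin, (unsigned)offset, bit, 1u << bit);
	return 0;
}
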
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/drivers/irqchip/qcom/pdc.h
similarity index 69%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to drivers/irqchip/qcom/pdc.h
index 4b3fa93..7c4d89c 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/drivers/irqchip/qcom/pdc.h
@@ -8,16 +8,16 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
+#include <linux/irq.h>
+#include <linux/device.h>
+
+struct pdc_pin {
+	int pin;
+	irq_hw_number_t hwirq;
 };
+
+int qcom_pdc_init(struct device_node *node,
+		struct device_node *parent, void *data);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index fdc4b30..2678a00 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -63,11 +63,13 @@
 #define	FLASH_LED_REG_MITIGATION_SEL(base)	(base + 0x6E)
 #define	FLASH_LED_REG_MITIGATION_SW(base)	(base + 0x6F)
 #define	FLASH_LED_REG_LMH_LEVEL(base)		(base + 0x70)
+#define	FLASH_LED_REG_MULTI_STROBE_CTRL(base)	(base + 0x71)
+#define	FLASH_LED_REG_LPG_INPUT_CTRL(base)	(base + 0x72)
 #define	FLASH_LED_REG_CURRENT_DERATE_EN(base)	(base + 0x76)
 
 #define	FLASH_LED_HDRM_VOL_MASK			GENMASK(7, 4)
 #define	FLASH_LED_CURRENT_MASK			GENMASK(6, 0)
-#define	FLASH_LED_ENABLE_MASK			GENMASK(2, 0)
+#define	FLASH_LED_STROBE_MASK			GENMASK(1, 0)
 #define	FLASH_HW_STROBE_MASK			GENMASK(2, 0)
 #define	FLASH_LED_ISC_WARMUP_DELAY_MASK		GENMASK(1, 0)
 #define	FLASH_LED_CURRENT_DERATE_EN_MASK	GENMASK(2, 0)
@@ -91,6 +93,9 @@
 #define	THERMAL_DERATE_SLOW_SHIFT		4
 #define	THERMAL_DERATE_SLOW_MASK		GENMASK(6, 4)
 #define	THERMAL_DERATE_FAST_MASK		GENMASK(2, 0)
+#define	LED1N2_FLASH_ONCE_ONLY_BIT		BIT(0)
+#define	LED3_FLASH_ONCE_ONLY_BIT		BIT(1)
+#define	LPG_INPUT_SEL_BIT			BIT(0)
 
 #define	VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us)	(val_us / 8)
 #define	VPH_DROOP_HYST_MV_TO_VAL(val_mv)	(val_mv / 25)
@@ -127,11 +132,11 @@
 #define	FLASH_LED_LMH_MITIGATION_DISABLE	0
 #define	FLASH_LED_CHGR_MITIGATION_ENABLE	BIT(4)
 #define	FLASH_LED_CHGR_MITIGATION_DISABLE	0
-#define	FLASH_LED_MITIGATION_SEL_DEFAULT	2
+#define	FLASH_LED_LMH_MITIGATION_SEL_DEFAULT	2
 #define	FLASH_LED_MITIGATION_SEL_MAX		2
 #define	FLASH_LED_CHGR_MITIGATION_SEL_SHIFT	4
-#define	FLASH_LED_MITIGATION_THRSH_DEFAULT	0xA
-#define	FLASH_LED_MITIGATION_THRSH_MAX		0x1F
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT	0xA
+#define	FLASH_LED_CHGR_MITIGATION_THRSH_MAX	0x1F
 #define	FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV	3700000
 #define	FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM	400000
 #define	FLASH_LED_IRES_BASE			3
@@ -152,12 +157,17 @@
 #define	FLASH_LED_MOD_ENABLE			BIT(7)
 #define	FLASH_LED_DISABLE			0x00
 #define	FLASH_LED_SAFETY_TMR_DISABLED		0x13
-#define	FLASH_LED_MIN_CURRENT_MA		25
 #define	FLASH_LED_MAX_TOTAL_CURRENT_MA		3750
 
 /* notifier call chain for flash-led irqs */
 static ATOMIC_NOTIFIER_HEAD(irq_notifier_list);
 
+enum flash_charger_mitigation {
+	FLASH_DISABLE_CHARGER_MITIGATION,
+	FLASH_HW_CHARGER_MITIGATION_BY_ILED_THRSHLD,
+	FLASH_SW_CHARGER_MITIGATION,
+};
+
 enum flash_led_type {
 	FLASH_LED_TYPE_FLASH,
 	FLASH_LED_TYPE_TORCH,
@@ -169,6 +179,12 @@
 	LED3,
 };
 
+enum strobe_type {
+	SW_STROBE = 0,
+	HW_STROBE,
+	LPG_STROBE,
+};
+
 /*
  * Configurations for each individual LED
  */
@@ -182,13 +198,15 @@
 	int				ires_ua;
 	int				max_current;
 	int				current_ma;
+	int				prev_current_ma;
 	u8				duration;
 	u8				id;
 	u8				type;
 	u8				ires;
 	u8				hdrm_val;
 	u8				current_reg_val;
-	u8				trigger;
+	u8				strobe_ctrl;
+	u8				strobe_sel;
 	bool				led_on;
 };
 
@@ -226,6 +244,7 @@
 	int			thermal_thrsh1;
 	int			thermal_thrsh2;
 	int			thermal_thrsh3;
+	int			hw_strobe_option;
 	u32			led1n2_iclamp_low_ma;
 	u32			led1n2_iclamp_mid_ma;
 	u32			led3_iclamp_low_ma;
@@ -240,7 +259,6 @@
 	u8			chgr_mitigation_sel;
 	u8			lmh_level;
 	u8			iled_thrsh_val;
-	u8			hw_strobe_option;
 	bool			hdrm_auto_mode_en;
 	bool			thermal_derate_en;
 	bool			otst_ramp_bkup_en;
@@ -261,6 +279,7 @@
 	int				num_fnodes;
 	int				num_snodes;
 	int				enable;
+	int				total_current_ma;
 	u16				base;
 	bool				trigger_lmh;
 	bool				trigger_chgr;
@@ -487,10 +506,12 @@
 	if (rc < 0)
 		return rc;
 
+	val = led->pdata->chgr_mitigation_sel
+				<< FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
 	rc = qpnp_flash_led_masked_write(led,
 			FLASH_LED_REG_MITIGATION_SEL(led->base),
 			FLASH_LED_CHGR_MITIGATION_SEL_MASK,
-			led->pdata->chgr_mitigation_sel);
+			val);
 	if (rc < 0)
 		return rc;
 
@@ -548,6 +569,28 @@
 			return rc;
 	}
 
+	if (led->pdata->hw_strobe_option > 0) {
+		rc = qpnp_flash_led_masked_write(led,
+				FLASH_LED_REG_STROBE_CFG(led->base),
+				FLASH_LED_STROBE_MASK,
+				led->pdata->hw_strobe_option);
+		if (rc < 0)
+			return rc;
+	}
+
+	if (led->fnode[LED3].strobe_sel == LPG_STROBE) {
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_MULTI_STROBE_CTRL(led->base),
+			LED3_FLASH_ONCE_ONLY_BIT, 0);
+		if (rc < 0)
+			return rc;
+
+		rc = qpnp_flash_led_masked_write(led,
+			FLASH_LED_REG_LPG_INPUT_CTRL(led->base),
+			LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT);
+		if (rc < 0)
+			return rc;
+	}
 	return 0;
 }
 
@@ -877,14 +920,29 @@
 	return max_avail_current;
 }
 
+static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode)
+{
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+
+	if (fnode->current_ma)
+		led->total_current_ma += fnode->current_ma
+						- fnode->prev_current_ma;
+	else
+		led->total_current_ma -= fnode->prev_current_ma;
+
+	fnode->prev_current_ma = fnode->current_ma;
+}
+
 static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value)
 {
 	int prgm_current_ma = value;
+	int min_ma = fnode->ires_ua / 1000;
+	struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
 
 	if (value <= 0)
 		prgm_current_ma = 0;
-	else if (value < FLASH_LED_MIN_CURRENT_MA)
-		prgm_current_ma = FLASH_LED_MIN_CURRENT_MA;
+	else if (value < min_ma)
+		prgm_current_ma = min_ma;
 
 	prgm_current_ma = min(prgm_current_ma, fnode->max_current);
 	fnode->current_ma = prgm_current_ma;
@@ -892,6 +950,13 @@
 	fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma,
 					fnode->ires_ua);
 	fnode->led_on = prgm_current_ma != 0;
+
+	if (led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) {
+		qpnp_flash_led_aggregate_max_current(fnode);
+		led->trigger_chgr = false;
+		if (led->total_current_ma >= 1000)
+			led->trigger_chgr = true;
+	}
 }
 
 static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
@@ -950,7 +1015,7 @@
 
 		led->fnode[i].led_on = false;
 
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, false);
 			if (rc < 0) {
@@ -1004,13 +1069,6 @@
 	if (rc < 0)
 		return rc;
 
-	rc = qpnp_flash_led_masked_write(led,
-					FLASH_LED_REG_STROBE_CFG(led->base),
-					FLASH_LED_ENABLE_MASK,
-					led->pdata->hw_strobe_option);
-	if (rc < 0)
-		return rc;
-
 	val = 0;
 	for (i = 0; i < led->num_fnodes; i++) {
 		if (!led->fnode[i].led_on ||
@@ -1018,13 +1076,13 @@
 			continue;
 
 		addr_offset = led->fnode[i].id;
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT)
-			mask = FLASH_HW_STROBE_MASK;
-		else
+		if (led->fnode[i].strobe_sel == SW_STROBE)
 			mask = FLASH_LED_HW_SW_STROBE_SEL_BIT;
+		else
+			mask = FLASH_HW_STROBE_MASK;
 		rc = qpnp_flash_led_masked_write(led,
 			FLASH_LED_REG_STROBE_CTRL(led->base + addr_offset),
-			mask, led->fnode[i].trigger);
+			mask, led->fnode[i].strobe_ctrl);
 		if (rc < 0)
 			return rc;
 
@@ -1042,7 +1100,7 @@
 
 		val |= FLASH_LED_ENABLE << led->fnode[i].id;
 
-		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+		if (led->fnode[i].strobe_sel == HW_STROBE) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, true);
 			if (rc < 0) {
@@ -1159,10 +1217,6 @@
 		*max_current = rc;
 	}
 
-	led->trigger_chgr = false;
-	if (options & PRE_FLASH)
-		led->trigger_chgr = true;
-
 	return 0;
 }
 
@@ -1336,9 +1390,9 @@
 			struct flash_node_data *fnode, struct device_node *node)
 {
 	const char *temp_string;
-	int rc;
+	int rc, min_ma;
 	u32 val;
-	bool strobe_sel = 0, edge_trigger = 0, active_high = 0;
+	bool hw_strobe = 0, edge_trigger = 0, active_high = 0;
 
 	fnode->pdev = led->pdev;
 	fnode->cdev.brightness_set = qpnp_flash_led_brightness_set;
@@ -1392,10 +1446,11 @@
 		return rc;
 	}
 
+	min_ma = fnode->ires_ua / 1000;
 	rc = of_property_read_u32(node, "qcom,max-current", &val);
 	if (!rc) {
-		if (val < FLASH_LED_MIN_CURRENT_MA)
-			val = FLASH_LED_MIN_CURRENT_MA;
+		if (val < min_ma)
+			val = min_ma;
 		fnode->max_current = val;
 		fnode->cdev.max_brightness = val;
 	} else {
@@ -1405,11 +1460,10 @@
 
 	rc = of_property_read_u32(node, "qcom,current-ma", &val);
 	if (!rc) {
-		if (val < FLASH_LED_MIN_CURRENT_MA ||
-				val > fnode->max_current)
+		if (val < min_ma || val > fnode->max_current)
 			pr_warn("Invalid operational current specified, capping it\n");
-		if (val < FLASH_LED_MIN_CURRENT_MA)
-			val = FLASH_LED_MIN_CURRENT_MA;
+		if (val < min_ma)
+			val = min_ma;
 		if (val > fnode->max_current)
 			val = fnode->max_current;
 		fnode->current_ma = val;
@@ -1457,14 +1511,52 @@
 		return rc;
 	}
 
-	strobe_sel = of_property_read_bool(node, "qcom,hw-strobe-sel");
-	if (strobe_sel) {
+	fnode->strobe_sel = SW_STROBE;
+	rc = of_property_read_u32(node, "qcom,strobe-sel", &val);
+	if (rc < 0) {
+		if (rc != -EINVAL) {
+			pr_err("Unable to read qcom,strobe-sel property\n");
+			return rc;
+		}
+	} else {
+		if (val < SW_STROBE || val > LPG_STROBE) {
+			pr_err("Incorrect strobe selection specified %d\n",
+				val);
+			return -EINVAL;
+		}
+		fnode->strobe_sel = (u8)val;
+	}
+
+	/*
+	 * LPG strobe is allowed only for LED3 and HW strobe option should be
+	 * option 2 or 3.
+	 */
+	if (fnode->strobe_sel == LPG_STROBE) {
+		if (led->pdata->hw_strobe_option ==
+				FLASH_LED_HW_STROBE_OPTION_1) {
+			pr_err("Incorrect strobe option for LPG strobe\n");
+			return -EINVAL;
+		}
+		if (fnode->id != LED3) {
+			pr_err("Incorrect LED chosen for LPG strobe\n");
+			return -EINVAL;
+		}
+	}
+
+	if (fnode->strobe_sel == HW_STROBE) {
 		edge_trigger = of_property_read_bool(node,
 						"qcom,hw-strobe-edge-trigger");
 		active_high = !of_property_read_bool(node,
 						"qcom,hw-strobe-active-low");
+		hw_strobe = 1;
+	} else if (fnode->strobe_sel == LPG_STROBE) {
+		/* LPG strobe requires level trigger and active high */
+		edge_trigger = 0;
+		active_high = 1;
+		hw_strobe = 1;
 	}
-	fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high;
+	fnode->strobe_ctrl = (hw_strobe << 2) | (edge_trigger << 1) |
+				active_high;
 
 	rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
 	if (rc < 0) {
@@ -1480,7 +1572,7 @@
 		fnode->strobe_pinctrl = NULL;
 	}
 
-	if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
+	if (fnode->strobe_sel == HW_STROBE) {
 		if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
 			fnode->hw_strobe_gpio = of_get_named_gpio(node,
 						"qcom,hw-strobe-gpio", 0);
@@ -1860,9 +1952,10 @@
 
 	led->pdata->vph_droop_hysteresis <<= FLASH_LED_VPH_DROOP_HYST_SHIFT;
 
+	led->pdata->hw_strobe_option = -EINVAL;
 	rc = of_property_read_u32(node, "qcom,hw-strobe-option", &val);
 	if (!rc) {
-		led->pdata->hw_strobe_option = (u8)val;
+		led->pdata->hw_strobe_option = val;
 	} else if (rc != -EINVAL) {
 		pr_err("Unable to parse hw strobe option, rc=%d\n", rc);
 		return rc;
@@ -1957,7 +2050,7 @@
 		return rc;
 	}
 
-	led->pdata->lmh_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+	led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT;
 	rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val);
 	if (!rc) {
 		led->pdata->lmh_mitigation_sel = val;
@@ -1971,7 +2064,7 @@
 		return -EINVAL;
 	}
 
-	led->pdata->chgr_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+	led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION;
 	rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
 	if (!rc) {
 		led->pdata->chgr_mitigation_sel = val;
@@ -1985,9 +2078,7 @@
 		return -EINVAL;
 	}
 
-	led->pdata->chgr_mitigation_sel <<= FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
-
-	led->pdata->iled_thrsh_val = FLASH_LED_MITIGATION_THRSH_DEFAULT;
+	led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT;
 	rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
 	if (!rc) {
 		led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
@@ -1996,7 +2087,7 @@
 		return rc;
 	}
 
-	if (led->pdata->iled_thrsh_val > FLASH_LED_MITIGATION_THRSH_MAX) {
+	if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) {
 		pr_err("Invalid iled_thrsh_val specified\n");
 		return -EINVAL;
 	}
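
Two behavioural points in the flash LED changes above are easy to miss: the minimum programmable current is now one IRES step (ires_ua / 1000 mA) rather than a fixed 25 mA, and with software charger mitigation selected the driver aggregates per-LED current deltas and only asserts the charger trigger once the total reaches 1000 mA. A compact standalone sketch of both follows; the structure names and the 12500 uA IRES value are assumptions made for the example, not driver values.

/*
 * Standalone sketch: IRES-based current clamp plus software
 * charger-mitigation aggregation. Illustrative userspace C only.
 */
#include <stdbool.h>
#include <stdio.h>

struct fnode {
	int ires_ua;           /* current resolution, in uA (example value) */
	int max_current;       /* per-LED cap, in mA */
	int current_ma;
	int prev_current_ma;
};

static int total_current_ma;
static bool trigger_chgr;

static void node_set(struct fnode *f, int value_ma)
{
	int min_ma = f->ires_ua / 1000;

	if (value_ma <= 0)
		value_ma = 0;
	else if (value_ma < min_ma)
		value_ma = min_ma;           /* clamp to one IRES step */
	if (value_ma > f->max_current)
		value_ma = f->max_current;

	/* Aggregate the delta against the previously programmed value. */
	if (value_ma)
		total_current_ma += value_ma - f->prev_current_ma;
	else
		total_current_ma -= f->prev_current_ma;
	f->prev_current_ma = value_ma;
	f->current_ma = value_ma;

	/* SW charger mitigation kicks in once the aggregate hits 1 A. */
	trigger_chgr = (total_current_ma >= 1000);
}

int main(void)
{
	struct fnode led1 = { .ires_ua = 12500, .max_current = 1500 };
	struct fnode led2 = { .ires_ua = 12500, .max_current = 1500 };

	node_set(&led1, 700);
	node_set(&led2, 400);
	printf("total=%d mA, charger mitigation=%d\n",
	       total_current_ma, trigger_chgr);
	return 0;
}
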
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6c7f6c4..d2cb1e8 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -60,6 +60,7 @@
 	void *data;
 	int err = -EBUSY;
 
+again:
 	spin_lock_irqsave(&chan->lock, flags);
 
 	if (!chan->msg_count || chan->active_req)
@@ -85,6 +86,16 @@
 exit:
 	spin_unlock_irqrestore(&chan->lock, flags);
 
+	/*
+	 * If the controller returns -EAGAIN, it means our spinlock here
+	 * is preventing the controller from receiving the interrupt that
+	 * would clear the controller channels currently blocked waiting
+	 * on an interrupt response.
+	 * Drop the lock and retry.
+	 */
+	if (err == -EAGAIN)
+		goto again;
+
 	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
 		/* kick start the timer immediately to avoid delays */
 		hrtimer_start(&chan->mbox->poll_hrt, ktime_set(0, 0),
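
The retry added above relies on msg_submit() releasing chan->lock between attempts, so the controller's completion interrupt can run and free a channel before the next try. Below is a minimal standalone sketch of that drop-the-lock-and-retry shape, using a pthread mutex in place of the spinlock and a stub in place of the controller's send path; everything in it is illustrative.

/* Standalone sketch: retry a submission after dropping the lock on -EAGAIN. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static int busy_countdown = 3;    /* pretend the controller is busy 3 times */

/* Stand-in for the controller send: -EAGAIN while "busy". */
static int controller_send(void)
{
	return busy_countdown-- > 0 ? -EAGAIN : 0;
}

static int msg_submit(void)
{
	int err;

again:
	pthread_mutex_lock(&chan_lock);
	err = controller_send();
	pthread_mutex_unlock(&chan_lock);

	/*
	 * -EAGAIN means the controller needs something else (the interrupt,
	 * in the real driver) to make progress first: the lock is already
	 * dropped, so simply try the whole submission again.
	 */
	if (err == -EAGAIN)
		goto again;

	return err;
}

int main(void)
{
	printf("msg_submit() -> %d\n", msg_submit());
	return 0;
}
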
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index b328a2a..1f649d6 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -28,7 +28,6 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
-#include <linux/workqueue.h>
 
 #include <asm-generic/io.h>
 
@@ -95,10 +94,10 @@
 	struct mbox_chan *chan;
 	struct tcs_mbox_msg *msg;
 	u32 m; /* m-th TCS */
-	struct tasklet_struct tasklet;
 	int err;
 	int idx;
 	bool in_use;
+	struct list_head list;
 };
 
 struct tcs_response_pool {
@@ -122,16 +121,18 @@
 
 /* One per MBOX controller */
 struct tcs_drv {
+	struct mbox_controller mbox;
 	const char *name;
-	void *base; /* start address of the RSC's registers */
-	void *reg_base; /* start address for DRV specific register */
+	void __iomem *base; /* start address of the RSC's registers */
+	void __iomem *reg_base; /* start address for DRV specific register */
 	int drv_id;
 	struct platform_device *pdev;
-	struct mbox_controller mbox;
 	struct tcs_mbox tcs[TCS_TYPE_NR];
 	int num_assigned;
 	int num_tcs;
-	struct workqueue_struct *wq;
+	struct tasklet_struct tasklet;
+	struct list_head response_pending;
+	spinlock_t drv_lock;
 	struct tcs_response_pool *resp_pool;
 	atomic_t tcs_in_use[MAX_POOL_SIZE];
 	/* Debug info */
@@ -141,8 +142,6 @@
 	atomic_t tcs_irq_count[MAX_POOL_SIZE];
 };
 
-static void tcs_notify_tx_done(unsigned long data);
-
 static int tcs_response_pool_init(struct tcs_drv *drv)
 {
 	struct tcs_response_pool *pool;
@@ -153,11 +152,10 @@
 		return -ENOMEM;
 
 	for (i = 0; i < MAX_POOL_SIZE; i++) {
-		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
-						(unsigned long) &pool->resp[i]);
 		pool->resp[i].drv = drv;
 		pool->resp[i].idx = i;
 		pool->resp[i].m = TCS_M_INIT;
+		INIT_LIST_HEAD(&pool->resp[i].list);
 	}
 
 	spin_lock_init(&pool->lock);
@@ -188,6 +186,9 @@
 	}
 	spin_unlock_irqrestore(&pool->lock, flags);
 
+	if (pos == MAX_POOL_SIZE)
+		pr_err("response pool is full\n");
+
 	return resp;
 }
 
@@ -240,11 +241,11 @@
 		return;
 
 	msg = resp->msg;
-	pr_info("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
+	pr_debug("Response object idx=%d:\n\tfor-tcs=%d\tin-use=%d\n",
 			resp->idx, resp->m, resp->in_use);
-	pr_info("Msg: state=%d\n", msg->state);
+	pr_debug("Msg: state=%d\n", msg->state);
 	for (i = 0; i < msg->num_payload; i++)
-		pr_info("addr=0x%x data=0x%x complete=0x%x\n",
+		pr_debug("addr=0x%x data=0x%x complete=0x%x\n",
 				msg->payload[i].addr,
 				msg->payload[i].data,
 				msg->payload[i].complete);
@@ -364,7 +365,15 @@
 
 static inline void send_tcs_response(struct tcs_response *resp)
 {
-	tasklet_schedule(&resp->tasklet);
+	struct tcs_drv *drv = resp->drv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drv->drv_lock, flags);
+	INIT_LIST_HEAD(&resp->list);
+	list_add_tail(&resp->list, &drv->response_pending);
+	spin_unlock_irqrestore(&drv->drv_lock, flags);
+
+	tasklet_schedule(&drv->tasklet);
 }
 
 static inline void enable_tcs_irq(struct tcs_drv *drv, int m, bool enable)
@@ -455,12 +464,12 @@
 		/* Clear the TCS IRQ status */
 		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
 
+		/* Notify the client that this request is completed. */
+		atomic_set(&drv->tcs_in_use[m], 0);
+
 		/* Clean up response object and notify mbox in tasklet */
 		if (resp)
 			send_tcs_response(resp);
-
-		/* Notify the client that this request is completed. */
-		atomic_set(&drv->tcs_in_use[m], 0);
 	}
 
 	return IRQ_HANDLED;
@@ -475,19 +484,38 @@
 	mbox_chan_txdone(chan, err);
 }
 
-/**
- * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
- */
-static void tcs_notify_tx_done(unsigned long data)
+static void respond_tx_done(struct tcs_response *resp)
 {
-	struct tcs_response *resp = (struct tcs_response *) data;
 	struct mbox_chan *chan = resp->chan;
 	struct tcs_mbox_msg *msg = resp->msg;
 	int err = resp->err;
 	int m = resp->m;
 
-	mbox_notify_tx_done(chan, msg, m, err);
 	free_response(resp);
+	mbox_notify_tx_done(chan, msg, m, err);
+}
+
+/**
+ * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
+ */
+static void tcs_notify_tx_done(unsigned long data)
+{
+	struct tcs_drv *drv = (struct tcs_drv *)data;
+	struct tcs_response *resp;
+	unsigned long flags;
+
+	do {
+		spin_lock_irqsave(&drv->drv_lock, flags);
+		if (list_empty(&drv->response_pending)) {
+			spin_unlock_irqrestore(&drv->drv_lock, flags);
+			break;
+		}
+		resp = list_first_entry(&drv->response_pending,
+					struct tcs_response, list);
+		list_del(&resp->list);
+		spin_unlock_irqrestore(&drv->drv_lock, flags);
+		respond_tx_done(resp);
+	} while (1);
 }
 
 static void __tcs_buffer_write(struct tcs_drv *drv, int d, int m, int n,
@@ -673,8 +701,11 @@
 	if (IS_ERR(tcs))
 		return PTR_ERR(tcs);
 
-	if (trigger)
+	if (trigger) {
 		resp = setup_response(drv, msg, chan, TCS_M_INIT, 0);
+		if (IS_ERR_OR_NULL(resp))
+			return -EBUSY;
+	}
 
 	/* Identify the sequential slots that we can write to */
 	spin_lock_irqsave(&tcs->tcs_lock, flags);
@@ -686,28 +717,21 @@
 		return slot;
 	}
 
-	if (trigger) {
-		ret = check_for_req_inflight(drv, tcs, msg);
-		if (ret) {
-			spin_unlock_irqrestore(&tcs->tcs_lock, flags);
-			return ret;
-		}
-	}
-
-	/* Mark the slots as in-use, before we unlock */
-	if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
-		bitmap_set(tcs->slots, slot, msg->num_payload);
-
-	/* Copy the addresses of the resources over to the slots */
-	for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
-		tcs->cmd_addr[slot + i] = msg->payload[i].addr;
-
+	/* Figure out the TCS-m and CMD-n to write to */
 	offset = slot / tcs->ncpt;
 	m = offset + tcs->tcs_offset;
 	n = slot % tcs->ncpt;
 
-	/* Block, if we have an address from the msg in flight */
 	if (trigger) {
+		/* Block, if we have an address from the msg in flight */
+		ret = check_for_req_inflight(drv, tcs, msg);
+		if (ret) {
+			spin_unlock_irqrestore(&tcs->tcs_lock, flags);
+			if (resp)
+				free_response(resp);
+			return ret;
+		}
+
 		resp->m = m;
 		/* Mark the TCS as busy */
 		atomic_set(&drv->tcs_in_use[m], 1);
@@ -716,6 +740,14 @@
 		if (tcs->type != ACTIVE_TCS)
 			enable_tcs_irq(drv, m, true);
 		drv->tcs_last_sent_ts[m] = arch_counter_get_cntvct();
+	} else {
+		/* Mark the slots as in-use, before we unlock */
+		if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
+			bitmap_set(tcs->slots, slot, msg->num_payload);
+
+		/* Copy the addresses of the resources over to the slots */
+		for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
+			tcs->cmd_addr[slot + i] = msg->payload[i].addr;
 	}
 
 	/* Write to the TCS or AMC */
@@ -758,6 +790,32 @@
 	return 0;
 }
 
+static void print_tcs_regs(struct tcs_drv *drv, int m)
+{
+	int n;
+	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+	void __iomem *base = drv->reg_base;
+	u32 enable, addr, data, msgid;
+
+	if (!tcs || tcs_is_free(drv, m))
+		return;
+
+	enable = read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+	if (!enable)
+		return;
+
+	pr_debug("TCS-%d contents:\n", m);
+	for (n = 0; n < tcs->ncpt; n++) {
+		if (!(enable & BIT(n)))
+			continue;
+		addr = read_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n);
+		data = read_tcs_reg(base, TCS_DRV_CMD_DATA, m, n);
+		msgid = read_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n);
+		pr_debug("\tn=%d addr=0x%x data=0x%x hdr=0x%x\n",
+						n, addr, data, msgid);
+	}
+}
+
 static void dump_tcs_stats(struct tcs_drv *drv)
 {
 	int i;
@@ -766,12 +824,13 @@
 	for (i = 0; i < drv->num_tcs; i++) {
 		if (!atomic_read(&drv->tcs_in_use[i]))
 			continue;
-		pr_info("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
+		pr_debug("Time: %llu: TCS-%d:\n\tReq Sent:%d Last Sent:%llu\n\tResp Recv:%d Last Recvd:%llu\n",
 				curr, i,
 				atomic_read(&drv->tcs_send_count[i]),
 				drv->tcs_last_sent_ts[i],
 				atomic_read(&drv->tcs_irq_count[i]),
 				drv->tcs_last_recv_ts[i]);
+		print_tcs_regs(drv, i);
 		print_response(drv, i);
 	}
 }
@@ -840,7 +899,7 @@
 		if (ret != -EBUSY)
 			break;
 		udelay(100);
-	} while (++count < 10);
+	} while (++count < 100);
 
 tx_fail:
 	/* If there was an error in the request, schedule a response */
@@ -849,7 +908,8 @@
 				drv, msg, chan, TCS_M_INIT, ret);
 
 		dev_err(dev, "Error sending RPMH message %d\n", ret);
-		send_tcs_response(resp);
+		if (resp)
+			send_tcs_response(resp);
 		ret = 0;
 	}
 
@@ -857,6 +917,7 @@
 	if (ret == -EBUSY) {
 		dev_err(dev, "TCS Busy, retrying RPMH message send\n");
 		dump_tcs_stats(drv);
+		ret = -EAGAIN;
 	}
 
 	return ret;
@@ -967,6 +1028,7 @@
 	}
 
 	chan = &mbox->chans[drv->num_assigned++];
+	chan->con_priv = drv;
 
 	return chan;
 }
@@ -1108,6 +1170,9 @@
 	drv->mbox.is_idle = tcs_drv_is_idle;
 	drv->num_tcs = st;
 	drv->pdev = pdev;
+	INIT_LIST_HEAD(&drv->response_pending);
+	spin_lock_init(&drv->drv_lock);
+	tasklet_init(&drv->tasklet, tcs_notify_tx_done, (unsigned long)drv);
 
 	drv->name = of_get_property(pdev->dev.of_node, "label", NULL);
 	if (!drv->name)
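
The qti-tcs rework above replaces one tasklet per response with a single driver tasklet that drains a drv_lock-protected response_pending list. A standalone sketch of that producer/consumer shape follows, with a mutex standing in for the spinlock and an ordinary function standing in for the tasklet; this is illustrative userspace C, not the driver code.

/* Standalone sketch: queue completions on one list, drain them in one place. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct resp {
	int err;
	struct resp *next;
};

static pthread_mutex_t drv_lock = PTHREAD_MUTEX_INITIALIZER;
static struct resp *pending_head, **pending_tail = &pending_head;

/* Producer side: roughly what send_tcs_response() does under drv_lock. */
static void queue_response(struct resp *r)
{
	pthread_mutex_lock(&drv_lock);
	r->next = NULL;
	*pending_tail = r;
	pending_tail = &r->next;
	pthread_mutex_unlock(&drv_lock);
	/* the real driver would tasklet_schedule() here */
}

/* Consumer side: what the single tasklet does, draining one entry at a time. */
static void drain_responses(void)
{
	for (;;) {
		struct resp *r;

		pthread_mutex_lock(&drv_lock);
		r = pending_head;
		if (r) {
			pending_head = r->next;
			if (!pending_head)
				pending_tail = &pending_head;
		}
		pthread_mutex_unlock(&drv_lock);
		if (!r)
			break;
		printf("completed response, err=%d\n", r->err);
		free(r);
	}
}

int main(void)
{
	struct resp *r = calloc(1, sizeof(*r));

	queue_response(r);
	drain_responses();
	return 0;
}
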
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 3b53f34..e7b8f49 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -357,6 +357,7 @@
 config DM_RAID
        tristate "RAID 1/4/5/6/10 target"
        depends on BLK_DEV_DM
+       select MD_RAID0
        select MD_RAID1
        select MD_RAID10
        select MD_RAID456
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 125aedc..8bf9667 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -215,7 +215,7 @@
  * Buffers are freed after this timeout
  */
 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
 
 static unsigned long dm_bufio_peak_allocated;
 static unsigned long dm_bufio_allocated_kmem_cache;
@@ -923,10 +923,11 @@
 {
 	unsigned long buffers;
 
-	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
-		mutex_lock(&dm_bufio_clients_lock);
-		__cache_size_refresh();
-		mutex_unlock(&dm_bufio_clients_lock);
+	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+		if (mutex_trylock(&dm_bufio_clients_lock)) {
+			__cache_size_refresh();
+			mutex_unlock(&dm_bufio_clients_lock);
+		}
 	}
 
 	buffers = dm_bufio_cache_size_per_client >>
@@ -1540,10 +1541,10 @@
 	return true;
 }
 
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
 {
-        unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
-        return retain_bytes / c->block_size;
+        unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+        return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
 }
 
 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1553,7 +1554,7 @@
 	struct dm_buffer *b, *tmp;
 	unsigned long freed = 0;
 	unsigned long count = nr_to_scan;
-	unsigned retain_target = get_retain_buffers(c);
+	unsigned long retain_target = get_retain_buffers(c);
 
 	for (l = 0; l < LIST_SIZE; l++) {
 		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1779,11 +1780,19 @@
 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
 {
 	struct dm_buffer *b, *tmp;
-	unsigned retain_target = get_retain_buffers(c);
-	unsigned count;
+	unsigned long retain_target = get_retain_buffers(c);
+	unsigned long count;
+	LIST_HEAD(write_list);
 
 	dm_bufio_lock(c);
 
+	__check_watermark(c, &write_list);
+	if (unlikely(!list_empty(&write_list))) {
+		dm_bufio_unlock(c);
+		__flush_write_list(&write_list);
+		dm_bufio_lock(c);
+	}
+
 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
 	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
 		if (count <= retain_target)
@@ -1808,6 +1817,8 @@
 
 	mutex_lock(&dm_bufio_clients_lock);
 
+	__cache_size_refresh();
+
 	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
 		__evict_old_buffers(c, max_age_hz);
 
@@ -1930,7 +1941,7 @@
 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
 
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
 
 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
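
With retain_bytes widened to unsigned long, get_retain_buffers() above also switches from a byte/block-size divide to a shift by (sectors_per_block_bits + SECTOR_SHIFT), which stays exact for power-of-two block sizes and avoids 32-bit truncation for large retain values. A small standalone sketch of that conversion follows; the 1 GiB retain value and 4 KiB block size are example numbers only.

/* Standalone sketch: retain_bytes -> retain buffer count via a shift. */
#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte sectors */

int main(void)
{
	unsigned long retain_bytes = 1UL * 1024 * 1024 * 1024;   /* 1 GiB */
	unsigned sectors_per_block_bits = 3;   /* 8 sectors = 4 KiB blocks */
	unsigned long retain_buffers =
		retain_bytes >> (sectors_per_block_bits + SECTOR_SHIFT);

	printf("retain %lu buffers of %u bytes\n",
	       retain_buffers, 1u << (sectors_per_block_bits + SECTOR_SHIFT));
	return 0;
}
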
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6955778..6937ca4 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1383,17 +1383,19 @@
 
 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
 {
-	int r;
+	int r = -EINVAL;
 	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
 				 clear_clean_shutdown);
 
 	WRITE_LOCK(cmd);
+	if (cmd->fail_io)
+		goto out;
+
 	r = __commit_transaction(cmd, mutator);
 	if (r)
 		goto out;
 
 	r = __begin_transaction(cmd);
-
 out:
 	WRITE_UNLOCK(cmd);
 	return r;
@@ -1405,7 +1407,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_free(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
@@ -1417,7 +1420,8 @@
 	int r = -EINVAL;
 
 	READ_LOCK(cmd);
-	r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+	if (!cmd->fail_io)
+		r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
 	READ_UNLOCK(cmd);
 
 	return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e477af8..ac8235b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -119,7 +119,8 @@
 
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
-static void activate_path(struct work_struct *work);
+static void activate_or_offline_path(struct pgpath *pgpath);
+static void activate_path_work(struct work_struct *work);
 static void process_queued_bios(struct work_struct *work);
 
 /*-----------------------------------------------
@@ -144,7 +145,7 @@
 
 	if (pgpath) {
 		pgpath->is_active = true;
-		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
+		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
 	}
 
 	return pgpath;
@@ -1515,10 +1516,8 @@
 	spin_unlock_irqrestore(&m->lock, flags);
 }
 
-static void activate_path(struct work_struct *work)
+static void activate_or_offline_path(struct pgpath *pgpath)
 {
-	struct pgpath *pgpath =
-		container_of(work, struct pgpath, activate_path.work);
 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
 
 	if (pgpath->is_active && !blk_queue_dying(q))
@@ -1527,6 +1526,14 @@
 		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
 }
 
+static void activate_path_work(struct work_struct *work)
+{
+	struct pgpath *pgpath =
+		container_of(work, struct pgpath, activate_path.work);
+
+	activate_or_offline_path(pgpath);
+}
+
 static int noretry_error(int error)
 {
 	switch (error) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index a15091a..4477bf9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -485,11 +485,11 @@
 	if (r < 0)
 		return r;
 
-	r = save_sm_roots(pmd);
+	r = dm_tm_pre_commit(pmd->tm);
 	if (r < 0)
 		return r;
 
-	r = dm_tm_pre_commit(pmd->tm);
+	r = save_sm_roots(pmd);
 	if (r < 0)
 		return r;
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f0aad08..ed25f30 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -80,8 +80,6 @@
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
-
 /*
  * To check the return value from dm_table_find_target().
  */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 24925f2..eddd360 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6752,6 +6752,7 @@
 	void __user *argp = (void __user *)arg;
 	struct mddev *mddev = NULL;
 	int ro;
+	bool did_set_md_closing = false;
 
 	if (!md_ioctl_valid(cmd))
 		return -ENOTTY;
@@ -6841,7 +6842,9 @@
 			err = -EBUSY;
 			goto out;
 		}
+		WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
 		set_bit(MD_CLOSING, &mddev->flags);
+		did_set_md_closing = true;
 		mutex_unlock(&mddev->open_mutex);
 		sync_blockdev(bdev);
 	}
@@ -7041,6 +7044,8 @@
 		mddev->hold_active = 0;
 	mddev_unlock(mddev);
 out:
+	if (did_set_md_closing)
+		clear_bit(MD_CLOSING, &mddev->flags);
 	return err;
 }
 #ifdef CONFIG_COMPAT
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 20a4032..7a75b50 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -897,8 +897,12 @@
 		else
 			*result_key = le64_to_cpu(ro_node(s)->keys[0]);
 
-		if (next_block || flags & INTERNAL_NODE)
-			block = value64(ro_node(s), i);
+		if (next_block || flags & INTERNAL_NODE) {
+			if (find_highest)
+				block = value64(ro_node(s), i);
+			else
+				block = value64(ro_node(s), 0);
+		}
 
 	} while (flags & INTERNAL_NODE);
 
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a..32adf6b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@
 
 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
 {
+	int r;
+	uint32_t old_count;
 	enum allocation_event ev;
 	struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
 
-	return sm_ll_dec(&smd->ll, b, &ev);
+	r = sm_ll_dec(&smd->ll, b, &ev);
+	if (!r && (ev == SM_FREE)) {
+		/*
+		 * It's only free if it's also free in the last
+		 * transaction.
+		 */
+		r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+		if (!r && !old_count)
+			smd->nr_allocated_this_transaction--;
+	}
+
+	return r;
 }
 
 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cce6057b..f34ad2b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2253,6 +2253,10 @@
 		err = -ENOMEM;
 
 	mutex_unlock(&conf->cache_size_mutex);
+
+	conf->slab_cache = sc;
+	conf->active_name = 1-conf->active_name;
+
 	/* Step 4, return new stripes to service */
 	while(!list_empty(&newstripes)) {
 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2270,8 +2274,6 @@
 	}
 	/* critical section pass, GFP_NOIO no longer needed */
 
-	conf->slab_cache = sc;
-	conf->active_name = 1-conf->active_name;
 	if (!err)
 		conf->pool_size = newsize;
 	return err;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 5afb9c5..fd0f25e 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3852,7 +3852,9 @@
 			FE_CAN_MUTE_TS |
 			FE_CAN_2G_MODULATION,
 		.frequency_min = 42000000,
-		.frequency_max = 1002000000
+		.frequency_max = 1002000000,
+		.symbol_rate_min = 870000,
+		.symbol_rate_max = 11700000
 	},
 	.init = cxd2841er_init_tc,
 	.sleep = cxd2841er_sleep_tc,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
index 4f246e1..9a30d64 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -628,9 +628,46 @@
 	return rc;
 }
 
-static int cam_cpas_util_apply_client_ahb_vote(struct cam_cpas *cpas_core,
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+	struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct dev_pm_opp *opp;
+	unsigned int corner;
+	enum cam_vote_level level = CAM_SVS_VOTE;
+	unsigned long corner_freq = freq;
+	int i;
+
+	if (!dev || !req_level) {
+		pr_err("Invalid params %pK, %pK\n", dev, req_level);
+		return -EINVAL;
+	}
+
+	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+	if (IS_ERR(opp)) {
+		pr_err("Error on OPP freq :%ld, %pK\n", corner_freq, opp);
+		return -EINVAL;
+	}
+
+	corner = dev_pm_opp_get_voltage(opp);
+
+	for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+		if (corner == soc_private->vdd_ahb[i].vdd_corner)
+			level = soc_private->vdd_ahb[i].ahb_level;
+
+	CPAS_CDBG("From OPP table : freq=[%ld][%ld], corner=%d, level=%d\n",
+		freq, corner_freq, corner, level);
+
+	*req_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote)
 {
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
 	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
 	enum cam_vote_level required_level;
 	enum cam_vote_level highest_level;
@@ -642,12 +679,14 @@
 	}
 
 	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
-		pr_err("Dynamic AHB vote not supported\n");
-		return -EINVAL;
+		rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+			ahb_vote->vote.freq, &required_level);
+		if (rc)
+			return rc;
+	} else {
+		required_level = ahb_vote->vote.level;
 	}
 
-	required_level = ahb_vote->vote.level;
-
 	if (cpas_client->ahb_level == required_level)
 		return 0;
 
@@ -708,7 +747,7 @@
 		ahb_vote->vote.freq,
 		cpas_core->cpas_client[client_indx]->ahb_level);
 
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
 		cpas_core->cpas_client[client_indx], ahb_vote);
 
 unlock_client:
@@ -780,7 +819,7 @@
 	CPAS_CDBG("AHB :client[%d] type[%d], level[%d], applied[%d]\n",
 		client_indx, ahb_vote->type, ahb_vote->vote.level,
 		cpas_client->ahb_level);
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		ahb_vote);
 	if (rc)
 		goto done;
@@ -800,8 +839,8 @@
 			goto done;
 		}
 
-		if (cpas_core->internal_ops.power_on_settings) {
-			rc = cpas_core->internal_ops.power_on_settings(cpas_hw);
+		if (cpas_core->internal_ops.power_on) {
+			rc = cpas_core->internal_ops.power_on(cpas_hw);
 			if (rc) {
 				cam_cpas_soc_disable_resources(
 					&cpas_hw->soc_info);
@@ -873,6 +912,15 @@
 	cpas_core->streamon_clients--;
 
 	if (cpas_core->streamon_clients == 0) {
+		if (cpas_core->internal_ops.power_off) {
+			rc = cpas_core->internal_ops.power_off(cpas_hw);
+			if (rc) {
+				pr_err("failed in power_off settings rc=%d\n",
+					rc);
+				/* Do not return error, passthrough */
+			}
+		}
+
 		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info);
 		if (rc) {
 			pr_err("disable_resorce failed, rc=%d\n", rc);
@@ -883,7 +931,7 @@
 
 	ahb_vote.type = CAM_VOTE_ABSOLUTE;
 	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
-	rc = cam_cpas_util_apply_client_ahb_vote(cpas_core, cpas_client,
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
 		&ahb_vote);
 	if (rc)
 		goto done;
@@ -1282,15 +1330,22 @@
 	cpas_hw_intf->hw_ops.write = NULL;
 	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
 
+	cpas_core->work_queue = alloc_workqueue("cam-cpas",
+		WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+	if (!cpas_core->work_queue) {
+		rc = -ENOMEM;
+		goto release_mem;
+	}
+
 	internal_ops = &cpas_core->internal_ops;
 	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
-	if (rc != 0)
-		goto release_mem;
+	if (rc)
+		goto release_workq;
 
 	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
 		internal_ops->handle_irq, cpas_hw);
 	if (rc)
-		goto release_mem;
+		goto release_workq;
 
 	soc_private = (struct cam_cpas_private_soc *)
 		cpas_hw->soc_info.soc_private;
@@ -1375,6 +1430,9 @@
 	cam_cpas_util_client_cleanup(cpas_hw);
 deinit_platform_res:
 	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
 release_mem:
 	mutex_destroy(&cpas_hw->hw_mutex);
 	kfree(cpas_core);
@@ -1406,6 +1464,8 @@
 	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
 	cam_cpas_util_client_cleanup(cpas_hw);
 	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
 	mutex_destroy(&cpas_hw->hw_mutex);
 	kfree(cpas_core);
 	kfree(cpas_hw);
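
The dynamic AHB vote path added above resolves a client's requested clock rate to a voltage corner through the OPP table and then maps that corner to an AHB level using the vdd-corner table parsed from DT, defaulting to SVS when no mapping matches. A standalone sketch of that two-step lookup follows in illustrative userspace C; the OPP entries, corner numbers and enum values are made-up examples, not the real tables or headers.

/* Standalone sketch: clock rate -> voltage corner -> AHB vote level. */
#include <stdio.h>

enum cam_vote_level {
	CAM_SUSPEND_VOTE, CAM_SVS_VOTE, CAM_NOMINAL_VOTE, CAM_TURBO_VOTE
};

struct vdd_ahb_map {
	unsigned int vdd_corner;
	enum cam_vote_level ahb_level;
};

/* Example stand-in for the OPP table: frequency and its voltage corner. */
static const struct { unsigned long freq; unsigned int corner; } opp[] = {
	{ 200000000, 1 }, { 400000000, 2 }, { 600000000, 3 },
};

/* Example stand-in for the DT vdd-corners / vdd-corner-ahb-mapping tables. */
static const struct vdd_ahb_map vdd_ahb[] = {
	{ 1, CAM_SVS_VOTE }, { 2, CAM_NOMINAL_VOTE }, { 3, CAM_TURBO_VOTE },
};

static enum cam_vote_level level_for_freq(unsigned long freq)
{
	enum cam_vote_level level = CAM_SVS_VOTE;   /* default, as in the driver */
	unsigned int corner = 0;
	size_t i;

	/* dev_pm_opp_find_freq_ceil() equivalent: first OPP >= requested rate */
	for (i = 0; i < sizeof(opp) / sizeof(opp[0]); i++) {
		if (opp[i].freq >= freq) {
			corner = opp[i].corner;
			break;
		}
	}

	for (i = 0; i < sizeof(vdd_ahb) / sizeof(vdd_ahb[0]); i++)
		if (vdd_ahb[i].vdd_corner == corner)
			level = vdd_ahb[i].ahb_level;

	return level;
}

int main(void)
{
	printf("freq 300 MHz -> AHB level %d\n", level_for_freq(300000000));
	return 0;
}
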
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
index c181302..6d4fafe 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -17,6 +17,7 @@
 #include "cam_cpas_hw_intf.h"
 
 #define CPAS_MAX_CLIENTS 20
+#define CAM_CPAS_INFLIGHT_WORKS 5
 
 #define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
 #define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
@@ -45,7 +46,8 @@
  * @init_hw_version: Function pointer for hw init based on version
  * @handle_irq: Function poniter for irq handling
  * @setup_regbase: Function pointer for setup rebase indices
- * @power_on_settings: Function pointer for hw core specific power on settings
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
  *
  */
 struct cam_cpas_internal_ops {
@@ -56,7 +58,8 @@
 	irqreturn_t (*handle_irq)(int irq_num, void *data);
 	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
 		int32_t regbase_index[], int32_t num_reg_map);
-	int (*power_on_settings)(struct cam_hw_info *cpas_hw);
+	int (*power_on)(struct cam_hw_info *cpas_hw);
+	int (*power_off)(struct cam_hw_info *cpas_hw);
 };
 
 /**
@@ -167,6 +170,7 @@
  * @ahb_bus_client: AHB Bus client info
  * @axi_ports_list_head: Head pointing to list of AXI ports
  * @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
  *
  */
 struct cam_cpas {
@@ -180,6 +184,7 @@
 	struct cam_cpas_bus_client ahb_bus_client;
 	struct list_head axi_ports_list_head;
 	struct cam_cpas_internal_ops internal_ops;
+	struct workqueue_struct *work_queue;
 };
 
 int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
index d2c3e06..9ee5a43 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -29,6 +29,13 @@
 
 #define BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
 
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
 /**
  * enum cam_cpas_hw_type - Enum for CPAS HW type
  */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
index fdebdc7..b774625 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -51,21 +51,23 @@
 static struct cam_cpas_intf *g_cpas_intf;
 
 int cam_cpas_get_hw_info(uint32_t *camera_family,
-	struct cam_hw_version *camera_version)
+	struct cam_hw_version *camera_version,
+	struct cam_hw_version *cpas_version)
 {
 	if (!CAM_CPAS_INTF_INITIALIZED()) {
 		pr_err("cpas intf not initialized\n");
 		return -ENODEV;
 	}
 
-	if (!camera_family || !camera_version) {
-		pr_err("invalid input %pK %pK\n", camera_family,
-			camera_version);
+	if (!camera_family || !camera_version || !cpas_version) {
+		pr_err("invalid input %pK %pK %pK\n", camera_family,
+			camera_version, cpas_version);
 		return -EINVAL;
 	}
 
 	*camera_family = g_cpas_intf->hw_caps.camera_family;
 	*camera_version = g_cpas_intf->hw_caps.camera_version;
+	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
 
 	return 0;
 }
@@ -344,7 +346,7 @@
 		}
 
 		rc = cam_cpas_get_hw_info(&query.camera_family,
-			&query.camera_version);
+			&query.camera_version, &query.cpas_version);
 		if (rc)
 			break;
 
@@ -428,6 +430,7 @@
 static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
 	unsigned int cmd, unsigned long arg)
 {
+	struct cam_control cmd_data;
 	int32_t rc;
 	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
 
@@ -436,9 +439,16 @@
 		return -ENODEV;
 	}
 
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		pr_err("Failed to copy from user_ptr=%pK size=%zu\n",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
 	switch (cmd) {
 	case VIDIOC_CAM_CONTROL:
-		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
 		break;
 	default:
 		pr_err("Invalid command %d for CPAS!\n", cmd);
@@ -446,6 +456,15 @@
 		break;
 	}
 
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			pr_err("Failed to copy to user_ptr=%pK size=%zu\n",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
 	return rc;
 }
 #endif
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
index 0a8e6bb..0c71ece 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -22,6 +22,26 @@
 #include "cam_cpas_hw.h"
 #include "cam_cpas_soc.h"
 
+static int cam_cpas_get_vote_level_from_string(const char *string,
+	enum cam_vote_level *vote_level)
+{
+	if (!vote_level || !string)
+		return -EINVAL;
+
+	if (strnstr("suspend", string, strlen(string)))
+		*vote_level = CAM_SUSPEND_VOTE;
+	else if (strnstr("svs", string, strlen(string)))
+		*vote_level = CAM_SVS_VOTE;
+	else if (strnstr("nominal", string, strlen(string)))
+		*vote_level = CAM_NOMINAL_VOTE;
+	else if (strnstr("turbo", string, strlen(string)))
+		*vote_level = CAM_TURBO_VOTE;
+	else
+		*vote_level = CAM_SVS_VOTE;
+
+	return 0;
+}
+
 int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
 	struct cam_cpas_private_soc *soc_private)
 {
@@ -89,6 +109,42 @@
 	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
 		"client-bus-camnoc-based");
 
+	count = of_property_count_u32_elems(of_node, "vdd-corners");
+	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+		count)) {
+		const char *ahb_string;
+
+		for (i = 0; i < count; i++) {
+			rc = of_property_read_u32_index(of_node, "vdd-corners",
+				i, &soc_private->vdd_ahb[i].vdd_corner);
+			if (rc) {
+				pr_err("vdd-corners failed at index=%d\n", i);
+				return -ENODEV;
+			}
+
+			rc = of_property_read_string_index(of_node,
+				"vdd-corner-ahb-mapping", i, &ahb_string);
+			if (rc) {
+				pr_err("no ahb-mapping at index=%d\n", i);
+				return -ENODEV;
+			}
+
+			rc = cam_cpas_get_vote_level_from_string(ahb_string,
+				&soc_private->vdd_ahb[i].ahb_level);
+			if (rc) {
+				pr_err("invalid ahb-string at index=%d\n", i);
+				return -EINVAL;
+			}
+
+			CPAS_CDBG("Vdd-AHB mapping [%d] : [%d] [%s] [%d]\n", i,
+				soc_private->vdd_ahb[i].vdd_corner,
+				ahb_string, soc_private->vdd_ahb[i].ahb_level);
+		}
+
+		soc_private->num_vdd_ahb_mapping = count;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
index fdd9386..d3dfbbd 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -16,6 +16,19 @@
 #include "cam_soc_util.h"
 
 #define CAM_CPAS_MAX_CLIENTS 20
+#define CAM_REGULATOR_LEVEL_MAX 16
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponding to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+	unsigned int vdd_corner;
+	enum cam_vote_level ahb_level;
+};
 
 /**
  * struct cam_cpas_private_soc : CPAS private DT info
@@ -27,6 +40,8 @@
  * @axi_camnoc_based: Whether AXi access is camnoc based
  * @client_axi_port_name: AXI Port name for each client
  * @axi_port_list_node : Node representing AXI Ports list
+ * @num_vdd_ahb_mapping : Number of vdd to ahb level mappings supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
  *
  */
 struct cam_cpas_private_soc {
@@ -37,6 +52,8 @@
 	bool axi_camnoc_based;
 	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
 	struct device_node *axi_port_list_node;
+	uint32_t num_vdd_ahb_mapping;
+	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
 };
 
 int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
index fa8ab89..95e26c5 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -81,7 +81,8 @@
 	internal_ops->init_hw_version = NULL;
 	internal_ops->handle_irq = NULL;
 	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
-	internal_ops->power_on_settings = NULL;
+	internal_ops->power_on = NULL;
+	internal_ops->power_off = NULL;
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 415de47..b901410 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/timer.h>
+#include <linux/slab.h>
 
 #include "cam_cpas_hw_intf.h"
 #include "cam_cpas_hw.h"
@@ -105,15 +106,64 @@
 static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
 	struct cam_hw_soc_info *soc_info)
 {
-	uint32_t reg_value;
+	uint32_t reg_value[4];
 	int i;
+	int size = camnoc_info->error_logger_size;
 	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
 
-	for (i = 0; i < camnoc_info->error_logger_size; i++) {
-		reg_value = cam_io_r_mb(
+	for (i = 0; (i + 3) < size; i = i + 4) {
+		reg_value[0] = cam_io_r_mb(
 			soc_info->reg_map[camnoc_index].mem_base +
 			camnoc_info->error_logger[i]);
-		pr_err("ErrorLogger[%d] : 0x%x\n", i, reg_value);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		reg_value[2] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 2]);
+		reg_value[3] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 3]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1], reg_value[2], reg_value[3]);
+	}
+
+	if ((i + 2) < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		reg_value[2] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 2]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1], reg_value[2]);
+		i = i + 3;
+	}
+
+	if ((i + 1) < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		reg_value[1] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i + 1]);
+		pr_err("offset[0x%x] values [0x%x] [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0],
+			reg_value[1]);
+		i = i + 2;
+	}
+
+	if (i < size) {
+		reg_value[0] = cam_io_r_mb(
+			soc_info->reg_map[camnoc_index].mem_base +
+			camnoc_info->error_logger[i]);
+		pr_err("offset[0x%x] values [0x%x]\n",
+			camnoc_info->error_logger[i], reg_value[0]);
 	}
 
 	return 0;
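
The unrolled dump above prints the error-logger registers four to a line, with dedicated tails for the 3/2/1 remainders. An equivalent, more compact sketch (not the patch's code) reads them in one loop and builds each line with scnprintf; the function name and buffer size are illustrative.

/* Sketch: dump registers in groups of up to four per log line. */
static void errlogger_dump(void __iomem *base, uint32_t *offsets, int size)
{
	char line[96];
	int i, j, len;

	for (i = 0; i < size; i += 4) {
		len = scnprintf(line, sizeof(line), "offset[0x%x] values",
			offsets[i]);
		for (j = i; j < size && j < i + 4; j++)
			len += scnprintf(line + len, sizeof(line) - len,
				" [0x%x]", cam_io_r_mb(base + offsets[j]));
		pr_err("%s\n", line);
	}
}
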
@@ -128,9 +178,10 @@
 	reg_value = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
 		camnoc_info->irq_err[i].err_status.offset);
 
-	pr_err("Dumping ubwc error status : 0x%x\n", reg_value);
+	pr_err("Dumping ubwc error status [%d]: offset[0x%x] value[0x%x]\n",
+		i, camnoc_info->irq_err[i].err_status.offset, reg_value);
 
-	return 0;
+	return reg_value;
 }
 
 static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw)
@@ -172,65 +223,130 @@
 	return 0;
 }
 
-irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+	enum cam_camnoc_hw_irq_type irq_type, uint32_t irq_data)
 {
-	uint32_t irq_status;
-	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
-	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
-	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
-	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int i;
+	struct cam_cpas_client *cpas_client;
+
+	CPAS_CDBG("Notify CB : num_clients=%d, registered=%d, started=%d\n",
+		cpas_core->num_clients, cpas_core->registered_clients,
+		cpas_core->streamon_clients);
+
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+			cpas_client = cpas_core->cpas_client[i];
+			if (cpas_client->data.cam_cpas_client_cb) {
+				CPAS_CDBG("Calling client CB %d : %d 0x%x\n",
+					i, irq_type, irq_data);
+				cpas_client->data.cam_cpas_client_cb(
+					cpas_client->data.client_handle,
+					cpas_client->data.userdata,
+					(enum cam_camnoc_irq_type)irq_type,
+					irq_data);
+			}
+		}
+	}
+}
+
+static void cam_cpastop_work(struct work_struct *work)
+{
+	struct cam_cpas_work_payload *payload;
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_hw_soc_info *soc_info;
 	int i;
 	enum cam_camnoc_hw_irq_type irq_type;
+	uint32_t irq_data;
 
-	irq_status = cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
-		camnoc_info->irq_sbm->sbm_status.offset);
+	payload = container_of(work, struct cam_cpas_work_payload, work);
+	if (!payload) {
+		pr_err("NULL payload");
+		return;
+	}
 
-	pr_err("IRQ callback, irq_status=0x%x\n", irq_status);
+	cpas_hw = payload->hw;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_info = &cpas_hw->soc_info;
 
 	for (i = 0; i < camnoc_info->irq_err_size; i++) {
-		if ((irq_status & camnoc_info->irq_err[i].sbm_port) &&
+		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
 			(camnoc_info->irq_err[i].enable)) {
 			irq_type = camnoc_info->irq_err[i].irq_type;
 			pr_err("Error occurred, type=%d\n", irq_type);
+			irq_data = 0;
 
 			switch (irq_type) {
 			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
-				cam_cpastop_handle_errlogger(cpas_core,
-					soc_info);
+				irq_data = cam_cpastop_handle_errlogger(
+					cpas_core, soc_info);
 				break;
 			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
 			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
-				cam_cpastop_handle_ubwc_err(cpas_core,
-					soc_info, i);
+				irq_data = cam_cpastop_handle_ubwc_err(
+					cpas_core, soc_info, i);
 				break;
 			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
-				cam_cpastop_handle_ahb_timeout_err(cpas_hw);
+				irq_data = cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw);
 				break;
 			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
 				CPAS_CDBG("TEST IRQ\n");
 				break;
 			default:
+				pr_err("Invalid IRQ type\n");
 				break;
 			}
 
-			irq_status &= ~camnoc_info->irq_err[i].sbm_port;
+			cam_cpastop_notify_clients(cpas_core, irq_type,
+				irq_data);
+
+			payload->irq_status &=
+				~camnoc_info->irq_err[i].sbm_port;
 		}
 	}
 
-	if (irq_status)
-		pr_err("IRQ not handled, irq_status=0x%x\n", irq_status);
+	if (payload->irq_status)
+		pr_err("IRQ not handled irq_status=0x%x\n",
+			payload->irq_status);
+
+	kfree(payload);
+}
+
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	struct cam_cpas_work_payload *payload;
+
+	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+	if (!payload)
+		return IRQ_HANDLED;
+
+	payload->irq_status = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	CPAS_CDBG("IRQ callback, irq_status=0x%x\n", payload->irq_status);
+
+	payload->hw = cpas_hw;
+	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
 
 	if (TEST_IRQ_ENABLE)
 		cam_cpastop_disable_test_irq(cpas_hw);
 
 	cam_cpastop_reset_irq(cpas_hw);
 
+	queue_work(cpas_core->work_queue, &payload->work);
+
 	return IRQ_HANDLED;
 }
 
-static int cam_cpastop_static_settings(struct cam_hw_info *cpas_hw)
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
 {
 	int i;
 
@@ -256,6 +372,38 @@
 	return 0;
 }
 
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int rc = 0;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+		camnoc_info->errata_wa_list;
+
+	if (!errata_wa_list)
+		return 0;
+
+	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+		struct cam_cpas_hw_errata_wa *errata_wa =
+			&errata_wa_list->camnoc_flush_slave_pending_trans;
+
+		rc = cam_io_poll_value_wmask(
+			soc_info->reg_map[camnoc_index].mem_base +
+			errata_wa->data.reg_info.offset,
+			errata_wa->data.reg_info.value,
+			errata_wa->data.reg_info.mask,
+			CAM_CPAS_POLL_RETRY_CNT,
+			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+		if (rc) {
+			pr_err("camnoc flush slave pending trans failed\n");
+			/* Do not return error, passthrough */
+		}
+	}
+
+	return rc;
+}
+
 static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
 	struct cam_cpas_hw_caps *hw_caps)
 {
@@ -295,7 +443,8 @@
 	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
 	internal_ops->handle_irq = cam_cpastop_handle_irq;
 	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
-	internal_ops->power_on_settings = cam_cpastop_static_settings;
+	internal_ops->power_on = cam_cpastop_poweron;
+	internal_ops->power_off = cam_cpastop_poweroff;
 
 	return 0;
 }
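
The handler rework above keeps the hard-IRQ path minimal: read the SBM status, stash it in a GFP_ATOMIC payload, queue work, and let the worker decode errors, notify clients and free the payload. The same top-half/bottom-half split in generic form, with hypothetical device and helper names:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;	/* created at probe time */
	/* ... */
};

static void my_handle_status(struct my_dev *dev, u32 status);	/* hypothetical */
static u32 my_read_and_clear_status(struct my_dev *dev);	/* hypothetical */

struct my_irq_payload {
	struct my_dev *dev;
	u32 status;
	struct work_struct work;
};

static void my_irq_work(struct work_struct *work)
{
	struct my_irq_payload *p =
		container_of(work, struct my_irq_payload, work);

	my_handle_status(p->dev, p->status);	/* may sleep, log, notify */
	kfree(p);
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;
	struct my_irq_payload *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);	/* atomic: hard-IRQ context */
	if (!p)
		return IRQ_HANDLED;

	p->dev = dev;
	p->status = my_read_and_clear_status(dev);
	INIT_WORK(&p->work, my_irq_work);
	queue_work(dev->wq, &p->work);		/* deferred to process context */

	return IRQ_HANDLED;
}
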
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index 99aae3f..d5bb363 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -147,6 +147,31 @@
 };
 
 /**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+	bool enable;
+	union {
+		struct cam_cpas_reg reg_info;
+	} data;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ *         camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+	struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+};
+
+/**
  * struct cam_camnoc_info : Overall CAMNOC settings info
  *
  * @specific: Pointer to CAMNOC SPECIFICTONTTPTR settings
@@ -156,6 +181,7 @@
  * @irq_err_size: Array size of IRQ Error settings
  * @error_logger: Pointer to CAMNOC IRQ Error logger read registers
  * @error_logger_size: Array size of IRQ Error logger
+ * @errata_wa_list: HW Errata workaround info
  *
  */
 struct cam_camnoc_info {
@@ -166,6 +192,23 @@
 	int irq_err_size;
 	uint32_t *error_logger;
 	int error_logger_size;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
 };
 
 #endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
index 12c8e66..b30cd05 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -279,17 +279,16 @@
 			.value = 3,
 		},
 		.danger_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
-			.value = 0x0,
+			.value = 0xFFFFFF00,
 		},
 		.safe_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
 			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
-			.value = 0x0,
+			.value = 0x3,
 		},
 		.ubwc_ctl = {
 			.enable = true,
@@ -328,18 +327,16 @@
 			.value = 3,
 		},
 		.danger_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
-			.value = 0x0,
+			.value = 0xFFFFFF00,
 		},
 		.safe_lut = {
-			.enable = false,
+			.enable = true,
 			.access_type = CAM_REG_TYPE_READ_WRITE,
-			.masked_value = 0,
 			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
-			.value = 0x0,
+			.value = 0x3,
 		},
 		.ubwc_ctl = {
 			.enable = true,
@@ -516,6 +513,18 @@
 	0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
 };
 
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = true,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
 struct cam_camnoc_info cam170_cpas100_camnoc_info = {
 	.specific = &cam_cpas100_camnoc_specific[0],
 	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
@@ -527,6 +536,7 @@
 	.error_logger = &slave_error_logger[0],
 	.error_logger_size = sizeof(slave_error_logger) /
 		sizeof(slave_error_logger[0]),
+	.errata_wa_list = &cam170_cpas100_errata_wa_list,
 };
 
 #endif /* _CPASTOP100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index f6b0729..27b8504 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -312,13 +312,15 @@
  * @camera_family  : Camera family type. One of
  *                   CAM_FAMILY_CAMERA_SS
  *                   CAM_FAMILY_CPAS_SS
- * @camera_version : Camera version
+ * @camera_version : Camera platform version
+ * @cpas_version   : Camera cpas version
  *
  * @return 0 on success.
  *
  */
 int cam_cpas_get_hw_info(
 	uint32_t                 *camera_family,
-	struct cam_hw_version    *camera_version);
+	struct cam_hw_version    *camera_version,
+	struct cam_hw_version    *cpas_version);
 
 #endif /* _CAM_CPAS_API_H_ */
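
With the extra out-parameter, callers of cam_cpas_get_hw_info() receive the CPAS version alongside the camera platform version, as the CAM_QUERY_CAP handler earlier in this patch does. A minimal caller fragment:

uint32_t family;
struct cam_hw_version camera_version, cpas_version;
int rc;

rc = cam_cpas_get_hw_info(&family, &camera_version, &cpas_version);
if (rc)
	pr_err("cpas hw info query failed rc=%d\n", rc);
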
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index 76dd1f3..5a4e6e9 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -59,15 +59,22 @@
 		}
 
 		if (!bubble_state) {
-			CDBG("%s: Sync success: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with success: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
+
 		} else if (!req_isp->bubble_report) {
-			CDBG("%s: Sync failure: fd 0x%x\n", __func__,
+			CDBG("%s: Sync with failure: fd 0x%x\n", __func__,
 				   req_isp->fence_map_out[j].sync_id);
-			cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc)
+				pr_err("%s: Sync failed with rc = %d\n",
+					__func__, rc);
 		} else {
 			/*
 			 * Ignore the buffer done if bubble detect is on
@@ -277,7 +284,7 @@
 
 	ctx_isp->frame_id++;
 	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
-	pr_err("%s: next substate %d\n", __func__,
+	CDBG("%s: next substate %d\n", __func__,
 		ctx_isp->substate_activated);
 
 	return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 259e773..2bc4b00 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -23,6 +23,7 @@
 #include "cam_isp_packet_parser.h"
 #include "cam_ife_hw_mgr.h"
 #include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
 
 #undef CDBG
 #define CDBG(fmt, args...) pr_debug(fmt, ##args)
@@ -1493,19 +1494,6 @@
 	if (i == ctx->num_base)
 		master_base_idx = ctx->base[0].idx;
 
-	/* Stop the master CIDs first */
-	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-
-	/* stop rest of the CIDs  */
-	for (i = 0; i < ctx->num_base; i++) {
-		if (i == master_base_idx)
-			continue;
-		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
-			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
-	}
-
-
 	/* Stop the master CSID path first */
 	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
 			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
@@ -1519,6 +1507,18 @@
 			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
 	}
 
+	/* Stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CIDs  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
 	if (cam_cdm_stream_off(ctx->cdm_handle))
 		pr_err("%s%d: CDM stream off failed %d\n",
 			__func__, __LINE__, ctx->cdm_handle);
@@ -1920,6 +1920,13 @@
 	if (rc)
 		return rc;
 
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->mgr_common.cmd_iommu_hdl);
+	if (rc) {
+		pr_err("%s: Patch ISP packet failed.\n", __func__);
+		return rc;
+	}
+
 	prepare->num_hw_update_entries = 0;
 	prepare->num_in_map_entries = 0;
 	prepare->num_out_map_entries = 0;
@@ -2884,7 +2891,7 @@
 	int i, j;
 	struct cam_iommu_handle cdm_handles;
 
-	pr_info("%s: Enter\n", __func__);
+	CDBG("%s: Enter\n", __func__);
 
 	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
 
@@ -3018,7 +3025,7 @@
 
 	/* Create Worker for ife_hw_mgr with 10 tasks */
 	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
-			&g_ife_hw_mgr.workq);
+			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ);
 
 	if (rc < 0) {
 		pr_err("%s: Unable to create worker\n", __func__);
@@ -3037,7 +3044,7 @@
 	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
 	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
 
-	pr_info("%s: Exit\n", __func__);
+	CDBG("%s: Exit\n", __func__);
 	return 0;
 end:
 	if (rc) {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
index b608320..3c72279 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -315,15 +315,17 @@
 	struct cam_isp_resource_node       *res;
 	struct cam_ife_hw_mgr_res          *hw_mgr_res;
 	struct cam_isp_hw_get_buf_update    update_buf;
-	uint32_t kmd_buf_remain_size,  i, j, k, out_buf, in_buf,
-		res_id_out, res_id_in, num_plane, io_cfg_used_bytes, num_ent;
+	uint32_t                            kmd_buf_remain_size;
+	uint32_t                            i, j, num_out_buf, num_in_buf;
+	uint32_t                            res_id_out, res_id_in, plane_id;
+	uint32_t                            io_cfg_used_bytes, num_ent;
 	size_t size;
 
 	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
 			&prepare->packet->payload +
 			prepare->packet->io_configs_offset);
-	out_buf = 0;
-	in_buf  = 0;
+	num_out_buf = 0;
+	num_in_buf  = 0;
 	io_cfg_used_bytes = 0;
 
 	/* Max one hw entries required for each base */
@@ -357,17 +359,18 @@
 			CDBG("%s:%d configure output io with fill fence %d\n",
 				__func__, __LINE__, fill_fence);
 			if (fill_fence) {
-				if (out_buf < prepare->max_out_map_entries) {
-					prepare->out_map_entries[out_buf].
+				if (num_out_buf <
+					prepare->max_out_map_entries) {
+					prepare->out_map_entries[num_out_buf].
 						resource_handle =
 							io_cfg[i].resource_type;
-					prepare->out_map_entries[out_buf].
+					prepare->out_map_entries[num_out_buf].
 						sync_id = io_cfg[i].fence;
-					out_buf++;
+					num_out_buf++;
 				} else {
 					pr_err("%s:%d ln_out:%d max_ln:%d\n",
 						__func__, __LINE__,
-						out_buf,
+						num_out_buf,
 						prepare->max_out_map_entries);
 					return -EINVAL;
 				}
@@ -385,23 +388,22 @@
 			CDBG("%s:%d configure input io with fill fence %d\n",
 				__func__, __LINE__, fill_fence);
 			if (fill_fence) {
-				if (in_buf < prepare->max_in_map_entries) {
-					prepare->in_map_entries[in_buf].
+				if (num_in_buf < prepare->max_in_map_entries) {
+					prepare->in_map_entries[num_in_buf].
 						resource_handle =
 							io_cfg[i].resource_type;
-					prepare->in_map_entries[in_buf].
+					prepare->in_map_entries[num_in_buf].
 						sync_id =
 							io_cfg[i].fence;
-					in_buf++;
+					num_in_buf++;
 				} else {
 					pr_err("%s:%d ln_in:%d imax_ln:%d\n",
 						__func__, __LINE__,
-						in_buf,
+						num_in_buf,
 						prepare->max_in_map_entries);
 					return -EINVAL;
 				}
 			}
-			/*TO DO get the input FE address and add to list */
 			continue;
 		} else {
 			pr_err("%s:%d Invalid io config direction :%d\n",
@@ -427,27 +429,36 @@
 			}
 
 			memset(io_addr, 0, sizeof(io_addr));
-			num_plane = 0;
-			for (k = 0; k < CAM_PACKET_MAX_PLANES; k++) {
-				if (!io_cfg[i].mem_handle[k])
-					continue;
 
-				rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[k],
-					iommu_hdl, &io_addr[num_plane], &size);
+			for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
+						plane_id++) {
+				if (!io_cfg[i].mem_handle[plane_id])
+					break;
+
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane_id],
+					iommu_hdl, &io_addr[plane_id], &size);
 				if (rc) {
 					pr_err("%s:%d no io addr for plane%d\n",
-						__func__, __LINE__, k);
+						__func__, __LINE__, plane_id);
 					rc = -ENOMEM;
 					return rc;
 				}
+
+				if (io_addr[plane_id] >> 32) {
+					pr_err("Invalid mapped address\n");
+					rc = -EINVAL;
+					return rc;
+				}
+
 				/* need to update with offset */
-				io_addr[num_plane] += io_cfg->offsets[k];
+				io_addr[plane_id] +=
+						io_cfg[i].offsets[plane_id];
 				CDBG("%s: get io_addr for plane %d: 0x%llx\n",
-					__func__, num_plane,
-					io_addr[num_plane]);
-				num_plane++;
+					__func__, plane_id,
+					io_addr[plane_id]);
 			}
-			if (!num_plane) {
+			if (!plane_id) {
 				pr_err("%s:%d No valid planes for res%d\n",
 					__func__, __LINE__, res->res_id);
 				rc = -ENOMEM;
@@ -471,7 +482,8 @@
 					io_cfg_used_bytes/4;
 			update_buf.cdm.size = kmd_buf_remain_size;
 			update_buf.image_buf = io_addr;
-			update_buf.num_buf   = num_plane;
+			update_buf.num_buf   = plane_id;
+			update_buf.io_cfg    = &io_cfg[i];
 
 			CDBG("%s:%d: cmd buffer 0x%pK, size %d\n", __func__,
 				__LINE__, update_buf.cdm.cmd_buf_addr,
@@ -509,8 +521,8 @@
 	}
 
 	if (fill_fence) {
-		prepare->num_out_map_entries = out_buf;
-		prepare->num_in_map_entries  = in_buf;
+		prepare->num_out_map_entries = num_out_buf;
+		prepare->num_in_map_entries  = num_in_buf;
 	}
 
 	return rc;
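
The parser change above walks mem_handle[] only until the first empty slot, rejects any mapped address that does not fit in 32 bits (presumably because the write-master address registers are programmed as 32-bit values), and indexes the plane offsets with io_cfg[i] rather than the first entry. A condensed sketch of that per-plane loop, with the surrounding bookkeeping elided:

/* Sketch: gather per-plane IOVAs for one io_cfg entry. */
for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++) {
	if (!io_cfg[i].mem_handle[plane_id])
		break;				/* planes are packed from index 0 */

	rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane_id],
		iommu_hdl, &io_addr[plane_id], &size);
	if (rc)
		return -ENOMEM;

	if (io_addr[plane_id] >> 32)		/* must fit a 32-bit register */
		return -EINVAL;

	io_addr[plane_id] += io_cfg[i].offsets[plane_id];
}
if (!plane_id)					/* no valid plane found */
	return -ENOMEM;
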
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index 6306df3..f09fdc7 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -988,8 +988,8 @@
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
 
-	/* select rotate period as  5 frame */
-	val =  5 << 8;
+	/* static frame with split color bar */
+	val =  1 << 5;
 	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
 		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
 	/* config pix pattern */
@@ -1133,7 +1133,7 @@
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * configure the IPP and enable the time stamp capture.
 	 * enable the HW measurement blocks
 	 */
@@ -1417,7 +1417,7 @@
 	if (rc)
 		return rc;
 
-	/**
+	/*
 	 * RDI path config and enable the time stamp capture
 	 * Enable the measurement blocks
 	 */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index ea34406..6c6f38b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -149,14 +149,16 @@
  * @Brief:         Get cdm commands for buffer updates.
  *
  * @ cdm:          Command buffer information
- * @ image_buf:    Contain the image buffer information
+ * @ image_buf:    image buffer address array
  * @ num_buf:      Number of buffers in the image_buf array
+ * @ io_cfg:       IO buffer config information sent from UMD
  *
  */
 struct cam_isp_hw_get_buf_update {
 	struct cam_isp_hw_get_cdm_args  cdm;
 	uint64_t                       *image_buf;
 	uint32_t                        num_buf;
+	struct cam_buf_io_cfg          *io_cfg;
 };
 
 #endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 6e62dcf..5e629b6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -28,6 +28,16 @@
 
 #define FRAME_BASED_EN 0
 
+#define MAX_BUF_UPDATE_REG_NUM   20
+#define MAX_REG_VAL_PAIR_SIZE    \
+		(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
+		do {                                               \
+			buf_array[index++] = offset;               \
+			buf_array[index++] = val;                  \
+		} while (0)
+
 static uint32_t irq_reg_offset[CAM_IFE_BUS_IRQ_REGISTERS_MAX] = {
 	0x0000205C,
 	0x00002060,
@@ -64,6 +74,8 @@
 	void                                       *bus_irq_controller;
 	void                                       *vfe_irq_controller;
 	struct cam_vfe_bus_ver2_reg_offset_common  *common_reg;
+	uint32_t                                    io_buf_update[
+							MAX_REG_VAL_PAIR_SIZE];
 };
 
 struct cam_vfe_bus_ver2_wm_resource_data {
@@ -73,6 +85,7 @@
 
 	uint32_t             irq_enabled;
 
+	uint32_t             init_cfg_done;
 	uint32_t             offset;
 	uint32_t             width;
 	uint32_t             height;
@@ -83,10 +96,21 @@
 	uint32_t             burst_len;
 	uint32_t             frame_based;
 
+	uint32_t             en_ubwc;
+	uint32_t             packer_cfg;
+	uint32_t             tile_cfg;
+	uint32_t             h_init;
+	uint32_t             v_init;
+	uint32_t             ubwc_meta_stride;
+	uint32_t             ubwc_mode_cfg;
+	uint32_t             ubwc_meta_offset;
+
 	uint32_t             irq_subsample_period;
 	uint32_t             irq_subsample_pattern;
 	uint32_t             framedrop_period;
 	uint32_t             framedrop_pattern;
+
+	uint32_t             en_cfg;
 };
 
 struct cam_vfe_bus_ver2_comp_grp_data {
@@ -598,15 +622,52 @@
 
 	rsrc_data->width = out_port_info->width;
 	rsrc_data->height = out_port_info->height;
-	if (plane == PLANE_C) {
-		switch (rsrc_data->format) {
-		case CAM_FORMAT_NV21:
-		case CAM_FORMAT_NV12:
-			rsrc_data->height /= 2;
+
+	if (rsrc_data->index < 3) {
+		rsrc_data->width = rsrc_data->width * 5/4 * rsrc_data->height;
+		rsrc_data->height = 1;
+		rsrc_data->pack_fmt = 0x0;
+		rsrc_data->en_cfg = 0x3;
+	} else if (rsrc_data->index < 5) {
+		switch (plane) {
+		case PLANE_Y:
+			switch (rsrc_data->format) {
+			case CAM_FORMAT_UBWC_NV12:
+			case CAM_FORMAT_UBWC_NV12_4R:
+			case CAM_FORMAT_UBWC_TP10:
+				rsrc_data->en_ubwc = 1;
+				break;
+			default:
+				break;
+			}
+			break;
+		case PLANE_C:
+			switch (rsrc_data->format) {
+			case CAM_FORMAT_NV21:
+			case CAM_FORMAT_NV12:
+				rsrc_data->height /= 2;
+				break;
+			case CAM_FORMAT_UBWC_NV12:
+			case CAM_FORMAT_UBWC_NV12_4R:
+			case CAM_FORMAT_UBWC_TP10:
+				rsrc_data->height /= 2;
+				rsrc_data->en_ubwc = 1;
+				break;
+			default:
+				break;
+			}
 			break;
 		default:
-			break;
+			pr_err("Invalid plane type %d\n", plane);
+			return -EINVAL;
 		}
+		rsrc_data->pack_fmt = 0xE;
+		rsrc_data->en_cfg = 0x1;
+	} else {
+		rsrc_data->width = rsrc_data->width * 4;
+		rsrc_data->height = rsrc_data->height / 2;
+		rsrc_data->pack_fmt = 0x0;
+		rsrc_data->en_cfg = 0x1;
 	}
 
 	if (vfe_out_res_id >= CAM_ISP_IFE_OUT_RES_RDI_0 &&
@@ -638,7 +699,16 @@
 	rsrc_data->irq_subsample_pattern = 0;
 	rsrc_data->framedrop_period = 0;
 	rsrc_data->framedrop_pattern = 0;
-
+	rsrc_data->packer_cfg = 0;
+	rsrc_data->en_ubwc = 0;
+	rsrc_data->tile_cfg = 0;
+	rsrc_data->h_init = 0;
+	rsrc_data->v_init = 0;
+	rsrc_data->ubwc_meta_stride = 0;
+	rsrc_data->ubwc_mode_cfg = 0;
+	rsrc_data->ubwc_meta_offset = 0;
+	rsrc_data->init_cfg_done = 0;
+	rsrc_data->en_cfg = 0;
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
 
 	return 0;
@@ -651,52 +721,18 @@
 		wm_res->res_priv;
 	struct cam_vfe_bus_ver2_common_data        *common_data =
 		rsrc_data->common_data;
-	uint32_t                                    width;
-	uint32_t                                    height;
-	uint32_t                                    pack_fmt;
-	uint32_t                                    stride;
-	uint32_t                                    en_cfg;
-
-	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
-		rsrc_data->width, rsrc_data->height);
-	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
-		rsrc_data->pack_fmt & PACKER_FMT_MAX);
-	CDBG("WM res %d stride = %d, burst len = %d\n",
-		rsrc_data->index, rsrc_data->width, 0xf);
 
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_addr);
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->header_cfg);
 	cam_io_w_mb(0, common_data->mem_base + rsrc_data->hw_regs->frame_inc);
 	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
 
-	if (rsrc_data->index < 3) {
-		width = rsrc_data->width * 5/4 * rsrc_data->height;
-		height = 1;
-		pack_fmt = 0x0;
-		stride = rsrc_data->width * 5/4 * rsrc_data->height;
-		en_cfg = 0x3;
-	} else if (rsrc_data->index < 5) {
-		width = rsrc_data->width;
-		height = rsrc_data->height;
-		pack_fmt = 0xE;
-		stride = rsrc_data->width;
-		en_cfg = 0x1;
-	} else {
-		width = rsrc_data->width * 4;
-		height = rsrc_data->height / 2;
-		pack_fmt = 0x0;
-		stride = rsrc_data->width * 4;
-		en_cfg = 0x1;
-	}
-
-	cam_io_w_mb(width,
+	cam_io_w_mb(rsrc_data->width,
 		common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
-	cam_io_w(height,
+	cam_io_w(rsrc_data->height,
 		common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
-	cam_io_w(pack_fmt,
+	cam_io_w(rsrc_data->pack_fmt,
 		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
-	cam_io_w(stride,
-		common_data->mem_base + rsrc_data->hw_regs->stride);
 
 	cam_io_w(0xFFFFFFFF, common_data->mem_base +
 		rsrc_data->hw_regs->irq_subsample_pattern);
@@ -708,34 +744,14 @@
 	cam_io_w(0x0,
 		common_data->mem_base + rsrc_data->hw_regs->framedrop_period);
 
-	/* UBWC registers */
-	switch (rsrc_data->format) {
-	case CAM_FORMAT_UBWC_NV12:
-		/* Program UBWC registers */
-		break;
-	default:
-		break;
-	}
-
-	/* Subscribe IRQ */
-	if (rsrc_data->irq_enabled) {
-		/*
-		 * Currently all WM IRQ are subscribed in one place. Need to
-		 * make it dynamic later.
-		 */
-	}
-
-	/* Enable WM */
-	cam_io_w_mb(en_cfg, common_data->mem_base + rsrc_data->hw_regs->cfg);
-
 	CDBG("WM res %d width = %d, height = %d\n", rsrc_data->index,
-		width, height);
+		rsrc_data->width, rsrc_data->height);
 	CDBG("WM res %d pk_fmt = %d\n", rsrc_data->index,
-		pack_fmt & PACKER_FMT_MAX);
+		rsrc_data->pack_fmt & PACKER_FMT_MAX);
 	CDBG("WM res %d stride = %d, burst len = %d\n",
-		rsrc_data->index, stride, 0xf);
+		rsrc_data->index, rsrc_data->stride, 0xf);
 	CDBG("enable WM res %d offset 0x%x val 0x%x\n", rsrc_data->index,
-		(uint32_t) rsrc_data->hw_regs->cfg, en_cfg);
+		(uint32_t) rsrc_data->hw_regs->cfg, rsrc_data->en_cfg);
 
 	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
 
@@ -1622,10 +1638,11 @@
 {
 	struct cam_vfe_bus_ver2_priv             *bus_priv;
 	struct cam_isp_hw_get_buf_update         *update_buf;
+	struct cam_buf_io_cfg                    *io_cfg;
 	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
 	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
-	uint32_t  reg_val_pair[8];
-	uint32_t i, size = 0;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, size = 0;
 
 	/*
 	 * Need the entire buf io config so we can get the stride info
@@ -1643,14 +1660,181 @@
 		return -EINVAL;
 	}
 
-	if (update_buf->num_buf < vfe_out_data->num_wm) {
+	if (update_buf->num_buf != vfe_out_data->num_wm) {
 		pr_err("Failed! Invalid number buffers:%d required:%d\n",
 			update_buf->num_buf, vfe_out_data->num_wm);
 		return -ENOMEM;
 	}
 
-	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(
-		vfe_out_data->num_wm);
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	io_cfg = update_buf->io_cfg;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+
+		/* For initial configuration program all bus registers */
+		if (wm_data->stride != io_cfg->planes[i].plane_stride ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->stride,
+				io_cfg->planes[i].plane_stride);
+			wm_data->stride = io_cfg->planes[i].plane_stride;
+		}
+		CDBG("image stride 0x%x\n", wm_data->stride);
+
+		if (wm_data->framedrop_pattern != io_cfg->framedrop_pattern ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_pattern,
+				io_cfg->framedrop_pattern);
+			wm_data->framedrop_pattern = io_cfg->framedrop_pattern;
+		}
+		CDBG("framedrop pattern 0x%x\n", wm_data->framedrop_pattern);
+
+		if (wm_data->framedrop_period != io_cfg->framedrop_period ||
+			!wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_period,
+				io_cfg->framedrop_period);
+			wm_data->framedrop_period = io_cfg->framedrop_period;
+		}
+		CDBG("framedrop period 0x%x\n", wm_data->framedrop_period);
+
+		if (wm_data->irq_subsample_period != io_cfg->subsample_period
+			|| !wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_period,
+				io_cfg->subsample_period);
+			wm_data->irq_subsample_period =
+				io_cfg->subsample_period;
+		}
+		CDBG("irq subsample period 0x%x\n",
+			wm_data->irq_subsample_period);
+
+		if (wm_data->irq_subsample_pattern != io_cfg->subsample_pattern
+			|| !wm_data->init_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_pattern,
+				io_cfg->subsample_pattern);
+			wm_data->irq_subsample_pattern =
+				io_cfg->subsample_pattern;
+		}
+		CDBG("irq subsample pattern 0x%x\n",
+			wm_data->irq_subsample_pattern);
+
+		if (wm_data->en_ubwc) {
+			if (!wm_data->hw_regs->ubwc_regs) {
+				pr_err("%s: No UBWC register to configure.\n",
+					__func__);
+				return -EINVAL;
+			}
+			if (wm_data->packer_cfg !=
+				io_cfg->planes[i].packer_config ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->packer_cfg,
+					io_cfg->planes[i].packer_config);
+				wm_data->packer_cfg =
+					io_cfg->planes[i].packer_config;
+			}
+			CDBG("packer cfg 0x%x\n", wm_data->packer_cfg);
+
+			if (wm_data->tile_cfg != io_cfg->planes[i].tile_config
+				|| !wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->tile_cfg,
+					io_cfg->planes[i].tile_config);
+				wm_data->tile_cfg =
+					io_cfg->planes[i].tile_config;
+			}
+			CDBG("tile cfg 0x%x\n", wm_data->tile_cfg);
+
+			if (wm_data->h_init != io_cfg->planes[i].h_init ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->h_init,
+					io_cfg->planes[i].h_init);
+				wm_data->h_init = io_cfg->planes[i].h_init;
+			}
+			CDBG("h_init 0x%x\n", wm_data->h_init);
+
+			if (wm_data->v_init != io_cfg->planes[i].v_init ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->v_init,
+					io_cfg->planes[i].v_init);
+				wm_data->v_init = io_cfg->planes[i].v_init;
+			}
+			CDBG("v_init 0x%x\n", wm_data->v_init);
+
+			if (wm_data->ubwc_meta_stride !=
+				io_cfg->planes[i].meta_stride ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->
+					meta_stride,
+					io_cfg->planes[i].meta_stride);
+				wm_data->ubwc_meta_stride =
+					io_cfg->planes[i].meta_stride;
+			}
+			CDBG("meta stride 0x%x\n", wm_data->ubwc_meta_stride);
+
+			if (wm_data->ubwc_mode_cfg !=
+				io_cfg->planes[i].mode_config ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->mode_cfg,
+					io_cfg->planes[i].mode_config);
+				wm_data->ubwc_mode_cfg =
+					io_cfg->planes[i].mode_config;
+			}
+			CDBG("ubwc mode cfg 0x%x\n", wm_data->ubwc_mode_cfg);
+
+			if (wm_data->ubwc_meta_offset !=
+				io_cfg->planes[i].meta_offset ||
+				!wm_data->init_cfg_done) {
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->ubwc_regs->
+					meta_offset,
+					io_cfg->planes[i].meta_offset);
+				wm_data->ubwc_meta_offset =
+					io_cfg->planes[i].meta_offset;
+			}
+			CDBG("ubwc meta offset 0x%x\n",
+				wm_data->ubwc_meta_offset);
+
+			/* UBWC meta address */
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->ubwc_regs->meta_addr,
+				update_buf->image_buf[i]);
+			CDBG("ubwc meta addr 0x%llx\n",
+				update_buf->image_buf[i]);
+		}
+
+		/* WM Image address */
+		if (wm_data->en_ubwc)
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_addr,
+				(update_buf->image_buf[i] +
+				io_cfg->planes[i].meta_size));
+		else
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_addr,
+				update_buf->image_buf[i]);
+
+		CDBG("image address 0x%x\n", reg_val_pair[j-1]);
+
+		/* enable the WM */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->cfg,
+			wm_data->en_cfg);
+
+		/* set initial configuration done */
+		if (!wm_data->init_cfg_done)
+			wm_data->init_cfg_done = 1;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
 
 	/* cdm util returns dwords, need to convert to bytes */
 	if ((size * 4) > update_buf->cdm.size) {
@@ -1659,18 +1843,9 @@
 		return -ENOMEM;
 	}
 
-	for (i = 0 ; i < vfe_out_data->num_wm; i++) {
-		wm_data = vfe_out_data->wm_res[i]->res_priv;
-		reg_val_pair[2 * i] = wm_data->hw_regs->image_addr;
-		reg_val_pair[2 * i + 1] = update_buf->image_buf[i];
-		CDBG("offset 0x%x, value 0x%llx\n",
-			wm_data->hw_regs->image_addr,
-			(uint64_t) update_buf->image_buf[i]);
-	}
-
 	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
-		update_buf->cdm.cmd_buf_addr,
-		vfe_out_data->num_wm, reg_val_pair);
+		update_buf->cdm.cmd_buf_addr, j/2, reg_val_pair);
+
 	/* cdm util returns dwords, need to convert to bytes */
 	update_buf->cdm.used_bytes = size * 4;
 
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index e62c101..ed251eb 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -1092,10 +1092,9 @@
 	slot = &in_q->slot[in_q->wr_idx];
 
 	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
-		slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
-		CRM_ERR("in_q overwrite %d", slot->status);
-		/* @TODO: error handling */
-	}
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
+		CRM_WARN("in_q overwrite %d", slot->status);
+
 	CRM_DBG("sched_req %lld at slot %d",
 		sched_req->req_id, in_q->wr_idx);
 
@@ -1106,7 +1105,6 @@
 	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
 	mutex_unlock(&link->req.lock);
 
-	complete(&link->workq_comp);
 end:
 	return rc;
 }
@@ -1371,6 +1369,7 @@
 		goto end;
 	}
 
+	CRM_DBG("E: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 	link = (struct cam_req_mgr_core_link *)
 		cam_get_device_priv(add_req->link_hdl);
 
@@ -1404,6 +1403,7 @@
 	dev_req->dev_hdl = add_req->dev_hdl;
 	task->process_cb = &cam_req_mgr_process_add_req;
 	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	CRM_DBG("X: dev %x dev req %lld", add_req->dev_hdl, add_req->req_id);
 
 end:
 	return rc;
@@ -1813,7 +1813,8 @@
 	/* Create worker for current link */
 	snprintf(buf, sizeof(buf), "%x-%x",
 		link_info->session_hdl, link->link_hdl);
-	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS, &link->workq);
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		CRM_ERR("FATAL: unable to create worker");
 		__cam_req_mgr_destroy_link_info(link);
@@ -1919,11 +1920,10 @@
 			struct cam_req_mgr_sched_request *sched_req)
 {
 	int                               rc = 0;
-	struct crm_workq_task            *task = NULL;
 	struct cam_req_mgr_core_link     *link = NULL;
 	struct cam_req_mgr_core_session  *session = NULL;
 	struct cam_req_mgr_sched_request *sched;
-	struct crm_task_payload          *task_data;
+	struct crm_task_payload           task_data;
 
 	if (!sched_req) {
 		CRM_ERR("csl_req is NULL");
@@ -1942,14 +1942,10 @@
 		CRM_WARN("session ptr NULL %x", sched_req->link_hdl);
 		return -EINVAL;
 	}
+	CRM_DBG("link %x req %lld", sched_req->link_hdl, sched_req->req_id);
 
-	task = cam_req_mgr_workq_get_task(link->workq);
-	if (!task)
-		return -ENOMEM;
-
-	task_data = (struct crm_task_payload *)task->payload;
-	task_data->type = CRM_WORKQ_TASK_SCHED_REQ;
-	sched = (struct cam_req_mgr_sched_request *)&task_data->u;
+	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
 	sched->req_id = sched_req->req_id;
 	sched->link_hdl = sched_req->link_hdl;
 	if (session->force_err_recovery == AUTO_RECOVERY) {
@@ -1958,14 +1954,10 @@
 		sched->bubble_enable =
 		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
 	}
-	task->process_cb = &cam_req_mgr_process_sched_req;
-	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
 
-	/* Blocking call */
-	init_completion(&link->workq_comp);
-	rc = wait_for_completion_timeout(
-		&link->workq_comp,
-		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+	rc = cam_req_mgr_process_sched_req(link, &task_data);
+
+	CRM_DBG("DONE dev %x req %lld", sched_req->link_hdl, sched_req->req_id);
 end:
 	return rc;
 }
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 889ee9c..3ee0e2f 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -27,7 +27,7 @@
 #define FORCE_ENABLE_RECOVERY   1
 #define AUTO_RECOVERY           0
 
-#define CRM_WORKQ_NUM_TASKS 30
+#define CRM_WORKQ_NUM_TASKS 60
 
 /**
  * enum crm_workq_task_type
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
index 13affe9..1a8356a 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -461,6 +461,27 @@
 	return rc;
 }
 
+int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type)
+{
+	struct v4l2_event event;
+	struct cam_req_mgr_message *ev_header;
+
+	if (!msg)
+		return -EINVAL;
+
+	event.id = id;
+	event.type = type;
+	ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR(event,
+		struct cam_req_mgr_message);
+	memcpy(ev_header, msg, sizeof(struct cam_req_mgr_message));
+	v4l2_event_queue(g_dev.video, &event);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_req_mgr_notify_frame_message);
+
 void cam_video_device_cleanup(void)
 {
 	video_unregister_device(g_dev.video);
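
cam_req_mgr_notify_frame_message() above wraps a cam_req_mgr_message into a v4l2_event and queues it on the request-manager video node, so other camera subsystems can publish frame-level messages to userspace subscribers. A hedged caller fragment; the event_id/event_type values are whatever class the listener subscribed to and are not defined by this patch:

struct cam_req_mgr_message msg;
int rc;

memset(&msg, 0, sizeof(msg));
/* ... fill the fields defined by the cam_req_mgr UAPI ... */

rc = cam_req_mgr_notify_frame_message(&msg, event_id, event_type);
if (rc)
	pr_err("failed to queue frame message rc=%d\n", rc);
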
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
index 430e46e..77faed9 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
@@ -40,4 +40,11 @@
 	spinlock_t cam_eventq_lock;
 };
 
+#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type)        \
+	(type *)((char *)ev.u.data)
+
+int cam_req_mgr_notify_frame_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type);
+
 #endif /* _CAM_REQ_MGR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
index f53e41c..38dcb42 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -12,16 +12,30 @@
 
 #include "cam_req_mgr_workq.h"
 
+#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
+	else \
+		spin_lock_bh(&(workq)->lock_bh); \
+}
+
+#define WORKQ_RELEASE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
+	else	\
+		spin_unlock_bh(&(workq)->lock_bh); \
+}
 
 struct crm_workq_task *cam_req_mgr_workq_get_task(
 	struct cam_req_mgr_core_workq *workq)
 {
 	struct crm_workq_task *task = NULL;
+	unsigned long flags = 0;
 
 	if (!workq)
 		return NULL;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	if (list_empty(&workq->task.empty_head))
 		goto end;
 
@@ -33,7 +47,8 @@
 	}
 
 end:
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
+
 	return task;
 }
 
@@ -41,8 +56,9 @@
 {
 	struct cam_req_mgr_core_workq *workq =
 		(struct cam_req_mgr_core_workq *)task->parent;
+	unsigned long flags = 0;
 
-	spin_lock_bh(&workq->lock_bh);
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_del_init(&task->entry);
 	task->cancel = 0;
 	task->process_cb = NULL;
@@ -50,7 +66,7 @@
 	list_add_tail(&task->entry,
 		&workq->task.empty_head);
 	atomic_add(1, &workq->task.free_cnt);
-	spin_unlock_bh(&workq->lock_bh);
+	WORKQ_RELEASE_LOCK(workq, flags);
 }
 
 /**
@@ -131,6 +147,7 @@
 {
 	int rc = 0;
 	struct cam_req_mgr_core_workq *workq = NULL;
+	unsigned long flags = 0;
 
 	if (!task) {
 		CRM_WARN("NULL task pointer can not schedule");
@@ -148,24 +165,25 @@
 		goto end;
 	}
 
-	spin_lock_bh(&workq->lock_bh);
 	if (task->cancel == 1) {
 		cam_req_mgr_workq_put_task(task);
 		CRM_WARN("task aborted and queued back to pool");
 		rc = 0;
-		spin_unlock_bh(&workq->lock_bh);
 		goto end;
 	}
 	task->priv = priv;
 	task->priority =
 		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
 		? prio : CRM_TASK_PRIORITY_0;
+
+	WORKQ_ACQUIRE_LOCK(workq, flags);
 	list_add_tail(&task->entry,
 		&workq->task.process_head[task->priority]);
+	WORKQ_RELEASE_LOCK(workq, flags);
+
 	atomic_add(1, &workq->task.pending_cnt);
 	CRM_DBG("enq task %pK pending_cnt %d",
 		task, atomic_read(&workq->task.pending_cnt));
-	spin_unlock_bh(&workq->lock_bh);
 
 	queue_work(workq->job, &workq->work);
 
@@ -174,7 +192,7 @@
 }
 
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq)
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq)
 {
 	int32_t i;
 	struct crm_workq_task  *task;
@@ -209,6 +227,7 @@
 		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
 			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
 		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		crm_workq->in_irq = in_irq;
 		crm_workq->task.num_task = num_tasks;
 		crm_workq->task.pool = (struct crm_workq_task *)
 			kzalloc(sizeof(struct crm_workq_task) *
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
index 7d8ca59..eb3b804 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -25,9 +25,16 @@
 
 /* Task priorities, lower the number higher the priority*/
 enum crm_task_priority {
-	CRM_TASK_PRIORITY_0 = 0,
-	CRM_TASK_PRIORITY_1 = 1,
-	CRM_TASK_PRIORITY_MAX = 2,
+	CRM_TASK_PRIORITY_0,
+	CRM_TASK_PRIORITY_1,
+	CRM_TASK_PRIORITY_MAX,
+};
+
+/* workqueue will be used from irq context or not */
+enum crm_workq_context {
+	CRM_WORKQ_USAGE_NON_IRQ,
+	CRM_WORKQ_USAGE_IRQ,
+	CRM_WORKQ_USAGE_INVALID,
 };
 
 /** struct crm_workq_task
@@ -58,8 +65,9 @@
  * @work       : work token used by workqueue
  * @job        : workqueue internal job struct
  * task -
- * @lock       : lock for task structs
- * @free_cnt   :  num of free/available tasks
+ * @lock_bh    : lock for task structs
+ * @in_irq     : Set true if workqueue can be used in irq context
+ * @free_cnt   : num of free/available tasks
  * @empty_head : list head of available tasks which can be used
  *               or acquired in order to enqueue a task to workq
  * @pool       : pool of tasks used for handling events in workq context
@@ -70,6 +78,7 @@
 	struct work_struct         work;
 	struct workqueue_struct   *job;
 	spinlock_t                 lock_bh;
+	uint32_t                   in_irq;
 
 	/* tasks */
 	struct {
@@ -91,11 +100,12 @@
  *             of session handle and link handle
  * @num_task : Num_tasks to be allocated for workq
  * @workq    : Double pointer worker
+ * @in_irq   : Set to one if workq might be used in irq context
  * This function will allocate and create workqueue and pass
  * the workq pointer to caller.
  */
 int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
-	struct cam_req_mgr_core_workq **workq);
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq);
 
 /**
  * cam_req_mgr_workq_destroy()
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
index c837232..4888e5b 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -531,7 +531,8 @@
 	rc = camera_io_dev_read(
 		&(s_ctrl->io_master_info),
 		slave_info->sensor_id_reg_addr,
-		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD);
+		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+		CAMERA_SENSOR_I2C_TYPE_WORD);
 
 	CDBG("%s:%d read id: 0x%x expected id 0x%x:\n",
 			__func__, __LINE__, chipid, slave_info->sensor_id);
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
index bdae1d1..6292a9f 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -5,4 +5,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
index 1261c4b..06e8104 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -75,4 +75,64 @@
 	enum camera_sensor_i2c_type addr_type,
 	uint32_t delay_ms);
 
-#endif /* _CAM_SENSOR_I2C_H_ */
+
+/**
+ * cam_qup_i2c_read : QUP based I2C read
+ * @client    : QUP I2C client structure
+ * @addr      : I2C register address
+ * @data      : I2C data
+ * @addr_type : I2C address type
+ * @data_type : I2C data type
+ *
+ * This API handles QUP I2C read
+ */
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * cam_qup_i2c_read_seq : QUP based I2C sequential read
+ * @client    : QUP I2C client structure
+ * @addr      : I2C register address
+ * @data      : I2C data
+ * @addr_type : I2C address type
+ * @num_byte  : Number of bytes to read
+ *
+ * This API handles QUP I2C sequential read
+ */
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte);
+
+/**
+ * cam_qup_i2c_poll : QUP based I2C poll operation
+ * @client    : QUP I2C client structure
+ * @addr      : I2C address
+ * @data      : I2C data
+ * @data_mask : I2C data mask
+ * @data_type : I2C data type
+ * @addr_type : I2C addr type
+ * @delay_ms  : Delay in milli seconds
+ *
+ * This API implements QUP based I2C poll
+ */
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_write_table : QUP based I2C write random
+ * @client        : Camera IO master structure
+ * @write_setting : I2C register settings
+ *
+ * This API handles QUP I2C random write
+ */
+
+int32_t cam_qup_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+#endif /* _CAM_SENSOR_I2C_H_ */
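
These declarations mirror the CCI variants so camera_io_dev_* can dispatch on master_type (see cam_sensor_io.c below). A hedged example of a direct read through the QUP path, assuming an already-probed i2c_client and an illustrative register address:

uint32_t chipid = 0;
int rc;

rc = cam_qup_i2c_read(client, 0x0016 /* illustrative register */, &chipid,
	CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
if (rc < 0)
	pr_err("qup read failed rc=%d\n", rc);
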
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
index f889abc..3e1b331 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -29,6 +29,10 @@
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_poll(io_master_info->cci_client,
 			addr, data, mask, data_type, addr_type, delay_ms);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_poll(io_master_info->client,
+			addr, data, data_mask, addr_type, data_type,
+			delay_ms);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
@@ -38,6 +42,7 @@
 
 int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type)
 {
 	if (!io_master_info) {
@@ -47,7 +52,10 @@
 
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_read(io_master_info->cci_client,
-			addr, data, data_type, data_type);
+			addr, data, addr_type, data_type);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_read(io_master_info->client,
+			addr, data, addr_type, data_type);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
@@ -67,6 +75,9 @@
 	if (io_master_info->master_type == CCI_MASTER) {
 		return cam_cci_i2c_write_table(io_master_info,
 			write_setting);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_write_table(io_master_info,
+			write_setting);
 	} else {
 		pr_err("%s:%d Invalid Comm. Master:%d\n", __func__,
 			__LINE__, io_master_info->master_type);
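
Note: with the extra addr_type argument, camera_io_dev_read() callers now pick
the register-address width explicitly, and the same call works over CCI or QUP
depending on master_type. A minimal caller sketch (illustrative only; 0x0016
is a placeholder chip-ID register, not taken from this patch):

	uint32_t chip_id = 0;
	int32_t rc;

	rc = camera_io_dev_read(io_master_info, 0x0016, &chip_id,
		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
	if (rc < 0)
		pr_err("chip id read failed: %d\n", rc);
	else
		pr_debug("chip id: 0x%x\n", chip_id);
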
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
index 757ac17..f721afd 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -38,16 +38,33 @@
  * @io_master_info: I2C/SPI master information
  * @addr: I2C address
  * @data: I2C data
+ * @addr_type: I2C addr_type
  * @data_type: I2C data type
  *
  * This API abstracts read functionality based on master type
  */
 int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
 	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
 	enum camera_sensor_i2c_type data_type);
 
 /**
  * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr type
+ * @num_bytes: number of bytes
+ *
+ * This API abstracts sequential read functionality based on master type
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_bytes);
+
+
+/**
+ * @io_master_info: I2C/SPI master information
  *
  * This API initializes the I2C/SPI master based on master type
  */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
new file mode 100644
index 0000000..b25b1855
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_io.h"
+
+#define I2C_REG_DATA_MAX       (8*1024)
+#define I2C_REG_MAX_BUF_SIZE   8
+
+static int32_t cam_qup_i2c_rxdata(
+	struct i2c_client *dev_client, unsigned char *rxdata,
+	enum camera_sensor_i2c_type addr_type,
+	int data_length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->addr >> 1;
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = addr_type,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = data_length,
+			.buf   = rxdata,
+		},
+	};
+	rc = i2c_transfer(dev_client->adapter, msgs, 2);
+	if (rc < 0)
+		pr_err("%s:failed 0x%x\n", __func__, saddr);
+	return rc;
+}
+
+
+static int32_t cam_qup_i2c_txdata(
+	struct camera_io_master *dev_client, unsigned char *txdata,
+	int length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->client->addr >> 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+	if (rc < 0)
+		pr_err("%s: failed 0x%x\n", __func__, saddr);
+	return rc;
+}
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = -EINVAL;
+	unsigned char *buf = NULL;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("ERR: %s Failed with addr/data_type verfication\n",
+			__func__);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + data_type, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> 8;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
+	if (rc < 0) {
+		pr_err("%s fail\n", __func__);
+		goto read_fail;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	CDBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+read_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte)
+{
+	int32_t rc = -EFAULT;
+	unsigned char *buf = NULL;
+	int i;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		pr_err("ERR: %s Failed with addr_type verification\n",
+			__func__);
+		return rc;
+	}
+
+	if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
+		pr_err("%s: Error num_byte:0x%x max supported:0x%x\n",
+			__func__, num_byte, I2C_REG_DATA_MAX);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> BITS_PER_BYTE;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
+	if (rc < 0) {
+		pr_err("%s fail\n", __func__);
+		goto read_seq_fail;
+	}
+
+	for (i = 0; i < num_byte; i++)
+		data[i] = buf[i];
+
+read_seq_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+static int32_t cam_qup_i2c_compare(struct i2c_client *client,
+	uint32_t addr, uint32_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_qup_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;
+
+	reg_data = reg_data & 0xFFFF;
+	if (data != (reg_data & ~data_mask))
+		return I2C_COMPARE_MISMATCH;
+
+	return I2C_COMPARE_MATCH;
+}
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms)
+{
+	int32_t rc = 0;
+	int i = 0;
+
+	if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
+		pr_err("%s:%d invalid delay = %d max_delay = %d\n",
+			__func__, __LINE__, delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return -EINVAL;
+
+	for (i = 0; i < delay_ms; i++) {
+		rc = cam_qup_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (rc == I2C_COMPARE_MATCH)
+			return rc;
+
+		usleep_range(1000, 1010);
+	}
+	/* If rc is MISMATCH, the read succeeded but the poll did not match */
+	if (rc == I2C_COMPARE_MISMATCH)
+		pr_err("%s:%d poll failed rc=%d(non-fatal)\n",
+			__func__, __LINE__, rc);
+	if (rc < 0)
+		pr_err("%s:%d poll failed rc=%d\n", __func__, __LINE__, rc);
+
+	return rc;
+}
+
+static int32_t cam_qup_i2c_write(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_array *reg_setting,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = 0;
+	unsigned char buf[I2C_REG_MAX_BUF_SIZE];
+	uint8_t len = 0;
+
+	CDBG("%s reg addr = 0x%x data type: %d\n",
+			  __func__, reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len, buf[len]);
+		CDBG("%s byte %d: 0x%x\n", __func__,
+			len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		pr_err("%s: Invalid I2C addr type\n", __func__);
+		return -EINVAL;
+	}
+
+	CDBG("Data: 0x%x\n", reg_setting->reg_data);
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[len] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		len += 1;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[len] = reg_setting->reg_data >> 8;
+		buf[len+1] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		len += 2;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[len] = reg_setting->reg_data >> 16;
+		buf[len + 1] = reg_setting->reg_data >> 8;
+		buf[len + 2] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+		len += 3;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[len] = reg_setting->reg_data >> 24;
+		buf[len + 1] = reg_setting->reg_data >> 16;
+		buf[len + 2] = reg_setting->reg_data >> 8;
+		buf[len + 3] = reg_setting->reg_data;
+		CDBG("Byte %d: 0x%x\n", len, buf[len]);
+		CDBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+		CDBG("Byte %d: 0x%x\n", len+2, buf[len+2]);
+		CDBG("Byte %d: 0x%x\n", len+3, buf[len+3]);
+		len += 4;
+	} else {
+		pr_err("%s: Invalid Data Type\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		pr_err("%s fail\n", __func__);
+	return rc;
+}
+
+int32_t cam_qup_i2c_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = -EINVAL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		CDBG("%s addr 0x%x data 0x%x\n", __func__,
+			reg_setting->reg_addr, reg_setting->reg_data);
+
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
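
Note: cam_qup_i2c_rxdata() reuses one buffer for both halves of the transfer:
the register address is packed big-endian into the first addr_type bytes and
written in the first message, and the read data then lands in the same buffer.
A worked example of the packing for a 2-byte (WORD) address (illustrative
only):

	/* addr = 0x1234, addr_type = CAMERA_SENSOR_I2C_TYPE_WORD */
	uint32_t addr = 0x1234;
	unsigned char buf[2];

	buf[0] = addr >> 8;	/* 0x12 - MSB is sent first */
	buf[1] = addr;		/* 0x34 */
	/* msgs[0] writes buf[0..1]; msgs[1] reads the reply back into buf */
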
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
index 901632a..96f40e1 100644
--- a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -82,7 +82,7 @@
 		sync_cb->sync_obj = sync_obj;
 		INIT_WORK(&sync_cb->cb_dispatch_work,
 			cam_sync_util_cb_dispatch);
-
+		list_add_tail(&sync_cb->list, &row->callback_list);
 		sync_cb->status = row->state;
 		queue_work(sync_dev->work_queue,
 			&sync_cb->cb_dispatch_work);
diff --git a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
index 1e42f75..0ffea5b 100644
--- a/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
+++ b/drivers/media/platform/msm/camera/icp/fw_inc/hfi_intf.h
@@ -44,6 +44,7 @@
 	struct hfi_mem msg_q;
 	struct hfi_mem dbg_q;
 	struct hfi_mem sec_heap;
+	struct hfi_mem shmem;
 	void __iomem *icp_base;
 };
 
diff --git a/drivers/media/platform/msm/camera/icp/hfi.c b/drivers/media/platform/msm/camera/icp/hfi.c
index 4315865..15e0315 100644
--- a/drivers/media/platform/msm/camera/icp/hfi.c
+++ b/drivers/media/platform/msm/camera/icp/hfi.c
@@ -19,6 +19,8 @@
 #include <asm/errno.h>
 #include <linux/timer.h>
 #include <media/cam_icp.h>
+#include <linux/iopoll.h>
+
 #include "cam_io_util.h"
 #include "hfi_reg.h"
 #include "hfi_sys_defs.h"
@@ -336,7 +338,7 @@
 		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
 	} else {
 		cam_io_w((uint32_t)ICP_FLAG_CSR_A5_EN |
-			ICP_FLAG_CSR_WAKE_UP_EN,
+			ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI,
 			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
 	}
 
@@ -460,8 +462,10 @@
 	}
 
 	cam_io_w((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
-	cam_io_w((uint32_t)0x7400000, icp_base + HFI_REG_SHARED_MEM_PTR);
-	cam_io_w((uint32_t)0x6400000, icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
 	cam_io_w((uint32_t)hfi_mem->sec_heap.iova,
 		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
 	cam_io_w((uint32_t)hfi_mem->sec_heap.len,
@@ -472,25 +476,17 @@
 	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
 	pr_debug("hw version : %u[%x]\n", hw_version, hw_version);
 
-	do {
-		msleep(500);
-		status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
-	} while (status != ICP_INIT_RESP_SUCCESS);
-
-	if (status == ICP_INIT_RESP_SUCCESS) {
-		g_hfi->hfi_state = FW_RESP_DONE;
-		rc = 0;
-	} else {
-		rc = -ENODEV;
-		pr_err("FW initialization failed");
+	rc = readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+		status, status == ICP_INIT_RESP_SUCCESS, 15, 200);
+	if (rc) {
+		pr_err("timed out , status = %u\n", status);
 		goto regions_fail;
 	}
 
 	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
 	g_hfi->hfi_state = FW_START_SENT;
 
-	pr_debug("fw version : %u[%x]\n", fw_version, fw_version);
-	pr_debug("hfi init is successful\n");
+	HFI_DBG("fw version : %u[%x]\n", fw_version, fw_version);
 	cam_io_w((uint32_t)INTR_ENABLE, icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
 
 	return rc;
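
Note: readw_poll_timeout() re-reads the register and evaluates the supplied
condition until it becomes true or the timeout (in microseconds) expires,
returning 0 on success and -ETIMEDOUT otherwise. The open-coded equivalent of
the call above looks roughly like this sketch (illustrative only, using the
driver's 32-bit accessor rather than the helper's readw):

	u32 status;
	unsigned long waited_us = 0;
	int rc = 0;

	for (;;) {
		status = cam_io_r(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
		if (status == ICP_INIT_RESP_SUCCESS)
			break;			/* FW acked the init */
		if (waited_us >= 200) {
			rc = -ETIMEDOUT;	/* timeout_us exhausted */
			break;
		}
		usleep_range(15, 20);		/* sleep_us between reads */
		waited_us += 15;
	}
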
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 2fa39c8..43491a9 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -26,6 +26,8 @@
 #include <linux/debugfs.h>
 #include <media/cam_defs.h>
 #include <media/cam_icp.h>
+#include <linux/debugfs.h>
+
 #include "cam_sync_api.h"
 #include "cam_packet_util.h"
 #include "cam_hw.h"
@@ -55,6 +57,23 @@
 
 static struct cam_icp_hw_mgr icp_hw_mgr;
 
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+	icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+	if (!icp_hw_mgr.dentry)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("a5_debug",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.a5_debug)) {
+		debugfs_remove_recursive(icp_hw_mgr.dentry);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static int cam_icp_stop_cpas(struct cam_icp_hw_mgr *hw_mgr_priv)
 {
 	struct cam_hw_intf *a5_dev_intf = NULL;
@@ -568,7 +587,12 @@
 	uint64_t kvaddr;
 	size_t len;
 
-	pr_err("Allocating FW for iommu handle: %x\n", icp_hw_mgr.iommu_hdl);
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_MEM_MGR_REGION_SHARED,
+		&icp_hw_mgr.hfi_mem.shmem);
+	if (rc)
+		return -ENOMEM;
+
 	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
 		&iova, &kvaddr, &len);
 	if (rc < 0) {
@@ -764,7 +788,7 @@
 			msecs_to_jiffies((timeout)));
 	if (!rem_jiffies) {
 		rc = -ETIMEDOUT;
-		pr_err("timeout/err in iconfig command: %d\n", rc);
+		pr_err("FW response timeout: %d\n", rc);
 	}
 
 	return rc;
@@ -870,6 +894,7 @@
 
 	cam_icp_free_hfi_mem();
 	hw_mgr->fw_download = false;
+	debugfs_remove_recursive(icp_hw_mgr.dentry);
 	mutex_unlock(&hw_mgr->hw_mgr_mutex);
 
 	return 0;
@@ -886,6 +911,8 @@
 	struct cam_icp_a5_set_irq_cb irq_cb;
 	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
 	struct hfi_mem_info hfi_mem;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	if (!hw_mgr) {
@@ -1014,9 +1041,12 @@
 	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
 	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
 
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
 	rc = cam_hfi_init(0, &hfi_mem,
 		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
-		false);
+		hw_mgr->a5_debug);
 	if (rc < 0) {
 		pr_err("hfi_init is failed\n");
 		goto set_irq_failed;
@@ -1033,7 +1063,13 @@
 		NULL, 0);
 
 	ICP_DBG("Wait for INIT DONE Message\n");
-	wait_for_completion(&hw_mgr->a5_complete);
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+		goto set_irq_failed;
+	}
 
 	ICP_DBG("Done Waiting for INIT DONE Message\n");
 
@@ -1041,6 +1077,10 @@
 		a5_dev_intf->hw_priv,
 		CAM_ICP_A5_CMD_POWER_COLLAPSE,
 		NULL, 0);
+	if (rc) {
+		pr_err("icp power collapse failed\n");
+		goto set_irq_failed;
+	}
 
 	hw_mgr->fw_download = true;
 
@@ -1428,6 +1468,8 @@
 	int rc = 0;
 	struct hfi_cmd_work_data *task_data;
 	struct hfi_cmd_ipebps_async ioconfig_cmd;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 
 	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
 	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
@@ -1451,7 +1493,13 @@
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
 	ICP_DBG("fw_hdl = %x ctx_data = %pK\n", ctx_data->fw_handle, ctx_data);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
 
 	return rc;
 }
@@ -1462,6 +1510,8 @@
 {
 	struct hfi_cmd_create_handle create_handle;
 	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	create_handle.size = sizeof(struct hfi_cmd_create_handle);
@@ -1479,7 +1529,13 @@
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
 
 	return rc;
 }
@@ -1489,6 +1545,8 @@
 {
 	struct hfi_cmd_ping_pkt ping_pkt;
 	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
 	int rc = 0;
 
 	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
@@ -1505,7 +1563,14 @@
 	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
 	task->process_cb = cam_icp_mgr_process_cmd;
 	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr, CRM_TASK_PRIORITY_0);
-	wait_for_completion(&ctx_data->wait_complete);
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		pr_err("FW response timed out %d\n", rc);
+	}
+
 
 	return rc;
 }
@@ -1904,14 +1969,14 @@
 	}
 
 	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.cmd_work);
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto cmd_work_failed;
 	}
 
 	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
-					&icp_hw_mgr.msg_work);
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ);
 	if (rc < 0) {
 		pr_err("unable to create a worker\n");
 		goto msg_work_failed;
@@ -1929,6 +1994,9 @@
 	if (!icp_hw_mgr.msg_work_data)
 		goto msg_work_data_failed;
 
+	rc = cam_icp_hw_mgr_create_debugfs_entry();
+	if (rc)
+		goto msg_work_data_failed;
 
 	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
 		icp_hw_mgr.msg_work->task.pool[i].payload =
@@ -1940,7 +2008,6 @@
 
 	init_completion(&icp_hw_mgr.a5_complete);
 
-	pr_err("Exit\n");
 	return rc;
 
 msg_work_data_failed:
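
Note: each of the wait_for_completion_timeout() additions above repeats the
same 5-second FW-response pattern; a hypothetical helper capturing it would
look like the sketch below (illustrative only, not part of this patch):

	static int cam_icp_wait_fw_resp(struct completion *done, int timeout_ms)
	{
		unsigned long rem;

		rem = wait_for_completion_timeout(done,
				msecs_to_jiffies(timeout_ms));
		if (!rem) {
			pr_err("FW response timed out\n");
			return -ETIMEDOUT;
		}
		return 0;
	}
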
diff --git a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index e5ffa7a..32d796a 100644
--- a/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -23,6 +23,8 @@
 #include "hfi_session_defs.h"
 #include "cam_req_mgr_workq.h"
 #include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+
 
 #define CAM_ICP_ROLE_PARENT     1
 #define CAM_ICP_ROLE_CHILD      2
@@ -56,6 +58,7 @@
 	struct cam_mem_mgr_memory_desc dbg_q;
 	struct cam_mem_mgr_memory_desc sec_heap;
 	struct cam_mem_mgr_memory_desc fw_buf;
+	struct cam_smmu_region_info shmem;
 };
 
 /**
@@ -176,6 +179,8 @@
 	struct hfi_cmd_work_data *cmd_work_data;
 	struct hfi_msg_work_data *msg_work_data;
 	uint32_t ctxt_cnt;
+	struct dentry *dentry;
+	bool a5_debug;
 };
 
 #endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 34243e6..15b8a2d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -227,9 +227,10 @@
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 ot_lim;
-	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
-		params->reg_off_vbif_lim_conf;
-	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+	u32 reg_off_vbif_lim_conf = ((params->xin_id / mdata->npriority_lvl)
+					* mdata->npriority_lvl)
+					+ params->reg_off_vbif_lim_conf;
+	u32 bit_off_vbif_lim_conf = (params->xin_id % mdata->npriority_lvl) * 8;
 	u32 reg_val;
 	u32 sts;
 	bool forced_on;
@@ -420,6 +421,136 @@
 	}
 }
 
+static void sde_mdp_parse_cdp_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[SDE_ROT_OP_MAX] = {0};
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-rot-cdp-setting");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-cdp-setting", data, len);
+		if (rc) {
+			SDEROT_ERR("invalid CDP setting\n");
+			goto end;
+		}
+
+		set_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+		mdata->enable_cdp[SDE_ROT_RD] = data[SDE_ROT_RD];
+		mdata->enable_cdp[SDE_ROT_WR] = data[SDE_ROT_WR];
+		return;
+	}
+end:
+	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+}
+
+static void sde_mdp_parse_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_QOS_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->lut_cfg[SDE_ROT_RD].safe_lut = data[SDE_ROT_RD];
+			mdata->lut_cfg[SDE_ROT_WR].safe_lut = data[SDE_ROT_WR];
+			set_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map);
+		} else {
+			SDEROT_DBG("safe lut setting not found\n");
+		}
+	}
+}
+
+static void sde_mdp_parse_inline_rot_lut_setting(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 len, data[4];
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-qos-lut");
+	if (len == 4) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-qos-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0 = data[0];
+			mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1 = data[1];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0 = data[2];
+			mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1 = data[3];
+			set_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline qos lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-inline-rot-danger-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-danger-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline danger lut setting not found\n");
+		}
+	}
+
+	len = sde_mdp_parse_dt_prop_len(pdev, "qcom,mdss-inline-rot-safe-lut");
+	if (len == SDE_ROT_OP_MAX) {
+		rc = sde_mdp_parse_dt_handler(pdev,
+			"qcom,mdss-inline-rot-safe-lut", data, len);
+		if (!rc) {
+			mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut
+							= data[SDE_ROT_RD];
+			mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut
+							= data[SDE_ROT_WR];
+			set_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map);
+		} else {
+			SDEROT_DBG("inline safe lut setting not found\n");
+		}
+	}
+}
+
 static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
 		struct sde_rot_data_type *mdata)
 {
@@ -444,8 +575,14 @@
 		SDEROT_DBG(
 			"Could not read optional property: highest bank bit\n");
 
+	sde_mdp_parse_cdp_setting(pdev, mdata);
+
 	sde_mdp_parse_vbif_qos(pdev, mdata);
 
+	sde_mdp_parse_rot_lut_setting(pdev, mdata);
+
+	sde_mdp_parse_inline_rot_lut_setting(pdev, mdata);
+
 	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
 
 	return 0;
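
Note: each parser above only latches values (and sets the corresponding qos
bit) when the property length matches the expected element count, so a
partially specified LUT is ignored rather than half-applied. Assuming
sde_mdp_parse_dt_handler() wraps of_property_read_u32_array(), the equivalent
raw read looks like this sketch (illustrative only; property values are
examples):

	u32 cdp[SDE_ROT_OP_MAX] = {0};
	int rc;

	/* e.g. qcom,mdss-rot-cdp-setting = <1 1>;  read then write client */
	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,mdss-rot-cdp-setting", cdp, SDE_ROT_OP_MAX);
	if (!rc)
		SDEROT_DBG("cdp rd:%u wr:%u\n",
				cdp[SDE_ROT_RD], cdp[SDE_ROT_WR]);
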
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 9194b44..313c709 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -40,6 +40,9 @@
 #define SDE_MDP_HW_REV_301	SDE_MDP_REV(3, 0, 1)	/* 8998 v1.1 */
 #define SDE_MDP_HW_REV_400	SDE_MDP_REV(4, 0, 0)	/* sdm845 v1.0 */
 
+#define SDE_MDP_VBIF_4_LEVEL_REMAPPER	4
+#define SDE_MDP_VBIF_8_LEVEL_REMAPPER	8
+
 struct sde_mult_factor {
 	uint32_t numer;
 	uint32_t denom;
@@ -77,9 +80,19 @@
 	SDE_QOS_PER_PIPE_LUT,
 	SDE_QOS_SIMPLIFIED_PREFILL,
 	SDE_QOS_VBLANK_PANIC_CTRL,
+	SDE_QOS_LUT,
+	SDE_QOS_DANGER_LUT,
+	SDE_QOS_SAFE_LUT,
 	SDE_QOS_MAX,
 };
 
+enum sde_inline_qos_settings {
+	SDE_INLINE_QOS_LUT,
+	SDE_INLINE_QOS_DANGER_LUT,
+	SDE_INLINE_QOS_SAFE_LUT,
+	SDE_INLINE_QOS_MAX,
+};
+
 /**
  * enum sde_rot_type: SDE rotator HW version
  * @SDE_ROT_TYPE_V1_0: V1.0 HW version
@@ -98,6 +111,7 @@
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
+ * @SDE_CAPS_PARTIALWR: partial write override
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
@@ -106,6 +120,7 @@
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 	SDE_CAPS_SBUF_1,
 	SDE_CAPS_UBWC_2,
+	SDE_CAPS_PARTIALWR,
 	SDE_CAPS_MAX,
 };
 
@@ -115,6 +130,12 @@
 	SDE_MAX_BUS_CLIENTS
 };
 
+enum sde_rot_op {
+	SDE_ROT_RD,
+	SDE_ROT_WR,
+	SDE_ROT_OP_MAX
+};
+
 enum sde_rot_regdump_access {
 	SDE_ROT_REGDUMP_READ,
 	SDE_ROT_REGDUMP_WRITE,
@@ -165,6 +186,13 @@
 	enum sde_rot_regdump_access access;
 };
 
+struct sde_rot_lut_cfg {
+	u32 creq_lut_0;
+	u32 creq_lut_1;
+	u32 danger_lut;
+	u32 safe_lut;
+};
+
 struct sde_rot_data_type {
 	u32 mdss_version;
 
@@ -177,6 +205,7 @@
 
 	/* bitmap to track qos applicable settings */
 	DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
+	DECLARE_BITMAP(sde_inline_qos_map, SDE_QOS_MAX);
 
 	/* bitmap to track capability settings */
 	DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
@@ -210,6 +239,11 @@
 	void *sde_rot_hw;
 	int sec_cam_en;
 
+	u32 enable_cdp[SDE_ROT_OP_MAX];
+
+	struct sde_rot_lut_cfg lut_cfg[SDE_ROT_OP_MAX];
+	struct sde_rot_lut_cfg inline_lut_cfg[SDE_ROT_OP_MAX];
+
 	struct ion_client *iclient;
 
 	bool clk_always_on;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 30fda07..44a29aa 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -60,6 +60,9 @@
 /* waiting for hw time out, 3 vsync for 30fps*/
 #define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
 
+/* waiting for inline hw start */
+#define ROT_INLINE_START_TIMEOUT_IN_MS 2000
+
 /* default pixel per clock ratio */
 #define ROT_PIXEL_PER_CLK_NUMERATOR	36
 #define ROT_PIXEL_PER_CLK_DENOMINATOR	10
@@ -299,13 +302,13 @@
 	return 0;
 }
 
-static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
+static int sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
 	int ret;
 
-	if (WARN_ON(mgr->regulator_enable == on)) {
+	if (mgr->regulator_enable == on) {
 		SDEROT_ERR("Regulators already in selected mode on=%d\n", on);
-		return;
+		return 0;
 	}
 
 	SDEROT_EVTLOG(on);
@@ -327,9 +330,9 @@
 		ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
 			mgr->module_power.num_vreg, on);
 	if (ret) {
-		SDEROT_WARN("Rotator regulator failed to %s\n",
-			on ? "enable" : "disable");
-		return;
+		pr_err("rotator regulator failed to %s ret:%d client:%d\n",
+		      on ? "enable" : "disable", ret, mgr->rsc_client != NULL);
+		return ret;
 	}
 
 	if (mgr->ops_hw_post_pmevent)
@@ -341,6 +344,7 @@
 	}
 
 	mgr->regulator_enable = on;
+	return 0;
 }
 
 static int sde_rotator_enable_clk(struct sde_rot_mgr *mgr, int clk_idx)
@@ -1508,6 +1512,8 @@
 	if (entry->item.ts)
 		entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();
 
+	SDEROT_EVTLOG(entry->item.session_id, 1);
+
 	queue_work(entry->doneq->rot_work_queue, &entry->done_work);
 	sde_rot_mgr_unlock(mgr);
 	return;
@@ -1564,6 +1570,13 @@
 		entry->item.flags,
 		entry->dnsc_factor_w, entry->dnsc_factor_h);
 
+	wait_for_completion_timeout(
+			&entry->item.inline_start,
+			msecs_to_jiffies(ROT_INLINE_START_TIMEOUT_IN_MS));
+
+	if (entry->item.ts)
+		entry->item.ts[SDE_ROTATOR_TS_START] = ktime_get();
+
 	SDEROT_EVTLOG(entry->item.session_id, 0);
 	ret = mgr->ops_wait_for_entry(hw, entry);
 	if (ret) {
@@ -2332,11 +2345,36 @@
 	for (i = 0; i < count; i++) {
 		req->entries[i].item = items[i];
 		req->entries[i].private = private;
+
+		init_completion(&req->entries[i].item.inline_start);
+		complete_all(&req->entries[i].item.inline_start);
 	}
 
 	return req;
 }
 
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		reinit_completion(&req->entries[i].item.inline_start);
+}
+
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!req)
+		return;
+
+	for (i = 0; i < req->count; i++)
+		complete_all(&req->entries[i].item.inline_start);
+}
+
 void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rot_entry_container *req)
@@ -2885,12 +2923,11 @@
 	}
 
 	*pmgr = mgr;
-
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	if (!pm_runtime_enabled(&pdev->dev)) {
-		SDEROT_ERR("fail to enable power, force on\n");
-		sde_rotator_footswitch_ctrl(mgr, true);
+	ret = sde_rotator_footswitch_ctrl(mgr, true);
+	if (ret) {
+		SDEROT_ERR("res_init failed %d\n", ret);
+		ret = -EPROBE_DEFER;
+		goto error_fs_en_fail;
 	}
 
 	/* enable power and clock before h/w initialization/query */
@@ -2931,6 +2968,9 @@
 	/* disable power and clock after h/w initialization/query */
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
+	sde_rotator_footswitch_ctrl(mgr, false);
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
 	return 0;
 
@@ -2940,7 +2980,8 @@
 error_map_hw_ops:
 	sde_rotator_clk_ctrl(mgr, false);
 	sde_rotator_resource_ctrl(mgr, false);
-	pm_runtime_disable(mgr->device);
+	sde_rotator_footswitch_ctrl(mgr, false);
+error_fs_en_fail:
 	sde_rotator_res_destroy(mgr);
 error_res_init:
 error_parse_dt:
@@ -3024,8 +3065,7 @@
 
 	SDEROT_DBG("begin runtime_active\n");
 	ATRACE_BEGIN("runtime_active");
-	sde_rotator_footswitch_ctrl(mgr, true);
-	return 0;
+	return sde_rotator_footswitch_ctrl(mgr, true);
 }
 
 /*
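
Note: the inline_start completion is created signalled so offline requests
never block, re-armed when an inline commit is queued, and signalled again by
the START command; the hardware wait path then sleeps on it for at most
ROT_INLINE_START_TIMEOUT_IN_MS before arming its own done-wait. The pattern in
isolation (illustrative only; the stages below live in different call paths in
the driver):

	struct completion inline_start;

	init_completion(&inline_start);
	complete_all(&inline_start);	/* default: offline path, no gating */

	reinit_completion(&inline_start);	/* INLINE_CMD_COMMIT: arm gate */

	complete_all(&inline_start);	/* INLINE_CMD_START: release waiters */

	wait_for_completion_timeout(&inline_start,	/* h/w wait path */
			msecs_to_jiffies(ROT_INLINE_START_TIMEOUT_IN_MS));
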
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 0051e96..7b8a066 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/cdev.h>
 #include <linux/pm_runtime.h>
+#include <linux/completion.h>
 
 #include "sde_rotator_base.h"
 #include "sde_rotator_util.h"
@@ -115,6 +116,7 @@
 	SDE_ROTATOR_TS_QUEUE,		/* wait for h/w resource */
 	SDE_ROTATOR_TS_COMMIT,		/* prepare h/w command */
 	SDE_ROTATOR_TS_FLUSH,		/* initiate h/w processing */
+	SDE_ROTATOR_TS_START,		/* h/w triggered (if inline) */
 	SDE_ROTATOR_TS_DONE,		/* receive h/w completion */
 	SDE_ROTATOR_TS_RETIRE,		/* signal destination buffer fence */
 	SDE_ROTATOR_TS_SRCDQB,		/* dequeue source buffer */
@@ -199,6 +201,9 @@
 
 	/* Time stamp for profiling purposes */
 	ktime_t		*ts;
+
+	/* Completion structure for inline rotation */
+	struct completion inline_start;
 };
 
 /*
@@ -604,6 +609,23 @@
 	u32 count, u32 flags);
 
 /*
+ * sde_rotator_req_reset_start - reset inline h/w 'start' indicator
+ *	For inline rotations, the time of rotation start is not controlled
+ *	by the rotator driver. This function resets an internal 'start'
+ *	indicator so that the rotator defers its hardware wait
+ *	timeout until the inline rotation has actually
+ *	started.
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_reset_start(struct sde_rot_entry_container *req);
+
+/*
+ * sde_rotator_req_set_start - set inline h/w 'start' indicator
+ * @req: Pointer to rotation request
+ */
+void sde_rotator_req_set_start(struct sde_rot_entry_container *req);
+
+/*
  * sde_rotator_req_finish - notify manager that client is finished with the
  *	given request and manager can release the request as required
  * @rot_dev: Pointer to rotator device
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index e9ff67c..3e686e9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -789,7 +789,7 @@
 					start_time));
 
 		seq_printf(s,
-			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
+			"s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld st:%lld d:%lld sdq:%lld ddq:%lld t:%lld oht:%lld\n",
 			i,
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
 					ts[SDE_ROTATOR_TS_SRCQB])),
@@ -801,8 +801,10 @@
 					ts[SDE_ROTATOR_TS_QUEUE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
 					ts[SDE_ROTATOR_TS_COMMIT])),
-			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_START],
 					ts[SDE_ROTATOR_TS_FLUSH])),
+			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+					ts[SDE_ROTATOR_TS_START])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
 					ts[SDE_ROTATOR_TS_DONE])),
 			ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 90b7194..2e91d54 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1467,7 +1467,9 @@
 		int scid = llcc_get_slice_id(ctx->slice);
 
 		/* allocate slot for timestamp */
-		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+		ts = stats->ts[stats->count % SDE_ROTATOR_NUM_EVENTS];
+		if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT)
+			stats->count++;
 
 		if (cmd->rot90)
 			flags |= SDE_ROTATION_90;
@@ -1637,6 +1639,8 @@
 			goto error_handle_request;
 		}
 
+		sde_rotator_req_reset_start(req);
+
 		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
 
 		request->committed = true;
@@ -1644,6 +1648,15 @@
 		/* save request in private handle */
 		cmd->priv_handle = request;
 
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_START) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		sde_rotator_req_set_start(request->req);
 	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
 		if (!cmd->priv_handle) {
 			ret = -EINVAL;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
index 051db78..de448a4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -65,6 +65,8 @@
 #define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2		0x00C8
 #define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0		0x00D0
 #define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000		0x0550
+#define MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000		0x0590
 
 #define SDE_MDP_REG_TRAFFIC_SHAPER_EN			BIT(31)
 #define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + (num * 4))
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index 27fd0c3..705eb27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -25,11 +25,13 @@
  * enum sde_rotator_inline_cmd_type - inline rotator command stages
  * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
  * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_START: ready to start inline rotation
  * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
  */
 enum sde_rotator_inline_cmd_type {
 	SDE_ROTATOR_INLINE_CMD_VALIDATE,
 	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_START,
 	SDE_ROTATOR_INLINE_CMD_CLEANUP,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 980df9f..b582934 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -351,12 +351,12 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
 	SDE_PIX_FMT_RGBA_1010102,
 	SDE_PIX_FMT_RGBX_1010102,
-	/* SDE_PIX_FMT_ARGB_2101010 */
-	/* SDE_PIX_FMT_XRGB_2101010 */
+	SDE_PIX_FMT_ARGB_2101010,
+	SDE_PIX_FMT_XRGB_2101010,
 	SDE_PIX_FMT_BGRA_1010102,
 	SDE_PIX_FMT_BGRX_1010102,
-	/* SDE_PIX_FMT_ABGR_2101010 */
-	/* SDE_PIX_FMT_XBGR_2101010 */
+	SDE_PIX_FMT_ABGR_2101010,
+	SDE_PIX_FMT_XBGR_2101010,
 	SDE_PIX_FMT_RGBA_1010102_UBWC,
 	SDE_PIX_FMT_RGBX_1010102_UBWC,
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
@@ -493,6 +493,12 @@
 		SDE_ROT_REGDUMP_VBIF },
 };
 
+struct sde_rot_cdp_params {
+	bool enable;
+	struct sde_mdp_format_params *fmt;
+	u32 offset;
+};
+
 /* Invalid software timestamp value for initialization */
 #define SDE_REGDMA_SWTS_INVALID	(~0)
 
@@ -675,9 +681,13 @@
 /**
  * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
  * on provided session_id. Each rotator has a different session_id.
+ * @rot: Pointer to rotator hw
+ * @session_id: Identifier for rotator session
+ * @sequence_id: Identifier for rotation request within the session
+ * @q_id: Rotator queue identifier
  */
 static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
-		struct sde_hw_rotator *rot, u32 session_id,
+		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
 		enum sde_rot_queue_prio q_id)
 {
 	int i;
@@ -686,10 +696,12 @@
 	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
 		ctx = rot->rotCtx[q_id][i];
 
-		if (ctx && (ctx->session_id == session_id)) {
+		if (ctx && (ctx->session_id == session_id) &&
+				(ctx->sequence_id == sequence_id)) {
 			SDEROT_DBG(
-				"rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d\n",
-				q_id, i, ctx, ctx->session_id);
+				"rotCtx sloti[%d][%d] ==> ctx:%p | session-id:%d | sequence-id:%d\n",
+				q_id, i, ctx, ctx->session_id,
+				ctx->sequence_id);
 			return ctx;
 		}
 	}
@@ -741,6 +753,76 @@
 }
 
 /*
+ * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
+ * levels, enable write gather for the writeback client, and optionally
+ * disable clock gating for debug purposes.
+ *
+ * @rot: Pointer to rotator hw
+ */
+static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
+{
+	u32 i, mask, vbif_qos, reg_val = 0;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	/* VBIF_ROT QoS remapper setting */
+	switch (mdata->npriority_lvl) {
+
+	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			reg_val = SDE_VBIF_READ(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
+			mask = 0x3 << (XIN_SSPP * 2);
+			vbif_qos = mdata->vbif_nrt_qos[i];
+			reg_val = (reg_val & ~mask) | (vbif_qos << (XIN_SSPP * 2));
+			/* ensure write is issued after the read operation */
+			mb();
+			SDE_VBIF_WRITE(mdata,
+					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
+					reg_val);
+		}
+		break;
+
+	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
+		mask = mdata->npriority_lvl - 1;
+		for (i = 0; i < mdata->npriority_lvl; i++) {
+			/* RD and WR client */
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_SSPP * 4);
+			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
+							<< (XIN_WRITEBACK * 4);
+
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
+				reg_val);
+			SDE_VBIF_WRITE(mdata,
+				MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
+				reg_val);
+		}
+		break;
+
+	default:
+		SDEROT_DBG("invalid vbif remapper levels\n");
+	}
+
+	/* Enable write gather for writeback to remove write gaps, which
+	 * may hang AXI/BIMC/SDE.
+	 */
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
+			BIT(XIN_WRITEBACK));
+
+	/*
+	 * For debug purpose, disable clock gating, i.e. Clocks always on
+	 */
+	if (mdata->clk_always_on) {
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
+		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
+				0xFFFF);
+		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
+	}
+}
+
+/*
  * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
  * @ctx: Pointer to rotator context
  * @mask: Bit mask location of the timestamp
@@ -796,6 +878,156 @@
 }
 
 /*
+ * sde_hw_rotator_cdp_configs - configures the CDP registers
+ * @ctx: Pointer to rotator context
+ * @params: Pointer to parameters needed for CDP configs
+ */
+static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
+		struct sde_rot_cdp_params *params)
+{
+	int reg_val;
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	if (!params->enable) {
+		SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
+		goto end;
+	}
+
+	reg_val = BIT(0); /* enable cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt))
+		reg_val |= BIT(1); /* enable UBWC meta cdp */
+
+	if (sde_mdp_is_ubwc_format(params->fmt)
+			|| sde_mdp_is_tilea4x_format(params->fmt)
+			|| sde_mdp_is_tilea5x_format(params->fmt))
+		reg_val |= BIT(2); /* enable tile amortize */
+
+	reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
+
+	SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
+
+end:
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the WRITEBACK rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_WR].safe_lut);
+
+	/* Inline rotation setting */
+	} else {
+		/* QOS LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
+		}
+
+		/* Danger LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
+
+		/* Safe LUT WR setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
+ * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
+ * for the SSPP rotator for inline and offline rotation.
+ *
+ * @ctx: Pointer to rotator context
+ */
+static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
+	/* Offline rotation setting */
+	if (!ctx->sbuf_mode) {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+					mdata->lut_cfg[SDE_ROT_RD].safe_lut);
+
+	/* inline rotation setting */
+	} else {
+		/* QOS LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
+				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
+		}
+
+		/* Danger LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
+
+		/* Safe LUT RD setting */
+		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
+					mdata->sde_inline_qos_map))
+			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
+				mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
+	}
+
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+}
+
+/*
  * sde_hw_rotator_setup_fetchengine - setup fetch engine
  * @ctx: Pointer to rotator context
  * @queue_id: Priority queue identifier
@@ -814,6 +1046,7 @@
 	struct sde_hw_rotator *rot = ctx->rot;
 	struct sde_mdp_format_params *fmt;
 	struct sde_mdp_data *data;
+	struct sde_rot_cdp_params cdp_params = {0};
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	u32 *wrptr;
 	u32 opmode = 0;
@@ -985,13 +1218,29 @@
 		ctx->is_secure = false;
 	}
 
+	/* Update command queue write ptr */
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register RD setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					 mdata->enable_cdp[SDE_ROT_RD] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_SSPP_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QOS LUT/ Danger LUT/ Safe Lut WR setting */
+	sde_hw_rotator_setup_qos_lut_rd(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/*
 	 * Determine if traffic shaping is required. Only enable traffic
 	 * shaping when content is 4k@30fps. The actual traffic shaping
 	 * bandwidth calculation is done in output setup.
 	 */
-	if (((cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD) &&
-			(cfg->fps <= 30)) {
+	if (((!ctx->sbuf_mode)
+			&& (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
+			&& (cfg->fps <= 30)) {
 		SDEROT_DBG("Enable Traffic Shaper\n");
 		ctx->is_traffic_shaping = true;
 	} else {
@@ -1017,9 +1266,11 @@
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
+	struct sde_rot_cdp_params cdp_params = {0};
 	u32 *wrptr;
 	u32 pack = 0;
 	u32 dst_format = 0;
+	u32 partial_write = 0;
 	int i;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
@@ -1103,8 +1354,13 @@
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
+	/* partial write check */
+	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) &&
+			!sde_mdp_is_ubwc_format(fmt))
+		partial_write = BIT(10);
+
 	/* write config setup for bank configuration */
-	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
+	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write |
 			(ctx->rot->highest_bank & 0x3) << 8);
 
 	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
@@ -1120,8 +1376,23 @@
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
 			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
 
+	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
+
+	/* CDP register WR setting */
+	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
+					mdata->enable_cdp[SDE_ROT_WR] : false;
+	cdp_params.fmt = fmt;
+	cdp_params.offset = ROT_WB_CDP_CNTL;
+	sde_hw_rotator_cdp_configs(ctx, &cdp_params);
+
+	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
+	sde_hw_rotator_setup_qos_lut_wr(ctx);
+
+	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
+
 	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
-	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
+	if (!ctx->sbuf_mode &&
+			(ctx->is_traffic_shaping || cfg->prefill_bw)) {
 		u32 bw;
 
 		/*
@@ -1849,6 +2120,7 @@
  * @rot: Pointer to rotator hw
  * @hw: Pointer to rotator resource
  * @session_id: Session identifier of this context
+ * @sequence_id: Sequence identifier of this request
  * @sbuf_mode: true if stream buffer is requested
  *
  * This function allocates a new rotator context for the given session id.
@@ -1857,6 +2129,7 @@
 		struct sde_hw_rotator *rot,
 		struct sde_rot_hw_resource *hw,
 		u32    session_id,
+		u32    sequence_id,
 		bool   sbuf_mode)
 {
 	struct sde_hw_rotator_context *ctx;
@@ -1871,6 +2144,7 @@
 	ctx->rot        = rot;
 	ctx->q_id       = hw->wb_id;
 	ctx->session_id = session_id;
+	ctx->sequence_id = sequence_id;
 	ctx->hwres      = hw;
 	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
 	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
@@ -1961,7 +2235,7 @@
 	item = &entry->item;
 
 	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
-			item->output.sbuf);
+			item->sequence_id, item->output.sbuf);
 	if (!ctx) {
 		SDEROT_ERR("Failed allocating rotator context!!\n");
 		return -EINVAL;
@@ -2136,7 +2410,7 @@
 			item->input.format, item->output.format,
 			entry->perf->config.frame_rate);
 
-	if (mdata->default_ot_rd_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2158,7 +2432,7 @@
 		sde_mdp_set_ot_limit(&ot_params);
 	}
 
-	if (mdata->default_ot_wr_limit) {
+	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
 		struct sde_mdp_set_ot_params ot_params;
 
 		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
@@ -2189,46 +2463,9 @@
 		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
 	}
 
-	/* Set CDP control registers to 0 if CDP is disabled */
-	if (!test_bit(SDE_QOS_CDP, mdata->sde_qos_map)) {
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CDP_CNTL, 0x0);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROT_WB_CDP_CNTL, 0x0);
-	}
-
-	if (mdata->npriority_lvl > 0) {
-		u32 mask, reg_val, i, vbif_qos;
-
-		for (i = 0; i < mdata->npriority_lvl; i++) {
-			reg_val = SDE_VBIF_READ(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
-			mask = 0x3 << (XIN_SSPP * 2);
-			reg_val &= ~(mask);
-			vbif_qos = mdata->vbif_nrt_qos[i];
-			reg_val |= vbif_qos << (XIN_SSPP * 2);
-			/* ensure write is issued after the read operation */
-			mb();
-			SDE_VBIF_WRITE(mdata,
-					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
-					reg_val);
-		}
-	}
-
-	/* Enable write gather for writeback to remove write gaps, which
-	 * may hang AXI/BIMC/SDE.
-	 */
-	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
-			BIT(XIN_WRITEBACK));
-
-	/*
-	 * For debug purpose, disable clock gating, i.e. Clocks always on
-	 */
-	if (mdata->clk_always_on) {
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
-		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
-				0xFFFF);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
-	}
+	/* VBIF QoS and other settings */
+	if (!ctx->sbuf_mode)
+		sde_hw_rotator_vbif_setting(rot);
 
 	return 0;
 
@@ -2258,7 +2495,8 @@
 	rot = resinfo->rot;
 
 	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+			entry->item.sequence_id, hw->wb_id);
 	if (!ctx) {
 		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
 				entry->item.session_id);
@@ -2295,7 +2533,8 @@
 	rot = resinfo->rot;
 
 	/* Lookup rotator context from session-id */
-	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id, hw->wb_id);
+	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
+			entry->item.sequence_id, hw->wb_id);
 	if (!ctx) {
 		SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
 				entry->item.session_id);
@@ -2337,7 +2576,6 @@
 
 	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
-	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
 	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
 	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
 	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
@@ -2368,6 +2606,7 @@
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_PARTIALWR,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
 		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index aa762dd..d2b81d5 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -81,6 +81,8 @@
 #define ROT_SSPP_CREQ_LUT                       (SDE_ROT_SSPP_OFFSET+0x68)
 #define ROT_SSPP_QOS_CTRL                       (SDE_ROT_SSPP_OFFSET+0x6C)
 #define ROT_SSPP_SRC_ADDR_SW_STATUS             (SDE_ROT_SSPP_OFFSET+0x70)
+#define ROT_SSPP_CREQ_LUT_0                     (SDE_ROT_SSPP_OFFSET+0x74)
+#define ROT_SSPP_CREQ_LUT_1                     (SDE_ROT_SSPP_OFFSET+0x78)
 #define ROT_SSPP_CURRENT_SRC0_ADDR              (SDE_ROT_SSPP_OFFSET+0xA4)
 #define ROT_SSPP_CURRENT_SRC1_ADDR              (SDE_ROT_SSPP_OFFSET+0xA8)
 #define ROT_SSPP_CURRENT_SRC2_ADDR              (SDE_ROT_SSPP_OFFSET+0xAC)
@@ -167,6 +169,8 @@
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
 #define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_CREQ_LUT_0                       (SDE_ROT_WB_OFFSET+0x098)
+#define ROT_WB_CREQ_LUT_1                       (SDE_ROT_WB_OFFSET+0x09C)
 #define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
 #define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
 #define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 22eaa3f..67f7f4b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -204,6 +204,7 @@
  * ram segment size allocation. Each rotator context can be any priority. An
  * incremental timestamp is used to identify and is assigned to each context.
  * @list: list of pending context
+ * @sequence_id: unique sequence identifier for rotation request
  * @sbuf_mode: true if stream buffer is requested
  * @start_ctrl: start control register update value
  * @sys_cache_mode: sys cache mode register update value
@@ -216,6 +217,7 @@
 	struct sde_rot_hw_resource *hwres;
 	enum   sde_rot_queue_prio q_id;
 	u32    session_id;
+	u32    sequence_id;
 	u32    *regdma_base;
 	u32    *regdma_wrptr;
 	u32    timestamp;
@@ -402,7 +404,7 @@
 	spin_lock_irqsave(&rot->rotisr_lock, flags);
 	rot->rotCtx[ctx->q_id][idx] = ctx;
 	if (ctx->sbuf_mode)
-		list_add_tail(&rot->sbuf_ctx[ctx->q_id], &ctx->list);
+		list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
 	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
 	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index a477340..8d54e20 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -515,13 +515,13 @@
 		buffer = HFI_BUFFER_EXTRADATA_OUTPUT2;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_1:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_1;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1;
 		break;
 	case HAL_BUFFER_INTERNAL_SCRATCH_2:
-		buffer = HFI_BUFFER_INTERNAL_SCRATCH_2;
+		buffer = HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2;
 		break;
 	case HAL_BUFFER_INTERNAL_PERSIST:
 		buffer = HFI_BUFFER_INTERNAL_PERSIST;
@@ -1369,13 +1369,13 @@
 		pkt->size += sizeof(u32) * 2;
 		break;
 	}
-	case HAL_CONFIG_VPE_OPERATIONS:
+	case HAL_PARAM_VPE_ROTATION:
 	{
-		struct hfi_operations_type *hfi;
-		struct hal_operations *prop =
-			(struct hal_operations *) pdata;
-		pkt->rg_property_data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
-		hfi = (struct hfi_operations_type *) &pkt->rg_property_data[1];
+		struct hfi_vpe_rotation_type *hfi;
+		struct hal_vpe_rotation *prop =
+			(struct hal_vpe_rotation *) pdata;
+		pkt->rg_property_data[0] = HFI_PROPERTY_PARAM_VPE_ROTATION;
+		hfi = (struct hfi_vpe_rotation_type *)&pkt->rg_property_data[1];
 		switch (prop->rotate) {
 		case HAL_ROTATE_NONE:
 			hfi->rotation = HFI_ROTATE_NONE;
@@ -1411,7 +1411,7 @@
 			rc = -EINVAL;
 			break;
 		}
-		pkt->size += sizeof(u32) + sizeof(struct hfi_operations_type);
+		pkt->size += sizeof(u32) + sizeof(struct hfi_vpe_rotation_type);
 		break;
 	}
 	case HAL_PARAM_VENC_INTRA_REFRESH:
@@ -1523,14 +1523,6 @@
 			sizeof(struct hfi_vui_timing_info);
 		break;
 	}
-	case HAL_CONFIG_VPE_DEINTERLACE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-				HFI_PROPERTY_CONFIG_VPE_DEINTERLACE,
-				((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_PARAM_VENC_GENERATE_AUDNAL:
 	{
 		create_pkt_enable(pkt->rg_property_data,
@@ -1863,14 +1855,6 @@
 		pkt->size += sizeof(u32) + sizeof(*work_mode);
 		break;
 	}
-	case HAL_PARAM_USE_SYS_CACHE:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_USE_SYS_CACHE,
-			(((struct hal_enable *) pdata)->enable));
-		pkt->size += sizeof(u32) * 2;
-		break;
-	}
 	/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
 	case HAL_CONFIG_BUFFER_REQUIREMENTS:
 	case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index b424fbb..f678f56 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -110,6 +110,8 @@
 	struct hfi_profile_level *profile_level;
 	struct hfi_bit_depth *pixel_depth;
 	struct hfi_pic_struct *pic_struct;
+	struct hfi_buffer_requirements *buf_req;
+	struct hfi_index_extradata_input_crop_payload *crop_info;
 	u32 entropy_mode = 0;
 	u8 *data_ptr;
 	int prop_id;
@@ -231,6 +233,41 @@
 				data_ptr +=
 					sizeof(u32);
 				break;
+			case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+				data_ptr = data_ptr + sizeof(u32);
+				buf_req =
+					(struct hfi_buffer_requirements *)
+						data_ptr;
+				event_notify.capture_buf_count =
+					buf_req->buffer_count_min;
+				dprintk(VIDC_DBG,
+					"Capture Count : 0x%x\n",
+						event_notify.capture_buf_count);
+				data_ptr +=
+					sizeof(struct hfi_buffer_requirements);
+				break;
+			case HFI_INDEX_EXTRADATA_INPUT_CROP:
+				data_ptr = data_ptr + sizeof(u32);
+				crop_info = (struct
+				hfi_index_extradata_input_crop_payload *)
+						data_ptr;
+				event_notify.crop_data.left = crop_info->left;
+				event_notify.crop_data.top = crop_info->top;
+				event_notify.crop_data.width = crop_info->width;
+				event_notify.crop_data.height =
+					crop_info->height;
+				dprintk(VIDC_DBG,
+					"CROP info : Left = %d Top = %d\n",
+						crop_info->left,
+						crop_info->top);
+				dprintk(VIDC_DBG,
+					"CROP info : Width = %d Height = %d\n",
+						crop_info->width,
+						crop_info->height);
+				data_ptr +=
+					sizeof(struct
+					hfi_index_extradata_input_crop_payload);
+				break;
 			default:
 				dprintk(VIDC_ERR,
 					"%s cmd: %#x not supported\n",
@@ -1082,19 +1119,19 @@
 			buffreq->buffer[5].buffer_type =
 				HAL_BUFFER_EXTRADATA_OUTPUT2;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH:
 			memcpy(&buffreq->buffer[6], hfi_buf_req,
 			sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[6].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_1:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1:
 			memcpy(&buffreq->buffer[7], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[7].buffer_type =
 				HAL_BUFFER_INTERNAL_SCRATCH_1;
 			break;
-		case HFI_BUFFER_INTERNAL_SCRATCH_2:
+		case HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2:
 			memcpy(&buffreq->buffer[8], hfi_buf_req,
 				sizeof(struct hfi_buffer_requirements));
 			buffreq->buffer[8].buffer_type =
diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c
index 3d3d567..b116622 100644
--- a/drivers/media/platform/msm/vidc/msm_smem.c
+++ b/drivers/media/platform/msm/vidc/msm_smem.c
@@ -74,6 +74,14 @@
 			goto mem_map_failed;
 		}
 
+		/* Check if the dmabuf size matches expected size */
+		if (buf->size < *buffer_size) {
+			rc = -EINVAL;
+			dprintk(VIDC_ERR,
+				"Size mismatch! Dmabuf size: %zu Expected Size: %lu",
+				buf->size, *buffer_size);
+			goto mem_buf_size_mismatch;
+		}
 		/* Prepare a dma buf for dma on the given device */
 		attach = dma_buf_attach(buf, cb->dev);
 		if (IS_ERR_OR_NULL(attach)) {
@@ -94,10 +102,17 @@
 		trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0,
 			align, *iova, *buffer_size);
 
-		/* Map a scatterlist into an SMMU with system cacheability */
-		rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
-			table->nents, DMA_BIDIRECTIONAL,
-			buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		/* Map a scatterlist into SMMU */
+		if (smem_client->res->sys_cache_present) {
+			/* with sys cache attribute & delayed unmap */
+			rc = msm_dma_map_sg_attrs(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL,
+				buf, DMA_ATTR_IOMMU_USE_UPSTREAM_HINT);
+		} else {
+			/* with delayed unmap */
+			rc = msm_dma_map_sg_lazy(cb->dev, table->sgl,
+				table->nents, DMA_BIDIRECTIONAL, buf);
+		}
 
 		if (rc != table->nents) {
 			dprintk(VIDC_ERR,
@@ -144,6 +159,7 @@
 	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
 mem_map_table_failed:
 	dma_buf_detach(buf, attach);
+mem_buf_size_mismatch:
 mem_buf_attach_failed:
 	dma_buf_put(buf);
 mem_map_failed:
@@ -194,12 +210,12 @@
 	}
 }
 
-static int ion_user_to_kernel(struct smem_client *client, int fd, u32 offset,
+static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size,
 		struct msm_smem *mem, enum hal_buffer buffer_type)
 {
 	struct ion_handle *hndl = NULL;
 	ion_phys_addr_t iova = 0;
-	unsigned long buffer_size = 0;
+	unsigned long buffer_size = size;
 	int rc = 0;
 	unsigned long align = SZ_4K;
 	unsigned long ion_flags = 0;
@@ -210,10 +226,11 @@
 	dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl);
 	if (IS_ERR_OR_NULL(hndl)) {
 		dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n",
-				client, fd, offset, hndl);
+				client, fd, size, hndl);
 		rc = -ENOMEM;
 		goto fail_import_fd;
 	}
+
 	mem->kvaddr = NULL;
 	rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags);
 	if (rc) {
@@ -434,7 +451,7 @@
 	ion_client_destroy(client->clnt);
 }
 
-struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
+struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size,
 		enum hal_buffer buffer_type)
 {
 	struct smem_client *client = clt;
@@ -452,7 +469,7 @@
 	}
 	switch (client->mem_type) {
 	case SMEM_ION:
-		rc = ion_user_to_kernel(clt, fd, offset, mem, buffer_type);
+		rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type);
 		break;
 	default:
 		dprintk(VIDC_ERR, "Mem type not supported\n");
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index c5c4269..5c34f28 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -225,6 +225,14 @@
 	return 0;
 }
 
+static int msm_v4l2_g_crop(struct file *file, void *fh,
+			struct v4l2_crop *a)
+{
+	struct msm_vidc_inst *vidc_inst = get_vidc_inst(file, fh);
+
+	return msm_vidc_g_crop(vidc_inst, a);
+}
+
 static int msm_v4l2_enum_framesizes(struct file *file, void *fh,
 				struct v4l2_frmsizeenum *fsize)
 {
@@ -265,6 +273,7 @@
 	.vidioc_encoder_cmd = msm_v4l2_encoder_cmd,
 	.vidioc_s_parm = msm_v4l2_s_parm,
 	.vidioc_g_parm = msm_v4l2_g_parm,
+	.vidioc_g_crop = msm_v4l2_g_crop,
 	.vidioc_enum_framesizes = msm_v4l2_enum_framesizes,
 };
 
@@ -785,6 +794,8 @@
 	if (rc) {
 		dprintk(VIDC_ERR,
 			"Failed to register platform driver\n");
+		msm_vidc_debugfs_deinit_drv();
+		debugfs_remove_recursive(vidc_driver->debugfs_root);
 		kfree(vidc_driver);
 		vidc_driver = NULL;
 	}
@@ -795,6 +806,7 @@
 static void __exit msm_vidc_exit(void)
 {
 	platform_driver_unregister(&msm_vidc_driver);
+	msm_vidc_debugfs_deinit_drv();
 	debugfs_remove_recursive(vidc_driver->debugfs_root);
 	mutex_destroy(&vidc_driver->lock);
 	kfree(vidc_driver);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 14eb3ab..d44684e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1117,7 +1117,7 @@
 	struct hal_h264_entropy_control h264_entropy_control;
 	struct hal_intra_period intra_period;
 	struct hal_idr_period idr_period;
-	struct hal_operations operations;
+	struct hal_vpe_rotation vpe_rotation;
 	struct hal_intra_refresh intra_refresh;
 	struct hal_multi_slice_control multi_slice_control;
 	struct hal_h264_db_control h264_db_control;
@@ -1202,6 +1202,16 @@
 		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
 			num_b = ctrl->val;
 
+		if ((num_b < inst->capability.bframe.min) ||
+			(num_b > inst->capability.bframe.max)) {
+			dprintk(VIDC_ERR,
+				"Error setting num b frames %d min, max supported is %d, %d\n",
+				num_b, inst->capability.bframe.min,
+				inst->capability.bframe.max);
+			rc = -ENOTSUPP;
+			break;
+		}
+
 		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.pframes = num_p;
 		intra_period.bframes = num_b;
@@ -1335,19 +1345,12 @@
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
 	{
-		if (!(inst->capability.pixelprocess_capabilities &
-			HAL_VIDEO_ENCODER_ROTATION_CAPABILITY)) {
-			dprintk(VIDC_ERR, "Rotation not supported: %#x\n",
-				ctrl->id);
-			rc = -ENOTSUPP;
-			break;
-		}
-		property_id = HAL_CONFIG_VPE_OPERATIONS;
-		operations.rotate = msm_comm_v4l2_to_hal(
+		property_id = HAL_PARAM_VPE_ROTATION;
+		vpe_rotation.rotate = msm_comm_v4l2_to_hal(
 				V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
 				ctrl->val);
-		operations.flip = HAL_FLIP_NONE;
-		pdata = &operations;
+		vpe_rotation.flip = HAL_FLIP_NONE;
+		pdata = &vpe_rotation;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 05d6d63..2e952a3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -168,6 +168,15 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.blur_height);
 		break;
+	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bframe);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_mbs);
+		break;
+	case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+		msm_vidc_ctrl_get_range(ctrl, &inst->capability.slice_bytes);
+		break;
 	default:
 		rc = -EINVAL;
 	}
@@ -256,6 +265,29 @@
 }
 EXPORT_SYMBOL(msm_vidc_s_ctrl);
 
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *crop)
+{
+	struct msm_vidc_inst *inst = instance;
+
+	if (!inst || !crop)
+		return -EINVAL;
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		dprintk(VIDC_ERR,
+			"Session = %pK : Encoder Crop is not implemented yet\n",
+				inst);
+		return -EPERM;
+	}
+
+	crop->c.left = inst->prop.crop_info.left;
+	crop->c.top = inst->prop.crop_info.top;
+	crop->c.width = inst->prop.crop_info.width;
+	crop->c.height = inst->prop.crop_info.height;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_vidc_g_crop);
+
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
 {
 	struct msm_vidc_inst *inst = instance;
@@ -525,7 +557,7 @@
 
 	handle = msm_comm_smem_user_to_kernel(inst,
 				p->reserved[0],
-				p->reserved[1],
+				p->length,
 				buffer_type);
 	if (!handle) {
 		dprintk(VIDC_ERR,
@@ -596,8 +628,10 @@
 		goto exit;
 	}
 
-	dprintk(VIDC_DBG, "[MAP] Create binfo = %pK fd = %d type = %d\n",
-			binfo, b->m.planes[0].reserved[0], b->type);
+	dprintk(VIDC_DBG,
+		"[MAP] Create binfo = %pK fd = %d size = %d type = %d\n",
+		binfo, b->m.planes[0].reserved[0],
+		b->m.planes[0].length, b->type);
 
 	for (i = 0; i < b->length; ++i) {
 		rc = 0;
@@ -869,6 +903,7 @@
 	struct buffer_info *bi, *dummy;
 	int i, rc = 0;
 	int found_buf = 0;
+	struct vb2_buf_entry *temp, *next;
 
 	if (!inst)
 		return -EINVAL;
@@ -927,6 +962,16 @@
 	default:
 		break;
 	}
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == buffer_type) {
+			list_del(&temp->list);
+			kfree(temp);
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
 	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_release_buffer);
@@ -1447,8 +1492,6 @@
 		}
 	}
 
-	msm_comm_set_use_sys_cache(inst);
-
 	/*
 	 * For seq_changed_insufficient, driver should set session_continue
 	 * to firmware after the following sequence
@@ -1934,6 +1977,7 @@
 		ctrl->val = bufreq->buffer_count_min_host;
 		break;
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		msm_comm_try_get_bufreqs(inst);
 		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
 		if (!bufreq) {
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index a52fe05..05af186 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -371,7 +371,7 @@
 	struct msm_vidc_inst *temp;
 	struct msm_vidc_core *core;
 	unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
-	unsigned long mbs_per_frame;
+	unsigned long mbs_per_second;
 
 	if (!inst || !inst->core) {
 		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
@@ -394,14 +394,21 @@
 
 	list_for_each_entry(temp, &core->instances, list) {
 
-		mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+		if (!temp ||
+				temp->state < MSM_VIDC_START_DONE ||
+				temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
+			continue;
+
+		mbs_per_second = msm_comm_get_inst_load(temp,
+		LOAD_CALC_NO_QUIRKS);
+
 		cycles = temp->clk_data.entry->vpp_cycles;
-		if (inst->session_type == MSM_VIDC_ENCODER)
+		if (temp->session_type == MSM_VIDC_ENCODER)
 			cycles = temp->flags & VIDC_LOW_POWER ?
-				inst->clk_data.entry->low_power_cycles :
+				temp->clk_data.entry->low_power_cycles :
 				cycles;
 
-		load = cycles * mbs_per_frame;
+		load = cycles * mbs_per_second;
 
 		ops_left = load ? (freq_left / load) : 0;
 		/* Convert remaining operating rate to Q16 format */
@@ -418,7 +425,7 @@
 				ctrl->name, ctrl->default_value, ctrl->val);
 			v4l2_ctrl_modify_range(ctrl, ctrl->minimum,
 				ctrl->val + ops_left, ctrl->step,
-				ctrl->minimum);
+				ctrl->default_value);
 			dprintk(VIDC_DBG,
 				"%s: Updated Range = %lld --> %lld\n",
 				ctrl->name, ctrl->minimum, ctrl->maximum);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 058af0e..fe61e6f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1166,12 +1166,12 @@
 	if (!rc) {
 		dprintk(VIDC_ERR, "Wait interrupted or timed out: %d\n",
 				SESSION_MSG_INDEX(cmd));
-		msm_comm_kill_session(inst);
 		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 		dprintk(VIDC_ERR,
 			"sess resp timeout can potentially crash the system\n");
 		msm_comm_print_debug_info(inst);
 		msm_vidc_handle_hw_error(inst->core);
+		msm_comm_kill_session(inst);
 		rc = -EIO;
 	} else {
 		rc = 0;
@@ -1554,6 +1554,14 @@
 	inst->entropy_mode = event_notify->entropy_mode;
 	inst->profile = event_notify->profile;
 	inst->level = event_notify->level;
+	inst->prop.crop_info.left =
+		event_notify->crop_data.left;
+	inst->prop.crop_info.top =
+		event_notify->crop_data.top;
+	inst->prop.crop_info.height =
+		event_notify->crop_data.height;
+	inst->prop.crop_info.width =
+		event_notify->crop_data.width;
 
 	ptr = (u32 *)seq_changed_event.u.data;
 	ptr[0] = event_notify->height;
@@ -1561,6 +1569,10 @@
 	ptr[2] = event_notify->bit_depth;
 	ptr[3] = event_notify->pic_struct;
 	ptr[4] = event_notify->colour_space;
+	ptr[5] = event_notify->crop_data.top;
+	ptr[6] = event_notify->crop_data.left;
+	ptr[7] = event_notify->crop_data.height;
+	ptr[8] = event_notify->crop_data.width;
 
 	dprintk(VIDC_DBG,
 		"Event payload: height = %d width = %d\n",
@@ -1571,6 +1583,13 @@
 		event_notify->bit_depth, event_notify->pic_struct,
 			event_notify->colour_space);
 
+	dprintk(VIDC_DBG,
+		"Event payload: CROP top = %d left = %d Height = %d Width = %d\n",
+			event_notify->crop_data.top,
+			event_notify->crop_data.left,
+			event_notify->crop_data.height,
+			event_notify->crop_data.width);
+
 	mutex_lock(&inst->lock);
 	inst->in_reconfig = true;
 	inst->reconfig_height = event_notify->height;
@@ -4245,14 +4264,13 @@
 			__func__, inst,
 			SESSION_MSG_INDEX(HAL_SESSION_PROPERTY_INFO));
 		inst->state = MSM_VIDC_CORE_INVALID;
-		msm_comm_kill_session(inst);
 		call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
 		dprintk(VIDC_ERR,
 			"SESS_PROP timeout can potentially crash the system\n");
-		if (inst->core->resources.debug_timeout)
-			msm_comm_print_debug_info(inst);
+		msm_comm_print_debug_info(inst);
 
 		msm_vidc_handle_hw_error(inst->core);
+		msm_comm_kill_session(inst);
 		rc = -ETIMEDOUT;
 		goto exit;
 	} else {
@@ -5636,40 +5654,3 @@
 	return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height);
 }
 
-void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst)
-{
-	struct hal_enable syscache_use;
-	int rc = 0;
-
-
-	if (!inst->core->resources.sys_cache_enabled)
-		goto exit;
-
-	syscache_use.enable = false;
-	inst->clk_data.use_sys_cache = false;
-
-	if (inst->flags & VIDC_REALTIME)
-		syscache_use.enable = true;
-
-	if (inst->flags & VIDC_THUMBNAIL)
-		syscache_use.enable = false;
-
-	dprintk(VIDC_DBG,
-		"set_use_sys_cache: enable = %d inst = %pK flags =%d\n",
-		syscache_use.enable, inst, inst->flags);
-	rc = msm_comm_try_set_prop(inst, HAL_PARAM_USE_SYS_CACHE,
-		&syscache_use);
-	if (rc) {
-		dprintk(VIDC_ERR, "set_use_sys_cache: failed!!\n");
-			inst->clk_data.use_sys_cache = false;
-		goto exit;
-	}
-
-	inst->clk_data.use_sys_cache = syscache_use.enable;
-
-	return;
-
-exit:
-	return;
-}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index f62c132..c197776 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -12,6 +12,7 @@
  */
 
 #define CREATE_TRACE_POINTS
+#define MAX_SSR_STRING_LEN 10
 #include "msm_vidc_debug.h"
 #include "vidc_hfi_api.h"
 
@@ -31,10 +32,12 @@
 bool msm_vidc_thermal_mitigation_disabled = !true;
 bool msm_vidc_clock_scaling = true;
 bool msm_vidc_debug_timeout = !true;
+bool msm_vidc_syscache_disable = !true;
 
 #define MAX_DBG_BUF_SIZE 4096
 
 struct debug_buffer {
+	struct mutex lock;
 	char ptr[MAX_DBG_BUF_SIZE];
 	char *curr;
 	u32 filled_size;
@@ -62,8 +65,11 @@
 	va_list args;
 	u32 size;
 
+	char *curr = buffer->curr;
+	char *end = buffer->ptr + MAX_DBG_BUF_SIZE;
+
 	va_start(args, fmt);
-	size = vscnprintf(buffer->curr, MAX_DBG_BUF_SIZE - 1, fmt, args);
+	size = vscnprintf(curr, end - curr, fmt, args);
 	va_end(args);
 	buffer->curr += size;
 	buffer->filled_size += size;
@@ -77,12 +83,15 @@
 	struct hfi_device *hdev;
 	struct hal_fw_info fw_info = { {0} };
 	int i = 0, rc = 0;
+	ssize_t len = 0;
 
 	if (!core || !core->device) {
 		dprintk(VIDC_ERR, "Invalid params, core: %pK\n", core);
 		return 0;
 	}
 	hdev = core->device;
+
+	mutex_lock(&dbg_buf.lock);
 	INIT_DBG_BUF(dbg_buf);
 	write_str(&dbg_buf, "===============================\n");
 	write_str(&dbg_buf, "CORE %d: %pK\n", core->id, core);
@@ -106,8 +115,11 @@
 			completion_done(&core->completions[SYS_MSG_INDEX(i)]) ?
 			"pending" : "done");
 	}
-	return simple_read_from_buffer(buf, count, ppos,
+	len = simple_read_from_buffer(buf, count, ppos,
 			dbg_buf.ptr, dbg_buf.filled_size);
+
+	mutex_unlock(&dbg_buf.lock);
+	return len;
 }
 
 static const struct file_operations core_info_fops = {
@@ -123,21 +135,36 @@
 
 static ssize_t trigger_ssr_write(struct file *filp, const char __user *buf,
 		size_t count, loff_t *ppos) {
-	u32 ssr_trigger_val;
-	int rc;
+	unsigned long ssr_trigger_val = 0;
+	int rc = 0;
 	struct msm_vidc_core *core = filp->private_data;
+	size_t size = MAX_SSR_STRING_LEN;
+	char kbuf[MAX_SSR_STRING_LEN + 1] = {0};
 
 	if (!buf)
 		return -EINVAL;
 
-	rc = kstrtou32(buf, 0, &ssr_trigger_val);
-	if (rc < 0) {
+	if (!count)
+		goto exit;
+
+	if (count < size)
+		size = count;
+
+	if (copy_from_user(kbuf, buf, size)) {
+		dprintk(VIDC_WARN, "%s User memory fault\n", __func__);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	rc = kstrtoul(kbuf, 0, &ssr_trigger_val);
+	if (rc) {
 		dprintk(VIDC_WARN, "returning error err %d\n", rc);
 		rc = -EINVAL;
 	} else {
 		msm_vidc_trigger_ssr(core, ssr_trigger_val);
 		rc = count;
 	}
+exit:
 	return rc;
 }
 
@@ -149,8 +176,10 @@
 struct dentry *msm_vidc_debugfs_init_drv(void)
 {
 	bool ok = false;
-	struct dentry *dir = debugfs_create_dir("msm_vidc", NULL);
+	struct dentry *dir = NULL;
 
+	mutex_init(&dbg_buf.lock);
+	dir = debugfs_create_dir("msm_vidc", NULL);
 	if (IS_ERR_OR_NULL(dir)) {
 		dir = NULL;
 		goto failed_create_dir;
@@ -185,7 +214,9 @@
 	__debugfs_create(bool, "clock_scaling",
 			&msm_vidc_clock_scaling) &&
 	__debugfs_create(bool, "debug_timeout",
-			&msm_vidc_debug_timeout);
+			&msm_vidc_debug_timeout) &&
+	__debugfs_create(bool, "disable_video_syscache",
+			&msm_vidc_syscache_disable);
 
 #undef __debugfs_create
 
@@ -271,11 +302,14 @@
 {
 	struct msm_vidc_inst *inst = file->private_data;
 	int i, j;
+	ssize_t len = 0;
 
 	if (!inst) {
 		dprintk(VIDC_ERR, "Invalid params, inst %pK\n", inst);
 		return 0;
 	}
+
+	mutex_lock(&dbg_buf.lock);
 	INIT_DBG_BUF(dbg_buf);
 	write_str(&dbg_buf, "===============================\n");
 	write_str(&dbg_buf, "INSTANCE: %pK (%s)\n", inst,
@@ -330,8 +364,10 @@
 
 	publish_unreleased_reference(inst);
 
-	return simple_read_from_buffer(buf, count, ppos,
+	len = simple_read_from_buffer(buf, count, ppos,
 		dbg_buf.ptr, dbg_buf.filled_size);
+	mutex_unlock(&dbg_buf.lock);
+	return len;
 }
 
 static const struct file_operations inst_info_fops = {
@@ -414,3 +450,8 @@
 	}
 }
 
+void msm_vidc_debugfs_deinit_drv(void)
+{
+	mutex_destroy(&dbg_buf.lock);
+}
+
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
index f5c8e5a..f4c851a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h
@@ -64,6 +64,7 @@
 extern bool msm_vidc_thermal_mitigation_disabled;
 extern bool msm_vidc_clock_scaling;
 extern bool msm_vidc_debug_timeout;
+extern bool msm_vidc_syscache_disable;
 
 #define VIDC_MSG_PRIO2STRING(__level) ({ \
 	char *__str; \
@@ -127,6 +128,7 @@
 		struct dentry *parent);
 void msm_vidc_debugfs_update(struct msm_vidc_inst *inst,
 		enum msm_vidc_debugfs_event e);
+void msm_vidc_debugfs_deinit_drv(void);
 
 static inline void tic(struct msm_vidc_inst *i, enum profiling_points p,
 				 char *b)
@@ -184,7 +186,7 @@
 {
 	bool enable_fatal;
 
-	enable_fatal = core->resources.debug_timeout;
+	enable_fatal = msm_vidc_debug_timeout;
 
 	/* Video driver can decide FATAL handling of HW errors
 	 * based on multiple factors. This condition check will
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 17c3045..5edd3d5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -175,9 +175,17 @@
 	struct video_device vdev;
 };
 
+struct session_crop {
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
 struct session_prop {
 	u32 width[MAX_PORT_NUM];
 	u32 height[MAX_PORT_NUM];
+	struct session_crop crop_info;
 	u32 fps;
 	u32 bitrate;
 };
@@ -224,7 +232,6 @@
 	u32 core_id;
 	enum hal_work_mode work_mode;
 	bool low_latency_mode;
-	bool use_sys_cache;
 };
 
 struct profile_data {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 5cf4628..19ca561 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -275,12 +275,12 @@
 			"cache-slice-names", c, &vsc->name);
 	}
 
-	res->sys_cache_enabled = true;
+	res->sys_cache_present = true;
 
 	return 0;
 
 err_load_subcache_table_fail:
-	res->sys_cache_enabled = false;
+	res->sys_cache_present = false;
 	subcaches->count = 0;
 	subcaches->subcache_tbl = NULL;
 
@@ -969,7 +969,7 @@
 	res->debug_timeout = of_property_read_bool(pdev->dev.of_node,
 			"qcom,debug-timeout");
 
-	res->debug_timeout |= msm_vidc_debug_timeout;
+	msm_vidc_debug_timeout |= res->debug_timeout;
 
 	of_property_read_u32(pdev->dev.of_node,
 			"qcom,pm-qos-latency-us", &res->pm_qos_latency_us);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index d76985e..b07785a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -159,6 +159,7 @@
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
+	bool sys_cache_present;
 	bool sys_cache_enabled;
 	struct subcache_set subcache_set;
 	struct reg_set reg_set;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 5a8dd26..6139e46 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -120,6 +120,11 @@
 	return device->state != VENUS_STATE_DEINIT;
 }
 
+static inline bool is_sys_cache_present(struct venus_hfi_device *device)
+{
+	return device->res->sys_cache_present;
+}
+
 static void __dump_packet(u8 *packet, enum vidc_msg_prio log_level)
 {
 	u32 c = 0, packet_size = *(u32 *)packet;
@@ -1044,8 +1049,12 @@
 	}
 
 	dprintk(VIDC_DBG, "Suspending Venus\n");
-	rc = flush_delayed_work(&venus_hfi_pm_work);
+	flush_delayed_work(&venus_hfi_pm_work);
 
+	mutex_lock(&device->lock);
+	if (device->power_enabled)
+		rc = -EBUSY;
+	mutex_unlock(&device->lock);
 	return rc;
 }
 
@@ -3492,7 +3501,7 @@
 		goto exit;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		goto exit;
 
 	venus_hfi_for_each_subcache_reverse(device, sinfo) {
@@ -3519,7 +3528,7 @@
 		return -EINVAL;
 	}
 
-	if (!device->res->sys_cache_enabled)
+	if (!is_sys_cache_present(device))
 		return 0;
 
 	venus_hfi_for_each_subcache(device, sinfo) {
@@ -3764,7 +3773,7 @@
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	memset((void *)resource, 0x0, (sizeof(u32) * VIDC_MAX_SUBCACHE_SIZE));
@@ -3812,6 +3821,8 @@
 
 	dprintk(VIDC_DBG, "Activated & Set Subcaches to Venus\n");
 
+	device->res->sys_cache_enabled = true;
+
 	return 0;
 
 err_fail_set_subacaches:
@@ -3830,7 +3841,7 @@
 	struct hfi_resource_subcache_type *sc_res;
 	struct vidc_resource_hdr rhdr;
 
-	if (!device->res->sys_cache_enabled)
+	if (msm_vidc_syscache_disable || !is_sys_cache_present(device))
 		return 0;
 
 	dprintk(VIDC_DBG, "Disabling Subcaches\n");
@@ -3877,6 +3888,8 @@
 		}
 	}
 
+	device->res->sys_cache_enabled = false;
+
 	return rc;
 }
 
@@ -4159,7 +4172,7 @@
 	struct venus_hfi_device *device = dev;
 	u32 smem_block_size = 0;
 	u8 *smem_table_ptr;
-	char version[VENUS_VERSION_LENGTH];
+	char version[VENUS_VERSION_LENGTH] = "";
 	const u32 smem_image_index_venus = 14 * 128;
 
 	if (!device || !fw_info) {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 48a6f17..5601f1b 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -56,13 +56,6 @@
 #define  HFI_ERR_SESSION_START_CODE_NOT_FOUND		\
 	(HFI_OX_BASE + 0x1004)
 
-#define HFI_BUFFER_INTERNAL_SCRATCH (HFI_OX_BASE + 0x1)
-#define HFI_BUFFER_EXTRADATA_INPUT (HFI_OX_BASE + 0x2)
-#define HFI_BUFFER_EXTRADATA_OUTPUT (HFI_OX_BASE + 0x3)
-#define HFI_BUFFER_EXTRADATA_OUTPUT2 (HFI_OX_BASE + 0x4)
-#define HFI_BUFFER_INTERNAL_SCRATCH_1 (HFI_OX_BASE + 0x5)
-#define HFI_BUFFER_INTERNAL_SCRATCH_2 (HFI_OX_BASE + 0x6)
-#define HFI_BUFFER_INTERNAL_RECON (HFI_OX_BASE + 0x9)
 
 #define HFI_BUFFER_MODE_DYNAMIC (HFI_OX_BASE + 0x3)
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index da18377..86e4f42 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -154,10 +154,9 @@
 	HAL_PARAM_VENC_SESSION_QP_RANGE,
 	HAL_CONFIG_VENC_INTRA_PERIOD,
 	HAL_CONFIG_VENC_IDR_PERIOD,
-	HAL_CONFIG_VPE_OPERATIONS,
+	HAL_PARAM_VPE_ROTATION,
 	HAL_PARAM_VENC_INTRA_REFRESH,
 	HAL_PARAM_VENC_MULTI_SLICE_CONTROL,
-	HAL_CONFIG_VPE_DEINTERLACE,
 	HAL_SYS_DEBUG_CONFIG,
 	HAL_CONFIG_BUFFER_REQUIREMENTS,
 	HAL_CONFIG_PRIORITY,
@@ -224,7 +223,6 @@
 	HAL_PARAM_VIDEO_CORES_USAGE,
 	HAL_PARAM_VIDEO_WORK_MODE,
 	HAL_PARAM_SECURE,
-	HAL_PARAM_USE_SYS_CACHE,
 };
 
 enum hal_domain {
@@ -636,7 +634,7 @@
 	HAL_UNUSED_FLIP = 0x10000000,
 };
 
-struct hal_operations {
+struct hal_vpe_rotation {
 	enum hal_rotate rotate;
 	enum hal_flip flip;
 };
@@ -1021,7 +1019,7 @@
 	struct hal_quantization_range quantization_range;
 	struct hal_intra_period intra_period;
 	struct hal_idr_period idr_period;
-	struct hal_operations operations;
+	struct hal_vpe_rotation vpe_rotation;
 	struct hal_intra_refresh intra_refresh;
 	struct hal_multi_slice_control multi_slice_control;
 	struct hal_debug_config debug_config;
@@ -1214,6 +1212,16 @@
 	} data;
 };
 
+struct hal_index_extradata_input_crop_payload {
+	u32 size;
+	u32 version;
+	u32 port_index;
+	u32 left;
+	u32 top;
+	u32 width;
+	u32 height;
+};
+
 struct msm_vidc_cb_event {
 	u32 device_id;
 	void *session_id;
@@ -1229,6 +1237,8 @@
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	u32 capture_buf_count;
+	struct hal_index_extradata_input_crop_payload crop_data;
 };
 
 struct msm_vidc_cb_data_done {
@@ -1316,16 +1326,6 @@
 	int num_sessions;
 };
 
-struct hal_index_extradata_input_crop_payload {
-	u32 size;
-	u32 version;
-	u32 port_index;
-	u32 left;
-	u32 top;
-	u32 width;
-	u32 height;
-};
-
 struct hal_cmd_sys_get_property_packet {
 	u32 size;
 	u32 packet_type;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index fc638f0..616fc09 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -155,6 +155,13 @@
 #define HFI_BUFFER_OUTPUT2				(HFI_COMMON_BASE + 0x3)
 #define HFI_BUFFER_INTERNAL_PERSIST		(HFI_COMMON_BASE + 0x4)
 #define HFI_BUFFER_INTERNAL_PERSIST_1		(HFI_COMMON_BASE + 0x5)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH	(HFI_COMMON_BASE + 0x6)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_1	(HFI_COMMON_BASE + 0x7)
+#define HFI_BUFFER_COMMON_INTERNAL_SCRATCH_2	(HFI_COMMON_BASE + 0x8)
+#define HFI_BUFFER_COMMON_INTERNAL_RECON	(HFI_COMMON_BASE + 0x9)
+#define HFI_BUFFER_EXTRADATA_OUTPUT		(HFI_COMMON_BASE + 0xA)
+#define HFI_BUFFER_EXTRADATA_OUTPUT2		(HFI_COMMON_BASE + 0xB)
+#define HFI_BUFFER_EXTRADATA_INPUT		(HFI_COMMON_BASE + 0xC)
 
 #define  HFI_BITDEPTH_8				(HFI_COMMON_BASE + 0x0)
 #define  HFI_BITDEPTH_9				(HFI_COMMON_BASE + 0x1)
@@ -220,8 +227,6 @@
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x010)
 #define  HFI_PROPERTY_PARAM_SECURE_SESSION		\
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x011)
-#define  HFI_PROPERTY_PARAM_USE_SYS_CACHE				\
-	(HFI_PROPERTY_PARAM_COMMON_START + 0x012)
 #define  HFI_PROPERTY_PARAM_WORK_MODE                       \
 	(HFI_PROPERTY_PARAM_COMMON_START + 0x015)
 
@@ -322,8 +327,6 @@
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x004)
 #define  HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE                \
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x005)
-#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
-	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
 #define  HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER	\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x008)
 #define  HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME			\
@@ -339,15 +342,15 @@
 #define HFI_PROPERTY_CONFIG_VENC_SESSION_QP			\
 	(HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012)
 
+#define HFI_PROPERTY_PARAM_VPE_COMMON_START				\
+	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000)
+#define HFI_PROPERTY_PARAM_VPE_ROTATION				\
+	(HFI_PROPERTY_PARAM_VPE_COMMON_START + 0x001)
 
 #define HFI_PROPERTY_CONFIG_VPE_COMMON_START				\
 	(HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000)
 #define  HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE		\
 	(HFI_PROPERTY_CONFIG_COMMON_START + 0x010)
-#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE				\
-	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x001)
-#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS				\
-	(HFI_PROPERTY_CONFIG_VPE_COMMON_START + 0x002)
 
 struct hfi_pic_struct {
 	u32 progressive_only;
@@ -469,7 +472,7 @@
 	u32 idr_period;
 };
 
-struct hfi_operations_type {
+struct hfi_vpe_rotation_type {
 	u32 rotation;
 	u32 flip;
 };
@@ -713,12 +716,7 @@
 
 #define HFI_FLIP_NONE					(HFI_COMMON_BASE + 0x1)
 #define HFI_FLIP_HORIZONTAL				(HFI_COMMON_BASE + 0x2)
-#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x3)
-
-struct hfi_operations {
-	u32 rotate;
-	u32 flip;
-};
+#define HFI_FLIP_VERTICAL				(HFI_COMMON_BASE + 0x4)
 
 #define HFI_RESOURCE_SYSCACHE 0x00000002
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 27e7cf6..7c24da5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -206,6 +206,7 @@
 		}
 		s5p_mfc_clock_on();
 		ret = s5p_mfc_init_hw(dev);
+		s5p_mfc_clock_off();
 		if (ret)
 			mfc_err("Failed to reinit FW\n");
 	}
@@ -663,9 +664,9 @@
 				break;
 			}
 			s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-			wake_up_ctx(ctx, reason, err);
 			WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
 			s5p_mfc_clock_off();
+			wake_up_ctx(ctx, reason, err);
 			s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 		} else {
 			s5p_mfc_handle_frame(ctx, reason, err);
@@ -679,15 +680,11 @@
 	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
 		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
 		ctx->state = MFCINST_GOT_INST;
-		clear_work_bit(ctx);
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
-		clear_work_bit(ctx);
 		ctx->inst_no = MFC_NO_INSTANCE_SET;
 		ctx->state = MFCINST_FREE;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
@@ -697,9 +694,9 @@
 		if (ctx)
 			clear_work_bit(ctx);
 		s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
-		wake_up_dev(dev, reason, err);
 		clear_bit(0, &dev->hw_lock);
 		clear_bit(0, &dev->enter_suspend);
+		wake_up_dev(dev, reason, err);
 		break;
 
 	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
@@ -714,9 +711,7 @@
 		break;
 
 	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
-		clear_work_bit(ctx);
 		ctx->state = MFCINST_RUNNING;
-		wake_up(&ctx->queue);
 		goto irq_cleanup_hw;
 
 	default:
@@ -735,6 +730,8 @@
 		mfc_err("Failed to unlock hw\n");
 
 	s5p_mfc_clock_off();
+	clear_work_bit(ctx);
+	wake_up(&ctx->queue);
 
 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 	spin_unlock(&dev->irqlock);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 4f8c7ef..db525cd 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1332,8 +1332,8 @@
 			}
 		}
 	}
-	if (ep_in == NULL) {
-		dev_dbg(&intf->dev, "inbound and/or endpoint not found");
+	if (!ep_in || !ep_out) {
+		dev_dbg(&intf->dev, "required endpoints not found\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index 8263c4b..bf4b3ca 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -674,10 +674,8 @@
 
 	spin_lock_init(&adev->slock);
 	err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
 			&snd_cx231xx_pcm_capture);
@@ -691,10 +689,9 @@
 	INIT_WORK(&dev->wq_trigger, audio_trigger);
 
 	err = snd_card_register(card);
-	if (err < 0) {
-		snd_card_free(card);
-		return err;
-	}
+	if (err < 0)
+		goto err_free_card;
+
 	adev->sndcard = card;
 	adev->udev = dev->udev;
 
@@ -704,6 +701,11 @@
 					    hs_config_info[0].interface_info.
 					    audio_index + 1];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+		err = -ENODEV;
+		goto err_free_card;
+	}
+
 	adev->end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -713,13 +715,20 @@
 		"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
 		adev->end_point_addr, adev->num_alt);
 	adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
-
-	if (adev->alt_max_pkt_size == NULL)
-		return -ENOMEM;
+	if (!adev->alt_max_pkt_size) {
+		err = -ENOMEM;
+		goto err_free_card;
+	}
 
 	for (i = 0; i < adev->num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+			err = -ENODEV;
+			goto err_free_pkt_size;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
 				wMaxPacketSize);
 		adev->alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -729,6 +738,13 @@
 	}
 
 	return 0;
+
+err_free_pkt_size:
+	kfree(adev->alt_max_pkt_size);
+err_free_card:
+	snd_card_free(card);
+
+	return err;
 }
 
 static int cx231xx_audio_fini(struct cx231xx *dev)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 36bc254..be9e333 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1397,6 +1397,9 @@
 
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
 	dev->video_mode.num_alt = uif->num_altsetting;
 
@@ -1410,7 +1413,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->video_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
 		dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
 		dev_dbg(dev->dev,
 			"Alternate setting %i, max size= %i\n", i,
@@ -1427,6 +1435,9 @@
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->vbi_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1443,8 +1454,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->vbi_mode.num_alt; i++) {
-		u16 tmp =
-		    le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->vbi_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1464,6 +1479,9 @@
 	}
 	uif = udev->actconfig->interface[idx];
 
+	if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+		return -ENODEV;
+
 	dev->sliced_cc_mode.end_point_addr =
 	    uif->altsetting[0].endpoint[isoc_pipe].desc.
 			bEndpointAddress;
@@ -1478,7 +1496,12 @@
 		return -ENOMEM;
 
 	for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+			return -ENODEV;
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
 				desc.wMaxPacketSize);
 		dev->sliced_cc_mode.alt_max_pkt_size[i] =
 		    (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1647,6 +1670,11 @@
 		}
 		uif = udev->actconfig->interface[idx];
 
+		if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+			retval = -ENODEV;
+			goto err_video_alt;
+		}
+
 		dev->ts1_mode.end_point_addr =
 		    uif->altsetting[0].endpoint[isoc_pipe].
 				desc.bEndpointAddress;
@@ -1664,7 +1692,14 @@
 		}
 
 		for (i = 0; i < dev->ts1_mode.num_alt; i++) {
-			u16 tmp = le16_to_cpu(uif->altsetting[i].
+			u16 tmp;
+
+			if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+				retval = -ENODEV;
+				goto err_video_alt;
+			}
+
+			tmp = le16_to_cpu(uif->altsetting[i].
 						endpoint[isoc_pipe].desc.
 						wMaxPacketSize);
 			dev->ts1_mode.alt_max_pkt_size[i] =
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 47ce9d5..563f690 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -812,6 +812,9 @@
 
 	/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
 
+	if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+		return -ENODEV;
+
 	purb = usb_alloc_urb(0, GFP_KERNEL);
 	if (purb == NULL)
 		return -ENOMEM;
diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
index d66f56c..1f7bce6 100644
--- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
@@ -12,6 +12,8 @@
 #include <linux/kconfig.h>
 #include "dibusb.h"
 
+MODULE_LICENSE("GPL");
+
 /* 3000MC/P stuff */
 // Config Adjacent channels  Perf -cal22
 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
index 4284f69..475a3c0 100644
--- a/drivers/media/usb/dvb-usb/digitv.c
+++ b/drivers/media/usb/dvb-usb/digitv.c
@@ -33,6 +33,9 @@
 
 	wo = (rbuf == NULL || rlen == 0); /* write-only */
 
+	if (wlen > 4 || rlen > 4)
+		return -EIO;
+
 	memset(st->sndbuf, 0, 7);
 	memset(st->rcvbuf, 0, 7);
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index c3e6734..4a0cc54 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -205,6 +205,20 @@
 
 	switch (num) {
 	case 2:
+		if (msg[0].len != 1) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
+		if (2 + msg[1].len > sizeof(buf6)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		/* read si2109 register by number */
 		buf6[0] = msg[0].addr << 1;
 		buf6[1] = msg[0].len;
@@ -220,6 +234,13 @@
 	case 1:
 		switch (msg[0].addr) {
 		case 0x68:
+			if (2 + msg[0].len > sizeof(buf6)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* write to si2109 register */
 			buf6[0] = msg[0].addr << 1;
 			buf6[1] = msg[0].len;
@@ -263,6 +284,13 @@
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
+
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -463,6 +491,12 @@
 		/* first write first register number */
 		u8 ibuf[MAX_XFER_SIZE], obuf[3];
 
+		if (2 + msg[0].len != sizeof(obuf)) {
+			warn("i2c rd: len=%d is not 1!\n",
+			     msg[0].len);
+			ret = -EOPNOTSUPP;
+			goto unlock;
+		}
 		if (2 + msg[1].len > sizeof(ibuf)) {
 			warn("i2c rd: len=%d is too big!\n",
 			     msg[1].len);
@@ -697,6 +731,13 @@
 			msg[0].buf[0] = state->data[1];
 			break;
 		default:
+			if (3 + msg[0].len > sizeof(state->data)) {
+				warn("i2c wr: len=%d is too big!\n",
+				     msg[0].len);
+				num = -EOPNOTSUPP;
+				break;
+			}
+
 			/* always i2c write*/
 			state->data[0] = 0x08;
 			state->data[1] = msg[0].addr;
@@ -712,6 +753,19 @@
 		break;
 	case 2:
 		/* always i2c read */
+		if (4 + msg[0].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[0].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+		if (1 + msg[1].len > sizeof(state->data)) {
+			warn("i2c rd: len=%d is too big!\n",
+			     msg[1].len);
+			num = -EOPNOTSUPP;
+			break;
+		}
+
 		state->data[0] = 0x09;
 		state->data[1] = msg[0].len;
 		state->data[2] = msg[1].len;
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index ecc207f..9e0d6a4 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -78,6 +78,9 @@
 	u8 *s, *r = NULL;
 	int ret = 0;
 
+	if (4 + rlen > 64)
+		return -EIO;
+
 	s = kzalloc(wlen+4, GFP_KERNEL);
 	if (!s)
 		return -ENOMEM;
@@ -381,6 +384,22 @@
 		write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
 		read = msg[i].flags & I2C_M_RD;
 
+		if (3 + msg[i].len > sizeof(obuf)) {
+			err("i2c wr len=%d too high", msg[i].len);
+			break;
+		}
+		if (write_read) {
+			if (3 + msg[i+1].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i+1].len);
+				break;
+			}
+		} else if (read) {
+			if (3 + msg[i].len > sizeof(ibuf)) {
+				err("i2c rd len=%d too high", msg[i].len);
+				break;
+			}
+		}
+
 		obuf[0] = (msg[i].addr << 1) | (write_read | read);
 		if (read)
 			obuf[1] = 0;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 40aaaa9..78542ff 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -188,6 +188,9 @@
 		return -EIO;
 	}
 
+	if (alt->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
 
 	n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index c8b4eb2..bfdf723 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1506,7 +1506,14 @@
 	}
 
 	for (i = 0; i < usbvision->num_alt; i++) {
-		u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
+		u16 tmp;
+
+		if (uif->altsetting[i].desc.bNumEndpoints < 2) {
+			ret = -ENODEV;
+			goto err_pkt;
+		}
+
+		tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
 				      wMaxPacketSize);
 		usbvision->alt_max_pkt_size[i] =
 			(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index cc128db..e3735bf 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -604,6 +604,14 @@
 	ptr = pdest = frm->lpvbits;
 
 	if (frm->ulState == ZR364XX_READ_IDLE) {
+		if (purb->actual_length < 128) {
+			/* header incomplete */
+			dev_info(&cam->udev->dev,
+				 "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
+				 __func__, purb->actual_length);
+			return -EINVAL;
+		}
+
 		frm->ulState = ZR364XX_READ_FRAME;
 		frm->cur_size = 0;
 
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index fbaf05e..e8ba149 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1926,6 +1926,19 @@
 	case WCD934X_ANA_MBHC_ELECT:
 	case WCD934X_ANA_MBHC_ZDET:
 	case WCD934X_ANA_MICB2:
+	case WCD934X_CODEC_RPM_CLK_MCLK_CFG:
+	case WCD934X_CLK_SYS_MCLK_PRG:
+	case WCD934X_CHIP_TIER_CTRL_EFUSE_CTL:
+	case WCD934X_ANA_BIAS:
+	case WCD934X_ANA_BUCK_CTL:
+	case WCD934X_ANA_RCO:
+	case WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL:
+	case WCD934X_CODEC_RPM_CLK_GATE:
+	case WCD934X_BIAS_VBG_FINE_ADJ:
+	case WCD934X_CODEC_CPR_SVS_CX_VDD:
+	case WCD934X_CODEC_CPR_SVS2_CX_VDD:
+	case WCD934X_CDC_TOP_TOP_CFG1:
+	case WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
 		return true;
 	}
 
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 30ad689e..0502e39d 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -293,7 +293,7 @@
 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
 	struct wcd9xxx_core_resource *wcd9xxx_res = data;
 	int num_irq_regs = wcd9xxx_res->num_irq_regs;
-	u8 status[num_irq_regs], status1[num_irq_regs];
+	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};
 
 	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
 		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
@@ -317,6 +317,23 @@
 				"Failed to read interrupt status: %d\n", ret);
 		goto err_disable_irq;
 	}
+	/*
+	 * If status is 0, return without clearing.
+	 * status contains: HW status - masked interrupts
+	 * status1 contains: unhandled interrupts - masked interrupts
+	 * unmask_status contains: unhandled interrupts
+	 */
+	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
+		pr_debug("%s: status is 0\n", __func__);
+		wcd9xxx_unlock_sleep(wcd9xxx_res);
+		return IRQ_HANDLED;
+	}
+
+	/*
+	 * Copy status to unmask_status before masking; otherwise SW may fail
+	 * to clear a masked interrupt in a corner case.
+	 */
+	memcpy(unmask_status, status, sizeof(unmask_status));
 
 	/* Apply masking */
 	for (i = 0; i < num_irq_regs; i++)
@@ -340,6 +357,8 @@
 			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
 			status1[BIT_BYTE(irqdata.intr_num)] &=
 					~BYTE_BIT_MASK(irqdata.intr_num);
+			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
+					~BYTE_BIT_MASK(irqdata.intr_num);
 		}
 	}
 
@@ -361,12 +380,13 @@
 					   linebuf, sizeof(linebuf), false);
 			pr_warn("%s: status1 : %s\n", __func__, linebuf);
 		}
-
-		memset(status, 0xff, num_irq_regs);
-
+		/*
+		 * unmask_status contains unhandled interrupts, hence clear all
+		 * unhandled interrupts.
+		 */
 		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
 			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
-			status, num_irq_regs);
+			unmask_status, num_irq_regs);
 		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
 			regmap_write(wcd9xxx_res->wcd_core_regmap,
 				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0ac1cf7..e203ba6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -776,7 +776,7 @@
 
 config UID_SYS_STATS
 	bool "Per-UID statistics"
-	depends on PROFILING
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
 	help
 	  Per UID based cpu time statistics exported to /proc/uid_cputime
 	  Per UID based io statistics exported to /proc/uid_io
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index dd99b06..fa4fe02 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1496,8 +1496,6 @@
 	if ((rc = cxl_native_register_psl_err_irq(adapter)))
 		goto err;
 
-	/* Release the context lock as adapter is configured */
-	cxl_adapter_context_unlock(adapter);
 	return 0;
 
 err:
@@ -1596,6 +1594,9 @@
 	if ((rc = cxl_sysfs_adapter_add(adapter)))
 		goto err_put1;
 
+	/* Release the context lock as adapter is configured */
+	cxl_adapter_context_unlock(adapter);
+
 	return adapter;
 
 err_put1:
@@ -1778,7 +1779,7 @@
 {
 	struct cxl *adapter = pci_get_drvdata(pdev);
 	struct cxl_afu *afu;
-	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
 	int i;
 
 	/* At this point, we could still have an interrupt pending.
@@ -1883,16 +1884,26 @@
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
-		result = cxl_vphb_error_detected(afu, state);
-
-		/* Only continue if everyone agrees on NEED_RESET */
-		if (result != PCI_ERS_RESULT_NEED_RESET)
-			return result;
+		afu_result = cxl_vphb_error_detected(afu, state);
 
 		cxl_context_detach_all(afu);
 		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
 		pci_deconfigure_afu(afu);
+
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
 	}
+
+	/* Take the context lock so new contexts cannot be activated */
+	if (cxl_adapter_context_lock(adapter) != 0)
+		dev_warn(&adapter->dev,
+			 "Couldn't take context lock with %d active-contexts\n",
+			 atomic_read(&adapter->contexts_num));
+
 	cxl_deconfigure_adapter(adapter);
 
 	return result;
@@ -1911,6 +1922,13 @@
 	if (cxl_configure_adapter(adapter, pdev))
 		goto err;
 
+	/*
+	 * Unlock context activation for the adapter. Ideally this should be
+	 * done in cxl_pci_resume, but the cxlflash module tries to activate
+	 * the master context as part of its slot_reset callback.
+	 */
+	cxl_adapter_context_unlock(adapter);
+
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
 
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 06e0dc3..4c9fa8f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -143,7 +143,8 @@
 					list) {
 			if (addr >= region_elt->vaddr &&
 			addr < region_elt->vaddr + region_elt->len &&
-			addr + len <= region_elt->vaddr + region_elt->len)
+			addr + len <= region_elt->vaddr + region_elt->len &&
+			addr + len > addr)
 				pr_err("\t%s[%pK]:%pK, %ld --> %pK\n",
 					__func__, audio,
 					region_elt->vaddr,
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 877c4d1..c1857c7 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -1261,7 +1261,7 @@
 				atomic_read(&data->ioctl_count) <= 1)) {
 			pr_err("Interrupted from abort\n");
 			ret = -ERESTARTSYS;
-			break;
+			return ret;
 		}
 	}
 
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 127a052..871040e 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -50,7 +50,8 @@
 
 #define UID_STATE_TOTAL_CURR	2
 #define UID_STATE_TOTAL_LAST	3
-#define UID_STATE_SIZE		4
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5
 
 struct uid_entry {
 	uid_t uid;
@@ -215,35 +216,44 @@
 	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
 }
 
-static void add_uid_io_curr_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
 {
-	struct io_stats *io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
+	struct io_stats *io_slot = &uid_entry->io[slot];
 
-	io_curr->read_bytes += task->ioac.read_bytes;
-	io_curr->write_bytes += compute_write_bytes(task);
-	io_curr->rchar += task->ioac.rchar;
-	io_curr->wchar += task->ioac.wchar;
-	io_curr->fsync += task->ioac.syscfs;
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
 }
 
-static void clean_uid_io_last_stats(struct uid_entry *uid_entry,
-			struct task_struct *task)
+static void compute_uid_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
 {
-	struct io_stats *io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
+	io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
 
-	io_last->read_bytes -= task->ioac.read_bytes;
-	io_last->write_bytes -= compute_write_bytes(task);
-	io_last->rchar -= task->ioac.rchar;
-	io_last->wchar -= task->ioac.wchar;
-	io_last->fsync -= task->ioac.syscfs;
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
 }
 
 static void update_io_stats_all_locked(void)
 {
 	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 	unsigned long bkt;
 	uid_t uid;
@@ -258,70 +268,38 @@
 		uid_entry = find_or_register_uid(uid);
 		if (!uid_entry)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
 	hash_for_each(hash_table, bkt, uid_entry, hash) {
-		io_bucket = &uid_entry->io[uid_entry->state];
-		io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-		io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-		io_bucket->read_bytes +=
-			io_curr->read_bytes - io_last->read_bytes;
-		io_bucket->write_bytes +=
-			io_curr->write_bytes - io_last->write_bytes;
-		io_bucket->rchar += io_curr->rchar - io_last->rchar;
-		io_bucket->wchar += io_curr->wchar - io_last->wchar;
-		io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-		io_last->read_bytes = io_curr->read_bytes;
-		io_last->write_bytes = io_curr->write_bytes;
-		io_last->rchar = io_curr->rchar;
-		io_last->wchar = io_curr->wchar;
-		io_last->fsync = io_curr->fsync;
+		compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+					&uid_entry->io[UID_STATE_TOTAL_CURR],
+					&uid_entry->io[UID_STATE_TOTAL_LAST],
+					&uid_entry->io[UID_STATE_DEAD_TASKS]);
 	}
 }
 
-static void update_io_stats_uid_locked(uid_t target_uid)
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
 {
-	struct uid_entry *uid_entry;
 	struct task_struct *task, *temp;
-	struct io_stats *io_bucket, *io_curr, *io_last;
 	struct user_namespace *user_ns = current_user_ns();
 
-	uid_entry = find_or_register_uid(target_uid);
-	if (!uid_entry)
-		return;
-
 	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
 		sizeof(struct io_stats));
 
 	rcu_read_lock();
 	do_each_thread(temp, task) {
-		if (from_kuid_munged(user_ns, task_uid(task)) != target_uid)
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
 			continue;
-		add_uid_io_curr_stats(uid_entry, task);
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
 	} while_each_thread(temp, task);
 	rcu_read_unlock();
 
-	io_bucket = &uid_entry->io[uid_entry->state];
-	io_curr = &uid_entry->io[UID_STATE_TOTAL_CURR];
-	io_last = &uid_entry->io[UID_STATE_TOTAL_LAST];
-
-	io_bucket->read_bytes +=
-		io_curr->read_bytes - io_last->read_bytes;
-	io_bucket->write_bytes +=
-		io_curr->write_bytes - io_last->write_bytes;
-	io_bucket->rchar += io_curr->rchar - io_last->rchar;
-	io_bucket->wchar += io_curr->wchar - io_last->wchar;
-	io_bucket->fsync += io_curr->fsync - io_last->fsync;
-
-	io_last->read_bytes = io_curr->read_bytes;
-	io_last->write_bytes = io_curr->write_bytes;
-	io_last->rchar = io_curr->rchar;
-	io_last->wchar = io_curr->wchar;
-	io_last->fsync = io_curr->fsync;
+	compute_uid_io_bucket_stats(&uid_entry->io[uid_entry->state],
+				&uid_entry->io[UID_STATE_TOTAL_CURR],
+				&uid_entry->io[UID_STATE_TOTAL_LAST],
+				&uid_entry->io[UID_STATE_DEAD_TASKS]);
 }
 
 static int uid_io_show(struct seq_file *m, void *v)
@@ -406,7 +384,7 @@
 		return count;
 	}
 
-	update_io_stats_uid_locked(uid);
+	update_io_stats_uid_locked(uid_entry);
 
 	uid_entry->state = state;
 
@@ -444,8 +422,7 @@
 	uid_entry->utime += utime;
 	uid_entry->stime += stime;
 
-	update_io_stats_uid_locked(uid);
-	clean_uid_io_last_stats(uid_entry, task);
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
 
 exit:
 	rt_mutex_unlock(&uid_lock);
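The new UID_STATE_DEAD_TASKS slot changes the per-poll accounting to bucket += curr + dead - last, so I/O performed by tasks that exited between polls is no longer dropped. A self-contained model of that arithmetic follows; only read/write byte counters are kept for brevity, and the scenario in main() is illustrative.

#include <stdio.h>
#include <string.h>

struct io_stats {
	long read_bytes;
	long write_bytes;
};

/* Fold the current and dead-task totals into the active bucket, then
 * roll "last" forward and reset the dead-task accumulator. */
static void compute_bucket(struct io_stats *bucket, struct io_stats *curr,
			   struct io_stats *last, struct io_stats *dead)
{
	bucket->read_bytes += curr->read_bytes + dead->read_bytes -
			      last->read_bytes;
	bucket->write_bytes += curr->write_bytes + dead->write_bytes -
			       last->write_bytes;

	*last = *curr;
	memset(dead, 0, sizeof(*dead));
}

int main(void)
{
	struct io_stats bucket = {0}, curr = {0}, last = {0}, dead = {0};

	/* Poll 1: live tasks have read 100 bytes so far. */
	curr.read_bytes = 100;
	compute_bucket(&bucket, &curr, &last, &dead);

	/* Between polls the task reads 40 more bytes and exits; its full
	 * totals are added to the dead-task slot at exit time. */
	dead.read_bytes = 140;
	/* No live tasks remain for this UID at poll 2. */
	curr.read_bytes = 0;
	compute_bucket(&bucket, &curr, &last, &dead);

	printf("bucket read_bytes = %ld\n", bucket.read_bytes); /* 140 */
	return 0;
}

The bucket ends at 140 bytes, i.e. the 40 bytes read just before the task exited are still accounted, which the old clean_uid_io_last_stats() scheme could lose.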
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d8e9599..9ac6568 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1718,6 +1718,8 @@
 
 	/* We couldn't get a response from the card.  Give up. */
 	if (err) {
+		if (card->err_in_sdr104)
+			return ERR_RETRY;
 		/* Check if the card is removed */
 		if (mmc_detect_card_removed(card->host))
 			return ERR_NOMEDIUM;
@@ -2208,7 +2210,8 @@
 	     brq->data.error == -ETIMEDOUT ||
 	     brq->cmd.error == -EILSEQ ||
 	     brq->cmd.error == -EIO ||
-	     brq->cmd.error == -ETIMEDOUT))
+	     brq->cmd.error == -ETIMEDOUT ||
+	     brq->sbc.error))
 		card->err_in_sdr104 = true;
 
 	/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 790f191..1848cdf 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -456,6 +456,22 @@
 }
 EXPORT_SYMBOL(mmc_clk_update_freq);
 
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+	if (!host->card)
+		return;
+
+	if (host->sdr104_wa && mmc_card_sd(host->card) &&
+	    (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+	    !host->card->sdr104_blocked) {
+		pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+			mmc_hostname(host), __func__);
+		mmc_host_clear_sdr104(host);
+		mmc_hw_reset(host);
+		host->card->sdr104_blocked = true;
+	}
+}
+
 static int mmc_devfreq_set_target(struct device *dev,
 				unsigned long *freq, u32 devfreq_flags)
 {
@@ -507,6 +523,9 @@
 	if (abort)
 		goto out;
 
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		goto rel_host;
+
 	/*
 	 * In case we were able to claim host there is no need to
 	 * defer the frequency change. It will be done now
@@ -515,15 +534,18 @@
 
 	mmc_host_clk_hold(host);
 	err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
-	if (err && err != -EAGAIN)
+	if (err && err != -EAGAIN) {
 		pr_err("%s: clock scale to %lu failed with error %d\n",
 			mmc_hostname(host), *freq, err);
-	else
+		mmc_recovery_fallback_lower_speed(host);
+	} else {
 		pr_debug("%s: clock change to %lu finished successfully (%s)\n",
 			mmc_hostname(host), *freq, current->comm);
+	}
 
 
 	mmc_host_clk_release(host);
+rel_host:
 	mmc_release_host(host);
 out:
 	return err;
@@ -544,6 +566,9 @@
 	if (!host->clk_scaling.enable)
 		return;
 
+	if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+		return;
+
 	spin_lock_bh(&host->clk_scaling.lock);
 
 	if (host->clk_scaling.clk_scaling_in_progress ||
@@ -564,13 +589,15 @@
 
 	err = mmc_clk_update_freq(host, target_freq,
 		host->clk_scaling.state);
-	if (err && err != -EAGAIN)
+	if (err && err != -EAGAIN) {
 		pr_err("%s: failed on deferred scale clocks (%d)\n",
 			mmc_hostname(host), err);
-	else
+		mmc_recovery_fallback_lower_speed(host);
+	} else {
 		pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
 			mmc_hostname(host),
 			target_freq, current->comm);
+	}
 	host->clk_scaling.clk_scaling_in_progress = false;
 	atomic_dec(&host->clk_scaling.devfreq_abort);
 }
@@ -586,17 +613,39 @@
 		host->card->clk_scaling_lowest,
 		host->card->clk_scaling_highest);
 
+	/*
+	 * Create the frequency table and initialize it with default values.
+	 * Initialize it with platform specific frequencies if the frequency
+	 * table supplied by platform driver is present, otherwise initialize
+	 * it with min and max frequencies supported by the card.
+	 */
 	if (!clk_scaling->freq_table) {
-		pr_debug("%s: no frequency table defined -  setting default\n",
-			mmc_hostname(host));
+		if (clk_scaling->pltfm_freq_table_sz)
+			clk_scaling->freq_table_sz =
+				clk_scaling->pltfm_freq_table_sz;
+		else
+			clk_scaling->freq_table_sz = 2;
+
 		clk_scaling->freq_table = kzalloc(
-			2*sizeof(*(clk_scaling->freq_table)), GFP_KERNEL);
+			(clk_scaling->freq_table_sz *
+			sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
 		if (!clk_scaling->freq_table)
 			return -ENOMEM;
-		clk_scaling->freq_table[0] = host->card->clk_scaling_lowest;
-		clk_scaling->freq_table[1] = host->card->clk_scaling_highest;
-		clk_scaling->freq_table_sz = 2;
-		goto out;
+
+		if (clk_scaling->pltfm_freq_table) {
+			memcpy(clk_scaling->freq_table,
+				clk_scaling->pltfm_freq_table,
+				(clk_scaling->pltfm_freq_table_sz *
+				sizeof(*(clk_scaling->pltfm_freq_table))));
+		} else {
+			pr_debug("%s: no frequency table defined - setting default\n",
+				mmc_hostname(host));
+			clk_scaling->freq_table[0] =
+				host->card->clk_scaling_lowest;
+			clk_scaling->freq_table[1] =
+				host->card->clk_scaling_highest;
+			goto out;
+		}
 	}
 
 	if (host->card->clk_scaling_lowest >
@@ -813,7 +862,7 @@
 	devfreq_min_clk = host->clk_scaling.freq_table[0];
 
 	host->clk_scaling.curr_freq = devfreq_max_clk;
-	if (host->ios.clock < host->card->clk_scaling_highest)
+	if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
 		host->clk_scaling.curr_freq = devfreq_min_clk;
 
 	host->clk_scaling.clk_scaling_in_progress = false;
@@ -875,6 +924,10 @@
 
 	host->clk_scaling.devfreq = NULL;
 	atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+	kfree(host->clk_scaling.freq_table);
+	host->clk_scaling.freq_table = NULL;
+
 	pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
 
 	return 0;
@@ -1540,8 +1593,13 @@
 			}
 		}
 		if (!cmd->error || !cmd->retries ||
-		    mmc_card_removed(host->card))
+		    mmc_card_removed(host->card)) {
+			if (cmd->error && !cmd->retries &&
+			     cmd->opcode != MMC_SEND_STATUS &&
+			     cmd->opcode != MMC_SEND_TUNING_BLOCK)
+				mmc_recovery_fallback_lower_speed(host);
 			break;
+		}
 
 		mmc_retune_recheck(host);
 
@@ -2368,6 +2426,13 @@
 		WARN_ON(host->ios.clock);
 		/* This call will also set host->clk_gated to false */
 		__mmc_set_clock(host, host->clk_old);
+		/*
+		 * We have seen that the host controller's clock tuning
+		 * circuit may go out of sync if the controller clocks are
+		 * gated. To work around this issue, trigger retuning of the
+		 * tuning circuit after ungating the controller clocks.
+		 */
+		mmc_retune_needed(host);
 	}
 }
 
@@ -4189,12 +4254,18 @@
 	}
 
 	if (ret) {
-		mmc_card_set_removed(host->card);
-		if (host->card->sdr104_blocked) {
-			mmc_host_set_sdr104(host);
-			host->card->sdr104_blocked = false;
+		if (host->ops->get_cd && host->ops->get_cd(host)) {
+			mmc_recovery_fallback_lower_speed(host);
+			ret = 0;
+		} else {
+			mmc_card_set_removed(host->card);
+			if (host->card->sdr104_blocked) {
+				mmc_host_set_sdr104(host);
+				host->card->sdr104_blocked = false;
+			}
+			pr_debug("%s: card remove detected\n",
+					mmc_hostname(host));
 		}
-		pr_debug("%s: card remove detected\n", mmc_hostname(host));
 	}
 
 	return ret;
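The freq_table initialization added above prefers a platform-supplied table (pltfm_freq_table) and falls back to a two-entry {lowest, highest} table. A hedged user-space sketch of that selection logic is below; the clk_scaling field names mirror the driver, but the types and the values in main() are simplified stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct clk_scaling_model {
	unsigned long *freq_table;
	int freq_table_sz;
	unsigned long *pltfm_freq_table;
	int pltfm_freq_table_sz;
};

/* Build freq_table: copy the platform table when one was supplied,
 * otherwise fall back to a two-entry {lowest, highest} table. */
static int init_freq_table(struct clk_scaling_model *cs,
			   unsigned long lowest, unsigned long highest)
{
	if (cs->freq_table)
		return 0;	/* already initialized */

	cs->freq_table_sz = cs->pltfm_freq_table_sz ?
			    cs->pltfm_freq_table_sz : 2;
	cs->freq_table = calloc(cs->freq_table_sz, sizeof(*cs->freq_table));
	if (!cs->freq_table)
		return -1;

	if (cs->pltfm_freq_table)
		memcpy(cs->freq_table, cs->pltfm_freq_table,
		       cs->pltfm_freq_table_sz * sizeof(*cs->freq_table));
	else {
		cs->freq_table[0] = lowest;
		cs->freq_table[1] = highest;
	}
	return 0;
}

int main(void)
{
	unsigned long pltfm[] = { 50000000, 100000000, 200000000 };
	struct clk_scaling_model with = { .pltfm_freq_table = pltfm,
					  .pltfm_freq_table_sz = 3 };
	struct clk_scaling_model without = { 0 };
	int i;

	init_freq_table(&with, 400000, 200000000);
	init_freq_table(&without, 400000, 200000000);

	for (i = 0; i < with.freq_table_sz; i++)
		printf("platform[%d] = %lu\n", i, with.freq_table[i]);
	for (i = 0; i < without.freq_table_sz; i++)
		printf("default[%d]  = %lu\n", i, without.freq_table[i]);

	free(with.freq_table);
	free(without.freq_table);
	return 0;
}

This also explains the companion sdhci-msm change: the device tree values now land in pltfm_freq_table so the core owns (and frees) freq_table itself.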
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 0d0d56f..0c8ff86 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -338,10 +338,15 @@
 {
 	struct mmc_host *host = data;
 
-	if (host && host->ops && host->ops->force_err_irq) {
-		mmc_host_clk_hold(host);
+	if (host && host->card && host->ops &&
+			host->ops->force_err_irq) {
+		/*
+		 * To access the force error irq reg, we need to make
+		 * sure the host is powered up and host clock is ticking.
+		 */
+		mmc_get_card(host->card);
 		host->ops->force_err_irq(host, val);
-		mmc_host_clk_release(host);
+		mmc_put_card(host->card);
 	}
 
 	return 0;
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 7262466..50dd6bd 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -157,7 +157,8 @@
 };
 
 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
-	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
 	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
 	.ops = &sdhci_iproc_ops,
 };
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index fe62b69..a819b88 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1838,13 +1838,13 @@
 	}
 
 	if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
-			&msm_host->mmc->clk_scaling.freq_table,
-			&msm_host->mmc->clk_scaling.freq_table_sz, 0))
+			&msm_host->mmc->clk_scaling.pltfm_freq_table,
+			&msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
 		pr_debug("%s: no clock scaling frequencies were supplied\n",
 			dev_name(dev));
-	else if (!msm_host->mmc->clk_scaling.freq_table ||
-			!msm_host->mmc->clk_scaling.freq_table_sz)
-			dev_err(dev, "bad dts clock scaling frequencies\n");
+	else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+			!msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+		dev_err(dev, "bad dts clock scaling frequencies\n");
 
 	/*
 	 * Few hosts can support DDR52 mode at the same lower
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3bde96a..f222f8a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -138,6 +138,74 @@
 };
 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
 
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC, where the
+ * ECC bytes are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		oobregion->offset = 40;
+		break;
+	case 128:
+		oobregion->offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	oobregion->length = ecc->total;
+	if (oobregion->offset + oobregion->length > mtd->oobsize)
+		return -ERANGE;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ecc_offset = 0;
+
+	if (section < 0 || section > 1)
+		return -ERANGE;
+
+	switch (mtd->oobsize) {
+	case 64:
+		ecc_offset = 40;
+		break;
+	case 128:
+		ecc_offset = 80;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (section == 0) {
+		oobregion->offset = 2;
+		oobregion->length = ecc_offset - 2;
+	} else {
+		oobregion->offset = ecc_offset + ecc->total;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+	.ecc = nand_ooblayout_ecc_lp_hamming,
+	.free = nand_ooblayout_free_lp_hamming,
+};
+
 static int check_offs_len(struct mtd_info *mtd,
 					loff_t ofs, uint64_t len)
 {
@@ -4565,7 +4633,7 @@
 			break;
 		case 64:
 		case 128:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
 			break;
 		default:
 			WARN(1, "No oob scheme defined for oobsize %d\n",
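The nand_ooblayout_lp_hamming_ops handlers above pin the ECC bytes at offset 40 (64-byte OOB) or 80 (128-byte OOB), leaving free areas on either side. A small stand-alone sketch of that geometry, assuming ecc_total bytes of ECC; the example totals assume 3 bytes of Hamming ECC per 256-byte step and are illustrative.

#include <stdio.h>

/* Print the OOB layout used by the legacy large-page Hamming scheme:
 * bytes 0-1 reserved (bad block marker), a free area, ECC at a fixed
 * offset, then the remaining bytes free again. */
static int print_lp_hamming_layout(int oobsize, int ecc_total)
{
	int ecc_offset;

	switch (oobsize) {
	case 64:
		ecc_offset = 40;
		break;
	case 128:
		ecc_offset = 80;
		break;
	default:
		return -1;
	}

	if (ecc_offset + ecc_total > oobsize)
		return -1;

	printf("oobsize %d:\n", oobsize);
	printf("  free: [2, %d)\n", ecc_offset);
	printf("  ecc : [%d, %d)\n", ecc_offset, ecc_offset + ecc_total);
	printf("  free: [%d, %d)\n", ecc_offset + ecc_total, oobsize);
	return 0;
}

int main(void)
{
	print_lp_hamming_layout(64, 24);   /* e.g. 2048B page, 3B/256B ECC */
	print_lp_hamming_layout(128, 48);  /* e.g. 4096B page */
	return 0;
}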
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 5513bfd9..c178cb0d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1856,6 +1856,15 @@
 	nand_chip->ecc.priv	= NULL;
 	nand_set_flash_node(nand_chip, dev->of_node);
 
+	if (!mtd->name) {
+		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+					   "omap2-nand.%d", info->gpmc_cs);
+		if (!mtd->name) {
+			dev_err(&pdev->dev, "Failed to set MTD name\n");
+			return -ENOMEM;
+		}
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(nand_chip->IO_ADDR_R))
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 40a7c4a..af2f091 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -23,6 +23,11 @@
 #include <asm/sizes.h>
 #include <linux/platform_data/mtd-orion_nand.h>
 
+struct orion_nand_info {
+	struct nand_chip chip;
+	struct clk *clk;
+};
+
 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
 	struct nand_chip *nc = mtd_to_nand(mtd);
@@ -75,20 +80,21 @@
 
 static int __init orion_nand_probe(struct platform_device *pdev)
 {
+	struct orion_nand_info *info;
 	struct mtd_info *mtd;
 	struct nand_chip *nc;
 	struct orion_nand_data *board;
 	struct resource *res;
-	struct clk *clk;
 	void __iomem *io_base;
 	int ret = 0;
 	u32 val = 0;
 
-	nc = devm_kzalloc(&pdev->dev,
-			sizeof(struct nand_chip),
+	info = devm_kzalloc(&pdev->dev,
+			sizeof(struct orion_nand_info),
 			GFP_KERNEL);
-	if (!nc)
+	if (!info)
 		return -ENOMEM;
+	nc = &info->chip;
 	mtd = nand_to_mtd(nc);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -145,15 +151,13 @@
 	if (board->dev_ready)
 		nc->dev_ready = board->dev_ready;
 
-	platform_set_drvdata(pdev, mtd);
+	platform_set_drvdata(pdev, info);
 
 	/* Not all platforms can gate the clock, so it is not
 	   an error if the clock does not exists. */
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_prepare_enable(clk);
-		clk_put(clk);
-	}
+	info->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(info->clk))
+		clk_prepare_enable(info->clk);
 
 	if (nand_scan(mtd, 1)) {
 		ret = -ENXIO;
@@ -170,26 +174,22 @@
 	return 0;
 
 no_dev:
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return ret;
 }
 
 static int orion_nand_remove(struct platform_device *pdev)
 {
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct clk *clk;
+	struct orion_nand_info *info = platform_get_drvdata(pdev);
+	struct nand_chip *chip = &info->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
 
 	nand_release(mtd);
 
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
+	if (!IS_ERR(info->clk))
+		clk_disable_unprepare(info->clk);
 
 	return 0;
 }
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index edc70ff..6dcc42d 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2573,7 +2573,7 @@
 		return -1;
 
 	ad_info->aggregator_id = aggregator->aggregator_identifier;
-	ad_info->ports = aggregator->num_of_ports;
+	ad_info->ports = __agg_active_ports(aggregator);
 	ad_info->actor_key = aggregator->actor_oper_aggregator_key;
 	ad_info->partner_key = aggregator->partner_oper_aggregator_key;
 	ether_addr_copy(ad_info->partner_system,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 93aa293..9711ca4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5144,9 +5144,11 @@
 	struct be_adapter *adapter = netdev_priv(dev);
 	u8 l4_hdr = 0;
 
-	/* The code below restricts offload features for some tunneled packets.
+	/* The code below restricts offload features for some tunneled and
+	 * Q-in-Q packets.
 	 * Offload features for normal (non tunnel) packets are unchanged.
 	 */
+	features = vlan_features_check(skb, features);
 	if (!skb->encapsulation ||
 	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
 		return features;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3f51a44..cb45390 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -767,7 +767,7 @@
 	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 		       mlx5_command_str(msg_to_opcode(ent->in)),
 		       msg_to_opcode(ent->in));
-	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }
 
 static void cmd_work_handler(struct work_struct *work)
@@ -797,6 +797,7 @@
 	}
 
 	cmd->ent_arr[ent->idx] = ent;
+	set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 	lay = get_inst(cmd, ent->idx);
 	ent->lay = lay;
 	memset(lay, 0, sizeof(*lay));
@@ -818,6 +819,20 @@
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 
+	/* Skip sending command to fw if internal error */
+	if (pci_channel_offline(dev->pdev) ||
+	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		u8 status = 0;
+		u32 drv_synd;
+
+		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
+		MLX5_SET(mbox_out, ent->out, status, status);
+		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
+
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+		return;
+	}
+
 	/* ring doorbell after the descriptor is valid */
 	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 	wmb();
@@ -828,7 +843,7 @@
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
 	}
 }
 
@@ -872,7 +887,7 @@
 		wait_for_completion(&ent->done);
 	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
 		ent->ret = -ETIMEDOUT;
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 	}
 
 	err = ent->ret;
@@ -1369,7 +1384,7 @@
 	}
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -1389,6 +1404,19 @@
 			struct semaphore *sem;
 
 			ent = cmd->ent_arr[i];
+
+			/* if we already completed the command, ignore it */
+			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
+						&ent->state)) {
+				/* only real completion can free the cmd slot */
+				if (!forced) {
+					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
+						      ent->idx);
+					free_ent(cmd, ent->idx);
+				}
+				continue;
+			}
+
 			if (ent->callback)
 				cancel_delayed_work(&ent->cb_timeout_work);
 			if (ent->page_queue)
@@ -1411,7 +1439,10 @@
 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
 			}
-			free_ent(cmd, ent->idx);
+
+			/* only real completion will free the entry slot */
+			if (!forced)
+				free_ent(cmd, ent->idx);
 
 			if (ent->callback) {
 				ds = ent->ts2 - ent->ts1;
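The MLX5_CMD_ENT_STATE_PENDING_COMP bit is what keeps a forced completion (timeout or internal error) and a late real completion from both tearing down the same command slot. A minimal sketch of that guard follows; in the driver the test-and-clear is atomic (test_and_clear_bit()), here a plain bool is enough to show the control flow, and the struct is a model rather than the driver's command entry.

#include <stdbool.h>
#include <stdio.h>

struct cmd_ent_model {
	bool pending_comp;	/* MLX5_CMD_ENT_STATE_PENDING_COMP analogue */
	bool slot_freed;
};

static void complete_cmd(struct cmd_ent_model *ent, bool forced)
{
	if (!ent->pending_comp) {
		/* Command was already completed (e.g. forced on timeout).
		 * Only a real completion may free the hardware slot. */
		if (!forced) {
			puts("late real completion: free the slot now");
			ent->slot_freed = true;
		}
		return;
	}
	ent->pending_comp = false;

	/* Normal completion path; forced completions never free the slot. */
	if (!forced)
		ent->slot_freed = true;
	puts(forced ? "forced completion handled" : "real completion handled");
}

int main(void)
{
	struct cmd_ent_model ent = { .pending_comp = true };

	complete_cmd(&ent, true);	/* timeout path: forced completion */
	complete_cmd(&ent, false);	/* real EQ completion arrives later */
	printf("slot freed: %s\n", ent.slot_freed ? "yes" : "no");
	return 0;
}

Either order works: whichever completion runs second sees the cleared flag and only the real one releases the slot, which is why the "forced" flag is threaded through every mlx5_cmd_comp_handler() caller above.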
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 126cfeb..3744e2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -751,7 +751,6 @@
 	ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
 	ptys2ethtool_supported_link(supported, eth_proto_cap);
 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
-	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
 }
 
 static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
@@ -761,7 +760,7 @@
 	unsigned long *advertising = link_ksettings->link_modes.advertising;
 
 	ptys2ethtool_adver_link(advertising, eth_proto_cap);
-	if (tx_pause)
+	if (rx_pause)
 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
 	if (tx_pause ^ rx_pause)
 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
@@ -806,6 +805,8 @@
 	struct mlx5e_priv *priv    = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+	u32 rx_pause = 0;
+	u32 tx_pause = 0;
 	u32 eth_proto_cap;
 	u32 eth_proto_admin;
 	u32 eth_proto_lp;
@@ -828,11 +829,13 @@
 	an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
 	an_status        = MLX5_GET(ptys_reg, out, an_status);
 
+	mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
 
 	get_supported(eth_proto_cap, link_ksettings);
-	get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+	get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
 	get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
 
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index aaca090..f86e9ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -234,7 +234,7 @@
 			break;
 
 		case MLX5_EVENT_TYPE_CMD:
-			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
 			break;
 
 		case MLX5_EVENT_TYPE_PORT_CHANGE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 5bcf934..2115c8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -90,7 +90,7 @@
 	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 
 	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-	mlx5_cmd_comp_handler(dev, vector);
+	mlx5_cmd_comp_handler(dev, vector, true);
 	return;
 
 no_trig:
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 8716b8c..6f3c805 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@
          * are "42101001.sb" or "42101002.sb"
          */
         sprintf(stir421x_fw_name, "4210%4X.sb",
-                self->usbdev->descriptor.bcdDevice);
+		le16_to_cpu(self->usbdev->descriptor.bcdDevice));
         ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
         if (ret < 0)
                 return ret;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index c2dcf02..d6a541b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -240,34 +240,6 @@
 {
 	int err;
 
-	/* The Marvell PHY has an errata which requires
-	 * that certain registers get written in order
-	 * to restart autonegotiation */
-	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1d, 0x1f);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0x200c);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1d, 0x5);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0);
-	if (err < 0)
-		return err;
-
-	err = phy_write(phydev, 0x1e, 0x100);
-	if (err < 0)
-		return err;
-
 	err = marvell_set_polarity(phydev, phydev->mdix);
 	if (err < 0)
 		return err;
@@ -301,6 +273,42 @@
 	return 0;
 }
 
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	/* This Marvell PHY has an erratum which requires
+	 * that certain registers be written in order
+	 * to restart autonegotiation
+	 */
+	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x1f);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x200c);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x5);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x100);
+	if (err < 0)
+		return err;
+
+	return marvell_config_aneg(phydev);
+}
+
 static int m88e1111_config_aneg(struct phy_device *phydev)
 {
 	int err;
@@ -1491,7 +1499,7 @@
 		.probe = marvell_probe,
 		.flags = PHY_HAS_INTERRUPT,
 		.config_init = &marvell_config_init,
-		.config_aneg = &marvell_config_aneg,
+		.config_aneg = &m88e1101_config_aneg,
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 34d997c..2f260c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -897,6 +897,8 @@
 	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 51fc0c3..7ca9989 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1456,6 +1456,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	.ndo_busy_poll		= virtnet_busy_poll,
 #endif
+	.ndo_features_check	= passthru_features_check,
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 80ef486..ee02605 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -850,6 +850,7 @@
 
 static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -859,7 +860,7 @@
 {
 	struct net *net = dev_net(dev);
 
-	if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0)
+	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
 		skb = NULL;    /* kfree_skb(skb) handled by nf code */
 
 	return skb;
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index e1c338c..f15589c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -37,6 +37,7 @@
 	{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
 	{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
 	{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+	{ USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
 
 	{ USB_DEVICE(0x0cf3, 0x7015),
 	  .driver_info = AR9287_USB },  /* Atheros */
@@ -1218,6 +1219,9 @@
 	u8 bulk_out_ep;
 	int r;
 
+	if (iface_desc->desc.bNumEndpoints < 2)
+		return -ENODEV;
+
 	/* Find bulk out endpoint */
 	for (r = 1; r >= 0; r--) {
 		endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a83f8f6..9afd6f2 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <net/netlink.h>
 #include "wil6210.h"
 #include "wmi.h"
 #include "ftm.h"
@@ -55,6 +56,62 @@
 
 #define QCA_NL80211_VENDOR_ID	0x001374
 
+#define WIL_MAX_RF_SECTORS (128)
+#define WIL_CID_ALL (0xff)
+
+enum qca_wlan_vendor_attr_rf_sector {
+	QCA_ATTR_MAC_ADDR = 6,
+	QCA_ATTR_PAD = 13,
+	QCA_ATTR_TSF = 29,
+	QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
+	QCA_ATTR_DMG_RF_MODULE_MASK = 32,
+	QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
+	QCA_ATTR_DMG_RF_SECTOR_MAX,
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_type {
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
+	QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
+};
+
+enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
+	QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+
+	/* keep last */
+	QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
+	QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
+	QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
+};
+
+static const struct
+nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
+	[QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
+	[QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
+	[QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
+	[QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
+};
+
+static const struct
+nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
+	[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
+};
+
 enum qca_nl80211_vendor_subcmds {
 	QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128,
 	QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129,
@@ -65,8 +122,25 @@
 	QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134,
 	QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135,
 	QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
+	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
 };
 
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len);
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len);
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len);
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len);
+
 /* vendor specific commands */
 static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
 	{
@@ -111,6 +185,36 @@
 			 WIPHY_VENDOR_CMD_NEED_RUNNING,
 		.doit = wil_aoa_abort_measurement
 	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_get_cfg
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_set_cfg
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd =
+			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_get_selected
+	},
+	{
+		.info.vendor_id = QCA_NL80211_VENDOR_ID,
+		.info.subcmd =
+			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
+		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+			 WIPHY_VENDOR_CMD_NEED_RUNNING,
+		.doit = wil_rf_sector_set_selected
+	},
 };
 
 /* vendor specific events */
@@ -1665,6 +1769,42 @@
 	return wil_ps_update(wil, ps_profile);
 }
 
+static int wil_cfg80211_suspend(struct wiphy *wiphy,
+				struct cfg80211_wowlan *wow)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+
+	/* Setting the wakeup trigger based on wow is TBD */
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 0;
+	}
+
+	rc = wil_can_suspend(wil, false);
+	if (rc)
+		goto out;
+
+	wil_dbg_pm(wil, "suspending\n");
+
+	wil_p2p_stop_discovery(wil);
+
+	wil_abort_scan(wil, true);
+
+out:
+	return rc;
+}
+
+static int wil_cfg80211_resume(struct wiphy *wiphy)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	wil_dbg_pm(wil, "resuming\n");
+
+	return 0;
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
 	.add_virtual_intf = wil_cfg80211_add_iface,
 	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1696,6 +1836,8 @@
 	.start_p2p_device = wil_cfg80211_start_p2p_device,
 	.stop_p2p_device = wil_cfg80211_stop_p2p_device,
 	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
+	.suspend = wil_cfg80211_suspend,
+	.resume = wil_cfg80211_resume,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
@@ -1799,3 +1941,451 @@
 		kfree(p2p_wdev);
 	}
 }
+
+static int wil_rf_sector_status_to_rc(u8 status)
+{
+	switch (status) {
+	case WMI_RF_SECTOR_STATUS_SUCCESS:
+		return 0;
+	case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
+		return -EINVAL;
+	case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
+		return -EAGAIN;
+	case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
+		return -EOPNOTSUPP;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u16 sector_index;
+	u8 sector_type;
+	u32 rf_modules_vec;
+	struct wmi_get_rf_sector_params_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_get_rf_sector_params_done_event evt;
+	} __packed reply;
+	struct sk_buff *msg;
+	struct nlattr *nl_cfgs, *nl_cfg;
+	u32 i;
+	struct wmi_rf_sector_info *si;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+	    !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	rf_modules_vec = nla_get_u32(
+		tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
+	if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
+		wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
+		return -EINVAL;
+	}
+
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+		      WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	if (reply.evt.status) {
+		wil_err(wil, "get rf sector cfg failed with status %d\n",
+			reply.evt.status);
+		return wil_rf_sector_status_to_rc(reply.evt.status);
+	}
+
+	msg = cfg80211_vendor_cmd_alloc_reply_skb(
+		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+	if (!msg)
+		return -ENOMEM;
+
+	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+			      le64_to_cpu(reply.evt.tsf),
+			      QCA_ATTR_PAD))
+		goto nla_put_failure;
+
+	nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+	if (!nl_cfgs)
+		goto nla_put_failure;
+	for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
+		if (!(rf_modules_vec & BIT(i)))
+			continue;
+		nl_cfg = nla_nest_start(msg, i);
+		if (!nl_cfg)
+			goto nla_put_failure;
+		si = &reply.evt.sectors_info[i];
+		if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
+			       i) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
+				le32_to_cpu(si->etype0)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
+				le32_to_cpu(si->etype1)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
+				le32_to_cpu(si->etype2)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
+				le32_to_cpu(si->psh_hi)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
+				le32_to_cpu(si->psh_lo)) ||
+		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
+				le32_to_cpu(si->dtype_swch_off)))
+			goto nla_put_failure;
+		nla_nest_end(msg, nl_cfg);
+	}
+
+	nla_nest_end(msg, nl_cfgs);
+	rc = cfg80211_vendor_cmd_reply(msg);
+	return rc;
+nla_put_failure:
+	kfree_skb(msg);
+	return -ENOBUFS;
+}
+
+static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
+				 struct wireless_dev *wdev,
+				 const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc, tmp;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
+	u16 sector_index, rf_module_index;
+	u8 sector_type;
+	u32 rf_modules_vec = 0;
+	struct wmi_set_rf_sector_params_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_set_rf_sector_params_done_event evt;
+	} __packed reply;
+	struct nlattr *nl_cfg;
+	struct wmi_rf_sector_info *si;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
+			    tmp) {
+		rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+				      nl_cfg, wil_rf_sector_cfg_policy);
+		if (rc) {
+			wil_err(wil, "invalid sector cfg\n");
+			return -EINVAL;
+		}
+
+		if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
+		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
+			wil_err(wil, "missing cfg params\n");
+			return -EINVAL;
+		}
+
+		rf_module_index = nla_get_u8(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
+		if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
+			wil_err(wil, "invalid RF module index %d\n",
+				rf_module_index);
+			return -EINVAL;
+		}
+		rf_modules_vec |= BIT(rf_module_index);
+		si = &cmd.sectors_info[rf_module_index];
+		si->etype0 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
+		si->etype1 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
+		si->etype2 = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
+		si->psh_hi = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
+		si->psh_lo = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
+		si->dtype_swch_off = cpu_to_le32(nla_get_u32(
+			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
+	}
+
+	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
+		      WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_get_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u8 sector_type, mac_addr[ETH_ALEN];
+	int cid = 0;
+	struct wmi_get_selected_rf_sector_index_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_get_selected_rf_sector_index_done_event evt;
+	} __packed reply;
+	struct sk_buff *msg;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	if (tb[QCA_ATTR_MAC_ADDR]) {
+		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+		cid = wil_find_cid(wil, mac_addr);
+		if (cid < 0) {
+			wil_err(wil, "invalid MAC address %pM\n", mac_addr);
+			return -ENOENT;
+		}
+	} else {
+		if (test_bit(wil_status_fwconnected, wil->status)) {
+			wil_err(wil, "must specify MAC address when connected\n");
+			return -EINVAL;
+		}
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.cid = (u8)cid;
+	cmd.sector_type = sector_type;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
+		      &cmd, sizeof(cmd),
+		      WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	if (reply.evt.status) {
+		wil_err(wil, "get rf selected sector cfg failed with status %d\n",
+			reply.evt.status);
+		return wil_rf_sector_status_to_rc(reply.evt.status);
+	}
+
+	msg = cfg80211_vendor_cmd_alloc_reply_skb(
+		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
+	if (!msg)
+		return -ENOMEM;
+
+	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
+			      le64_to_cpu(reply.evt.tsf),
+			      QCA_ATTR_PAD) ||
+	    nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
+			le16_to_cpu(reply.evt.sector_idx)))
+		goto nla_put_failure;
+
+	rc = cfg80211_vendor_cmd_reply(msg);
+	return rc;
+nla_put_failure:
+	kfree_skb(msg);
+	return -ENOBUFS;
+}
+
+static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
+					  u16 sector_index,
+					  u8 sector_type, u8 cid)
+{
+	struct wmi_set_selected_rf_sector_index_cmd cmd;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_set_selected_rf_sector_index_done_event evt;
+	} __packed reply;
+	int rc;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sector_idx = cpu_to_le16(sector_index);
+	cmd.sector_type = sector_type;
+	cmd.cid = (u8)cid;
+	memset(&reply, 0, sizeof(reply));
+	rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
+		      &cmd, sizeof(cmd),
+		      WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
+		      &reply, sizeof(reply),
+		      500);
+	if (rc)
+		return rc;
+	return wil_rf_sector_status_to_rc(reply.evt.status);
+}
+
+static int wil_rf_sector_set_selected(struct wiphy *wiphy,
+				      struct wireless_dev *wdev,
+				      const void *data, int data_len)
+{
+	struct wil6210_priv *wil = wdev_to_wil(wdev);
+	int rc;
+	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
+	u16 sector_index;
+	u8 sector_type, mac_addr[ETH_ALEN], i;
+	int cid = 0;
+
+	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
+		return -EOPNOTSUPP;
+
+	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
+		       wil_rf_sector_policy);
+	if (rc) {
+		wil_err(wil, "Invalid rf sector ATTR\n");
+		return rc;
+	}
+
+	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
+	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
+		wil_err(wil, "Invalid rf sector spec\n");
+		return -EINVAL;
+	}
+
+	sector_index = nla_get_u16(
+		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
+	if (sector_index >= WIL_MAX_RF_SECTORS &&
+	    sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+		wil_err(wil, "Invalid sector index %d\n", sector_index);
+		return -EINVAL;
+	}
+
+	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
+	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
+		wil_err(wil, "Invalid sector type %d\n", sector_type);
+		return -EINVAL;
+	}
+
+	if (tb[QCA_ATTR_MAC_ADDR]) {
+		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
+		if (!is_broadcast_ether_addr(mac_addr)) {
+			cid = wil_find_cid(wil, mac_addr);
+			if (cid < 0) {
+				wil_err(wil, "invalid MAC address %pM\n",
+					mac_addr);
+				return -ENOENT;
+			}
+		} else {
+			if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
+				wil_err(wil, "broadcast MAC valid only with unlocking\n");
+				return -EINVAL;
+			}
+			cid = -1;
+		}
+	} else {
+		if (test_bit(wil_status_fwconnected, wil->status)) {
+			wil_err(wil, "must specify MAC address when connected\n");
+			return -EINVAL;
+		}
+		/* otherwise, using cid=0 for unassociated station */
+	}
+
+	if (cid >= 0) {
+		rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
+						    sector_type, cid);
+	} else {
+		/* unlock all cids */
+		rc = wil_rf_sector_wmi_set_selected(
+			wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
+			WIL_CID_ALL);
+		if (rc == -EINVAL) {
+			for (i = 0; i < WIL6210_MAX_CID; i++) {
+				rc = wil_rf_sector_wmi_set_selected(
+					wil, WMI_INVALID_RF_SECTOR_INDEX,
+					sector_type, i);
+				/* the FW will silently ignore and return
+				 * success for unused cid, so abort the loop
+				 * on any other error
+				 */
+				if (rc) {
+					wil_err(wil, "unlock cid %d failed with status %d\n",
+						i, rc);
+					break;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 5648ebb..0ac657d 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -509,6 +509,10 @@
 	void *buf;
 	size_t ret;
 
+	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
+	    test_bit(wil_status_suspended, wil_blob->wil->status))
+		return 0;
+
 	if (pos < 0)
 		return -EINVAL;
 
@@ -1604,6 +1608,49 @@
 	.llseek		= seq_lseek,
 };
 
+/*---------suspend_stats---------*/
+static ssize_t wil_write_suspend_stats(struct file *file,
+				       const char __user *buf,
+				       size_t len, loff_t *ppos)
+{
+	struct wil6210_priv *wil = file->private_data;
+
+	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));
+
+	return len;
+}
+
+static ssize_t wil_read_suspend_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct wil6210_priv *wil = file->private_data;
+	static char text[400];
+	int n;
+
+	n = snprintf(text, sizeof(text),
+		     "Suspend statistics:\n"
+		     "successful suspends:%ld failed suspends:%ld\n"
+		     "successful resumes:%ld failed resumes:%ld\n"
+		     "rejected by host:%ld rejected by device:%ld\n",
+		     wil->suspend_stats.successful_suspends,
+		     wil->suspend_stats.failed_suspends,
+		     wil->suspend_stats.successful_resumes,
+		     wil->suspend_stats.failed_resumes,
+		     wil->suspend_stats.rejected_by_host,
+		     wil->suspend_stats.rejected_by_device);
+
+	n = min_t(int, n, sizeof(text));
+
+	return simple_read_from_buffer(user_buf, count, ppos, text, n);
+}
+
+static const struct file_operations fops_suspend_stats = {
+	.read = wil_read_suspend_stats,
+	.write = wil_write_suspend_stats,
+	.open  = simple_open,
+};
+
 /*----------------*/
 static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
 				       struct dentry *dbg)
@@ -1656,6 +1703,7 @@
 	{"led_blink_time",	0644,	&fops_led_blink_time},
 	{"fw_capabilities",	0444,	&fops_fw_capabilities},
 	{"fw_version",	0444,		&fops_fw_version},
+	{"suspend_stats",	0644,	&fops_suspend_stats},
 };
 
 static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1702,6 +1750,7 @@
 	WIL_FIELD(discovery_mode, 0644,	doff_u8),
 	WIL_FIELD(chip_revision, 0444,	doff_u8),
 	WIL_FIELD(abft_len, 0644,		doff_u8),
+	WIL_FIELD(wakeup_trigger, 0644,		doff_u8),
 	{},
 };
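The suspend_stats read handler added above follows the usual debugfs pattern: format into a bounded on-stack buffer, clamp the length, and let a position-aware copy helper serve partial reads. A user-space sketch of the same pattern; read_from_buffer() below is only a stand-in for simple_read_from_buffer(), not the kernel API, and the counter values are made up.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Illustrative stand-in for simple_read_from_buffer(): copy at most
 * "count" bytes from "src" starting at *ppos, advancing *ppos. */
static ssize_t read_from_buffer(char *dst, size_t count, off_t *ppos,
				const char *src, size_t available)
{
	size_t n;

	if (*ppos >= (off_t)available)
		return 0;
	n = available - *ppos;
	if (n > count)
		n = count;
	memcpy(dst, src + *ppos, n);
	*ppos += n;
	return n;
}

int main(void)
{
	char text[400];
	char out[16];
	off_t pos = 0;
	int n;
	ssize_t r;

	n = snprintf(text, sizeof(text),
		     "successful suspends:%ld failed suspends:%ld\n", 3L, 1L);
	if (n > (int)sizeof(text))
		n = sizeof(text);

	/* Read the formatted stats in small chunks, like cat would. */
	while ((r = read_from_buffer(out, sizeof(out), &pos, text, n)) > 0)
		fwrite(out, 1, r, stdout);
	return 0;
}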
 
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index cab1e5c..cad8a95c4 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -467,6 +467,12 @@
 
 	wil6210_unmask_irq_pseudo(wil);
 
+	if (wil->suspend_resp_rcvd) {
+		wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
+		wil->suspend_resp_comp = true;
+		wake_up_interruptible(&wil->wq);
+	}
+
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index bbdd232..f8d2c20 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -54,7 +54,7 @@
 	}
 
 	off = a - wil->csr;
-	if (size >= WIL6210_MEM_SIZE - off) {
+	if (size >= wil->bar_size - off) {
 		wil_err(wil, "Requested block does not fit into memory: "
 			"off = 0x%08x size = 0x%08x\n", off, size);
 		return NULL;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1fc4580..aff8b1b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -579,6 +579,9 @@
 
 	wil->ps_profile =  WMI_PS_PROFILE_TYPE_DEFAULT;
 
+	wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
+			      WMI_WAKEUP_TRIGGER_BCAST;
+
 	return 0;
 
 out_wmi_wq:
@@ -589,8 +592,10 @@
 
 void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
 {
-	if (wil->platform_ops.bus_request)
+	if (wil->platform_ops.bus_request) {
+		wil->bus_request_kbps = kbps;
 		wil->platform_ops.bus_request(wil->platform_handle, kbps);
+	}
 }
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index d472e13..cf3fadc 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -26,6 +26,10 @@
 module_param(use_msi, bool, 0444);
 MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
+static bool ftm_mode;
+module_param(ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
+
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_notify(struct notifier_block *notify_block,
@@ -36,13 +40,15 @@
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
+	const char *wil_fw_name;
 	u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 	u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
 			    RGF_USER_REVISION_ID_MASK);
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 	bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
-	wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+	wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
+			   WIL_FW_NAME_DEFAULT;
 	wil->chip_revision = chip_revision;
 
 	switch (jtag_id) {
@@ -51,9 +57,11 @@
 		case REVISION_ID_SPARROW_D0:
 			wil->hw_name = "Sparrow D0";
 			wil->hw_version = HW_VER_SPARROW_D0;
-			if (wil_fw_verify_file_exists(wil,
-						      WIL_FW_NAME_SPARROW_PLUS))
-				wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+			wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
+				      WIL_FW_NAME_SPARROW_PLUS;
+
+			if (wil_fw_verify_file_exists(wil, wil_fw_name))
+				wil->wil_fw_name = wil_fw_name;
 			break;
 		case REVISION_ID_SPARROW_B0:
 			wil->hw_name = "Sparrow B0";
@@ -104,8 +112,6 @@
 
 	wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
 
-	pdev->msi_enabled = 0;
-
 	pci_set_master(pdev);
 
 	wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
@@ -192,16 +198,18 @@
 		.ramdump = wil_platform_rop_ramdump,
 		.fw_recovery = wil_platform_rop_fw_recovery,
 	};
+	u32 bar_size = pci_resource_len(pdev, 0);
 
 	/* check HW */
 	dev_info(&pdev->dev, WIL_NAME
-		 " device found [%04x:%04x] (rev %x)\n",
-		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
+		 " device found [%04x:%04x] (rev %x) bar size 0x%x\n",
+		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
+		 bar_size);
 
-	if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
-		dev_err(&pdev->dev, "Not " WIL_NAME "? "
-			"BAR0 size is %lu while expecting %lu\n",
-			(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
+	if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
+	    (bar_size > WIL6210_MAX_MEM_SIZE)) {
+		dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
+			bar_size);
 		return -ENODEV;
 	}
 
@@ -214,6 +222,7 @@
 
 	wil->pdev = pdev;
 	pci_set_drvdata(pdev, wil);
+	wil->bar_size = bar_size;
 	/* rollback to if_free */
 
 	wil->platform_handle =
@@ -241,7 +250,7 @@
 	}
 
 	rc = pci_enable_device(pdev);
-	if (rc) {
+	if (rc && pdev->msi_enabled == 0) {
 		wil_err(wil,
 			"pci_enable_device failed, retry with MSI only\n");
 		/* Work around for platforms that can't allocate IRQ:
@@ -256,6 +265,7 @@
 		goto err_plat;
 	}
 	/* rollback to err_disable_pdev */
+	pci_set_power_state(pdev, PCI_D0);
 
 	rc = pci_request_region(pdev, 0, WIL_NAME);
 	if (rc) {
@@ -276,6 +286,15 @@
 	wil_set_capabilities(wil);
 	wil6210_clear_irq(wil);
 
+	wil->keep_radio_on_during_sleep =
+		wil->platform_ops.keep_radio_on_during_sleep &&
+		wil->platform_ops.keep_radio_on_during_sleep(
+			wil->platform_handle) &&
+		test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
+
+	wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
+		 wil->keep_radio_on_during_sleep);
+
 	/* FW should raise IRQ when ready */
 	rc = wil_if_pcie_enable(wil);
 	if (rc) {
@@ -375,15 +394,16 @@
 		goto out;
 
 	rc = wil_suspend(wil, is_runtime);
-	if (rc)
-		goto out;
+	if (!rc) {
+		wil->suspend_stats.successful_suspends++;
 
-	/* TODO: how do I bring card in low power state? */
-
-	/* disable bus mastering */
-	pci_clear_master(pdev);
-	/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
-
+		/* If the platform device supports keep_radio_on_during_sleep,
+		 * it will control the PCIe master
+		 */
+		if (!wil->keep_radio_on_during_sleep)
+			/* disable bus mastering */
+			pci_clear_master(pdev);
+	}
 out:
 	return rc;
 }
@@ -396,12 +416,21 @@
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
-	/* allow master */
-	pci_set_master(pdev);
-
+	/* If the platform device supports keep_radio_on_during_sleep,
+	 * it will control the PCIe master
+	 */
+	if (!wil->keep_radio_on_during_sleep)
+		/* allow master */
+		pci_set_master(pdev);
 	rc = wil_resume(wil, is_runtime);
-	if (rc)
-		pci_clear_master(pdev);
+	if (rc) {
+		wil_err(wil, "device failed to resume (%d)\n", rc);
+		wil->suspend_stats.failed_resumes++;
+		if (!wil->keep_radio_on_during_sleep)
+			pci_clear_master(pdev);
+	} else {
+		wil->suspend_stats.successful_resumes++;
+	}
 
 	return rc;
 }
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 2ae4fe8..ce1f384 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -15,6 +15,7 @@
  */
 
 #include "wil6210.h"
+#include <linux/jiffies.h>
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
 {
@@ -61,20 +62,170 @@
 	wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
 		   is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
 
+	if (rc)
+		wil->suspend_stats.rejected_by_host++;
+
 	return rc;
 }
 
-int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
+{
+	int rc = 0;
+
+	/* wil_status_resuming will be cleared when getting
+	 * WMI_TRAFFIC_RESUME_EVENTID
+	 */
+	set_bit(wil_status_resuming, wil->status);
+	clear_bit(wil_status_suspended, wil->status);
+	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+	wil_unmask_irq(wil);
+
+	wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
+
+	/* Send WMI resume request to the device */
+	rc = wmi_resume(wil);
+	if (rc) {
+		wil_err(wil, "device failed to resume (%d), resetting\n", rc);
+		rc = wil_down(wil);
+		if (rc) {
+			wil_err(wil, "wil_down failed (%d)\n", rc);
+			goto out;
+		}
+		rc = wil_up(wil);
+		if (rc) {
+			wil_err(wil, "wil_up failed (%d)\n", rc);
+			goto out;
+		}
+	}
+
+	/* Wake all queues */
+	if (test_bit(wil_status_fwconnected, wil->status))
+		wil_update_net_queues_bh(wil, NULL, false);
+
+out:
+	if (rc)
+		set_bit(wil_status_suspended, wil->status);
+	return rc;
+}
+
+static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
+{
+	int rc = 0;
+	unsigned long start, data_comp_to;
+
+	wil_dbg_pm(wil, "suspend keep radio on\n");
+
+	/* Prevent handling of new tx and wmi commands */
+	set_bit(wil_status_suspending, wil->status);
+	wil_update_net_queues_bh(wil, NULL, true);
+
+	if (!wil_is_tx_idle(wil)) {
+		wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
+		wil->suspend_stats.rejected_by_host++;
+		goto reject_suspend;
+	}
+
+	if (!wil_is_rx_idle(wil)) {
+		wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
+		wil->suspend_stats.rejected_by_host++;
+		goto reject_suspend;
+	}
+
+	if (!wil_is_wmi_idle(wil)) {
+		wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
+		wil->suspend_stats.rejected_by_host++;
+		goto reject_suspend;
+	}
+
+	/* Send WMI suspend request to the device */
+	rc = wmi_suspend(wil);
+	if (rc) {
+		wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
+			   rc);
+		goto reject_suspend;
+	}
+
+	/* Wait for completion of the pending RX packets */
+	start = jiffies;
+	data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
+	if (test_bit(wil_status_napi_en, wil->status)) {
+		while (!wil_is_rx_idle(wil)) {
+			if (time_after(jiffies, data_comp_to)) {
+				if (wil_is_rx_idle(wil))
+					break;
+				wil_err(wil,
+					"TO waiting for idle RX, suspend failed\n");
+				wil->suspend_stats.failed_suspends++;
+				goto resume_after_fail;
+			}
+			wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
+			napi_synchronize(&wil->napi_rx);
+			msleep(20);
+		}
+	}
+
+	/* In case of pending WMI events, reject the suspend
+	 * and resume the device.
+	 * This can happen if the device sent the WMI events before
+	 * approving the suspend.
+	 */
+	if (!wil_is_wmi_idle(wil)) {
+		wil_err(wil, "suspend failed due to pending WMI events\n");
+		wil->suspend_stats.failed_suspends++;
+		goto resume_after_fail;
+	}
+
+	wil_mask_irq(wil);
+
+	/* Disable device reset on PERST */
+	wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+
+	if (wil->platform_ops.suspend) {
+		rc = wil->platform_ops.suspend(wil->platform_handle, true);
+		if (rc) {
+			wil_err(wil, "platform device failed to suspend (%d)\n",
+				rc);
+			wil->suspend_stats.failed_suspends++;
+			wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+			wil_unmask_irq(wil);
+			goto resume_after_fail;
+		}
+	}
+
+	/* Save the current bus request to return to the same in resume */
+	wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
+	wil6210_bus_request(wil, 0);
+
+	set_bit(wil_status_suspended, wil->status);
+	clear_bit(wil_status_suspending, wil->status);
+
+	return rc;
+
+resume_after_fail:
+	set_bit(wil_status_resuming, wil->status);
+	clear_bit(wil_status_suspending, wil->status);
+	rc = wmi_resume(wil);
+	/* if resume succeeded, reject the suspend */
+	if (!rc) {
+		rc = -EBUSY;
+		if (test_bit(wil_status_fwconnected, wil->status))
+			wil_update_net_queues_bh(wil, NULL, false);
+	}
+	return rc;
+
+reject_suspend:
+	clear_bit(wil_status_suspending, wil->status);
+	if (test_bit(wil_status_fwconnected, wil->status))
+		wil_update_net_queues_bh(wil, NULL, false);
+	return -EBUSY;
+}
+
+static int wil_suspend_radio_off(struct wil6210_priv *wil)
 {
 	int rc = 0;
 	struct net_device *ndev = wil_to_ndev(wil);
 
-	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
-
-	if (test_bit(wil_status_suspended, wil->status)) {
-		wil_dbg_pm(wil, "trying to suspend while suspended\n");
-		return 0;
-	}
+	wil_dbg_pm(wil, "suspend radio off\n");
 
 	/* if netif up, hardware is alive, shut it down */
 	if (ndev->flags & IFF_UP) {
@@ -90,7 +241,7 @@
 	wil_disable_irq(wil);
 
 	if (wil->platform_ops.suspend) {
-		rc = wil->platform_ops.suspend(wil->platform_handle);
+		rc = wil->platform_ops.suspend(wil->platform_handle, false);
 		if (rc) {
 			wil_enable_irq(wil);
 			goto out;
@@ -100,6 +251,50 @@
 	set_bit(wil_status_suspended, wil->status);
 
 out:
+	wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
+
+	return rc;
+}
+
+static int wil_resume_radio_off(struct wil6210_priv *wil)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+
+	wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+	wil_enable_irq(wil);
+	/* if netif up, bring hardware up
+	 * During open(), IFF_UP is set after the actual device method
+	 * invocation. This prevents a recursive call to wil_up().
+	 * wil_status_suspended will be cleared in wil_reset
+	 */
+	if (ndev->flags & IFF_UP)
+		rc = wil_up(wil);
+	else
+		clear_bit(wil_status_suspended, wil->status);
+
+	return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
+
+	wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
+
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_dbg_pm(wil, "trying to suspend while suspended\n");
+		return 0;
+	}
+
+	if (!keep_radio_on)
+		rc = wil_suspend_radio_off(wil);
+	else
+		rc = wil_suspend_keep_radio_on(wil);
+
 	wil_dbg_pm(wil, "suspend: %s => %d\n",
 		   is_runtime ? "runtime" : "system", rc);
 
@@ -110,29 +305,24 @@
 {
 	int rc = 0;
 	struct net_device *ndev = wil_to_ndev(wil);
+	bool keep_radio_on = ndev->flags & IFF_UP &&
+			     wil->keep_radio_on_during_sleep;
 
 	wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
 	if (wil->platform_ops.resume) {
-		rc = wil->platform_ops.resume(wil->platform_handle);
+		rc = wil->platform_ops.resume(wil->platform_handle,
+					      keep_radio_on);
 		if (rc) {
 			wil_err(wil, "platform_ops.resume : %d\n", rc);
 			goto out;
 		}
 	}
 
-	wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
-	wil_enable_irq(wil);
-
-	/* if netif up, bring hardware up
-	 * During open(), IFF_UP set after actual device method
-	 * invocation. This prevent recursive call to wil_up().
-	 * wil_status_suspended will be cleared in wil_reset
-	 */
-	if (ndev->flags & IFF_UP)
-		rc = wil_up(wil);
+	if (keep_radio_on)
+		rc = wil_resume_keep_radio_on(wil);
 	else
-		clear_bit(wil_status_suspended, wil->status);
+		rc = wil_resume_radio_off(wil);
 
 out:
 	wil_dbg_pm(wil, "resume: %s => %d\n",
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 35bbf3a..8f1e79b4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -104,6 +104,51 @@
 	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
 }
 
+/* returns true when all tx vrings are empty */
+bool wil_is_tx_idle(struct wil6210_priv *wil)
+{
+	int i;
+	unsigned long data_comp_to;
+
+	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+		struct vring *vring = &wil->vring_tx[i];
+		int vring_index = vring - wil->vring_tx;
+		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+
+		spin_lock(&txdata->lock);
+
+		if (!vring->va || !txdata->enabled) {
+			spin_unlock(&txdata->lock);
+			continue;
+		}
+
+		data_comp_to = jiffies + msecs_to_jiffies(
+					WIL_DATA_COMPLETION_TO_MS);
+		if (test_bit(wil_status_napi_en, wil->status)) {
+			while (!wil_vring_is_empty(vring)) {
+				if (time_after(jiffies, data_comp_to)) {
+					wil_dbg_pm(wil,
+						   "TO waiting for idle tx\n");
+					spin_unlock(&txdata->lock);
+					return false;
+				}
+				wil_dbg_ratelimited(wil,
+						    "tx vring is not empty -> NAPI\n");
+				spin_unlock(&txdata->lock);
+				napi_synchronize(&wil->napi_tx);
+				msleep(20);
+				spin_lock(&txdata->lock);
+				if (!vring->va || !txdata->enabled)
+					break;
+			}
+		}
+
+		spin_unlock(&txdata->lock);
+	}
+
+	return true;
+}
+
 /* wil_val_in_range - check if value in [min,max) */
 static inline bool wil_val_in_range(int val, int min, int max)
 {
@@ -406,6 +451,18 @@
 	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
 }
 
+bool wil_is_rx_idle(struct wil6210_priv *wil)
+{
+	struct vring_rx_desc *_d;
+	struct vring *vring = &wil->vring_rx;
+
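+	/* an RX frame is pending if HW marked the descriptor at swhead as used (DU) */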
+	_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
+	if (_d->dma.status & RX_DMA_STATUS_DU)
+		return false;
+
+	return true;
+}
+
 /**
  * reap 1 frame from @swhead
  *
@@ -1812,6 +1869,15 @@
 
 	spin_lock(&txdata->lock);
 
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status) ||
+	    test_bit(wil_status_resuming, wil->status)) {
+		wil_dbg_txrx(wil,
+			     "suspend/resume in progress. drop packet\n");
+		spin_unlock(&txdata->lock);
+		return -EINVAL;
+	}
+
 	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
 	     (wil, vring, skb);
 
@@ -1864,6 +1930,11 @@
 		return;
 	}
 
+	/* Do not wake the queues in suspend flow */
+	if (test_bit(wil_status_suspending, wil->status) ||
+	    test_bit(wil_status_suspended, wil->status))
+		return;
+
 	/* check wake */
 	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
 		struct vring *cur_vring = &wil->vring_tx[i];
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index d05bb36..eca5685 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -38,8 +38,13 @@
 extern bool disable_ap_sme;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
-#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+
+#define WIL_FW_NAME_DEFAULT "wil6210.fw"
+#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
+
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
+#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
+
 #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
@@ -54,7 +59,8 @@
 	return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
 }
 
-#define WIL6210_MEM_SIZE (2*1024*1024UL)
+#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
+#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
 
 #define WIL_TX_Q_LEN_DEFAULT		(4000)
 #define WIL_RX_RING_SIZE_ORDER_DEFAULT	(10)
@@ -78,6 +84,15 @@
  */
 #define WIL_MAX_MPDU_OVERHEAD	(62)
 
+struct wil_suspend_stats {
+	unsigned long successful_suspends;
+	unsigned long failed_suspends;
+	unsigned long successful_resumes;
+	unsigned long failed_resumes;
+	unsigned long rejected_by_device;
+	unsigned long rejected_by_host;
+};
+
 /* Calculate MAC buffer size for the firmware. It includes all overhead,
  * as it will go over the air, and need to be 8 byte aligned
  */
@@ -288,6 +303,8 @@
 #define ISR_MISC_MBOX_EVT	BIT_DMA_EP_MISC_ICR_FW_INT(1)
 #define ISR_MISC_FW_ERROR	BIT_DMA_EP_MISC_ICR_FW_INT(3)
 
+#define WIL_DATA_COMPLETION_TO_MS 200
+
 /* Hardware definitions end */
 struct fw_map {
 	u32 from; /* linker address - from, inclusive */
@@ -416,7 +433,9 @@
 	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 	wil_status_resetting, /* reset in progress */
+	wil_status_suspending, /* suspend in progress */
 	wil_status_suspended, /* suspend completed, device is suspended */
+	wil_status_resuming, /* resume in progress */
 	wil_status_last /* keep last */
 };
 
@@ -599,6 +618,7 @@
 
 struct wil6210_priv {
 	struct pci_dev *pdev;
+	u32 bar_size;
 	struct wireless_dev *wdev;
 	void __iomem *csr;
 	DECLARE_BITMAP(status, wil_status_last);
@@ -681,9 +701,12 @@
 	struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
 	u8 discovery_mode;
 	u8 abft_len;
+	u8 wakeup_trigger;
+	struct wil_suspend_stats suspend_stats;
 
 	void *platform_handle;
 	struct wil_platform_ops platform_ops;
+	bool keep_radio_on_during_sleep;
 
 	struct pmc_ctx pmc;
 
@@ -710,6 +733,11 @@
 	struct notifier_block pm_notify;
 #endif /* CONFIG_PM_SLEEP */
 #endif /* CONFIG_PM */
+
+	bool suspend_resp_rcvd;
+	bool suspend_resp_comp;
+	u32 bus_request_kbps;
+	u32 bus_request_kbps_pre_suspend;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -972,6 +1000,11 @@
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+bool wil_is_wmi_idle(struct wil6210_priv *wil);
+int wmi_resume(struct wil6210_priv *wil);
+int wmi_suspend(struct wil6210_priv *wil);
+bool wil_is_tx_idle(struct wil6210_priv *wil);
+bool wil_is_rx_idle(struct wil6210_priv *wil);
 
 int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
 void wil_fw_core_dump(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index f8c4117..621005b 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -33,10 +33,11 @@
  */
 struct wil_platform_ops {
 	int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
-	int (*suspend)(void *handle);
-	int (*resume)(void *handle);
+	int (*suspend)(void *handle, bool keep_device_power);
+	int (*resume)(void *handle, bool device_powered_on);
 	void (*uninit)(void *handle);
 	int (*notify)(void *handle, enum wil_platform_event evt);
+	bool (*keep_radio_on_during_sleep)(void *handle);
 };
 
 /**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 8e1825f..ae0952f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -38,6 +38,8 @@
 MODULE_PARM_DESC(led_id,
 		 " 60G device led enablement. Set the led ID (0-2) to enable");
 
+#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
+
 /**
  * WMI event receiving - theory of operations
  *
@@ -158,7 +160,7 @@
 		return NULL;
 
 	off = HOSTADDR(ptr);
-	if (off > WIL6210_MEM_SIZE - 4)
+	if (off > wil->bar_size - 4)
 		return NULL;
 
 	return wil->csr + off;
@@ -178,7 +180,7 @@
 		return NULL;
 
 	off = HOSTADDR(ptr);
-	if (off > WIL6210_MEM_SIZE - 4)
+	if (off > wil->bar_size - 4)
 		return NULL;
 
 	return wil->csr + off;
@@ -234,6 +236,16 @@
 		return -EAGAIN;
 	}
 
+	/* Allow sending only suspend / resume commands during suspend flow */
+	if ((test_bit(wil_status_suspending, wil->status) ||
+	     test_bit(wil_status_suspended, wil->status) ||
+	     test_bit(wil_status_resuming, wil->status)) &&
+	     ((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
+	      (cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
+		wil_err(wil, "WMI: reject send_command during suspend\n");
+		return -EINVAL;
+	}
+
 	if (!head) {
 		wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
 		return -EINVAL;
@@ -893,6 +905,11 @@
 		return;
 	}
 
+	if (test_bit(wil_status_suspended, wil->status)) {
+		wil_err(wil, "suspended. cannot handle WMI event\n");
+		return;
+	}
+
 	for (n = 0;; n++) {
 		u16 len;
 		bool q;
@@ -945,6 +962,15 @@
 			struct wmi_cmd_hdr *wmi = &evt->event.wmi;
 			u16 id = le16_to_cpu(wmi->command_id);
 			u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
+			if (test_bit(wil_status_resuming, wil->status)) {
+				if (id == WMI_TRAFFIC_RESUME_EVENTID)
+					clear_bit(wil_status_resuming,
+						  wil->status);
+				else
+					wil_err(wil,
+						"WMI evt %d while resuming\n",
+						id);
+			}
 			spin_lock_irqsave(&wil->wmi_ev_lock, flags);
 			if (wil->reply_id && wil->reply_id == id) {
 				if (wil->reply_buf) {
@@ -952,6 +978,11 @@
 					       min(len, wil->reply_size));
 					immed_reply = true;
 				}
+				if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
+					wil_dbg_wmi(wil,
+						    "set suspend_resp_rcvd\n");
+					wil->suspend_resp_rcvd = true;
+				}
 			}
 			spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
 
@@ -1909,6 +1940,85 @@
 	return rc;
 }
 
+int wmi_suspend(struct wil6210_priv *wil)
+{
+	int rc;
+	struct wmi_traffic_suspend_cmd cmd = {
+		.wakeup_trigger = wil->wakeup_trigger,
+	};
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_traffic_suspend_event evt;
+	} __packed reply;
+	u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;
+
+	wil->suspend_resp_rcvd = false;
+	wil->suspend_resp_comp = false;
+
+	reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;
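+	/* assume the device rejected the suspend unless the reply says otherwise */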
+
+	rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
+		      WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
+		      suspend_to);
+	if (rc) {
+		wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
+		if (rc == -ETIME)
+			/* wmi_call TO */
+			wil->suspend_stats.rejected_by_device++;
+		else
+			wil->suspend_stats.rejected_by_host++;
+		goto out;
+	}
+
+	wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");
+
+	rc = wait_event_interruptible_timeout(wil->wq,
+					      wil->suspend_resp_comp,
+					      msecs_to_jiffies(suspend_to));
+	if (rc == 0) {
+		wil_err(wil, "TO waiting for suspend_response_completed\n");
+		if (wil->suspend_resp_rcvd)
+			/* Device responded but we TO due to another reason */
+			wil->suspend_stats.rejected_by_host++;
+		else
+			wil->suspend_stats.rejected_by_device++;
+		rc = -EBUSY;
+		goto out;
+	}
+
+	wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
+	if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
+		wil_dbg_pm(wil, "device rejected the suspend\n");
+		wil->suspend_stats.rejected_by_device++;
+	}
+	rc = reply.evt.status;
+
+out:
+	wil->suspend_resp_rcvd = false;
+	wil->suspend_resp_comp = false;
+
+	return rc;
+}
+
+int wmi_resume(struct wil6210_priv *wil)
+{
+	int rc;
+	struct {
+		struct wmi_cmd_hdr wmi;
+		struct wmi_traffic_resume_event evt;
+	} __packed reply;
+
+	reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;
+
+	rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
+		      WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
+		      WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
+	if (rc)
+		return rc;
+
+	return reply.evt.status;
+}
+
 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
 				 void *d, int len)
 {
@@ -1998,3 +2108,36 @@
 	}
 	wil_dbg_wmi(wil, "event_worker: Finished\n");
 }
+
+bool wil_is_wmi_idle(struct wil6210_priv *wil)
+{
+	ulong flags;
+	struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
+	bool rc = false;
+
+	spin_lock_irqsave(&wil->wmi_ev_lock, flags);
+
+	/* Check if there are pending WMI events in the events queue */
+	if (!list_empty(&wil->pending_wmi_ev)) {
+		wil_dbg_pm(wil, "Pending WMI events in queue\n");
+		goto out;
+	}
+
+	/* Check if there is a pending WMI call */
+	if (wil->reply_id) {
+		wil_dbg_pm(wil, "Pending WMI call\n");
+		goto out;
+	}
+
+	/* Check if there are pending RX events in mbox */
+	r->head = wil_r(wil, RGF_MBOX +
+			offsetof(struct wil6210_mbox_ctl, rx.head));
+	if (r->tail != r->head)
+		wil_dbg_pm(wil, "Pending WMI mbox events\n");
+	else
+		rc = true;
+
+out:
+	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
+	return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index f7f5f4f..256f63c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -59,6 +59,7 @@
 	WMI_FW_CAPABILITY_DISABLE_AP_SME	= 4,
 	WMI_FW_CAPABILITY_WMI_ONLY		= 5,
 	WMI_FW_CAPABILITY_THERMAL_THROTTLING	= 7,
+	WMI_FW_CAPABILITY_D3_SUSPEND		= 8,
 	WMI_FW_CAPABILITY_MAX,
 };
 
@@ -157,7 +158,7 @@
 	WMI_FLASH_READ_CMDID				= 0x902,
 	WMI_FLASH_WRITE_CMDID				= 0x903,
 	/* Power management */
-	WMI_TRAFFIC_DEFERRAL_CMDID			= 0x904,
+	WMI_TRAFFIC_SUSPEND_CMDID			= 0x904,
 	WMI_TRAFFIC_RESUME_CMDID			= 0x905,
 	/* P2P */
 	WMI_P2P_CFG_CMDID				= 0x910,
@@ -500,8 +501,14 @@
 	u8 reserved[3];
 } __packed;
 
-/* WMI_TRAFFIC_DEFERRAL_CMDID */
-struct wmi_traffic_deferral_cmd {
+/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
+enum wmi_wakeup_trigger {
+	WMI_WAKEUP_TRIGGER_UCAST	= 0x01,
+	WMI_WAKEUP_TRIGGER_BCAST	= 0x02,
+};
+
+/* WMI_TRAFFIC_SUSPEND_CMDID */
+struct wmi_traffic_suspend_cmd {
 	/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
 	u8 wakeup_trigger;
 } __packed;
@@ -1084,7 +1091,7 @@
 	WMI_FLASH_READ_DONE_EVENTID			= 0x1902,
 	WMI_FLASH_WRITE_DONE_EVENTID			= 0x1903,
 	/* Power management */
-	WMI_TRAFFIC_DEFERRAL_EVENTID			= 0x1904,
+	WMI_TRAFFIC_SUSPEND_EVENTID			= 0x1904,
 	WMI_TRAFFIC_RESUME_EVENTID			= 0x1905,
 	/* P2P */
 	WMI_P2P_CFG_DONE_EVENTID			= 0x1910,
@@ -1926,14 +1933,14 @@
 	struct wmi_link_maintain_cfg lm_cfg;
 } __packed;
 
-enum wmi_traffic_deferral_status {
-	WMI_TRAFFIC_DEFERRAL_APPROVED	= 0x0,
-	WMI_TRAFFIC_DEFERRAL_REJECTED	= 0x1,
+enum wmi_traffic_suspend_status {
+	WMI_TRAFFIC_SUSPEND_APPROVED	= 0x0,
+	WMI_TRAFFIC_SUSPEND_REJECTED	= 0x1,
 };
 
-/* WMI_TRAFFIC_DEFERRAL_EVENTID */
-struct wmi_traffic_deferral_event {
-	/* enum wmi_traffic_deferral_status_e */
+/* WMI_TRAFFIC_SUSPEND_EVENTID */
+struct wmi_traffic_suspend_event {
+	/* enum wmi_traffic_suspend_status_e */
 	u8 status;
 } __packed;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 16241d2..afdbbf5 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2512,9 +2512,11 @@
 			priv->random_mac[i] |= get_random_int() &
 					       ~(request->mac_addr_mask[i]);
 		}
+		ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
+	} else {
+		eth_zero_addr(priv->random_mac);
 	}
 
-	ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
 	user_scan_cfg->num_ssids = request->n_ssids;
 	user_scan_cfg->ssid_list = request->ssids;
 
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 7a310c4..1fdb86c 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -995,6 +995,7 @@
 	if (card && card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 	}
 	return 0;
 }
@@ -1561,6 +1562,11 @@
 		return -1;
 
 	card->cmd_buf = skb;
+	/*
+	 * Need to keep a reference, since core driver might free up this
+	 * buffer before we've unmapped it.
+	 */
+	skb_get(skb);
 
 	/* To send a command, the driver will:
 		1. Write the 64bit physical address of the data buffer to
@@ -1658,6 +1664,7 @@
 	if (card->cmd_buf) {
 		mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
 					 PCI_DMA_TODEVICE);
+		dev_kfree_skb_any(card->cmd_buf);
 		card->cmd_buf = NULL;
 	}
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 5dad4021..a74fad6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -359,6 +359,107 @@
 	return rtl8821ae_phy_rf6052_config(hw);
 }
 
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 4:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77777777);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+			break;
+		}
+	case 0:
+	case 2:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		break;
+	}
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+	u8 tmp;
+
+	switch (rtlhal->rfe_type) {
+	case 0:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 1:
+		if (rtlpriv->btcoexist.bt_coexistence) {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		} else {
+			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+				      0x77337717);
+			rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+			rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+		}
+		break;
+	case 3:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+		break;
+	case 5:
+		rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+		rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	case 2:
+	case 4:
+	default:
+		rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+		rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+		rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+		break;
+	}
+}
+
 u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8	band,
 			   u8 rf_path)
 {
@@ -553,14 +654,9 @@
 			/* 0x82C[1:0] = 2b'00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 0);
 		}
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77777777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
-		}
+
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_24g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -615,14 +711,8 @@
 			/* 0x82C[1:0] = 2'b00 */
 			rtl_set_bbreg(hw, 0x82c, 0x3, 1);
 
-		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
-			rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
-				      0x77337777);
-			rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
-			rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
-		}
+		if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+			_rtl8812ae_phy_set_rfe_reg_5g(hw);
 
 		rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
 		rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index 1d6110f..ed69dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -2424,6 +2424,7 @@
 #define	BMASKH4BITS			0xf0000000
 #define BMASKOFDM_D			0xffc00000
 #define	BMASKCCK			0x3f3f3f3f
+#define BMASKRFEINV			0x3ff00000
 
 #define BRFREGOFFSETMASK		0xfffff
 
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 9d23692..e1c6f99 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -70,3 +70,11 @@
 source "drivers/nfc/s3fwrn5/Kconfig"
 source "drivers/nfc/st95hf/Kconfig"
 endmenu
+
+config NFC_NQ
+	tristate "QTI NCI based NFC Controller Driver for NQx"
+	depends on I2C
+	help
+	  This enables the NFC driver for NQx based devices.
+	  This is for i2c connected version. NCI protocol logic
+	  resides in the usermode and it has no other NFC dependencies.
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bab8ef0..b691fd4 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -17,3 +17,4 @@
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
 obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
 obj-$(CONFIG_NFC_ST95HF)	+= st95hf/
+obj-$(CONFIG_NFC_NQ)		+= nq-nci.o
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
new file mode 100644
index 0000000..baa4f94
--- /dev/null
+++ b/drivers/nfc/nq-nci.c
@@ -0,0 +1,1242 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/uaccess.h>
+#include "nq-nci.h"
+#include <linux/clk.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+struct nqx_platform_data {
+	unsigned int irq_gpio;
+	unsigned int en_gpio;
+	unsigned int clkreq_gpio;
+	unsigned int firm_gpio;
+	unsigned int ese_gpio;
+	const char *clk_src_name;
+};
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = "qcom,nq-nci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_match_table);
+
+#define MAX_BUFFER_SIZE			(320)
+#define WAKEUP_SRC_TIMEOUT		(2000)
+#define MAX_RETRY_COUNT			3
+
+struct nqx_dev {
+	wait_queue_head_t	read_wq;
+	struct	mutex		read_mutex;
+	struct	i2c_client	*client;
+	struct	miscdevice	nqx_device;
+	union  nqx_uinfo	nqx_info;
+	/* NFC GPIO variables */
+	unsigned int		irq_gpio;
+	unsigned int		en_gpio;
+	unsigned int		firm_gpio;
+	unsigned int		clkreq_gpio;
+	unsigned int		ese_gpio;
+	/* NFC VEN pin state powered by Nfc */
+	bool			nfc_ven_enabled;
+	/* NFC_IRQ state */
+	bool			irq_enabled;
+	/* NFC_IRQ wake-up state */
+	bool			irq_wake_up;
+	spinlock_t		irq_enabled_lock;
+	unsigned int		count_irq;
+	/* Initial CORE RESET notification */
+	unsigned int		core_reset_ntf;
+	/* CLK control */
+	bool			clk_run;
+	struct	clk		*s_clk;
+	/* read buffer*/
+	size_t kbuflen;
+	u8 *kbuf;
+	struct nqx_platform_data *pdata;
+};
+
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			void *v);
+/*clock enable function*/
+static int nqx_clock_select(struct nqx_dev *nqx_dev);
+/*clock disable function*/
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static struct notifier_block nfcc_notifier = {
+	.notifier_call	= nfcc_reboot,
+	.next			= NULL,
+	.priority		= 0
+};
+
+unsigned int	disable_ctrl;
+
+static void nqx_init_stat(struct nqx_dev *nqx_dev)
+{
+	nqx_dev->count_irq = 0;
+}
+
+static void nqx_disable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (nqx_dev->irq_enabled) {
+		disable_irq_nosync(nqx_dev->client->irq);
+		nqx_dev->irq_enabled = false;
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+/**
+ * nqx_enable_irq()
+ *
+ * Enable the NFC interrupt if it is not already enabled
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	if (!nqx_dev->irq_enabled) {
+		nqx_dev->irq_enabled = true;
+		enable_irq(nqx_dev->client->irq);
+	}
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
+static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
+{
+	struct nqx_dev *nqx_dev = dev_id;
+	unsigned long flags;
+
+	if (device_may_wakeup(&nqx_dev->client->dev))
+		pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
+
+	nqx_disable_irq(nqx_dev);
+	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+	nqx_dev->count_irq++;
+	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+	wake_up(&nqx_dev->read_wq);
+
+	return IRQ_HANDLED;
+}
+
+static ssize_t nfc_read(struct file *filp, char __user *buf,
+					size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	unsigned char *tmp = NULL;
+	int ret;
+	int irq_gpio_val = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (count > nqx_dev->kbuflen)
+		count = nqx_dev->kbuflen;
+
+	dev_dbg(&nqx_dev->client->dev, "%s : reading %zu bytes.\n",
+			__func__, count);
+
+	mutex_lock(&nqx_dev->read_mutex);
+
+	irq_gpio_val = gpio_get_value(nqx_dev->irq_gpio);
+	if (irq_gpio_val == 0) {
+		if (filp->f_flags & O_NONBLOCK) {
+			dev_err(&nqx_dev->client->dev,
+			":f_flag has O_NONBLOCK. EAGAIN\n");
+			ret = -EAGAIN;
+			goto err;
+		}
+		while (1) {
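+			/* block until the IRQ line goes high, re-arming the NFC interrupt on each pass */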
+			ret = 0;
+			if (!nqx_dev->irq_enabled) {
+				nqx_dev->irq_enabled = true;
+				enable_irq(nqx_dev->client->irq);
+			}
+			if (!gpio_get_value(nqx_dev->irq_gpio)) {
+				ret = wait_event_interruptible(nqx_dev->read_wq,
+					!nqx_dev->irq_enabled);
+			}
+			if (ret)
+				goto err;
+			nqx_disable_irq(nqx_dev);
+
+			if (gpio_get_value(nqx_dev->irq_gpio))
+				break;
+			dev_err_ratelimited(&nqx_dev->client->dev,
+			"gpio is low, no need to read data\n");
+		}
+	}
+
+	tmp = nqx_dev->kbuf;
+	if (!tmp) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+	memset(tmp, 0x00, count);
+
+	/* Read data */
+	ret = i2c_master_recv(nqx_dev->client, tmp, count);
+	if (ret < 0) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: i2c_master_recv returned %d\n", __func__, ret);
+		goto err;
+	}
+	if (ret > count) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: received too many bytes from i2c (%d)\n",
+			__func__, ret);
+		ret = -EIO;
+		goto err;
+	}
+#ifdef NFC_KERNEL_BU
+		dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
+			__func__, tmp[0], tmp[1], tmp[2]);
+#endif
+	if (copy_to_user(buf, tmp, ret)) {
+		dev_warn(&nqx_dev->client->dev,
+			"%s : failed to copy to user space\n", __func__);
+		ret = -EFAULT;
+		goto err;
+	}
+	mutex_unlock(&nqx_dev->read_mutex);
+	return ret;
+
+err:
+	mutex_unlock(&nqx_dev->read_mutex);
+out:
+	return ret;
+}
+
+static ssize_t nfc_write(struct file *filp, const char __user *buf,
+				size_t count, loff_t *offset)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+	char *tmp = NULL;
+	int ret = 0;
+
+	if (!nqx_dev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (count > nqx_dev->kbuflen) {
+		dev_err(&nqx_dev->client->dev, "%s: out of memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	tmp = memdup_user(buf, count);
+	if (IS_ERR(tmp)) {
+		dev_err(&nqx_dev->client->dev, "%s: memdup_user failed\n",
+			__func__);
+		ret = PTR_ERR(tmp);
+		goto out;
+	}
+
+	ret = i2c_master_send(nqx_dev->client, tmp, count);
+	if (ret != count) {
+		dev_err(&nqx_dev->client->dev,
+		"%s: failed to write %d\n", __func__, ret);
+		ret = -EIO;
+		goto out_free;
+	}
+#ifdef NFC_KERNEL_BU
+	dev_dbg(&nqx_dev->client->dev,
+			"%s : i2c-%d: NfcNciTx %x %x %x\n",
+			__func__, iminor(file_inode(filp)),
+			tmp[0], tmp[1], tmp[2]);
+#endif
+	usleep_range(1000, 1100);
+out_free:
+	kfree(tmp);
+out:
+	return ret;
+}
+
+/**
+ * nqx_standby_write()
+ * @buf:       pointer to data buffer
+ * @len:       number of bytes to transfer
+ *
+ * Write the data buffer over I2C, retrying
+ * if the NFCC is in standby mode
+ *
+ * Return: number of bytes written, or a negative value on error
+ */
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+				const unsigned char *buf, size_t len)
+{
+	int ret = -EINVAL;
+	int retry_cnt;
+
+	for (retry_cnt = 1; retry_cnt <= MAX_RETRY_COUNT; retry_cnt++) {
+		ret = i2c_master_send(nqx_dev->client, buf, len);
+		if (ret < 0) {
+			dev_err(&nqx_dev->client->dev,
+				"%s: write failed, Maybe in Standby Mode - Retry(%d)\n",
+				 __func__, retry_cnt);
+			usleep_range(1000, 1100);
+		} else if (ret == len)
+			break;
+	}
+	return ret;
+}
+
+/*
+ * Power management of the eSE
+ * NFC & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE ON : NFC_EN high and eSE_pwr_req high.
+ * NFC OFF & eSE OFF : NFC_EN low and eSE_pwr_req low.
+ */
+static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
+{
+	int r = -1;
+	const unsigned char svdd_off_cmd_warn[] =  {0x2F, 0x31, 0x01, 0x01};
+	const unsigned char svdd_off_cmd_done[] =  {0x2F, 0x31, 0x01, 0x00};
+
+	if (!gpio_is_valid(nqx_dev->ese_gpio)) {
+		dev_err(&nqx_dev->client->dev,
+			"%s: ese_gpio is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (arg == 0) {
+		/*
+		 * We want to power on the eSE and to do so we need the
+		 * eSE_pwr_req pin and the NFC_EN pin to be high
+		 */
+		if (gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is already high\n");
+			r = 0;
+		} else {
+			/**
+			 * Let's store the NFC_EN pin state
+			 * only if the eSE is not yet on
+			 */
+			nqx_dev->nfc_ven_enabled =
+					gpio_get_value(nqx_dev->en_gpio);
+			if (!nqx_dev->nfc_ven_enabled) {
+				gpio_set_value(nqx_dev->en_gpio, 1);
+				/* hardware dependent delay */
+				usleep_range(1000, 1100);
+			}
+			gpio_set_value(nqx_dev->ese_gpio, 1);
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "ese_gpio is enabled\n");
+				r = 0;
+			}
+		}
+	} else if (arg == 1) {
+		if (nqx_dev->nfc_ven_enabled &&
+			((nqx_dev->nqx_info.info.chip_type == NFCC_NQ_220) ||
+			(nqx_dev->nqx_info.info.chip_type == NFCC_PN66T))) {
+			/**
+			 * Let's inform the CLF we're
+			 * powering off the eSE
+			 */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_warn,
+						sizeof(svdd_off_cmd_warn));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_warn sent\n", __func__);
+
+			/* let's power down the eSE */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: nqx_dev->ese_gpio set to 0\n", __func__);
+
+			/**
+			 * Time needed for the SVDD capacitor
+			 * to get discharged
+			 */
+			usleep_range(8000, 8100);
+
+			/* Let's inform the CLF the eSE is now off */
+			r = nqx_standby_write(nqx_dev, svdd_off_cmd_done,
+						sizeof(svdd_off_cmd_done));
+			if (r < 0) {
+				dev_err(&nqx_dev->client->dev,
+					"%s: write failed after max retry\n",
+					 __func__);
+				return -ENXIO;
+			}
+			dev_dbg(&nqx_dev->client->dev,
+				"%s: svdd_off_cmd_done sent\n", __func__);
+		} else {
+			/**
+			 * In case the NFC is off,
+			 * there's no need to send the i2c commands
+			 */
+			gpio_set_value(nqx_dev->ese_gpio, 0);
+		}
+
+		if (!gpio_get_value(nqx_dev->ese_gpio)) {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio is disabled\n");
+			r = 0;
+		}
+
+		if (!nqx_dev->nfc_ven_enabled) {
+			/* hardware dependent delay */
+			usleep_range(1000, 1100);
+			dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+	} else if (arg == 3) {
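+		/* arg == 3: report the current eSE power state */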
+		r = gpio_get_value(nqx_dev->ese_gpio);
+	}
+	return r;
+}
+
+static int nfc_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev = container_of(filp->private_data,
+				struct nqx_dev, nqx_device);
+
+	filp->private_data = nqx_dev;
+	nqx_init_stat(nqx_dev);
+
+	dev_dbg(&nqx_dev->client->dev,
+			"%s: %d,%d\n", __func__, imajor(inode), iminor(inode));
+	return ret;
+}
+
+/*
+ * nfc_ioctl_power_states() - power control
+ * @filp:	pointer to the file descriptor
+ * @arg:	mode that we want to move to
+ *
+ * Device power control. Depending on the arg value, device moves to
+ * different states
+ * (arg = 0): NFC_ENABLE	GPIO = 0, FW_DL GPIO = 0
+ * (arg = 1): NFC_ENABLE	GPIO = 1, FW_DL GPIO = 0
+ * (arg = 2): FW_DL GPIO = 1
+ *
+ * Return: -ENOIOCTLCMD if arg is not supported, -EBUSY if firmware
+ * download is requested while the eSE is on, 0 in any other case
+ */
+int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
+{
+	int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	if (arg == 0) {
+		/*
+		 * We are attempting a hardware reset so let us disable
+		 * interrupts to avoid spurious notifications to upper
+		 * layers.
+		 */
+		nqx_disable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value disable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (!gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+				gpio_set_value(nqx_dev->en_gpio, 0);
+			} else {
+				dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+			}
+		} else {
+			dev_dbg(&nqx_dev->client->dev, "ese_gpio invalid, set en_gpio to low\n");
+			gpio_set_value(nqx_dev->en_gpio, 0);
+		}
+		r = nqx_clock_deselect(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
+		nqx_dev->nfc_ven_enabled = false;
+		/* hardware dependent delay */
+		msleep(100);
+	} else if (arg == 1) {
+		nqx_enable_irq(nqx_dev);
+		dev_dbg(&nqx_dev->client->dev,
+			"gpio_set_value enable: %s: info: %p\n",
+			__func__, nqx_dev);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 0);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		r = nqx_clock_select(nqx_dev);
+		if (r < 0)
+			dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
+		nqx_dev->nfc_ven_enabled = true;
+		msleep(20);
+	} else if (arg == 2) {
+		/*
+		 * We are switching to Download Mode, toggle the enable pin
+		 * in order to set the NFCC in the new mode
+		 */
+		if (gpio_is_valid(nqx_dev->ese_gpio)) {
+			if (gpio_get_value(nqx_dev->ese_gpio)) {
+				dev_err(&nqx_dev->client->dev,
+				"FW download forbidden while ese is on\n");
+				return -EBUSY; /* Device or resource busy */
+			}
+		}
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+		if (gpio_is_valid(nqx_dev->firm_gpio))
+			gpio_set_value(nqx_dev->firm_gpio, 1);
+		msleep(20);
+		gpio_set_value(nqx_dev->en_gpio, 0);
+		msleep(100);
+		gpio_set_value(nqx_dev->en_gpio, 1);
+		msleep(20);
+	} else {
+		r = -ENOIOCTLCMD;
+	}
+
+	return r;
+}
+
+#ifdef CONFIG_COMPAT
+static long nfc_compat_ioctl(struct file *pfile, unsigned int cmd,
+				unsigned long arg)
+{
+	long r = 0;
+
+	arg = (compat_u64)arg;
+	switch (cmd) {
+	case NFC_SET_PWR:
+		nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	default:
+		r = -ENOTTY;
+	}
+	return r;
+}
+#endif
+
+/*
+ * nfc_ioctl_core_reset_ntf()
+ * @filp:       pointer to the file descriptor
+ *
+ * Allows callers to determine if a CORE_RESET_NTF has arrived
+ *
+ * Return: the value of variable core_reset_ntf
+ */
+int nfc_ioctl_core_reset_ntf(struct file *filp)
+{
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	dev_dbg(&nqx_dev->client->dev, "%s: returning = %d\n", __func__,
+		nqx_dev->core_reset_ntf);
+	return nqx_dev->core_reset_ntf;
+}
+
+/*
+ * nfc_ioctl_nfcc_info()
+ *
+ * Report the NQ chipset and firmware version details
+ */
+unsigned int nfc_ioctl_nfcc_info(struct file *filp, unsigned long arg)
+{
+	unsigned int r = 0;
+	struct nqx_dev *nqx_dev = filp->private_data;
+
+	r = nqx_dev->nqx_info.i;
+	dev_dbg(&nqx_dev->client->dev,
+		"nqx nfc : nfc_ioctl_nfcc_info r = %d\n", r);
+
+	return r;
+}
+
+static long nfc_ioctl(struct file *pfile, unsigned int cmd,
+			unsigned long arg)
+{
+	int r = 0;
+
+	switch (cmd) {
+	case NFC_SET_PWR:
+		r = nfc_ioctl_power_states(pfile, arg);
+		break;
+	case ESE_SET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, arg);
+		break;
+	case ESE_GET_PWR:
+		r = nqx_ese_pwr(pfile->private_data, 3);
+		break;
+	case SET_RX_BLOCK:
+		break;
+	case SET_EMULATOR_TEST_POINT:
+		break;
+	case NFCC_INITIAL_CORE_RESET_NTF:
+		r = nfc_ioctl_core_reset_ntf(pfile);
+		break;
+	case NFCC_GET_INFO:
+		r = nfc_ioctl_nfcc_info(pfile, arg);
+		break;
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+static const struct file_operations nfc_dev_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.read  = nfc_read,
+	.write = nfc_write,
+	.open = nfc_open,
+	.unlocked_ioctl = nfc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = nfc_compat_ioctl
+#endif
+};
+
+/* Check for availability of NQx NFC controller hardware */
+static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
+{
+	int ret = 0;
+
+	unsigned char raw_nci_reset_cmd[] =  {0x20, 0x00, 0x01, 0x00};
+	unsigned char raw_nci_init_cmd[] =   {0x20, 0x01, 0x00};
+	unsigned char nci_init_rsp[28];
+	unsigned char nci_reset_rsp[6];
+	unsigned char init_rsp_len = 0;
+	unsigned int enable_gpio = nqx_dev->en_gpio;
+	/* making sure that the NFCC starts in a clean state. */
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	/* hardware dependent delay */
+	msleep(20);
+	gpio_set_value(enable_gpio, 1);/* HPD : Enable*/
+	/* hardware dependent delay */
+	msleep(20);
+
+	/* send NCI CORE RESET CMD with Keep Config parameters */
+	ret = i2c_master_send(client, raw_nci_reset_cmd,
+						sizeof(raw_nci_reset_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+
+	/* Read Response of RESET command */
+	ret = i2c_master_recv(client, nci_reset_rsp,
+		sizeof(nci_reset_rsp));
+	dev_err(&client->dev,
+	"%s: - nq - reset cmd answer : NfcNciRx %x %x %x\n",
+	__func__, nci_reset_rsp[0],
+	nci_reset_rsp[1], nci_reset_rsp[2]);
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	ret = i2c_master_send(client, raw_nci_init_cmd,
+		sizeof(raw_nci_init_cmd));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_send Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	/* hardware dependent delay */
+	msleep(30);
+	/* Read Response of INIT command */
+	ret = i2c_master_recv(client, nci_init_rsp,
+		sizeof(nci_init_rsp));
+	if (ret < 0) {
+		dev_err(&client->dev,
+		"%s: - i2c_master_recv Error\n", __func__);
+		goto err_nfcc_hw_check;
+	}
+	init_rsp_len = 2 + nci_init_rsp[2]; /*payload + len*/
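+	/* the chip type and FW version occupy the last four bytes of the INIT response */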
+	if (init_rsp_len > PAYLOAD_HEADER_LENGTH) {
+		nqx_dev->nqx_info.info.chip_type =
+				nci_init_rsp[init_rsp_len - 3];
+		nqx_dev->nqx_info.info.rom_version =
+				nci_init_rsp[init_rsp_len - 2];
+		nqx_dev->nqx_info.info.fw_major =
+				nci_init_rsp[init_rsp_len - 1];
+		nqx_dev->nqx_info.info.fw_minor =
+				nci_init_rsp[init_rsp_len];
+	}
+	dev_dbg(&nqx_dev->client->dev, "NQ NFCC chip_type = %x\n",
+		nqx_dev->nqx_info.info.chip_type);
+	dev_dbg(&nqx_dev->client->dev, "NQ fw version = %x.%x.%x\n",
+		nqx_dev->nqx_info.info.rom_version,
+		nqx_dev->nqx_info.info.fw_major,
+		nqx_dev->nqx_info.info.fw_minor);
+
+	switch (nqx_dev->nqx_info.info.chip_type) {
+	case NFCC_NQ_210:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ210 ##\n", __func__);
+		break;
+	case NFCC_NQ_220:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ220 ##\n", __func__);
+		break;
+	case NFCC_NQ_310:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ310 ##\n", __func__);
+		break;
+	case NFCC_NQ_330:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == NQ330 ##\n", __func__);
+		break;
+	case NFCC_PN66T:
+		dev_dbg(&client->dev,
+		"%s: ## NFCC == PN66T ##\n", __func__);
+		break;
+	default:
+		dev_err(&client->dev,
+		"%s: - NFCC HW not Supported\n", __func__);
+		break;
+	}
+
+	/*Disable NFC by default to save power on boot*/
+	gpio_set_value(enable_gpio, 0);/* ULPM: Disable */
+	ret = 0;
+	goto done;
+
+err_nfcc_hw_check:
+	ret = -ENXIO;
+	dev_err(&client->dev,
+		"%s: - NFCC HW not available\n", __func__);
+done:
+	return ret;
+}
+
+/*
+ * Routine to enable the clock.
+ * This routine can be extended to select from multiple
+ * sources based on clk_src_name.
+ */
+static int nqx_clock_select(struct nqx_dev *nqx_dev)
+{
+	int r = 0;
+
+	nqx_dev->s_clk = clk_get(&nqx_dev->client->dev, "ref_clk");
+	if (IS_ERR_OR_NULL(nqx_dev->s_clk))
+		goto err_clk;
+
+	if (nqx_dev->clk_run == false)
+		r = clk_prepare_enable(nqx_dev->s_clk);
+
+	if (r)
+		goto err_clk;
+
+	nqx_dev->clk_run = true;
+
+	return r;
+
+err_clk:
+	r = -1;
+	return r;
+}
+
+/*
+ * Routine to disable clocks
+ */
+static int nqx_clock_deselect(struct nqx_dev *nqx_dev)
+{
+	int r = -1;
+
+	if (nqx_dev->s_clk != NULL) {
+		if (nqx_dev->clk_run == true) {
+			clk_disable_unprepare(nqx_dev->s_clk);
+			nqx_dev->clk_run = false;
+		}
+		return 0;
+	}
+	return r;
+}
+
+static int nfc_parse_dt(struct device *dev, struct nqx_platform_data *pdata)
+{
+	int r = 0;
+	struct device_node *np = dev->of_node;
+
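+	/* VEN and IRQ GPIOs are mandatory; FIRM and ESE are optional */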
+	pdata->en_gpio = of_get_named_gpio(np, "qcom,nq-ven", 0);
+	if (!gpio_is_valid(pdata->en_gpio))
+		return -EINVAL;
+	disable_ctrl = pdata->en_gpio;
+
+	pdata->irq_gpio = of_get_named_gpio(np, "qcom,nq-irq", 0);
+	if (!gpio_is_valid(pdata->irq_gpio))
+		return -EINVAL;
+
+	pdata->firm_gpio = of_get_named_gpio(np, "qcom,nq-firm", 0);
+	if (!gpio_is_valid(pdata->firm_gpio)) {
+		dev_warn(dev,
+			"FIRM GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->firm_gpio = -EINVAL;
+	}
+
+	pdata->ese_gpio = of_get_named_gpio(np, "qcom,nq-esepwr", 0);
+	if (!gpio_is_valid(pdata->ese_gpio)) {
+		dev_warn(dev,
+			"ese GPIO <OPTIONAL> error getting from OF node\n");
+		pdata->ese_gpio = -EINVAL;
+	}
+
+	r = of_property_read_string(np, "qcom,clk-src", &pdata->clk_src_name);
+
+	pdata->clkreq_gpio = of_get_named_gpio(np, "qcom,nq-clkreq", 0);
+
+	if (r)
+		return -EINVAL;
+	return r;
+}
+
+static inline int gpio_input_init(const struct device * const dev,
+			const int gpio, const char * const gpio_name)
+{
+	int r = gpio_request(gpio, gpio_name);
+
+	if (r) {
+		dev_err(dev, "unable to request gpio [%d]\n", gpio);
+		return r;
+	}
+
+	r = gpio_direction_input(gpio);
+	if (r)
+		dev_err(dev, "unable to set direction for gpio [%d]\n", gpio);
+
+	return r;
+}
+
+static int nqx_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	int r = 0;
+	int irqn = 0;
+	struct nqx_platform_data *platform_data;
+	struct nqx_dev *nqx_dev;
+
+	dev_dbg(&client->dev, "%s: enter\n", __func__);
+	if (client->dev.of_node) {
+		platform_data = devm_kzalloc(&client->dev,
+			sizeof(struct nqx_platform_data), GFP_KERNEL);
+		if (!platform_data) {
+			r = -ENOMEM;
+			goto err_platform_data;
+		}
+		r = nfc_parse_dt(&client->dev, platform_data);
+		if (r)
+			goto err_free_data;
+	} else
+		platform_data = client->dev.platform_data;
+
+	dev_dbg(&client->dev,
+		"%s, inside nfc-nci flags = %x\n",
+		__func__, client->flags);
+
+	if (platform_data == NULL) {
+		dev_err(&client->dev, "%s: failed\n", __func__);
+		r = -ENODEV;
+		goto err_platform_data;
+	}
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		dev_err(&client->dev, "%s: need I2C_FUNC_I2C\n", __func__);
+		r = -ENODEV;
+		goto err_free_data;
+	}
+	nqx_dev = kzalloc(sizeof(*nqx_dev), GFP_KERNEL);
+	if (nqx_dev == NULL) {
+		r = -ENOMEM;
+		goto err_free_data;
+	}
+	nqx_dev->client = client;
+	nqx_dev->kbuflen = MAX_BUFFER_SIZE;
+	nqx_dev->kbuf = kzalloc(MAX_BUFFER_SIZE, GFP_KERNEL);
+	if (!nqx_dev->kbuf) {
+		dev_err(&client->dev,
+			"failed to allocate memory for nqx_dev->kbuf\n");
+		r = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	if (gpio_is_valid(platform_data->en_gpio)) {
+		r = gpio_request(platform_data->en_gpio, "nfc_reset_gpio");
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to request nfc reset gpio [%d]\n",
+				__func__,
+				platform_data->en_gpio);
+			goto err_mem;
+		}
+		r = gpio_direction_output(platform_data->en_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to set direction for nfc reset gpio [%d]\n",
+					__func__,
+					platform_data->en_gpio);
+			goto err_en_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+		"%s: nfc reset gpio not provided\n", __func__);
+		goto err_mem;
+	}
+
+	if (gpio_is_valid(platform_data->irq_gpio)) {
+		r = gpio_request(platform_data->irq_gpio, "nfc_irq_gpio");
+		if (r) {
+			dev_err(&client->dev, "%s: unable to request nfc irq gpio [%d]\n",
+				__func__, platform_data->irq_gpio);
+			goto err_en_gpio;
+		}
+		r = gpio_direction_input(platform_data->irq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: unable to set direction for nfc irq gpio [%d]\n",
+				__func__,
+				platform_data->irq_gpio);
+			goto err_irq_gpio;
+		}
+		irqn = gpio_to_irq(platform_data->irq_gpio);
+		if (irqn < 0) {
+			r = irqn;
+			goto err_irq_gpio;
+		}
+		client->irq = irqn;
+	} else {
+		dev_err(&client->dev, "%s: irq gpio not provided\n", __func__);
+		goto err_en_gpio;
+	}
+	if (gpio_is_valid(platform_data->firm_gpio)) {
+		r = gpio_request(platform_data->firm_gpio,
+			"nfc_firm_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc firmware gpio [%d]\n",
+				__func__, platform_data->firm_gpio);
+			goto err_irq_gpio;
+		}
+		r = gpio_direction_output(platform_data->firm_gpio, 0);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc firmware gpio [%d]\n",
+			__func__, platform_data->firm_gpio);
+			goto err_firm_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: firm gpio not provided\n", __func__);
+		goto err_irq_gpio;
+	}
+	if (gpio_is_valid(platform_data->ese_gpio)) {
+		r = gpio_request(platform_data->ese_gpio,
+				"nfc-ese_pwr");
+		if (r) {
+			nqx_dev->ese_gpio = -EINVAL;
+			dev_err(&client->dev,
+				"%s: unable to request nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+			/* ese gpio optional so we should continue */
+		} else {
+			nqx_dev->ese_gpio = platform_data->ese_gpio;
+			r = gpio_direction_output(platform_data->ese_gpio, 0);
+			if (r) {
+				/*
+				 * free ese gpio and set invalid
+				 * to avoid further use
+				 */
+				gpio_free(platform_data->ese_gpio);
+				nqx_dev->ese_gpio = -EINVAL;
+				dev_err(&client->dev,
+					"%s: cannot set direction for nfc ese gpio [%d]\n",
+					__func__, platform_data->ese_gpio);
+				/* ese gpio optional so we should continue */
+			}
+		}
+	} else {
+		nqx_dev->ese_gpio = -EINVAL;
+		dev_err(&client->dev,
+			"%s: ese gpio not provided\n", __func__);
+		/* ese gpio optional so we should continue */
+	}
+	if (gpio_is_valid(platform_data->clkreq_gpio)) {
+		r = gpio_request(platform_data->clkreq_gpio,
+			"nfc_clkreq_gpio");
+		if (r) {
+			dev_err(&client->dev,
+				"%s: unable to request nfc clkreq gpio [%d]\n",
+				__func__, platform_data->clkreq_gpio);
+			goto err_ese_gpio;
+		}
+		r = gpio_direction_input(platform_data->clkreq_gpio);
+		if (r) {
+			dev_err(&client->dev,
+			"%s: cannot set direction for nfc clkreq gpio [%d]\n",
+			__func__, platform_data->clkreq_gpio);
+			goto err_clkreq_gpio;
+		}
+	} else {
+		dev_err(&client->dev,
+			"%s: clkreq gpio not provided\n", __func__);
+		goto err_ese_gpio;
+	}
+
+	nqx_dev->en_gpio = platform_data->en_gpio;
+	nqx_dev->irq_gpio = platform_data->irq_gpio;
+	nqx_dev->firm_gpio  = platform_data->firm_gpio;
+	nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
+	nqx_dev->pdata = platform_data;
+
+	/* init mutex and queues */
+	init_waitqueue_head(&nqx_dev->read_wq);
+	mutex_init(&nqx_dev->read_mutex);
+	spin_lock_init(&nqx_dev->irq_enabled_lock);
+
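+	/* Expose the controller to userspace as the "nq-nci" misc device */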
+	nqx_dev->nqx_device.minor = MISC_DYNAMIC_MINOR;
+	nqx_dev->nqx_device.name = "nq-nci";
+	nqx_dev->nqx_device.fops = &nfc_dev_fops;
+
+	r = misc_register(&nqx_dev->nqx_device);
+	if (r) {
+		dev_err(&client->dev, "%s: misc_register failed\n", __func__);
+		goto err_misc_register;
+	}
+
+	/* NFC_INT IRQ */
+	nqx_dev->irq_enabled = true;
+	r = request_irq(client->irq, nqx_dev_irq_handler,
+			  IRQF_TRIGGER_HIGH, client->name, nqx_dev);
+	if (r) {
+		dev_err(&client->dev, "%s: request_irq failed\n", __func__);
+		goto err_request_irq_failed;
+	}
+	nqx_disable_irq(nqx_dev);
+
+	/*
+	 * To be efficient we need to test whether the NFCC hardware is
+	 * physically present before attempting further hardware
+	 * initialisation.
+	 */
+	r = nfcc_hw_check(client, nqx_dev);
+	if (r) {
+		/* make sure NFCC is not enabled */
+		gpio_set_value(platform_data->en_gpio, 0);
+		/* there is no hardware switch to turn NFC off */
+		goto err_request_hw_check_failed;
+	}
+
+	/* Register reboot notifier here */
+	r = register_reboot_notifier(&nfcc_notifier);
+	if (r) {
+		dev_err(&client->dev,
+			"%s: cannot register reboot notifier(err = %d)\n",
+			__func__, r);
+		/*
+		 * nfcc_hw_check() does not allocate memory, so the
+		 * same goto target can be reused here
+		 */
+		goto err_request_hw_check_failed;
+	}
+
+#ifdef NFC_KERNEL_BU
+	r = nqx_clock_select(nqx_dev);
+	if (r < 0) {
+		dev_err(&client->dev,
+			"%s: nqx_clock_select failed\n", __func__);
+		goto err_clock_en_failed;
+	}
+	gpio_set_value(platform_data->en_gpio, 1);
+#endif
+	device_init_wakeup(&client->dev, true);
+	device_set_wakeup_capable(&client->dev, true);
+	i2c_set_clientdata(client, nqx_dev);
+	nqx_dev->irq_wake_up = false;
+
+	dev_info(&client->dev,
+	"%s: probing NFCC NQxxx exited successfully\n",
+		 __func__);
+	return 0;
+
+#ifdef NFC_KERNEL_BU
+err_clock_en_failed:
+	unregister_reboot_notifier(&nfcc_notifier);
+#endif
+err_request_hw_check_failed:
+	free_irq(client->irq, nqx_dev);
+err_request_irq_failed:
+	misc_deregister(&nqx_dev->nqx_device);
+err_misc_register:
+	mutex_destroy(&nqx_dev->read_mutex);
+err_clkreq_gpio:
+	gpio_free(platform_data->clkreq_gpio);
+err_ese_gpio:
+	/* optional gpio; may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(platform_data->ese_gpio);
+err_firm_gpio:
+	gpio_free(platform_data->firm_gpio);
+err_irq_gpio:
+	gpio_free(platform_data->irq_gpio);
+err_en_gpio:
+	gpio_free(platform_data->en_gpio);
+err_mem:
+	kfree(nqx_dev->kbuf);
+err_free_dev:
+	kfree(nqx_dev);
+err_free_data:
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, platform_data);
+err_platform_data:
+	dev_err(&client->dev,
+	"%s: probing nqxx failed, check hardware\n",
+		 __func__);
+	return r;
+}
+
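+/* Tear down the resources acquired in nqx_probe() */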
+static int nqx_remove(struct i2c_client *client)
+{
+	int ret = 0;
+	struct nqx_dev *nqx_dev;
+
+	nqx_dev = i2c_get_clientdata(client);
+	if (!nqx_dev) {
+		dev_err(&client->dev,
+		"%s: device doesn't exist anymore\n", __func__);
+		ret = -ENODEV;
+		goto err;
+	}
+
+	unregister_reboot_notifier(&nfcc_notifier);
+	free_irq(client->irq, nqx_dev);
+	misc_deregister(&nqx_dev->nqx_device);
+	mutex_destroy(&nqx_dev->read_mutex);
+	gpio_free(nqx_dev->clkreq_gpio);
+	/* optional gpio; may not have been configured in probe */
+	if (nqx_dev->ese_gpio > 0)
+		gpio_free(nqx_dev->ese_gpio);
+	gpio_free(nqx_dev->firm_gpio);
+	gpio_free(nqx_dev->irq_gpio);
+	gpio_free(nqx_dev->en_gpio);
+	kfree(nqx_dev->kbuf);
+	if (client->dev.of_node)
+		devm_kfree(&client->dev, nqx_dev->pdata);
+
+	kfree(nqx_dev);
+err:
+	return ret;
+}
+
+static int nqx_suspend(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
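+	/* Keep the NFC interrupt armed as a wakeup source while suspended */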
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_enabled) {
+		if (!enable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = true;
+	}
+	return 0;
+}
+
+static int nqx_resume(struct device *device)
+{
+	struct i2c_client *client = to_i2c_client(device);
+	struct nqx_dev *nqx_dev = i2c_get_clientdata(client);
+
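+	/* Disarm the wakeup interrupt that was armed during suspend */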
+	if (device_may_wakeup(&client->dev) && nqx_dev->irq_wake_up) {
+		if (!disable_irq_wake(client->irq))
+			nqx_dev->irq_wake_up = false;
+	}
+	return 0;
+}
+
+static const struct i2c_device_id nqx_id[] = {
+	{"nqx-i2c", 0},
+	{}
+};
+
+static const struct dev_pm_ops nfc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(nqx_suspend, nqx_resume)
+};
+
+static struct i2c_driver nqx = {
+	.id_table = nqx_id,
+	.probe = nqx_probe,
+	.remove = nqx_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "nq-nci",
+		.of_match_table = msm_match_table,
+		.pm = &nfc_pm_ops,
+	},
+};
+
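+/* Reboot notifier: drive the NFCC VEN control GPIO high before restart */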
+static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	gpio_set_value(disable_ctrl, 1);
+	return NOTIFY_OK;
+}
+
+/*
+ * module load/unload record keeping
+ */
+static int __init nqx_dev_init(void)
+{
+	return i2c_add_driver(&nqx);
+}
+module_init(nqx_dev_init);
+
+static void __exit nqx_dev_exit(void)
+{
+	unregister_reboot_notifier(&nfcc_notifier);
+	i2c_del_driver(&nqx);
+}
+module_exit(nqx_dev_exit);
+
+MODULE_DESCRIPTION("NFC nqx");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
new file mode 100644
index 0000000..87715c2
--- /dev/null
+++ b/drivers/nfc/nq-nci.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __NQ_NCI_H
+#define __NQ_NCI_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <linux/semaphore.h>
+#include <linux/completion.h>
+
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/nfcinfo.h>
+
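+/* ioctls exposed through the nq-nci misc device */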
+#define NFC_SET_PWR			_IOW(0xE9, 0x01, unsigned int)
+#define ESE_SET_PWR			_IOW(0xE9, 0x02, unsigned int)
+#define ESE_GET_PWR			_IOR(0xE9, 0x03, unsigned int)
+#define SET_RX_BLOCK			_IOW(0xE9, 0x04, unsigned int)
+#define SET_EMULATOR_TEST_POINT		_IOW(0xE9, 0x05, unsigned int)
+#define NFCC_INITIAL_CORE_RESET_NTF	_IOW(0xE9, 0x10, unsigned int)
+
+#define NFC_RX_BUFFER_CNT_START		(0x0)
+#define PAYLOAD_HEADER_LENGTH		(0x3)
+#define PAYLOAD_LENGTH_MAX		(256)
+#define BYTE				(0x8)
+#define NCI_IDENTIFIER			(0x10)
+
+enum nfcc_initial_core_reset_ntf {
+	TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0 */
+	ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
+	DEFAULT_INITIAL_CORE_RESET_NTF, /* 2 */
+};
+
+enum nfcc_chip_variant {
+	NFCC_NQ_210			= 0x48,	/**< NFCC NQ210 */
+	NFCC_NQ_220			= 0x58,	/**< NFCC NQ220 */
+	NFCC_NQ_310			= 0x40,	/**< NFCC NQ310 */
+	NFCC_NQ_330			= 0x51,	/**< NFCC NQ330 */
+	NFCC_PN66T			= 0x18,	/**< NFCC PN66T */
+	NFCC_NOT_SUPPORTED	        = 0xFF	/**< NFCC is not supported */
+};
+#endif
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 351bac8..0392eb8 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -218,7 +218,10 @@
 	if (cmd_rc < 0)
 		return cmd_rc;
 
-	nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
+	if (clear_err.cleared > 0)
+		nvdimm_clear_from_poison_list(nvdimm_bus, phys,
+					      clear_err.cleared);
+
 	return clear_err.cleared;
 }
 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5f2feee..fbeca06 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1725,7 +1725,6 @@
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_attr_group);
 		del_gendisk(ns->disk);
-		blk_mq_abort_requeue_list(ns->queue);
 		blk_cleanup_queue(ns->queue);
 	}
 
@@ -2048,8 +2047,16 @@
 			continue;
 		revalidate_disk(ns->disk);
 		blk_set_queue_dying(ns->queue);
-		blk_mq_abort_requeue_list(ns->queue);
-		blk_mq_start_stopped_hw_queues(ns->queue, true);
+
+		/*
+		 * Forcibly start all queues to avoid having stuck requests.
+		 * Note that we must ensure the queues are not stopped
+		 * when the final removal happens.
+		 */
+		blk_mq_start_hw_queues(ns->queue);
+
+		/* draining requests in requeue list */
+		blk_mq_kick_requeue_list(ns->queue);
 	}
 	mutex_unlock(&ctrl->namespaces_mutex);
 }
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8a9c186..14eac73 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1384,6 +1384,11 @@
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
+		if (dev->cmbsz) {
+			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+						     &dev_attr_cmb.attr, NULL);
+			dev->cmbsz = 0;
+		}
 	}
 }
 
@@ -1655,6 +1660,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
+	nvme_release_cmb(dev);
 	pci_free_irq_vectors(pdev);
 
 	if (pci_is_enabled(pdev)) {
@@ -1993,7 +1999,6 @@
 	nvme_dev_disable(dev, true);
 	nvme_dev_remove_admin(dev);
 	nvme_free_queues(dev, 0);
-	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_put_ctrl(&dev->ctrl);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3d25add..3222f3e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1011,6 +1011,19 @@
 		nvme_rdma_wr_error(cq, wc, "SEND");
 }
 
+static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
+{
+	int sig_limit;
+
+	/*
+	 * We signal completion every queue depth/2 and also handle the
+	 * degenerate case of a device with queue_depth=1, where we
+	 * would need to signal every message.
+	 */
+	sig_limit = max(queue->queue_size / 2, 1);
+	return (++queue->sig_count % sig_limit) == 0;
+}
+
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
 		struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
 		struct ib_send_wr *first, bool flush)
@@ -1038,9 +1051,6 @@
 	 * Would have been way to obvious to handle this in hardware or
 	 * at least the RDMA stack..
 	 *
-	 * This messy and racy code sniplet is copy and pasted from the iSER
-	 * initiator, and the magic '32' comes from there as well.
-	 *
 	 * Always signal the flushes. The magic request used for the flush
 	 * sequencer is not allocated in our driver's tagset and it's
 	 * triggered to be freed by blk_cleanup_queue(). So we need to
@@ -1048,7 +1058,7 @@
 	 * embeded in request's payload, is not freed when __ib_process_cq()
 	 * calls wr_cqe->done().
 	 */
-	if ((++queue->sig_count % 32) == 0 || flush)
+	if (nvme_rdma_queue_sig_limit(queue) || flush)
 		wr.send_flags |= IB_SEND_SIGNALED;
 
 	if (first)
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 3723f57..2c1b08a 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -263,7 +263,7 @@
 	if (!parser->range || parser->range + parser->np > parser->end)
 		return NULL;
 
-	range->pci_space = parser->range[0];
+	range->pci_space = be32_to_cpup(parser->range);
 	range->flags = of_bus_pci_get_flags(parser->range);
 	range->pci_addr = of_read_number(parser->range + 1, ns);
 	range->cpu_addr = of_translate_address(parser->node,
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 8668808..66af185 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -505,6 +505,9 @@
 
 	/* Allocate memory for the expanded device tree */
 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+	if (!mem)
+		return NULL;
+
 	memset(mem, 0, size);
 
 	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index a53982a..2db1f7a 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -57,6 +57,8 @@
 		else
 			node_set(nid, numa_nodes_parsed);
 	}
+
+	of_node_put(cpus);
 }
 
 static int __init of_numa_parse_memory_nodes(void)
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 61fc349..dafb4cd 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -72,6 +72,7 @@
 	PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
 };
 
+#define CPU_AFFINITY_ALL	-1ULL
 #define PCI_CONFIG_MMIO_LENGTH	0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -868,7 +869,7 @@
 		hv_int_desc_free(hpdev, int_desc);
 	}
 
-	int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
+	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
 	if (!int_desc)
 		goto drop_reference;
 
@@ -889,9 +890,13 @@
 	 * processors because Hyper-V only supports 64 in a guest.
 	 */
 	affinity = irq_data_get_affinity_mask(data);
-	for_each_cpu_and(cpu, affinity, cpu_online_mask) {
-		int_pkt->int_desc.cpu_mask |=
-			(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+	if (cpumask_weight(affinity) >= 32) {
+		int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+	} else {
+		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
+			int_pkt->int_desc.cpu_mask |=
+				(1ULL << vmbus_cpu_number_to_vp_number(cpu));
+		}
 	}
 
 	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index bcd10c7..1b07865 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -974,15 +974,19 @@
 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
 		  enum pci_mmap_api mmap_api)
 {
-	unsigned long nr, start, size, pci_start;
+	unsigned long nr, start, size;
+	resource_size_t pci_start = 0, pci_end;
 
 	if (pci_resource_len(pdev, resno) == 0)
 		return 0;
 	nr = vma_pages(vma);
 	start = vma->vm_pgoff;
 	size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
-	pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
-			pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+	if (mmap_api == PCI_MMAP_PROCFS) {
+		pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+				     &pci_start, &pci_end);
+		pci_start >>= PAGE_SHIFT;
+	}
 	if (start >= pci_start && start < pci_start + size &&
 			start + nr <= pci_start + size)
 		return 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6922964..579c494 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1782,8 +1782,8 @@
 		}
 	}
 	if (!list_empty(&pci_pme_list))
-		schedule_delayed_work(&pci_pme_work,
-				      msecs_to_jiffies(PME_TIMEOUT));
+		queue_delayed_work(system_freezable_wq, &pci_pme_work,
+				   msecs_to_jiffies(PME_TIMEOUT));
 	mutex_unlock(&pci_pme_list_mutex);
 }
 
@@ -1848,8 +1848,9 @@
 			mutex_lock(&pci_pme_list_mutex);
 			list_add(&pme_dev->list, &pci_pme_list);
 			if (list_is_singular(&pci_pme_list))
-				schedule_delayed_work(&pci_pme_work,
-						      msecs_to_jiffies(PME_TIMEOUT));
+				queue_delayed_work(system_freezable_wq,
+						   &pci_pme_work,
+						   msecs_to_jiffies(PME_TIMEOUT));
 			mutex_unlock(&pci_pme_list_mutex);
 		} else {
 			mutex_lock(&pci_pme_list_mutex);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 2408abe..66c8863 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -231,24 +231,33 @@
 {
 	struct pci_dev *dev = PDE_DATA(file_inode(file));
 	struct pci_filp_private *fpriv = file->private_data;
-	int i, ret, write_combine;
+	int i, ret, write_combine = 0, res_bit;
 
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
 
+	if (fpriv->mmap_state == pci_mmap_io)
+		res_bit = IORESOURCE_IO;
+	else
+		res_bit = IORESOURCE_MEM;
+
 	/* Make sure the caller is mapping a real resource for this device */
 	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
-		if (pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
+		if (dev->resource[i].flags & res_bit &&
+		    pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
 			break;
 	}
 
 	if (i >= PCI_ROM_RESOURCE)
 		return -ENODEV;
 
-	if (fpriv->mmap_state == pci_mmap_mem)
-		write_combine = fpriv->write_combine;
-	else
-		write_combine = 0;
+	if (fpriv->mmap_state == pci_mmap_mem &&
+	    fpriv->write_combine) {
+		if (dev->resource[i].flags & IORESOURCE_PREFETCH)
+			write_combine = 1;
+		else
+			return -EINVAL;
+	}
 	ret = pci_mmap_page_range(dev, vma,
 				  fpriv->mmap_state, write_combine);
 	if (ret < 0)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 304e206..40ee647 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -88,14 +88,14 @@
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
 	  Technologies Inc SDM845 platform.
 
-config PINCTRL_SDM830
-	tristate "Qualcomm Technologies Inc SDM830 pin controller driver"
+config PINCTRL_SDM670
+	tristate "Qualcomm Technologies Inc SDM670 pin controller driver"
 	depends on GPIOLIB && OF
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
-	  Technologies Inc SDM830 platform.
+	  Technologies Inc SDM670 platform.
 
 config PINCTRL_SDXPOORWILLS
 	tristate "Qualcomm Technologies Inc SDXPOORWILLS pin controller driver"
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 4786960..6a49671 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -16,7 +16,7 @@
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
 obj-$(CONFIG_PINCTRL_SDM845) += pinctrl-sdm845.o
-obj-$(CONFIG_PINCTRL_SDM830) += pinctrl-sdm830.o
+obj-$(CONFIG_PINCTRL_SDM670) += pinctrl-sdm670.o
 obj-$(CONFIG_PINCTRL_SDXPOORWILLS)	+= pinctrl-sdxpoorwills.o
 obj-$(CONFIG_PINCTRL_WCD)	+= pinctrl-wcd.o
 obj-$(CONFIG_PINCTRL_LPI)	+= pinctrl-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2a1367e..9520166 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -17,6 +17,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/pinctrl/machine.h>
 #include <linux/pinctrl/pinctrl.h>
@@ -31,7 +32,7 @@
 #include <linux/reboot.h>
 #include <linux/pm.h>
 #include <linux/log2.h>
-
+#include <linux/irq.h>
 #include "../core.h"
 #include "../pinconf.h"
 #include "pinctrl-msm.h"
@@ -749,6 +750,91 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
+static void msm_dirconn_irq_mask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_mask)
+		parent_data->chip->irq_mask(parent_data);
+}
+
+static void msm_dirconn_irq_unmask(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_unmask)
+		parent_data->chip->irq_unmask(parent_data);
+}
+
+static void msm_dirconn_irq_ack(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_ack)
+		parent_data->chip->irq_ack(parent_data);
+}
+
+static void msm_dirconn_irq_eoi(struct irq_data *d)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_eoi)
+		parent_data->chip->irq_eoi(parent_data);
+}
+
+static int msm_dirconn_irq_set_affinity(struct irq_data *d,
+		const struct cpumask *maskval, bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_affinity)
+		return parent_data->chip->irq_set_affinity(parent_data,
+				maskval, force);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_vcpu_affinity(struct irq_data *d,
+		void *vcpu_info)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_vcpu_affinity)
+		return parent_data->chip->irq_set_vcpu_affinity(parent_data,
+				vcpu_info);
+	return 0;
+}
+
+static int msm_dirconn_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct irq_desc *desc = irq_data_to_desc(d);
+	struct irq_data *parent_data = irq_get_irq_data(desc->parent_irq);
+
+	if (parent_data->chip->irq_set_type)
+		return parent_data->chip->irq_set_type(parent_data, type);
+
+	return 0;
+}
+
+static struct irq_chip msm_dirconn_irq_chip = {
+	.name			= "msmgpio-dc",
+	.irq_mask		= msm_dirconn_irq_mask,
+	.irq_unmask		= msm_dirconn_irq_unmask,
+	.irq_eoi		= msm_dirconn_irq_eoi,
+	.irq_ack		= msm_dirconn_irq_ack,
+	.irq_set_type		= msm_dirconn_irq_set_type,
+	.irq_set_affinity	= msm_dirconn_irq_set_affinity,
+	.irq_set_vcpu_affinity	= msm_dirconn_irq_set_vcpu_affinity,
+	.flags			= IRQCHIP_SKIP_SET_WAKE
+					| IRQCHIP_MASK_ON_SUSPEND
+					| IRQCHIP_SET_TYPE_MASKED,
+};
+
 static void msm_gpio_irq_handler(struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -783,6 +869,55 @@
 	chained_irq_exit(chip, desc);
 }
 
+static void msm_gpio_dirconn_handler(struct irq_desc *desc)
+{
+	struct irq_data *irqd = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(chip, desc);
+	generic_handle_irq(irqd->irq);
+	chained_irq_exit(chip, desc);
+}
+
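+/*
+ * Wire up GPIOs that have dedicated (direct-connect) GIC interrupt lines so
+ * their interrupts bypass the shared TLMM summary interrupt.
+ */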
+static void msm_gpio_setup_dir_connects(struct msm_pinctrl *pctrl)
+{
+	struct device_node *parent_node;
+	struct irq_domain *parent_domain;
+	struct irq_fwspec fwspec;
+	unsigned int i;
+
+	parent_node = of_irq_find_parent(pctrl->dev->of_node);
+
+	if (!parent_node)
+		return;
+
+	parent_domain = irq_find_host(parent_node);
+	if (!parent_domain)
+		return;
+
+	fwspec.fwnode = parent_domain->fwnode;
+	for (i = 0; i < pctrl->soc->n_dir_conns; i++) {
+		const struct msm_dir_conn *dirconn = &pctrl->soc->dir_conn[i];
+		unsigned int parent_irq;
+		int irq;
+
+		fwspec.param[0] = 0; /* SPI */
+		fwspec.param[1] = dirconn->hwirq;
+		fwspec.param[2] = IRQ_TYPE_NONE;
+		fwspec.param_count = 3;
+		parent_irq = irq_create_fwspec_mapping(&fwspec);
+
+		irq = irq_find_mapping(pctrl->chip.irqdomain, dirconn->gpio);
+
+		irq_set_parent(irq, parent_irq);
+		irq_set_chip(irq, &msm_dirconn_irq_chip);
+		irq_set_chip_data(irq, irq_get_irq_data(parent_irq));
+		__irq_set_handler(parent_irq, msm_gpio_dirconn_handler,
+				false, NULL);
+		irq_set_handler_data(parent_irq, irq_get_irq_data(irq));
+	}
+}
+
 static int msm_gpio_init(struct msm_pinctrl *pctrl)
 {
 	struct gpio_chip *chip;
@@ -827,6 +962,7 @@
 	gpiochip_set_chained_irqchip(chip, &msm_gpio_irq_chip, pctrl->irq,
 				     msm_gpio_irq_handler);
 
+	msm_gpio_setup_dir_connects(pctrl);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index e986fda..0e223e0 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -95,6 +95,16 @@
 	unsigned intr_polarity_bit:5;
 	unsigned intr_detection_bit:5;
 	unsigned intr_detection_width:5;
+};
+
+/**
+ * struct msm_dir_conn - Direct GPIO connect configuration
+ * @gpio:	GPIO pin number
+ * @hwirq:	The GIC interrupt that the pin is connected to
+ */
+struct msm_dir_conn {
+	unsigned int gpio;
+	irq_hw_number_t hwirq;
 };
 
 /**
@@ -106,6 +116,8 @@
  * @groups:     An array describing all pin groups the pin SoC supports.
  * @ngroups:    The numbmer of entries in @groups.
  * @ngpio:      The number of pingroups the driver should expose as GPIOs.
+ * @dir_conn:   An array describing all the pins directly connected to GIC.
+ * @n_dir_conns: The number of pins directly connected to GIC.
  */
 struct msm_pinctrl_soc_data {
 	const struct pinctrl_pin_desc *pins;
@@ -115,6 +127,8 @@
 	const struct msm_pingroup *groups;
 	unsigned ngroups;
 	unsigned ngpios;
+	const struct msm_dir_conn *dir_conn;
+	unsigned int n_dir_conns;
 };
 
 int msm_pinctrl_probe(struct platform_device *pdev,
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm830.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
similarity index 96%
rename from drivers/pinctrl/qcom/pinctrl-sdm830.c
rename to drivers/pinctrl/qcom/pinctrl-sdm670.c
index fc3d0ad..c93628e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm830.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -29,7 +29,6 @@
 #define SOUTH	0x00900000
 #define WEST	0x00100000
 #define REG_SIZE 0x1000
-
 #define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
 	{						\
 		.name = "gpio" #id,			\
@@ -118,7 +117,7 @@
 		.intr_detection_bit = -1,		\
 		.intr_detection_width = -1,		\
 	}
-static const struct pinctrl_pin_desc sdm830_pins[] = {
+static const struct pinctrl_pin_desc sdm670_pins[] = {
 	PINCTRL_PIN(0, "GPIO_0"),
 	PINCTRL_PIN(1, "GPIO_1"),
 	PINCTRL_PIN(2, "GPIO_2"),
@@ -255,9 +254,12 @@
 	PINCTRL_PIN(147, "GPIO_147"),
 	PINCTRL_PIN(148, "GPIO_148"),
 	PINCTRL_PIN(149, "GPIO_149"),
-	PINCTRL_PIN(150, "SDC2_CLK"),
-	PINCTRL_PIN(151, "SDC2_CMD"),
-	PINCTRL_PIN(152, "SDC2_DATA"),
+	PINCTRL_PIN(150, "SDC1_CLK"),
+	PINCTRL_PIN(151, "SDC1_CMD"),
+	PINCTRL_PIN(152, "SDC1_DATA"),
+	PINCTRL_PIN(153, "SDC2_CLK"),
+	PINCTRL_PIN(154, "SDC2_CMD"),
+	PINCTRL_PIN(155, "SDC2_DATA"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
@@ -399,11 +401,14 @@
 DECLARE_MSM_GPIO_PINS(148);
 DECLARE_MSM_GPIO_PINS(149);
 
-static const unsigned int sdc2_clk_pins[] = { 150 };
-static const unsigned int sdc2_cmd_pins[] = { 151 };
-static const unsigned int sdc2_data_pins[] = { 152 };
+static const unsigned int sdc1_clk_pins[] = { 150 };
+static const unsigned int sdc1_cmd_pins[] = { 151 };
+static const unsigned int sdc1_data_pins[] = { 152 };
+static const unsigned int sdc2_clk_pins[] = { 153 };
+static const unsigned int sdc2_cmd_pins[] = { 154 };
+static const unsigned int sdc2_data_pins[] = { 155 };
 
-enum sdm830_functions {
+enum sdm670_functions {
 	msm_mux_qup0,
 	msm_mux_gpio,
 	msm_mux_reserved0,
@@ -456,6 +461,16 @@
 	msm_mux_qup1,
 	msm_mux_qdss_gpio4,
 	msm_mux_reserved17,
+	msm_mux_qdss_gpio5,
+	msm_mux_reserved18,
+	msm_mux_qdss_gpio6,
+	msm_mux_reserved19,
+	msm_mux_qdss_gpio7,
+	msm_mux_reserved20,
+	msm_mux_cci_timer0,
+	msm_mux_gcc_gp2,
+	msm_mux_qdss_gpio8,
+	msm_mux_reserved21,
 	msm_mux_cci_timer1,
 	msm_mux_gcc_gp3,
 	msm_mux_qdss_gpio,
@@ -470,16 +485,6 @@
 	msm_mux_cci_timer4,
 	msm_mux_qdss_gpio11,
 	msm_mux_reserved25,
-	msm_mux_qdss_gpio5,
-	msm_mux_reserved18,
-	msm_mux_qdss_gpio6,
-	msm_mux_reserved19,
-	msm_mux_qdss_gpio7,
-	msm_mux_reserved20,
-	msm_mux_cci_timer0,
-	msm_mux_gcc_gp2,
-	msm_mux_qdss_gpio8,
-	msm_mux_reserved21,
 	msm_mux_qdss_gpio12,
 	msm_mux_JITTER_BIST,
 	msm_mux_reserved26,
@@ -894,6 +899,36 @@
 static const char * const reserved17_groups[] = {
 	"gpio17",
 };
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const reserved18_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const reserved19_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const reserved20_groups[] = {
+	"gpio20",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const reserved21_groups[] = {
+	"gpio21",
+};
 static const char * const cci_timer1_groups[] = {
 	"gpio22",
 };
@@ -936,36 +971,6 @@
 static const char * const reserved25_groups[] = {
 	"gpio25",
 };
-static const char * const qdss_gpio5_groups[] = {
-	"gpio18", "gpio122",
-};
-static const char * const reserved18_groups[] = {
-	"gpio18",
-};
-static const char * const qdss_gpio6_groups[] = {
-	"gpio19", "gpio41",
-};
-static const char * const reserved19_groups[] = {
-	"gpio19",
-};
-static const char * const qdss_gpio7_groups[] = {
-	"gpio20", "gpio42",
-};
-static const char * const reserved20_groups[] = {
-	"gpio20",
-};
-static const char * const cci_timer0_groups[] = {
-	"gpio21",
-};
-static const char * const gcc_gp2_groups[] = {
-	"gpio21",
-};
-static const char * const qdss_gpio8_groups[] = {
-	"gpio21", "gpio75",
-};
-static const char * const reserved21_groups[] = {
-	"gpio21",
-};
 static const char * const qdss_gpio12_groups[] = {
 	"gpio26", "gpio80",
 };
@@ -1680,7 +1685,7 @@
 	"gpio123",
 };
 
-static const struct msm_function sdm830_functions[] = {
+static const struct msm_function sdm670_functions[] = {
 	FUNCTION(qup0),
 	FUNCTION(gpio),
 	FUNCTION(reserved0),
@@ -1733,6 +1738,16 @@
 	FUNCTION(qup1),
 	FUNCTION(qdss_gpio4),
 	FUNCTION(reserved17),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(reserved18),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(reserved19),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(reserved20),
+	FUNCTION(cci_timer0),
+	FUNCTION(gcc_gp2),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(reserved21),
 	FUNCTION(cci_timer1),
 	FUNCTION(gcc_gp3),
 	FUNCTION(qdss_gpio),
@@ -1747,16 +1762,6 @@
 	FUNCTION(cci_timer4),
 	FUNCTION(qdss_gpio11),
 	FUNCTION(reserved25),
-	FUNCTION(qdss_gpio5),
-	FUNCTION(reserved18),
-	FUNCTION(qdss_gpio6),
-	FUNCTION(reserved19),
-	FUNCTION(qdss_gpio7),
-	FUNCTION(reserved20),
-	FUNCTION(cci_timer0),
-	FUNCTION(gcc_gp2),
-	FUNCTION(qdss_gpio8),
-	FUNCTION(reserved21),
 	FUNCTION(qdss_gpio12),
 	FUNCTION(JITTER_BIST),
 	FUNCTION(reserved26),
@@ -1996,7 +2001,7 @@
 	FUNCTION(reserved123),
 };
 
-static const struct msm_pingroup sdm830_groups[] = {
+static const struct msm_pingroup sdm670_groups[] = {
 	PINGROUP(0, SOUTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
 	PINGROUP(1, SOUTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
 	PINGROUP(2, SOUTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
@@ -2108,9 +2113,9 @@
 		 QUP_L5, reserved76, NA, NA, NA),
 	PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
 		 QUP_L6, reserved77, NA, NA, NA),
-	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, reserved78, NA, NA, NA, NA, NA,
+	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, NA, reserved78, NA, NA, NA, NA,
 		 NA),
-	PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, reserved79, NA,
+	PINGROUP(79, NORTH, sec_mi2s, GP_PDM2, NA, qdss_gpio11, NA, reserved79,
 		 NA, NA, NA),
 	PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
 		 NA, NA),
@@ -2231,53 +2236,56 @@
 	PINGROUP(147, WEST, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
 	PINGROUP(148, WEST, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
 	PINGROUP(149, WEST, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
-	SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
-	SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
-	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc1_clk, 0x599000, 13, 6),
+	SDC_QDSD_PINGROUP(sdc1_cmd, 0x599000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc1_data, 0x599000, 9, 0),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x99a000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x99a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x99a000, 9, 0),
 };
 
-static const struct msm_pinctrl_soc_data sdm830_pinctrl = {
-	.pins = sdm830_pins,
-	.npins = ARRAY_SIZE(sdm830_pins),
-	.functions = sdm830_functions,
-	.nfunctions = ARRAY_SIZE(sdm830_functions),
-	.groups = sdm830_groups,
-	.ngroups = ARRAY_SIZE(sdm830_groups),
+static const struct msm_pinctrl_soc_data sdm670_pinctrl = {
+	.pins = sdm670_pins,
+	.npins = ARRAY_SIZE(sdm670_pins),
+	.functions = sdm670_functions,
+	.nfunctions = ARRAY_SIZE(sdm670_functions),
+	.groups = sdm670_groups,
+	.ngroups = ARRAY_SIZE(sdm670_groups),
 	.ngpios = 136,
 };
 
-static int sdm830_pinctrl_probe(struct platform_device *pdev)
+static int sdm670_pinctrl_probe(struct platform_device *pdev)
 {
-	return msm_pinctrl_probe(pdev, &sdm830_pinctrl);
+	return msm_pinctrl_probe(pdev, &sdm670_pinctrl);
 }
 
-static const struct of_device_id sdm830_pinctrl_of_match[] = {
-	{ .compatible = "qcom,sdm830-pinctrl", },
+static const struct of_device_id sdm670_pinctrl_of_match[] = {
+	{ .compatible = "qcom,sdm670-pinctrl", },
 	{ },
 };
 
-static struct platform_driver sdm830_pinctrl_driver = {
+static struct platform_driver sdm670_pinctrl_driver = {
 	.driver = {
-		.name = "sdm830-pinctrl",
+		.name = "sdm670-pinctrl",
 		.owner = THIS_MODULE,
-		.of_match_table = sdm830_pinctrl_of_match,
+		.of_match_table = sdm670_pinctrl_of_match,
 	},
-	.probe = sdm830_pinctrl_probe,
+	.probe = sdm670_pinctrl_probe,
 	.remove = msm_pinctrl_remove,
 };
 
-static int __init sdm830_pinctrl_init(void)
+static int __init sdm670_pinctrl_init(void)
 {
-	return platform_driver_register(&sdm830_pinctrl_driver);
+	return platform_driver_register(&sdm670_pinctrl_driver);
 }
-arch_initcall(sdm830_pinctrl_init);
+arch_initcall(sdm670_pinctrl_init);
 
-static void __exit sdm830_pinctrl_exit(void)
+static void __exit sdm670_pinctrl_exit(void)
 {
-	platform_driver_unregister(&sdm830_pinctrl_driver);
+	platform_driver_unregister(&sdm670_pinctrl_driver);
 }
-module_exit(sdm830_pinctrl_exit);
+module_exit(sdm670_pinctrl_exit);
 
-MODULE_DESCRIPTION("QTI sdm830 pinctrl driver");
+MODULE_DESCRIPTION("QTI sdm670 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, sdm830_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, sdm670_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index 30c31a8..7d125eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -2377,6 +2377,84 @@
 	UFS_RESET(ufs_reset, 0x99f000),
 };
 
+static const struct msm_dir_conn sdm845_dir_conn[] = {
+	{1, 510},
+	{3, 511},
+	{5, 512},
+	{10, 513},
+	{11, 514},
+	{20, 515},
+	{22, 516},
+	{24, 517},
+	{26, 518},
+	{30, 519},
+	{31, 639},
+	{32, 521},
+	{34, 522},
+	{36, 523},
+	{37, 524},
+	{38, 525},
+	{39, 526},
+	{40, 527},
+	{41, 637},
+	{43, 529},
+	{44, 530},
+	{46, 531},
+	{48, 532},
+	{49, 640},
+	{52, 534},
+	{53, 535},
+	{54, 536},
+	{56, 537},
+	{57, 538},
+	{58, 539},
+	{59, 540},
+	{60, 541},
+	{61, 542},
+	{62, 543},
+	{63, 544},
+	{64, 545},
+	{66, 546},
+	{68, 547},
+	{71, 548},
+	{73, 549},
+	{77, 550},
+	{78, 551},
+	{79, 552},
+	{80, 553},
+	{84, 554},
+	{85, 555},
+	{86, 556},
+	{88, 557},
+	{89, 638},
+	{91, 559},
+	{92, 560},
+	{95, 561},
+	{96, 562},
+	{97, 563},
+	{101, 564},
+	{103, 565},
+	{104, 566},
+	{115, 570},
+	{116, 571},
+	{117, 572},
+	{118, 573},
+	{119, 609},
+	{120, 610},
+	{121, 611},
+	{122, 612},
+	{123, 613},
+	{124, 614},
+	{125, 615},
+	{127, 617},
+	{128, 618},
+	{129, 619},
+	{130, 620},
+	{132, 621},
+	{133, 622},
+	{145, 623},
+};
+
 static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
 	.pins = sdm845_pins,
 	.npins = ARRAY_SIZE(sdm845_pins),
@@ -2385,6 +2463,8 @@
 	.groups = sdm845_groups,
 	.ngroups = ARRAY_SIZE(sdm845_groups),
 	.ngpios = 150,
+	.dir_conn = sdm845_dir_conn,
+	.n_dir_conns = ARRAY_SIZE(sdm845_dir_conn),
 };
 
 static int sdm845_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 293371b..c5aaac5 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -125,7 +125,6 @@
 	IPA_USB_CONNECTED,
 	IPA_USB_STOPPED,
 	IPA_USB_SUSPEND_REQUESTED,
-	IPA_USB_SUSPEND_IN_PROGRESS,
 	IPA_USB_SUSPENDED,
 	IPA_USB_SUSPENDED_NO_RWAKEUP,
 	IPA_USB_RESUME_IN_PROGRESS
@@ -146,13 +145,6 @@
 #define IPA3_USB_IS_TTYPE_DPL(__ttype) \
 	((__ttype) == IPA_USB_TRANSPORT_DPL)
 
-struct finish_suspend_work_context {
-	struct work_struct work;
-	enum ipa3_usb_transport_type ttype;
-	u32 dl_clnt_hdl;
-	u32 ul_clnt_hdl;
-};
-
 struct ipa3_usb_teth_prot_conn_params {
 	u32 usb_to_ipa_clnt_hdl;
 	u32 ipa_to_usb_clnt_hdl;
@@ -168,7 +160,6 @@
 	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
 	void *user_data;
 	enum ipa3_usb_state state;
-	struct finish_suspend_work_context finish_suspend_work;
 	struct ipa_usb_xdci_chan_params ch_params;
 	struct ipa3_usb_teth_prot_conn_params teth_conn_params;
 };
@@ -221,16 +212,10 @@
 
 static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
 static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
 static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
 	ipa3_usb_wq_notify_remote_wakeup);
 static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
 	ipa3_usb_wq_dpl_notify_remote_wakeup);
-static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
-	ipa3_usb_wq_notify_suspend_completed);
-static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
-	ipa3_usb_wq_dpl_notify_suspend_completed);
 
 struct ipa3_usb_context *ipa3_usb_ctx;
 
@@ -273,8 +258,6 @@
 		return "IPA_USB_STOPPED";
 	case IPA_USB_SUSPEND_REQUESTED:
 		return "IPA_USB_SUSPEND_REQUESTED";
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		return "IPA_USB_SUSPEND_IN_PROGRESS";
 	case IPA_USB_SUSPENDED:
 		return "IPA_USB_SUSPENDED";
 	case IPA_USB_SUSPENDED_NO_RWAKEUP:
@@ -330,17 +313,11 @@
 			 * In case of failure during suspend request
 			 * handling, state is reverted to connected.
 			 */
-			(err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
-			/*
-			 * In case of failure during suspend completing
-			 * handling, state is reverted to connected.
-			 */
-			(err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+			(err_permit && state == IPA_USB_SUSPEND_REQUESTED))
 			state_legal = true;
 		break;
 	case IPA_USB_STOPPED:
-		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
-			state == IPA_USB_CONNECTED ||
+		if (state == IPA_USB_CONNECTED ||
 			state == IPA_USB_SUSPENDED ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			state_legal = true;
@@ -349,19 +326,8 @@
 		if (state == IPA_USB_CONNECTED)
 			state_legal = true;
 		break;
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		if (state == IPA_USB_SUSPEND_REQUESTED ||
-			/*
-			 * In case of failure during resume, state is reverted
-			 * to original, which could be suspend_in_progress.
-			 * Allow it.
-			 */
-			(err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
-			state_legal = true;
-		break;
 	case IPA_USB_SUSPENDED:
 		if (state == IPA_USB_SUSPEND_REQUESTED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			/*
 			 * In case of failure during resume, state is reverted
 			 * to original, which could be suspended. Allow it
@@ -374,8 +340,7 @@
 			state_legal = true;
 		break;
 	case IPA_USB_RESUME_IN_PROGRESS:
-		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
-			state == IPA_USB_SUSPENDED)
+		if (state == IPA_USB_SUSPENDED)
 			state_legal = true;
 		break;
 	default:
@@ -452,7 +417,6 @@
 		break;
 	case IPA_USB_OP_DISCONNECT:
 		if  (state == IPA_USB_CONNECTED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			state == IPA_USB_SUSPENDED ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			is_legal = true;
@@ -483,7 +447,6 @@
 		break;
 	case IPA_USB_OP_RESUME:
 		if (state == IPA_USB_SUSPENDED ||
-			state == IPA_USB_SUSPEND_IN_PROGRESS ||
 			state == IPA_USB_SUSPENDED_NO_RWAKEUP)
 			is_legal = true;
 		break;
@@ -582,71 +545,6 @@
 	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
 }
 
-static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
-{
-	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
-{
-	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
-}
-
-static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
-{
-	struct finish_suspend_work_context *finish_suspend_work_ctx;
-	unsigned long flags;
-	int result = -EFAULT;
-	struct ipa3_usb_transport_type_ctx *tctx;
-
-	mutex_lock(&ipa3_usb_ctx->general_mutex);
-	IPA_USB_DBG_LOW("entry\n");
-	finish_suspend_work_ctx = container_of(work,
-		struct finish_suspend_work_context, work);
-	tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
-
-	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-		mutex_unlock(&ipa3_usb_ctx->general_mutex);
-		return;
-	}
-	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
-	/* Stop DL/DPL channel */
-	result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
-	if (result) {
-		IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
-			result);
-		ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
-			finish_suspend_work_ctx->dl_clnt_hdl,
-			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
-		/* Change state back to CONNECTED */
-		if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
-			finish_suspend_work_ctx->ttype))
-			IPA_USB_ERR("failed to change state to connected\n");
-		queue_work(ipa3_usb_ctx->wq,
-			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
-			&ipa3_usb_dpl_notify_remote_wakeup_work :
-			&ipa3_usb_notify_remote_wakeup_work);
-		mutex_unlock(&ipa3_usb_ctx->general_mutex);
-		return;
-	}
-
-	/* Change ipa_usb state to SUSPENDED */
-	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
-		finish_suspend_work_ctx->ttype))
-		IPA_USB_ERR("failed to change state to suspended\n");
-
-	queue_work(ipa3_usb_ctx->wq,
-		IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
-		&ipa3_usb_dpl_notify_suspend_completed_work :
-		&ipa3_usb_notify_suspend_completed_work);
-
-	IPA_USB_DBG_LOW("exit\n");
-	mutex_unlock(&ipa3_usb_ctx->general_mutex);
-}
-
 static int ipa3_usb_cons_request_resource_cb_do(
 	enum ipa3_usb_transport_type ttype,
 	struct work_struct *remote_wakeup_work)
@@ -674,17 +572,6 @@
 		else
 			result = -EINPROGRESS;
 		break;
-	case IPA_USB_SUSPEND_IN_PROGRESS:
-		/*
-		 * This case happens due to suspend interrupt.
-		 * CONS is granted
-		 */
-		if (!rm_ctx->cons_requested) {
-			rm_ctx->cons_requested = true;
-			queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
-		}
-		result = 0;
-		break;
 	case IPA_USB_SUSPENDED:
 		if (!rm_ctx->cons_requested) {
 			rm_ctx->cons_requested = true;
@@ -727,15 +614,10 @@
 			ipa3_usb_state_to_string(
 			ipa3_usb_ctx->ttype_ctx[ttype].state));
 	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
-	case IPA_USB_SUSPEND_IN_PROGRESS:
+	case IPA_USB_SUSPENDED:
 		/* Proceed with the suspend if no DL/DPL data */
 		if (rm_ctx->cons_requested)
 			rm_ctx->cons_requested_released = true;
-		else {
-			queue_work(ipa3_usb_ctx->wq,
-				&ipa3_usb_ctx->ttype_ctx[ttype].
-				finish_suspend_work.work);
-		}
 		break;
 	case IPA_USB_SUSPEND_REQUESTED:
 		if (rm_ctx->cons_requested)
@@ -2311,8 +2193,7 @@
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
 	orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
-		if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
-			orig_state != IPA_USB_SUSPENDED) {
+		if (orig_state != IPA_USB_SUSPENDED) {
 			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
 				flags);
 			/* Stop UL channel */
@@ -2340,8 +2221,7 @@
 	if (result)
 		goto bad_params;
 
-	if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
-		orig_state != IPA_USB_SUSPENDED) {
+	if (orig_state != IPA_USB_SUSPENDED) {
 		result = ipa3_usb_release_prod(ttype);
 		if (result) {
 			IPA_USB_ERR("failed to release PROD.\n");
@@ -2547,7 +2427,6 @@
 {
 	int result = 0;
 	unsigned long flags;
-	enum ipa3_usb_cons_state curr_cons_state;
 	enum ipa3_usb_transport_type ttype;
 
 	mutex_lock(&ipa3_usb_ctx->general_mutex);
@@ -2602,49 +2481,20 @@
 		goto release_prod_fail;
 	}
 
+	/* Check if DL/DPL data pending */
 	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-	curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+	if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state ==
+		IPA_USB_CONS_GRANTED &&
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+
+		IPA_USB_DBG("DL/DPL data pending, invoke remote wakeup\n");
+		queue_work(ipa3_usb_ctx->wq,
+			IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			&ipa3_usb_dpl_notify_remote_wakeup_work :
+			&ipa3_usb_notify_remote_wakeup_work);
+	}
 	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-	if (curr_cons_state == IPA_USB_CONS_GRANTED) {
-		/* Change state to SUSPEND_IN_PROGRESS */
-		if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
-			false, ttype))
-			IPA_USB_ERR("fail set state to suspend_in_progress\n");
 
-		/* Check if DL/DPL data pending */
-		spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
-		if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
-			IPA_USB_DBG(
-				"DL/DPL data pending, invoke remote wakeup\n");
-			queue_work(ipa3_usb_ctx->wq,
-				IPA3_USB_IS_TTYPE_DPL(ttype) ?
-				&ipa3_usb_dpl_notify_remote_wakeup_work :
-				&ipa3_usb_notify_remote_wakeup_work);
-		}
-		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
-			ttype;
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
-			dl_clnt_hdl;
-		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
-			ul_clnt_hdl;
-		INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
-			finish_suspend_work.work,
-			ipa3_usb_wq_finish_suspend_work);
-
-		result = -EINPROGRESS;
-		IPA_USB_DBG("exit with suspend_in_progress\n");
-		goto bad_params;
-	}
-
-	/* Stop DL channel */
-	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
-	if (result) {
-		IPAERR("Error stopping DL/DPL channel: %d\n", result);
-		result = -EFAULT;
-		goto release_prod_fail;
-	}
 	/* Change state to SUSPENDED */
 	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
 		IPA_USB_ERR("failed to change state to suspended\n");
@@ -2803,13 +2653,11 @@
 		}
 	}
 
-	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		/* Start DL/DPL channel */
-		result = ipa3_start_gsi_channel(dl_clnt_hdl);
-		if (result) {
-			IPA_USB_ERR("failed to start DL/DPL channel.\n");
-			goto start_dl_fail;
-		}
+	/* Start DL/DPL channel */
+	result = ipa3_start_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to start DL/DPL channel.\n");
+		goto start_dl_fail;
 	}
 
 	/* Change state to CONNECTED */
@@ -2824,12 +2672,10 @@
 	return 0;
 
 state_change_connected_fail:
-	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
-		result = ipa3_stop_gsi_channel(dl_clnt_hdl);
-		if (result)
-			IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
-				result);
-	}
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result)
+		IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+			result);
 start_dl_fail:
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
 		result = ipa3_stop_gsi_channel(ul_clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 947a54c..53ab299 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1836,6 +1836,7 @@
 	struct ipa_mem_buffer mem = { 0 };
 	u32 *entry;
 	u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
 		GFP_ATOMIC);
@@ -1856,7 +1857,7 @@
 	}
 
 	cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
-		GFP_KERNEL);
+		flag);
 	if (!cmd) {
 		IPAERR("failed to allocate memory\n");
 		retval = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 0196815..80b97e7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -420,15 +420,17 @@
 	int i = 0;
 	int j;
 	int result;
-	int fail_dma_wrap = 0;
 	uint size = num_desc * sizeof(struct sps_iovec);
-	u32 mem_flag = GFP_ATOMIC;
+	gfp_t mem_flag = GFP_ATOMIC;
 	struct sps_iovec iov;
 	int ret;
+	gfp_t flag;
 
 	if (unlikely(!in_atomic))
 		mem_flag = GFP_KERNEL;
 
+	flag = mem_flag | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
 	if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
 		transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
 				&dma_addr);
@@ -437,7 +439,7 @@
 			return -EFAULT;
 		}
 	} else {
-		transfer.iovec = kmalloc(size, mem_flag);
+		transfer.iovec = kmalloc(size, flag);
 		if (!transfer.iovec) {
 			IPAERR("fail to alloc mem for sps xfr buff ");
 			IPAERR("num_desc = %d size = %d\n", num_desc, size);
@@ -457,7 +459,6 @@
 	spin_lock_bh(&sys->spinlock);
 
 	for (i = 0; i < num_desc; i++) {
-		fail_dma_wrap = 0;
 		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
 					   mem_flag);
 		if (!tx_pkt) {
@@ -493,15 +494,6 @@
 					tx_pkt->mem.base,
 					tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-
-				if (dma_mapping_error(ipa_ctx->pdev,
-					tx_pkt->mem.phys_base)) {
-					IPAERR("dma_map_single ");
-					IPAERR("failed\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
-
 			} else {
 				tx_pkt->mem.phys_base = desc[i].dma_address;
 				tx_pkt->no_unmap_dma = true;
@@ -522,10 +514,9 @@
 			}
 		}
 
-		if (!tx_pkt->mem.phys_base) {
-			IPAERR("failed to alloc tx wrapper\n");
-			fail_dma_wrap = 1;
-			goto failure;
+		if (dma_mapping_error(ipa_ctx->pdev, tx_pkt->mem.phys_base)) {
+			IPAERR("dma_map_single failed\n");
+			goto failure_dma_map;
 		}
 
 		tx_pkt->sys = sys;
@@ -580,27 +571,30 @@
 	spin_unlock_bh(&sys->spinlock);
 	return 0;
 
+failure_dma_map:
+	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
 failure:
 	tx_pkt = transfer.user;
 	for (j = 0; j < i; j++) {
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
-			dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
-		} else {
-			dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
+		if (!tx_pkt->no_unmap_dma) {
+			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
 		}
 		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
 		tx_pkt = next_pkt;
 	}
-	if (j < num_desc)
-		/* last desc failed */
-		if (fail_dma_wrap)
-			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
 	if (transfer.iovec_phys) {
 		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
 			dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
@@ -1658,6 +1652,7 @@
 	struct ipa_sys_context *sys;
 	int src_ep_idx;
 	int num_frags, f;
+	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	if (unlikely(!ipa_ctx)) {
 		IPAERR("IPA driver was not initialized\n");
@@ -1723,7 +1718,7 @@
 
 	if (dst_ep_idx != -1) {
 		/* SW data path */
-		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), flag);
 		if (!cmd) {
 			IPAERR("failed to alloc immediate command object\n");
 			goto fail_gen;
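The ipa_send() rework above replaces ad-hoc checks of the returned DMA handle (comparing against 0 or ~0) with dma_mapping_error(), and routes a failed map to a dedicated failure_dma_map label so the failed descriptor is never unmapped. A minimal sketch of the idiom, with an illustrative helper name:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* a failed mapping must never be passed to dma_unmap_single() */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}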
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index b60c7a6..3418896 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -651,6 +651,7 @@
 	struct ipa_ip_v6_filter_init *v6;
 	u16 avail;
 	u16 size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -667,7 +668,7 @@
 			IPA_MEM_PART(v6_flt_size_ddr);
 		size = sizeof(struct ipa_ip_v6_filter_init);
 	}
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
@@ -840,6 +841,7 @@
 	int num_desc = 0;
 	int i;
 	u16 avail;
+	gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
 	if (desc == NULL) {
@@ -848,7 +850,7 @@
 		goto fail_desc;
 	}
 
-	cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC);
+	cmd = kzalloc(16 * sizeof(*cmd), flag);
 	if (cmd == NULL) {
 		IPAERR("fail to alloc cmd blob ip %d\n", ip);
 		rc = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 046f77f..d657a06 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -176,6 +176,7 @@
 	struct ipa_mem_buffer *mem;
 	struct ipa_hdr_init_local *cmd;
 	u16 len;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -190,7 +191,7 @@
 	 * we can use init_local ptr for init_system due to layout of the
 	 * struct
 	 */
-	cmd = kmalloc(len, GFP_KERNEL);
+	cmd = kmalloc(len, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
@@ -663,6 +664,7 @@
 	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
 	int id;
 	int mem_size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
 		IPAERR("bad parm\n");
@@ -674,7 +676,7 @@
 		goto error;
 	}
 
-	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, flag);
 	if (!entry) {
 		IPAERR("failed to alloc hdr object\n");
 		goto error;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 96e0125..a7f983e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -325,6 +325,7 @@
 	int result;
 	u32 offset = 0;
 	size_t tmp;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (init->table_entries == 0) {
@@ -410,7 +411,7 @@
 
 	memset(&desc, 0, sizeof(desc));
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("no mem\n");
 		result = -ENOMEM;
@@ -428,7 +429,7 @@
 	desc[0].pyld = (void *)reg_write_nop;
 	desc[0].len = sizeof(*reg_write_nop);
 
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("Failed to alloc immediate command object\n");
 		result = -ENOMEM;
@@ -573,6 +574,7 @@
 	struct ipa_desc *desc = NULL;
 	u16 size = 0, cnt = 0;
 	int ret = 0;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (dma->entries <= 0) {
@@ -656,7 +658,7 @@
 	}
 
 	size = sizeof(struct ipa_nat_dma);
-	cmd = kzalloc(size, GFP_KERNEL);
+	cmd = kzalloc(size, flag);
 	if (cmd == NULL) {
 		IPAERR("Failed to alloc memory\n");
 		ret = -ENOMEM;
@@ -664,7 +666,7 @@
 	}
 
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("Failed to alloc memory\n");
 		ret = -ENOMEM;
@@ -758,6 +760,7 @@
 	u8 mem_type = IPA_NAT_SHARED_MEMORY;
 	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
 	int result;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	IPADBG("\n");
 	if (ipa_ctx->nat_mem.is_tmp_mem) {
@@ -774,7 +777,7 @@
 
 	memset(&desc, 0, sizeof(desc));
 	/* NO-OP IC for ensuring that IPA pipeline is empty */
-	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), flag);
 	if (!reg_write_nop) {
 		IPAERR("no mem\n");
 		result = -ENOMEM;
@@ -792,7 +795,7 @@
 	desc[0].pyld = (void *)reg_write_nop;
 	desc[0].len = sizeof(*reg_write_nop);
 
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (cmd == NULL) {
 		IPAERR("Failed to alloc immediate command object\n");
 		result = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 21fdec0..5b70853 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -522,6 +522,7 @@
 	struct ipa_ip_v6_routing_init *v6;
 	u16 avail;
 	u16 size;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
 	if (!mem) {
@@ -538,7 +539,7 @@
 			IPA_MEM_PART(v6_rt_size_ddr);
 		size = sizeof(struct ipa_ip_v6_routing_init);
 	}
-	cmd = kmalloc(size, GFP_KERNEL);
+	cmd = kmalloc(size, flag);
 	if (!cmd) {
 		IPAERR("failed to alloc immediate command object\n");
 		goto fail_alloc_cmd;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index da62b77..bec4264 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4440,6 +4440,7 @@
 	int res;
 	struct ipa_tag_completion *comp;
 	int ep_idx;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
 
 	/* Not enough room for the required descriptors for the tag process */
 	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
@@ -4457,7 +4458,7 @@
 	}
 	sys = ipa_ctx->ep[ep_idx].sys;
 
-	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, flag);
 	if (!tag_desc) {
 		IPAERR("failed to allocate memory\n");
 		res = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 4672233..bcd602c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -80,6 +80,7 @@
 
 u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* get handler from ipa */
 static struct mutex ipa_to_apps_pipe_handle_guard;
+static struct mutex add_mux_channel_lock;
 static int wwan_add_ul_flt_rule_to_ipa(void);
 static int wwan_del_ul_flt_rule_to_ipa(void);
 static void ipa_wwan_msg_free_cb(void*, u32, u32);
@@ -1527,9 +1528,11 @@
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&add_mux_channel_lock);
 			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_index);
+				mutex_unlock(&add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1558,6 +1561,7 @@
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1570,6 +1574,7 @@
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_index++;
+			mutex_unlock(&add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
@@ -3084,6 +3089,7 @@
 	atomic_set(&is_ssr, 0);
 
 	mutex_init(&ipa_to_apps_pipe_handle_guard);
+	mutex_init(&add_mux_channel_lock);
 	ipa_to_apps_hdl = -1;
 
 	ipa_qmi_init();
@@ -3103,6 +3109,7 @@
 
 	ipa_qmi_cleanup();
 	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+	mutex_destroy(&add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
 					&ssr_notifier);
 	if (ret)
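The new add_mux_channel_lock serializes the ADD_MUX_CHANNEL ioctl so that the bound check on rmnet_index and the later rmnet_index++ happen atomically; without it, two concurrent callers could both pass the check and write past MAX_NUM_OF_MUX_CHANNEL. A condensed sketch of the race being closed, with illustrative names and sizes:

#include <linux/errno.h>
#include <linux/mutex.h>

#define MAX_CHANNELS	10	/* illustrative stand-in for MAX_NUM_OF_MUX_CHANNEL */

static DEFINE_MUTEX(add_lock);
static int chan_index;
static int chan_table[MAX_CHANNELS];

static int add_channel(int id)
{
	int rc = 0;

	mutex_lock(&add_lock);
	if (chan_index >= MAX_CHANNELS) {
		rc = -EFAULT;			/* mirrors the driver's error code */
		goto out;
	}
	chan_table[chan_index++] = id;		/* check and insert are now atomic */
out:
	mutex_unlock(&add_lock);
	return rc;
}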
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 31e530e..837bf38 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -232,6 +232,9 @@
 	ipa3_transport_release_resource);
 static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
 
+static void ipa3_post_init_wq(struct work_struct *work);
+static DECLARE_WORK(ipa3_post_init_work, ipa3_post_init_wq);
+
 static struct ipa3_plat_drv_res ipa3_res = {0, };
 struct msm_bus_scale_pdata *ipa3_bus_scale_table;
 
@@ -495,63 +498,6 @@
 	return 0;
 }
 
-/**
-* ipa3_flow_control() - Enable/Disable flow control on a particular client.
-* Return codes:
-* None
-*/
-void ipa3_flow_control(enum ipa_client_type ipa_client,
-		bool enable, uint32_t qmap_id)
-{
-	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
-	int ep_idx;
-	struct ipa3_ep_context *ep;
-
-	/* Check if tethered flow control is needed or not.*/
-	if (!ipa3_ctx->tethered_flow_control) {
-		IPADBG("Apps flow control is not needed\n");
-		return;
-	}
-
-	/* Check if ep is valid. */
-	ep_idx = ipa3_get_ep_mapping(ipa_client);
-	if (ep_idx == -1) {
-		IPADBG("Invalid IPA client\n");
-		return;
-	}
-
-	ep = &ipa3_ctx->ep[ep_idx];
-	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
-		IPADBG("EP not valid/Not applicable for client.\n");
-		return;
-	}
-
-	spin_lock(&ipa3_ctx->disconnect_lock);
-	/* Check if the QMAP_ID matches. */
-	if (ep->cfg.meta.qmap_id != qmap_id) {
-		IPADBG("Flow control ind not for same flow: %u %u\n",
-			ep->cfg.meta.qmap_id, qmap_id);
-		spin_unlock(&ipa3_ctx->disconnect_lock);
-		return;
-	}
-	if (!ep->disconnect_in_progress) {
-		if (enable) {
-			IPADBG("Enabling Flow\n");
-			ep_ctrl.ipa_ep_delay = false;
-			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
-		} else {
-			IPADBG("Disabling Flow\n");
-			ep_ctrl.ipa_ep_delay = true;
-			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
-		}
-		ep_ctrl.ipa_ep_suspend = false;
-		ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
-	} else {
-		IPADBG("EP disconnect is in progress\n");
-	}
-	spin_unlock(&ipa3_ctx->disconnect_lock);
-}
-
 static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
 {
 	if (!buff) {
@@ -1863,9 +1809,11 @@
 				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
 				ep_idx, &ep_holb);
 
-			ipahal_write_reg_n_fields(
-				IPA_ENDP_INIT_CTRL_n,
-				ep_idx, &ep_suspend);
+			/* from IPA 4.0 pipe suspend is not supported */
+			if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+				ipahal_write_reg_n_fields(
+					IPA_ENDP_INIT_CTRL_n,
+					ep_idx, &ep_suspend);
 		}
 	}
 }
@@ -3979,6 +3927,15 @@
 	struct ipa3_flt_tbl *flt_tbl;
 	int i;
 
+	if (ipa3_ctx == NULL) {
+		IPADBG("IPA driver haven't initialized\n");
+		return -ENXIO;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_ctx->ipa_initialization_complete)
+		return 0;
+
 	/*
 	 * indication whether working in MHI config or non MHI config is given
 	 * in ipa3_write which is launched before ipa3_post_init. i.e. from
@@ -4113,41 +4070,15 @@
 fail_setup_apps_pipes:
 	gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
 fail_register_device:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
-	ipa_rm_exit();
-	cdev_del(&ipa3_ctx->cdev);
-	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
-	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
-	ipa3_free_dma_task_for_gsi();
 	ipa3_destroy_flt_tbl_idrs();
-	idr_destroy(&ipa3_ctx->ipa_idr);
-	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
-	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
-	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
-	kmem_cache_destroy(ipa3_ctx->hdr_cache);
-	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
-	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
-	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
-	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
-	iounmap(ipa3_ctx->mmio);
-	ipa3_disable_clks();
-	if (ipa3_clk)
-		clk_put(ipa3_clk);
-	ipa3_clk = NULL;
-	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
-	if (ipa3_bus_scale_table) {
-		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
-		ipa3_bus_scale_table = NULL;
-	}
-	kfree(ipa3_ctx->ctrl);
-	kfree(ipa3_ctx);
-	ipa3_ctx = NULL;
 	return result;
 }
 
+static void ipa3_post_init_wq(struct work_struct *work)
+{
+	ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+}
+
 static int ipa3_trigger_fw_loading_mdms(void)
 {
 	int result;
@@ -4249,9 +4180,10 @@
 	if (result) {
 		IPAERR("FW loading process has failed\n");
 			return result;
-	} else
-		ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
-
+	} else {
+		queue_work(ipa3_ctx->transport_power_mgmt_wq,
+			&ipa3_post_init_work);
+	}
 	return count;
 }
 
@@ -4722,20 +4654,6 @@
 		goto fail_device_create;
 	}
 
-	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
-	ipa3_ctx->cdev.owner = THIS_MODULE;
-	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
-
-	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
-	if (result) {
-		IPAERR(":cdev_add err=%d\n", -result);
-		result = -ENODEV;
-		goto fail_cdev_add;
-	}
-	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
-			MAJOR(ipa3_ctx->dev_num),
-			MINOR(ipa3_ctx->dev_num));
-
 	if (ipa3_create_nat_device()) {
 		IPAERR("unable to create nat device\n");
 		result = -ENODEV;
@@ -4793,16 +4711,28 @@
 		}
 	}
 
+	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+	ipa3_ctx->cdev.owner = THIS_MODULE;
+	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+			MAJOR(ipa3_ctx->dev_num),
+			MINOR(ipa3_ctx->dev_num));
 	return 0;
 
+fail_cdev_add:
 fail_ipa_init_interrupts:
 	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
 fail_create_apps_resource:
 	ipa_rm_exit();
 fail_ipa_rm_init:
 fail_nat_dev_add:
-	cdev_del(&ipa3_ctx->cdev);
-fail_cdev_add:
 	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
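Instead of calling ipa3_post_init() directly from the write trigger, the change above declares a static work item and queues it on the existing transport_power_mgmt_wq; ipa3_post_init() itself now returns early once ipa_initialization_complete is set, so repeated triggers are harmless. A condensed sketch of that pattern, assuming a pre-created workqueue and an illustrative done flag:

#include <linux/workqueue.h>

static bool post_init_done;
static struct workqueue_struct *pm_wq;	/* created elsewhere with alloc_workqueue() */

static void post_init_wq(struct work_struct *work)
{
	if (post_init_done)		/* later triggers become no-ops */
		return;
	/* ... heavy one-time initialization runs in process context ... */
	post_init_done = true;
}
static DECLARE_WORK(post_init_work, post_init_wq);

static void trigger_post_init(void)
{
	queue_work(pm_wq, &post_init_work);
}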
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 0b8115f..564397a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -69,13 +69,15 @@
 	}
 
 	/* Enable the pipe */
-	if (IPA_CLIENT_IS_CONS(ep->client) &&
-	    (ep->keep_ipa_awake ||
-	     ipa3_ctx->resume_on_connect[ep->client] ||
-	     !ipa3_should_pipe_be_suspended(ep->client))) {
-		memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
-		ep_cfg_ctrl.ipa_ep_suspend = false;
-		res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		if (IPA_CLIENT_IS_CONS(ep->client) &&
+		    (ep->keep_ipa_awake ||
+		    ipa3_ctx->resume_on_connect[ep->client] ||
+		    !ipa3_should_pipe_be_suspended(ep->client))) {
+			memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = false;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		}
 	}
 
 	return res;
@@ -97,33 +99,41 @@
 		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
 	}
 
-	/* Suspend the pipe */
-	if (IPA_CLIENT_IS_CONS(ep->client)) {
-		/*
-		 * for RG10 workaround uC needs to be loaded before pipe can
-		 * be suspended in this case.
-		 */
-		if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
-			IPADBG("uC is not loaded yet, waiting...\n");
-			res = wait_for_completion_timeout(
-				&ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
-			if (res == 0)
-				IPADBG("timeout waiting for uC to load\n");
+	/*
+	 * For IPA 4.0 and above, the aggregation frame is closed together
+	 * with the channel STOP.
+	 */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the pipe */
+		if (IPA_CLIENT_IS_CONS(ep->client)) {
+			/*
+			 * For the RG10 workaround, the uC needs to be loaded
+			 * before the pipe can be suspended in this case.
+			 */
+			if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+				IPADBG("uC is not loaded yet, waiting...\n");
+				res = wait_for_completion_timeout(
+					&ipa3_ctx->uc_loaded_completion_obj,
+					60 * HZ);
+				if (res == 0)
+					IPADBG("timeout waiting for uC load\n");
+			}
+
+			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		}
 
-		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-		ep_cfg_ctrl.ipa_ep_suspend = true;
-		res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
-	}
-
-	udelay(IPA_PKT_FLUSH_TO_US);
-	ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
-	if (ep_aggr.aggr_en) {
-		res = ipa3_tag_aggr_force_close(clnt_hdl);
-		if (res) {
-			IPAERR("tag process timeout, client:%d err:%d\n",
-				   clnt_hdl, res);
-			BUG();
+		udelay(IPA_PKT_FLUSH_TO_US);
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl,
+			&ep_aggr);
+		if (ep_aggr.aggr_en) {
+			res = ipa3_tag_aggr_force_close(clnt_hdl);
+			if (res) {
+				IPAERR("tag process timeout client:%d err:%d\n",
+					clnt_hdl, res);
+				ipa_assert();
+			}
 		}
 	}
 
@@ -1257,10 +1267,12 @@
 		goto disable_clk_and_exit;
 	}
 
-	/* Suspend the DL/DPL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = true;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Suspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
 
 	/*
 	 * Check if DL/DPL channel is empty again, data could enter the channel
@@ -1275,6 +1287,14 @@
 		goto unsuspend_dl_and_exit;
 	}
 
+	/* Stop DL/DPL channel */
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping DL/DPL channel: %d\n", result);
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
 	/* STOP UL channel */
 	if (!is_dpl) {
 		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
@@ -1283,7 +1303,7 @@
 		if (result) {
 			IPAERR("Error stopping UL channel: result = %d\n",
 				result);
-			goto unsuspend_dl_and_exit;
+			goto start_dl_and_exit;
 		}
 	}
 
@@ -1292,11 +1312,15 @@
 	IPADBG("exit\n");
 	return 0;
 
+start_dl_and_exit:
+	gsi_start_channel(dl_ep->gsi_chan_hdl);
 unsuspend_dl_and_exit:
-	/* Unsuspend the DL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = false;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
 disable_clk_and_exit:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
 	return result;
@@ -1340,7 +1364,8 @@
 
 int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
 {
-	struct ipa3_ep_context *ul_ep, *dl_ep;
+	struct ipa3_ep_context *ul_ep = NULL;
+	struct ipa3_ep_context *dl_ep = NULL;
 	enum gsi_status gsi_res;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 
@@ -1360,10 +1385,17 @@
 		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
 
-	/* Unsuspend the DL/DPL EP */
-	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ep_cfg_ctrl.ipa_ep_suspend = false;
-	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+		/* Unsuspend the DL/DPL EP */
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/* Start DL channel */
+	gsi_res = gsi_start_channel(dl_ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS)
+		IPAERR("Error starting DL channel: %d\n", gsi_res);
 
 	/* Start UL channel */
 	if (!is_dpl) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index bf13ac5..915f2b8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -267,7 +267,6 @@
 	int i = 0;
 	int j;
 	int result;
-	int fail_dma_wrap = 0;
 	u32 mem_flag = GFP_ATOMIC;
 	const struct ipa_gsi_ep_config *gsi_ep_cfg;
 
@@ -298,7 +297,6 @@
 	spin_lock_bh(&sys->spinlock);
 
 	for (i = 0; i < num_desc; i++) {
-		fail_dma_wrap = 0;
 		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
 					   mem_flag);
 		if (!tx_pkt) {
@@ -319,7 +317,7 @@
 			if (ipa_populate_tag_field(&desc[i], tx_pkt,
 				&tag_pyld_ret)) {
 				IPAERR("Failed to populate tag field\n");
-				goto failure;
+				goto failure_dma_map;
 			}
 		}
 
@@ -335,11 +333,6 @@
 					tx_pkt->mem.base,
 					tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-				if (!tx_pkt->mem.phys_base) {
-					IPAERR("failed to do dma map.\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
 			} else {
 					tx_pkt->mem.phys_base =
 						desc[i].dma_address;
@@ -355,17 +348,17 @@
 					desc[i].frag,
 					0, tx_pkt->mem.size,
 					DMA_TO_DEVICE);
-				if (!tx_pkt->mem.phys_base) {
-					IPAERR("dma map failed\n");
-					fail_dma_wrap = 1;
-					goto failure;
-				}
 			} else {
 				tx_pkt->mem.phys_base =
 					desc[i].dma_address;
 				tx_pkt->no_unmap_dma = true;
 			}
 		}
+		if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+			IPAERR("failed to do dma map.\n");
+			goto failure_dma_map;
+		}
+
 		tx_pkt->sys = sys;
 		tx_pkt->callback = desc[i].callback;
 		tx_pkt->user1 = desc[i].user1;
@@ -426,28 +419,31 @@
 
 	return 0;
 
+failure_dma_map:
+	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
 failure:
 	ipahal_destroy_imm_cmd(tag_pyld_ret);
 	tx_pkt = tx_pkt_first;
 	for (j = 0; j < i; j++) {
 		next_pkt = list_next_entry(tx_pkt, link);
 		list_del(&tx_pkt->link);
-		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
-			dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
-		} else {
-			dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
-				tx_pkt->mem.size,
-				DMA_TO_DEVICE);
+
+		if (!tx_pkt->no_unmap_dma) {
+			if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size, DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
 		}
 		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
 		tx_pkt = next_pkt;
 	}
-	if (j < num_desc)
-		/* last desc failed */
-		if (fail_dma_wrap)
-			kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
 
 	kfree(gsi_xfer_elem_array);
 
@@ -1444,8 +1440,7 @@
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 						     sys->rx_buff_sz,
 						     DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
 			       __func__, (void *)rx_pkt->data.dma_addr,
 			       ptr, sys);
@@ -1605,8 +1600,7 @@
 		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			IPAERR("dma_map_single failure %p for %p\n",
 			       (void *)rx_pkt->data.dma_addr, ptr);
 			goto fail_dma_mapping;
@@ -1676,8 +1670,7 @@
 		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
 						     sys->rx_buff_sz,
 						     DMA_FROM_DEVICE);
-		if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+		if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
 			IPAERR("dma_map_single failure %p for %p\n",
 			       (void *)rx_pkt->data.dma_addr, ptr);
 			goto fail_dma_mapping;
@@ -1764,8 +1757,8 @@
 			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
 				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
 				IPAERR("dma_map_single failure %p for %p\n",
 					(void *)rx_pkt->data.dma_addr, ptr);
 				goto fail_dma_mapping;
@@ -1780,8 +1773,8 @@
 			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
 				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
-			if (rx_pkt->data.dma_addr == 0 ||
-				rx_pkt->data.dma_addr == ~0) {
+			if (dma_mapping_error(ipa3_ctx->pdev,
+				rx_pkt->data.dma_addr)) {
 				IPAERR("dma_map_single failure %p for %p\n",
 					(void *)rx_pkt->data.dma_addr, ptr);
 				goto fail_dma_mapping;
@@ -3659,7 +3652,6 @@
 	struct ipa_mem_buffer mem_info = {0};
 	static int total_cnt;
 
-	IPADBG("\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 		ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm 0x%x\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 410b96a..593d4fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -490,6 +490,10 @@
 			entry->hdr,
 			entry->hdr_len,
 			DMA_TO_DEVICE);
+		if (dma_mapping_error(ipa3_ctx->pdev, entry->phys_base)) {
+			IPAERR("dma_map_single failure for entry\n");
+			goto fail_dma_mapping;
+		}
 	} else {
 		entry->is_hdr_proc_ctx = false;
 		if (list_empty(&htbl->head_free_offset_list[bin])) {
@@ -565,6 +569,9 @@
 	list_del(&entry->link);
 	dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
 			entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+	entry->is_hdr_proc_ctx = false;
+
 bad_hdr_len:
 	entry->cookie = 0;
 	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 86442b1..9a406d6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1970,8 +1970,6 @@
 int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
 void ipa3_set_resorce_groups_min_max_limits(void);
 void ipa3_suspend_apps_pipes(bool suspend);
-void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
-			uint32_t qmap_id);
 int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
 	enum ipa_ip_type ip_type,
 	bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 799246b..60dc04f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1600,13 +1600,15 @@
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 	if (IPA_CLIENT_IS_CONS(ep->client)) {
-		ep_cfg_ctrl.ipa_ep_suspend = true;
-		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
-		if (result)
-			IPAERR("client (ep: %d) failed to suspend result=%d\n",
-					clnt_hdl, result);
-		else
-			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+			ep_cfg_ctrl.ipa_ep_suspend = true;
+			result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			if (result)
+				IPAERR("(ep: %d) failed to suspend result=%d\n",
+						clnt_hdl, result);
+			else
+				IPADBG("(ep: %d) suspended\n", clnt_hdl);
+		}
 	} else {
 		ep_cfg_ctrl.ipa_ep_delay = true;
 		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 23c8241..079481d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1110,7 +1110,7 @@
 			true,
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
-			{ 0, 1, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_AP } },
 	[IPA_4_0][IPA_CLIENT_ETHERNET_PROD]	  = {
 			true, IPA_v4_0_GROUP_UL_DL,
 			true,
@@ -1631,6 +1631,16 @@
 
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
+	/*
+	 * Starting with IPA 4.0, pipes can no longer be suspended. Instead,
+	 * the corresponding GSI channel should be stopped. Usually the client
+	 * driver takes care of stopping the channel. For client drivers that
+	 * do not stop the channel, IPA RM will do so based on
+	 * ipa3_should_pipe_channel_be_stopped().
+	 */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
+		return false;
+
 	if (ep->keep_ipa_awake)
 		return false;
 
@@ -1651,6 +1661,41 @@
 }
 
 /**
+ * ipa3_should_pipe_channel_be_stopped() - returns true when the client's
+ * channel should be stopped during a power save scenario. False otherwise.
+ * Most clients already stop the GSI channel on suspend and are not included
+ * in the list below.
+ *
+ * @client: [IN] IPA client
+ */
+static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
+		return false;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_ODU_EMB_CONS ||
+	    client == IPA_CLIENT_ODU_TETH_CONS)
+		return true;
+
+	return false;
+}
+
+/**
  * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
  * resource and decrement active clients counter, which may result in clock
  * gating of IPA clocks.
@@ -1695,6 +1740,19 @@
 				pipe_suspended = true;
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* Stop GSI channel */
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed stop gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
 	}
 	/* Sleep ~1 msec */
 	if (pipe_suspended)
@@ -1761,6 +1819,12 @@
 				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			res = -EPERM;
+			goto bail;
+		}
 	}
 
 	if (res == 0) {
@@ -1824,6 +1888,19 @@
 				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
+
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+			ipa3_should_pipe_channel_be_stopped(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				res = gsi_start_channel(
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start gsi ch %lu\n",
+					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
+					return res;
+				}
+			}
+		}
 	}
 
 	return res;
@@ -2714,6 +2791,12 @@
 		return -EINVAL;
 	}
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 && ep_ctrl->ipa_ep_suspend) {
+		IPAERR("pipe suspend is not supported\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
 	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
 		clnt_hdl,
 		ep_ctrl->ipa_ep_suspend,
@@ -4674,6 +4757,7 @@
 	struct ipa_ep_cfg_ctrl cfg;
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
+	int res;
 
 	memset(&cfg, 0, sizeof(cfg));
 	cfg.ipa_ep_suspend = suspend;
@@ -4688,7 +4772,23 @@
 	if (ep->valid) {
 		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
 			ipa_ep_idx);
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			if (suspend) {
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop LAN channel\n");
+					ipa_assert();
+				}
+			} else {
+				res = gsi_start_channel(ep->gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start LAN channel\n");
+					ipa_assert();
+				}
+			}
+		} else {
+			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		}
 		if (suspend)
 			ipa3_gsi_poll_after_suspend(ep);
 		else if (!atomic_read(&ep->sys->curr_polling_state))
@@ -4706,7 +4806,23 @@
 	if (ep->valid) {
 		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
 			ipa_ep_idx);
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
+			if (suspend) {
+				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				if (res) {
+					IPAERR("failed to stop WAN channel\n");
+					ipa_assert();
+				}
+			} else {
+				res = gsi_start_channel(ep->gsi_chan_hdl);
+				if (res) {
+					IPAERR("failed to start WAN channel\n");
+					ipa_assert();
+				}
+			}
+		} else {
+			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		}
 		if (suspend)
 			ipa3_gsi_poll_after_suspend(ep);
 		else if (!atomic_read(&ep->sys->curr_polling_state))
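The recurring theme in this file (and in ipa_client.c above) is a hardware-version gate: on IPA prior to v4.0 an endpoint is quiesced by setting the suspend bit through ipa3_cfg_ep_ctrl(), while on v4.0 and later, where the suspend bit no longer exists, the corresponding GSI channel is stopped and later restarted. A compressed sketch of that decision, with stub helpers standing in for the real register write and channel stop/start calls:

#include <linux/types.h>

enum ipa_hw_ver { IPA_HW_V3_5 = 0, IPA_HW_V4_0 };

/* stand-ins for ipa3_cfg_ep_ctrl() and the GSI stop/start calls */
static int pipe_set_suspend(int ep_idx, bool suspend) { return 0; }
static int channel_stop_start(int ep_idx, bool stop) { return 0; }

static int ep_power_save(enum ipa_hw_ver hw, int ep_idx, bool suspend)
{
	/* v4.0+ has no pipe-suspend bit: quiesce the EP by stopping its channel */
	if (hw >= IPA_HW_V4_0)
		return channel_stop_start(ep_idx, suspend);

	/* older hardware: toggle the suspend bit in the EP ctrl register */
	return pipe_set_suspend(ep_idx, suspend);
}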
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 1a119b9..3019e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -688,6 +688,19 @@
 		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT);
 }
 
+static void ipareg_construct_endp_init_ctrl_n_v4_0(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	WARN_ON(ep_ctrl->ipa_ep_suspend);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
 static void ipareg_construct_endp_init_ctrl_scnd_n(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1444,6 +1457,9 @@
 		ipareg_parse_hps_queue_weights, 0x000005a4, 0},
 
 	/* IPAv4.0 */
+	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n_v4_0, ipareg_parse_dummy,
+		0x00000800, 0x70 },
 	[IPA_HW_v4_0][IPA_TX_CFG] = {
 		ipareg_construct_tx_cfg_v4_0, ipareg_parse_tx_cfg_v4_0,
 		0x000001FC, 0},
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a15bd04..f408f23 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -141,6 +141,7 @@
 	u32 apps_to_ipa3_hdl;
 	u32 ipa3_to_apps_hdl;
 	struct mutex pipe_handle_guard;
+	struct mutex add_mux_channel_lock;
 };
 
 static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -1506,25 +1507,13 @@
 		break;
 	/*  Flow enable  */
 	case RMNET_IOCTL_FLOW_ENABLE:
-		IPAWANDBG("Received flow enable\n");
-		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
-			sizeof(struct rmnet_ioctl_data_s))) {
-			rc = -EFAULT;
-			break;
-		}
-		ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
-			ioctl_data.u.tcm_handle);
+		IPAWANERR("RMNET_IOCTL_FLOW_ENABLE not supported\n");
+		rc = -EFAULT;
 		break;
 	/*  Flow disable  */
 	case RMNET_IOCTL_FLOW_DISABLE:
-		IPAWANDBG("Received flow disable\n");
-		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
-			sizeof(struct rmnet_ioctl_data_s))) {
-			rc = -EFAULT;
-			break;
-		}
-		ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
-			ioctl_data.u.tcm_handle);
+		IPAWANERR("RMNET_IOCTL_FLOW_DISABLE not supported\n");
+		rc = -EFAULT;
 		break;
 	/*  Set flow handle  */
 	case RMNET_IOCTL_FLOW_SET_HNDL:
@@ -1636,10 +1625,13 @@
 					rmnet_mux_val.mux_id);
 				return rc;
 			}
+			mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			if (rmnet_ipa3_ctx->rmnet_index
 				>= MAX_NUM_OF_MUX_CHANNEL) {
 				IPAWANERR("Exceed mux_channel limit(%d)\n",
 				rmnet_ipa3_ctx->rmnet_index);
+				mutex_unlock(&rmnet_ipa3_ctx->
+					add_mux_channel_lock);
 				return -EFAULT;
 			}
 			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
@@ -1673,6 +1665,8 @@
 					IPAWANERR("device %s reg IPA failed\n",
 						extend_ioctl_data.u.
 						rmnet_mux_val.vchannel_name);
+					mutex_unlock(&rmnet_ipa3_ctx->
+						add_mux_channel_lock);
 					return -ENODEV;
 				}
 				mux_channel[rmnet_index].mux_channel_set = true;
@@ -1685,6 +1679,7 @@
 				mux_channel[rmnet_index].ul_flt_reg = false;
 			}
 			rmnet_ipa3_ctx->rmnet_index++;
+			mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
 			break;
 		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
 			rc = handle3_egress_format(dev, &extend_ioctl_data);
@@ -3204,6 +3199,7 @@
 	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
 
 	mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
 	rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
 
@@ -3222,8 +3218,10 @@
 static void __exit ipa3_wwan_cleanup(void)
 {
 	int ret;
+
 	ipa3_qmi_cleanup();
 	mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
+	mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
 	ret = subsys_notif_unregister_notifier(
 		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
 	if (ret)
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 4e9bd64..e76ff14 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -40,9 +40,6 @@
 #define SMMU_SIZE	((SZ_1G * 4ULL) - SMMU_BASE)
 
 #define WIGIG_ENABLE_DELAY	50
-#define PM_OPT_SUSPEND (MSM_PCIE_CONFIG_NO_CFG_RESTORE | \
-			MSM_PCIE_CONFIG_LINKDOWN)
-#define PM_OPT_RESUME MSM_PCIE_CONFIG_NO_CFG_RESTORE
 
 #define WIGIG_SUBSYS_NAME	"WIGIG"
 #define WIGIG_RAMDUMP_SIZE    0x200000 /* maximum ramdump size */
@@ -127,6 +124,8 @@
 	bool use_cpu_boost;
 	bool is_cpu_boosted;
 	struct cpumask boost_cpu;
+
+	bool keep_radio_on_during_sleep;
 };
 
 static LIST_HEAD(dev_list);
@@ -523,30 +522,8 @@
 	return rc;
 }
 
-static int ops_suspend(void *handle)
+static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
 {
-	int rc;
-	struct msm11ad_ctx *ctx = handle;
-	struct pci_dev *pcidev;
-
-	pr_info("%s(%p)\n", __func__, handle);
-	if (!ctx) {
-		pr_err("No context\n");
-		return -ENODEV;
-	}
-	pcidev = ctx->pcidev;
-	rc = pci_save_state(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
-		return rc;
-	}
-	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
-				 pcidev, NULL, PM_OPT_SUSPEND);
-	if (rc) {
-		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
-			rc);
-		return rc;
-	}
 	if (ctx->gpio_en >= 0)
 		gpio_direction_output(ctx->gpio_en, 0);
 
@@ -557,20 +534,12 @@
 
 	msm_11ad_disable_vregs(ctx);
 
-	return rc;
+	return 0;
 }
 
-static int ops_resume(void *handle)
+static int msm_11ad_turn_device_power_on(struct msm11ad_ctx *ctx)
 {
 	int rc;
-	struct msm11ad_ctx *ctx = handle;
-	struct pci_dev *pcidev;
-
-	pr_info("%s(%p)\n", __func__, handle);
-	if (!ctx) {
-		pr_err("No context\n");
-		return -ENODEV;
-	}
 
 	rc = msm_11ad_enable_vregs(ctx);
 	if (rc) {
@@ -588,25 +557,124 @@
 	if (ctx->sleep_clk_en >= 0)
 		gpio_direction_output(ctx->sleep_clk_en, 1);
 
-	pcidev = ctx->pcidev;
 	if (ctx->gpio_en >= 0) {
 		gpio_direction_output(ctx->gpio_en, 1);
 		msleep(WIGIG_ENABLE_DELAY);
 	}
 
+	return 0;
+
+err_disable_vregs:
+	msm_11ad_disable_vregs(ctx);
+	return rc;
+}
+
+static int msm_11ad_suspend_power_off(void *handle)
+{
+	int rc;
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+
+	pr_debug("%s\n", __func__);
+
+	if (!ctx) {
+		pr_err("%s: No context\n", __func__);
+		return -ENODEV;
+	}
+
+	pcidev = ctx->pcidev;
+
+	msm_pcie_shadow_control(ctx->pcidev, 0);
+
+	rc = pci_save_state(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
+		goto out;
+	}
+	ctx->pristine_state = pci_store_saved_state(pcidev);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+			rc);
+		goto out;
+	}
+
+	rc = msm_11ad_turn_device_power_off(ctx);
+
+out:
+	return rc;
+}
+
+static int ops_suspend(void *handle, bool keep_device_power)
+{
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+	int rc;
+
+	pr_debug("11ad suspend: %s\n", __func__);
+	if (!ctx) {
+		pr_err("11ad suspend: No context\n");
+		return -ENODEV;
+	}
+
+	if (!keep_device_power)
+		return msm_11ad_suspend_power_off(handle);
+
+	pcidev = ctx->pcidev;
+
+	msm_pcie_shadow_control(pcidev, 0);
+
+	dev_dbg(ctx->dev, "disable device and save config\n");
+	pci_disable_device(pcidev);
+	pci_save_state(pcidev);
+	ctx->pristine_state = pci_store_saved_state(pcidev);
+	dev_dbg(ctx->dev, "moving to D3\n");
+	pci_set_power_state(pcidev, PCI_D3hot);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc)
+		dev_err(ctx->dev, "msm_pcie_pm_control(SUSPEND) failed :%d\n",
+			rc);
+
+	return rc;
+}
+
+static int msm_11ad_resume_power_on(void *handle)
+{
+	int rc;
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+
+	pr_debug("%s\n", __func__);
+
+	if (!ctx) {
+		pr_err("%s: No context\n", __func__);
+		return -ENODEV;
+	}
+	pcidev = ctx->pcidev;
+
+	rc = msm_11ad_turn_device_power_on(ctx);
+	if (rc)
+		return rc;
+
 	rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
-				 pcidev, NULL, PM_OPT_RESUME);
+				 pcidev, NULL, 0);
 	if (rc) {
 		dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
 			rc);
 		goto err_disable_power;
 	}
-	rc = msm_pcie_recover_config(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "msm_pcie_recover_config failed :%d\n",
-			rc);
-		goto err_suspend_rc;
-	}
+
+	pci_set_power_state(pcidev, PCI_D0);
+
+	if (ctx->pristine_state)
+		pci_load_saved_state(ctx->pcidev, ctx->pristine_state);
+	pci_restore_state(ctx->pcidev);
+
+	msm_pcie_shadow_control(ctx->pcidev, 1);
 
 	/* Disable L1, in case it is enabled */
 	if (ctx->l1_enabled_in_enum) {
@@ -622,18 +690,54 @@
 
 err_suspend_rc:
 	msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
-			    pcidev, NULL, PM_OPT_SUSPEND);
+			    pcidev, NULL, 0);
 err_disable_power:
-	if (ctx->gpio_en >= 0)
-		gpio_direction_output(ctx->gpio_en, 0);
+	msm_11ad_turn_device_power_off(ctx);
+	return rc;
+}
 
-	if (ctx->sleep_clk_en >= 0)
-		gpio_direction_output(ctx->sleep_clk_en, 0);
+static int ops_resume(void *handle, bool device_powered_on)
+{
+	struct msm11ad_ctx *ctx = handle;
+	struct pci_dev *pcidev;
+	int rc;
 
-	msm_11ad_disable_clocks(ctx);
-err_disable_vregs:
-	msm_11ad_disable_vregs(ctx);
+	pr_debug("11ad resume: %s\n", __func__);
+	if (!ctx) {
+		pr_err("11ad resume: No context\n");
+		return -ENODEV;
+	}
 
+	pcidev = ctx->pcidev;
+
+	if (!device_powered_on)
+		return msm_11ad_resume_power_on(handle);
+
+	rc = msm_pcie_pm_control(MSM_PCIE_RESUME, pcidev->bus->number,
+				 pcidev, NULL, 0);
+	if (rc) {
+		dev_err(ctx->dev, "msm_pcie_pm_control(RESUME) failed :%d\n",
+			rc);
+		return rc;
+	}
+	pci_set_power_state(pcidev, PCI_D0);
+
+	dev_dbg(ctx->dev, "restore state and enable device\n");
+	pci_load_saved_state(pcidev, ctx->pristine_state);
+	pci_restore_state(pcidev);
+
+	rc = pci_enable_device(pcidev);
+	if (rc) {
+		dev_err(ctx->dev, "pci_enable_device failed (%d)\n", rc);
+		goto out;
+	}
+
+	msm_pcie_shadow_control(pcidev, 1);
+
+	dev_dbg(ctx->dev, "pci set master\n");
+	pci_set_master(pcidev);
+
+out:
 	return rc;
 }
 
@@ -643,9 +747,6 @@
 	int rc;
 	int force_pt_coherent = 1;
 	int smmu_bypass = !ctx->smmu_s1_en;
-	dma_addr_t iova_base = 0;
-	dma_addr_t iova_end =  ctx->smmu_base + ctx->smmu_size - 1;
-	struct iommu_domain_geometry geometry;
 
 	if (!ctx->use_smmu)
 		return 0;
@@ -703,17 +804,6 @@
 					rc);
 				goto release_mapping;
 			}
-			memset(&geometry, 0, sizeof(geometry));
-			geometry.aperture_start = iova_base;
-			geometry.aperture_end = iova_end;
-			rc = iommu_domain_set_attr(ctx->mapping->domain,
-						   DOMAIN_ATTR_GEOMETRY,
-						   &geometry);
-			if (rc) {
-				dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
-					rc);
-				goto release_mapping;
-			}
 		}
 	}
 
@@ -992,6 +1082,8 @@
 		return -EINVAL;
 	}
 	ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
+	ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
+		"qcom,keep-radio-on-during-sleep");
 	ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
 
 	ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
@@ -1104,13 +1196,6 @@
 		}
 	}
 
-	rc = pci_save_state(pcidev);
-	if (rc) {
-		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
-		goto out_rc;
-	}
-	ctx->pristine_state = pci_store_saved_state(pcidev);
-
 	if (ctx->sleep_clk_en >= 0) {
 		rc = gpio_request(ctx->sleep_clk_en, "msm_11ad");
 		if (rc < 0) {
@@ -1146,7 +1231,7 @@
 	device_disable_async_suspend(&pcidev->dev);
 
 	list_add_tail(&ctx->list, &dev_list);
-	ops_suspend(ctx);
+	msm_11ad_suspend_power_off(ctx);
 
 	return 0;
 out_rc:
@@ -1236,6 +1321,17 @@
 		dev_warn(ctx->dev, "failed to set CPU boost affinity\n");
 }
 
+static void msm_11ad_clear_boost_affinity(struct msm11ad_ctx *ctx)
+{
+	int rc;
+
+	irq_modify_status(ctx->pcidev->irq, IRQ_NO_BALANCING, 0);
+	rc = irq_set_affinity_hint(ctx->pcidev->irq, NULL);
+	if (rc)
+		dev_warn(ctx->dev,
+			 "Failed clear affinity, rc=%d\n", rc);
+}
+
 /* hooks for the wil6210 driver */
 static int ops_bus_request(void *handle, u32 kbps /* KBytes/Sec */)
 {
@@ -1287,8 +1383,7 @@
 					dev_err(ctx->dev,
 						"Failed disable boost rc=%d\n",
 						rc);
-				irq_modify_status(ctx->pcidev->irq,
-						  IRQ_NO_BALANCING, 0);
+				msm_11ad_clear_boost_affinity(ctx);
 				dev_dbg(ctx->dev, "CPU boost disabled\n");
 			}
 			ctx->is_cpu_boosted = needs_boost;
@@ -1316,7 +1411,7 @@
 	memset(&ctx->rops, 0, sizeof(ctx->rops));
 	ctx->wil_handle = NULL;
 
-	ops_suspend(ctx);
+	msm_11ad_suspend_power_off(ctx);
 }
 
 static int msm_11ad_notify_crash(struct msm11ad_ctx *ctx)
@@ -1374,6 +1469,16 @@
 	return rc;
 }
 
+static bool ops_keep_radio_on_during_sleep(void *handle)
+{
+	struct msm11ad_ctx *ctx = (struct msm11ad_ctx *)handle;
+
+	pr_debug("%s: keep radio on during sleep is %s\n", __func__,
+		 ctx->keep_radio_on_during_sleep ? "allowed" : "not allowed");
+
+	return ctx->keep_radio_on_during_sleep;
+}
+
 void *msm_11ad_dev_init(struct device *dev, struct wil_platform_ops *ops,
 			const struct wil_platform_rops *rops, void *wil_handle)
 {
@@ -1413,6 +1518,7 @@
 	ops->resume = ops_resume;
 	ops->uninit = ops_uninit;
 	ops->notify = ops_notify;
+	ops->keep_radio_on_during_sleep = ops_keep_radio_on_during_sleep;
 
 	return ctx;
 }
@@ -1429,19 +1535,9 @@
 		return -EINVAL;
 	}
 
-	if (ctx->pristine_state) {
-		/* in old kernels, pci_load_saved_state() is not exported;
-		 * so use pci_load_and_free_saved_state()
-		 * and re-allocate ctx->saved_state again
-		 */
-		pci_load_and_free_saved_state(ctx->pcidev,
-					      &ctx->pristine_state);
-		ctx->pristine_state = pci_store_saved_state(ctx->pcidev);
-	}
-
 	ctx->subsys_handle = subsystem_get(ctx->subsysdesc.name);
 
-	return ops_resume(ctx);
+	return msm_11ad_resume_power_on(ctx);
 }
 EXPORT_SYMBOL(msm_11ad_modinit);
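The reworked 11ad suspend/resume paths revolve around one PCI state round-trip: save config space, stash it with pci_store_saved_state(), and on resume reload it with pci_load_saved_state() before pci_restore_state(). A minimal sketch of that round-trip under the same assumptions (error handling trimmed, names illustrative):

#include <linux/pci.h>

struct wigig_ctx_like {
	struct pci_dev *pcidev;
	struct pci_saved_state *pristine;
};

static int wigig_save_pci(struct wigig_ctx_like *ctx)
{
	int rc = pci_save_state(ctx->pcidev);

	if (rc)
		return rc;
	/* keep a private copy; the core's saved copy can be overwritten later */
	ctx->pristine = pci_store_saved_state(ctx->pcidev);
	return 0;
}

static void wigig_restore_pci(struct wigig_ctx_like *ctx)
{
	if (ctx->pristine)
		pci_load_saved_state(ctx->pcidev, ctx->pristine);
	pci_restore_state(ctx->pcidev);
}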
 
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index cd76ca2..8c43c4e 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -310,6 +310,7 @@
 	POWER_SUPPLY_ATTR(ctm_current_max),
 	POWER_SUPPLY_ATTR(hw_current_max),
 	POWER_SUPPLY_ATTR(real_type),
+	POWER_SUPPLY_ATTR(pr_swap),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 4ecf9a5..8641a45 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -39,6 +39,8 @@
 #define PL_VOTER			"PL_VOTER"
 #define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
 #define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
+#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -53,7 +55,8 @@
 	struct votable		*pl_awake_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
 	struct votable		*usb_icl_votable;
-	struct work_struct	status_change_work;
+	struct votable		*pl_enable_votable_indirect;
+	struct delayed_work	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct delayed_work	pl_taper_work;
 	struct power_supply	*main_psy;
@@ -491,6 +494,7 @@
 }
 
 #define ICL_STEP_UA	25000
+#define PL_DELAY_MS     3000
 static int usb_icl_vote_callback(struct votable *votable, void *data,
 			int icl_ua, const char *client)
 {
@@ -512,6 +516,21 @@
 	 */
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
 
+	/*
+	 * if (ICL < 1400)
+	 *	disable parallel charger using USBIN_I_VOTER
+	 * else
+	 *	instead of re-enabling here, rely on status_change_work
+	 *	(triggered by AICL completion or scheduled from here) to
+	 *	vote on USBIN_I_VOTER; status_change_work enables
+	 *	USBIN_I_VOTER based on the settled current.
+	 */
+	if (icl_ua <= 1400000)
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		schedule_delayed_work(&chip->status_change_work,
+						msecs_to_jiffies(PL_DELAY_MS));
+
 	/* rerun AICL */
 	/* get the settled current */
 	rc = power_supply_get_property(chip->main_psy,
@@ -532,8 +551,6 @@
 		power_supply_set_property(chip->main_psy,
 				POWER_SUPPLY_PROP_CURRENT_MAX,
 				&pval);
-		/* wait for ICL change */
-		msleep(100);
 	}
 
 	/* set the effective ICL */
@@ -541,9 +558,6 @@
 	power_supply_set_property(chip->main_psy,
 			POWER_SUPPLY_PROP_CURRENT_MAX,
 			&pval);
-	if (rerun_aicl)
-		/* wait for ICL change */
-		msleep(100);
 
 	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
 
@@ -643,6 +657,16 @@
 	return 0;
 }
 
+static int pl_enable_indirect_vote_callback(struct votable *votable,
+			void *data, int pl_enable, const char *client)
+{
+	struct pl_data *chip = data;
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, !pl_enable, 0);
+
+	return 0;
+}
+
 static int pl_awake_vote_callback(struct votable *votable,
 			void *data, int awake, const char *client)
 {
@@ -775,6 +799,42 @@
 	union power_supply_propval pval = {0, };
 	int new_total_settled_ua;
 	int rc;
+	int main_settled_ua;
+	int main_limited;
+	int total_current_ua;
+
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+
+	/*
+	 * call aicl split only when USBIN_USBIN and enabled
+	 * and if aicl changed
+	 */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_settled_ua = pval.intval;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return;
+	}
+	main_limited = pval.intval;
+
+	if ((main_limited && (main_settled_ua + chip->pl_settled_ua) < 1400000)
+			|| (main_settled_ua == 0)
+			|| ((total_current_ua >= 0) &&
+				(total_current_ua <= 1400000)))
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+	else
+		vote(chip->pl_enable_votable_indirect, USBIN_I_VOTER, true, 0);
+
 
 	if (get_effective_result(chip->pl_disable_votable))
 		return;
@@ -783,17 +843,10 @@
 			|| chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
 		/*
 		 * call aicl split only when USBIN_USBIN and enabled
-		 * and if aicl changed
+		 * and if settled current has changed by more than 300mA
 		 */
-		rc = power_supply_get_property(chip->main_psy,
-				       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
-				       &pval);
-		if (rc < 0) {
-			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
-			return;
-		}
 
-		new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+		new_total_settled_ua = main_settled_ua + chip->pl_settled_ua;
 		pl_dbg(chip, PR_PARALLEL,
 			"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
 			chip->total_settled_ua, pval.intval,
@@ -840,7 +893,7 @@
 static void status_change_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work,
-			struct pl_data, status_change_work);
+			struct pl_data, status_change_work.work);
 
 	if (!chip->main_psy && is_main_available(chip)) {
 		/*
@@ -878,7 +931,7 @@
 	if ((strcmp(psy->desc->name, "parallel") == 0)
 	    || (strcmp(psy->desc->name, "battery") == 0)
 	    || (strcmp(psy->desc->name, "main") == 0))
-		schedule_work(&chip->status_change_work);
+		schedule_delayed_work(&chip->status_change_work, 0);
 
 	return NOTIFY_OK;
 }
@@ -899,7 +952,7 @@
 
 static int pl_determine_initial_status(struct pl_data *chip)
 {
-	status_change_work(&chip->status_change_work);
+	status_change_work(&chip->status_change_work.work);
 	return 0;
 }
 
@@ -968,7 +1021,18 @@
 		goto destroy_votable;
 	}
 
-	INIT_WORK(&chip->status_change_work, status_change_work);
+	chip->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+					VOTE_SET_ANY,
+					pl_enable_indirect_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_enable_votable_indirect)) {
+		rc = PTR_ERR(chip->pl_enable_votable_indirect);
+		return rc;
+	}
+
+	vote(chip->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+	INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
 	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
 	INIT_WORK(&chip->pl_disable_forever_work, pl_disable_forever_work);
 
@@ -1001,6 +1065,7 @@
 unreg_notifier:
 	power_supply_unreg_notifier(&chip->nb);
 destroy_votable:
+	destroy_votable(chip->pl_enable_votable_indirect);
 	destroy_votable(chip->pl_awake_votable);
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
@@ -1020,11 +1085,12 @@
 	if (chip == NULL)
 		return;
 
-	cancel_work_sync(&chip->status_change_work);
+	cancel_delayed_work_sync(&chip->status_change_work);
 	cancel_delayed_work_sync(&chip->pl_taper_work);
 	cancel_work_sync(&chip->pl_disable_forever_work);
 
 	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_enable_votable_indirect);
 	destroy_votable(chip->pl_awake_votable);
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
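The USBIN_I_VOTER changes above boil down to one decision: parallel charging is only worth enabling when the settled input current can meaningfully exceed about 1.4 A; otherwise the main charger carries the full load. A sketch of that predicate as a pure function (the helper name and threshold macro are illustrative; the 1400000 µA value is the one used in the patch):

#include <linux/types.h>

#define PL_MIN_ICL_UA	1400000		/* 1.4 A, the threshold used in the patch */

static bool parallel_charging_worthwhile(bool main_limited,
					 int main_settled_ua,
					 int pl_settled_ua,
					 int total_icl_ua)
{
	/* input is collapsing: combined settled current below the threshold */
	if (main_limited && (main_settled_ua + pl_settled_ua) < PL_MIN_ICL_UA)
		return false;
	/* AICL has not settled yet */
	if (main_settled_ua == 0)
		return false;
	/* the effective ICL vote itself is at or below the threshold */
	if (total_icl_ua >= 0 && total_icl_ua <= PL_MIN_ICL_UA)
		return false;
	return true;
}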
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 5983b5c..7e6a4e8 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -219,6 +219,12 @@
 	SLOPE_LIMIT_NUM_COEFFS,
 };
 
+enum esr_timer_config {
+	TIMER_RETRY = 0,
+	TIMER_MAX,
+	NUM_ESR_TIMERS,
+};
+
 /* DT parameters for FG device */
 struct fg_dt_props {
 	bool	force_load_profile;
@@ -234,9 +240,9 @@
 	int	recharge_soc_thr;
 	int	recharge_volt_thr_mv;
 	int	rsense_sel;
-	int	esr_timer_charging;
-	int	esr_timer_awake;
-	int	esr_timer_asleep;
+	int	esr_timer_charging[NUM_ESR_TIMERS];
+	int	esr_timer_awake[NUM_ESR_TIMERS];
+	int	esr_timer_asleep[NUM_ESR_TIMERS];
 	int	rconn_mohms;
 	int	esr_clamp_mohms;
 	int	cl_start_soc;
@@ -385,6 +391,7 @@
 	int			maint_soc;
 	int			delta_soc;
 	int			last_msoc;
+	int			esr_timer_charging_default[NUM_ESR_TIMERS];
 	enum slope_limit_status	slope_limit_sts;
 	bool			profile_available;
 	bool			profile_loaded;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 27047b4..73d54c6 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -491,7 +491,7 @@
 	int i, mask = 0xff;
 	int64_t temp;
 
-	temp = DIV_ROUND_CLOSEST(val * sp[id].numrtr, sp[id].denmtr);
+	temp = (int64_t)div_s64((s64)val * sp[id].numrtr, sp[id].denmtr);
 	pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
 	for (i = 0; i < sp[id].len; i++) {
 		buf[i] = temp & mask;
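
The DIV_ROUND_CLOSEST() -> div_s64() change above helps avoid both a 32-bit
overflow in the multiply and a native 64-bit division, which 32-bit kernels
cannot link directly; note that div_s64() truncates rather than rounding to
nearest, a difference this patch accepts. A small self-contained sketch of the
helper (values are illustrative):

#include <linux/math64.h>

/* Widen to 64 bits before multiplying, then do a 64-by-32 division. */
static s64 encode_scale_sketch(int val, int numrtr, int denmtr)
{
	return div_s64((s64)val * numrtr, denmtr);
}
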
@@ -1025,12 +1025,15 @@
 	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
 }
 
-static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
-				int flags)
+static int fg_set_esr_timer(struct fg_chip *chip, int cycles_init,
+				int cycles_max, bool charging, int flags)
 {
 	u8 buf[2];
 	int rc, timer_max, timer_init;
 
+	if (cycles_init < 0 || cycles_max < 0)
+		return 0;
+
 	if (charging) {
 		timer_max = FG_SRAM_ESR_TIMER_CHG_MAX;
 		timer_init = FG_SRAM_ESR_TIMER_CHG_INIT;
@@ -1039,7 +1042,7 @@
 		timer_init = FG_SRAM_ESR_TIMER_DISCHG_INIT;
 	}
 
-	fg_encode(chip->sp, timer_max, cycles, buf);
+	fg_encode(chip->sp, timer_max, cycles_max, buf);
 	rc = fg_sram_write(chip,
 			chip->sp[timer_max].addr_word,
 			chip->sp[timer_max].addr_byte, buf,
@@ -1050,7 +1053,7 @@
 		return rc;
 	}
 
-	fg_encode(chip->sp, timer_init, cycles, buf);
+	fg_encode(chip->sp, timer_init, cycles_init, buf);
 	rc = fg_sram_write(chip,
 			chip->sp[timer_init].addr_word,
 			chip->sp[timer_init].addr_byte, buf,
@@ -1061,6 +1064,8 @@
 		return rc;
 	}
 
+	fg_dbg(chip, FG_STATUS, "esr_%s_timer set to %d/%d\n",
+		charging ? "charging" : "discharging", cycles_init, cycles_max);
 	return 0;
 }
 
@@ -1315,9 +1320,16 @@
 		return rc;
 	}
 
-	cc_soc_delta_pct = DIV_ROUND_CLOSEST(
-				abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
-				CC_SOC_30BIT);
+	cc_soc_delta_pct =
+		div64_s64((int64_t)(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+			CC_SOC_30BIT);
+
+	/* If the delta is < 50%, then skip processing full data */
+	if (cc_soc_delta_pct < 50) {
+		pr_err("cc_soc_delta_pct: %d\n", cc_soc_delta_pct);
+		return -ERANGE;
+	}
+
 	delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
 				100);
 	chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
@@ -1387,7 +1399,6 @@
 	return rc;
 }
 
-#define FULL_SOC_RAW	255
 static void fg_cap_learning_update(struct fg_chip *chip)
 {
 	int rc, batt_soc, batt_soc_msb;
@@ -2039,6 +2050,50 @@
 	return 0;
 }
 
+static int fg_esr_timer_config(struct fg_chip *chip, bool sleep)
+{
+	int rc, cycles_init, cycles_max;
+	bool end_of_charge = false;
+
+	end_of_charge = is_input_present(chip) && chip->charge_done;
+	fg_dbg(chip, FG_STATUS, "sleep: %d eoc: %d\n", sleep, end_of_charge);
+
+	/* ESR discharging timer configuration */
+	cycles_init = sleep ? chip->dt.esr_timer_asleep[TIMER_RETRY] :
+			chip->dt.esr_timer_awake[TIMER_RETRY];
+	if (end_of_charge)
+		cycles_init = 0;
+
+	cycles_max = sleep ? chip->dt.esr_timer_asleep[TIMER_MAX] :
+			chip->dt.esr_timer_awake[TIMER_MAX];
+
+	rc = fg_set_esr_timer(chip, cycles_init, cycles_max, false,
+		sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	/* ESR charging timer configuration */
+	cycles_init = cycles_max = -EINVAL;
+	if (end_of_charge || sleep) {
+		cycles_init = chip->dt.esr_timer_charging[TIMER_RETRY];
+		cycles_max = chip->dt.esr_timer_charging[TIMER_MAX];
+	} else if (is_input_present(chip)) {
+		cycles_init = chip->esr_timer_charging_default[TIMER_RETRY];
+		cycles_max = chip->esr_timer_charging_default[TIMER_MAX];
+	}
+
+	rc = fg_set_esr_timer(chip, cycles_init, cycles_max, true,
+		sleep ? FG_IMA_NO_WLOCK : FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
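
fg_esr_timer_config() relies on the early return added to fg_set_esr_timer():
any negative cycle count (the -EINVAL sentinel) turns the call into a no-op,
so only the branches that matter overwrite the sentinel. A condensed,
self-contained sketch of that pattern (names and values are illustrative):

#include <linux/errno.h>
#include <linux/types.h>

/* Mirrors fg_set_esr_timer()'s guard: negative counts mean "don't touch". */
static int set_timer_sketch(int cycles_init, int cycles_max)
{
	if (cycles_init < 0 || cycles_max < 0)
		return 0;
	/* ... program the hardware timer here ... */
	return 0;
}

static int config_sketch(bool input_present)
{
	int init = -EINVAL, max = -EINVAL;

	if (input_present) {	/* illustrative condition */
		init = 8;	/* e.g. a retry count */
		max = 16;	/* e.g. a max count */
	}

	return set_timer_sketch(init, max);	/* no-op if sentinels remain */
}
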
 static void fg_batt_avg_update(struct fg_chip *chip)
 {
 	if (chip->charge_status == chip->prev_charge_status)
@@ -2112,6 +2167,10 @@
 	if (rc < 0)
 		pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
 
+	rc = fg_esr_timer_config(chip, false);
+	if (rc < 0)
+		pr_err("Error in configuring ESR timer, rc=%d\n", rc);
+
 	rc = fg_get_battery_temp(chip, &batt_temp);
 	if (!rc) {
 		rc = fg_slope_limit_config(chip, batt_temp);
@@ -3115,6 +3174,8 @@
 
 /* INIT FUNCTIONS STAY HERE */
 
+#define DEFAULT_ESR_CHG_TIMER_RETRY	8
+#define DEFAULT_ESR_CHG_TIMER_MAX	16
 static int fg_hw_init(struct fg_chip *chip)
 {
 	int rc;
@@ -3283,22 +3344,29 @@
 		return rc;
 	}
 
-	if (chip->dt.esr_timer_charging > 0) {
-		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging, true,
-				      FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in setting ESR timer, rc=%d\n", rc);
-			return rc;
-		}
+	if (chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+		chip->esr_timer_charging_default[TIMER_RETRY] =
+			DEFAULT_ESR_CHG_TIMER_RETRY;
+		chip->esr_timer_charging_default[TIMER_MAX] =
+			DEFAULT_ESR_CHG_TIMER_MAX;
+	} else {
+		/* We don't need this for pm660 at present */
+		chip->esr_timer_charging_default[TIMER_RETRY] = -EINVAL;
+		chip->esr_timer_charging_default[TIMER_MAX] = -EINVAL;
 	}
 
-	if (chip->dt.esr_timer_awake > 0) {
-		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
-				      FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in setting ESR timer, rc=%d\n", rc);
-			return rc;
-		}
+	rc = fg_set_esr_timer(chip, chip->dt.esr_timer_charging[TIMER_RETRY],
+		chip->dt.esr_timer_charging[TIMER_MAX], true, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake[TIMER_RETRY],
+		chip->dt.esr_timer_awake[TIMER_MAX], false, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in setting ESR timer, rc=%d\n", rc);
+		return rc;
 	}
 
 	if (chip->cyc_ctr.en)
@@ -3778,6 +3846,32 @@
 	return 0;
 }
 
+static int fg_parse_dt_property_u32_array(struct device_node *node,
+				const char *prop_name, int *buf, int len)
+{
+	int rc;
+
+	rc = of_property_count_elems_of_size(node, prop_name, sizeof(u32));
+	if (rc < 0) {
+		if (rc == -EINVAL)
+			return 0;
+		else
+			return rc;
+	} else if (rc != len) {
+		pr_err("Incorrect length %d for %s, rc=%d\n", len, prop_name,
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(node, prop_name, buf, len);
+	if (rc < 0) {
+		pr_err("Error in reading %s, rc=%d\n", prop_name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
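
The helper above folds the count/validate/read sequence for fixed-length u32
array properties into one place: an absent property (the element count returns
-EINVAL) is treated as success and leaves the buffer untouched, while a wrong
element count or a read failure is an error. A hedged usage sketch, mirroring
how the ESR timer properties are parsed later in this patch:

	int timers[NUM_ESR_TIMERS];
	int rc;

	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-charging",
			timers, NUM_ESR_TIMERS);
	if (rc < 0) {
		/* malformed property: fall back to the "unset" sentinels */
		timers[TIMER_RETRY] = -EINVAL;
		timers[TIMER_MAX] = -EINVAL;
	}
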
 static int fg_parse_slope_limit_coefficients(struct fg_chip *chip)
 {
 	struct device_node *node = chip->dev->of_node;
@@ -3788,17 +3882,10 @@
 	if (rc < 0)
 		return 0;
 
-	rc = of_property_count_elems_of_size(node, "qcom,slope-limit-coeffs",
-			sizeof(u32));
-	if (rc != SLOPE_LIMIT_NUM_COEFFS)
-		return -EINVAL;
-
-	rc = of_property_read_u32_array(node, "qcom,slope-limit-coeffs",
-			chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
-	if (rc < 0) {
-		pr_err("Error in reading qcom,slope-limit-coeffs, rc=%d\n", rc);
+	rc = fg_parse_dt_property_u32_array(node, "qcom,slope-limit-coeffs",
+		chip->dt.slope_limit_coeffs, SLOPE_LIMIT_NUM_COEFFS);
+	if (rc < 0)
 		return rc;
-	}
 
 	for (i = 0; i < SLOPE_LIMIT_NUM_COEFFS; i++) {
 		if (chip->dt.slope_limit_coeffs[i] > SLOPE_LIMIT_COEFF_MAX ||
@@ -3817,44 +3904,20 @@
 	struct device_node *node = chip->dev->of_node;
 	int rc, i;
 
-	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
-		sizeof(u32));
-	if (rc != KI_COEFF_SOC_LEVELS)
-		return 0;
-
-	rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
-			chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
-	if (rc < 0) {
-		pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
-			rc);
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
+		chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
 		return rc;
-	}
 
-	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
-		sizeof(u32));
-	if (rc != KI_COEFF_SOC_LEVELS)
-		return 0;
-
-	rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
-			chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
-	if (rc < 0) {
-		pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
-			rc);
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-med-dischg",
+		chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
 		return rc;
-	}
 
-	rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
-		sizeof(u32));
-	if (rc != KI_COEFF_SOC_LEVELS)
-		return 0;
-
-	rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
-			chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
-	if (rc < 0) {
-		pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
-			rc);
+	rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-hi-dischg",
+		chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+	if (rc < 0)
 		return rc;
-	}
 
 	for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
 		if (chip->dt.ki_coeff_soc[i] < 0 ||
@@ -3880,7 +3943,7 @@
 }
 
 #define DEFAULT_CUTOFF_VOLT_MV		3200
-#define DEFAULT_EMPTY_VOLT_MV		2800
+#define DEFAULT_EMPTY_VOLT_MV		2850
 #define DEFAULT_RECHARGE_VOLT_MV	4250
 #define DEFAULT_CHG_TERM_CURR_MA	100
 #define DEFAULT_CHG_TERM_BASE_CURR_MA	75
@@ -4099,23 +4162,26 @@
 				rc);
 	}
 
-	rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
-	if (rc < 0)
-		chip->dt.esr_timer_charging = -EINVAL;
-	else
-		chip->dt.esr_timer_charging = temp;
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-charging",
+		chip->dt.esr_timer_charging, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_charging[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_charging[TIMER_MAX] = -EINVAL;
+	}
 
-	rc = of_property_read_u32(node, "qcom,fg-esr-timer-awake", &temp);
-	if (rc < 0)
-		chip->dt.esr_timer_awake = -EINVAL;
-	else
-		chip->dt.esr_timer_awake = temp;
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-awake",
+		chip->dt.esr_timer_awake, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_awake[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_awake[TIMER_MAX] = -EINVAL;
+	}
 
-	rc = of_property_read_u32(node, "qcom,fg-esr-timer-asleep", &temp);
-	if (rc < 0)
-		chip->dt.esr_timer_asleep = -EINVAL;
-	else
-		chip->dt.esr_timer_asleep = temp;
+	rc = fg_parse_dt_property_u32_array(node, "qcom,fg-esr-timer-asleep",
+		chip->dt.esr_timer_asleep, NUM_ESR_TIMERS);
+	if (rc < 0) {
+		chip->dt.esr_timer_asleep[TIMER_RETRY] = -EINVAL;
+		chip->dt.esr_timer_asleep[TIMER_MAX] = -EINVAL;
+	}
 
 	chip->cyc_ctr.en = of_property_read_bool(node, "qcom,cycle-counter-en");
 	if (chip->cyc_ctr.en)
@@ -4453,15 +4519,9 @@
 	struct fg_chip *chip = dev_get_drvdata(dev);
 	int rc;
 
-	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
-		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_asleep, false,
-				      FG_IMA_NO_WLOCK);
-		if (rc < 0) {
-			pr_err("Error in setting ESR timer during suspend, rc=%d\n",
-			       rc);
-			return rc;
-		}
-	}
+	rc = fg_esr_timer_config(chip, true);
+	if (rc < 0)
+		pr_err("Error in configuring ESR timer, rc=%d\n", rc);
 
 	cancel_delayed_work_sync(&chip->batt_avg_work);
 	if (fg_sram_dump)
@@ -4474,15 +4534,9 @@
 	struct fg_chip *chip = dev_get_drvdata(dev);
 	int rc;
 
-	if (chip->dt.esr_timer_awake > 0 && chip->dt.esr_timer_asleep > 0) {
-		rc = fg_set_esr_timer(chip, chip->dt.esr_timer_awake, false,
-				      FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in setting ESR timer during resume, rc=%d\n",
-			       rc);
-			return rc;
-		}
-	}
+	rc = fg_esr_timer_config(chip, false);
+	if (rc < 0)
+		pr_err("Error in configuring ESR timer, rc=%d\n", rc);
 
 	fg_circ_buf_clr(&chip->ibatt_circ_buf);
 	fg_circ_buf_clr(&chip->vbatt_circ_buf);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 2266a2a..becce31 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -266,8 +266,9 @@
 	debug_mask, __debug_mask, int, 0600
 );
 
-#define MICRO_1P5A	1500000
-#define MICRO_P1A	100000
+#define MICRO_1P5A		1500000
+#define MICRO_P1A		100000
+#define OTG_DEFAULT_DEGLITCH_TIME_MS	50
 static int smb2_parse_dt(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -304,9 +305,6 @@
 	chip->dt.no_battery = of_property_read_bool(node,
 						"qcom,batteryless-platform");
 
-	chg->external_vconn = of_property_read_bool(node,
-						"qcom,external-vconn");
-
 	rc = of_property_read_u32(node,
 				"qcom,fcc-max-ua", &chg->batt_profile_fcc_ua);
 	if (rc < 0)
@@ -400,6 +398,12 @@
 
 	chg->suspend_input_on_debug_batt = of_property_read_bool(node,
 					"qcom,suspend-input-on-debug-batt");
+
+	rc = of_property_read_u32(node, "qcom,otg-deglitch-time-ms",
+					&chg->otg_delay_ms);
+	if (rc < 0)
+		chg->otg_delay_ms = OTG_DEFAULT_DEGLITCH_TIME_MS;
+
 	return 0;
 }
 
@@ -428,6 +432,7 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -450,8 +455,7 @@
 		if (!val->intval)
 			break;
 
-		rc = smblib_get_prop_typec_mode(chg, val);
-		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
 			chg->micro_usb_mode) &&
 			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			val->intval = 0;
@@ -488,7 +492,7 @@
 		else if (chip->bad_part)
 			val->intval = POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
 		else
-			rc = smblib_get_prop_typec_mode(chg, val);
+			val->intval = chg->typec_mode;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
 		if (chg->micro_usb_mode)
@@ -532,6 +536,9 @@
 	case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
 		rc = smblib_get_charge_current(chg, &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_get_prop_pr_swap_in_progress(chg, val);
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -590,6 +597,9 @@
 		rc = vote(chg->usb_icl_votable, CTM_VOTER,
 						val->intval >= 0, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_PR_SWAP:
+		rc = smblib_set_prop_pr_swap_in_progress(chg, val);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -667,8 +677,7 @@
 		if (!val->intval)
 			break;
 
-		rc = smblib_get_prop_typec_mode(chg, val);
-		if ((val->intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
+		if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ||
 			chg->micro_usb_mode) &&
 			chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			val->intval = 1;
@@ -1043,7 +1052,8 @@
 		rc = smblib_get_prop_batt_voltage_now(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
-		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+		val->intval = get_client_vote(chg->fv_votable,
+				BATT_PROFILE_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
 		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
@@ -1061,7 +1071,7 @@
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		val->intval = get_client_vote(chg->fcc_votable,
-					      DEFAULT_VOTER);
+					      BATT_PROFILE_VOTER);
 		break;
 	case POWER_SUPPLY_PROP_TEMP:
 		rc = smblib_get_prop_batt_temp(chg, val);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index b1070e8..6ead522 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -637,6 +637,7 @@
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 
 	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 
@@ -839,7 +840,6 @@
 {
 	int rc = 0;
 	bool override;
-	union power_supply_propval pval;
 
 	/* suspend and return if 25mA or less is requested */
 	if (icl_ua < USBIN_25MA)
@@ -849,14 +849,8 @@
 	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
-		goto enable_icl_changed_interrupt;
-	}
-
 	/* configure current */
-	if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+	if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
 		&& (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)) {
 		rc = set_sdp_current(chg, icl_ua);
 		if (rc < 0) {
@@ -864,6 +858,7 @@
 			goto enable_icl_changed_interrupt;
 		}
 	} else {
+		set_sdp_current(chg, 100000);
 		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
@@ -877,7 +872,7 @@
 	if (icl_ua == INT_MAX) {
 		/* remove override if no voters - hw defaults is desired */
 		override = false;
-	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+	} else if (chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
 		if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
 			/* For std cable with type = SDP never override */
 			override = false;
@@ -917,15 +912,8 @@
 	int rc = 0;
 	u8 load_cfg;
 	bool override;
-	union power_supply_propval pval;
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get typeC mode rc = %d\n", rc);
-		return rc;
-	}
-
-	if ((pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
+	if ((chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT
 		|| chg->micro_usb_mode)
 		&& (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)) {
 		rc = get_sdp_current(chg, icl_ua);
@@ -1046,16 +1034,6 @@
 	return 0;
 }
 
-static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
-			void *data, int chg_enable, const char *client)
-{
-	struct smb_charger *chg = data;
-
-	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
-
-	return 0;
-}
-
 static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
 			void *data,
 			int hvdcp_enable, const char *client)
@@ -1212,36 +1190,13 @@
 static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	u8 otg_stat, val;
-	int rc = 0, i;
-
-	if (!chg->external_vconn) {
-		/*
-		 * Hardware based OTG soft start should complete within 1ms, so
-		 * wait for 2ms in the worst case.
-		 */
-		for (i = 0; i < MAX_OTG_SS_TRIES; ++i) {
-			usleep_range(1000, 1100);
-			rc = smblib_read(chg, OTG_STATUS_REG, &otg_stat);
-			if (rc < 0) {
-				smblib_err(chg, "Couldn't read OTG status rc=%d\n",
-									rc);
-				return rc;
-			}
-
-			if (otg_stat & BOOST_SOFTSTART_DONE_BIT)
-				break;
-		}
-
-		if (!(otg_stat & BOOST_SOFTSTART_DONE_BIT)) {
-			smblib_err(chg, "Couldn't enable VCONN; OTG soft start failed\n");
-			return -EAGAIN;
-		}
-	}
+	int rc = 0;
+	u8 val;
 
 	/*
-	 * VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used
-	 * for Vconn, and it should be set with reverse polarity of CC_OUT.
+	 * When enabling VCONN using the command register, the CC pin must be
+	 * selected. VCONN should be supplied to the inactive CC pin, hence the
+	 * opposite of the CC_ORIENTATION_BIT is used.
 	 */
 	smblib_dbg(chg, PR_OTG, "enabling VCONN\n");
 	val = chg->typec_status[3] &
@@ -1262,7 +1217,7 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc = 0;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	if (chg->vconn_en)
 		goto unlock;
 
@@ -1271,7 +1226,7 @@
 		chg->vconn_en = true;
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return rc;
 }
 
@@ -1294,7 +1249,7 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc = 0;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	if (!chg->vconn_en)
 		goto unlock;
 
@@ -1303,7 +1258,7 @@
 		chg->vconn_en = false;
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return rc;
 }
 
@@ -1312,9 +1267,9 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int ret;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	ret = chg->vconn_en;
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 	return ret;
 }
 
@@ -1417,13 +1372,6 @@
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
 	int rc;
 
-	if (!chg->external_vconn && chg->vconn_en) {
-		smblib_dbg(chg, PR_OTG, "Killing VCONN before disabling OTG\n");
-		rc = _smblib_vconn_regulator_disable(rdev);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
-	}
-
 	if (chg->wa_flags & OTG_WA) {
 		/* set OTG current limit to minimum value */
 		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
@@ -1651,6 +1599,7 @@
 {
 	union power_supply_propval pval;
 	int rc;
+	int effective_fv_uv;
 	u8 stat;
 
 	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
@@ -1669,10 +1618,11 @@
 			 * If Vbatt is within 40mV above Vfloat, then don't
 			 * treat it as overvoltage.
 			 */
-			if (pval.intval >=
-				get_effective_result(chg->fv_votable) + 40000) {
+			effective_fv_uv = get_effective_result(chg->fv_votable);
+			if (pval.intval >= effective_fv_uv + 40000) {
 				val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-				smblib_err(chg, "battery over-voltage\n");
+				smblib_err(chg, "battery over-voltage vbat_fg = %duV, fv = %duV\n",
+						pval.intval, effective_fv_uv);
 				goto done;
 			}
 		}
@@ -1933,38 +1883,18 @@
 		return rc;
 
 	smblib_dbg(chg, PR_MISC, "re-running AICL\n");
-	switch (chg->smb_version) {
-	case PMI8998_SUBTYPE:
-		rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
-							&settled_icl_ua);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
-			return rc;
-		}
-
-		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
-				max(settled_icl_ua - chg->param.usb_icl.step_u,
-				chg->param.usb_icl.step_u));
-		vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
-		break;
-	case PM660_SUBTYPE:
-		/*
-		 * Use restart_AICL instead of trigger_AICL as it runs the
-		 * complete AICL instead of starting from the last settled
-		 * value.
-		 */
-		rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
-					RESTART_AICL_BIT, RESTART_AICL_BIT);
-		if (rc < 0)
-			smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
-									rc);
-		break;
-	default:
-		smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
-				chg->smb_version);
-		return -EINVAL;
+	rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+			&settled_icl_ua);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+		return rc;
 	}
 
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+			max(settled_icl_ua - chg->param.usb_icl.step_u,
+				chg->param.usb_icl.step_u));
+	vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+
 	return 0;
 }
 
@@ -1999,6 +1929,7 @@
 int smblib_dp_dm(struct smb_charger *chg, int val)
 {
 	int target_icl_ua, rc = 0;
+	union power_supply_propval pval;
 
 	switch (val) {
 	case POWER_SUPPLY_DP_DM_DP_PULSE:
@@ -2016,10 +1947,35 @@
 				rc, chg->pulse_cnt);
 		break;
 	case POWER_SUPPLY_DP_DM_ICL_DOWN:
-		chg->usb_icl_delta_ua -= 100000;
 		target_icl_ua = get_effective_result(chg->usb_icl_votable);
+		if (target_icl_ua < 0) {
+			/* no client vote, get the ICL from charger */
+			rc = power_supply_get_property(chg->usb_psy,
+					POWER_SUPPLY_PROP_HW_CURRENT_MAX,
+					&pval);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't get max current rc=%d\n",
+					rc);
+				return rc;
+			}
+			target_icl_ua = pval.intval;
+		}
+
+		/*
+		 * Check whether any voter other than SW_QC3_VOTER has voted
+		 * on USB_ICL; if so, reset the delta and restart the
+		 * reduction from the newly seen target.
+		 */
+		if (target_icl_ua != get_client_vote(chg->usb_icl_votable,
+							SW_QC3_VOTER))
+			chg->usb_icl_delta_ua = 0;
+
+		chg->usb_icl_delta_ua += 100000;
 		vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
-				target_icl_ua + chg->usb_icl_delta_ua);
+						target_icl_ua - 100000);
+		smblib_dbg(chg, PR_PARALLEL, "ICL DOWN ICL=%d reduction=%d\n",
+				target_icl_ua, chg->usb_icl_delta_ua);
 		break;
 	case POWER_SUPPLY_DP_DM_ICL_UP:
 	default:
@@ -2255,8 +2211,6 @@
 static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
 {
 	switch (chg->typec_status[0]) {
-	case 0:
-		return POWER_SUPPLY_TYPEC_NONE;
 	case UFP_TYPEC_RDSTD_BIT:
 		return POWER_SUPPLY_TYPEC_SOURCE_DEFAULT;
 	case UFP_TYPEC_RD1P5_BIT:
@@ -2267,7 +2221,7 @@
 		break;
 	}
 
-	return POWER_SUPPLY_TYPEC_NON_COMPLIANT;
+	return POWER_SUPPLY_TYPEC_NONE;
 }
 
 static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
@@ -2281,8 +2235,6 @@
 		return POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE;
 	case DFP_RD_OPEN_BIT:
 		return POWER_SUPPLY_TYPEC_SINK;
-	case DFP_RA_OPEN_BIT:
-		return POWER_SUPPLY_TYPEC_POWERED_CABLE_ONLY;
 	default:
 		break;
 	}
@@ -2290,20 +2242,12 @@
 	return POWER_SUPPLY_TYPEC_NONE;
 }
 
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
-			       union power_supply_propval *val)
+static int smblib_get_prop_typec_mode(struct smb_charger *chg)
 {
-	if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) {
-		val->intval = POWER_SUPPLY_TYPEC_NONE;
-		return 0;
-	}
-
 	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
-		val->intval = smblib_get_prop_dfp_mode(chg);
+		return smblib_get_prop_dfp_mode(chg);
 	else
-		val->intval = smblib_get_prop_ufp_mode(chg);
-
-	return 0;
+		return smblib_get_prop_ufp_mode(chg);
 }
 
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
@@ -2591,24 +2535,12 @@
 			      const union power_supply_propval *val)
 {
 	int rc;
-	bool orientation, cc_debounced, sink_attached, hvdcp;
+	bool orientation, sink_attached, hvdcp;
 	u8 stat;
 
 	if (!get_effective_result(chg->pd_allowed_votable))
 		return -EINVAL;
 
-	rc = smblib_read(chg, APSD_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc);
-		return rc;
-	}
-
-	cc_debounced = (bool)
-		(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
-	sink_attached = (bool)
-		(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
-	hvdcp = stat & QC_CHARGER_BIT;
-
 	chg->pd_active = val->intval;
 	if (chg->pd_active) {
 		vote(chg->apsd_disable_votable, PD_VOTER, true, 0);
@@ -2660,6 +2592,14 @@
 		if (rc < 0)
 			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
 	} else {
+		rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't read APSD status rc=%d\n",
+									rc);
+			return rc;
+		}
+
+		hvdcp = stat & QC_CHARGER_BIT;
 		vote(chg->apsd_disable_votable, PD_VOTER, false, 0);
 		vote(chg->pd_allowed_votable, PD_VOTER, true, 0);
 		vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0);
@@ -2679,8 +2619,8 @@
 		 * and data could be interrupted. Non-legacy DCP could also draw
 		 * more, but it may impact compliance.
 		 */
-		if (!chg->typec_legacy_valid && cc_debounced &&
-							!sink_attached && hvdcp)
+		sink_attached = chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT;
+		if (!chg->typec_legacy_valid && !sink_attached && hvdcp)
 			schedule_work(&chg->legacy_detection_work);
 	}
 
@@ -2802,6 +2742,7 @@
 		smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
 		return rc;
 	}
+
 	ccout = (stat & CC_ATTACHED_BIT) ?
 					(!!(stat & CC_ORIENTATION_BIT) + 1) : 0;
 	ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ?
@@ -3638,6 +3579,7 @@
 	vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 	vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
 	vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
+	vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
 
 	/* reset hvdcp voters */
 	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
@@ -3668,6 +3610,11 @@
 	chg->pd_hard_reset = 0;
 	chg->typec_legacy_valid = false;
 
+	/* reset back to 120mS tCC debounce */
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set 120mS tCC debounce rc=%d\n", rc);
+
 	/* enable APSD CC trigger for next insertion */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
@@ -3708,12 +3655,29 @@
 	if (rc < 0)
 		smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc);
 
+	mutex_lock(&chg->vconn_oc_lock);
+	if (!chg->vconn_en)
+		goto unlock;
+
+	smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+				 VCONN_EN_VALUE_BIT, 0);
+	chg->vconn_en = false;
+
+unlock:
+	mutex_unlock(&chg->vconn_oc_lock);
+
+	/* clear exit sink based on cc */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+						EXIT_SNK_BASED_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't clear exit_sink_based_on_cc rc=%d\n",
+				rc);
+
 	typec_sink_removal(chg);
 	smblib_update_usb_type(chg);
 }
 
-static void smblib_handle_typec_insertion(struct smb_charger *chg,
-							bool sink_attached)
+static void smblib_handle_typec_insertion(struct smb_charger *chg)
 {
 	int rc;
 
@@ -3725,65 +3689,37 @@
 		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
 									rc);
 
-	if (sink_attached)
+	if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
 		typec_sink_insertion(chg);
 	else
 		typec_sink_removal(chg);
 }
 
-static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
-						bool rising, bool sink_attached)
+static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
 {
-	int rc;
-	union power_supply_propval pval = {0, };
+	if (chg->pr_swap_in_progress)
+		return;
 
-	if (rising) {
-		if (!chg->typec_present) {
-			chg->typec_present = true;
-			smblib_dbg(chg, PR_MISC,  "TypeC insertion\n");
-			smblib_handle_typec_insertion(chg, sink_attached);
-		}
-	} else {
-		if (chg->typec_present) {
-			chg->typec_present = false;
-			smblib_dbg(chg, PR_MISC,  "TypeC removal\n");
-			smblib_handle_typec_removal(chg);
-		}
+	chg->typec_mode = smblib_get_prop_typec_mode(chg);
+	if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = true;
+		smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n",
+			smblib_typec_mode_name[chg->typec_mode]);
+		smblib_handle_typec_insertion(chg);
+	} else if (chg->typec_present &&
+				chg->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+		chg->typec_present = false;
+		smblib_dbg(chg, PR_MISC, "TypeC removal\n");
+		smblib_handle_typec_removal(chg);
 	}
 
-	rc = smblib_get_prop_typec_mode(chg, &pval);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
-
-	smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
-		   rising ? "rising" : "falling",
-		   smblib_typec_mode_name[pval.intval]);
-}
-
-irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
-		return IRQ_HANDLED;
-	}
-	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
-		stat, !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
-
-	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST,
-			!!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT)));
-	power_supply_changed(chg->usb_psy);
-
-	return IRQ_HANDLED;
+	smblib_dbg(chg, PR_INTERRUPT, "IRQ: cc-state-change; Type-C %s detected\n",
+				smblib_typec_mode_name[chg->typec_mode]);
 }
 
 static void smblib_usb_typec_change(struct smb_charger *chg)
 {
 	int rc;
-	bool debounce_done, sink_attached;
 
 	rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG,
 							chg->typec_status, 5);
@@ -3792,12 +3728,7 @@
 		return;
 	}
 
-	debounce_done =
-		(bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
-	sink_attached =
-		(bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT);
-
-	smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached);
+	smblib_handle_typec_cc_state_change(chg);
 
 	if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT)
 		smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n");
@@ -3814,7 +3745,11 @@
 	struct smb_charger *chg = irq_data->parent_data;
 
 	if (chg->micro_usb_mode) {
-		smblib_handle_usb_typec_change_for_uusb(chg);
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
+		vote(chg->awake_votable, OTG_DELAY_VOTER, true, 0);
+		smblib_dbg(chg, PR_INTERRUPT, "Scheduling OTG work\n");
+		schedule_delayed_work(&chg->uusb_otg_work,
+				msecs_to_jiffies(chg->otg_delay_ms));
 		return IRQ_HANDLED;
 	}
 
@@ -3896,9 +3831,63 @@
 	return IRQ_HANDLED;
 }
 
+/**************
+ * Additional USB PSY getters/setters
+ * that call interrupt functions
+ ***************/
+
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val)
+{
+	val->intval = chg->pr_swap_in_progress;
+	return 0;
+}
+
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val)
+{
+	int rc;
+
+	chg->pr_swap_in_progress = val->intval;
+	/*
+	 * call the cc changed irq to handle real removals while
+	 * PR_SWAP was in progress
+	 */
+	smblib_usb_typec_change(chg);
+	rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT,
+			val->intval ? TCC_DEBOUNCE_20MS_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't set tCC debounce rc=%d\n", rc);
+	return 0;
+}
+
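
The new PR_SWAP property lets the Type-C/PD stack tell the charger that a
power-role swap is in flight: CC state changes are ignored while
pr_swap_in_progress is set, and the tCC debounce is shortened to 20 ms for the
duration of the swap (and restored to 120 ms on removal). A hedged sketch of a
caller; the psy handle and function name are illustrative:

#include <linux/power_supply.h>

/* Flag the start/end of a power-role swap on the USB power supply. */
static int mark_pr_swap_sketch(struct power_supply *usb_psy, bool in_progress)
{
	union power_supply_propval val = { .intval = in_progress ? 1 : 0 };

	return power_supply_set_property(usb_psy, POWER_SUPPLY_PROP_PR_SWAP,
					 &val);
}
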
 /***************
  * Work Queues *
  ***************/
+static void smblib_uusb_otg_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+						uusb_otg_work.work);
+	int rc;
+	u8 stat;
+	bool otg;
+
+	rc = smblib_read(chg, TYPE_C_STATUS_3_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read TYPE_C_STATUS_3 rc=%d\n", rc);
+		goto out;
+	}
+
+	otg = !!(stat & (U_USB_GND_NOVBUS_BIT | U_USB_GND_BIT));
+	extcon_set_cable_state_(chg->extcon, EXTCON_USB_HOST, otg);
+	smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_3 = 0x%02x OTG=%d\n",
+			stat, otg);
+	power_supply_changed(chg->usb_psy);
+
+out:
+	vote(chg->awake_votable, OTG_DELAY_VOTER, false, 0);
+}
+
 
 static void smblib_hvdcp_detect_work(struct work_struct *work)
 {
@@ -4035,19 +4024,6 @@
 					QUICKSTART_OTG_FASTROLESWAP_BIT, 0);
 	if (rc < 0)
 		smblib_err(chg, "Couldn't enable VBUS < 1V check rc=%d\n", rc);
-
-	if (!chg->external_vconn && chg->vconn_en) {
-		chg->vconn_attempts = 0;
-		if (success) {
-			rc = _smblib_vconn_regulator_enable(
-							chg->vconn_vreg->rdev);
-			if (rc < 0)
-				smblib_err(chg, "Couldn't enable VCONN rc=%d\n",
-									rc);
-		} else {
-			chg->vconn_en = false;
-		}
-	}
 }
 
 #define MAX_OC_FALLING_TRIES 10
@@ -4136,7 +4112,7 @@
 	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
 		return;
 
-	mutex_lock(&chg->otg_oc_lock);
+	mutex_lock(&chg->vconn_oc_lock);
 	rc = _smblib_vconn_regulator_disable(chg->vconn_vreg->rdev);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
@@ -4185,7 +4161,7 @@
 	}
 
 unlock:
-	mutex_unlock(&chg->otg_oc_lock);
+	mutex_unlock(&chg->vconn_oc_lock);
 }
 
 static void smblib_otg_ss_done_work(struct work_struct *work)
@@ -4220,8 +4196,6 @@
 	}
 
 	power_supply_changed(chg->usb_main_psy);
-	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
-				settled_ua >= USB_WEAK_INPUT_UA, 0);
 
 	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
 }
@@ -4275,14 +4249,14 @@
 	chg->typec_legacy_valid = true;
 	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
 	legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT;
-	rp_high = smblib_get_prop_ufp_mode(chg) ==
-						POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+	rp_high = chg->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
 	if (!legacy || !rp_high)
 		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
 								false, 0);
 
 unlock:
 	chg->typec_en_dis_active = 0;
+	smblib_usb_typec_change(chg);
 	mutex_unlock(&chg->lock);
 }
 
@@ -4317,7 +4291,16 @@
 		smblib_err(chg, "Couldn't find votable PL_DISABLE rc=%d\n", rc);
 		return rc;
 	}
-	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
+	chg->pl_enable_votable_indirect = find_votable("PL_ENABLE_INDIRECT");
+	if (chg->pl_enable_votable_indirect == NULL) {
+		rc = -EINVAL;
+		smblib_err(chg,
+			"Couldn't find votable PL_ENABLE_INDIRECT rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
@@ -4367,14 +4350,6 @@
 		return rc;
 	}
 
-	chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
-					VOTE_SET_ANY,
-					smblib_pl_enable_indirect_vote_callback,
-					chg);
-	if (IS_ERR(chg->pl_enable_votable_indirect)) {
-		rc = PTR_ERR(chg->pl_enable_votable_indirect);
-		return rc;
-	}
 
 	chg->hvdcp_disable_votable_indirect = create_votable(
 				"HVDCP_DISABLE_INDIRECT",
@@ -4450,8 +4425,6 @@
 		destroy_votable(chg->awake_votable);
 	if (chg->chg_disable_votable)
 		destroy_votable(chg->chg_disable_votable);
-	if (chg->pl_enable_votable_indirect)
-		destroy_votable(chg->pl_enable_votable_indirect);
 	if (chg->apsd_disable_votable)
 		destroy_votable(chg->apsd_disable_votable);
 	if (chg->hvdcp_hw_inov_dis_votable)
@@ -4481,6 +4454,7 @@
 	mutex_init(&chg->lock);
 	mutex_init(&chg->write_lock);
 	mutex_init(&chg->otg_oc_lock);
+	mutex_init(&chg->vconn_oc_lock);
 	INIT_WORK(&chg->bms_update_work, bms_update_work);
 	INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
 	INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
@@ -4492,6 +4466,7 @@
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
 	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work);
+	INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work);
 	chg->fake_capacity = -EINVAL;
 	chg->fake_input_current_limited = -EINVAL;
 
@@ -4546,6 +4521,7 @@
 		cancel_delayed_work_sync(&chg->icl_change_work);
 		cancel_delayed_work_sync(&chg->pl_enable_work);
 		cancel_work_sync(&chg->legacy_detection_work);
+		cancel_delayed_work_sync(&chg->uusb_otg_work);
 		power_supply_unreg_notifier(&chg->nb);
 		smblib_destroy_votables(chg);
 		qcom_batt_deinit();
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 42b357e..f39f2c9 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -36,9 +36,7 @@
 #define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
 #define USB_PSY_VOTER			"USB_PSY_VOTER"
 #define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
-#define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
 #define PL_QNOVO_VOTER			"PL_QNOVO_VOTER"
-#define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define USBIN_V_VOTER			"USBIN_V_VOTER"
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
 #define TYPEC_SRC_VOTER			"TYPEC_SRC_VOTER"
@@ -64,6 +62,8 @@
 #define CC2_WA_VOTER			"CC2_WA_VOTER"
 #define QNOVO_VOTER			"QNOVO_VOTER"
 #define BATT_PROFILE_VOTER		"BATT_PROFILE_VOTER"
+#define OTG_DELAY_VOTER			"OTG_DELAY_VOTER"
+#define USBIN_I_VOTER			"USBIN_I_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -227,15 +227,16 @@
 	struct smb_iio		iio;
 	int			*debug_mask;
 	enum smb_mode		mode;
-	bool			external_vconn;
 	struct smb_chg_freq	chg_freq;
 	int			smb_version;
+	int			otg_delay_ms;
 
 	/* locks */
 	struct mutex		lock;
 	struct mutex		write_lock;
 	struct mutex		ps_change_lock;
 	struct mutex		otg_oc_lock;
+	struct mutex		vconn_oc_lock;
 
 	/* power supplies */
 	struct power_supply		*batt_psy;
@@ -290,6 +291,7 @@
 	struct delayed_work	icl_change_work;
 	struct delayed_work	pl_enable_work;
 	struct work_struct	legacy_detection_work;
+	struct delayed_work	uusb_otg_work;
 
 	/* cached status */
 	int			voltage_min_uv;
@@ -319,6 +321,8 @@
 	u8			typec_status[5];
 	bool			typec_legacy_valid;
 	int			fake_input_current_limited;
+	bool			pr_swap_in_progress;
+	int			typec_mode;
 
 	/* workaround flag */
 	u32			wa_flags;
@@ -452,8 +456,6 @@
 				union power_supply_propval *val);
 int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
 				union power_supply_propval *val);
-int smblib_get_prop_typec_mode(struct smb_charger *chg,
-				union power_supply_propval *val);
 int smblib_get_prop_typec_power_role(struct smb_charger *chg,
 				union power_supply_propval *val);
 int smblib_get_prop_pd_allowed(struct smb_charger *chg,
@@ -506,6 +508,10 @@
 int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
 int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
+int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
+				union power_supply_propval *val);
+int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
+				const union power_supply_propval *val);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 167666a..d8671ab 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -486,11 +486,11 @@
 #define UFP_TYPEC_OPEN_OPEN_BIT			BIT(0)
 
 #define TYPE_C_STATUS_2_REG			(USBIN_BASE + 0x0C)
-#define DFP_TYPEC_MASK				0x8F
 #define DFP_RA_OPEN_BIT				BIT(7)
 #define TIMER_STAGE_BIT				BIT(6)
 #define EXIT_UFP_MODE_BIT			BIT(5)
 #define EXIT_DFP_MODE_BIT			BIT(4)
+#define DFP_TYPEC_MASK				GENMASK(3, 0)
 #define DFP_RD_OPEN_BIT				BIT(3)
 #define DFP_RD_RA_VCONN_BIT			BIT(2)
 #define DFP_RD_RD_BIT				BIT(1)
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index b92a482..a464a81 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1416,6 +1416,7 @@
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_PARALLEL_MODE,
+	POWER_SUPPLY_PROP_INPUT_SUSPEND,
 };
 
 static int smb1351_parallel_set_chg_suspend(struct smb1351_charger *chip,
@@ -1702,6 +1703,9 @@
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
 		val->intval = chip->parallel_mode;
 		break;
+	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+		val->intval = chip->parallel_charger_suspended;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 83374bb..ca0a2c6 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -248,7 +248,7 @@
 		val->intval = chg->usb_psy_desc.type;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_MODE:
-		rc = smblib_get_prop_typec_mode(chg, val);
+		val->intval = chg->typec_mode;
 		break;
 	case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
 		rc = smblib_get_prop_typec_power_role(chg, val);
@@ -941,13 +941,6 @@
 		return rc;
 	}
 
-	rc = smblib_write(chg, THERMREG_SRC_CFG_REG,
-						THERMREG_SKIN_ADC_SRC_EN_BIT);
-	if (rc < 0) {
-		pr_err("Couldn't enable connector thermreg source rc=%d\n", rc);
-		return rc;
-	}
-
 	return 0;
 }
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ad627fb..7ad650e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2980,7 +2980,8 @@
 		goto out2;
 
 	if (rdev->supply && (rdev->desc->min_dropout_uV ||
-				!rdev->desc->ops->get_voltage)) {
+				!(rdev->desc->ops->get_voltage ||
+					rdev->desc->ops->get_voltage_sel))) {
 		int current_supply_uV;
 		int selector;
 
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index cf7c35d..deb0ce5 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -86,7 +86,7 @@
  */
 #define CPRH_MSM8998_KBSS_FUSE_COMBO_COUNT	32
 #define CPRH_SDM660_KBSS_FUSE_COMBO_COUNT	16
-#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	16
+#define CPRH_SDM845_KBSS_FUSE_COMBO_COUNT	24
 
 /*
  * Constants which define the name of each fuse corner.
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3314bf2..dfa8d50 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -520,7 +520,7 @@
 		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(0), 400),
 	RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
-		RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
+		RK818_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
 		BIT(1), 400),
 	{
 		.name = "LDO_REG3",
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
index 2987ed2..4f5f86c 100644
--- a/drivers/regulator/rpmh-regulator.c
+++ b/drivers/regulator/rpmh-regulator.c
@@ -393,10 +393,15 @@
 	 * Mask the voltage level if "off" level is supported and the regulator
 	 * has not been enabled.
 	 */
-	if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF &&
-	    (!(req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) ||
-	     !req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE]))
-		req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+	if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF) {
+		if (req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) {
+			if (!req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE])
+				req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+		} else {
+			/* Invalidate voltage level if enable is invalid. */
+			req->valid &= ~BIT(RPMH_REGULATOR_REG_ARC_LEVEL);
+		}
+	}
 
 	/*
 	 * Mark the pseudo enable bit as invalid so that it is not accidentally
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d2c3d7c..5ca6d21 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -311,8 +311,7 @@
 
 	/* Enable setting output voltage by I2C */
 	regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
-					TPS65023_REG_CTRL2_CORE_ADJ,
-					TPS65023_REG_CTRL2_CORE_ADJ);
+			   TPS65023_REG_CTRL2_CORE_ADJ, 0);
 
 	return 0;
 }
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c4..f3756ca 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -718,6 +718,7 @@
 };
 
 struct qeth_discipline {
+	const struct device_type *devtype;
 	void (*start_poll)(struct ccw_device *, int, unsigned long);
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
@@ -893,6 +894,9 @@
 extern struct qeth_discipline qeth_l3_discipline;
 extern const struct attribute_group *qeth_generic_attr_groups[];
 extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
 extern struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 20cf296..e8c4830 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5462,10 +5462,12 @@
 	card->discipline = NULL;
 }
 
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
 	.name = "qeth_generic",
 	.groups = qeth_generic_attr_groups,
 };
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
 static const struct device_type qeth_osn_devtype = {
 	.name = "qeth_osn",
 	.groups = qeth_osn_attr_groups,
@@ -5591,23 +5593,22 @@
 		goto err_card;
 	}
 
-	if (card->info.type == QETH_CARD_TYPE_OSN)
-		gdev->dev.type = &qeth_osn_devtype;
-	else
-		gdev->dev.type = &qeth_generic_devtype;
-
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
 			goto err_card;
+
+		gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+					? card->discipline->devtype
+					: &qeth_osn_devtype;
 		rc = card->discipline->setup(card->gdev);
 		if (rc)
 			goto err_disc;
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSX:
+		break;
 	default:
+		gdev->dev.type = &qeth_generic_devtype;
 		break;
 	}
 
@@ -5663,8 +5664,10 @@
 		if (rc)
 			goto err;
 		rc = card->discipline->setup(card->gdev);
-		if (rc)
+		if (rc) {
+			qeth_core_free_discipline(card);
 			goto err;
+		}
 	}
 	rc = card->discipline->set_online(gdev);
 err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 75b29fd2..db6a285 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -413,12 +413,16 @@
 
 	if (card->options.layer2 == newdis)
 		goto out;
-	else {
-		card->info.mac_bits  = 0;
-		if (card->discipline) {
-			card->discipline->remove(card->gdev);
-			qeth_core_free_discipline(card);
-		}
+	if (card->info.type == QETH_CARD_TYPE_OSM) {
+		/* fixed layer, can't switch */
+		rc = -EOPNOTSUPP;
+		goto out;
+	}
+
+	card->info.mac_bits = 0;
+	if (card->discipline) {
+		card->discipline->remove(card->gdev);
+		qeth_core_free_discipline(card);
 	}
 
 	rc = qeth_core_load_discipline(card, newdis);
@@ -426,6 +430,8 @@
 		goto out;
 
 	rc = card->discipline->setup(card->gdev);
+	if (rc)
+		qeth_core_free_discipline(card);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -703,10 +709,11 @@
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
 
 static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_state.attr,
@@ -726,9 +733,10 @@
 	&dev_attr_switch_attrs.attr,
 	NULL,
 };
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
 	.attrs = qeth_device_attrs,
 };
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
 
 const struct attribute_group *qeth_generic_attr_groups[] = {
 	&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 29d9fb3..0d59f9a 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
 
 #include "qeth_core.h"
 
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
 int qeth_l2_create_device_attributes(struct device *);
 void qeth_l2_remove_device_attributes(struct device *);
 void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index bb27058..5d010aa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1021,11 +1021,21 @@
 	return 0;
 }
 
+static const struct device_type qeth_l2_devtype = {
+	.name = "qeth_layer2",
+	.groups = qeth_l2_attr_groups,
+};
+
 static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l2_create_device_attributes(&gdev->dev);
+	if (gdev->dev.type == &qeth_generic_devtype) {
+		rc = qeth_l2_create_device_attributes(&gdev->dev);
+		if (rc)
+			return rc;
+	}
 	INIT_LIST_HEAD(&card->vid_list);
 	hash_init(card->mac_htable);
 	card->options.layer2 = 1;
@@ -1037,7 +1047,8 @@
 {
 	struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
 
-	qeth_l2_remove_device_attributes(&cgdev->dev);
+	if (cgdev->dev.type == &qeth_generic_devtype)
+		qeth_l2_remove_device_attributes(&cgdev->dev);
 	qeth_set_allowed_threads(card, 0, 1);
 	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
@@ -1095,7 +1106,6 @@
 	case QETH_CARD_TYPE_OSN:
 		card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
 					 ether_setup);
-		card->dev->flags |= IFF_NOARP;
 		break;
 	default:
 		card->dev = alloc_etherdev(0);
@@ -1108,9 +1118,12 @@
 	card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
 	card->dev->mtu = card->info.initial_mtu;
 	card->dev->netdev_ops = &qeth_l2_netdev_ops;
-	card->dev->ethtool_ops =
-		(card->info.type != QETH_CARD_TYPE_OSN) ?
-		&qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		card->dev->ethtool_ops = &qeth_l2_osn_ops;
+		card->dev->flags |= IFF_NOARP;
+	} else {
+		card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+	}
 	card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 	if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
 		card->dev->hw_features = NETIF_F_SG;
@@ -1434,6 +1447,7 @@
 }
 
 struct qeth_discipline qeth_l2_discipline = {
+	.devtype = &qeth_l2_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 692db49..a48ed9e 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -272,3 +272,11 @@
 	} else
 		qeth_bridgeport_an_set(card, 0);
 }
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+	&qeth_device_attr_group,
+	&qeth_device_blkt_group,
+	/* l2 specific, see l2_{create,remove}_device_attributes(): */
+	&qeth_l2_bridgeport_attr_group,
+	NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 272d9e7..171be5e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3157,8 +3157,13 @@
 static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+	int rc;
 
-	qeth_l3_create_device_attributes(&gdev->dev);
+	rc = qeth_l3_create_device_attributes(&gdev->dev);
+	if (rc)
+		return rc;
+	hash_init(card->ip_htable);
+	hash_init(card->ip_mc_htable);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
 	return 0;
@@ -3450,6 +3455,7 @@
 }
 
 struct qeth_discipline qeth_l3_discipline = {
+	.devtype = &qeth_generic_devtype,
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index c4fe95a..0414843 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1169,6 +1169,9 @@
 		cmd = list_first_entry_or_null(&vscsi->free_cmd,
 					       struct ibmvscsis_cmd, list);
 		if (cmd) {
+			if (cmd->abort_cmd)
+				cmd->abort_cmd = NULL;
+			cmd->flags &= ~(DELAY_SEND);
 			list_del(&cmd->list);
 			cmd->iue = iue;
 			cmd->type = UNSET_TYPE;
@@ -1748,45 +1751,99 @@
 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 {
 	u64 msg_hi = 0;
-	/* note do not attmempt to access the IU_data_ptr with this pointer
+	/* note do not attempt to access the IU_data_ptr with this pointer
 	 * it is not valid
 	 */
 	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
 	struct ibmvscsis_cmd *cmd, *nxt;
 	struct iu_entry *iue;
 	long rc = ADAPT_SUCCESS;
+	bool retry = false;
 
 	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
-		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-			iue = cmd->iue;
+		do {
+			retry = false;
+			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
+						 list) {
+				/*
+				 * Check to make sure the abort cmd gets
+				 * processed prior to the abort tmr cmd
+				 */
+				if (cmd->flags & DELAY_SEND)
+					continue;
 
-			crq->valid = VALID_CMD_RESP_EL;
-			crq->format = cmd->rsp.format;
+				if (cmd->abort_cmd) {
+					retry = true;
+					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+					cmd->abort_cmd = NULL;
+				}
 
-			if (cmd->flags & CMD_FAST_FAIL)
-				crq->status = VIOSRP_ADAPTER_FAIL;
+				/*
+				 * In CMD_T_ABORTED w/o CMD_T_TAS scenarios,
+				 * and in the case where LIO issued an
+				 * ABORT_TASK and is sending
+				 * TMR_TASK_DOES_NOT_EXIST, we don't send a
+				 * response, since it was already done.
+				 */
+				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
+				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
+					list_del(&cmd->list);
+					ibmvscsis_free_cmd_resources(vscsi,
+								     cmd);
+					/*
+					 * With a successfully aborted op
+					 * through LIO we want to increment
+					 * the vscsi credit so that when we
+					 * don't send a rsp to the original
+					 * scsi abort op (h_send_crq), but the
+					 * tm rsp to the abort is sent, the
+					 * credit is correctly sent with the
+					 * abort tm rsp. We would need 1 credit
+					 * for the abort tm rsp and 1 for the
+					 * aborted scsi op, thus we need to
+					 * increment here. We also want to
+					 * increment the credit here to make
+					 * sure the cmd is actually released
+					 * first; otherwise the client will
+					 * think it can send a new cmd, and we
+					 * could find ourselves short of cmd
+					 * elements.
+					 */
+					vscsi->credit += 1;
+				} else {
+					iue = cmd->iue;
 
-			crq->IU_length = cpu_to_be16(cmd->rsp.len);
+					crq->valid = VALID_CMD_RESP_EL;
+					crq->format = cmd->rsp.format;
 
-			rc = h_send_crq(vscsi->dma_dev->unit_address,
-					be64_to_cpu(msg_hi),
-					be64_to_cpu(cmd->rsp.tag));
+					if (cmd->flags & CMD_FAST_FAIL)
+						crq->status = VIOSRP_ADAPTER_FAIL;
 
-			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
-				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+					crq->IU_length = cpu_to_be16(cmd->rsp.len);
 
-			/* if all ok free up the command element resources */
-			if (rc == H_SUCCESS) {
-				/* some movement has occurred */
-				vscsi->rsp_q_timer.timer_pops = 0;
-				list_del(&cmd->list);
+					rc = h_send_crq(vscsi->dma_dev->unit_address,
+							be64_to_cpu(msg_hi),
+							be64_to_cpu(cmd->rsp.tag));
 
-				ibmvscsis_free_cmd_resources(vscsi, cmd);
-			} else {
-				srp_snd_msg_failed(vscsi, rc);
-				break;
+					pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+						 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+
+					/* if all ok free up the command
+					 * element resources
+					 */
+					if (rc == H_SUCCESS) {
+						/* some movement has occurred */
+						vscsi->rsp_q_timer.timer_pops = 0;
+						list_del(&cmd->list);
+
+						ibmvscsis_free_cmd_resources(vscsi,
+									     cmd);
+					} else {
+						srp_snd_msg_failed(vscsi, rc);
+						break;
+					}
+				}
 			}
-		}
+		} while (retry);
 
 		if (!rc) {
 			/*
@@ -2707,6 +2764,7 @@
 
 	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
 	     i++, cmd++) {
+		cmd->abort_cmd = NULL;
 		cmd->adapter = vscsi;
 		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
 		list_add_tail(&cmd->list, &vscsi->free_cmd);
@@ -2925,10 +2983,7 @@
 
 	rsp->opcode = SRP_RSP;
 
-	if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
-		rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
-	else
-		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
+	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
 	rsp->tag = cmd->rsp.tag;
 	rsp->flags = 0;
 
@@ -3578,9 +3633,20 @@
 {
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
+	struct scsi_info *vscsi = cmd->adapter;
 	struct iu_entry *iue = cmd->iue;
 	int rc;
 
+	/*
+	 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
+	 * since LIO can't do anything about it, and we don't want to
+	 * attempt an srp_transfer_data.
+	 */
+	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
+		pr_err("write_pending failed, flags: 0x%x\n", vscsi->flags);
+		return 0;
+	}
+
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
 			       1, 1);
 	if (rc) {
@@ -3659,11 +3725,28 @@
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
 	struct scsi_info *vscsi = cmd->adapter;
+	struct ibmvscsis_cmd *cmd_itr;
+	struct iu_entry *iue = cmd->iue;
+	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
 	uint len;
 
 	pr_debug("queue_tm_rsp %p, status %d\n",
 		 se_cmd, (int)se_cmd->se_tmr_req->response);
 
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
+	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
+		spin_lock_bh(&vscsi->intr_lock);
+		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
+			if (tag_to_abort == cmd_itr->se_cmd.tag) {
+				cmd_itr->abort_cmd = cmd;
+				cmd->flags |= DELAY_SEND;
+				break;
+			}
+		}
+		spin_unlock_bh(&vscsi->intr_lock);
+	}
+
 	srp_build_response(vscsi, cmd, &len);
 	cmd->rsp.format = SRP_FORMAT;
 	cmd->rsp.len = len;
@@ -3671,8 +3754,8 @@
 
 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
 {
-	/* TBD: What (if anything) should we do here? */
-	pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+	pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
+		 se_cmd, se_cmd->tag);
 }
 
 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 98b0ca7..f5683af 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -167,10 +167,12 @@
 	struct iu_rsp rsp;
 	struct work_struct work;
 	struct scsi_info *adapter;
+	struct ibmvscsis_cmd *abort_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
 	u64 init_time;
 #define CMD_FAST_FAIL	BIT(0)
+#define DELAY_SEND	BIT(1)
 	u32 flags;
 	char type;
 };
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8a7941b..289374c 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -4634,6 +4634,7 @@
 	struct MPT3SAS_DEVICE *sas_device_priv_data;
 	u32 response_code = 0;
 	unsigned long flags;
+	unsigned int sector_sz;
 
 	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@@ -4692,6 +4693,20 @@
 	}
 
 	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+	/* In case of bogus fw or device, we could end up having
+	 * unaligned partial completion. We can force alignment here,
+	 * then scsi-ml does not need to handle this misbehavior.
+	 */
+	sector_sz = scmd->device->sector_size;
+	if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+		     xfer_cnt % sector_sz)) {
+		sdev_printk(KERN_INFO, scmd->device,
+		    "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+			    xfer_cnt, sector_sz);
+		xfer_cnt = round_down(xfer_cnt, sector_sz);
+	}
+
 	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
 		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c2ac982..967bb0d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2792,10 +2792,10 @@
 	if (sdkp->opt_xfer_blocks &&
 	    sdkp->opt_xfer_blocks <= dev_max &&
 	    sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-	    logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
-		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
-		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-	} else
+	    sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
+		rw_max = q->limits.io_opt =
+			sdkp->opt_xfer_blocks * sdp->sector_size;
+	else
 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
 				      (sector_t)BLK_DEF_MAX_SECTORS);
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c8d9863..4446ed2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,11 +151,6 @@
 	return blocks << (ilog2(sdev->sector_size) - 9);
 }
 
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
-	return blocks * sdev->sector_size;
-}
-
 /*
  * Look up the DIX operation based on whether the command is read or
  * write and whether dix and dif are enabled.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 1b283b2..db4e7bb 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,6 +83,8 @@
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
 	select PHY_QCOM_UFS
+	select EXTCON
+	select EXTCON_GPIO
 	help
 	  This selects the QCOM specific additions to UFSHCD platform driver.
 	  UFS host on QCOM needs some vendor specific configuration before
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index de6ecbd..7c5a1bc 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -327,6 +327,20 @@
 	return ret;
 }
 
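+/*
+ * ufshcd_parse_extcon_info - look up the optional extcon phandle used for
+ * UFS card detect notifications. A missing phandle (-ENODEV) is not treated
+ * as an error, so platforms without a removable UFS card are unaffected.
+ */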
+static int ufshcd_parse_extcon_info(struct ufs_hba *hba)
+{
+	struct extcon_dev *extcon;
+
+	extcon = extcon_get_edev_by_phandle(hba->dev, 0);
+	if (IS_ERR(extcon) && PTR_ERR(extcon) != -ENODEV)
+		return PTR_ERR(extcon);
+
+	if (!IS_ERR(extcon))
+		hba->extcon = extcon;
+
+	return 0;
+}
+
 #ifdef CONFIG_SMP
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
@@ -449,6 +463,9 @@
 	ufshcd_parse_pm_levels(hba);
 	ufshcd_parse_gear_limits(hba);
 	ufshcd_parse_cmd_timeout(hba);
+	err = ufshcd_parse_extcon_info(hba);
+	if (err)
+		goto dealloc_host;
 
 	if (!dev->dma_mask)
 		dev->dma_mask = &dev->coherent_dma_mask;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 2f6cd95..1707556 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -381,6 +381,8 @@
 				 bool is_gating_context);
 static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
 					      bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
@@ -1523,6 +1525,7 @@
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	hba->ufs_stats.clk_hold.ts = ktime_get();
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -1627,6 +1630,7 @@
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
 	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+	hba->ufs_stats.clk_rel.ts = ktime_get();
 
 	hrtimer_start(&hba->clk_gating.gate_hrtimer,
 			ms_to_ktime(hba->clk_gating.delay_ms),
@@ -2053,6 +2057,22 @@
 	return;
 }
 
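+/*
+ * __ufshcd_set_auto_hibern8_timer - wrapper around
+ * ufshcd_set_auto_hibern8_timer() that blocks incoming SCSI requests and
+ * waits for the doorbell to clear so the auto-hibern8 timer is not
+ * reprogrammed while requests are in flight.
+ */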
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+					    unsigned long delay_ms)
+{
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold_all(hba);
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->lock);
+	/* wait for all the outstanding requests to finish */
+	ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+	ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+	up_write(&hba->lock);
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
+	pm_runtime_put_sync(hba->dev);
+}
+
 static void ufshcd_hibern8_exit_work(struct work_struct *work)
 {
 	int ret;
@@ -2073,8 +2093,10 @@
 
 	/* Exit from hibern8 */
 	if (ufshcd_is_link_hibern8(hba)) {
+		hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
 		ufshcd_hold(hba, false);
 		ret = ufshcd_uic_hibern8_exit(hba);
+		hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
 		ufshcd_release(hba, false);
 		if (!ret) {
 			spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2102,19 +2124,32 @@
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	unsigned long flags, value;
+	bool change = true;
 
 	if (kstrtoul(buf, 0, &value))
 		return -EINVAL;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.delay_ms == value)
+		change = false;
+
+	if (value >= hba->clk_gating.delay_ms_pwr_save ||
+	    value >= hba->clk_gating.delay_ms_perf) {
+		dev_err(hba->dev, "hibern8_on_idle_delay (%lu) must be less than clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
+			value, hba->clk_gating.delay_ms_pwr_save,
+			hba->clk_gating.delay_ms_perf);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return -EINVAL;
+	}
+
 	hba->hibern8_on_idle.delay_ms = value;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* Update auto hibern8 timer value if supported */
-	if (ufshcd_is_auto_hibern8_supported(hba) &&
+	if (change && ufshcd_is_auto_hibern8_supported(hba) &&
 	    hba->hibern8_on_idle.is_enabled)
-		ufshcd_set_auto_hibern8_timer(hba,
-					      hba->hibern8_on_idle.delay_ms);
+		__ufshcd_set_auto_hibern8_timer(hba,
+						hba->hibern8_on_idle.delay_ms);
 
 	return count;
 }
@@ -2144,7 +2179,7 @@
 
 	/* Update auto hibern8 timer value if supported */
 	if (ufshcd_is_auto_hibern8_supported(hba)) {
-		ufshcd_set_auto_hibern8_timer(hba,
+		__ufshcd_set_auto_hibern8_timer(hba,
 			value ? hba->hibern8_on_idle.delay_ms : value);
 		goto update;
 	}
@@ -2500,6 +2535,7 @@
 	int ret;
 	unsigned long flags;
 
+	hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
 	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
@@ -2513,6 +2549,7 @@
 	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
 	ufshcd_release_all(hba);
+	hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
 
 	ufsdbg_error_inject_dispatcher(hba,
 		ERR_INJECT_UIC, 0, &ret);
@@ -2999,6 +3036,7 @@
 		goto out;
 	}
 
+	hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
 	err = ufshcd_hold(hba, true);
 	if (err) {
 		err = SCSI_MLQUEUE_HOST_BUSY;
@@ -3013,6 +3051,7 @@
 	if (err) {
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		err = SCSI_MLQUEUE_HOST_BUSY;
+		hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
 		ufshcd_release(hba, true);
 		goto out;
 	}
@@ -4392,8 +4431,10 @@
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
+	hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
 	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
 	ufshcd_release_all(hba);
 out:
 	return ret;
@@ -5580,6 +5621,7 @@
 			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
+			hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
 			__ufshcd_release(hba, false);
 			__ufshcd_hibern8_release(hba, false);
 			if (cmd->request) {
@@ -6101,6 +6143,7 @@
 	if (unlikely((hba->clk_gating.state != CLKS_ON) &&
 	    ufshcd_is_auto_hibern8_supported(hba))) {
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
 		ufshcd_hold(hba, false);
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		clks_enabled = true;
@@ -6245,8 +6288,10 @@
 
 	hba->silence_err_logs = false;
 
-	if (clks_enabled)
+	if (clks_enabled) {
 		__ufshcd_release(hba, false);
+		hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+	}
 out:
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -6482,7 +6527,8 @@
 
 	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
-
+	hba->ufs_stats.last_intr_status = intr_status;
+	hba->ufs_stats.last_intr_ts = ktime_get();
 	/*
 	 * There could be max of hba->nutrs reqs in flight and in worst case
 	 * if the reqs get finished 1 by 1 after the interrupt status is
@@ -6561,6 +6607,7 @@
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
 	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
@@ -6618,6 +6665,7 @@
 	clear_bit(free_slot, &hba->tm_condition);
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
+	hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
 
 	ufshcd_release_all(hba);
 	return err;
@@ -6917,6 +6965,23 @@
 	return err;
 }
 
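+/*
+ * ufshcd_detect_device - issue a vendor specific full reset and a device
+ * reset, then re-initialize the host. Shared by the reset/restore retry
+ * loop and the card detect handler.
+ */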
+static int ufshcd_detect_device(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	err = ufshcd_vops_full_reset(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: full reset returned %d\n",
+			 __func__, err);
+
+	err = ufshcd_reset_device(hba);
+	if (err)
+		dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+			 __func__, err);
+
+	return ufshcd_host_reset_and_restore(hba);
+}
+
 /**
  * ufshcd_reset_and_restore - reset and re-initialize host/device
  * @hba: per-adapter instance
@@ -6933,26 +6998,10 @@
 	int retries = MAX_HOST_RESET_RETRIES;
 
 	do {
-		err = ufshcd_vops_full_reset(hba);
-		if (err)
-			dev_warn(hba->dev, "%s: full reset returned %d\n",
-				 __func__, err);
-
-		err = ufshcd_reset_device(hba);
-		if (err)
-			dev_warn(hba->dev, "%s: device reset failed. err %d\n",
-				 __func__, err);
-
-		err = ufshcd_host_reset_and_restore(hba);
+		err = ufshcd_detect_device(hba);
 	} while (err && --retries);
 
 	/*
-	 * There is no point proceeding even after failing
-	 * to recover after multiple retries.
-	 */
-	if (err)
-		BUG();
-	/*
 	 * After reset the door-bell might be cleared, complete
 	 * outstanding requests in s/w here.
 	 */
@@ -7655,10 +7704,8 @@
 	 * If we failed to initialize the device or the device is not
 	 * present, turn off the power/clocks etc.
 	 */
-	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress)
 		pm_runtime_put_sync(hba->dev);
-		ufshcd_hba_exit(hba);
-	}
 
 	trace_ufshcd_init(dev_name(hba->dev), ret,
 		ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7666,6 +7713,70 @@
 	return ret;
 }
 
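+/*
+ * ufshcd_card_detect_handler - work item scheduled from the extcon
+ * notifier; re-runs device detection on a card insert event while the
+ * host is not yet operational.
+ */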
+static void ufshcd_card_detect_handler(struct work_struct *work)
+{
+	struct ufs_hba *hba;
+
+	hba = container_of(work, struct ufs_hba, card_detect_work);
+	if (hba->card_detect_event &&
+	    (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
+		dev_dbg(hba->dev, "%s: card detect notification received\n",
+			 __func__);
+		pm_runtime_get_sync(hba->dev);
+		ufshcd_detect_device(hba);
+		pm_runtime_put_sync(hba->dev);
+	} else {
+		dev_dbg(hba->dev, "%s: card removed notification received\n",
+			 __func__);
+		/* TODO: remove the scsi device instances */
+	}
+}
+
+static int ufshcd_card_detect_notifier(struct notifier_block *nb,
+				       unsigned long event, void *ptr)
+{
+	struct ufs_hba *hba = container_of(nb, struct ufs_hba, card_detect_nb);
+
+	hba->card_detect_event = event;
+	schedule_work(&hba->card_detect_work);
+
+	return NOTIFY_DONE;
+}
+
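+/*
+ * ufshcd_extcon_register - register the card detect notifier for
+ * EXTCON_MECHANICAL events when an extcon device was provided.
+ */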
+static int ufshcd_extcon_register(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!hba->extcon)
+		return 0;
+
+	hba->card_detect_nb.notifier_call = ufshcd_card_detect_notifier;
+	ret = extcon_register_notifier(hba->extcon,
+				       EXTCON_MECHANICAL,
+				       &hba->card_detect_nb);
+	if (ret)
+		dev_err(hba->dev, "%s: extcon_register_notifier() failed, ret %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
+static int ufshcd_extcon_unregister(struct ufs_hba *hba)
+{
+	int ret;
+
+	if (!hba->extcon)
+		return 0;
+
+	ret = extcon_unregister_notifier(hba->extcon, EXTCON_MECHANICAL,
+					 &hba->card_detect_nb);
+	if (ret)
+		dev_err(hba->dev, "%s: extcon_unregister_notifier() failed, ret %d\n",
+			__func__, ret);
+
+	return ret;
+}
+
 /**
  * ufshcd_async_scan - asynchronous execution for probing hba
  * @data: data pointer to pass to this function
@@ -7682,6 +7793,8 @@
 	ufshcd_hold_all(hba);
 	ufshcd_probe_hba(hba);
 	ufshcd_release_all(hba);
+
+	ufshcd_extcon_register(hba);
 }
 
 /**
@@ -8391,20 +8504,9 @@
 
 	err = ufshcd_vops_init(hba);
 	if (err)
-		goto out;
-
-	err = ufshcd_vops_setup_regulators(hba, true);
-	if (err)
-		goto out_exit;
-
-	goto out;
-
-out_exit:
-	ufshcd_vops_exit(hba);
-out:
-	if (err)
 		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
 			__func__, ufshcd_get_var_name(hba), err);
+out:
 	return err;
 }
 
@@ -8413,8 +8515,6 @@
 	if (!hba->var || !hba->var->vops)
 		return;
 
-	ufshcd_vops_setup_regulators(hba, false);
-
 	ufshcd_vops_exit(hba);
 }
 
@@ -8473,6 +8573,7 @@
 static void ufshcd_hba_exit(struct ufs_hba *hba)
 {
 	if (hba->is_powered) {
+		ufshcd_extcon_unregister(hba);
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
 		if (ufshcd_is_clkscaling_supported(hba)) {
@@ -8777,10 +8878,8 @@
 		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
-	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
-		ret = -EINVAL;
-		goto enable_gating;
-	}
+	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba))
+		goto set_vreg_lpm;
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
 		if (ufshcd_can_autobkops_during_suspend(hba)) {
@@ -8816,6 +8915,7 @@
 	    ufshcd_is_hibern8_on_idle_allowed(hba))
 		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 
+set_vreg_lpm:
 	ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
@@ -8918,6 +9018,9 @@
 	if (ret)
 		goto disable_vreg;
 
+	if (ufshcd_is_link_off(hba))
+		goto skip_dev_ops;
+
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
 		if (!ret) {
@@ -8965,6 +9068,7 @@
 	if (hba->clk_scaling.is_allowed)
 		ufshcd_resume_clkscaling(hba);
 
+skip_dev_ops:
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release_all(hba);
 	goto out;
@@ -9635,6 +9739,7 @@
 	int ret = 0;
 
 	/* let's not get into low power until clock scaling is completed */
+	hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
 	ufshcd_hold_all(hba);
 
 	ret = ufshcd_clock_scaling_prepare(hba);
@@ -9698,6 +9803,7 @@
 clk_scaling_unprepare:
 	ufshcd_clock_scaling_unprepare(hba);
 out:
+	hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
 	ufshcd_release_all(hba);
 	return ret;
 }
@@ -10024,6 +10130,7 @@
 	/* Initialize work queues */
 	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
 	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+	INIT_WORK(&hba->card_detect_work, ufshcd_card_detect_handler);
 
 	/* Initialize UIC command mutex */
 	mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 6966aac..a485885 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -57,6 +57,7 @@
 #include <linux/completion.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/extcon.h>
 #include "unipro.h"
 
 #include <asm/irq.h>
@@ -584,6 +585,22 @@
 };
 #endif
 
+enum ufshcd_ctx {
+	QUEUE_CMD,
+	ERR_HNDLR_WORK,
+	H8_EXIT_WORK,
+	UIC_CMD_SEND,
+	PWRCTL_CMD_SEND,
+	TM_CMD_SEND,
+	XFR_REQ_COMPL,
+	CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+	ktime_t ts;
+	enum ufshcd_ctx ctx;
+};
+
 /**
  * struct ufs_stats - keeps usage/err statistics
  * @enabled: enable tag stats for debugfs
@@ -612,6 +629,10 @@
 	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
 
 #endif
+	u32 last_intr_status;
+	ktime_t last_intr_ts;
+	struct ufshcd_clk_ctx clk_hold;
+	struct ufshcd_clk_ctx clk_rel;
 	u32 hibern8_exit_cnt;
 	ktime_t last_hibern8_exit_tstamp;
 	struct ufs_uic_err_reg_hist pa_err;
@@ -704,6 +725,10 @@
  * @ufs_stats: ufshcd statistics to be used via debugfs
  * @debugfs_files: debugfs files associated with the ufs stats
  * @ufshcd_dbg_print: Bitmask for enabling debug prints
+ * @extcon: pointer to external connector device
+ * @card_detect_nb: card detector notifier registered with @extcon
+ * @card_detect_work: work to execute the card detect function
+ * @card_detect_event: card detect event, 0 = removed, 1 = inserted
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
@@ -876,6 +901,11 @@
 	/* Bitmask for enabling debug prints */
 	u32 ufshcd_dbg_print;
 
+	struct extcon_dev *extcon;
+	struct notifier_block card_detect_nb;
+	struct work_struct card_detect_work;
+	unsigned long card_detect_event;
+
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
 
@@ -1269,8 +1299,8 @@
 
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-	if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
-		return hba->var->vops->apply_dev_quirks(hba);
+	if (hba->var && hba->var->vops && hba->var->vops->suspend)
+		return hba->var->vops->suspend(hba, op);
 	return 0;
 }
 
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index f7f0269..a72cb17 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -223,6 +223,7 @@
 		/* make sure autosuspend is not called until ADSP comes up*/
 		pm_runtime_get_noresume(dev->dev);
 		dev->state = MSM_CTRL_DOWN;
+		dev->qmi.deferred_resp = false;
 		msm_slim_sps_exit(dev, false);
 		ngd_dom_down(dev);
 		mutex_unlock(&dev->tx_lock);
@@ -2019,19 +2020,18 @@
 	if (!pm_runtime_enabled(dev) ||
 		(!pm_runtime_suspended(dev) &&
 			cdev->state == MSM_CTRL_IDLE)) {
+		cdev->qmi.deferred_resp = true;
 		ret = ngd_slim_runtime_suspend(dev);
 		/*
 		 * If runtime-PM still thinks it's active, then make sure its
 		 * status is in sync with HW status.
-		 * Since this suspend calls QMI api, it results in holding a
-		 * wakelock. That results in failure of first suspend.
-		 * Subsequent suspend should not call low-power transition
-		 * again since the HW is already in suspended state.
 		 */
 		if (!ret) {
 			pm_runtime_disable(dev);
 			pm_runtime_set_suspended(dev);
 			pm_runtime_enable(dev);
+		} else {
+			cdev->qmi.deferred_resp = false;
 		}
 	}
 	if (ret == -EBUSY) {
@@ -2053,13 +2053,29 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	/*
+	 * If deferred response was requested for power-off and it failed,
+	 * mark runtime-pm status as active to be consistent
+	 * with HW status
+	 */
+	if (cdev->qmi.deferred_resp) {
+		ret = msm_slim_qmi_deferred_status_req(cdev);
+		if (ret) {
+			pm_runtime_disable(dev);
+			pm_runtime_set_active(dev);
+			pm_runtime_enable(dev);
+		}
+		cdev->qmi.deferred_resp = false;
+	}
 	/*
 	 * Rely on runtime-PM to call resume in case it is enabled.
 	 * Even if it's not enabled, rely on 1st client transaction to do
 	 * clock/power on
 	 */
 	SLIM_INFO(cdev, "system resume\n");
-	return 0;
+	return ret;
 }
 #endif /* CONFIG_PM_SLEEP */
 
diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c
index e7d3381..ef10e64 100644
--- a/drivers/slimbus/slim-msm.c
+++ b/drivers/slimbus/slim-msm.c
@@ -1224,12 +1224,16 @@
 #define SLIMBUS_QMI_POWER_RESP_V01 0x0021
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
 #define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ 0x0023
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP 0x0023
 
-#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
 #define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
 #define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
+#define SLIMBUS_QMI_DEFERRED_STATUS_REQ_MSG_MAX_MSG_LEN 0
+#define SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN 7
 
 enum slimbus_mode_enum_type_v01 {
 	/* To force a 32 bit signed enum. Do not change or use*/
@@ -1247,6 +1251,13 @@
 	SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
+enum slimbus_resp_enum_type_v01 {
+	SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
+	SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
+	SLIMBUS_RESP_DEFERRED_V01 = 2,
+	SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
+};
+
 struct slimbus_select_inst_req_msg_v01 {
 	/* Mandatory */
 	/* Hardware Instance Selection */
@@ -1269,6 +1280,12 @@
 	/* Mandatory */
 	/* Power Request Operation */
 	enum slimbus_pm_enum_type_v01 pm_req;
+
+	/* Optional */
+	/* Optional Deferred Response type Operation */
+	/* Must be set to true if type is being passed */
+	uint8_t resp_type_valid;
+	enum slimbus_resp_enum_type_v01 resp_type;
 };
 
 struct slimbus_power_resp_msg_v01 {
@@ -1283,6 +1300,9 @@
 	struct qmi_response_type_v01 resp;
 };
 
+struct slimbus_deferred_status_resp {
+	struct qmi_response_type_v01 resp;
+};
 
 static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
 	{
@@ -1359,6 +1379,24 @@
 		.ei_array  = NULL,
 	},
 	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum slimbus_resp_enum_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct slimbus_power_req_msg_v01,
+					   resp_type),
+	},
+	{
 		.data_type = QMI_EOTI,
 		.elem_len  = 0,
 		.elem_size = 0,
@@ -1411,6 +1449,22 @@
 	},
 };
 
+static struct elem_info slimbus_deferred_status_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct slimbus_deferred_status_resp,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+	},
+};
 static void msm_slim_qmi_recv_msg(struct kthread_work *work)
 {
 	int rc;
@@ -1488,32 +1542,56 @@
 	return 0;
 }
 
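+/*
+ * slim_qmi_resp_cb - callback invoked when the deferred QMI power response
+ * arrives; logs any failure and completes defer_comp so that
+ * msm_slim_qmi_deferred_status_req() can wait on it.
+ */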
+static void slim_qmi_resp_cb(struct qmi_handle *handle, unsigned int msg_id,
+			     void *msg, void *resp_cb_data, int stat)
+{
+	struct slimbus_power_resp_msg_v01 *resp = msg;
+	struct msm_slim_ctrl *dev = resp_cb_data;
+
+	if (msg_id != SLIMBUS_QMI_POWER_RESP_V01)
+		SLIM_WARN(dev, "incorrect msg id in qmi-resp CB:0x%x", msg_id);
+	else if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
+		SLIM_ERR(dev, "%s: QMI power failed 0x%x (%s)\n", __func__,
+			 resp->resp.result, get_qmi_error(&resp->resp));
+
+	complete(&dev->qmi.defer_comp);
+}
+
 static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
 				struct slimbus_power_req_msg_v01 *req)
 {
-	struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
-	struct msg_desc req_desc, resp_desc;
+	struct slimbus_power_resp_msg_v01 *resp =
+		(struct slimbus_power_resp_msg_v01 *)&dev->qmi.resp;
+	struct msg_desc req_desc;
+	struct msg_desc *resp_desc = &dev->qmi.resp_desc;
 	int rc;
 
 	req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
 	req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
 	req_desc.ei_array = slimbus_power_req_msg_v01_ei;
 
-	resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
-	resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
-	resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;
+	resp_desc->msg_id = SLIMBUS_QMI_POWER_RESP_V01;
+	resp_desc->max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
+	resp_desc->ei_array = slimbus_power_resp_msg_v01_ei;
 
-	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
-			&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
-	if (rc < 0) {
+	if (dev->qmi.deferred_resp)
+		rc = qmi_send_req_nowait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), slim_qmi_resp_cb, dev);
+	else
+		rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req,
+				       sizeof(*req), resp_desc, resp,
+				       sizeof(*resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0)
 		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+
+	if (rc < 0 || dev->qmi.deferred_resp)
 		return rc;
-	}
 
 	/* Check the response */
-	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
 		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
-				resp.resp.result, get_qmi_error(&resp.resp));
+				resp->resp.result, get_qmi_error(&resp->resp));
 		return -EREMOTEIO;
 	}
 
@@ -1527,6 +1605,7 @@
 	struct slimbus_select_inst_req_msg_v01 req;
 
 	kthread_init_worker(&dev->qmi.kworker);
+	init_completion(&dev->qmi.defer_comp);
 
 	dev->qmi.task = kthread_run(kthread_worker_fn,
 			&dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
@@ -1604,6 +1683,13 @@
 	else
 		req.pm_req = SLIMBUS_PM_INACTIVE_V01;
 
+	if (dev->qmi.deferred_resp) {
+		req.resp_type = SLIMBUS_RESP_DEFERRED_V01;
+		req.resp_type_valid = 1;
+	} else {
+		req.resp_type_valid = 0;
+	}
+
 	return msm_slim_qmi_send_power_request(dev, &req);
 }
 
@@ -1635,3 +1721,46 @@
 	}
 	return 0;
 }
+
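+/*
+ * msm_slim_qmi_deferred_status_req - query the status of a power request
+ * that was sent with a deferred response, then wait (up to 1s) for the
+ * deferred response callback to complete.
+ */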
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev)
+{
+	struct slimbus_deferred_status_resp resp = { { 0, 0 } };
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_REQ;
+	req_desc.max_msg_len = 0;
+	req_desc.ei_array = NULL;
+
+	resp_desc.msg_id = SLIMBUS_QMI_DEFERRED_STATUS_RESP;
+	resp_desc.max_msg_len =
+		SLIMBUS_QMI_DEFERRED_STATUS_RESP_STAT_MSG_MAX_MSG_LEN;
+	resp_desc.ei_array = slimbus_deferred_status_resp_msg_v01_ei;
+
+	rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
+		&resp_desc, &resp, sizeof(resp), SLIM_QMI_RESP_TOUT);
+	if (rc < 0) {
+		SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
+		return rc;
+	}
+	/* Check the response */
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
+			__func__, resp.resp.result, get_qmi_error(&resp.resp));
+		return -EREMOTEIO;
+	}
+
+	/* wait for the deferred response */
+	rc = wait_for_completion_timeout(&dev->qmi.defer_comp, HZ);
+	if (rc == 0) {
+		SLIM_WARN(dev, "slimbus power deferred response not rcvd\n");
+		return -ETIMEDOUT;
+	}
+	/* Check what response we got in callback */
+	if (dev->qmi.resp.result != QMI_RESULT_SUCCESS_V01) {
+		SLIM_WARN(dev, "QMI power req failed in CB");
+		return -EREMOTEIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 65b9fae..ee0f625 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -228,6 +228,10 @@
 	struct kthread_worker		kworker;
 	struct completion		qmi_comp;
 	struct notifier_block		nb;
+	bool				deferred_resp;
+	struct qmi_response_type_v01	resp;
+	struct msg_desc			resp_desc;
+	struct completion		defer_comp;
 };
 
 enum msm_slim_dom {
@@ -437,4 +441,5 @@
 int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master);
 int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active);
 int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev);
+int msm_slim_qmi_deferred_status_req(struct msm_slim_ctrl *dev);
 #endif
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index a50e901..ec85506 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -676,7 +676,7 @@
 
 config QTI_RPM_STATS_LOG
 	bool "Qualcomm Technologies RPM Stats Driver"
-	depends on DEBUG_FS
+	depends on SYSFS
 	default n
 	help
 	  This option enables a driver which reads RPM messages from a shared
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 9a7262e..4c59ca6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,4 @@
+KASAN_SANITIZE_scm.o := n
 obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 93e6994..585836a 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -154,6 +154,7 @@
 	void			*sram_buf;
 	struct msm_dump_data	sram_data;
 	uint8_t			curr_list;
+	uint8_t			cti_trig;
 };
 
 static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -601,7 +602,8 @@
 		}
 
 		/* 4. Configure trigger, data sink and function type */
-		dcc_writel(drvdata, BIT(9) | ((drvdata->data_sink << 4) |
+		dcc_writel(drvdata, BIT(9) | ((drvdata->cti_trig << 8) |
+			   (drvdata->data_sink << 4) |
 			   (drvdata->func_type[list])), DCC_LL_CFG(list));
 
 		/* 5. Clears interrupt status register */
@@ -1141,8 +1143,16 @@
 
 	mutex_lock(&drvdata->mutex);
 
-	if (kstrtoul(buf, 16, &loop_cnt))
+	if (kstrtoul(buf, 16, &loop_cnt)) {
 		ret = -EINVAL;
+		goto err;
+	}
+
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
+		ret = -EINVAL;
+		goto err;
+	}
 
 	entry = devm_kzalloc(drvdata->dev, sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
@@ -1152,6 +1162,7 @@
 
 	entry->loop_cnt = min_t(uint32_t, loop_cnt, MAX_LOOP_CNT);
 	entry->index = drvdata->nr_config[drvdata->curr_list]++;
+	entry->desc_type = DCC_LOOP_TYPE;
 	INIT_LIST_HEAD(&entry->list);
 	list_add_tail(&entry->list, &drvdata->cfg_head[drvdata->curr_list]);
 
@@ -1219,12 +1230,13 @@
 
 	nval = sscanf(buf, "%x %x %d", &addr, &write_val, &apb_bus);
 
-	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
-		dev_err(dev, "Select link list to program using curr_list\n");
-		return -EINVAL;
+	if (nval <= 1 || nval > 3) {
+		ret = -EINVAL;
+		goto err;
 	}
 
-	if (nval <= 1 || nval > 3) {
+	if (drvdata->curr_list >= DCC_MAX_LINK_LIST) {
+		dev_err(dev, "Select link list to program using curr_list\n");
 		ret = -EINVAL;
 		goto err;
 	}
@@ -1252,6 +1264,43 @@
 }
 static DEVICE_ATTR(config_write, 0200, NULL, dcc_write);
 
+static ssize_t dcc_show_cti_trig(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", drvdata->cti_trig);
+}
+
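+/*
+ * dcc_store_cti_trig - enable/disable the CTI trigger bit (bit 8 of
+ * DCC_LL_CFG); returns -EBUSY while the currently selected link list is
+ * enabled.
+ */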
+static ssize_t dcc_store_cti_trig(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t size)
+{
+	unsigned long val;
+	int ret = 0;
+	struct dcc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->enable[drvdata->curr_list]) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (val)
+		drvdata->cti_trig = 1;
+	else
+		drvdata->cti_trig = 0;
+out:
+	mutex_unlock(&drvdata->mutex);
+	/* sysfs store callbacks must return the number of bytes consumed */
+	return ret ? ret : size;
+}
+static DEVICE_ATTR(cti_trig, 0644,
+		   dcc_show_cti_trig, dcc_store_cti_trig);
+
 static const struct device_attribute *dcc_attrs[] = {
 	&dev_attr_func_type,
 	&dev_attr_data_sink,
@@ -1266,6 +1315,7 @@
 	&dev_attr_rd_mod_wr,
 	&dev_attr_curr_list,
 	&dev_attr_config_write,
+	&dev_attr_cti_trig,
 	NULL,
 };
 
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
index 0c562ec..5156bc1 100644
--- a/drivers/soc/qcom/early_random.c
+++ b/drivers/soc/qcom/early_random.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,7 +13,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/random.h>
+#include <linux/hw_random.h>
 #include <linux/io.h>
 
 #include <soc/qcom/scm.h>
@@ -57,7 +58,7 @@
 	if (!ret) {
 		dmac_inv_range(random_buffer, random_buffer +
 						RANDOM_BUFFER_SIZE);
-		add_device_randomness(random_buffer, SZ_512);
+		add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3);
 	}
 }
 
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 1455069..92dbd48 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -26,6 +26,7 @@
 #include <linux/serial_core.h>
 #include <linux/serial.h>
 #include <linux/workqueue.h>
+#include <linux/power_supply.h>
 
 #define EUD_ENABLE_CMD 1
 #define EUD_DISABLE_CMD 0
@@ -87,15 +88,52 @@
 static void enable_eud(struct platform_device *pdev)
 {
 	struct eud_chip *priv = platform_get_drvdata(pdev);
+	struct power_supply *usb_psy = NULL;
+	union power_supply_propval pval = {0};
+	union power_supply_propval tval = {0};
+	int ret;
 
-	/* write into CSR to enable EUD */
-	writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
-	/* Enable vbus, chgr & safe mode warning interrupts */
-	writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
-			priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+	usb_psy = power_supply_get_by_name("usb");
+	if (!usb_psy) {
+		dev_warn(&pdev->dev, "Could not get usb power_supply\n");
+		return;
+	}
 
-	/* Ensure Register Writes Complete */
-	wmb();
+	ret = power_supply_get_property(usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to read USB PRESENT: %x\n", ret);
+		return;
+	}
+
+	ret = power_supply_get_property(usb_psy,
+			POWER_SUPPLY_PROP_REAL_TYPE, &tval);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to read USB TYPE: %x\n", ret);
+		return;
+	}
+
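+	/*
+	 * Only enable EUD when a USB cable is present and the charger type
+	 * is SDP or CDP.
+	 */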
+	if (pval.intval && (tval.intval == POWER_SUPPLY_TYPE_USB ||
+	    tval.intval == POWER_SUPPLY_TYPE_USB_CDP)) {
+		/* write into CSR to enable EUD */
+		writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+		/* Enable vbus, chgr & safe mode warning interrupts */
+		writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+				priv->eud_reg_base + EUD_REG_INT1_EN_MASK);
+
+		/* Ensure Register Writes Complete */
+		wmb();
+
+		/*
+		 * Set the default cable state to USB connected and charger
+		 * enabled
+		 */
+		extcon_set_state_sync(priv->extcon, EXTCON_USB, true);
+		extcon_set_state_sync(priv->extcon, EXTCON_CHG_USB_SDP, true);
+	} else {
+		dev_warn(&pdev->dev, "Connect USB cable before enabling EUD\n");
+		return;
+	}
 
 	dev_dbg(&pdev->dev, "%s: EUD Enabled!\n", __func__);
 }
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 3c4759c..453faa8 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -702,7 +702,8 @@
 		err = true;
 	} else if (intent->data == NULL) {
 		if (einfo->intentless) {
-			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+			intent->data = kmalloc(cmd.frag_size,
+						__GFP_ATOMIC | __GFP_HIGH);
 			if (!intent->data) {
 				err = true;
 				GLINK_ERR(
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 69e0ebc..b5bb719 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -191,8 +191,8 @@
 	ICNSS_FW_TEST_MODE,
 	ICNSS_PM_SUSPEND,
 	ICNSS_PM_SUSPEND_NOIRQ,
-	ICNSS_SSR_ENABLED,
-	ICNSS_PDR_ENABLED,
+	ICNSS_SSR_REGISTERED,
+	ICNSS_PDR_REGISTERED,
 	ICNSS_PD_RESTART,
 	ICNSS_MSA0_ASSIGNED,
 	ICNSS_WLFW_EXISTS,
@@ -691,6 +691,8 @@
 		goto out;
 	}
 
+	memset(&ind_msg, 0, sizeof(ind_msg));
+
 	ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
 	ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
 	ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
@@ -2345,7 +2347,7 @@
 	if (code != SUBSYS_BEFORE_SHUTDOWN)
 		return NOTIFY_OK;
 
-	if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
+	if (test_bit(ICNSS_PDR_REGISTERED, &priv->state))
 		return NOTIFY_OK;
 
 	icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
@@ -2386,14 +2388,14 @@
 		icnss_pr_err("Modem register notifier failed: %d\n", ret);
 	}
 
-	set_bit(ICNSS_SSR_ENABLED, &priv->state);
+	set_bit(ICNSS_SSR_REGISTERED, &priv->state);
 
 	return ret;
 }
 
 static int icnss_modem_ssr_unregister_notifier(struct icnss_priv *priv)
 {
-	if (!test_and_clear_bit(ICNSS_SSR_ENABLED, &priv->state))
+	if (!test_and_clear_bit(ICNSS_SSR_REGISTERED, &priv->state))
 		return 0;
 
 	subsys_notif_unregister_notifier(priv->modem_notify_handler,
@@ -2407,7 +2409,7 @@
 {
 	int i;
 
-	if (!test_and_clear_bit(ICNSS_PDR_ENABLED, &priv->state))
+	if (!test_and_clear_bit(ICNSS_PDR_REGISTERED, &priv->state))
 		return 0;
 
 	for (i = 0; i < priv->total_domains; i++)
@@ -2531,9 +2533,10 @@
 	priv->service_notifier = notifier;
 	priv->total_domains = pd->total_domains;
 
-	set_bit(ICNSS_PDR_ENABLED, &priv->state);
+	set_bit(ICNSS_PDR_REGISTERED, &priv->state);
 
-	icnss_pr_dbg("PD restart enabled, state: 0x%lx\n", priv->state);
+	icnss_pr_dbg("PD restart notifier registered, state: 0x%lx\n",
+		     priv->state);
 
 	return NOTIFY_OK;
 
@@ -3188,7 +3191,7 @@
 		goto out;
 	}
 
-	if (!test_bit(ICNSS_PDR_ENABLED, &priv->state)) {
+	if (!test_bit(ICNSS_PDR_REGISTERED, &priv->state)) {
 		icnss_pr_err("PD restart not enabled to trigger recovery: state: 0x%lx\n",
 			     priv->state);
 		ret = -EOPNOTSUPP;
@@ -3642,11 +3645,11 @@
 		case ICNSS_PM_SUSPEND_NOIRQ:
 			seq_puts(s, "PM SUSPEND NOIRQ");
 			continue;
-		case ICNSS_SSR_ENABLED:
-			seq_puts(s, "SSR ENABLED");
+		case ICNSS_SSR_REGISTERED:
+			seq_puts(s, "SSR REGISTERED");
 			continue;
-		case ICNSS_PDR_ENABLED:
-			seq_puts(s, "PDR ENABLED");
+		case ICNSS_PDR_REGISTERED:
+			seq_puts(s, "PDR REGISTERED");
 			continue;
 		case ICNSS_PD_RESTART:
 			seq_puts(s, "PD RESTART");
@@ -3969,6 +3972,9 @@
 	    data_len > QMI_WLFW_MAX_DATA_SIZE_V01)
 		return -EINVAL;
 
+	kfree(priv->diag_reg_read_buf);
+	priv->diag_reg_read_buf = NULL;
+
 	reg_buf = kzalloc(data_len, GFP_KERNEL);
 	if (!reg_buf)
 		return -ENOMEM;
@@ -4002,12 +4008,13 @@
 	.llseek         = seq_lseek,
 };
 
+#ifdef CONFIG_ICNSS_DEBUG
 static int icnss_debugfs_create(struct icnss_priv *priv)
 {
 	int ret = 0;
 	struct dentry *root_dentry;
 
-	root_dentry = debugfs_create_dir("icnss", 0);
+	root_dentry = debugfs_create_dir("icnss", NULL);
 
 	if (IS_ERR(root_dentry)) {
 		ret = PTR_ERR(root_dentry);
@@ -4017,19 +4024,40 @@
 
 	priv->root_dentry = root_dentry;
 
-	debugfs_create_file("fw_debug", 0644, root_dentry, priv,
+	debugfs_create_file("fw_debug", 0600, root_dentry, priv,
 			    &icnss_fw_debug_fops);
 
-	debugfs_create_file("stats", 0644, root_dentry, priv,
+	debugfs_create_file("stats", 0600, root_dentry, priv,
 			    &icnss_stats_fops);
 	debugfs_create_file("reg_read", 0600, root_dentry, priv,
 			    &icnss_regread_fops);
-	debugfs_create_file("reg_write", 0644, root_dentry, priv,
+	debugfs_create_file("reg_write", 0600, root_dentry, priv,
 			    &icnss_regwrite_fops);
 
 out:
 	return ret;
 }
+#else
+static int icnss_debugfs_create(struct icnss_priv *priv)
+{
+	int ret = 0;
+	struct dentry *root_dentry;
+
+	root_dentry = debugfs_create_dir("icnss", NULL);
+
+	if (IS_ERR(root_dentry)) {
+		ret = PTR_ERR(root_dentry);
+		icnss_pr_err("Unable to create debugfs %d\n", ret);
+		return ret;
+	}
+
+	priv->root_dentry = root_dentry;
+
+	debugfs_create_file("stats", 0600, root_dentry, priv,
+			    &icnss_stats_fops);
+	return 0;
+}
+#endif
 
 static void icnss_debugfs_destroy(struct icnss_priv *priv)
 {
@@ -4235,6 +4263,11 @@
 
 	icnss_debugfs_create(priv);
 
+	ret = device_init_wakeup(&priv->pdev->dev, true);
+	if (ret)
+		icnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+			     ret);
+
 	penv = priv;
 
 	icnss_pr_info("Platform driver probed successfully\n");
@@ -4255,6 +4288,8 @@
 {
 	icnss_pr_info("Removing driver: state: 0x%lx\n", penv->state);
 
+	device_init_wakeup(&penv->pdev->dev, false);
+
 	icnss_debugfs_destroy(penv);
 
 	icnss_modem_ssr_unregister_notifier(penv);
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index b9ce417..5ed66bf 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,9 @@
 #include <linux/of_address.h>
 #include <soc/qcom/memory_dump.h>
 #include <soc/qcom/scm.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
 
 #define MSM_DUMP_TABLE_VERSION		MSM_DUMP_MAKE_VERSION(2, 0)
 
@@ -195,3 +198,84 @@
 }
 early_initcall(init_debug_lar_unlock);
 #endif
+
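+/*
+ * For each child node, read the dump size and id from DT, allocate a
+ * DMA-coherent buffer for the region and register it in the APPS dump
+ * table. Failures for one child are logged and skipped so remaining
+ * regions are still registered.
+ */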
+static int mem_dump_probe(struct platform_device *pdev)
+{
+	struct device_node *child_node;
+	const struct device_node *node = pdev->dev.of_node;
+	static dma_addr_t dump_addr;
+	static void *dump_vaddr;
+	struct msm_dump_data *dump_data;
+	struct msm_dump_entry dump_entry;
+	int ret;
+	u32 size, id;
+
+	for_each_available_child_of_node(node, child_node) {
+		ret = of_property_read_u32(child_node, "qcom,dump-size", &size);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find size for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find id for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
+						&dump_addr, GFP_KERNEL);
+
+		if (!dump_vaddr) {
+			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
+			continue;
+		}
+
+		memset(dump_vaddr, 0x0, size);
+
+		dump_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct msm_dump_data), GFP_KERNEL);
+		if (!dump_data) {
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			continue;
+		}
+
+		dump_data->addr = dump_addr;
+		dump_data->len = size;
+		dump_entry.id = id;
+		dump_entry.addr = virt_to_phys(dump_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+		if (ret) {
+			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
+				id);
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			devm_kfree(&pdev->dev, dump_data);
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id mem_dump_match_table[] = {
+	{.compatible = "qcom,mem-dump",},
+	{}
+};
+
+static struct platform_driver mem_dump_driver = {
+	.probe = mem_dump_probe,
+	.driver = {
+		.name = "msm_mem_dump",
+		.owner = THIS_MODULE,
+		.of_match_table = mem_dump_match_table,
+	},
+};
+
+static int __init mem_dump_init(void)
+{
+	return platform_driver_register(&mem_dump_driver);
+}
+
+pure_initcall(mem_dump_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index bf5a526..e38c53e 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -24,8 +24,10 @@
 #define NUM_LNODES	3
 #define MAX_STR_CL	50
 
-#define MSM_BUS_MAS_ALC	144
-#define MSM_BUS_RSC_APPS 8000
+#define MSM_BUS_MAS_ALC			144
+#define MSM_BUS_RSC_APPS		8000
+#define MSM_BUS_RSC_DISP		8001
+#define BCM_TCS_CMD_ACV_APPS		0x8
 
 struct bus_search_type {
 	struct list_head link;
@@ -127,16 +129,14 @@
 		goto exit_bcm_add_bus_req;
 	}
 
-	if (cur_dev->node_info->bcm_req_idx != -1)
-		goto exit_bcm_add_bus_req;
-
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_add_bus_req;
 
 	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		if (cur_dev->node_info->bcm_req_idx[i] != -1)
+			continue;
 		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 		max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
-
 		if (!bcm_dev->num_lnodes) {
 			bcm_dev->lnode_list = devm_kzalloc(dev,
 				sizeof(struct link_node) * max_num_lnodes,
@@ -183,7 +183,7 @@
 
 		lnode->in_use = 1;
 		lnode->bus_dev_id = cur_dev->node_info->id;
-		cur_dev->node_info->bcm_req_idx = lnode_idx;
+		cur_dev->node_info->bcm_req_idx[i] = lnode_idx;
 		memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
 		memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
 	}
@@ -483,11 +483,35 @@
 	return first_hop;
 }
 
+static void bcm_update_acv_req(struct msm_bus_node_device_type *cur_rsc,
+				uint64_t max_ab, uint64_t max_ib,
+				uint64_t *vec_a, uint64_t *vec_b,
+				uint32_t *acv, int ctx)
+{
+	uint32_t acv_bmsk = 0;
+	/*
+	 * Base ACV voting on current RSC until mapping is set up in commanddb
+	 * that allows us to vote ACV based on master.
+	 */
+
+	if (cur_rsc->node_info->id == MSM_BUS_RSC_APPS)
+		acv_bmsk = BCM_TCS_CMD_ACV_APPS;
+
+	if (max_ab == 0 && max_ib == 0)
+		*acv = *acv & ~acv_bmsk;
+	else
+		*acv = *acv | acv_bmsk;
+	*vec_a = 0;
+	*vec_b = *acv;
+}
+
 static void bcm_update_bus_req(struct device *dev, int ctx)
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+
+	int i, j;
 	uint64_t max_ib = 0;
 	uint64_t max_ab = 0;
 	int lnode_idx = 0;
@@ -507,7 +531,7 @@
 		if (!bcm_dev)
 			goto exit_bcm_update_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -519,19 +543,19 @@
 				cur_dev->node_info->agg_params.buswidth *
 				cur_dev->node_info->agg_params.num_aggports);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_ib = max(max_ib,
-				max(bcm_dev->lnode_list[i].lnode_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].lnode_ib[DUAL_CTX]));
+				max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
 				max_ab = max(max_ab,
-				bcm_dev->lnode_list[i].lnode_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].lnode_ab[DUAL_CTX]);
+				bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
 			} else {
 				max_ib = max(max_ib,
-					bcm_dev->lnode_list[i].lnode_ib[ctx]);
+					bcm_dev->lnode_list[j].lnode_ib[ctx]);
 				max_ab = max(max_ab,
-					bcm_dev->lnode_list[i].lnode_ab[ctx]);
+					bcm_dev->lnode_list[j].lnode_ab[ctx]);
 			}
 		}
 		bcm_dev->node_bw[ctx].max_ab = max_ab;
@@ -540,8 +564,18 @@
 		max_ab = msm_bus_div64(max_ab, bcm_dev->bcmdev->unit_size);
 		max_ib = msm_bus_div64(max_ib, bcm_dev->bcmdev->unit_size);
 
-		bcm_dev->node_vec[ctx].vec_a = max_ab;
-		bcm_dev->node_vec[ctx].vec_b = max_ib;
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_ab, max_ib,
+					&bcm_dev->node_vec[ctx].vec_a,
+					&bcm_dev->node_vec[ctx].vec_b,
+					&cur_rsc->rscdev->acv[ctx], ctx);
+
+		} else {
+			bcm_dev->node_vec[ctx].vec_a = max_ab;
+			bcm_dev->node_vec[ctx].vec_b = max_ib;
+		}
 	}
 exit_bcm_update_bus_req:
 	return;
@@ -551,7 +585,8 @@
 {
 	struct msm_bus_node_device_type *cur_dev = NULL;
 	struct msm_bus_node_device_type *bcm_dev = NULL;
-	int i;
+	struct msm_bus_node_device_type *cur_rsc = NULL;
+	int i, j;
 	uint64_t max_query_ib = 0;
 	uint64_t max_query_ab = 0;
 	int lnode_idx = 0;
@@ -571,7 +606,7 @@
 		if (!bcm_dev)
 			goto exit_bcm_query_bus_req;
 
-		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		lnode_idx = cur_dev->node_info->bcm_req_idx[i];
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ib[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].max_query_ib *
 					(uint64_t)bcm_dev->bcmdev->width,
@@ -583,25 +618,25 @@
 				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
-		for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
 			if (ctx == ACTIVE_CTX) {
 				max_query_ib = max(max_query_ib,
-				max(bcm_dev->lnode_list[i].
+				max(bcm_dev->lnode_list[j].
 					lnode_query_ib[ACTIVE_CTX],
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 					lnode_query_ib[DUAL_CTX]));
 
 				max_query_ab = max(max_query_ab,
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[ACTIVE_CTX] +
-				bcm_dev->lnode_list[i].
+				bcm_dev->lnode_list[j].
 						lnode_query_ab[DUAL_CTX]);
 			} else {
 				max_query_ib = max(max_query_ib,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ib[ctx]);
 				max_query_ab = max(max_query_ab,
-					bcm_dev->lnode_list[i].
+					bcm_dev->lnode_list[j].
 						lnode_query_ab[ctx]);
 			}
 		}
@@ -611,6 +646,18 @@
 		max_query_ib = msm_bus_div64(max_query_ib,
 						bcm_dev->bcmdev->unit_size);
 
+		if (bcm_dev->node_info->id == MSM_BUS_BCM_ACV) {
+			cur_rsc = to_msm_bus_node(bcm_dev->node_info->
+						rsc_devs[0]);
+			bcm_update_acv_req(cur_rsc, max_query_ab, max_query_ib,
+					&bcm_dev->node_vec[ctx].query_vec_a,
+					&bcm_dev->node_vec[ctx].query_vec_b,
+					&cur_rsc->rscdev->query_acv[ctx], ctx);
+		} else {
+			bcm_dev->node_vec[ctx].query_vec_a = max_query_ab;
+			bcm_dev->node_vec[ctx].query_vec_b = max_query_ib;
+		}
+
 		bcm_dev->node_bw[ctx].max_query_ab = max_query_ab;
 		bcm_dev->node_bw[ctx].max_query_ib = max_query_ib;
 	}
@@ -659,8 +706,6 @@
 	struct msm_bus_node_device_type *cur_rsc = NULL;
 	int ret = 0;
 
-	rt_mutex_lock(&msm_bus_adhoc_lock);
-
 	bus_dev = to_msm_bus_node(dev);
 	if (bus_dev->node_info->is_bcm_dev ||
 		bus_dev->node_info->is_fab_dev ||
@@ -683,7 +728,6 @@
 	}
 
 exit_bcm_remove_handoff_req:
-	rt_mutex_unlock(&msm_bus_adhoc_lock);
 	return ret;
 }
 
@@ -810,14 +854,18 @@
 	INIT_LIST_HEAD(&commit_list);
 }
 
-void commit_late_init_data(void)
+int commit_late_init_data(void)
 {
+	int rc;
 	rt_mutex_lock(&msm_bus_adhoc_lock);
+	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						bcm_remove_handoff_req);
 
 	msm_bus_commit_data(&late_init_clist);
 	INIT_LIST_HEAD(&late_init_clist);
 
 	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return rc;
 }
 
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
index c950367..144b1a1 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -418,8 +418,8 @@
 				commit = true;
 			}
 			tcs_cmd_gen(cur_bcm, &cmdlist_active[k],
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ib,
-				cur_bcm->node_bw[ACTIVE_CTX].max_query_ab,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_a,
+				cur_bcm->node_vec[ACTIVE_CTX].query_vec_b,
 								commit);
 			k++;
 		}
@@ -433,26 +433,30 @@
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->node_info->num_rsc_devs)
-		goto exit_bcm_clist_add;
+		if (!cur_bcm->node_info->num_rsc_devs)
+			goto exit_bcm_clist_add;
 
-	if (!cur_rsc)
-		cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]);
+		if (!cur_rsc)
+			cur_rsc = to_msm_bus_node(cur_bcm->node_info->
+								rsc_devs[0]);
 
-	if (!cur_bcm->dirty) {
-		list_add_tail(&cur_bcm->link,
+		if (!cur_bcm->dirty) {
+			list_add_tail(&cur_bcm->link,
 					&cur_rsc->rscdev->bcm_clist[cur_vcd]);
-		cur_bcm->dirty = true;
+			cur_bcm->dirty = true;
+		}
+		cur_bcm->updated = false;
 	}
-	cur_bcm->updated = false;
 
 exit_bcm_clist_add:
 	return ret;
@@ -462,17 +466,20 @@
 {
 	int ret = 0;
 	int cur_vcd = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_query_list_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
-	cur_vcd = cur_bcm->bcmdev->clk_domain;
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		cur_vcd = cur_bcm->bcmdev->clk_domain;
 
-	if (!cur_bcm->query_dirty)
-		list_add_tail(&cur_bcm->query_link,
+		if (!cur_bcm->query_dirty)
+			list_add_tail(&cur_bcm->query_link,
 					&bcm_query_list_inorder[cur_vcd]);
+	}
 
 exit_bcm_query_list_add:
 	return ret;
@@ -481,20 +488,23 @@
 static int bcm_clist_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_clean;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
+		if (cur_bcm->node_vec[DUAL_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_a == 0 &&
 			cur_bcm->node_vec[DUAL_CTX].vec_b == 0 &&
 			cur_bcm->node_vec[ACTIVE_CTX].vec_b == 0 &&
 			init_time == false) {
-		cur_bcm->dirty = false;
-		list_del_init(&cur_bcm->link);
+			cur_bcm->dirty = false;
+			list_del_init(&cur_bcm->link);
+		}
 	}
 
 exit_bcm_clist_clean:
@@ -504,15 +514,18 @@
 static int bcm_query_list_clean(struct msm_bus_node_device_type *cur_dev)
 {
 	int ret = 0;
+	int i = 0;
 	struct msm_bus_node_device_type *cur_bcm = NULL;
 
 	if (!cur_dev->node_info->num_bcm_devs)
 		goto exit_bcm_clist_add;
 
-	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
 
-	cur_bcm->query_dirty = false;
-	list_del_init(&cur_bcm->query_link);
+		cur_bcm->query_dirty = false;
+		list_del_init(&cur_bcm->query_link);
+	}
 
 exit_bcm_clist_add:
 	return ret;
@@ -1081,7 +1094,7 @@
 static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
 				struct device *bus_dev)
 {
-	int ret = 0;
+	int ret = 0, i = 0;
 	struct msm_bus_node_info_type *node_info = NULL;
 	struct msm_bus_node_info_type *pdata_node_info = NULL;
 	struct msm_bus_node_device_type *bus_node = NULL;
@@ -1100,7 +1113,17 @@
 
 	node_info->name = pdata_node_info->name;
 	node_info->id =  pdata_node_info->id;
-	node_info->bcm_req_idx = -1;
+	node_info->bcm_req_idx = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_req_idx) {
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	for (i = 0; i < pdata_node_info->num_bcm_devs; i++)
+		node_info->bcm_req_idx[i] = -1;
+
 	node_info->bus_device_id = pdata_node_info->bus_device_id;
 	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
 	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
@@ -1666,15 +1689,9 @@
 
 int __init msm_bus_device_late_init(void)
 {
-	int rc;
-
 	MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
 	init_time = false;
-	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
-						bcm_remove_handoff_req);
-
-	commit_late_init_data();
-	return rc;
+	return commit_late_init_data();
 }
 subsys_initcall(msm_bus_device_init_driver);
 late_initcall_sync(msm_bus_device_late_init);
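Several hunks in this file replace single bcm_devs[0] accesses with loops over num_bcm_devs, and bcm_req_idx grows from a scalar into a per-BCM array allocated in msm_bus_copy_node_info(). A compact sketch of that allocation pattern; demo_node_info is an illustrative stand-in, and devm_kcalloc() is used here as the overflow-checked equivalent of the diff's devm_kzalloc(sizeof(int) * n, ...):

#include <linux/device.h>
#include <linux/slab.h>

struct demo_node_info {
	unsigned int num_bcm_devs;
	int *bcm_req_idx;		/* one request slot per attached BCM */
};

static int demo_alloc_bcm_req_idx(struct device *bus_dev,
				  struct demo_node_info *info)
{
	unsigned int i;

	info->bcm_req_idx = devm_kcalloc(bus_dev, info->num_bcm_devs,
					 sizeof(*info->bcm_req_idx),
					 GFP_KERNEL);
	if (!info->bcm_req_idx)
		return -ENOMEM;

	/* -1 is the same sentinel the diff uses for "no request built yet" */
	for (i = 0; i < info->num_bcm_devs; i++)
		info->bcm_req_idx[i] = -1;

	return 0;
}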
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
index fad7afa..cd5281a 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -71,12 +71,16 @@
 struct nodevector {
 	uint64_t vec_a;
 	uint64_t vec_b;
+	uint64_t query_vec_a;
+	uint64_t query_vec_b;
 };
 
 struct msm_bus_rsc_device_type {
 	struct rpmh_client *mbox;
 	struct list_head bcm_clist[VCD_MAX_CNT];
 	int req_state;
+	uint32_t acv[NUM_CTX];
+	uint32_t query_acv[NUM_CTX];
 };
 
 struct msm_bus_bcm_device_type {
@@ -157,7 +161,7 @@
 	struct device **black_connections;
 	struct device **bcm_devs;
 	struct device **rsc_devs;
-	int bcm_req_idx;
+	int *bcm_req_idx;
 	unsigned int bus_device_id;
 	struct device *bus_device;
 	struct rule_update_path_info rule;
@@ -201,7 +205,7 @@
 				int throttle_en, uint64_t lim_bw);
 int msm_bus_commit_data(struct list_head *clist);
 int bcm_remove_handoff_req(struct device *dev, void *data);
-void commit_late_init_data(void);
+int commit_late_init_data(void);
 int msm_bus_query_gen(struct list_head *qlist,
 				struct msm_bus_tcs_usecase *tcs_usecase);
 void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 11e1b4d..1f28712 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -464,6 +464,8 @@
 	if (region == NULL) {
 		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
 					size);
+		priv->region_start = 0;
+		priv->region_end = 0;
 		return -ENOMEM;
 	}
 
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index df0c609c..7984dfe 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -37,7 +37,7 @@
 
 #define MAX_VDD_MSS_UV		1150000
 #define PROXY_TIMEOUT_MS	10000
-#define MAX_SSR_REASON_LEN	81U
+#define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
 
 #define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
@@ -276,6 +276,10 @@
 	if (!res) {
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 							"restart_reg_sec");
+		if (!res) {
+			dev_err(&pdev->dev, "No restart register defined\n");
+			return -ENOMEM;
+		}
 		q6->restart_reg_sec = true;
 	}
 
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 414c123..2320fea 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -626,9 +626,11 @@
 	 * If pdr registration failed, register clients on next service
 	 * Do in late init to ensure that SSR subsystem is initialized
 	 */
+	mutex_lock(&notifier_mutex);
 	if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
 		audio_notifer_reg_all_clients();
 
+	mutex_unlock(&notifier_mutex);
 	return 0;
 }
 late_initcall(audio_notifier_late_init);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 5ca0fe5..306510f 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -34,17 +34,21 @@
 #define RPMH_MAX_REQ_IN_BATCH		10
 
 #define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name)	\
-	struct rpmh_msg name = {		\
-		.msg = { 0 },			\
-		.msg.state = s,			\
-		.msg.is_complete = true,	\
-		.msg.payload = name.cmd,	\
-		.msg.num_payload = 0,		\
-		.cmd = { { 0 } },		\
-		.waitq = q,			\
-		.wait_count = c,		\
-		.rc = rc,			\
-		.bit = -1,			\
+	struct rpmh_msg name = {			\
+		.msg = {				\
+			.state = s,			\
+			.payload = name.cmd,		\
+			.num_payload = 0,		\
+			.is_read = false,		\
+			.is_control = false,		\
+			.is_complete = true,		\
+			.invalidate = false,		\
+		},					\
+		.cmd = { { 0 } },			\
+		.completion = q,			\
+		.wait_count = c,			\
+		.rc = rc,				\
+		.bit = -1,				\
 	}
 
 struct rpmh_req {
@@ -57,7 +61,7 @@
 struct rpmh_msg {
 	struct tcs_mbox_msg msg;
 	struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
-	wait_queue_head_t *waitq;
+	struct completion *completion;
 	atomic_t *wait_count;
 	struct rpmh_client *rc;
 	int bit;
@@ -106,21 +110,31 @@
 	return msg;
 }
 
+static void free_msg_to_pool(struct rpmh_msg *rpm_msg)
+{
+	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+	unsigned long flags;
+
+	/* If we allocated the pool, set it as available */
+	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+		spin_lock_irqsave(&rpm->lock, flags);
+		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+		spin_unlock_irqrestore(&rpm->lock, flags);
+	}
+}
+
 static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
 {
 	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
 
 	atomic_dec(rpm_msg->wait_count);
-	wake_up(rpm_msg->waitq);
 }
 
 static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
 {
 	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
-	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
 	atomic_t *wc = rpm_msg->wait_count;
-	wait_queue_head_t *waitq = rpm_msg->waitq;
-	unsigned long flags;
+	struct completion *compl = rpm_msg->completion;
 
 	rpm_msg->err = r;
 
@@ -144,18 +158,12 @@
 	 * into an issue that the stack allocated parent object may be
 	 * invalid before we can check the ->bit value.
 	 */
-
-	/* If we allocated the pool, set it as available */
-	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
-		spin_lock_irqsave(&rpm->lock, flags);
-		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
-		spin_unlock_irqrestore(&rpm->lock, flags);
-	}
+	free_msg_to_pool(rpm_msg);
 
 	/* Signal the blocking thread we are done */
 	if (wc && atomic_dec_and_test(wc))
-		if (waitq)
-			wake_up(waitq);
+		if (compl)
+			complete(compl);
 }
 
 static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
@@ -312,9 +320,9 @@
 int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
 			u32 addr, u32 data)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(1);
-	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
 	int ret;
 
 	if (IS_ERR_OR_NULL(rc))
@@ -333,7 +341,7 @@
 	if (ret < 0)
 		return ret;
 
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	return rpm_msg.err;
 }
@@ -408,9 +416,9 @@
 int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
 			struct tcs_cmd *cmd, int n)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(1);
-	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &compl, &wait_count, rpm_msg);
 	int ret;
 
 	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
@@ -428,7 +436,7 @@
 	if (ret)
 		return ret;
 
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	return rpm_msg.err;
 }
@@ -454,7 +462,7 @@
 			struct tcs_cmd *cmd, int *n)
 {
 	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
 	int count = 0;
 	int ret, i, j, k;
@@ -507,9 +515,8 @@
 	for (i = 0; i < count; i++) {
 		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i]);
 		if (IS_ERR_OR_NULL(rpm_msg[i])) {
-			/* Clean up our call by spoofing tx_done */
 			for (j = 0 ; j < i; j++)
-				rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, 0);
+				free_msg_to_pool(rpm_msg[j]);
 			return PTR_ERR(rpm_msg[i]);
 		}
 		cmd += n[i];
@@ -520,7 +527,7 @@
 		might_sleep();
 		atomic_set(&wait_count, count);
 		for (i = 0; i < count; i++) {
-			rpm_msg[i]->waitq = &waitq;
+			rpm_msg[i]->completion = &compl;
 			rpm_msg[i]->wait_count = &wait_count;
 			/* Bypass caching and write to mailbox directly */
 			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
@@ -530,15 +537,17 @@
 				break;
 			}
 		}
-		wait_event(waitq, atomic_read(&wait_count) == (count - i));
+		/* For those unsent requests, spoof tx_done */
+		for (j = i; j < count; j++)
+			rpmh_tx_done(&rc->client, &rpm_msg[j]->msg, ret);
+		wait_for_completion(&compl);
 	} else {
 		/* Send Sleep requests to the controller, expect no response */
 		for (i = 0; i < count; i++) {
-			rpm_msg[i]->waitq = NULL;
+			rpm_msg[i]->completion = NULL;
 			ret = mbox_send_controller_data(rc->chan,
 						&rpm_msg[i]->msg);
-			/* Clean up our call by spoofing tx_done */
-			rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
+			free_msg_to_pool(rpm_msg[i]);
 		}
 		return 0;
 	}
@@ -660,10 +669,10 @@
 int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
 {
 	int ret;
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	DECLARE_COMPLETION_ONSTACK(compl);
 	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
 	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
-				&waitq, &wait_count, rpm_msg);
+				&compl, &wait_count, rpm_msg);
 
 	if (IS_ERR_OR_NULL(rc) || !resp)
 		return -EINVAL;
@@ -684,7 +693,7 @@
 		return ret;
 
 	/* Wait until the response is received from RPMH */
-	wait_event(waitq, atomic_read(&wait_count) == 0);
+	wait_for_completion(&compl);
 
 	/* Read the data back from the tcs_mbox_msg structure */
 	*resp = rpm_msg.cmd[0].data;
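The rpmh.c hunks above replace the open-coded waitqueue plus atomic counter with a struct completion: rpmh_tx_done() calls complete() once the last outstanding message in a batch finishes, and the sender blocks in wait_for_completion(). A stripped-down sketch of that pattern with illustrative demo_* types (the real message also carries the tcs_mbox_msg and command payload):

#include <linux/atomic.h>
#include <linux/completion.h>

struct demo_msg {
	struct completion *compl;
	atomic_t *wait_count;
	int err;
};

/* called from the mailbox tx-done path */
static void demo_tx_done(struct demo_msg *msg, int err)
{
	msg->err = err;
	/* signal the waiter only once every message in the batch is done */
	if (msg->wait_count && atomic_dec_and_test(msg->wait_count))
		if (msg->compl)
			complete(msg->compl);
}

static int demo_send_sync(struct demo_msg *msg)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	atomic_t wait_count = ATOMIC_INIT(1);

	msg->compl = &compl;
	msg->wait_count = &wait_count;

	/* ... hand msg to the controller; demo_tx_done() fires later ... */

	wait_for_completion(&compl);
	return msg->err;
}

A completion keeps its own waitqueue and done count, so a completion that fires before the caller reaches wait_for_completion() is not lost, and the waiter no longer has to re-derive the wake condition from the counter.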
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 62e2384..221ae0c 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -373,13 +373,6 @@
 	mutex_unlock(&qmi_client_release_lock);
 	pr_info("Connection established between QMI handle and %d service\n",
 							data->instance_id);
-	/* Register for indication messages about service */
-	rc = qmi_register_ind_cb(data->clnt_handle, root_service_service_ind_cb,
-							(void *)data);
-	if (rc < 0)
-		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
-							data->instance_id, rc);
-
 	mutex_lock(&notif_add_lock);
 	mutex_lock(&service_list_lock);
 	list_for_each_entry(service_notif, &service_list, list) {
@@ -402,6 +395,12 @@
 	}
 	mutex_unlock(&service_list_lock);
 	mutex_unlock(&notif_add_lock);
+	/* Register for indication messages about service */
+	rc = qmi_register_ind_cb(data->clnt_handle,
+		root_service_service_ind_cb, (void *)data);
+	if (rc < 0)
+		pr_err("Indication callback register failed(instance-id: %d) rc:%d\n",
+							data->instance_id, rc);
 }
 
 static void root_service_service_exit(struct qmi_client_info *data,
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 119ede3..c252040 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -36,6 +36,7 @@
 #include <soc/qcom/boot_stats.h>
 
 #define BUILD_ID_LENGTH 32
+#define CHIP_ID_LENGTH 32
 #define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
 #define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
 #define SMEM_IMAGE_VERSION_SIZE 4096
@@ -199,6 +200,20 @@
 struct socinfo_v0_13 {
 	struct socinfo_v0_12 v0_12;
 	uint32_t nproduct_id;
+	char chip_name[CHIP_ID_LENGTH];
+};
+
+struct socinfo_v0_14 {
+	struct socinfo_v0_13 v0_13;
+	uint32_t num_clusters;
+	uint32_t ncluster_array_offset;
+	uint32_t num_defective_parts;
+	uint32_t ndefective_parts_array_offset;
+};
+
+struct socinfo_v0_15 {
+	struct socinfo_v0_14 v0_14;
+	uint32_t nmodem_supported;
 };
 
 static union {
@@ -215,10 +230,12 @@
 	struct socinfo_v0_11 v0_11;
 	struct socinfo_v0_12 v0_12;
 	struct socinfo_v0_13 v0_13;
+	struct socinfo_v0_14 v0_14;
+	struct socinfo_v0_15 v0_15;
 } *socinfo;
 
 /* max socinfo format version supported */
-#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 13)
+#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 15)
 
 static struct msm_soc_info cpu_of_id[] = {
 
@@ -547,12 +564,12 @@
 	/* sdm845 ID */
 	[321] = {MSM_CPU_SDM845, "SDM845"},
 
-	/* Bat ID */
-	[328] = {MSM_CPU_SDM830, "SDM830"},
-
 	/* sdxpoorwills ID */
 	[334] = {SDX_CPU_SDXPOORWILLS, "SDXPOORWILLS"},
 
+	/* SDM670 ID */
+	[336] = {MSM_CPU_SDM670, "SDM670"},
+
 	/* Uninitialized IDs are not known to run Linux.
 	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
 	 * considered as unknown CPU.
@@ -705,6 +722,14 @@
 		: 0;
 }
 
+static char *socinfo_get_chip_name(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 13) ?
+			socinfo->v0_13.chip_name : "N/A")
+		: "N/A";
+}
+
 static uint32_t socinfo_get_nproduct_id(void)
 {
 	return socinfo ?
@@ -713,6 +738,46 @@
 		: 0;
 }
 
+static uint32_t socinfo_get_num_clusters(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_clusters : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ncluster_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ncluster_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_num_defective_parts(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.num_defective_parts : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_ndefective_parts_array_offset(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 14) ?
+			socinfo->v0_14.ndefective_parts_array_offset : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_nmodem_supported(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 15) ?
+			socinfo->v0_15.nmodem_supported : 0)
+		: 0;
+}
+
 enum pmic_model socinfo_get_pmic_model(void)
 {
 	return socinfo ?
@@ -890,6 +955,15 @@
 }
 
 static ssize_t
+msm_get_chip_name(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			socinfo_get_chip_name());
+}
+
+static ssize_t
 msm_get_nproduct_id(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -899,6 +973,51 @@
 }
 
 static ssize_t
+msm_get_num_clusters(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_clusters());
+}
+
+static ssize_t
+msm_get_ncluster_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ncluster_array_offset());
+}
+
+static ssize_t
+msm_get_num_defective_parts(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_num_defective_parts());
+}
+
+static ssize_t
+msm_get_ndefective_parts_array_offset(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_ndefective_parts_array_offset());
+}
+
+static ssize_t
+msm_get_nmodem_supported(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_nmodem_supported());
+}
+
+static ssize_t
 msm_get_pmic_model(struct device *dev,
 			struct device_attribute *attr,
 			char *buf)
@@ -1146,10 +1265,34 @@
 	__ATTR(raw_device_number, S_IRUGO,
 			msm_get_raw_device_number, NULL);
 
+static struct device_attribute msm_soc_attr_chip_name =
+	__ATTR(chip_name, 0444,
+			msm_get_chip_name, NULL);
+
 static struct device_attribute msm_soc_attr_nproduct_id =
 	__ATTR(nproduct_id, 0444,
 			msm_get_nproduct_id, NULL);
 
+static struct device_attribute msm_soc_attr_num_clusters =
+	__ATTR(num_clusters, 0444,
+			msm_get_num_clusters, NULL);
+
+static struct device_attribute msm_soc_attr_ncluster_array_offset =
+	__ATTR(ncluster_array_offset, 0444,
+			msm_get_ncluster_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_num_defective_parts =
+	__ATTR(num_defective_parts, 0444,
+			msm_get_num_defective_parts, NULL);
+
+static struct device_attribute msm_soc_attr_ndefective_parts_array_offset =
+	__ATTR(ndefective_parts_array_offset, 0444,
+			msm_get_ndefective_parts_array_offset, NULL);
+
+static struct device_attribute msm_soc_attr_nmodem_supported =
+	__ATTR(nmodem_supported, 0444,
+			msm_get_nmodem_supported, NULL);
+
 static struct device_attribute msm_soc_attr_pmic_model =
 	__ATTR(pmic_model, S_IRUGO,
 			msm_get_pmic_model, NULL);
@@ -1255,9 +1398,9 @@
 		dummy_socinfo.id = 321;
 		strlcpy(dummy_socinfo.build_id, "sdm845 - ",
 			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_sdm830()) {
-		dummy_socinfo.id = 328;
-		strlcpy(dummy_socinfo.build_id, "sdm830 - ",
+	} else if (early_machine_is_sdm670()) {
+		dummy_socinfo.id = 336;
+		strlcpy(dummy_socinfo.build_id, "sdm670 - ",
 			sizeof(dummy_socinfo.build_id));
 	} else if (early_machine_is_sdxpoorwills()) {
 		dummy_socinfo.id = 334;
@@ -1280,9 +1423,23 @@
 	device_create_file(msm_soc_device, &images);
 
 	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 15):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_nmodem_supported);
+	case SOCINFO_VERSION(0, 14):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_clusters);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_ncluster_array_offset);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_num_defective_parts);
+		device_create_file(msm_soc_device,
+				&msm_soc_attr_ndefective_parts_array_offset);
 	case SOCINFO_VERSION(0, 13):
 		 device_create_file(msm_soc_device,
 					&msm_soc_attr_nproduct_id);
+		 device_create_file(msm_soc_device,
+					&msm_soc_attr_chip_name);
 	case SOCINFO_VERSION(0, 12):
 		device_create_file(msm_soc_device,
 					&msm_soc_attr_chip_family);
@@ -1522,6 +1679,53 @@
 			socinfo->v0_13.nproduct_id);
 		break;
 
+	case SOCINFO_VERSION(0, 14):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset);
+		break;
+
+	case SOCINFO_VERSION(0, 15):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x nproduct_id=0x%x num_clusters=0x%x ncluster_array_offset=0x%x num_defective_parts=0x%x ndefective_parts_array_offset=0x%x nmodem_supported=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number,
+			socinfo->v0_13.nproduct_id,
+			socinfo->v0_14.num_clusters,
+			socinfo->v0_14.ncluster_array_offset,
+			socinfo->v0_14.num_defective_parts,
+			socinfo->v0_14.ndefective_parts_array_offset,
+			socinfo->v0_15.nmodem_supported);
+		break;
+
 	default:
 		pr_err("Unknown format found: v%u.%u\n", f_maj, f_min);
 		break;
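Each new socinfo format wraps the previous struct, and every new field is gated on socinfo_format so an older SMEM image is never read past the bytes it actually wrote; the sysfs switch above falls through on purpose, so a v0.15 part also creates every attribute defined for v0.14 and below. A minimal restatement of the accessor shape (demo_get_num_clusters is an illustrative name; it assumes the file-scope socinfo and socinfo_format shown in the hunks above):

static uint32_t demo_get_num_clusters(void)
{
	/* reject a missing SMEM item and a format too old to carry the
	 * field; older images report 0 rather than whatever happens to
	 * follow the struct they populated
	 */
	if (!socinfo || socinfo_format < SOCINFO_VERSION(0, 14))
		return 0;

	return socinfo->v0_14.num_clusters;
}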
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index f381f16..1c7c4a1 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -53,28 +53,25 @@
 /* Uncomment the line below to test spcom against modem rather than SP */
 /* #define SPCOM_TEST_HLOS_WITH_MODEM 1 */
 
-/* Uncomment the line below to enable debug messages */
-/* #define DEBUG 1 */
-
 #define pr_fmt(fmt)	"spcom [%s]: " fmt, __func__
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/errno.h>
-#include <linux/printk.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/poll.h>
-#include <linux/dma-mapping.h>
+#include <linux/kernel.h>	/* min() */
+#include <linux/module.h>	/* MODULE_LICENSE */
+#include <linux/device.h>	/* class_create() */
+#include <linux/slab.h>	/* kzalloc() */
+#include <linux/fs.h>		/* file_operations */
+#include <linux/cdev.h>	/* cdev_add() */
+#include <linux/errno.h>	/* EINVAL, ETIMEDOUT */
+#include <linux/printk.h>	/* pr_err() */
+#include <linux/bitops.h>	/* BIT(x) */
+#include <linux/completion.h>	/* wait_for_completion_timeout() */
+#include <linux/poll.h>	/* POLLOUT */
+#include <linux/dma-mapping.h>	/* dma_alloc_coherent() */
 #include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/of.h>		/* of_property_count_strings() */
 #include <linux/workqueue.h>
-#include <linux/delay.h>
-#include <linux/msm_ion.h>
+#include <linux/delay.h>	/* msleep() */
+#include <linux/msm_ion.h>	/* msm_ion_client_create() */
 
 #include <soc/qcom/glink.h>
 #include <soc/qcom/smem.h>
@@ -82,7 +79,7 @@
 
 #include <uapi/linux/spcom.h>
 
-#include "glink_private.h"
+#include "glink_private.h" /* glink_ssr() */
 
 /* "SPCM" string */
 #define SPCOM_MAGIC_ID	((uint32_t)(0x5350434D))
@@ -220,9 +217,9 @@
 	bool tx_abort;
 
 	/* rx data info */
-	int rx_buf_size;	/* allocated rx buffer size */
+	size_t rx_buf_size;	/* allocated rx buffer size */
 	bool rx_buf_ready;
-	int actual_rx_size;	/* actual data size received */
+	size_t actual_rx_size;	/* actual data size received */
 	const void *glink_rx_buf;
 
 	/* ION lock/unlock support */
@@ -276,6 +273,7 @@
 				  const void *pkt_priv);
 static struct spcom_channel *spcom_find_channel_by_name(const char *name);
 static int spcom_unlock_ion_buf(struct spcom_channel *ch, int fd);
+static void spcom_rx_abort_pending_server(void);
 
 /**
  * spcom_is_ready() - driver is initialized and ready.
@@ -301,6 +299,10 @@
  */
 static inline bool spcom_is_channel_connected(struct spcom_channel *ch)
 {
+	/* Channel must be open before it gets connected */
+	if (!spcom_is_channel_open(ch))
+		return false;
+
 	return (ch->glink_state == GLINK_CONNECTED);
 }
 
@@ -316,6 +318,10 @@
 {
 	int i;
 	int ret;
+	static bool is_predefined_created;
+
+	if (is_predefined_created)
+		return 0;
 
 	for (i = 0; i < SPCOM_MAX_CHANNELS; i++) {
 		const char *name = spcom_dev->predefined_ch_name[i];
@@ -330,6 +336,8 @@
 		}
 	}
 
+	is_predefined_created = true;
+
 	return 0;
 }
 
@@ -352,6 +360,16 @@
 	struct spcom_channel *ch = NULL;
 	const char *ch_name = "sp_kernel";
 
+	if (!cb_info) {
+		pr_err("invalid NULL cb_info param\n");
+		return;
+	}
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return;
+	}
+
 	spcom_dev->link_state = cb_info->link_state;
 
 	pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
@@ -375,6 +393,9 @@
 			pr_err("failed to find channel [%s].\n", ch_name);
 		else
 			spcom_unlock_ion_buf(ch, SPCOM_ION_FD_UNLOCK_ALL);
+
+		pr_debug("Rx-Abort pending servers.\n");
+		spcom_rx_abort_pending_server();
 		break;
 	default:
 		pr_err("unknown link_state [%d].\n", cb_info->link_state);
@@ -396,13 +417,17 @@
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
 	if (!ch) {
-		pr_err("invalid ch parameter.\n");
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
 		return;
 	}
 
-	pr_debug("ch [%s] rx size [%d].\n", ch->name, (int) size);
+	pr_debug("ch [%s] rx size [%zu]\n", ch->name, size);
 
-	ch->actual_rx_size = (int) size;
+	ch->actual_rx_size = size;
 	ch->glink_rx_buf = (void *) buf;
 
 	complete_all(&ch->rx_done);
@@ -421,7 +446,11 @@
 	int *tx_buf = (int *) buf;
 
 	if (!ch) {
-		pr_err("invalid ch parameter.\n");
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+	if (!buf) {
+		pr_err("invalid NULL buf param\n");
 		return;
 	}
 
@@ -446,11 +475,15 @@
 	int ret;
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	switch (event) {
 	case GLINK_CONNECTED:
 		pr_debug("GLINK_CONNECTED, ch name [%s].\n", ch->name);
-		complete_all(&ch->connect);
-
+		ch->glink_state = event;
 		/*
 		 * if spcom_notify_state() is called within glink_open()
 		 * then ch->glink_handle is not updated yet.
@@ -466,10 +499,11 @@
 		if (ret) {
 			pr_err("glink_queue_rx_intent() err [%d]\n", ret);
 		} else {
-			pr_debug("rx buf is ready, size [%d].\n",
+			pr_debug("rx buf is ready, size [%zu].\n",
 				 ch->rx_buf_size);
 			ch->rx_buf_ready = true;
 		}
+		complete_all(&ch->connect);
 		break;
 	case GLINK_LOCAL_DISCONNECTED:
 		/*
@@ -477,6 +511,7 @@
 		 * only after *both* sides closed the channel.
 		 */
 		pr_debug("GLINK_LOCAL_DISCONNECTED, ch [%s].\n", ch->name);
+		ch->glink_state = event;
 		complete_all(&ch->disconnect);
 		break;
 	case GLINK_REMOTE_DISCONNECTED:
@@ -487,6 +522,8 @@
 		 */
 		pr_err("GLINK_REMOTE_DISCONNECTED, ch [%s].\n", ch->name);
 
+		ch->glink_state = event;
+
 		/*
 		 * Abort any blocking read() operation.
 		 * The glink notification might be after REMOTE_DISCONNECT.
@@ -504,8 +541,6 @@
 		       (int) event, ch->name);
 		return;
 	}
-
-	ch->glink_state = event;
 }
 
 /**
@@ -539,9 +574,14 @@
 {
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	pr_debug("ch [%s] pending rx aborted.\n", ch->name);
 
-	if (spcom_is_channel_connected(ch) && (!ch->rx_abort)) {
+	if (spcom_is_channel_open(ch) && (!ch->rx_abort)) {
 		ch->rx_abort = true;
 		complete_all(&ch->rx_done);
 	}
@@ -559,6 +599,11 @@
 {
 	struct spcom_channel *ch = (struct spcom_channel *) priv;
 
+	if (!ch) {
+		pr_err("invalid NULL channel param\n");
+		return;
+	}
+
 	pr_debug("ch [%s] pending tx aborted.\n", ch->name);
 
 	if (spcom_is_channel_connected(ch) && (!ch->tx_abort)) {
@@ -672,12 +717,11 @@
 
 	/* only one client/server may use the channel */
 	if (ch->ref_count) {
-		pr_err("channel [%s] already in use.\n", name);
-		goto exit_err;
+		pr_err("channel [%s] is BUSY, already in use by pid [%d].\n",
+			name, ch->pid);
+		mutex_unlock(&ch->lock);
+		return -EBUSY;
 	}
-	ch->ref_count++;
-	ch->pid = current_pid();
-	ch->txn_id = INITIAL_TXN_ID;
 
 	pr_debug("ch [%s] opened by PID [%d], count [%d]\n",
 		 name, ch->pid, ch->ref_count);
@@ -702,7 +746,12 @@
 	} else {
 		pr_debug("glink_open [%s] ok.\n", name);
 	}
+
+	/* init channel context after successful open */
 	ch->glink_handle = handle;
+	ch->ref_count++;
+	ch->pid = current_pid();
+	ch->txn_id = INITIAL_TXN_ID;
 
 	pr_debug("Wait for connection on channel [%s] timeout_msec [%d].\n",
 		 name, timeout_msec);
@@ -776,6 +825,8 @@
  * @size: buffer size
  *
  * ACK is expected within a very short time (few msec).
+ *
+ * Return: 0 on successful operation, negative value otherwise.
  */
 static int spcom_tx(struct spcom_channel *ch,
 		    void *buf,
@@ -840,13 +891,15 @@
  * @size: buffer size
  *
  * ACK is expected within a very short time (few msec).
+ *
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_rx(struct spcom_channel *ch,
 		     void *buf,
 		     uint32_t size,
 		     uint32_t timeout_msec)
 {
-	int ret;
+	int ret = -1;
 	unsigned long jiffies = msecs_to_jiffies(timeout_msec);
 	long timeleft = 1;
 
@@ -854,7 +907,7 @@
 
 	/* check for already pending data */
 	if (ch->actual_rx_size) {
-		pr_debug("already pending data size [%d].\n",
+		pr_debug("already pending data size [%zu]\n",
 			 ch->actual_rx_size);
 		goto copy_buf;
 	}
@@ -871,23 +924,24 @@
 
 	if (timeleft == 0) {
 		pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
-		goto exit_err;
+		mutex_unlock(&ch->lock);
+		return -ETIMEDOUT;
 	} else if (ch->rx_abort) {
-		pr_err("rx aborted.\n");
-		goto exit_err;
+		mutex_unlock(&ch->lock);
+		return -ERESTART; /* probably SSR */
 	} else if (ch->actual_rx_size) {
-		pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
+		pr_debug("actual_rx_size is [%zu]\n", ch->actual_rx_size);
 	} else {
 		pr_err("actual_rx_size is zero.\n");
 		goto exit_err;
 	}
 
+copy_buf:
 	if (!ch->glink_rx_buf) {
 		pr_err("invalid glink_rx_buf.\n");
 		goto exit_err;
 	}
 
-copy_buf:
 	/* Copy from glink buffer to spcom buffer */
 	size = min_t(int, ch->actual_rx_size, size);
 	memcpy(buf, ch->glink_rx_buf, size);
@@ -905,7 +959,7 @@
 		pr_err("glink_queue_rx_intent() failed, ret [%d]", ret);
 		goto exit_err;
 	} else {
-		pr_debug("queue rx_buf, size [%d].\n", ch->rx_buf_size);
+		pr_debug("queue rx_buf, size [%zu]\n", ch->rx_buf_size);
 	}
 
 	mutex_unlock(&ch->lock);
@@ -925,6 +979,8 @@
  * Server needs the size of the next request to allocate a request buffer.
  * Initially used intent-request, however this complicated the remote side,
  * so both sides are not using glink_tx() with INTENT_REQ anymore.
+ *
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_get_next_request_size(struct spcom_channel *ch)
 {
@@ -936,15 +992,22 @@
 
 	/* check if already got it via callback */
 	if (ch->actual_rx_size) {
-		pr_debug("next-req-size already ready ch [%s] size [%d].\n",
+		pr_debug("next-req-size already ready ch [%s] size [%zu]\n",
 			 ch->name, ch->actual_rx_size);
 		goto exit_ready;
 	}
 
 	pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
 	wait_for_completion(&ch->rx_done);
+
+	/* Check Rx Abort on SP reset */
+	if (ch->rx_abort) {
+		pr_err("rx aborted.\n");
+		goto exit_error;
+	}
+
 	if (ch->actual_rx_size <= 0) {
-		pr_err("invalid rx size [%d] ch [%s].\n",
+		pr_err("invalid rx size [%zu] ch [%s]\n",
 		       ch->actual_rx_size, ch->name);
 		goto exit_error;
 	}
@@ -968,6 +1031,27 @@
 
 }
 
+/**
+ * spcom_rx_abort_pending_server() - abort pending server rx on SSR.
+ *
+ * A server that is waiting for a request but has no client connected
+ * will not get an RX-ABORT or REMOTE-DISCONNECT notification, which
+ * would normally cancel its pending rx operation, so abort it here.
+ */
+static void spcom_rx_abort_pending_server(void)
+{
+	int i;
+
+	for (i = 0 ; i < ARRAY_SIZE(spcom_dev->channels); i++) {
+		struct spcom_channel *ch = &spcom_dev->channels[i];
+
+		if (ch->is_server) {
+			pr_debug("rx-abort server on ch [%s].\n", ch->name);
+			spcom_notify_rx_abort(NULL, ch, NULL);
+		}
+	}
+}
+
 /*======================================================================*/
 /*		General API for kernel drivers				*/
 /*======================================================================*/
@@ -979,6 +1063,9 @@
  */
 bool spcom_is_sp_subsystem_link_up(void)
 {
+	if (spcom_dev == NULL)
+		return false;
+
 	return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 }
 EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
@@ -1001,6 +1088,11 @@
 	struct spcom_channel *ch;
 	struct spcom_client *client;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return NULL;
+	}
+
 	if (!info) {
 		pr_err("Invalid parameter.\n");
 			return NULL;
@@ -1042,17 +1134,26 @@
 {
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!client) {
-		pr_err("Invalid parameter.\n");
+		pr_err("Invalid client parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = client->ch;
-
-	kfree(client);
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	spcom_close(ch);
 
+	kfree(client);
+
 	return 0;
 }
 EXPORT_SYMBOL(spcom_unregister_client);
@@ -1069,6 +1170,8 @@
  * @timeout_msec: timeout waiting for response.
  *
  * The timeout depends on the specific request handling time at the remote side.
+ *
+ * Return: number of rx bytes on success, negative value on failure.
  */
 int spcom_client_send_message_sync(struct spcom_client	*client,
 				    void	*req_ptr,
@@ -1080,12 +1183,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!client || !req_ptr || !resp_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1120,13 +1232,25 @@
 bool spcom_client_is_server_connected(struct spcom_client *client)
 {
 	bool connected;
+	struct spcom_channel *ch;
+
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return false;
+	}
 
 	if (!client) {
 		pr_err("Invalid parameter.\n");
+		return false;
+	}
+
+	ch = client->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
 		return -EINVAL;
 	}
 
-	connected = spcom_is_channel_connected(client->ch);
+	connected = spcom_is_channel_connected(ch);
 
 	return connected;
 }
@@ -1150,6 +1274,11 @@
 	struct spcom_channel *ch;
 	struct spcom_server *server;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return NULL;
+	}
+
 	if (!info) {
 		pr_err("Invalid parameter.\n");
 		return NULL;
@@ -1188,17 +1317,26 @@
 {
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server) {
-		pr_err("Invalid parameter.\n");
+		pr_err("Invalid server parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
-
-	kfree(server);
+	if (!ch) {
+		pr_err("Invalid channel parameter.\n");
+		return -EINVAL;
+	}
 
 	spcom_close(ch);
 
+	kfree(server);
+
 	return 0;
 }
 EXPORT_SYMBOL(spcom_unregister_service);
@@ -1208,7 +1346,7 @@
  *
  * @server: server handle
  *
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 int spcom_server_get_next_request_size(struct spcom_server *server)
 {
@@ -1221,6 +1359,10 @@
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1243,7 +1385,7 @@
  * @req_ptr: request buffer pointer
  * @req_size: max request size
  *
- * Return: request size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 int spcom_server_wait_for_request(struct spcom_server	*server,
 				  void			*req_ptr,
@@ -1252,12 +1394,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server || !req_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1285,12 +1436,21 @@
 	int ret;
 	struct spcom_channel *ch;
 
+	if (!spcom_is_ready()) {
+		pr_err("spcom is not ready.\n");
+		return -ENODEV;
+	}
+
 	if (!server || !resp_ptr) {
 		pr_err("Invalid parameter.\n");
 		return -EINVAL;
 	}
 
 	ch = server->ch;
+	if (!ch) {
+		pr_err("Invalid channel.\n");
+		return -EINVAL;
+	}
 
 	/* Check if remote side connect */
 	if (!spcom_is_channel_connected(ch)) {
@@ -1322,6 +1482,7 @@
 	int ret = 0;
 	struct spcom_user_create_channel_command *cmd = cmd_buf;
 	const char *ch_name;
+	const size_t maxlen = sizeof(cmd->ch_name);
 
 	if (cmd_size != sizeof(*cmd)) {
 		pr_err("cmd_size [%d] , expected [%d].\n",
@@ -1330,6 +1491,10 @@
 	}
 
 	ch_name = cmd->ch_name;
+	if (strnlen(cmd->ch_name, maxlen) == maxlen) {
+		pr_err("channel name is not NULL terminated\n");
+		return -EINVAL;
+	}
 
 	pr_debug("ch_name [%s].\n", ch_name);
 
@@ -1468,7 +1633,7 @@
 
 	/* Get ION handle from fd */
 	handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
-	if (handle == NULL) {
+	if (IS_ERR_OR_NULL(handle)) {
 		pr_err("fail to get ion handle.\n");
 		return -EINVAL;
 	}
@@ -1629,18 +1794,23 @@
 
 	/* Get ION handle from fd - this increments the ref count */
 	ion_handle = ion_import_dma_buf_fd(spcom_dev->ion_client, fd);
-	if (ion_handle == NULL) {
+	if (IS_ERR_OR_NULL(ion_handle)) {
 		pr_err("fail to get ion handle.\n");
 		return -EINVAL;
 	}
+
 	pr_debug("ion handle ok.\n");
 
+	/* ION buf lock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
 	/* Check if this ION buffer is already locked */
 	for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
 		if (ch->ion_handle_table[i] == ion_handle) {
-			pr_debug("fd [%d] ion buf is already locked.\n", fd);
+			pr_err("fd [%d] ion buf is already locked.\n", fd);
 			/* decrement back the ref count */
 			ion_free(spcom_dev->ion_client, ion_handle);
+			mutex_unlock(&ch->lock);
 			return -EINVAL;
 		}
 	}
@@ -1650,11 +1820,19 @@
 		if (ch->ion_handle_table[i] == NULL) {
 			ch->ion_handle_table[i] = ion_handle;
 			ch->ion_fd_table[i] = fd;
-			pr_debug("locked ion buf#[%d], fd [%d].\n", i, fd);
+			pr_debug("ch [%s] locked ion buf #%d, fd [%d].\n",
+				ch->name, i, fd);
+			mutex_unlock(&ch->lock);
 			return 0;
 		}
 	}
 
+	pr_err("no free entry to store ion handle of fd [%d].\n", fd);
+	/* decrement back the ref count */
+	ion_free(spcom_dev->ion_client, ion_handle);
+
+	mutex_unlock(&ch->lock);
+
 	return -EFAULT;
 }
 
@@ -1684,20 +1862,24 @@
 		/* unlock all ION buf */
 		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
 			if (ch->ion_handle_table[i] != NULL) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
 				ion_free(ion_client, ch->ion_handle_table[i]);
 				ch->ion_handle_table[i] = NULL;
 				ch->ion_fd_table[i] = -1;
-				pr_debug("unlocked ion buf#[%d].\n", i);
 			}
 		}
 	} else {
 		/* unlock specific ION buf */
 		for (i = 0 ; i < ARRAY_SIZE(ch->ion_handle_table) ; i++) {
+			if (ch->ion_handle_table[i] == NULL)
+				continue;
 			if (ch->ion_fd_table[i] == fd) {
+				pr_debug("unlocked ion buf #%d fd [%d].\n",
+					i, ch->ion_fd_table[i]);
 				ion_free(ion_client, ch->ion_handle_table[i]);
 				ch->ion_handle_table[i] = NULL;
 				ch->ion_fd_table[i] = -1;
-				pr_debug("unlocked ion buf#[%d].\n", i);
 				found = true;
 				break;
 			}
@@ -1731,8 +1913,13 @@
 		return -EINVAL;
 	}
 
+	/* ION buf unlock doesn't involve any rx/tx data to SP. */
+	mutex_lock(&ch->lock);
+
 	ret = spcom_unlock_ion_buf(ch, fd);
 
+	mutex_unlock(&ch->lock);
+
 	return ret;
 }
 
@@ -1766,9 +1953,9 @@
 	int swap_id;
 	char cmd_name[5] = {0}; /* debug only */
 
-	/* opcode field is the minimum length of cmd */
-	if (buf_size < sizeof(cmd->cmd_id)) {
-		pr_err("Invalid argument user buffer size %d.\n", buf_size);
+	/* Minimal command should have command-id and argument */
+	if (buf_size < sizeof(struct spcom_user_command)) {
+		pr_err("Command buffer size [%d] too small\n", buf_size);
 		return -EINVAL;
 	}
 
@@ -1813,7 +2000,7 @@
  * @buf:	command buffer.
  * @size:	command buffer size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_get_req_size(struct spcom_channel *ch,
 				      void *buf,
@@ -1841,7 +2028,7 @@
  * @buf:	command buffer.
  * @size:	command buffer size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_read_req_resp(struct spcom_channel *ch,
 				       void *buf,
@@ -1861,7 +2048,7 @@
 
 	/* Check param validity */
 	if (size > SPCOM_MAX_RESPONSE_SIZE) {
-		pr_err("ch [%s] inavlid size [%d].\n",
+		pr_err("ch [%s] invalid size [%d].\n",
 			ch->name, size);
 		return -EINVAL;
 	}
@@ -1884,7 +2071,8 @@
 	ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
 	if (ret < 0) {
 		pr_err("rx error %d.\n", ret);
-		goto exit_err;
+		kfree(rx_buf);
+		return ret;
 	} else {
 		size = ret; /* actual_rx_size */
 	}
@@ -1924,7 +2112,7 @@
  * A special size SPCOM_GET_NEXT_REQUEST_SIZE, which is bigger than the max
  * response/request tells the kernel that user space only need the size.
  *
- * Return: size in bytes.
+ * Return: size in bytes on success, negative value on failure.
  */
 static int spcom_handle_read(struct spcom_channel *ch,
 			      void *buf,
@@ -1932,8 +2120,8 @@
 {
 	if (size == SPCOM_GET_NEXT_REQUEST_SIZE) {
 		pr_debug("get next request size, ch [%s].\n", ch->name);
-		size = spcom_handle_get_req_size(ch, buf, size);
 		ch->is_server = true;
+		size = spcom_handle_get_req_size(ch, buf, size);
 	} else {
 		pr_debug("get request/response, ch [%s].\n", ch->name);
 		size = spcom_handle_read_req_resp(ch, buf, size);
@@ -1988,6 +2176,10 @@
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
 
+	/* silent error message until spss link is up */
+	if (!spcom_is_sp_subsystem_link_up())
+		return -ENODEV;
+
 	pr_debug("Open file [%s].\n", name);
 
 	if (strcmp(name, DEVICE_NAME) == 0) {
@@ -2006,8 +2198,6 @@
 		return -ENODEV;
 	}
 
-	filp->private_data = ch;
-
 	ret = spcom_open(ch, OPEN_CHANNEL_TIMEOUT_MSEC);
 	if (ret == -ETIMEDOUT) {
 		pr_err("Connection timeout channel [%s].\n", name);
@@ -2016,6 +2206,8 @@
 		return ret;
 	}
 
+	filp->private_data = ch;
+
 	pr_debug("finished.\n");
 
 	return 0;
@@ -2036,7 +2228,6 @@
 {
 	struct spcom_channel *ch;
 	const char *name = file_to_filename(filp);
-	bool connected = false;
 
 	pr_debug("Close file [%s].\n", name);
 
@@ -2058,19 +2249,18 @@
 	}
 
 	/* channel might be already closed or disconnected */
-	if (spcom_is_channel_open(ch) && spcom_is_channel_connected(ch))
-		connected = true;
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch [%s] already closed.\n", name);
+		return 0;
+	}
 
 	reinit_completion(&ch->disconnect);
 
 	spcom_close(ch);
 
-	if (connected) {
-		pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n",
-			 name);
-		wait_for_completion(&ch->disconnect);
-		pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
-	}
+	pr_debug("Wait for event GLINK_LOCAL_DISCONNECTED, ch [%s].\n", name);
+	wait_for_completion(&ch->disconnect);
+	pr_debug("GLINK_LOCAL_DISCONNECTED signaled, ch [%s].\n", name);
 
 	return 0;
 }
@@ -2102,8 +2292,8 @@
 
 	ch = filp->private_data;
 	if (!ch) {
-		pr_debug("invalid ch pointer.\n");
-		/* Allow some special commands via /dev/spcom and /dev/sp_ssr */
+		pr_err("invalid ch pointer, command not allowed.\n");
+		return -EINVAL;
 	} else {
 		/* Check if remote side connect */
 		if (!spcom_is_channel_connected(ch)) {
@@ -2147,7 +2337,7 @@
 }
 
 /**
- * spcom_device_read() - handle channel file write() from user space.
+ * spcom_device_read() - handle channel file read() from user space.
  *
  * @filp: file pointer
  *
@@ -2173,12 +2363,28 @@
 
 	ch = filp->private_data;
 
+	if (ch == NULL) {
+		pr_err("invalid ch pointer, file [%s].\n", name);
+		return -EINVAL;
+	}
+
+	if (!spcom_is_channel_open(ch)) {
+		pr_err("ch is not open, file [%s].\n", name);
+		return -EINVAL;
+	}
+
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
 		return -ENOMEM;
 
-	actual_size = spcom_handle_read(ch, buf, size);
-	if ((actual_size <= 0) || (actual_size > size)) {
+	ret = spcom_handle_read(ch, buf, size);
+	if (ret < 0) {
+		pr_err("read error [%d].\n", ret);
+		kfree(buf);
+		return ret;
+	}
+	actual_size = ret;
+	if ((actual_size == 0) || (actual_size > size)) {
 		pr_err("invalid actual_size [%d].\n", actual_size);
 		kfree(buf);
 		return -EFAULT;
@@ -2254,6 +2460,10 @@
 		done = (spcom_dev->link_state == GLINK_LINK_STATE_UP);
 		break;
 	case SPCOM_POLL_CH_CONNECT:
+		if (ch == NULL) {
+			pr_err("invalid ch pointer, file [%s].\n", name);
+			return -EINVAL;
+		}
 		pr_debug("ch [%s] SPCOM_POLL_CH_CONNECT.\n", name);
 		if (wait) {
 			reinit_completion(&ch->connect);
@@ -2329,7 +2539,7 @@
 	devt = spcom_dev->device_no + spcom_dev->channel_count;
 	priv = ch;
 	dev = device_create(cls, parent, devt, priv, name);
-	if (!dev) {
+	if (IS_ERR(dev)) {
 		pr_err("device_create failed.\n");
 		kfree(cdev);
 		return -ENODEV;
@@ -2382,7 +2592,7 @@
 				  spcom_dev->device_no, priv,
 				  DEVICE_NAME);
 
-	if (!spcom_dev->class_dev) {
+	if (IS_ERR(spcom_dev->class_dev)) {
 		pr_err("class_device_create failed %d\n", ret);
 		ret = -ENOMEM;
 		goto exit_destroy_class;
@@ -2435,6 +2645,11 @@
 
 	pr_debug("num of predefined channels [%d].\n", num_ch);
 
+	if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+		pr_err("too many predefined channels [%d].\n", num_ch);
+		return -EINVAL;
+	}
+
 	for (i = 0; i < num_ch; i++) {
 		ret = of_property_read_string_index(np, propname, i, &name);
 		if (ret) {
@@ -2500,21 +2715,23 @@
 	pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
 		link_info.transport, link_info.edge);
 	notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
-	if (!notif_handle) {
+	if (IS_ERR(notif_handle)) {
 		pr_err("glink_register_link_state_cb(), err [%d]\n", ret);
 		goto fail_reg_chardev;
 	}
 
 	spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
-	if (spcom_dev->ion_client == NULL) {
+	if (IS_ERR(spcom_dev->ion_client)) {
 		pr_err("fail to create ion client.\n");
-		goto fail_reg_chardev;
+		goto fail_ion_client;
 	}
 
 	pr_info("Driver Initialization ok.\n");
 
 	return 0;
 
+fail_ion_client:
+	glink_unregister_link_state_cb(notif_handle);
 fail_reg_chardev:
 	pr_err("Failed to init driver.\n");
 	spcom_unregister_chrdev();
@@ -2552,7 +2769,7 @@
 	if (ret)
 		pr_err("spcom_driver register failed %d\n", ret);
 
-	return 0;
+	return ret;
 }
 module_init(spcom_init);
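Many spcom hunks above swap bare NULL tests for IS_ERR()/IS_ERR_OR_NULL(), since device_create(), msm_ion_client_create() and ion_import_dma_buf_fd() report failure as ERR_PTR-encoded pointers rather than NULL. A small self-contained sketch of the check (demo_check_handle is an illustrative helper, not part of the driver):

#include <linux/err.h>
#include <linux/errno.h>

static int demo_check_handle(const void *h)
{
	/* a bare NULL test would accept ERR_PTR(-ENOMEM) as a valid handle
	 * and crash on the first dereference; cover both failure encodings
	 */
	if (IS_ERR_OR_NULL(h))
		return h ? PTR_ERR(h) : -EINVAL;

	return 0;
}

Typical use mirrors the hunks: take the allocator's return value, run it through the check, and bail out with the decoded error before the handle is ever dereferenced.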
 
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 6ff39de..f8f6829 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -36,7 +36,7 @@
 
 #define XO_FREQ			19200000
 #define PROXY_TIMEOUT_MS	10000
-#define MAX_SSR_REASON_LEN	81U
+#define MAX_SSR_REASON_LEN	256U
 #define STOP_ACK_TIMEOUT_MS	1000
 #define CRASH_STOP_ACK_TO_MS	200
 
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index e7c2bb2..21f3580 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -481,17 +481,21 @@
 	mutex_unlock(&subsys_list_lock);
 }
 
-static void for_each_subsys_device(struct subsys_device **list,
+static int for_each_subsys_device(struct subsys_device **list,
 		unsigned int count, void *data,
-		void (*fn)(struct subsys_device *, void *))
+		int (*fn)(struct subsys_device *, void *))
 {
+	int ret;
 	while (count--) {
 		struct subsys_device *dev = *list++;
 
 		if (!dev)
 			continue;
-		fn(dev, data);
+		ret = fn(dev, data);
+		if (ret)
+			return ret;
 	}
+	return 0;
 }
 
 static void notify_each_subsys_device(struct subsys_device **list,
@@ -593,21 +597,31 @@
 	return 0;
 }
 
-static void subsystem_shutdown(struct subsys_device *dev, void *data)
+static int subsystem_shutdown(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
+	int ret;
 
 	pr_info("[%s:%d]: Shutting down %s\n",
 			current->comm, current->pid, name);
-	if (dev->desc->shutdown(dev->desc, true) < 0)
-		panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
-			current->comm, current->pid, name);
+	ret = dev->desc->shutdown(dev->desc, true);
+	if (ret < 0) {
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("subsys-restart: [%s:%d]: Failed to shutdown %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Shutdown failure on %s\n", name);
+			return ret;
+		}
+	}
 	dev->crash_count++;
 	subsys_set_state(dev, SUBSYS_OFFLINE);
 	disable_all_irqs(dev);
+
+	return 0;
 }
 
-static void subsystem_ramdump(struct subsys_device *dev, void *data)
+static int subsystem_ramdump(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 
@@ -616,15 +630,17 @@
 			pr_warn("%s[%s:%d]: Ramdump failed.\n",
 				name, current->comm, current->pid);
 	dev->do_ramdump_on_put = false;
+	return 0;
 }
 
-static void subsystem_free_memory(struct subsys_device *dev, void *data)
+static int subsystem_free_memory(struct subsys_device *dev, void *data)
 {
 	if (dev->desc->free_memory)
 		dev->desc->free_memory(dev->desc);
+	return 0;
 }
 
-static void subsystem_powerup(struct subsys_device *dev, void *data)
+static int subsystem_powerup(struct subsys_device *dev, void *data)
 {
 	const char *name = dev->desc->name;
 	int ret;
@@ -632,11 +648,17 @@
 	pr_info("[%s:%d]: Powering up %s\n", current->comm, current->pid, name);
 	init_completion(&dev->err_ready);
 
-	if (dev->desc->powerup(dev->desc) < 0) {
+	ret = dev->desc->powerup(dev->desc);
+	if (ret < 0) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Powerup error: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure) {
+			panic("[%s:%d]: Powerup error: %s!",
+				current->comm, current->pid, name);
+		} else {
+			pr_err("Powerup failure on %s\n", name);
+			return ret;
+		}
 	}
 	enable_all_irqs(dev);
 
@@ -644,11 +666,16 @@
 	if (ret) {
 		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
 								NULL);
-		panic("[%s:%d]: Timed out waiting for error ready: %s!",
-			current->comm, current->pid, name);
+		if (!dev->desc->ignore_ssr_failure)
+			panic("[%s:%d]: Timed out waiting for error ready: %s!",
+				current->comm, current->pid, name);
+		else
+			return ret;
 	}
 	subsys_set_state(dev, SUBSYS_ONLINE);
 	subsys_set_crash_status(dev, CRASH_STATUS_NO_CRASH);
+
+	return 0;
 }
 
 static int __find_subsys(struct device *dev, void *data)
@@ -910,6 +937,7 @@
 	struct subsys_tracking *track;
 	unsigned int count;
 	unsigned long flags;
+	int ret;
 
 	/*
 	 * It's OK to not take the registration lock at this point.
@@ -957,7 +985,9 @@
 	pr_debug("[%s:%d]: Starting restart sequence for %s\n",
 			current->comm, current->pid, desc->name);
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
 
 	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
@@ -973,12 +1003,19 @@
 	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
 
 	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
-	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	ret = for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	if (ret)
+		goto err;
 	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
 
 	pr_info("[%s:%d]: Restart sequence for %s completed.\n",
 			current->comm, current->pid, desc->name);
 
+err:
+	/* Reset subsys count */
+	if (ret)
+		dev->count = 0;
+
 	mutex_unlock(&soc_order_reg_lock);
 	mutex_unlock(&track->lock);
 
@@ -1470,6 +1507,9 @@
 			desc->generic_irq = ret;
 	}
 
+	desc->ignore_ssr_failure = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ignore-ssr-failure");
+
 	order = ssr_parse_restart_orders(desc);
 	if (IS_ERR(order)) {
 		pr_err("Could not initialize SSR restart order, err = %ld\n",
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 7a784aa..8bf5659 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -136,6 +136,8 @@
 		return 0;
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	if (wdog_dd->wakeup_irq_enable) {
+		/* Make sure register write is complete before proceeding */
+		mb();
 		wdog_dd->last_pet = sched_clock();
 		return 0;
 	}
@@ -151,8 +153,15 @@
 {
 	struct msm_watchdog_data *wdog_dd =
 			(struct msm_watchdog_data *)dev_get_drvdata(dev);
-	if (!enable || wdog_dd->wakeup_irq_enable)
+	if (!enable)
 		return 0;
+	if (wdog_dd->wakeup_irq_enable) {
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+		/* Make sure register write is complete before proceeding */
+		mb();
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
 	__raw_writel(1, wdog_dd->base + WDT0_EN);
 	__raw_writel(1, wdog_dd->base + WDT0_RST);
 	/* Make sure watchdog is reset before setting enable */
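Both watchdog hunks pair the WDT0_RST pet write with mb() so the MMIO store is pushed out before the driver timestamps the pet; __raw_writel() by itself carries no ordering or completion guarantee. A minimal sketch of the pairing (the struct and register offset are illustrative; sched_clock() is declared in <linux/sched/clock.h> on newer kernels and <linux/sched.h> on older ones):

#include <linux/io.h>
#include <linux/sched/clock.h>

#define DEMO_WDT0_RST	0x04	/* illustrative offset */

struct demo_wdog {
	void __iomem *base;
	unsigned long long last_pet;
};

static void demo_pet_watchdog(struct demo_wdog *wd)
{
	__raw_writel(1, wd->base + DEMO_WDT0_RST);
	/* make sure the register write has gone out before time-stamping */
	mb();
	wd->last_pet = sched_clock();
}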
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 8cc77c1..08eb00a 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -73,11 +73,11 @@
 #define SPI_CS_DEASSERT		(9)
 #define SPI_SCK_ONLY		(10)
 /* M_CMD params for SPI */
-#define SPI_PRE_CMD_DELAY	(0)
-#define TIMESTAMP_BEFORE	(1)
-#define FRAGMENTATION		(2)
-#define TIMESTAMP_AFTER		(3)
-#define POST_CMD_DELAY		(4)
+#define SPI_PRE_CMD_DELAY	BIT(0)
+#define TIMESTAMP_BEFORE	BIT(1)
+#define FRAGMENTATION		BIT(2)
+#define TIMESTAMP_AFTER		BIT(3)
+#define POST_CMD_DELAY		BIT(4)
 
 #define SPI_CORE2X_VOTE		(10000)
 
@@ -172,15 +172,13 @@
 	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
 	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
 	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
-	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
-	u32 demux_output_inv =
-			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	u32 demux_sel = 0;
+	u32 demux_output_inv = 0;
 	int ret = 0;
 
 	loopback_cfg &= ~LOOPBACK_MSK;
 	cpol &= ~CPOL;
 	cpha &= ~CPHA;
-	demux_output_inv &= ~BIT(spi_slv->chip_select);
 
 	if (mode & SPI_LOOP)
 		loopback_cfg |= LOOPBACK_ENABLE;
@@ -194,7 +192,7 @@
 	if (spi_slv->mode & SPI_CS_HIGH)
 		demux_output_inv |= BIT(spi_slv->chip_select);
 
-	demux_sel |= BIT(spi_slv->chip_select);
+	demux_sel = spi_slv->chip_select;
 	mas->cur_speed_hz = spi_slv->max_speed_hz;
 	mas->cur_word_len = spi_slv->bits_per_word;
 
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index a5bfeab..9cc85ee 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -584,6 +584,11 @@
 			id = ffs(status) - 1;
 			status &= ~BIT(id);
 			apid = id + i * 32;
+			if (apid < pa->min_apid || apid > pa->max_apid) {
+				WARN_ONCE(true, "spurious spmi irq received for apid=%d\n",
+					apid);
+				continue;
+			}
 			enable = readl_relaxed(pa->intr +
 					pa->ver_ops->acc_enable(apid));
 			if (enable & SPMI_PIC_ACC_ENABLE_BIT)
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 8d6bca6..591f274 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@
 
 	switch (variable) {
 	case HW_VAR_BSSID:
-		rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
-		rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+		/* BSSIDR 2 byte alignment */
+		rtl92e_writew(dev, BSSIDR, *(u16 *)val);
+		rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
 		break;
 
 	case HW_VAR_MEDIA_STATUS:
@@ -626,7 +627,7 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
-	curCR = rtl92e_readl(dev, EPROM_CMD);
+	curCR = rtl92e_readw(dev, EPROM_CMD);
 	RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
 		 curCR);
 	priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -963,8 +964,8 @@
 	rtl92e_config_rate(dev, &rate_config);
 	priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
 	 priv->basic_rate = rate_config &= 0x15f;
-	rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
-	rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+	rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
+	rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
 
 	if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
 		rtl92e_writew(dev, ATIMWND, 2);
@@ -1184,8 +1185,7 @@
 			  struct cb_desc *cb_desc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
-	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
-			 PCI_DMA_TODEVICE);
+	dma_addr_t mapping;
 	struct tx_fwinfo_8190pci *pTxFwInfo;
 
 	pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1196,8 +1196,6 @@
 	pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
 						  pTxFwInfo->TxRate, cb_desc);
 
-	if (pci_dma_mapping_error(priv->pdev, mapping))
-		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
 	if (cb_desc->bAMPDUEnable) {
 		pTxFwInfo->AllowAggregation = 1;
 		pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1232,6 +1230,14 @@
 	}
 
 	memset((u8 *)pdesc, 0, 12);
+
+	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+				 PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(priv->pdev, mapping)) {
+		netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
+		return;
+	}
+
 	pdesc->LINIP = 0;
 	pdesc->CmdInit = 1;
 	pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index a966a8e..4615a6f 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -306,11 +306,6 @@
 	pTsCommonInfo->TClasNum = TCLAS_Num;
 }
 
-static bool IsACValid(unsigned int tid)
-{
-	return tid < 7;
-}
-
 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
 	   u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
 {
@@ -328,12 +323,6 @@
 	if (ieee->current_network.qos_data.supported == 0) {
 		UP = 0;
 	} else {
-		if (!IsACValid(TID)) {
-			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
-				    __func__, TID);
-			return false;
-		}
-
 		switch (TID) {
 		case 0:
 		case 3:
@@ -351,6 +340,10 @@
 		case 7:
 			UP = 7;
 			break;
+		default:
+			netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
+				    __func__, TID);
+			return false;
 		}
 	}
 
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 40e50f2..01ea228 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3798,6 +3798,8 @@
 {
 	int ret = 0;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
+
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
 	 * connection recovery / failure event can be triggered externally.
@@ -3823,12 +3825,14 @@
 			goto transport_err;
 
 		ret = iscsit_handle_response_queue(conn);
-		if (ret == 1)
+		if (ret == 1) {
 			goto get_immediate;
-		else if (ret == -ECONNRESET)
+		} else if (ret == -ECONNRESET) {
+			conn_freed = true;
 			goto out;
-		else if (ret < 0)
+		} else if (ret < 0) {
 			goto transport_err;
+		}
 	}
 
 transport_err:
@@ -3838,8 +3842,13 @@
 	 * responsible for cleaning up the early connection failure.
 	 */
 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-		iscsit_take_action_for_connection_exit(conn);
+		iscsit_take_action_for_connection_exit(conn, &conn_freed);
 out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
 	return 0;
 }
 
@@ -4012,6 +4021,7 @@
 {
 	int rc;
 	struct iscsi_conn *conn = arg;
+	bool conn_freed = false;
 
 	/*
 	 * Allow ourselves to be interrupted by SIGINT so that a
@@ -4024,7 +4034,7 @@
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
 	if (rc < 0 || iscsi_target_check_conn_state(conn))
-		return 0;
+		goto out;
 
 	if (!conn->conn_transport->iscsit_get_rx_pdu)
 		return 0;
@@ -4033,7 +4043,15 @@
 
 	if (!signal_pending(current))
 		atomic_set(&conn->transport_failed, 1);
-	iscsit_take_action_for_connection_exit(conn);
+	iscsit_take_action_for_connection_exit(conn, &conn_freed);
+
+out:
+	if (!conn_freed) {
+		while (!kthread_should_stop()) {
+			msleep(100);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b54e72c..efc453e 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@
 	}
 }
 
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
 {
+	*conn_freed = false;
+
 	spin_lock_bh(&conn->state_lock);
 	if (atomic_read(&conn->connection_exit)) {
 		spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@
 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
 		spin_unlock_bh(&conn->state_lock);
 		iscsit_close_connection(conn);
+		*conn_freed = true;
 		return;
 	}
 
@@ -955,4 +958,5 @@
 	spin_unlock_bh(&conn->state_lock);
 
 	iscsit_handle_connection_cleanup(conn);
+	*conn_freed = true;
 }
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f94..fbc1d84 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -9,6 +9,6 @@
 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
 extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
 
 #endif   /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 96c55bc..6128e8e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1460,5 +1460,9 @@
 			break;
 	}
 
+	while (!kthread_should_stop()) {
+		msleep(100);
+	}
+
 	return 0;
 }
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 7da9211..355d013 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -620,6 +620,7 @@
 	if (cpufreq_device->cpufreq_state == state)
 		return 0;
 
+	cpufreq_device->cpufreq_state = state;
 	/* If state is the last, isolate the CPU */
 	if (state == cpufreq_device->max_level)
 		return sched_isolate_cpu(cpu);
@@ -627,7 +628,6 @@
 		sched_unisolate_cpu(cpu);
 
 	clip_freq = cpufreq_device->freq_table[state];
-	cpufreq_device->cpufreq_state = state;
 	cpufreq_device->clipped_freq = clip_freq;
 
 	/* Check if the device has a platform mitigation function that
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 432adbc..1ab5b0c 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/thermal.h>
 #include "tsens.h"
+#include "qcom/qti_virtual_sensor.h"
 
 LIST_HEAD(tsens_device_list);
 
@@ -172,6 +173,9 @@
 		return -ENODEV;
 	}
 
+	/* Register virtual thermal sensors. */
+	qti_virtual_sensor_register(&tmdev->pdev->dev);
+
 	return 0;
 }
 
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 984241f9..65d8fd7 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -33,6 +33,9 @@
 #include <linux/thermal.h>
 #include <linux/list.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_virtual.h>
+
 #include "thermal_core.h"
 
 /***   Private data structures to represent thermal device tree data ***/
@@ -107,11 +110,82 @@
 	struct __sensor_param *senps;
 };
 
+/**
+ * struct virtual_sensor - internal representation of a virtual thermal zone
+ * @num_sensors: number of sensors this virtual sensor will reference to
+ *		  estimate temperature
+ * @tz: Array of thermal zones of the sensors this virtual sensor will use
+ *	 to estimate temperature
+ * @virt_tz: Virtual thermal zone pointer
+ * @logic: aggregation logic to be used to estimate the temperature
+ * @last_reading: last estimated temperature
+ * @coefficients: array of coefficients to be used for weighted aggregation
+ *		       logic
+ * @avg_offset: offset value to be used for the weighted aggregation logic
+ * @avg_denominator: denominator value to be used for the weighted aggregation
+ *			logic
+ */
+struct virtual_sensor {
+	int                        num_sensors;
+	struct thermal_zone_device *tz[THERMAL_MAX_VIRT_SENSORS];
+	struct thermal_zone_device *virt_tz;
+	enum aggregation_logic     logic;
+	int                        last_reading;
+	int                        coefficients[THERMAL_MAX_VIRT_SENSORS];
+	int                        avg_offset;
+	int                        avg_denominator;
+};
+
 static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
 		unsigned int trip_type_mask, int *low, int *high);
 
 /***   DT thermal zone device callbacks   ***/
 
+static int virt_sensor_read_temp(void *data, int *val)
+{
+	struct virtual_sensor *sens = data;
+	int idx, temp = 0, ret = 0;
+
+	for (idx = 0; idx < sens->num_sensors; idx++) {
+		int sens_temp = 0;
+
+		ret = thermal_zone_get_temp(sens->tz[idx], &sens_temp);
+		if (ret) {
+			pr_err("virt zone: sensor[%s] read error:%d\n",
+				sens->tz[idx]->type, ret);
+			return ret;
+		}
+		switch (sens->logic) {
+		case VIRT_WEIGHTED_AVG:
+			temp += sens_temp * sens->coefficients[idx];
+			if (idx == (sens->num_sensors - 1))
+				temp = (temp + sens->avg_offset)
+					/ sens->avg_denominator;
+			break;
+		case VIRT_MAXIMUM:
+			if (idx == 0)
+				temp = INT_MIN;
+			if (sens_temp > temp)
+				temp = sens_temp;
+			break;
+		case VIRT_MINIMUM:
+			if (idx == 0)
+				temp = INT_MAX;
+			if (sens_temp < temp)
+				temp = sens_temp;
+			break;
+		default:
+			break;
+		}
+		trace_virtual_temperature(sens->virt_tz, sens->tz[idx],
+					sens_temp, temp);
+	}
+
+	sens->last_reading = *val = temp;
+
+	return 0;
+}
+
 static int of_thermal_get_temp(struct thermal_zone_device *tz,
 			       int *temp)
 {
@@ -516,6 +590,10 @@
 	.unbind = of_thermal_unbind,
 };
 
+static struct thermal_zone_of_device_ops of_virt_ops = {
+	.get_temp = virt_sensor_read_temp,
+};
+
 /***   sensor API   ***/
 
 static struct thermal_zone_device *
@@ -727,6 +805,135 @@
 }
 
 /**
+ * devm_thermal_of_virtual_sensor_register - Register a virtual sensor.
+ *	Three types of virtual sensors are supported.
+ *	1. Weighted aggregation type:
+ *		A virtual sensor of this type calculates a weighted aggregation
+ *		of the sensor temperatures using the formula below:
+ *		temp = ((sensor_1_temp * coeff_1 + ... + sensor_n_temp * coeff_n)
+ *			+ avg_offset) / avg_denominator
+ *		So the sensor driver has to specify n coefficients plus the
+ *		offset and denominator (n + 2 values in total).
+ *	2. Maximum type:
+ *		Virtual sensors of this type will report the maximum of all
+ *		sensor temperatures.
+ *	3. Minimum type:
+ *		Virtual sensors of this type will report the minimum of all
+ *		sensor temperatures.
+ *
+ * @input arguments:
+ * @dev: Virtual sensor driver device pointer.
+ * @sensor_data: Virtual sensor data supported for the device.
+ *
+ * @return: Returns a virtual thermal zone pointer on success, or an error
+ * pointer if the thermal zone could not be created. Returns -EAGAIN if a
+ * sensor required for this virtual sensor's temperature estimation is not
+ * registered yet; the sensor driver can try again later.
+ */
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data)
+{
+	int sens_idx = 0;
+	struct virtual_sensor *sens;
+	struct __thermal_zone *tz;
+	struct thermal_zone_device **ptr;
+	struct thermal_zone_device *tzd;
+	struct __sensor_param *sens_param = NULL;
+	enum thermal_device_mode mode;
+
+	if (!dev || !sensor_data)
+		return ERR_PTR(-EINVAL);
+
+	tzd = thermal_zone_get_zone_by_name(
+				sensor_data->virt_zone_name);
+	if (IS_ERR(tzd)) {
+		dev_err(dev, "sens:%s not available err: %ld\n",
+				sensor_data->virt_zone_name,
+				PTR_ERR(tzd));
+		return tzd;
+	}
+
+	mutex_lock(&tzd->lock);
+	/*
+	 * Check if the virtual zone is registered and enabled.
+	 * If so return the registered thermal zone.
+	 */
+	tzd->ops->get_mode(tzd, &mode);
+	mutex_unlock(&tzd->lock);
+	if (mode == THERMAL_DEVICE_ENABLED)
+		return tzd;
+
+	sens = devm_kzalloc(dev, sizeof(*sens), GFP_KERNEL);
+	if (!sens)
+		return ERR_PTR(-ENOMEM);
+
+	sens->virt_tz = tzd;
+	sens->logic = sensor_data->logic;
+	sens->num_sensors = sensor_data->num_sensors;
+	if (sens->logic == VIRT_WEIGHTED_AVG) {
+		int coeff_ct = sensor_data->coefficient_ct;
+
+		/*
+		 * For weighted aggregation, the sensor driver has to supply
+		 * one coefficient per sensor; the offset and denominator are
+		 * passed separately.
+		 */
+		if (coeff_ct != sens->num_sensors) {
+			dev_err(dev, "sens:%s Invalid coefficient\n",
+					sensor_data->virt_zone_name);
+			return ERR_PTR(-EINVAL);
+		}
+		memcpy(sens->coefficients, sensor_data->coefficients,
+			       coeff_ct * sizeof(*sens->coefficients));
+		sens->avg_offset = sensor_data->avg_offset;
+		sens->avg_denominator = sensor_data->avg_denominator;
+	}
+
+	for (sens_idx = 0; sens_idx < sens->num_sensors; sens_idx++) {
+		sens->tz[sens_idx] = thermal_zone_get_zone_by_name(
+					sensor_data->sensor_names[sens_idx]);
+		if (IS_ERR(sens->tz[sens_idx])) {
+			dev_err(dev, "sens:%s sensor[%s] fetch err:%ld\n",
+				     sensor_data->virt_zone_name,
+				     sensor_data->sensor_names[sens_idx],
+				     PTR_ERR(sens->tz[sens_idx]));
+			break;
+		}
+	}
+	if (sens->num_sensors != sens_idx)
+		return ERR_PTR(-EAGAIN);
+
+	sens_param = kzalloc(sizeof(*sens_param), GFP_KERNEL);
+	if (!sens_param)
+		return ERR_PTR(-ENOMEM);
+	sens_param->sensor_data = sens;
+	sens_param->ops = &of_virt_ops;
+	INIT_LIST_HEAD(&sens_param->first_tz);
+	sens_param->trip_high = INT_MAX;
+	sens_param->trip_low = INT_MIN;
+	mutex_init(&sens_param->lock);
+
+	mutex_lock(&tzd->lock);
+	tz = tzd->devdata;
+	tz->senps = sens_param;
+	tzd->ops->get_temp = of_thermal_get_temp;
+	list_add_tail(&tz->list, &sens_param->first_tz);
+	mutex_unlock(&tzd->lock);
+
+	ptr = devres_alloc(devm_thermal_zone_of_sensor_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	*ptr = tzd;
+	devres_add(dev, ptr);
+
+	tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
+	return tzd;
+}
+EXPORT_SYMBOL(devm_thermal_of_virtual_sensor_register);
+
+/**
  * devm_thermal_zone_of_sensor_register - Resource managed version of
  *				thermal_zone_of_sensor_register()
  * @dev: a valid struct device pointer of a sensor device. Must contain
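For reference, the VIRT_WEIGHTED_AVG path in virt_sensor_read_temp() above accumulates coefficient-weighted readings and only folds in the offset and denominator after the last sensor. A minimal standalone sketch of the same arithmetic, assuming two hypothetical millidegree readings (the helper name and the values below are illustrative, not part of the patch):

#include <stdio.h>

/* Illustrative only: mirrors the VIRT_WEIGHTED_AVG arithmetic above. */
static int weighted_avg(const int *temps, const int *coeffs, int n,
			int offset, int denominator)
{
	int i, acc = 0;

	for (i = 0; i < n; i++)
		acc += temps[i] * coeffs[i];

	return (acc + offset) / denominator;
}

int main(void)
{
	int temps[2] = { 42000, 48000 };	/* hypothetical readings */
	int coeffs[2] = { 1, 1 };

	/* (42000 * 1 + 48000 * 1 + 0) / 2 = 45000 */
	printf("%d\n", weighted_avg(temps, coeffs, 2, 0, 2));
	return 0;
}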
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index 819c6d5..57cdd49 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/iio/consumer.h>
@@ -29,13 +30,17 @@
 #define QPNP_TM_REG_ALARM_CTRL		0x46
 
 #define QPNP_TM_TYPE			0x09
-#define QPNP_TM_SUBTYPE			0x08
+#define QPNP_TM_SUBTYPE_GEN1		0x08
+#define QPNP_TM_SUBTYPE_GEN2		0x09
 
-#define STATUS_STAGE_MASK		0x03
+#define STATUS_GEN1_STAGE_MASK		GENMASK(1, 0)
+#define STATUS_GEN2_STATE_MASK		GENMASK(6, 4)
+#define STATUS_GEN2_STATE_SHIFT		4
 
-#define SHUTDOWN_CTRL1_THRESHOLD_MASK	0x03
+#define SHUTDOWN_CTRL1_OVERRIDE_MASK	GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK	GENMASK(1, 0)
 
-#define ALARM_CTRL_FORCE_ENABLE		0x80
+#define ALARM_CTRL_FORCE_ENABLE		BIT(7)
 
 /*
  * Trip point values based on threshold control
@@ -58,6 +63,7 @@
 struct qpnp_tm_chip {
 	struct regmap			*map;
 	struct thermal_zone_device	*tz_dev;
+	unsigned int			subtype;
 	long				temp;
 	unsigned int			thresh;
 	unsigned int			stage;
@@ -66,6 +72,9 @@
 	struct iio_channel		*adc;
 };
 
+/* This array maps from GEN2 alarm state to GEN1 alarm stage */
+static const unsigned int alarm_state_map[8] = {0, 1, 1, 2, 2, 3, 3, 3};
+
 static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data)
 {
 	unsigned int val;
@@ -84,13 +93,14 @@
 	return regmap_write(chip->map, chip->base + addr, data);
 }
 
-/*
- * This function updates the internal temp value based on the
- * current thermal stage and threshold as well as the previous stage
+/**
+ * qpnp_tm_get_temp_stage() - return over-temperature stage
+ * @chip:		Pointer to the qpnp_tm chip
+ *
+ * Return: stage (GEN1) or state (GEN2) on success, or errno on failure.
  */
-static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
 {
-	unsigned int stage;
 	int ret;
 	u8 reg = 0;
 
@@ -98,16 +108,44 @@
 	if (ret < 0)
 		return ret;
 
-	stage = reg & STATUS_STAGE_MASK;
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
+		ret = reg & STATUS_GEN1_STAGE_MASK;
+	else
+		ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT;
 
-	if (stage > chip->stage) {
+	return ret;
+}
+
+/*
+ * This function updates the internal temp value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+	unsigned int stage, stage_new, stage_old;
+	int ret;
+
+	ret = qpnp_tm_get_temp_stage(chip);
+	if (ret < 0)
+		return ret;
+	stage = ret;
+
+	if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
+		stage_new = stage;
+		stage_old = chip->stage;
+	} else {
+		stage_new = alarm_state_map[stage];
+		stage_old = alarm_state_map[chip->stage];
+	}
+
+	if (stage_new > stage_old) {
 		/* increasing stage, use lower bound */
-		chip->temp = (stage - 1) * TEMP_STAGE_STEP +
+		chip->temp = (stage_new - 1) * TEMP_STAGE_STEP +
 			     chip->thresh * TEMP_THRESH_STEP +
 			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
-	} else if (stage < chip->stage) {
+	} else if (stage_new < stage_old) {
 		/* decreasing stage, use upper bound */
-		chip->temp = stage * TEMP_STAGE_STEP +
+		chip->temp = stage_new * TEMP_STAGE_STEP +
 			     chip->thresh * TEMP_THRESH_STEP -
 			     TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
 	}
@@ -162,28 +200,37 @@
  */
 static int qpnp_tm_init(struct qpnp_tm_chip *chip)
 {
+	unsigned int stage;
 	int ret;
-	u8 reg;
+	u8 reg = 0;
 
-	chip->thresh = THRESH_MIN;
-	chip->temp = DEFAULT_TEMP;
-
-	ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+	ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
 	if (ret < 0)
 		return ret;
 
-	chip->stage = reg & STATUS_STAGE_MASK;
+	chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	chip->temp = DEFAULT_TEMP;
 
-	if (chip->stage)
+	ret = qpnp_tm_get_temp_stage(chip);
+	if (ret < 0)
+		return ret;
+	chip->stage = ret;
+
+	stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
+		? chip->stage : alarm_state_map[chip->stage];
+
+	if (stage)
 		chip->temp = chip->thresh * TEMP_THRESH_STEP +
-			     (chip->stage - 1) * TEMP_STAGE_STEP +
+			     (stage - 1) * TEMP_STAGE_STEP +
 			     TEMP_THRESH_MIN;
 
 	/*
 	 * Set threshold and disable software override of stage 2 and 3
 	 * shutdowns.
 	 */
-	reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+	chip->thresh = THRESH_MIN;
+	reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
+	reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
 	ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
 	if (ret < 0)
 		return ret;
@@ -242,13 +289,16 @@
 		goto fail;
 	}
 
-	if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
+	if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
+				     && subtype != QPNP_TM_SUBTYPE_GEN2)) {
 		dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
 			type, subtype);
 		ret = -ENODEV;
 		goto fail;
 	}
 
+	chip->subtype = subtype;
+
 	ret = qpnp_tm_init(chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "init failed\n");
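For reference, the GEN2 handling above first collapses the 3-bit alarm state to a GEN1-style stage via alarm_state_map[], then derives a temperature estimate from the stage and threshold. A minimal sketch of that estimate, with the step, threshold and hysteresis values passed as parameters because the driver's TEMP_* defines are not shown in this hunk:

/*
 * Illustrative only: mirrors the stage-change estimate in
 * qpnp_tm_update_temp_no_adc() above. The step/threshold/hysteresis
 * values are parameters here; the real driver uses its TEMP_* defines.
 */
static long estimate_temp(long cur, unsigned int stage_new,
			  unsigned int stage_old, unsigned int thresh,
			  long thresh_min, long thresh_step,
			  long stage_step, long stage_hyst)
{
	if (stage_new > stage_old)	/* increasing stage: lower bound */
		return (stage_new - 1) * stage_step + thresh * thresh_step +
		       stage_hyst + thresh_min;
	if (stage_new < stage_old)	/* decreasing stage: upper bound */
		return stage_new * stage_step + thresh * thresh_step -
		       stage_hyst + thresh_min;
	return cur;			/* stage unchanged: keep last value */
}

/* GEN2 alarm state (0..7) collapsed to a GEN1-style stage (0..3). */
static const unsigned int state_to_stage[8] = { 0, 1, 1, 2, 2, 3, 3, 3 };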
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 473d15a..f6e1b86 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -30,3 +30,13 @@
 	  each cluster can be used to perform quick thermal mitigations by
 	  tracking temperatures of the CPUs and taking thermal action in the
 	  hardware without s/w intervention.
+
+config QTI_VIRTUAL_SENSOR
+	bool "QTI Virtual Sensor driver"
+	depends on THERMAL_OF
+	help
+	  This driver has the information about the virtual sensors used by
+	  QTI chipsets and registers the virtual sensors to a thermal zone.
+	  The virtual sensor information includes the underlying thermal
+	  sensors to query for temperature and the aggregation logic to
+	  determine the virtual sensor temperature.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index d1a53b0..8859380 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -2,3 +2,4 @@
 qcom_tsens-y			+= tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o
 obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
 obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o
+obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o
diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c
new file mode 100644
index 0000000..3064c74
--- /dev/null
+++ b/drivers/thermal/qcom/qti_virtual_sensor.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include "qti_virtual_sensor.h"
+
+static const struct virtual_sensor_data qti_virtual_sensors[] = {
+	{
+		.virt_zone_name = "gpu-virt-max-step",
+		.num_sensors = 2,
+		.sensor_names = {"gpu0-usr",
+				"gpu1-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+	{
+		.virt_zone_name = "silver-virt-max-usr",
+		.num_sensors = 4,
+		.sensor_names = {"cpu0-silver-usr",
+				"cpu1-silver-usr",
+				"cpu2-silver-usr",
+				"cpu3-silver-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+	{
+		.virt_zone_name = "gold-virt-max-usr",
+		.num_sensors = 4,
+		.sensor_names = {"cpu0-gold-usr",
+				"cpu1-gold-usr",
+				"cpu2-gold-usr",
+				"cpu3-gold-usr"},
+		.logic = VIRT_MAXIMUM,
+	},
+};
+
+int qti_virtual_sensor_register(struct device *dev)
+{
+	int sens_ct = 0;
+	static int idx;
+	struct thermal_zone_device *tz;
+
+	sens_ct = ARRAY_SIZE(qti_virtual_sensors);
+	for (; idx < sens_ct; idx++) {
+		tz = devm_thermal_of_virtual_sensor_register(dev,
+				&qti_virtual_sensors[idx]);
+		if (IS_ERR(tz))
+			dev_dbg(dev, "sensor:%d register error:%ld\n",
+					idx, PTR_ERR(tz));
+		else
+			dev_dbg(dev, "sensor:%d registered\n", idx);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qti_virtual_sensor_register);
diff --git a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi b/drivers/thermal/qcom/qti_virtual_sensor.h
similarity index 62%
copy from arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
copy to drivers/thermal/qcom/qti_virtual_sensor.h
index 4b3fa93..371b794 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-pinctrl.dtsi
+++ b/drivers/thermal/qcom/qti_virtual_sensor.h
@@ -10,14 +10,20 @@
  * GNU General Public License for more details.
  */
 
-&soc {
-	tlmm: pinctrl@03400000 {
-		compatible = "qcom,sdm830-pinctrl";
-		reg = <0x03400000 0xc00000>;
-		interrupts = <0 208 0>;
-		gpio-controller;
-		#gpio-cells = <2>;
-		interrupt-controller;
-		#interrupt-cells = <2>;
-	};
-};
+#ifndef __QTI_VIRT_SENS_H__
+#define __QTI_VIRT_SENS_H__
+
+#ifdef CONFIG_QTI_VIRTUAL_SENSOR
+
+int qti_virtual_sensor_register(struct device *dev);
+
+#else
+
+static inline int qti_virtual_sensor_register(struct device *dev)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_QTI_VIRTUAL_SENSOR */
+
+#endif /* __QTI_VIRT_SENS_H__ */
diff --git a/drivers/thermal/qpnp-adc-tm.c b/drivers/thermal/qpnp-adc-tm.c
index 342160e..04320d8 100644
--- a/drivers/thermal/qpnp-adc-tm.c
+++ b/drivers/thermal/qpnp-adc-tm.c
@@ -1973,8 +1973,6 @@
 			chip->sensor[sen_idx].thermal_node = true;
 			snprintf(name, sizeof(name), "%s",
 				chip->adc->adc_channels[sen_idx].name);
-			chip->sensor[sen_idx].meas_interval =
-				QPNP_ADC_TM_MEAS_INTERVAL;
 			chip->sensor[sen_idx].low_thr =
 						QPNP_ADC_TM_M0_LOW_THR;
 			chip->sensor[sen_idx].high_thr =
@@ -2027,7 +2025,7 @@
 
 	rc = devm_request_irq(&pdev->dev, chip->adc->adc_irq_eoc,
 			qpnp_adc_tm_rc_thr_isr,
-		IRQF_TRIGGER_RISING, "qpnp_adc_tm_interrupt", chip);
+		IRQF_TRIGGER_HIGH, "qpnp_adc_tm_interrupt", chip);
 	if (rc)
 		dev_err(&pdev->dev, "failed to request adc irq\n");
 	else
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index ecfc4ef..6b05b7b 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -76,6 +76,14 @@
 		return next_target;
 	}
 
+	/*
+	 * If there is no new throttle request and if the thermal zone
+	 * wasn't requesting any previous mitigation, then skip the
+	 * evaluation.
+	 */
+	if (instance->target == THERMAL_NO_TARGET && !throttle)
+		return next_target;
+
 	switch (trend) {
 	case THERMAL_TREND_RAISING:
 		if (throttle) {
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 080d5a5..f24d303 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1320,7 +1320,7 @@
 	/*
 	 * Check if the device is a Fintek F81216A
 	 */
-	if (port->type == PORT_16550A)
+	if (port->type == PORT_16550A && port->iotype == UPIO_PORT)
 		fintek_8250_probe(up);
 
 	if (up->capabilities != old_capabilities) {
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 6deff2e..94ba2c3e 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -327,6 +327,11 @@
 	mb();
 }
 
+static unsigned int msm_geni_cons_get_mctrl(struct uart_port *uport)
+{
+	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
+}
+
 static unsigned int msm_geni_serial_get_mctrl(struct uart_port *uport)
 {
 	u32 geni_ios = 0;
@@ -610,6 +615,8 @@
 {
 	struct uart_port *uport;
 	struct msm_geni_serial_port *port;
+	int locked = 1;
+	unsigned long flags;
 
 	WARN_ON(co->index < 0 || co->index >= GENI_UART_NR_PORTS);
 
@@ -618,9 +625,15 @@
 		return;
 
 	uport = &port->uport;
-	spin_lock(&uport->lock);
-	__msm_geni_serial_console_write(uport, s, count);
-	spin_unlock(&uport->lock);
+	if (oops_in_progress)
+		locked = spin_trylock_irqsave(&uport->lock, flags);
+	else
+		spin_lock_irqsave(&uport->lock, flags);
+
+	if (locked) {
+		__msm_geni_serial_console_write(uport, s, count);
+		spin_unlock_irqrestore(&uport->lock, flags);
+	}
 }
 
 static int handle_rx_console(struct uart_port *uport,
@@ -1014,9 +1027,17 @@
 static void msm_geni_serial_shutdown(struct uart_port *uport)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	unsigned long flags;
 
+	/* Stop the console before stopping the current tx */
+	if (uart_console(uport))
+		console_stop(uport->cons);
+
+	spin_lock_irqsave(&uport->lock, flags);
 	msm_geni_serial_stop_tx(uport);
 	msm_geni_serial_stop_rx(uport);
+	spin_unlock_irqrestore(&uport->lock, flags);
+
 	disable_irq(uport->irq);
 	free_irq(uport->irq, msm_port);
 	if (uart_console(uport)) {
@@ -1571,6 +1592,7 @@
 	.shutdown = msm_geni_serial_shutdown,
 	.type = msm_geni_serial_get_type,
 	.set_mctrl = msm_geni_cons_set_mctrl,
+	.get_mctrl = msm_geni_cons_get_mctrl,
 #ifdef CONFIG_CONSOLE_POLL
 	.poll_get_char	= msm_geni_serial_get_char,
 	.poll_put_char	= msm_geni_serial_poll_put_char,
@@ -1845,7 +1867,6 @@
 	if (uart_console(uport)) {
 		uart_suspend_port((struct uart_driver *)uport->private_data,
 					uport);
-		se_geni_resources_off(&port->serial_rsc);
 	} else {
 		if (!pm_runtime_status_suspended(dev)) {
 			dev_info(dev, "%s: Is still active\n", __func__);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index c5ff13f..a876d47 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -311,6 +311,12 @@
 		break;
 
 	case USB_CDC_NOTIFY_SERIAL_STATE:
+		if (le16_to_cpu(dr->wLength) != 2) {
+			dev_dbg(&acm->control->dev,
+				"%s - malformed serial state\n", __func__);
+			break;
+		}
+
 		newctrl = get_unaligned_le16(data);
 
 		if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
@@ -347,11 +353,10 @@
 
 	default:
 		dev_dbg(&acm->control->dev,
-			"%s - unknown notification %d received: index %d "
-			"len %d data0 %d data1 %d\n",
+			"%s - unknown notification %d received: index %d len %d\n",
 			__func__,
-			dr->bNotificationType, dr->wIndex,
-			dr->wLength, data[0], data[1]);
+			dr->bNotificationType, dr->wIndex, dr->wLength);
+
 		break;
 	}
 exit:
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4016dae..840930b0 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -481,11 +481,11 @@
 
 	if (userurb) {		/* Async */
 		if (when == SUBMIT)
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"length %u\n",
 					userurb, ep, t, d, length);
 		else
-			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
+			dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
 					"actual_length %u status %d\n",
 					userurb, ep, t, d, length,
 					timeout_or_status);
@@ -1905,7 +1905,7 @@
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -1922,7 +1922,7 @@
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2053,7 +2053,7 @@
 	if (as) {
 		int retval;
 
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 		return retval;
@@ -2070,7 +2070,7 @@
 
 	as = async_getcompleted(ps);
 	if (as) {
-		snoop(&ps->dev->dev, "reap %p\n", as->userurb);
+		snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
 		retval = processcompl_compat(as, (void __user * __user *)arg);
 		free_async(as);
 	} else {
@@ -2499,7 +2499,7 @@
 #endif
 
 	case USBDEVFS_DISCARDURB:
-		snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p);
+		snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
 		ret = proc_unlinkurb(ps, p);
 		break;
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ff45ebf..32f99da 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1723,7 +1723,7 @@
 		if (retval == 0)
 			retval = -EINPROGRESS;
 		else if (retval != -EIDRM && retval != -EBUSY)
-			dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+			dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
 					urb, retval);
 		usb_put_dev(udev);
 	}
@@ -1890,7 +1890,7 @@
 		/* kick hcd */
 		unlink1(hcd, urb, -ESHUTDOWN);
 		dev_dbg (hcd->self.controller,
-			"shutdown urb %p ep%d%s%s\n",
+			"shutdown urb %pK ep%d%s%s\n",
 			urb, usb_endpoint_num(&ep->desc),
 			is_in ? "in" : "out",
 			({	char *s;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7388f73..fcbaa61 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -365,7 +365,8 @@
 }
 
 /* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data)
+static int get_hub_descriptor(struct usb_device *hdev,
+		struct usb_hub_descriptor *desc)
 {
 	int i, ret, size;
 	unsigned dtype;
@@ -381,10 +382,18 @@
 	for (i = 0; i < 3; i++) {
 		ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
 			USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
-			dtype << 8, 0, data, size,
+			dtype << 8, 0, desc, size,
 			USB_CTRL_GET_TIMEOUT);
-		if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
+		if (hub_is_superspeed(hdev)) {
+			if (ret == size)
+				return ret;
+		} else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
+			/* Make sure we have the DeviceRemovable field. */
+			size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
+			if (ret < size)
+				return -EMSGSIZE;
 			return ret;
+		}
 	}
 	return -EINVAL;
 }
@@ -1322,7 +1331,7 @@
 	}
 	mutex_init(&hub->status_mutex);
 
-	hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
+	hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
 	if (!hub->descriptor) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1330,7 +1339,7 @@
 
 	/* Request the entire hub descriptor.
 	 * hub->descriptor can handle USB_MAXCHILDREN ports,
-	 * but the hub can/will return fewer bytes here.
+	 * but a (non-SS) hub can/will return fewer bytes here.
 	 */
 	ret = get_hub_descriptor(hdev, hub->descriptor);
 	if (ret < 0) {
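As a worked example of the non-SuperSpeed sizing above: assuming USB_DT_HUB_NONVAR_SIZE is 7 (its usual value in ch11.h), a hub reporting bNbrPorts = 4 needs at least 7 + 4/8 + 1 = 8 bytes so that the one-byte DeviceRemovable bitmap is fully present; shorter replies are now rejected with -EMSGSIZE. A small sketch of the same calculation:

/*
 * Illustrative only: minimum hub descriptor length that still contains
 * the DeviceRemovable bitmap, assuming USB_DT_HUB_NONVAR_SIZE == 7.
 */
static int min_hub_desc_size(int nports)
{
	return 7 /* USB_DT_HUB_NONVAR_SIZE (assumed) */ + nports / 8 + 1;
}
/* e.g. min_hub_desc_size(4) == 8, min_hub_desc_size(10) == 9 */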
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index a903969..5133ab9 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -333,7 +333,7 @@
 	if (!urb || !urb->complete)
 		return -EINVAL;
 	if (urb->hcpriv) {
-		WARN_ONCE(1, "URB %p submitted while active\n", urb);
+		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
 		return -EBUSY;
 	}
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 38614fa..228d8af 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3008,8 +3008,9 @@
 		ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
 					msm_dwc3_pwr_irq,
 					msm_dwc3_pwr_irq_thread,
-					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
-					| IRQF_ONESHOT, "ss_phy_irq", mdwc);
+					IRQF_TRIGGER_HIGH | IRQ_TYPE_LEVEL_HIGH
+					| IRQF_EARLY_RESUME | IRQF_ONESHOT,
+					"ss_phy_irq", mdwc);
 		if (ret) {
 			dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
 					ret);
@@ -3519,13 +3520,16 @@
 		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
 
 		mdwc->hs_phy->flags |= PHY_HOST_MODE;
-		if (dwc->maximum_speed == USB_SPEED_SUPER)
+		if (dwc->maximum_speed == USB_SPEED_SUPER) {
 			mdwc->ss_phy->flags |= PHY_HOST_MODE;
+			usb_phy_notify_connect(mdwc->ss_phy,
+						USB_SPEED_SUPER);
+		}
 
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
 		pm_runtime_get_sync(mdwc->dev);
 		dbg_event(0xFF, "StrtHost gync",
 			atomic_read(&mdwc->dev->power.usage_count));
-		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
 		if (!IS_ERR(mdwc->vbus_reg))
 			ret = regulator_enable(mdwc->vbus_reg);
 		if (ret) {
@@ -3614,8 +3618,13 @@
 		dbg_event(0xFF, "StopHost gsync",
 			atomic_read(&mdwc->dev->power.usage_count));
 		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		if (mdwc->ss_phy->flags & PHY_HOST_MODE) {
+			usb_phy_notify_disconnect(mdwc->ss_phy,
+					USB_SPEED_SUPER);
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+		}
+
 		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
-		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
 		platform_device_del(dwc->xhci);
 		usb_unregister_notify(&mdwc->host_nb);
 
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index bb32978..b062d58 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -815,7 +815,13 @@
 		dwc->delayed_status = true;
 
 out:
-	if (ret < 0) {
+	/*
+	 * Don't try to halt ep0 if ret is -ESHUTDOWN.
+	 * An -ESHUTDOWN return suggests that the setup-packet response is
+	 * available but queueing it on ep0 failed, possibly because ep0 is
+	 * already disabled.
+	 */
+	if (ret < 0 && ret != -ESHUTDOWN) {
 		dbg_event(0x0, "ERRSTAL", ret);
 		dwc3_ep0_stall_and_restart(dwc);
 	}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 62574bf..df0427c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -990,11 +990,16 @@
 	}
 
 	/* always enable Continue on Short Packet */
-	trb->ctrl |= DWC3_TRB_CTRL_CSP;
+	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
+		trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
+		if (req->request.short_not_ok)
+			trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
+	}
 
 	if ((!req->request.no_interrupt && !chain) ||
 			(dwc3_calc_trbs_left(dep) == 0))
-		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
+		trb->ctrl |= DWC3_TRB_CTRL_IOC;
 
 	if (chain)
 		trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -1438,6 +1443,11 @@
 	struct dwc3				*dwc = dep->dwc;
 	int					ret;
 
+	if (!dep->endpoint.desc) {
+		dev_dbg(dwc->dev, "(%s)'s desc is NULL.\n", dep->name);
+		return -EINVAL;
+	}
+
 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
 		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
 		return -EINVAL;
@@ -3419,13 +3429,13 @@
 	trace_dwc3_event(event->raw);
 	/* skip event processing in absence of vbus */
 	if (!dwc->vbus_active) {
-		dev_err(dwc->dev, "SKIP EVT:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT", event->raw);
 		return;
 	}
 
 	/* If run/stop is cleared don't process any more events */
 	if (!dwc->pullups_connected) {
-		dev_err(dwc->dev, "SKIP_EVT_PULLUP:%x", event->raw);
+		dbg_event(0xFF, "SKIP_EVT_PULLUP", event->raw);
 		return;
 	}
 
@@ -3549,6 +3559,15 @@
 
 	evt = dwc->ev_buf;
 
+	/*
+	 * With PCIe legacy interrupts, testing shows that the top-half IRQ
+	 * handler can be called again after HW interrupt deassertion. Make
+	 * sure the bottom-half event handler has completed before caching a
+	 * new event, to avoid losing events.
+	 */
+	if (evt->flags & DWC3_EVENT_PENDING)
+		return IRQ_HANDLED;
+
 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
 	count &= DWC3_GEVNTCOUNT_MASK;
 	if (!count)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 86612ac..f6c7a27 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -231,7 +231,8 @@
 
 		/* Start up the I/O watchdog timer, if it's not running */
 		if (!timer_pending(&ohci->io_watchdog) &&
-				list_empty(&ohci->eds_in_use)) {
+				list_empty(&ohci->eds_in_use) &&
+				!(ohci->flags & OHCI_QUIRK_QEMU)) {
 			ohci->prev_frame_no = ohci_frame_no(ohci);
 			mod_timer(&ohci->io_watchdog,
 					jiffies + IO_WATCHDOG_DELAY);
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bb15096..a84aebe 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -164,6 +164,15 @@
 	return 0;
 }
 
+static int ohci_quirk_qemu(struct usb_hcd *hcd)
+{
+	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+	ohci->flags |= OHCI_QUIRK_QEMU;
+	ohci_dbg(ohci, "enabled qemu quirk\n");
+	return 0;
+}
+
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
 	{
@@ -214,6 +223,13 @@
 		PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
 		.driver_data = (unsigned long)ohci_quirk_amd700,
 	},
+	{
+		.vendor		= PCI_VENDOR_ID_APPLE,
+		.device		= 0x003f,
+		.subvendor	= PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+		.subdevice	= PCI_SUBDEVICE_ID_QEMU,
+		.driver_data	= (unsigned long)ohci_quirk_qemu,
+	},
 
 	/* FIXME for some of the early AMD 760 southbridges, OHCI
 	 * won't work at all.  blacklist them.
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 37f1725..a51b189 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -418,6 +418,7 @@
 #define	OHCI_QUIRK_AMD_PLL	0x200			/* AMD PLL quirk*/
 #define	OHCI_QUIRK_AMD_PREFETCH	0x400			/* pre-fetch for ISO transfer */
 #define	OHCI_QUIRK_GLOBAL_SUSPEND	0x800		/* must suspend ports */
+#define	OHCI_QUIRK_QEMU		0x1000			/* relax timing expectations */
 
 	// there are also chip quirks/bugs in init logic
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 40504c8..d680eb3 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -56,7 +56,7 @@
 	}
 
 	if (max_packet) {
-		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		seg->bounce_buf = kzalloc(max_packet, flags);
 		if (!seg->bounce_buf) {
 			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
 			kfree(seg);
@@ -1726,7 +1726,7 @@
 	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
-		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+		void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
 				flags);
 		if (!buf)
 			goto fail_sp5;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 93f566c..e7d6752 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
 #define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+#define PCI_DEVICE_ID_INTEL_DNV_XHCI			0x19d0
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -166,7 +167,8 @@
 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
-		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+		 pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -175,7 +177,8 @@
 	}
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
 	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
-	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+	     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
 		xhci->quirks |= XHCI_MISSING_CAS;
 
 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index a0bc61f..6cb5ab3 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -210,7 +210,7 @@
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
-		return -ENODEV;
+		return irq;
 
 	/*
 	 * sysdev must point to a device that is known to the system firmware
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 6ddd08a..efecb87 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -194,7 +194,7 @@
 
 	dev->in_ep = in_ep;
 
-	if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
+	if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
 		dev->reads_started = 1;
 
 	dev->size = size;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 37c63cb..0ef29d2 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -554,7 +554,7 @@
 			info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
 
 			/* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
-			info.speed = le16_to_cpu(dev->udev->speed);
+			info.speed = dev->udev->speed;
 			info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
 			info.report_size = dev->report_size;
 
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c8fbe7b..c2e2b2e 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@
 	int subminor;
 	int retval = 0;
 	struct usb_interface *interface;
-	struct tower_reset_reply reset_reply;
+	struct tower_reset_reply *reset_reply;
 	int result;
 
+	reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+	if (!reset_reply) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
 	nonseekable_open(inode, file);
 	subminor = iminor(inode);
 
@@ -364,8 +371,8 @@
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &reset_reply,
-				  sizeof(reset_reply),
+				  reset_reply,
+				  sizeof(*reset_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@
 	mutex_unlock(&dev->lock);
 
 exit:
+	kfree(reset_reply);
 	return retval;
 }
 
@@ -808,7 +816,7 @@
 	struct lego_usb_tower *dev = NULL;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor* endpoint;
-	struct tower_get_version_reply get_version_reply;
+	struct tower_get_version_reply *get_version_reply = NULL;
 	int i;
 	int retval = -ENOMEM;
 	int result;
@@ -886,6 +894,13 @@
 	dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
 	dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
 
+	get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+	if (!get_version_reply) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
 	/* get the firmware version and log it */
 	result = usb_control_msg (udev,
 				  usb_rcvctrlpipe(udev, 0),
@@ -893,18 +908,19 @@
 				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
 				  0,
 				  0,
-				  &get_version_reply,
-				  sizeof(get_version_reply),
+				  get_version_reply,
+				  sizeof(*get_version_reply),
 				  1000);
 	if (result < 0) {
 		dev_err(idev, "LEGO USB Tower get version control request failed\n");
 		retval = result;
 		goto error;
 	}
-	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
-		 "build %d\n", get_version_reply.major,
-		 get_version_reply.minor,
-		 le16_to_cpu(get_version_reply.build_no));
+	dev_info(&interface->dev,
+		 "LEGO USB Tower firmware version is %d.%d build %d\n",
+		 get_version_reply->major,
+		 get_version_reply->minor,
+		 le16_to_cpu(get_version_reply->build_no));
 
 	/* we can register the device now, as it is ready */
 	usb_set_intfdata (interface, dev);
@@ -925,9 +941,11 @@
 		 USB_MAJOR, dev->minor);
 
 exit:
+	kfree(get_version_reply);
 	return retval;
 
 error:
+	kfree(get_version_reply);
 	tower_delete(dev);
 	return retval;
 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8064514..99beda9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2780,10 +2780,11 @@
 	int ret;
 	struct usb_hcd *hcd = musb->hcd;
 
-	MUSB_HST_MODE(musb);
-	musb->xceiv->otg->default_a = 1;
-	musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-
+	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+		MUSB_HST_MODE(musb);
+		musb->xceiv->otg->default_a = 1;
+		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+	}
 	otg_set_host(musb->xceiv->otg, &hcd->self);
 	hcd->self.otg_port = 1;
 	musb->xceiv->otg->host = &hcd->self;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e6959cc..4047426 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -220,6 +220,7 @@
 	u32				dma_remaining;
 	int				src_burst, dst_burst;
 	u16				csr;
+	u32				psize;
 	int				ch;
 	s8				dmareq;
 	s8				sync_dev;
@@ -391,15 +392,19 @@
 
 	if (chdat->tx) {
 		/* Send transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~0x7ff;
+		psize |= chdat->transfer_packet_sz;
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
 	} else {
 		/* Receive transfer_packet_sz packets at a time */
-		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
-			chdat->transfer_packet_sz << 16);
+		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+		psize &= ~(0x7ff << 16);
+		psize |= (chdat->transfer_packet_sz << 16);
+		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
 
 		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
 			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index d951abb..f383e32 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -29,6 +29,11 @@
 #include <linux/usb/usbpd.h>
 #include "usbpd.h"
 
+/* To start the USB stack for USB 3.1 compliance testing */
+static bool usb_compliance_mode;
+module_param(usb_compliance_mode, bool, 0644);
+MODULE_PARM_DESC(usb_compliance_mode, "Start USB stack for USB3.1 compliance testing");
+
 enum usbpd_state {
 	PE_UNKNOWN,
 	PE_ERROR_RECOVERY,
@@ -187,6 +192,8 @@
 
 #define PD_MAX_MSG_ID		7
 
+#define PD_MAX_DATA_OBJ		7
+
 #define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
 	(((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
 	 ((pr) << 8) | ((id) << 9) | ((cnt) << 12))
@@ -308,7 +315,7 @@
 	struct list_head	rx_q;
 	spinlock_t		rx_lock;
 
-	u32			received_pdos[7];
+	u32			received_pdos[PD_MAX_DATA_OBJ];
 	u16			src_cap_id;
 	u8			selected_pdo;
 	u8			requested_pdo;
@@ -490,13 +497,12 @@
 	ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
 	/* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
 
-	/* MessageID incremented regardless of Tx error */
-	pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
-
 	if (ret < 0)
 		return ret;
 	else if (ret != num_data * sizeof(u32))
 		return -EIO;
+
+	pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
 	return 0;
 }
 
@@ -556,6 +562,7 @@
 
 static int pd_eval_src_caps(struct usbpd *pd)
 {
+	int obj_cnt;
 	union power_supply_propval val;
 	u32 first_pdo = pd->received_pdos[0];
 
@@ -572,6 +579,13 @@
 	power_supply_set_property(pd->usb_psy,
 			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
 
+	for (obj_cnt = 1; obj_cnt < PD_MAX_DATA_OBJ; obj_cnt++) {
+		if ((PD_SRC_PDO_TYPE(pd->received_pdos[obj_cnt]) ==
+					PD_SRC_PDO_TYPE_AUGMENTED) &&
+				!PD_APDO_PPS(pd->received_pdos[obj_cnt]))
+			pd->spec_rev = USBPD_REV_30;
+	}
+
 	/* Select the first PDO (vSafe5V) immediately. */
 	pd_select_pdo(pd, 1, 0, 0);
 
@@ -580,6 +594,8 @@
 
 static void pd_send_hard_reset(struct usbpd *pd)
 {
+	union power_supply_propval val = {0};
+
 	usbpd_dbg(&pd->dev, "send hard reset");
 
 	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
@@ -587,6 +603,7 @@
 	pd->hard_reset_count++;
 	pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
 	pd->in_pr_swap = false;
+	power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
 }
 
 static void kick_sm(struct usbpd *pd, int ms)
@@ -602,6 +619,8 @@
 
 static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
 {
+	union power_supply_propval val = {1};
+
 	if (type != HARD_RESET_SIG) {
 		usbpd_err(&pd->dev, "invalid signal (%d) received\n", type);
 		return;
@@ -612,6 +631,9 @@
 	/* Force CC logic to source/sink to keep Rp/Rd unchanged */
 	set_power_role(pd, pd->current_pr);
 	pd->hard_reset_recvd = true;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
 	kick_sm(pd, 0);
 }
 
@@ -662,12 +684,6 @@
 		return;
 	}
 
-	/* if spec rev differs (i.e. is older), update PHY */
-	if (PD_MSG_HDR_REV(header) < pd->spec_rev) {
-		pd->spec_rev = PD_MSG_HDR_REV(header);
-		pd_phy_update_spec_rev(pd->spec_rev);
-	}
-
 	rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
 	if (!rx_msg)
 		return;
@@ -710,7 +726,6 @@
 		.shutdown_cb		= phy_shutdown,
 		.frame_filter_val	= FRAME_FILTER_EN_SOP |
 					  FRAME_FILTER_EN_HARD_RESET,
-		.spec_rev		= USBPD_REV_20,
 	};
 	union power_supply_propval val = {0};
 	unsigned long flags;
@@ -732,6 +747,15 @@
 		break;
 
 	/* Source states */
+	case PE_SRC_DISABLED:
+		/* are we still connected? */
+		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+			pd->current_pr = PR_NONE;
+			kick_sm(pd, 0);
+		}
+
+		break;
+
 	case PE_SRC_STARTUP:
 		if (pd->current_dr == DR_NONE) {
 			pd->current_dr = DR_DFP;
@@ -748,8 +772,6 @@
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
 
-		/* support only PD 2.0 as a source */
-		pd->spec_rev = USBPD_REV_20;
 		pd_reset_protocol(pd);
 
 		if (!pd->in_pr_swap) {
@@ -760,7 +782,6 @@
 
 			phy_params.data_role = pd->current_dr;
 			phy_params.power_role = pd->current_pr;
-			phy_params.spec_rev = pd->spec_rev;
 
 			ret = pd_phy_open(&phy_params);
 			if (ret) {
@@ -772,14 +793,15 @@
 			}
 
 			pd->pd_phy_opened = true;
-		} else {
-			pd_phy_update_spec_rev(pd->spec_rev);
 		}
 
 		pd->current_state = PE_SRC_SEND_CAPABILITIES;
 		if (pd->in_pr_swap) {
 			kick_sm(pd, SWAP_SOURCE_START_TIME);
 			pd->in_pr_swap = false;
+			val.intval = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PR_SWAP, &val);
 			break;
 		}
 
@@ -862,6 +884,10 @@
 
 	case PE_SRC_HARD_RESET:
 	case PE_SNK_HARD_RESET:
+		/* are we still connected? */
+		if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE)
+			pd->current_pr = PR_NONE;
+
 		/* hard reset may sleep; handle it in the workqueue */
 		kick_sm(pd, 0);
 		break;
@@ -888,7 +914,8 @@
 			pd->current_dr = DR_UFP;
 
 			if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
-				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP)
+				pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+				usb_compliance_mode)
 				start_usb_peripheral(pd);
 		}
 
@@ -905,11 +932,6 @@
 		if (!val.intval)
 			break;
 
-		/*
-		 * support up to PD 3.0 as a sink; if source is 2.0,
-		 * phy_msg_received() will handle the downgrade.
-		 */
-		pd->spec_rev = USBPD_REV_30;
 		pd_reset_protocol(pd);
 
 		if (!pd->in_pr_swap) {
@@ -920,7 +942,6 @@
 
 			phy_params.data_role = pd->current_dr;
 			phy_params.power_role = pd->current_pr;
-			phy_params.spec_rev = pd->spec_rev;
 
 			ret = pd_phy_open(&phy_params);
 			if (ret) {
@@ -932,8 +953,6 @@
 			}
 
 			pd->pd_phy_opened = true;
-		} else {
-			pd_phy_update_spec_rev(pd->spec_rev);
 		}
 
 		pd->current_voltage = pd->requested_voltage = 5000000;
@@ -1548,6 +1567,11 @@
 		if (pd->current_state == PE_UNKNOWN)
 			goto sm_done;
 
+		if (pd->vconn_enabled) {
+			regulator_disable(pd->vconn);
+			pd->vconn_enabled = false;
+		}
+
 		usbpd_info(&pd->dev, "USB Type-C disconnect\n");
 
 		if (pd->pd_phy_opened) {
@@ -1567,7 +1591,6 @@
 		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
 		rx_msg_cleanup(pd);
 
-		val.intval = 0;
 		power_supply_set_property(pd->usb_psy,
 				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
 
@@ -1583,11 +1606,7 @@
 			pd->vbus_enabled = false;
 		}
 
-		if (pd->vconn_enabled) {
-			regulator_disable(pd->vconn);
-			pd->vconn_enabled = false;
-		}
-
+		reset_vdm_state(pd);
 		if (pd->current_dr == DR_UFP)
 			stop_usb_peripheral(pd);
 		else if (pd->current_dr == DR_DFP)
@@ -1596,13 +1615,15 @@
 		pd->current_pr = PR_NONE;
 		pd->current_dr = DR_NONE;
 
-		reset_vdm_state(pd);
-
 		if (pd->current_state == PE_ERROR_RECOVERY)
 			/* forced disconnect, wait before resetting to DRP */
 			usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
 				(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
 
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
+
 		/* set due to dual_role class "mode" change */
 		if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
 			val.intval = pd->forced_pr;
@@ -1626,11 +1647,22 @@
 	if (pd->hard_reset_recvd) {
 		pd->hard_reset_recvd = false;
 
-		val.intval = 1;
+		if (pd->requested_current) {
+			val.intval = pd->requested_current = 0;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+		}
+
+		pd->requested_voltage = 5000000;
+		val.intval = pd->requested_voltage;
 		power_supply_set_property(pd->usb_psy,
-				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+				POWER_SUPPLY_PROP_VOLTAGE_MIN, &val);
 
 		pd->in_pr_swap = false;
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
+
 		pd->in_explicit_contract = false;
 		pd->selected_pdo = pd->requested_pdo = 0;
 		pd->rdo = 0;
@@ -1734,14 +1766,8 @@
 
 	case PE_SRC_READY:
 		if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
-			ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
-					default_src_caps,
-					ARRAY_SIZE(default_src_caps), SOP_MSG);
-			if (ret) {
-				usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
-				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
-				break;
-			}
+			pd->current_state = PE_SRC_SEND_CAPABILITIES;
+			kick_sm(pd, 0);
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
 					pd->sink_caps, pd->num_sink_caps,
@@ -1897,6 +1923,9 @@
 
 	case PE_SNK_WAIT_FOR_CAPABILITIES:
 		pd->in_pr_swap = false;
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 
 		if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
 			val.intval = 0;
@@ -1916,15 +1945,6 @@
 					POWER_SUPPLY_PROP_PD_ACTIVE, &val);
 		} else if (pd->hard_reset_count < 3) {
 			usbpd_set_state(pd, PE_SNK_HARD_RESET);
-		} else if (pd->pd_connected) {
-			usbpd_info(&pd->dev, "Sink hard reset count exceeded, forcing reconnect\n");
-
-			val.intval = 0;
-			power_supply_set_property(pd->usb_psy,
-					POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
-					&val);
-
-			usbpd_set_state(pd, PE_ERROR_RECOVERY);
 		} else {
 			usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
 
@@ -2072,6 +2092,9 @@
 			}
 
 			pd->in_pr_swap = true;
+			val.intval = 1;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PR_SWAP, &val);
 			usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 			break;
 		} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
@@ -2215,6 +2238,9 @@
 
 	case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
 		pd->in_pr_swap = true;
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 		pd->in_explicit_contract = false;
 
 		if (pd->vbus_enabled) {
@@ -2255,6 +2281,9 @@
 		}
 
 		pd->in_pr_swap = true;
+		val.intval = 1;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
 		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
 		break;
 
@@ -2314,6 +2343,14 @@
 sm_done:
 	kfree(rx_msg);
 
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	ret = list_empty(&pd->rx_q);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	/* requeue if there are any new/pending RX messages */
+	if (!ret)
+		kick_sm(pd, 0);
+
 	if (!pd->sm_queued)
 		pm_relax(&pd->dev);
 }
@@ -3188,7 +3225,7 @@
 	if (ret)
 		goto free_pd;
 
-	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE);
+	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
 	if (!pd->wq) {
 		ret = -ENOMEM;
 		goto del_pd;
@@ -3310,6 +3347,8 @@
 		pd->dual_role->drv_data = pd;
 	}
 
+	/* default to PD 2.0 support, as either source or sink */
+	pd->spec_rev = USBPD_REV_20;
 	pd->current_pr = PR_NONE;
 	pd->current_dr = DR_NONE;
 	list_add_tail(&pd->instance, &_usbpd);
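
The policy-engine changes above mirror pd->in_pr_swap to the charger through the
POWER_SUPPLY_PROP_PR_SWAP property each time the flag changes. A minimal sketch of
that idiom folded into one helper; usbpd_set_pr_swap() is a hypothetical name (the
patch itself open-codes the three lines at every site) and it assumes the driver's
existing struct usbpd with the in_pr_swap and usb_psy members seen above:

static void usbpd_set_pr_swap(struct usbpd *pd, bool in_pr_swap)
{
	union power_supply_propval val = { .intval = in_pr_swap };

	/* keep the charger's view of the power-role swap in sync */
	pd->in_pr_swap = in_pr_swap;
	power_supply_set_property(pd->usb_psy,
			POWER_SUPPLY_PROP_PR_SWAP, &val);
}
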
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 4caee72..1f5306f 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -108,6 +108,7 @@
 	int tx_status;
 	u8 frame_filter_val;
 	bool in_test_data_mode;
+	bool rx_busy;
 
 	enum data_role data_role;
 	enum power_role power_role;
@@ -334,15 +335,6 @@
 }
 EXPORT_SYMBOL(pd_phy_update_roles);
 
-int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
-	struct usb_pdphy *pdphy = __pdphy;
-
-	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
-			MSG_CONFIG_SPEC_REV_MASK, rev);
-}
-EXPORT_SYMBOL(pd_phy_update_spec_rev);
-
 int pd_phy_open(struct pd_phy_params *params)
 {
 	int ret;
@@ -377,7 +369,9 @@
 	if (ret)
 		return ret;
 
-	ret = pd_phy_update_spec_rev(params->spec_rev);
+	/* configure the PHY's message spec revision to PD 2.0 */
+	ret = pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+			MSG_CONFIG_SPEC_REV_MASK, USBPD_REV_20);
 	if (ret)
 		return ret;
 
@@ -492,6 +486,12 @@
 		return -EINVAL;
 	}
 
+	ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
+	if (ret || val || pdphy->rx_busy) {
+		dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
+		return -EBUSY;
+	}
+
 	pdphy->tx_status = -EINPROGRESS;
 
 	/* write 2 byte SOP message header */
@@ -664,6 +664,15 @@
 			BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
 }
 
+static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->rx_busy = true;
+
+	return IRQ_WAKE_THREAD;
+}
+
 static irqreturn_t pdphy_msg_rx_irq_thread(int irq, void *data)
 {
 	u8 size, rx_status, frame_type;
@@ -720,6 +729,7 @@
 		false);
 	pdphy->rx_bytes += size + 1;
 done:
+	pdphy->rx_busy = false;
 	return IRQ_HANDLED;
 }
 
@@ -805,7 +815,7 @@
 		return ret;
 
 	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
-		&pdphy->msg_rx_irq, "msg-rx", NULL,
+		&pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
 		pdphy_msg_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
 	if (ret < 0)
 		return ret;
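
The qpnp-pdphy change registers a hard IRQ handler alongside the threaded one so
that rx_busy is raised the moment the interrupt fires, before the thread runs,
which lets the transmit path refuse to start while an RX is pending. A minimal
sketch of that two-stage pattern, using a hypothetical my_dev structure rather
than the driver's own types:

#include <linux/interrupt.h>

struct my_dev {
	bool rx_busy;
};

static irqreturn_t my_rx_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/* hard IRQ context: mark RX pending, defer the real work */
	dev->rx_busy = true;
	return IRQ_WAKE_THREAD;
}

static irqreturn_t my_rx_irq_thread(int irq, void *data)
{
	struct my_dev *dev = data;

	/* ... read and process the received message ... */
	dev->rx_busy = false;
	return IRQ_HANDLED;
}

static int my_request_rx_irq(struct my_dev *dev, int irq)
{
	return request_threaded_irq(irq, my_rx_irq, my_rx_irq_thread,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT, "msg-rx", dev);
}
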
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
index b2663ad..1087017 100644
--- a/drivers/usb/pd/usbpd.h
+++ b/drivers/usb/pd/usbpd.h
@@ -68,7 +68,6 @@
 	enum data_role	data_role;
 	enum power_role power_role;
 	u8		frame_filter_val;
-	u8		spec_rev;
 };
 
 #if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
@@ -77,7 +76,6 @@
 int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
 	enum pd_msg_type type, unsigned int timeout_ms);
 int pd_phy_update_roles(enum data_role dr, enum power_role pr);
-int pd_phy_update_spec_rev(enum pd_spec_rev rev);
 void pd_phy_close(void);
 #else
 static inline int pd_phy_open(struct pd_phy_params *params)
@@ -101,11 +99,6 @@
 	return -ENODEV;
 }
 
-static inline int pd_phy_update_spec_rev(enum pd_spec_rev rev)
-{
-	return -ENODEV;
-}
-
 static inline void pd_phy_close(void)
 {
 }
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 8bdd9fd..59f5379 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -201,14 +201,16 @@
 
 	if (enable) {
 		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+		val = readb_relaxed(phy->base + autonomous_mode_offset);
+		val |= ARCVR_DTCT_EN;
 		if (phy->phy.flags & DEVICE_IN_SS_MODE) {
-			val =
-			readb_relaxed(phy->base + autonomous_mode_offset);
-			val |= ARCVR_DTCT_EN;
 			val |= ALFPS_DTCT_EN;
 			val &= ~ARCVR_DTCT_EVENT_SEL;
-			writeb_relaxed(val, phy->base + autonomous_mode_offset);
+		} else {
+			val &= ~ALFPS_DTCT_EN;
+			val |= ARCVR_DTCT_EVENT_SEL;
 		}
+		writeb_relaxed(val, phy->base + autonomous_mode_offset);
 		msm_ssusb_qmp_clamp_enable(phy, true);
 	} else {
 		msm_ssusb_qmp_clamp_enable(phy, false);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1dc75db..1939496 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
 	{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
-	{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
-		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
+	{ USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
 	{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -1508,9 +1508,9 @@
 					(new_serial.flags & ASYNC_FLAGS));
 	priv->custom_divisor = new_serial.custom_divisor;
 
+check_and_exit:
 	write_latency_timer(port);
 
-check_and_exit:
 	if ((old_priv.flags & ASYNC_SPD_MASK) !=
 	     (priv->flags & ASYNC_SPD_MASK)) {
 		if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 71fb9e5..4fcf1ce 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -882,6 +882,8 @@
 /* Olimex */
 #define OLIMEX_VID			0x15BA
 #define OLIMEX_ARM_USB_OCD_PID		0x0003
+#define OLIMEX_ARM_USB_TINY_PID	0x0004
+#define OLIMEX_ARM_USB_TINY_H_PID	0x002a
 #define OLIMEX_ARM_USB_OCD_H_PID	0x002b
 
 /*
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index f1a8fdc..e98532f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2349,8 +2349,11 @@
 	if (!baud) {
 		/* pick a default, any default... */
 		baud = 9600;
-	} else
+	} else {
+		/* Avoid a zero divisor. */
+		baud = min(baud, 461550);
 		tty_encode_baud_rate(tty, baud, baud);
+	}
 
 	edge_port->baud_rate = baud;
 	config->wBaudRate = (__u16)((461550L + baud/2) / baud);
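
The clamp above matters because config->wBaudRate is later used as a divisor: the
rounded expression (461550 + baud/2) / baud truncates to zero once the requested
rate exceeds roughly 923100 baud (a figure from the arithmetic, not from the
driver). A quick user-space check of the values:

#include <stdio.h>

int main(void)
{
	long rates[] = { 9600, 461550, 921600, 1000000 };
	int i;

	for (i = 0; i < 4; i++) {
		long baud = rates[i];
		unsigned short w = (unsigned short)((461550L + baud / 2) / baud);

		/* prints 48, 1, 1 and finally 0 -- the zero-divisor case */
		printf("baud %7ld -> wBaudRate %u\n", baud, w);
	}
	return 0;
}
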
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index edbc81f..70f346f 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@
 		return -ENOMEM;
 
 	divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
-	put_unaligned_le32(cpu_to_le32(divisor), buf);
+	put_unaligned_le32(divisor, buf);
 	rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
 				MCT_U232_SET_BAUD_RATE_REQUEST,
 				MCT_U232_SET_REQUEST_TYPE,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index af67a0d..3bf61ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -281,6 +281,7 @@
 #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+#define TELIT_PRODUCT_ME910			0x1100
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
@@ -640,6 +641,11 @@
 	.reserved = BIT(5) | BIT(6),
 };
 
+static const struct option_blacklist_info telit_me910_blacklist = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(1) | BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -1235,6 +1241,8 @@
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 38b3f0d..fd509ed6c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@
 	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
 	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
+	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 02bdaa9..4340b49 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@
 #define SD_BLOCK_LEN  9
 
 struct ene_ub6250_info {
+
+	/* I/O bounce buffer */
+	u8		*bbuf;
+
 	/* for 6250 code */
 	struct SD_STATUS	SD_Status;
 	struct MS_STATUS	MS_Status;
@@ -493,8 +497,11 @@
 
 static void ene_ub6250_info_destructor(void *extra)
 {
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
+
 	if (!extra)
 		return;
+	kfree(info->bbuf);
 }
 
 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -860,8 +867,9 @@
 		u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 	u32 bn = PhyBlockAddr * 0x20 + PageNum;
 
 	result = ene_load_bincode(us, MS_RW_PATTERN);
@@ -901,7 +909,7 @@
 	bcb->CDB[2]     = (unsigned char)(PhyBlockAddr>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -910,9 +918,9 @@
 	ExtraDat->status0  = 0x10;  /* Not yet,fireware support */
 
 	ExtraDat->status1  = 0x00;  /* Not yet,fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1332,8 +1340,9 @@
 				u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 	int result;
-	u8 ExtBuf[4];
 
 	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -1347,7 +1356,7 @@
 	bcb->CDB[2]     = (unsigned char)(PhyBlock>>16);
 	bcb->CDB[6]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
@@ -1355,9 +1364,9 @@
 	ExtraDat->intr     = 0x80;  /* Not yet, waiting for fireware support */
 	ExtraDat->status0  = 0x10;  /* Not yet, waiting for fireware support */
 	ExtraDat->status1  = 0x00;  /* Not yet, waiting for fireware support */
-	ExtraDat->ovrflg   = ExtBuf[0];
-	ExtraDat->mngflg   = ExtBuf[1];
-	ExtraDat->logadr   = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+	ExtraDat->ovrflg   = bbuf[0];
+	ExtraDat->mngflg   = bbuf[1];
+	ExtraDat->logadr   = memstick_logaddr(bbuf[2], bbuf[3]);
 
 	return USB_STOR_TRANSPORT_GOOD;
 }
@@ -1558,9 +1567,9 @@
 	u16 PhyBlock, newblk, i;
 	u16 LogStart, LogEnde;
 	struct ms_lib_type_extdat extdat;
-	u8 buf[0x200];
 	u32 count = 0, index = 0;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
 		ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1574,14 +1583,16 @@
 			}
 
 			if (count == PhyBlock) {
-				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
+				ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
+						bbuf);
 				count += 0x80;
 			}
 			index = (PhyBlock % 0x80) * 4;
 
-			extdat.ovrflg = buf[index];
-			extdat.mngflg = buf[index+1];
-			extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
+			extdat.ovrflg = bbuf[index];
+			extdat.mngflg = bbuf[index+1];
+			extdat.logadr = memstick_logaddr(bbuf[index+2],
+					bbuf[index+3]);
 
 			if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
 				ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2064,9 +2075,9 @@
 {
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
-	u8 buf[0x200];
 	u16 MSP_BlockSize, MSP_UserAreaBlocks;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	printk(KERN_INFO "transport --- ENE_MSInit\n");
 
@@ -2085,13 +2096,13 @@
 	bcb->CDB[0]     = 0xF1;
 	bcb->CDB[1]     = 0x01;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		printk(KERN_ERR "Execution MS Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	/* the same part to test ENE */
-	info->MS_Status = *(struct MS_STATUS *)&buf[0];
+	info->MS_Status = *(struct MS_STATUS *) bbuf;
 
 	if (info->MS_Status.Insert && info->MS_Status.Ready) {
 		printk(KERN_INFO "Insert     = %x\n", info->MS_Status.Insert);
@@ -2100,15 +2111,15 @@
 		printk(KERN_INFO "IsMSPHG    = %x\n", info->MS_Status.IsMSPHG);
 		printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
 		if (info->MS_Status.IsMSPro) {
-			MSP_BlockSize      = (buf[6] << 8) | buf[7];
-			MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
+			MSP_BlockSize      = (bbuf[6] << 8) | bbuf[7];
+			MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
 			info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
 		} else {
 			ms_card_init(us); /* Card is MS (to ms.c)*/
 		}
 		usb_stor_dbg(us, "MS Init Code OK !!\n");
 	} else {
-		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
@@ -2118,9 +2129,9 @@
 static int ene_sd_init(struct us_data *us)
 {
 	int result;
-	u8  buf[0x200];
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+	u8 *bbuf = info->bbuf;
 
 	usb_stor_dbg(us, "transport --- ENE_SDInit\n");
 	/* SD Init Part-1 */
@@ -2154,17 +2165,17 @@
 	bcb->Flags              = US_BULK_FLAG_IN;
 	bcb->CDB[0]             = 0xF1;
 
-	result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+	result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	info->SD_Status =  *(struct SD_STATUS *)&buf[0];
+	info->SD_Status =  *(struct SD_STATUS *) bbuf;
 	if (info->SD_Status.Insert && info->SD_Status.Ready) {
 		struct SD_STATUS *s = &info->SD_Status;
 
-		ene_get_card_status(us, (unsigned char *)&buf);
+		ene_get_card_status(us, bbuf);
 		usb_stor_dbg(us, "Insert     = %x\n", s->Insert);
 		usb_stor_dbg(us, "Ready      = %x\n", s->Ready);
 		usb_stor_dbg(us, "IsMMC      = %x\n", s->IsMMC);
@@ -2172,7 +2183,7 @@
 		usb_stor_dbg(us, "HiSpeed    = %x\n", s->HiSpeed);
 		usb_stor_dbg(us, "WtP        = %x\n", s->WtP);
 	} else {
-		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]);
+		usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 	return USB_STOR_TRANSPORT_GOOD;
@@ -2182,13 +2193,15 @@
 static int ene_init(struct us_data *us)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+	u8 *bbuf = info->bbuf;
 
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
+	misc_reg03 = bbuf[0];
 	if (misc_reg03 & 0x01) {
 		if (!info->SD_Status.Ready) {
 			result = ene_sd_init(us);
@@ -2305,8 +2318,9 @@
 			 const struct usb_device_id *id)
 {
 	int result;
-	u8  misc_reg03 = 0;
+	u8  misc_reg03;
 	struct us_data *us;
+	struct ene_ub6250_info *info;
 
 	result = usb_stor_probe1(&us, intf, id,
 		   (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2315,11 +2329,16 @@
 		return result;
 
 	/* FIXME: where should the code alloc extra buf ? */
-	if (!us->extra) {
-		us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
-		if (!us->extra)
-			return -ENOMEM;
-		us->extra_destructor = ene_ub6250_info_destructor;
+	us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
+	if (!us->extra)
+		return -ENOMEM;
+	us->extra_destructor = ene_ub6250_info_destructor;
+
+	info = (struct ene_ub6250_info *)(us->extra);
+	info->bbuf = kmalloc(512, GFP_KERNEL);
+	if (!info->bbuf) {
+		kfree(us->extra);
+		return -ENOMEM;
 	}
 
 	us->transport_name = "ene_ub6250";
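
The ene_ub6250 hunks replace the on-stack u8 buf[0x200]/ExtBuf[4] arrays with a
single heap-allocated bounce buffer, since buffers handed to the USB transport
should be DMA-safe and not live on the stack. A minimal sketch of the allocate/free
pairing, with abbreviated types; my_info and the helper names are illustrative, and
struct us_data is the usb-storage type already used in this file:

#include <linux/slab.h>
#include <linux/types.h>

struct my_info {
	u8 *bbuf;			/* I/O bounce buffer */
};

static void my_info_destructor(void *extra)
{
	struct my_info *info = extra;

	if (!info)
		return;
	kfree(info->bbuf);		/* us->extra itself is freed by the usb-storage core */
}

static int my_info_alloc(struct us_data *us)
{
	struct my_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->bbuf = kmalloc(512, GFP_KERNEL);
	if (!info->bbuf) {
		kfree(info);
		return -ENOMEM;
	}

	us->extra = info;
	us->extra_destructor = my_info_destructor;
	return 0;
}
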
@@ -2331,12 +2350,13 @@
 		return result;
 
 	/* probe card type */
-	result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+	result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
 	if (result != USB_STOR_XFER_GOOD) {
 		usb_stor_disconnect(intf);
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
+	misc_reg03 = info->bbuf[0];
 	if (!(misc_reg03 & 0x01)) {
 		pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
 			"It does not support SM cards.\n");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 6345e85..a50cf45 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@
 static
 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
 {
+	struct usb_device *udev = interface_to_usbdev(iface);
 	struct i1480_usb *i1480_usb;
 	struct i1480 *i1480;
 	struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@
 			iface->cur_altsetting->desc.bInterfaceNumber);
 		goto error;
 	}
-	if (iface->num_altsetting > 1
-	    && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+	if (iface->num_altsetting > 1 &&
+			le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
 		/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
 		result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
 		if (result < 0)
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 1626892..1cf907e 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -533,6 +533,10 @@
 		return "4:3";
 	case HDMI_PICTURE_ASPECT_16_9:
 		return "16:9";
+	case HDMI_PICTURE_ASPECT_64_27:
+		return "64:27";
+	case HDMI_PICTURE_ASPECT_256_135:
+		return "256:135";
 	case HDMI_PICTURE_ASPECT_RESERVED:
 		return "Reserved";
 	}
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 99ebf6e..5615f40 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@
 		return -ENODEV;
 	}
 
+	if (iface_desc->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	/* check out the endpoint: it has to be Interrupt & IN */
 	endpoint = &iface_desc->endpoint[0].desc;
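
The added bNumEndpoints check guards the unconditional endpoint[0] dereference that
follows: a device reporting zero endpoints would otherwise walk off the descriptor
array. A short sketch of the same validation in a generic probe routine (names are
illustrative, not the watchdog driver's):

#include <linux/usb.h>

static int my_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct usb_host_interface *iface_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep;

	/* never index endpoint[] before checking how many there are */
	if (iface_desc->desc.bNumEndpoints < 1)
		return -ENODEV;

	ep = &iface_desc->endpoint[0].desc;
	if (!usb_endpoint_is_int_in(ep))
		return -ENODEV;

	/* ... normal probe work ... */
	return 0;
}
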
 
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 80bb956..d1bbdc9 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -300,7 +300,7 @@
 	} else {
 		memset(buf, 0, 8);
 	}
-	memcpy(buf + 8, iname->name + iname->len - 16, 16);
+	memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
 	oname->name[0] = '_';
 	oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
 	return 0;
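
The new offset expression samples a 16-byte window of the ciphertext name that is
aligned to a 16-byte boundary and ends before the final byte, i.e. offset
(len - 17) & ~15, instead of the unaligned last 16 bytes; the ext4 and f2fs lookup
changes below compare the same window, so the '_'-prefixed digest names built here
keep matching their directory entries. A small user-space illustration of the
arithmetic (the sample lengths are arbitrary):

#include <stdio.h>

int main(void)
{
	int lens[] = { 33, 48, 64, 100 };
	int i;

	for (i = 0; i < 4; i++) {
		int len = lens[i];
		int new_off = (len - 17) & ~15;	/* window used after the patch */
		int old_off = len - 16;		/* window used before the patch */

		printf("len %3d: old [%2d..%2d]  new [%2d..%2d]\n",
		       len, old_off, old_off + 15, new_off, new_off + 15);
	}
	return 0;
}
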
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index c4a389a..423a21c 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1255,9 +1255,9 @@
 	if (unlikely(!name)) {
 		if (fname->usr_fname->name[0] == '_') {
 			int ret;
-			if (de->name_len < 16)
+			if (de->name_len <= 32)
 				return 0;
-			ret = memcmp(de->name + de->name_len - 16,
+			ret = memcmp(de->name + ((de->name_len - 17) & ~15),
 				     fname->crypto_buf.name + 8, 16);
 			return (ret == 0) ? 1 : 0;
 		}
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index ebdc90f..11f3717 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -130,19 +130,29 @@
 			continue;
 		}
 
-		/* encrypted case */
+		if (de->hash_code != namehash)
+			goto not_match;
+
 		de_name.name = d->filename[bit_pos];
 		de_name.len = le16_to_cpu(de->name_len);
 
-		/* show encrypted name */
-		if (fname->hash) {
-			if (de->hash_code == fname->hash)
-				goto found;
-		} else if (de_name.len == name->len &&
-			de->hash_code == namehash &&
-			!memcmp(de_name.name, name->name, name->len))
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+		if (unlikely(!name->name)) {
+			if (fname->usr_fname->name[0] == '_') {
+				if (de_name.len > 32 &&
+					!memcmp(de_name.name + ((de_name.len - 17) & ~15),
+						fname->crypto_buf.name + 8, 16))
+					goto found;
+				goto not_match;
+			}
+			name->name = fname->crypto_buf.name;
+			name->len = fname->crypto_buf.len;
+		}
+#endif
+		if (de_name.len == name->len &&
+				!memcmp(de_name.name, name->name, name->len))
 			goto found;
-
+not_match:
 		if (max_slots && max_len > *max_slots)
 			*max_slots = max_len;
 		max_len = 0;
@@ -170,12 +180,7 @@
 	struct f2fs_dir_entry *de = NULL;
 	bool room = false;
 	int max_slots;
-	f2fs_hash_t namehash;
-
-	if(fname->hash)
-		namehash = cpu_to_le32(fname->hash);
-	else
-		namehash = f2fs_dentry_hash(&name);
+	f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
 
 	nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
 	nblock = bucket_blocks(level);
@@ -539,7 +544,7 @@
 
 	level = 0;
 	slots = GET_DENTRY_SLOTS(new_name->len);
-	dentry_hash = f2fs_dentry_hash(new_name);
+	dentry_hash = f2fs_dentry_hash(new_name, NULL);
 
 	current_depth = F2FS_I(dir)->i_current_depth;
 	if (F2FS_I(dir)->chash == dentry_hash) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 3a1640b..c12f695 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2016,7 +2016,8 @@
 /*
  * hash.c
  */
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname);
 
 /*
  * node.c
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 71b7206..eb2e031 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,7 +70,8 @@
 		*buf++ = pad;
 }
 
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+				struct fscrypt_name *fname)
 {
 	__u32 hash;
 	f2fs_hash_t f2fs_hash;
@@ -79,6 +80,10 @@
 	const unsigned char *name = name_info->name;
 	size_t len = name_info->len;
 
+	/* encrypted bigname case */
+	if (fname && !fname->disk_name.name)
+		return cpu_to_le32(fname->hash);
+
 	if (is_dot_dotdot(name_info))
 		return 0;
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 1427db9..e14edc9 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -312,7 +312,7 @@
 		return NULL;
 	}
 
-	namehash = f2fs_dentry_hash(&name);
+	namehash = f2fs_dentry_hash(&name, fname);
 
 	inline_dentry = inline_data_addr(ipage);
 
@@ -549,7 +549,7 @@
 
 	f2fs_wait_on_page_writeback(ipage, NODE, true);
 
-	name_hash = f2fs_dentry_hash(new_name);
+	name_hash = f2fs_dentry_hash(new_name, NULL);
 	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
 	f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
 
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a7943f86..74a2b44 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1805,6 +1805,8 @@
 
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 {
+	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
+	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
 	int type = CURSEG_HOT_DATA;
 	int err;
 
@@ -1831,6 +1833,11 @@
 			return err;
 	}
 
+	/* sanity check for summary blocks */
+	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
+			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES)
+		return -EINVAL;
+
 	return 0;
 }
 
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 7e0c002..b81998e 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1424,6 +1424,8 @@
 	unsigned int total, fsmeta;
 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+	unsigned int main_segs, blocks_per_seg;
+	int i;
 
 	total = le32_to_cpu(raw_super->segment_count);
 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1435,6 +1437,22 @@
 	if (unlikely(fsmeta >= total))
 		return 1;
 
+	main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+	blocks_per_seg = sbi->blocks_per_seg;
+
+	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+		    le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+			return 1;
+		}
+	}
+	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+		    le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+			return 1;
+		}
+	}
+
 	if (unlikely(f2fs_cp_error(sbi))) {
 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
 		return 1;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e894d3..fc9b049 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2385,8 +2385,10 @@
 		if (status != 0)
 			return status;
 	}
-	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
+	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
+		nfs4_sequence_free_slot(&o_res->seq_res);
 		nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
+	}
 	return 0;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 965db47..142a74f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,13 +29,14 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount,
+					gfp_t gfp_flags)
 {
 	p->npages = pagecount;
 	if (pagecount <= ARRAY_SIZE(p->page_array))
 		p->pagevec = p->page_array;
 	else {
-		p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+		p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
 		if (!p->pagevec)
 			p->npages = 0;
 	}
@@ -681,6 +682,7 @@
 {
 	struct nfs_pgio_mirror *new;
 	int i;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	desc->pg_moreio = 0;
 	desc->pg_inode = inode;
@@ -700,8 +702,10 @@
 	if (pg_ops->pg_get_mirror_count) {
 		/* until we have a request, we don't have an lseg and no
 		 * idea how many mirrors there will be */
+		if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+			gfp_flags = GFP_NOIO;
 		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
-			      sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
+			      sizeof(struct nfs_pgio_mirror), gfp_flags);
 		desc->pg_mirrors_dynamic = new;
 		desc->pg_mirrors = new;
 
@@ -755,9 +759,12 @@
 	struct list_head *head = &mirror->pg_list;
 	struct nfs_commit_info cinfo;
 	unsigned int pagecount, pageused;
+	gfp_t gfp_flags = GFP_KERNEL;
 
 	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
-	if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
+	if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
+		gfp_flags = GFP_NOIO;
+	if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) {
 		nfs_pgio_error(hdr);
 		desc->pg_error = -ENOMEM;
 		return desc->pg_error;
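
The pagelist.c hunks above make the allocation mode depend on the I/O direction:
on the write path a GFP_KERNEL allocation could itself wait on further I/O while
the system is trying to write pages out, which is the usual reason for switching
to GFP_NOIO there, while reads keep GFP_KERNEL. A minimal sketch of the selection
(my_alloc_pagevec is an illustrative name, not an NFS function):

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/slab.h>

static struct page **my_alloc_pagevec(unsigned int npages, fmode_t rw_mode)
{
	gfp_t gfp = GFP_KERNEL;

	/* don't initiate filesystem/block I/O while servicing writeback */
	if (rw_mode == FMODE_WRITE)
		gfp = GFP_NOIO;

	return kcalloc(npages, sizeof(struct page *), gfp);
}
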
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5321183..e4772a8 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -548,9 +548,9 @@
 {
 	nfs_unlock_request(req);
 	nfs_end_page_writeback(req);
-	nfs_release_request(req);
 	generic_error_remove_page(page_file_mapping(req->wb_page),
 				  req->wb_page);
+	nfs_release_request(req);
 }
 
 /*
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index abb09b5..650226f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1273,7 +1273,8 @@
 		return NULL;
 	}
 
-	if (!(exp->ex_layout_types & (1 << layout_type))) {
+	if (layout_type >= LAYOUT_TYPE_MAX ||
+	    !(exp->ex_layout_types & (1 << layout_type))) {
 		dprintk("%s: layout type %d not supported\n",
 			__func__, layout_type);
 		return NULL;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c2d2895..2ee80e1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -4081,8 +4081,7 @@
 		struct nfsd4_getdeviceinfo *gdev)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[gdev->gd_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	u32 starting_len = xdr->buf->len, needed_len;
 	__be32 *p;
 
@@ -4099,6 +4098,7 @@
 
 	/* If maxcount is 0 then just update notifications */
 	if (gdev->gd_maxcount != 0) {
+		ops = nfsd4_layout_ops[gdev->gd_layout_type];
 		nfserr = ops->encode_getdeviceinfo(xdr, gdev);
 		if (nfserr) {
 			/*
@@ -4151,8 +4151,7 @@
 		struct nfsd4_layoutget *lgp)
 {
 	struct xdr_stream *xdr = &resp->xdr;
-	const struct nfsd4_layout_ops *ops =
-		nfsd4_layout_ops[lgp->lg_layout_type];
+	const struct nfsd4_layout_ops *ops;
 	__be32 *p;
 
 	dprintk("%s: err %d\n", __func__, nfserr);
@@ -4175,6 +4174,7 @@
 	*p++ = cpu_to_be32(lgp->lg_seg.iomode);
 	*p++ = cpu_to_be32(lgp->lg_layout_type);
 
+	ops = nfsd4_layout_ops[lgp->lg_layout_type];
 	nfserr = ops->encode_layoutget(xdr, lgp);
 out:
 	kfree(lgp->lg_content);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 7f99c96..cef9885 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -294,27 +294,37 @@
 		}
 
 		ret = copy_event_to_user(group, kevent, buf);
+		if (unlikely(ret == -EOPENSTALE)) {
+			/*
+			 * We cannot report events with a stale fd, so drop it.
+			 * Setting ret to 0 will continue the event loop and
+			 * do the right thing if there are no more events to
+			 * read (i.e. return bytes read, -EAGAIN or wait).
+			 */
+			ret = 0;
+		}
+
 		/*
 		 * Permission events get queued to wait for response.  Other
 		 * events can be destroyed now.
 		 */
 		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
 			fsnotify_destroy_event(group, kevent);
-			if (ret < 0)
-				break;
 		} else {
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-			if (ret < 0) {
+			if (ret <= 0) {
 				FANOTIFY_PE(kevent)->response = FAN_DENY;
 				wake_up(&group->fanotify_data.access_waitq);
-				break;
+			} else {
+				spin_lock(&group->notification_lock);
+				list_add_tail(&kevent->list,
+					&group->fanotify_data.access_list);
+				spin_unlock(&group->notification_lock);
 			}
-			spin_lock(&group->notification_lock);
-			list_add_tail(&kevent->list,
-				      &group->fanotify_data.access_list);
-			spin_unlock(&group->notification_lock);
 #endif
 		}
+		if (ret < 0)
+			break;
 		buf += ret;
 		count -= ret;
 	}
diff --git a/fs/pnode.c b/fs/pnode.c
index b5f97c6..e4e428d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -504,9 +504,14 @@
 	if (!IS_MNT_NEW(cur) && !list_empty(&cur->mnt_slave_list))
 		return first_slave(cur);
 	do {
-		if (cur->mnt_slave.next != &cur->mnt_master->mnt_slave_list)
-			return next_slave(cur);
-		cur = cur->mnt_master;
+		struct mount *master = cur->mnt_master;
+
+		if (!master || cur->mnt_slave.next != &master->mnt_slave_list) {
+			struct mount *next = next_slave(cur);
+
+			return (next == root) ? NULL : next;
+		}
+		cur = master;
 	} while (cur != root);
 	return NULL;
 }
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5f2dc20..6047471 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -471,6 +471,7 @@
 		ent->data = NULL;
 		ent->proc_fops = NULL;
 		ent->proc_iops = NULL;
+		parent->nlink++;
 		if (proc_register(parent, ent) < 0) {
 			kfree(ent);
 			parent->nlink--;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index a231681..e9426a6 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -109,14 +109,16 @@
 		goto out;
 
 	/* If our top's inode is gone, we may be out of date */
-	inode = d_inode(dentry);
+	inode = igrab(d_inode(dentry));
 	if (inode) {
 		data = top_data_get(SDCARDFS_I(inode));
-		if (data->abandoned) {
+		if (!data || data->abandoned) {
 			d_drop(dentry);
 			err = 0;
 		}
-		data_put(data);
+		if (data)
+			data_put(data);
+		iput(inode);
 	}
 
 out:
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f04ab23..f3469ad 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -812,9 +812,8 @@
 	uspi->s_dirblksize = UFS_SECTOR_SIZE;
 	super_block_offset=UFS_SBLOCK;
 
-	/* Keep 2Gig file limit. Some UFS variants need to override 
-	   this but as I don't know which I'll let those in the know loosen
-	   the rules */
+	sb->s_maxbytes = MAX_LFS_FILESIZE;
+
 	switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
 	case UFS_MOUNT_UFSTYPE_44BSD:
 		UFSD("ufstype=44bsd\n");
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 5a508b0..2a8cbd1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2208,8 +2208,10 @@
 		}
 		temp = xfs_bmap_worst_indlen(bma->ip, temp);
 		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
-		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
+		diff = (int)(temp + temp2 -
+			     (startblockval(PREV.br_startblock) -
+			      (bma->cur ?
+			       bma->cur->bc_private.b.allocated : 0)));
 		if (diff > 0) {
 			error = xfs_mod_fdblocks(bma->ip->i_mount,
 						 -((int64_t)diff), false);
@@ -2266,7 +2268,6 @@
 		temp = da_new;
 		if (bma->cur)
 			temp += bma->cur->bc_private.b.allocated;
-		ASSERT(temp <= da_old);
 		if (temp < da_old)
 			xfs_mod_fdblocks(bma->ip->i_mount,
 					(int64_t)(da_old - temp), false);
@@ -3964,7 +3965,7 @@
 {
 	struct xfs_trans	*tp = ap->tp;
 	struct xfs_mount	*mp = tp->t_mountp;
-	xfs_agblock_t		bno;
+	xfs_fsblock_t		bno;
 	struct xfs_alloc_arg	args;
 	int			error;
 
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 2849d3f..91c6891 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4376,7 +4376,7 @@
 			xfs_btree_readahead_ptr(cur, ptr, 1);
 
 			/* save for the next iteration of the loop */
-			lptr = *ptr;
+			xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
 		}
 
 		/* for each buffer in the level */
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index ef9f6ea..699a51b 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -126,6 +126,7 @@
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index c6809ff..e84af09 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -629,6 +629,112 @@
 }
 #endif	/* DEBUG */
 
+/* Verify the consistency of an inline directory. */
+int
+xfs_dir2_sf_verify(
+	struct xfs_inode		*ip)
+{
+	struct xfs_mount		*mp = ip->i_mount;
+	struct xfs_dir2_sf_hdr		*sfp;
+	struct xfs_dir2_sf_entry	*sfep;
+	struct xfs_dir2_sf_entry	*next_sfep;
+	char				*endp;
+	const struct xfs_dir_ops	*dops;
+	struct xfs_ifork		*ifp;
+	xfs_ino_t			ino;
+	int				i;
+	int				i8count;
+	int				offset;
+	int				size;
+	int				error;
+	__uint8_t			filetype;
+
+	ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+	/*
+	 * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+	 * so we can only trust the mountpoint to have the right pointer.
+	 */
+	dops = xfs_dir_get_ops(mp, NULL);
+
+	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+	size = ifp->if_bytes;
+
+	/*
+	 * Give up if the directory is way too short.
+	 */
+	if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+	    size < xfs_dir2_sf_hdr_size(sfp->i8count))
+		return -EFSCORRUPTED;
+
+	endp = (char *)sfp + size;
+
+	/* Check .. entry */
+	ino = dops->sf_get_parent_ino(sfp);
+	i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
+	error = xfs_dir_ino_validate(mp, ino);
+	if (error)
+		return error;
+	offset = dops->data_first_offset;
+
+	/* Check all reported entries */
+	sfep = xfs_dir2_sf_firstentry(sfp);
+	for (i = 0; i < sfp->count; i++) {
+		/*
+		 * struct xfs_dir2_sf_entry has a variable length.
+		 * Check the fixed-offset parts of the structure are
+		 * within the data buffer.
+		 */
+		if (((char *)sfep + sizeof(*sfep)) >= endp)
+			return -EFSCORRUPTED;
+
+		/* Don't allow names with known bad length. */
+		if (sfep->namelen == 0)
+			return -EFSCORRUPTED;
+
+		/*
+		 * Check that the variable-length part of the structure is
+		 * within the data buffer.  The next entry starts after the
+		 * name component, so nextentry is an acceptable test.
+		 */
+		next_sfep = dops->sf_nextentry(sfp, sfep);
+		if (endp < (char *)next_sfep)
+			return -EFSCORRUPTED;
+
+		/* Check that the offsets always increase. */
+		if (xfs_dir2_sf_get_offset(sfep) < offset)
+			return -EFSCORRUPTED;
+
+		/* Check the inode number. */
+		ino = dops->sf_get_ino(sfp, sfep);
+		i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
+		error = xfs_dir_ino_validate(mp, ino);
+		if (error)
+			return error;
+
+		/* Check the file type. */
+		filetype = dops->sf_get_ftype(sfep);
+		if (filetype >= XFS_DIR3_FT_MAX)
+			return -EFSCORRUPTED;
+
+		offset = xfs_dir2_sf_get_offset(sfep) +
+				dops->data_entsize(sfep->namelen);
+
+		sfep = next_sfep;
+	}
+	if (i8count != sfp->i8count)
+		return -EFSCORRUPTED;
+	if ((void *)sfep != (void *)endp)
+		return -EFSCORRUPTED;
+
+	/* Make sure this whole thing ought to be in local format. */
+	if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+	    (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+		return -EFSCORRUPTED;
+
+	return 0;
+}
+
 /*
  * Create a new (shortform) directory.
  */
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 25c1e07..8a37efe 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -33,6 +33,8 @@
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
 #include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -210,6 +212,16 @@
 	if (error)
 		return error;
 
+	/* Check inline dir contents. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    dip->di_format == XFS_DINODE_FMT_LOCAL) {
+		error = xfs_dir2_sf_verify(ip);
+		if (error) {
+			xfs_idestroy_fork(ip, XFS_DATA_FORK);
+			return error;
+		}
+	}
+
 	if (xfs_is_reflink_inode(ip)) {
 		ASSERT(ip->i_cowfp == NULL);
 		xfs_ifork_init_cow(ip);
@@ -320,7 +332,6 @@
 	int		whichfork,
 	int		size)
 {
-
 	/*
 	 * If the size is unreasonable, then something
 	 * is wrong and we just bail out rather than crash in
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index b177ef3..82a38d8 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1629,13 +1629,28 @@
 	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
 		return -EOPNOTSUPP;
 
-	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+	INIT_LIST_HEAD(&debris);
+
+	/*
+	 * In this first part, we use an empty transaction to gather up
+	 * all the leftover CoW extents so that we can subsequently
+	 * delete them.  The empty transaction is used to avoid
+	 * a buffer lock deadlock if there happens to be a loop in the
+	 * refcountbt because we're allowed to re-grab a buffer that is
+	 * already attached to our transaction.  When we're done
+	 * recording the CoW debris we cancel the (empty) transaction
+	 * and everything goes away cleanly.
+	 */
+	error = xfs_trans_alloc_empty(mp, &tp);
 	if (error)
 		return error;
-	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);
+
+	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
+	if (error)
+		goto out_trans;
+	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);
 
 	/* Find all the leftover CoW staging extents. */
-	INIT_LIST_HEAD(&debris);
 	memset(&low, 0, sizeof(low));
 	memset(&high, 0, sizeof(high));
 	low.rc.rc_startblock = XFS_REFC_COW_START;
@@ -1645,10 +1660,11 @@
 	if (error)
 		goto out_cursor;
 	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
-	xfs_buf_relse(agbp);
+	xfs_trans_brelse(tp, agbp);
+	xfs_trans_cancel(tp);
 
 	/* Now iterate the list to free the leftovers */
-	list_for_each_entry(rr, &debris, rr_list) {
+	list_for_each_entry_safe(rr, n, &debris, rr_list) {
 		/* Set up transaction. */
 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
 		if (error)
@@ -1676,8 +1692,16 @@
 		error = xfs_trans_commit(tp);
 		if (error)
 			goto out_free;
+
+		list_del(&rr->rr_list);
+		kmem_free(rr);
 	}
 
+	return error;
+out_defer:
+	xfs_defer_cancel(&dfops);
+out_trans:
+	xfs_trans_cancel(tp);
 out_free:
 	/* Free the leftover list */
 	list_for_each_entry_safe(rr, n, &debris, rr_list) {
@@ -1688,11 +1712,6 @@
 
 out_cursor:
 	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
-	xfs_buf_relse(agbp);
-	goto out_free;
-
-out_defer:
-	xfs_defer_cancel(&dfops);
-	xfs_trans_cancel(tp);
-	goto out_free;
+	xfs_trans_brelse(tp, agbp);
+	goto out_trans;
 }
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index 7917f6e..d787c67 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -21,8 +21,20 @@
 /*
  * Components of space reservations.
  */
+
+/* Worst case number of rmaps that can be held in a block. */
 #define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)    \
 		(((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0]))
+
+/* Adding one rmap could split every level up to the top of the tree. */
+#define XFS_RMAPADD_SPACE_RES(mp) ((mp)->m_rmap_maxlevels)
+
+/* Blocks we might need to add "b" rmaps to a tree. */
+#define XFS_NRMAPADD_SPACE_RES(mp, b)\
+	(((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
+	  XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
+	  XFS_RMAPADD_SPACE_RES(mp))
+
 #define XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)    \
 		(((mp)->m_alloc_mxr[0]) - ((mp)->m_alloc_mnr[0]))
 #define	XFS_EXTENTADD_SPACE_RES(mp,w)	(XFS_BM_MAXLEVELS(mp,w) - 1)
@@ -30,13 +42,12 @@
 	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
 	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
 	  XFS_EXTENTADD_SPACE_RES(mp,w))
+
+/* Blocks we might need to add "b" mappings & rmappings to a file. */
 #define XFS_SWAP_RMAP_SPACE_RES(mp,b,w)\
-	(((b + XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp) - 1) / \
-	  XFS_MAX_CONTIG_EXTENTS_PER_BLOCK(mp)) * \
-	  XFS_EXTENTADD_SPACE_RES(mp,w) + \
-	 ((b + XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) - 1) / \
-	  XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * \
-	  (mp)->m_rmap_maxlevels)
+	(XFS_NEXTENTADD_SPACE_RES((mp), (b), (w)) + \
+	 XFS_NRMAPADD_SPACE_RES((mp), (b)))
+
 #define	XFS_DAENTER_1B(mp,w)	\
 	((w) == XFS_DATA_FORK ? (mp)->m_dir_geo->fsbcount : 1)
 #define	XFS_DAENTER_DBS(mp,w)	\
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0457abe..6df0a7c 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -116,11 +116,11 @@
 
 	bsize = bh->b_size;
 	do {
+		if (off > end)
+			break;
 		next = bh->b_this_page;
 		if (off < bvec->bv_offset)
 			goto next_bh;
-		if (off > end)
-			break;
 		bh->b_end_io(bh, !error);
 next_bh:
 		off += bsize;
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 9bf57c7..c4b90e7 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -34,6 +34,8 @@
 #include "xfs_bmap.h"
 #include "xfs_icache.h"
 #include "xfs_trace.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans_space.h"
 
 
 kmem_zone_t	*xfs_bui_zone;
@@ -446,7 +448,8 @@
 		return -EIO;
 	}
 
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
 	if (error)
 		return error;
 	budp = xfs_trans_get_bud(tp, buip);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5328ecd..87b495e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -588,9 +588,13 @@
 		}
 		break;
 	default:
+		/* Local format data forks report no extents. */
+		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+			bmv->bmv_entries = 0;
+			return 0;
+		}
 		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
-		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
 			return -EINVAL;
 
 		if (xfs_get_extsz_hint(ip) ||
@@ -718,7 +722,7 @@
 			 * extents.
 			 */
 			if (map[i].br_startblock == DELAYSTARTBLOCK &&
-			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+			    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
 				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 
                         if (map[i].br_startblock == HOLESTARTBLOCK &&
@@ -911,9 +915,9 @@
 }
 
 /*
- * This is called by xfs_inactive to free any blocks beyond eof
- * when the link count isn't zero and by xfs_dm_punch_hole() when
- * punching a hole to EOF.
+ * This is called to free any blocks beyond eof. The caller must hold
+ * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
+ * reference to the inode.
  */
 int
 xfs_free_eofblocks(
@@ -928,8 +932,6 @@
 	struct xfs_bmbt_irec	imap;
 	struct xfs_mount	*mp = ip->i_mount;
 
-	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
 	/*
 	 * Figure out if there are any blocks beyond the end
 	 * of the file.  If not, then there is nothing to do.
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d7a67d7..1626927 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -96,12 +96,16 @@
 xfs_buf_ioacct_inc(
 	struct xfs_buf	*bp)
 {
-	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+	if (bp->b_flags & XBF_NO_IOACCT)
 		return;
 
 	ASSERT(bp->b_flags & XBF_ASYNC);
-	bp->b_flags |= _XBF_IN_FLIGHT;
-	percpu_counter_inc(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
+		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_inc(&bp->b_target->bt_io_count);
+	}
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -109,14 +113,24 @@
  * freed and unaccount from the buftarg.
  */
 static inline void
+__xfs_buf_ioacct_dec(
+	struct xfs_buf	*bp)
+{
+	ASSERT(spin_is_locked(&bp->b_lock));
+
+	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
+		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
+		percpu_counter_dec(&bp->b_target->bt_io_count);
+	}
+}
+
+static inline void
 xfs_buf_ioacct_dec(
 	struct xfs_buf	*bp)
 {
-	if (!(bp->b_flags & _XBF_IN_FLIGHT))
-		return;
-
-	bp->b_flags &= ~_XBF_IN_FLIGHT;
-	percpu_counter_dec(&bp->b_target->bt_io_count);
+	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+	spin_unlock(&bp->b_lock);
 }
 
 /*
@@ -148,9 +162,9 @@
 	 * unaccounted (released to LRU) before that occurs. Drop in-flight
 	 * status now to preserve accounting consistency.
 	 */
-	xfs_buf_ioacct_dec(bp);
-
 	spin_lock(&bp->b_lock);
+	__xfs_buf_ioacct_dec(bp);
+
 	atomic_set(&bp->b_lru_ref, 0);
 	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
 	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
@@ -953,12 +967,12 @@
 		 * ensures the decrement occurs only once per-buf.
 		 */
 		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
-			xfs_buf_ioacct_dec(bp);
+			__xfs_buf_ioacct_dec(bp);
 		goto out_unlock;
 	}
 
 	/* the last reference has been dropped ... */
-	xfs_buf_ioacct_dec(bp);
+	__xfs_buf_ioacct_dec(bp);
 	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
 		/*
 		 * If the buffer is added to the LRU take a new reference to the
@@ -1052,6 +1066,8 @@
 xfs_buf_unlock(
 	struct xfs_buf		*bp)
 {
+	ASSERT(xfs_buf_islocked(bp));
+
 	XB_CLEAR_OWNER(bp);
 	up(&bp->b_sema);
 
@@ -1790,6 +1806,28 @@
 }
 
 /*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+	struct list_head	*list)
+{
+	struct xfs_buf		*bp;
+
+	while (!list_empty(list)) {
+		bp = list_first_entry(list, struct xfs_buf, b_list);
+
+		xfs_buf_lock(bp);
+		bp->b_flags &= ~_XBF_DELWRI_Q;
+		list_del_init(&bp->b_list);
+		xfs_buf_relse(bp);
+	}
+}
+
+/*
  * Add a buffer to the delayed write list.
  *
  * This queues a buffer for writeout if it hasn't already been.  Note that
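
The xfs_buf.c changes above turn the in-flight marker from an unlocked b_flags bit
into a b_state bit that is only set or cleared under bp->b_lock, so a buffer can
only ever be counted in flight once no matter how the inc/dec calls interleave. A
compact sketch of the same idea, using a plain atomic_t where XFS uses a per-cpu
counter (names are illustrative):

#include <linux/atomic.h>
#include <linux/spinlock.h>

#define MY_STATE_IN_FLIGHT	(1 << 0)

struct my_buf {
	spinlock_t	lock;
	unsigned int	state;
};

static atomic_t my_io_count = ATOMIC_INIT(0);

static void my_ioacct_inc(struct my_buf *b)
{
	spin_lock(&b->lock);
	if (!(b->state & MY_STATE_IN_FLIGHT)) {	/* count each buffer once */
		b->state |= MY_STATE_IN_FLIGHT;
		atomic_inc(&my_io_count);
	}
	spin_unlock(&b->lock);
}

static void my_ioacct_dec(struct my_buf *b)
{
	spin_lock(&b->lock);
	if (b->state & MY_STATE_IN_FLIGHT) {	/* tolerate repeated calls */
		b->state &= ~MY_STATE_IN_FLIGHT;
		atomic_dec(&my_io_count);
	}
	spin_unlock(&b->lock);
}
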
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 1c2e52b..ad514a8 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -63,7 +63,6 @@
 #define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
 #define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
 #define _XBF_COMPOUND	 (1 << 23)/* compound buffer */
-#define _XBF_IN_FLIGHT	 (1 << 25) /* I/O in flight, for accounting purposes */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -83,14 +82,14 @@
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
-	{ _XBF_COMPOUND,	"COMPOUND" }, \
-	{ _XBF_IN_FLIGHT,	"IN_FLIGHT" }
+	{ _XBF_COMPOUND,	"COMPOUND" }
 
 
 /*
  * Internal state flags.
  */
 #define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
+#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */
 
 /*
  * The xfs_buftarg contains 2 notions of "sector size" -
@@ -330,6 +329,7 @@
 extern void xfs_buf_stale(struct xfs_buf *bp);
 
 /* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
 extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
 extern int xfs_buf_delwri_submit(struct list_head *);
 extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 2981698..eba6316 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -71,22 +71,11 @@
 	struct xfs_da_geometry	*geo = args->geo;
 
 	ASSERT(dp->i_df.if_flags & XFS_IFINLINE);
-	/*
-	 * Give up if the directory is way too short.
-	 */
-	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
-		ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount));
-		return -EIO;
-	}
-
 	ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
 	ASSERT(dp->i_df.if_u1.if_data != NULL);
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 
-	if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count))
-		return -EFSCORRUPTED;
-
 	/*
 	 * If the block number in the offset is out of range, we're done.
 	 */
@@ -405,6 +394,7 @@
 
 	/*
 	 * Do we need more readahead?
+	 * Each loop tries to process 1 full dir blk; last may be partial.
 	 */
 	blk_start_plug(&plug);
 	for (mip->ra_index = mip->ra_offset = i = 0;
@@ -415,7 +405,8 @@
 		 * Read-ahead a contiguous directory block.
 		 */
 		if (i > mip->ra_current &&
-		    map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+		    (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+		    geo->fsbcount) {
 			xfs_dir3_data_readahead(dp,
 				map[mip->ra_index].br_startoff + mip->ra_offset,
 				XFS_FSB_TO_DADDR(dp->i_mount,
@@ -436,14 +427,19 @@
 		}
 
 		/*
-		 * Advance offset through the mapping table.
+		 * Advance offset through the mapping table, processing a full
+		 * dir block even if it is fragmented into several extents.
+		 * But stop if we have consumed all valid mappings, even if
+		 * it's not yet a full directory block.
 		 */
-		for (j = 0; j < geo->fsbcount; j += length ) {
+		for (j = 0;
+		     j < geo->fsbcount && mip->ra_index < mip->map_valid;
+		     j += length ) {
 			/*
 			 * The rest of this extent but not more than a dir
 			 * block.
 			 */
-			length = min_t(int, geo->fsbcount,
+			length = min_t(int, geo->fsbcount - j,
 					map[mip->ra_index].br_blockcount -
 							mip->ra_offset);
 			mip->ra_offset += length;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1209ad2..a90ec3f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1130,13 +1130,13 @@
 
 	index = startoff >> PAGE_SHIFT;
 	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
-	end = endoff >> PAGE_SHIFT;
+	end = (endoff - 1) >> PAGE_SHIFT;
 	do {
 		int		want;
 		unsigned	nr_pages;
 		unsigned int	i;
 
-		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  want);
 		/*
@@ -1163,17 +1163,6 @@
 			break;
 		}
 
-		/*
-		 * At lease we found one page.  If this is the first time we
-		 * step into the loop, and if the first page index offset is
-		 * greater than the given search offset, a hole was found.
-		 */
-		if (type == HOLE_OFF && lastoff == startoff &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = true;
-			break;
-		}
-
 		for (i = 0; i < nr_pages; i++) {
 			struct page	*page = pvec.pages[i];
 			loff_t		b_offset;
@@ -1185,18 +1174,18 @@
 			 * file mapping. However, page->index will not change
 			 * because we have a reference on the page.
 			 *
-			 * Searching done if the page index is out of range.
-			 * If the current offset is not reaches the end of
-			 * the specified search range, there should be a hole
-			 * between them.
+			 * If current page offset is beyond where we've ended,
+			 * we've found a hole.
 			 */
-			if (page->index > end) {
-				if (type == HOLE_OFF && lastoff < endoff) {
-					*offset = lastoff;
-					found = true;
-				}
+			if (type == HOLE_OFF && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
+				found = true;
+				*offset = lastoff;
 				goto out;
 			}
+			/* Searching done if the page index is out of range. */
+			if (page->index > end)
+				goto out;
 
 			lock_page(page);
 			/*
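The two arithmetic changes in this hunk make end the inclusive index of the last page covered by the extent and turn want into an inclusive page count, fixing an off-by-one when the extent ends exactly on a page boundary. A standalone C sketch of the index calculation (the PAGE_SHIFT value and names below are assumptions for illustration only, not part of the patch):

/*
 * Illustrative sketch: when a range ends exactly on a page boundary, shifting
 * the raw end offset yields the index one past the last page; subtracting one
 * byte first yields the inclusive last index.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
	unsigned long endoff = 3 * EX_PAGE_SIZE;		/* extent ends on a page boundary */
	unsigned long end_old = endoff >> EX_PAGE_SHIFT;	/* 3: one page too far */
	unsigned long end_new = (endoff - 1) >> EX_PAGE_SHIFT;	/* 2: last page actually covered */

	printf("old end=%lu, new end=%lu\n", end_old, end_new);
	return 0;
}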
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3fb1f3f..74304b6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -264,6 +264,22 @@
 	xfs_perag_clear_reclaim_tag(pag);
 }
 
+static void
+xfs_inew_wait(
+	struct xfs_inode	*ip)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+	do {
+		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+		if (!xfs_iflags_test(ip, XFS_INEW))
+			break;
+		schedule();
+	} while (true);
+	finish_wait(wq, &wait.wait);
+}
+
 /*
  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
  * part of the structure. This is made more complex by the fact we store
@@ -368,14 +384,17 @@
 
 		error = xfs_reinit_inode(mp, inode);
 		if (error) {
+			bool wake;
 			/*
 			 * Re-initializing the inode failed, and we are in deep
 			 * trouble.  Try to re-add it to the reclaim list.
 			 */
 			rcu_read_lock();
 			spin_lock(&ip->i_flags_lock);
-
+			wake = !!__xfs_iflags_test(ip, XFS_INEW);
 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+			if (wake)
+				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
 			trace_xfs_iget_reclaim_fail(ip);
 			goto out_error;
@@ -625,9 +644,11 @@
 
 STATIC int
 xfs_inode_ag_walk_grab(
-	struct xfs_inode	*ip)
+	struct xfs_inode	*ip,
+	int			flags)
 {
 	struct inode		*inode = VFS_I(ip);
+	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);
 
 	ASSERT(rcu_read_lock_held());
 
@@ -645,7 +666,8 @@
 		goto out_unlock_noent;
 
 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
 		goto out_unlock_noent;
 	spin_unlock(&ip->i_flags_lock);
 
@@ -673,7 +695,8 @@
 					   void *args),
 	int			flags,
 	void			*args,
-	int			tag)
+	int			tag,
+	int			iter_flags)
 {
 	uint32_t		first_index;
 	int			last_error = 0;
@@ -715,7 +738,7 @@
 		for (i = 0; i < nr_found; i++) {
 			struct xfs_inode *ip = batch[i];
 
-			if (done || xfs_inode_ag_walk_grab(ip))
+			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
 				batch[i] = NULL;
 
 			/*
@@ -743,6 +766,9 @@
 		for (i = 0; i < nr_found; i++) {
 			if (!batch[i])
 				continue;
+			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+			    xfs_iflags_test(batch[i], XFS_INEW))
+				xfs_inew_wait(batch[i]);
 			error = execute(batch[i], flags, args);
 			IRELE(batch[i]);
 			if (error == -EAGAIN) {
@@ -822,12 +848,13 @@
 }
 
 int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
 	struct xfs_mount	*mp,
 	int			(*execute)(struct xfs_inode *ip, int flags,
 					   void *args),
 	int			flags,
-	void			*args)
+	void			*args,
+	int			iter_flags)
 {
 	struct xfs_perag	*pag;
 	int			error = 0;
@@ -837,7 +864,8 @@
 	ag = 0;
 	while ((pag = xfs_perag_get(mp, ag))) {
 		ag = pag->pag_agno + 1;
-		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+					  iter_flags);
 		xfs_perag_put(pag);
 		if (error) {
 			last_error = error;
@@ -849,6 +877,17 @@
 }
 
 int
+xfs_inode_ag_iterator(
+	struct xfs_mount	*mp,
+	int			(*execute)(struct xfs_inode *ip, int flags,
+					   void *args),
+	int			flags,
+	void			*args)
+{
+	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
 xfs_inode_ag_iterator_tag(
 	struct xfs_mount	*mp,
 	int			(*execute)(struct xfs_inode *ip, int flags,
@@ -865,7 +904,8 @@
 	ag = 0;
 	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
 		ag = pag->pag_agno + 1;
-		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+					  0);
 		xfs_perag_put(pag);
 		if (error) {
 			last_error = error;
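The xfs_inew_wait() helper added in this hunk uses the kernel's register-then-recheck sleep pattern on a flag bit, paired with the wake_up_bit() call added to xfs_inode.h later in this patch. Purely as an analogy, the same shape in userspace pthreads looks like the sketch below (the kernel uses bit waitqueues, not condition variables; all names here are invented):

/*
 * Userspace analogue of the wait loop: register as a waiter, re-check the
 * flag, and sleep only while it is still set; always re-check after waking.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool inew = true;			/* stands in for the XFS_INEW flag */

static void *finish_setup(void *arg)
{
	(void)arg;
	usleep(1000);				/* pretend inode setup takes a while */
	pthread_mutex_lock(&lock);
	inew = false;				/* like clearing XFS_INEW */
	pthread_cond_broadcast(&cond);		/* like wake_up_bit() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, finish_setup, NULL);

	pthread_mutex_lock(&lock);
	while (inew)				/* re-check after every wakeup */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	puts("setup complete, safe to proceed");
	pthread_join(t, NULL);
	return 0;
}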
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 8a7c849..9183f77 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@
 #define XFS_IGET_UNTRUSTED	0x2
 #define XFS_IGET_DONTCACHE	0x4
 
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT	0x1	/* wait on new inodes */
+
 int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
 	     uint flags, uint lock_flags, xfs_inode_t **ipp);
 
@@ -79,6 +84,9 @@
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
 	int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+	int (*execute)(struct xfs_inode *ip, int flags, void *args),
+	int flags, void *args, int iter_flags);
 int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
 	int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index e50636c..7a0b4ee 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -50,6 +50,7 @@
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -1914,12 +1915,13 @@
 		 * force is true because we are evicting an inode from the
 		 * cache. Post-eof blocks must be freed, lest we end up with
 		 * broken free space accounting.
+		 *
+		 * Note: don't bother with iolock here since lockdep complains
+		 * about acquiring it in reclaim context. We have the only
+		 * reference to the inode at this point anyways.
 		 */
-		if (xfs_can_free_eofblocks(ip, true)) {
-			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+		if (xfs_can_free_eofblocks(ip, true))
 			xfs_free_eofblocks(ip);
-			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-		}
 
 		return;
 	}
@@ -3562,6 +3564,12 @@
 	if (ip->i_d.di_version < 3)
 		ip->i_d.di_flushiter++;
 
+	/* Check the inline directory data. */
+	if (S_ISDIR(VFS_I(ip)->i_mode) &&
+	    ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+	    xfs_dir2_sf_verify(ip))
+		goto corrupt_out;
+
 	/*
 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
 	 * copy out the core of the inode, because if the inode is dirty at all
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 71e8a81..c038f6e 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -217,7 +217,8 @@
 #define XFS_IRECLAIM		(1 << 0) /* started reclaiming this inode */
 #define XFS_ISTALE		(1 << 1) /* inode has been staled */
 #define XFS_IRECLAIMABLE	(1 << 2) /* inode can be reclaimed */
-#define XFS_INEW		(1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT		3	 /* inode has just been allocated */
+#define XFS_INEW		(1 << __XFS_INEW_BIT)
 #define XFS_ITRUNCATED		(1 << 5) /* truncated down so flush-on-close */
 #define XFS_IDIRTY_RELEASE	(1 << 6) /* dirty release already seen */
 #define __XFS_IFLOCK_BIT	7	 /* inode is being flushed right now */
@@ -467,6 +468,7 @@
 	xfs_iflags_clear(ip, XFS_INEW);
 	barrier();
 	unlock_new_inode(VFS_I(ip));
+	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 }
 
 static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index a391975..73cfc71 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1542,10 +1542,11 @@
 	unsigned int		cmd,
 	void			__user *arg)
 {
-	struct getbmapx		bmx;
+	struct getbmapx		bmx = { 0 };
 	int			error;
 
-	if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+	/* struct getbmap is a strict subset of struct getbmapx. */
+	if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
 		return -EFAULT;
 
 	if (bmx.bmv_count < 2)
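The getbmap hunk above zero-initialises the larger getbmapx structure and copies in only the fields it shares with struct getbmap, bounding the copy with offsetof() at the first extended field. A minimal userspace sketch of that prefix-copy idiom (the struct layouts below are invented stand-ins, assuming the smaller struct is a strict layout prefix of the larger one, as the patch comment states for getbmap/getbmapx):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct small {			/* stands in for struct getbmap */
	long offset;
	long length;
	int  count;
};

struct big {			/* stands in for struct getbmapx */
	long offset;
	long length;
	int  count;
	int  iflags;		/* extended fields start here */
	int  oflags;
};

int main(void)
{
	struct small in = { .offset = 8, .length = 16, .count = 4 };
	struct big out = { 0 };	/* extended fields stay zeroed, like bmx = { 0 } */

	/* Copy only the shared prefix: everything before the first extended field. */
	memcpy(&out, &in, offsetof(struct big, iflags));

	printf("offset=%ld count=%d iflags=%d\n", out.offset, out.count, out.iflags);
	return 0;
}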
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 3605624..65740d1 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1151,10 +1151,10 @@
 	if (XFS_FORCED_SHUTDOWN(mp))
 		return -EIO;
 
-	lockmode = xfs_ilock_data_map_shared(ip);
+	lockmode = xfs_ilock_attr_map_shared(ip);
 
 	/* if there are no attribute fork or extents, return ENOENT */
-	if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
 		error = -ENOENT;
 		goto out_unlock;
 	}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 66e8817..d8a77db 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -585,7 +585,7 @@
 		return error;
 
 	bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
-	buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+	buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
 	do {
 		struct xfs_inobt_rec_incore	r;
 		int				stat;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 4017aa9..b57ab34 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1293,7 +1293,7 @@
 xfs_log_work_queue(
 	struct xfs_mount        *mp)
 {
-	queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
+	queue_delayed_work(mp->m_sync_workqueue, &mp->m_log->l_work,
 				msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
 
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1bf878b..5415f90 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -183,6 +183,7 @@
 	struct workqueue_struct	*m_reclaim_workqueue;
 	struct workqueue_struct	*m_log_workqueue;
 	struct workqueue_struct *m_eofblocks_workqueue;
+	struct workqueue_struct	*m_sync_workqueue;
 
 	/*
 	 * Generation of the filesystem layout.  This is incremented by each
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b669b12..8b9a9f1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1384,12 +1384,7 @@
 	mp->m_qflags |= flags;
 
  error_return:
-	while (!list_empty(&buffer_list)) {
-		struct xfs_buf *bp =
-			list_first_entry(&buffer_list, struct xfs_buf, b_list);
-		list_del_init(&bp->b_list);
-		xfs_buf_relse(bp);
-	}
+	xfs_buf_delwri_cancel(&buffer_list);
 
 	if (error) {
 		xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 475a388..9cb5c38 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -759,5 +759,6 @@
 	uint		 flags)
 {
 	ASSERT(mp->m_quotainfo);
-	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+	xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+				    XFS_AGITER_INEW_WAIT);
 }
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 2252f16..29a75ec 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -736,8 +736,22 @@
 	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
 	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
 
-	/* Start a rolling transaction to switch the mappings */
-	resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
+	/*
+	 * Start a rolling transaction to switch the mappings.  We're
+	 * unlikely ever to have to remap 16T worth of single-block
+	 * extents, so just cap the worst case extent count to 2^32-1.
+	 * Stick a warning in just in case, and avoid 64-bit division.
+	 */
+	BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
+	if (end_fsb - offset_fsb > UINT_MAX) {
+		error = -EFSCORRUPTED;
+		xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
+		ASSERT(0);
+		goto out;
+	}
+	resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
+			(unsigned int)(end_fsb - offset_fsb),
+			XFS_DATA_FORK);
 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
 			resblks, 0, 0, &tp);
 	if (error)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index dbbd3f1..882fb85 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -872,8 +872,15 @@
 	if (!mp->m_eofblocks_workqueue)
 		goto out_destroy_log;
 
+	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
+					       mp->m_fsname);
+	if (!mp->m_sync_workqueue)
+		goto out_destroy_eofb;
+
 	return 0;
 
+out_destroy_eofb:
+	destroy_workqueue(mp->m_eofblocks_workqueue);
 out_destroy_log:
 	destroy_workqueue(mp->m_log_workqueue);
 out_destroy_reclaim:
@@ -894,6 +901,7 @@
 xfs_destroy_mount_workqueues(
 	struct xfs_mount	*mp)
 {
+	destroy_workqueue(mp->m_sync_workqueue);
 	destroy_workqueue(mp->m_eofblocks_workqueue);
 	destroy_workqueue(mp->m_log_workqueue);
 	destroy_workqueue(mp->m_reclaim_workqueue);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 70f42ea..a280e12 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -263,6 +263,28 @@
 }
 
 /*
+ * Create an empty transaction with no reservation.  This is a defensive
+ * mechanism for routines that query metadata without actually modifying
+ * them -- if the metadata being queried is somehow cross-linked (think a
+ * btree block pointer that points higher in the tree), we risk deadlock.
+ * However, blocks grabbed as part of a transaction can be re-grabbed.
+ * The verifiers will notice the corrupt block and the operation will fail
+ * back to userspace without deadlocking.
+ *
+ * Note the zero-length reservation; this transaction MUST be cancelled
+ * without any dirty data.
+ */
+int
+xfs_trans_alloc_empty(
+	struct xfs_mount		*mp,
+	struct xfs_trans		**tpp)
+{
+	struct xfs_trans_res		resv = {0};
+
+	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
+}
+
+/*
  * Record the indicated change to the given field for application
  * to the file system's superblock when the transaction commits.
  * For now, just store the change in the transaction structure.
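The comment above states the contract for the new helper: no reservation is taken, the transaction is only for read-only queries, and it must always be cancelled rather than committed. A hypothetical caller might look roughly like the sketch below; the query step and function name are invented, only xfs_trans_alloc_empty() and xfs_trans_cancel() come from the kernel:

/* Hypothetical usage sketch, not part of the patch. */
static int example_metadata_query(struct xfs_mount *mp)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(mp, &tp);	/* zero-length reservation */
	if (error)
		return error;

	/* ... walk metadata under the empty transaction; never dirty it ... */

	xfs_trans_cancel(tp);			/* always cancelled, never committed */
	return 0;
}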
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 61b7fbd..98024cb 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -159,6 +159,8 @@
 int		xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
 			uint blocks, uint rtextents, uint flags,
 			struct xfs_trans **tpp);
+int		xfs_trans_alloc_empty(struct xfs_mount *mp,
+			struct xfs_trans **tpp);
 void		xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
 
 struct xfs_buf	*xfs_trans_get_buf_map(struct xfs_trans *tp,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 9701f2d..a5696c1 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,6 +144,7 @@
 	struct drm_crtc *ptr;
 	struct drm_crtc_state *state;
 	struct drm_crtc_commit *commit;
+	s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
@@ -316,6 +317,8 @@
 			      struct drm_crtc *crtc);
 void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
 				 struct drm_framebuffer *fb);
+void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
+				    struct fence *fence);
 int __must_check
 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
 				  struct drm_crtc *crtc);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index ac9d7d8..1c12875 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -531,6 +531,20 @@
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @max_tmds_char: indicates the maximum TMDS Character Rate supported
+ * @scdc_present: when set the sink supports SCDC functionality
+ * @rr_capable: when set the sink is capable of initiating an SCDC read request
+ * @supports_scramble: when set the sink supports less than 340Mcsc scrambling
+ * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h (DRM_EDID_3D_*)
+ * @pt_scan_info: PT scan info obtained from the VCDB of EDID
+ * @it_scan_info: IT scan info obtained from the VCDB of EDID
+ * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+ * @hdr_eotf: Electro optical transfer function obtained from HDR block
+ * @hdr_metadata_type_one: Metadata type one obtained from HDR block
+ * @hdr_max_luminance: desired max luminance obtained from HDR block
+ * @hdr_avg_luminance: desired avg luminance obtained from HDR block
+ * @hdr_min_luminance: desired min luminance obtained from HDR block
+ * @hdr_supported: does the sink support HDR content
  * @edid_corrupt: indicates whether the last read EDID was corrupt
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
@@ -665,6 +679,22 @@
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
 
+	/* EDID bits HDMI 2.0 */
+	int max_tmds_char;	/* in Mcsc */
+	bool scdc_present;
+	bool rr_capable;
+	bool supports_scramble;
+	int flags_3d;
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u32 hdr_eotf;
+	bool hdr_metadata_type_one;
+	u32 hdr_max_luminance;
+	u32 hdr_avg_luminance;
+	u32 hdr_min_luminance;
+	bool hdr_supported;
+
 	/* Flag for raw EDID header corruption - used in Displayport
 	 * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
 	 */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 0aa2925..f3d58c7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -680,6 +680,35 @@
 	 * context.
 	 */
 	struct drm_modeset_acquire_ctx *acquire_ctx;
+
+	/**
+	 * @fence_context:
+	 *
+	 * timeline context used for fence operations.
+	 */
+	unsigned int fence_context;
+
+	/**
+	 * @fence_lock:
+	 *
+	 * spinlock to protect the fences in the fence_context.
+	 */
+
+	spinlock_t fence_lock;
+	/**
+	 * @fence_seqno:
+	 *
+	 * Seqno variable used as monotonic counter for the fences
+	 * created on the CRTC's timeline.
+	 */
+	unsigned long fence_seqno;
+
+	/**
+	 * @timeline_name:
+	 *
+	 * The name of the CRTC's fence timeline.
+	 */
+	char timeline_name[32];
 };
 
 /**
@@ -1160,6 +1189,17 @@
 	 */
 	struct drm_property *prop_fb_id;
 	/**
+	 * @prop_in_fence_fd: Sync File fd representing the incoming fences
+	 * for a Plane.
+	 */
+	struct drm_property *prop_in_fence_fd;
+	/**
+	 * @prop_out_fence_ptr: Sync File fd pointer representing the
+	 * outgoing fences for a CRTC. Userspace should provide a pointer to a
+	 * value of type s32, and then cast that pointer to u64.
+	 */
+	struct drm_property *prop_out_fence_ptr;
+	/**
 	 * @prop_crtc_id: Default atomic plane property to specify the
 	 * &drm_crtc.
 	 */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index c3a7d44..32bd104 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -269,6 +269,11 @@
 
 #define DRM_ELD_CEA_SAD(mnl, sad)	(20 + (mnl) + 3 * (sad))
 
+/* HDMI 2.0 */
+#define DRM_EDID_3D_INDEPENDENT_VIEW	(1 << 2)
+#define DRM_EDID_3D_DUAL_VIEW		(1 << 1)
+#define DRM_EDID_3D_OSD_DISPARITY	(1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index f313211..3b00f64 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -12,6 +12,8 @@
 struct drm_device;
 struct drm_file;
 struct drm_mode_fb_cmd2;
+struct drm_plane;
+struct drm_plane_state;
 
 struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
 	unsigned int preferred_bpp, unsigned int num_crtc,
@@ -41,6 +43,9 @@
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 	unsigned int plane);
 
+int drm_fb_cma_prepare_fb(struct drm_plane *plane,
+			  struct drm_plane_state *state);
+
 #ifdef CONFIG_DEBUG_FS
 struct seq_file;
 
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8b4dc62..952ef84 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -65,7 +65,7 @@
 
 	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
 	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
-	struct fence *fence;
+	struct fence *fence; /* do not write directly, use drm_atomic_set_fence_for_plane() */
 
 	/* Signed dest location allows it to be partially off screen */
 	int32_t crtc_x, crtc_y;
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
index c1350ce..75ddcfa 100644
--- a/include/dt-bindings/clock/mdss-10nm-pll-clk.h
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -34,4 +34,12 @@
 #define PCLK_SRC_MUX_1_CLK	15
 #define PCLK_SRC_1_CLK		16
 #define PCLK_MUX_1_CLK		17
+
+/* DP PLL clocks */
+#define	DP_VCO_CLK	0
+#define	DP_LINK_CLK_DIVSEL_TEN	1
+#define	DP_VCO_DIVIDED_TWO_CLK_SRC	2
+#define	DP_VCO_DIVIDED_FOUR_CLK_SRC	3
+#define	DP_VCO_DIVIDED_SIX_CLK_SRC	4
+#define	DP_VCO_DIVIDED_CLK_SRC_MUX	5
 #endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
index 915ac08..e773848 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdxpoorwills.h
@@ -83,19 +83,20 @@
 #define GCC_SPMI_FETCHER_CLK					65
 #define GCC_SPMI_FETCHER_CLK_SRC				66
 #define GCC_SYS_NOC_CPUSS_AHB_CLK				67
-#define GCC_USB30_MASTER_CLK					68
-#define GCC_USB30_MASTER_CLK_SRC				69
-#define GCC_USB30_MOCK_UTMI_CLK					70
-#define GCC_USB30_MOCK_UTMI_CLK_SRC				71
-#define GCC_USB30_SLEEP_CLK					72
-#define GCC_USB3_PRIM_CLKREF_CLK				73
-#define GCC_USB3_PHY_AUX_CLK					74
-#define GCC_USB3_PHY_AUX_CLK_SRC				75
-#define GCC_USB3_PHY_PIPE_CLK					76
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				77
-#define GCC_XO_DIV4_CLK						78
-#define GPLL0							79
-#define GPLL0_OUT_EVEN						80
+#define GCC_SYS_NOC_USB3_CLK					68
+#define GCC_USB30_MASTER_CLK					69
+#define GCC_USB30_MASTER_CLK_SRC				70
+#define GCC_USB30_MOCK_UTMI_CLK					71
+#define GCC_USB30_MOCK_UTMI_CLK_SRC				72
+#define GCC_USB30_SLEEP_CLK					73
+#define GCC_USB3_PRIM_CLKREF_CLK				74
+#define GCC_USB3_PHY_AUX_CLK					75
+#define GCC_USB3_PHY_AUX_CLK_SRC				76
+#define GCC_USB3_PHY_PIPE_CLK					77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				78
+#define GCC_XO_DIV4_CLK						79
+#define GPLL0							80
+#define GPLL0_OUT_EVEN						81
 
 /* GDSCs */
 #define PCIE_GDSC						0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
index c43a9f8..323beaf 100644
--- a/include/dt-bindings/clock/qcom,gpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -40,13 +40,10 @@
 #define GPU_CC_PLL1_OUT_ODD					22
 #define GPU_CC_PLL1_OUT_TEST					23
 #define GPU_CC_PLL_TEST_CLK					24
-#define GPU_CC_RBCPR_AHB_CLK					25
-#define GPU_CC_RBCPR_CLK					26
-#define GPU_CC_RBCPR_CLK_SRC					27
-#define GPU_CC_SLEEP_CLK					28
-#define GPU_CC_GMU_CLK_SRC					29
-#define GPU_CC_CX_GFX3D_CLK					30
-#define GPU_CC_CX_GFX3D_SLV_CLK					31
+#define GPU_CC_SLEEP_CLK					25
+#define GPU_CC_GMU_CLK_SRC					26
+#define GPU_CC_CX_GFX3D_CLK					27
+#define GPU_CC_CX_GFX3D_SLV_CLK					28
 
 /* GPUCC reset clock registers */
 #define GPUCC_GPU_CC_ACD_BCR					0
@@ -54,9 +51,8 @@
 #define GPUCC_GPU_CC_GFX3D_AON_BCR				2
 #define GPUCC_GPU_CC_GMU_BCR					3
 #define GPUCC_GPU_CC_GX_BCR					4
-#define GPUCC_GPU_CC_RBCPR_BCR					5
-#define GPUCC_GPU_CC_SPDM_BCR					6
-#define GPUCC_GPU_CC_XO_BCR					7
+#define GPUCC_GPU_CC_SPDM_BCR					5
+#define GPUCC_GPU_CC_XO_BCR					6
 
 /* GFX3D clock registers */
 #define GPU_CC_PLL0						0
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
index be2210c..9d52d2e 100644
--- a/include/dt-bindings/msm/msm-bus-ids.h
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -43,6 +43,7 @@
 #define	MSM_BUS_FAB_MC_VIRT 6151
 #define	MSM_BUS_FAB_MEM_NOC 6152
 #define	MSM_BUS_FAB_IPA_VIRT 6153
+#define	MSM_BUS_FAB_CAMNOC_VIRT 6154
 
 #define MSM_BUS_FAB_MC_VIRT_DISPLAY 26000
 #define MSM_BUS_FAB_MEM_NOC_DISPLAY 26001
@@ -236,7 +237,7 @@
 #define	MSM_BUS_MASTER_MNOC_SF_MEM_NOC 133
 #define	MSM_BUS_MASTER_SNOC_GC_MEM_NOC 134
 #define	MSM_BUS_MASTER_SNOC_SF_MEM_NOC 135
-#define	MSM_BUS_MASTER_CAMNOC_HF 136
+#define	MSM_BUS_MASTER_CAMNOC_HF0 136
 #define	MSM_BUS_MASTER_CAMNOC_SF 137
 #define	MSM_BUS_MASTER_VIDEO_PROC 138
 #define	MSM_BUS_MASTER_GNOC_SNOC 139
@@ -245,7 +246,11 @@
 #define	MSM_BUS_MASTER_MEM_NOC_SNOC 142
 #define	MSM_BUS_MASTER_IPA_CORE 143
 #define	MSM_BUS_MASTER_ALC 144
-#define	MSM_BUS_MASTER_MASTER_LAST 145
+#define	MSM_BUS_MASTER_CAMNOC_HF1 145
+#define	MSM_BUS_MASTER_CAMNOC_HF0_UNCOMP 146
+#define	MSM_BUS_MASTER_CAMNOC_HF1_UNCOMP 147
+#define	MSM_BUS_MASTER_CAMNOC_SF_UNCOMP 148
+#define	MSM_BUS_MASTER_MASTER_LAST 149
 
 #define MSM_BUS_MASTER_LLCC_DISPLAY 20000
 #define MSM_BUS_MASTER_MNOC_HF_MEM_NOC_DISPLAY 20001
@@ -584,7 +589,8 @@
 #define	MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775
 #define	MSM_BUS_SLAVE_MEM_NOC_SNOC 776
 #define	MSM_BUS_SLAVE_IPA 777
-#define	MSM_BUS_SLAVE_LAST 778
+#define	MSM_BUS_SLAVE_CAMNOC_UNCOMP 778
+#define	MSM_BUS_SLAVE_LAST 779
 
 #define	MSM_BUS_SLAVE_EBI_CH0_DISPLAY 20512
 #define	MSM_BUS_SLAVE_LLCC_DISPLAY 20513
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 0000000..ace92fc
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,77 @@
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/jump_label.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+	/*
+	 * Store two sets of bpf_prog pointers, one for programs that are
+	 * pinned directly to this cgroup, and one for those that are effective
+	 * when this cgroup is accessed.
+	 */
+	struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog *effective[MAX_BPF_ATTACH_TYPE];
+	bool disallow_override[MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_put(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool overridable);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable);
+
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type);
+
+/* Wrappers for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled. */
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb)			\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled)						\
+		__ret = __cgroup_bpf_run_filter(sk, skb,		\
+						BPF_CGROUP_INET_INGRESS); \
+									\
+	__ret;								\
+})
+
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb)				\
+({									\
+	int __ret = 0;							\
+	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
+		typeof(sk) __sk = sk_to_full_sk(sk);			\
+		if (sk_fullsock(__sk))					\
+			__ret = __cgroup_bpf_run_filter(__sk, skb,	\
+						BPF_CGROUP_INET_EGRESS); \
+	}								\
+	__ret;								\
+})
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+				      struct cgroup *parent) {}
+
+#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
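The BPF_CGROUP_RUN_PROG_* macros above expand to GCC statement expressions whose body is skipped while the static branch is disabled, so the hooks cost next to nothing when no program is attached. A compilable userspace sketch of that shape, with a plain bool standing in for the static key and an invented filter function:

#include <stdbool.h>
#include <stdio.h>

static bool hooks_enabled;		/* stands in for cgroup_bpf_enabled */

static int run_filter(int pkt)
{
	return pkt < 0 ? -1 : 0;	/* pretend negative packets are rejected */
}

#define RUN_HOOK(pkt)					\
({							\
	int __ret = 0;					\
	if (hooks_enabled)				\
		__ret = run_filter(pkt);		\
	__ret;						\
})

int main(void)
{
	printf("disabled: %d\n", RUN_HOOK(-5));	/* 0: filter never runs */
	hooks_enabled = true;
	printf("enabled:  %d\n", RUN_HOOK(-5));	/* -1: filter rejects */
	return 0;
}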
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 0f4548c..b008a33 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -300,6 +301,9 @@
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
 	/* ids of the ancestors at each level including self */
 	int ancestor_ids[];
 };
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b1f2d00..8ee110a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -277,6 +277,8 @@
 		regulator
  * @level_votes: array of votes for each level
  * @num_levels: specifies the size of level_votes array
+ * @skip_handoff: do not vote for the max possible voltage during init
+ * @use_max_uV: use INT_MAX for max_uV when calling regulator_set_voltage
  * @cur_level: the currently set voltage level
  * @lock: lock to protect this struct
  */
@@ -288,6 +290,8 @@
 	int *vdd_uv;
 	int *level_votes;
 	int num_levels;
+	bool skip_handoff;
+	bool use_max_uV;
 	unsigned long cur_level;
 	struct mutex lock;
 };
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 0e1e050..cf86f52 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -440,6 +440,13 @@
 
 struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
 
+void dm_lock_md_type(struct mapped_device *md);
+void dm_unlock_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, unsigned type);
+unsigned dm_get_md_type(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
+unsigned dm_table_get_type(struct dm_table *t);
+
 /*
  * Geometry functions.
  */
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8d7265f..fd9b89f 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -185,6 +185,16 @@
 void fence_free(struct fence *fence);
 
 /**
+ * fence_put - decreases refcount of the fence
+ * @fence:	[in]	fence to reduce refcount of
+ */
+static inline void fence_put(struct fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, fence_release);
+}
+
+/**
  * fence_get - increases refcount of the fence
  * @fence:	[in]	fence to increase refcount of
  *
@@ -212,13 +222,49 @@
 }
 
 /**
- * fence_put - decreases refcount of the fence
- * @fence:	[in]	fence to reduce refcount of
+ * fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
+ * @fence:	[in]	pointer to fence to increase refcount of
+ *
+ * Function returns the fence, or NULL if no refcount could be obtained.
+ * This function handles acquiring a reference to a fence that may be
+ * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * so long as the caller is using RCU on the pointer to the fence.
+ *
+ * An alternative mechanism is to employ a seqlock to protect a bunch of
+ * fences, such as used by struct reservation_object. When using a seqlock,
+ * the seqlock must be taken before and checked after a reference to the
+ * fence is acquired (as shown here).
+ *
+ * The caller is required to hold the RCU read lock.
  */
-static inline void fence_put(struct fence *fence)
+static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep)
 {
-	if (fence)
-		kref_put(&fence->refcount, fence_release);
+	do {
+		struct fence *fence;
+
+		fence = rcu_dereference(*fencep);
+		if (!fence || !fence_get_rcu(fence))
+			return NULL;
+
+		/* The atomic_inc_not_zero() inside fence_get_rcu()
+		 * provides a full memory barrier upon success (such as now).
+		 * This is paired with the write barrier from assigning
+		 * to the __rcu protected fence pointer so that if that
+		 * pointer still matches the current fence, we know we
+		 * have successfully acquired a reference to it. If it no
+		 * longer matches, we are holding a reference to some other
+		 * reallocated pointer. This is possible if the allocator
+		 * is using a freelist like SLAB_DESTROY_BY_RCU where the
+		 * fence remains valid for the RCU grace period, but it
+		 * may be reallocated. When using such allocators, we are
+		 * responsible for ensuring the reference we get is to
+		 * the right fence, as below.
+		 */
+		if (fence == rcu_access_pointer(*fencep))
+			return rcu_pointer_handoff(fence);
+
+		fence_put(fence);
+	} while (1);
 }
 
 int fence_signal(struct fence *fence);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index e974420..bc38b99a 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -35,6 +35,7 @@
 };
 
 #define HDMI_IEEE_OUI 0x000c03
+#define HDMI_IEEE_OUI_HF	0xc45dd8
 #define HDMI_INFOFRAME_HEADER_SIZE  4
 #define HDMI_AVI_INFOFRAME_SIZE    13
 #define HDMI_SPD_INFOFRAME_SIZE    25
@@ -78,6 +79,8 @@
 	HDMI_PICTURE_ASPECT_NONE,
 	HDMI_PICTURE_ASPECT_4_3,
 	HDMI_PICTURE_ASPECT_16_9,
+	HDMI_PICTURE_ASPECT_64_27,
+	HDMI_PICTURE_ASPECT_256_135,
 	HDMI_PICTURE_ASPECT_RESERVED,
 };
 
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index dd85f35..039e6ab 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@
 	unsigned usage_id;
 	atomic_t data_ready;
 	atomic_t user_requested_state;
+	int poll_interval;
+	int raw_hystersis;
 	struct iio_trigger *trigger;
 	struct hid_sensor_hub_attribute_info poll;
 	struct hid_sensor_hub_attribute_info report_state;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 3319d97..8feecd5 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -630,14 +630,16 @@
 static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
 						    netdev_features_t features)
 {
-	if (skb_vlan_tagged_multi(skb))
-		features = netdev_intersect_features(features,
-						     NETIF_F_SG |
-						     NETIF_F_HIGHDMA |
-						     NETIF_F_FRAGLIST |
-						     NETIF_F_HW_CSUM |
-						     NETIF_F_HW_VLAN_CTAG_TX |
-						     NETIF_F_HW_VLAN_STAG_TX);
+	if (skb_vlan_tagged_multi(skb)) {
+		/* In the case of multi-tagged packets, use a direct mask
+	 * instead of using netdev_intersect_features(), to make
+		 * sure that only devices supporting NETIF_F_HW_CSUM will
+		 * have checksum offloading support.
+		 */
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_TX;
+	}
 
 	return features;
 }
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72f0721..bbc65ef 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -450,6 +450,12 @@
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << IRQ_POLL_SOFTIRQ)     | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -485,6 +491,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index e3d181e..7bdddb3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -244,6 +244,8 @@
 	void (*tlbi_domain)(struct iommu_domain *domain);
 	int (*enable_config_clocks)(struct iommu_domain *domain);
 	void (*disable_config_clocks)(struct iommu_domain *domain);
+	uint64_t (*iova_to_pte)(struct iommu_domain *domain,
+			 dma_addr_t iova);
 
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 
@@ -331,6 +333,9 @@
 				      phys_addr_t offset, u64 size,
 				      int prot);
 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+
+extern uint64_t iommu_iova_to_pte(struct iommu_domain *domain,
+	    dma_addr_t iova);
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
  * @domain: the iommu domain where the fault has happened
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f68490..e233925 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -330,7 +330,9 @@
 					     int write, void __user *buffer,
 					     size_t *length, loff_t *ppos);
 #endif
-
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 4b5a339..1fe6e17 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,6 @@
 #define ENABLE_REGULATOR	BIT(0)
 #define DISABLE_REGULATOR	BIT(1)
 #define QUERY_MAX_CURRENT	BIT(2)
-#define PRE_FLASH		BIT(3)
 
 #define FLASH_LED_PREPARE_OPTIONS_MASK	GENMASK(3, 0)
 
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f7033fa..d6ebc01 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@
 }
 #endif
 
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return 0;
 }
 
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+		phys_addr_t end_addr)
+{
+	return 0;
+}
+
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d..e1a903a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -640,7 +640,12 @@
 
 typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
 
+enum {
+	MLX5_CMD_ENT_STATE_PENDING_COMP,
+};
+
 struct mlx5_cmd_work_ent {
+	unsigned long		state;
 	struct mlx5_cmd_msg    *in;
 	struct mlx5_cmd_msg    *out;
 	void		       *uout;
@@ -838,7 +843,7 @@
 #endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 959414b..227b1e2 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -232,6 +232,7 @@
 	bool lock_needed);
 extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
 	bool lock_needed, bool is_cmdq_dcmd);
+extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host);
 
 /**
  *	mmc_claim_host - exclusively claim a host
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0e6a54c..2f943a0 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -347,7 +347,9 @@
 	atomic_t	devfreq_abort;
 	bool		skip_clk_scale_freq_update;
 	int		freq_table_sz;
+	int		pltfm_freq_table_sz;
 	u32		*freq_table;
+	u32		*pltfm_freq_table;
 	unsigned long	total_busy_time_us;
 	unsigned long	target_freq;
 	unsigned long	curr_freq;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7e273e2..6744eb4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -672,6 +672,7 @@
 	 * is the first PFN that needs to be initialised.
 	 */
 	unsigned long first_deferred_pfn;
+	unsigned long static_init_size;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi b/include/linux/nfcinfo.h
similarity index 85%
rename from arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
rename to include/linux/nfcinfo.h
index b2d607d..b67a65f 100644
--- a/arch/arm64/boot/dts/qcom/sdm830-mtp.dtsi
+++ b/include/linux/nfcinfo.h
@@ -10,5 +10,9 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm845-mtp.dtsi"
-#include "sdm830-pinctrl.dtsi"
+#ifndef _NFCINFO_H
+#define _NFCINFO_H
+
+#include <uapi/linux/nfc/nfcinfo.h>
+
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 72f9211..4381570 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -248,6 +248,7 @@
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
 	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	POWER_SUPPLY_PROP_REAL_TYPE,
+	POWER_SUPPLY_PROP_PR_SWAP,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index b0f305e..bad7710 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -177,17 +177,14 @@
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
 	struct fence *fence;
-	unsigned seq;
-retry:
-	seq = read_seqcount_begin(&obj->seq);
+
+	if (!rcu_access_pointer(obj->fence_excl))
+		return NULL;
+
 	rcu_read_lock();
-	fence = rcu_dereference(obj->fence_excl);
-	if (read_seqcount_retry(&obj->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
-	fence = fence_get(fence);
+	fence = fence_get_rcu_safe(&obj->fence_excl);
 	rcu_read_unlock();
+
 	return fence;
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index decb943..6c6ae4d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -178,7 +178,9 @@
 #endif
 
 extern void sched_update_nr_prod(int cpu, long delta, bool inc);
-extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+				     unsigned int *max_nr,
+				     unsigned int *big_max_nr);
 extern unsigned int sched_get_cpu_util(int cpu);
 
 extern void calc_global_load(unsigned long ticks);
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index f3fa9e6..f921909 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -206,6 +206,23 @@
  */
 void sde_rsc_unregister_event(struct sde_rsc_event *event);
 
+/**
+ * is_sde_rsc_available - check if display rsc available.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is a valid rsc index.
+ * Returns: true if rsc is available; false in all other cases
+ */
+bool is_sde_rsc_available(int rsc_index);
+
+/**
+ * get_sde_rsc_current_state - gets the current state of sde rsc.
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is a valid rsc index.
+ * Returns: current state if rsc available; SDE_RSC_IDLE_STATE for
+ *          all other cases
+ */
+enum sde_rsc_state get_sde_rsc_current_state(int rsc_index);
+
 #else
 
 static inline struct sde_rsc_client *sde_rsc_client_create(u32 rsc_index,
@@ -242,6 +259,15 @@
 {
 }
 
+static inline bool is_sde_rsc_available(int rsc_index)
+{
+	return false;
+}
+
+static inline enum sde_rsc_state get_sde_rsc_current_state(int rsc_index)
+{
+	return SDE_RSC_IDLE_STATE;
+}
 #endif /* CONFIG_DRM_SDE_RSC */
 
 #endif /* _SDE_RSC_H_ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index a0596ca0..a2f8109 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -24,6 +24,7 @@
 void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
 
+u64 sock_gen_cookie(struct sock *sk);
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
 void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
 
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8491bdc..9b0d5cb 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -43,6 +43,9 @@
 /* Default weight of a bound cooling device */
 #define THERMAL_WEIGHT_DEFAULT 0
 
+/* Max sensors that can be used for a single virtual thermalzone */
+#define THERMAL_MAX_VIRT_SENSORS 5
+
 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */
 #define THERMAL_TEMP_INVALID	-274000
 
@@ -405,6 +408,39 @@
 	enum thermal_trip_type type;
 };
 
+/* Different aggregation logic supported for virtual sensors */
+enum aggregation_logic {
+	VIRT_WEIGHTED_AVG,
+	VIRT_MAXIMUM,
+	VIRT_MINIMUM,
+	VIRT_AGGREGATION_NR,
+};
+
+/*
+ * struct virtual_sensor_data - Data structure used to provide
+ *			      information about the virtual zone.
+ * @virt_zone_name - Virtual thermal zone name
+ * @num_sensors - Number of sensors this virtual zone uses to compute
+ *		  temperature
+ * @sensor_names - Array of sensor names
+ * @logic - Temperature aggregation logic to be used
+ * @coefficients - Coefficients to be used for weighted average logic
+ * @coefficient_ct - number of coefficients provided as input
+ * @avg_offset - offset value to be used for the weighted aggregation logic
+ * @avg_denominator - denominator value to be used for the weighted aggregation
+ *			logic
+ */
+struct virtual_sensor_data {
+	int                    num_sensors;
+	char                   virt_zone_name[THERMAL_NAME_LENGTH];
+	char                   *sensor_names[THERMAL_MAX_VIRT_SENSORS];
+	enum aggregation_logic logic;
+	int                    coefficients[THERMAL_MAX_VIRT_SENSORS];
+	int                    coefficient_ct;
+	int                    avg_offset;
+	int                    avg_denominator;
+};
+
 /* Function declarations */
 #ifdef CONFIG_THERMAL_OF
 struct thermal_zone_device *
@@ -417,6 +453,9 @@
 		const struct thermal_zone_of_device_ops *ops);
 void devm_thermal_zone_of_sensor_unregister(struct device *dev,
 					    struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data);
 #else
 static inline struct thermal_zone_device *
 thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -444,6 +483,14 @@
 {
 }
 
+static inline
+struct thermal_zone_device *devm_thermal_of_virtual_sensor_register(
+		struct device *dev,
+		const struct virtual_sensor_data *sensor_data)
+{
+	return ERR_PTR(-ENODEV);
+}
+
 #endif
 
 #if IS_ENABLED(CONFIG_THERMAL)
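For VIRT_WEIGHTED_AVG, the coefficients, avg_offset and avg_denominator fields above apparently combine the individual sensor readings into one weighted estimate; the exact kernel formula is not visible in this patch, so the sketch below shows only one plausible reading of those fields, with made-up values:

#include <stdio.h>

int main(void)
{
	int temps[3]        = { 42000, 45000, 40000 };	/* millidegree C readings */
	int coefficients[3] = { 1, 2, 1 };
	int avg_offset      = 0;
	int avg_denominator = 4;			/* e.g. the sum of the coefficients */
	long sum = 0;
	int i;

	for (i = 0; i < 3; i++)
		sum += (long)coefficients[i] * temps[i];

	printf("estimated temp = %ld\n", (sum + avg_offset) / avg_denominator);
	return 0;
}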
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
new file mode 100644
index 0000000..f2322f3
--- /dev/null
+++ b/include/linux/usb/audio-v3.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 3.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/docs/devclass_docs/USB_Audio_v3.0.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V3_H
+#define __LINUX_USB_AUDIO_V3_H
+
+#include <linux/types.h>
+
+#define UAC3_MIXER_UNIT_V3	0x05
+#define UAC3_FEATURE_UNIT_V3	0x07
+#define UAC3_CLOCK_SOURCE	0x0b
+
+#define BADD_MAXPSIZE_SYNC_MONO_16	0x0060
+#define BADD_MAXPSIZE_SYNC_MONO_24	0x0090
+#define BADD_MAXPSIZE_SYNC_STEREO_16	0x00c0
+#define BADD_MAXPSIZE_SYNC_STEREO_24	0x0120
+
+#define BADD_MAXPSIZE_ASYNC_MONO_16	0x0062
+#define BADD_MAXPSIZE_ASYNC_MONO_24	0x0093
+#define BADD_MAXPSIZE_ASYNC_STEREO_16	0x00c4
+#define BADD_MAXPSIZE_ASYNC_STEREO_24	0x0126
+
+#define BIT_RES_16_BIT		0x10
+#define BIT_RES_24_BIT		0x18
+
+#define SUBSLOTSIZE_16_BIT	0x02
+#define SUBSLOTSIZE_24_BIT	0x03
+
+#define BADD_SAMPLING_RATE	48000
+
+#define NUM_CHANNELS_MONO	1
+#define NUM_CHANNELS_STEREO	2
+#define BADD_CH_CONFIG_MONO	0
+#define BADD_CH_CONFIG_STEREO	3
+#define CLUSTER_ID_MONO		0x0001
+#define CLUSTER_ID_STEREO	0x0002
+
+#define FULL_ADC_PROFILE	0x01
+
+/* BADD Profile IDs */
+#define PROF_GENERIC_IO		0x20
+#define PROF_HEADPHONE		0x21
+#define PROF_SPEAKER		0x22
+#define PROF_MICROPHONE		0x23
+#define PROF_HEADSET		0x24
+#define PROF_HEADSET_ADAPTER	0x25
+#define PROF_SPEAKERPHONE	0x26
+
+/* BADD Entity IDs */
+#define BADD_OUT_TERM_ID_BAOF	0x03
+#define BADD_OUT_TERM_ID_BAIF	0x06
+#define BADD_IN_TERM_ID_BAOF	0x01
+#define BADD_IN_TERM_ID_BAIF	0x04
+#define BADD_FU_ID_BAOF		0x02
+#define BADD_FU_ID_BAIF		0x05
+#define BADD_CLOCK_SOURCE	0x09
+#define BADD_FU_ID_BAIOF	0x07
+#define BADD_MU_ID_BAIOF	0x08
+
+#define UAC_BIDIR_TERMINAL_HEADSET	0x0402
+#define UAC_BIDIR_TERMINAL_SPEAKERPHONE	0x0403
+
+#define NUM_BADD_DESCS		7
+
+struct uac3_input_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wClusterDescrID;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_INPUT_TERMINAL_SIZE	0x14
+
+extern struct uac3_input_terminal_descriptor badd_baif_in_term_desc;
+extern struct uac3_input_terminal_descriptor badd_baof_in_term_desc;
+
+struct uac3_output_terminal_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bTerminalID;
+	__u16 wTerminalType;
+	__u8 bAssocTerminal;
+	__u8 bSourceID;
+	__u8 bCSourceID;
+	__u32 bmControls;
+	__u16 wExTerminalDescrID;
+	__u16 wConnectorsDescrID;
+	__u16 wTerminalDescrStr;
+} __packed;
+
+#define UAC3_DT_OUTPUT_TERMINAL_SIZE	0x13
+
+extern struct uac3_output_terminal_descriptor badd_baif_out_term_desc;
+extern struct uac3_output_terminal_descriptor badd_baof_out_term_desc;
+
+extern __u8 monoControls[];
+extern __u8 stereoControls[];
+extern __u8 badd_mu_src_ids[];
+
+struct uac3_mixer_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bNrInPins;
+	__u8 *baSourceID;
+	__u16 wClusterDescrID;
+	__u8 bmMixerControls;
+	__u32 bmControls;
+	__u16 wMixerDescrStr;
+} __packed;
+
+#define UAC3_DT_MIXER_UNIT_SIZE		0x10
+
+extern struct uac3_mixer_unit_descriptor badd_baiof_mu_desc;
+
+struct uac3_feature_unit_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bUnitID;
+	__u8 bSourceID;
+	__u8 *bmaControls;
+	__u16 wFeatureDescrStr;
+} __packed;
+
+extern struct uac3_feature_unit_descriptor badd_baif_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baof_fu_desc;
+extern struct uac3_feature_unit_descriptor badd_baiof_fu_desc;
+
+struct uac3_clock_source_descriptor {
+	__u8 bLength;
+	__u8 bDescriptorType;
+	__u8 bDescriptorSubtype;
+	__u8 bClockID;
+	__u8 bmAttributes;
+	__u32 bmControls;
+	__u8 bReferenceTerminal;
+	__u16 wClockSourceStr;
+} __packed;
+
+#define UAC3_DT_CLOCK_SRC_SIZE		0x0c
+
+extern struct uac3_clock_source_descriptor badd_clock_desc;
+
+extern void *badd_desc_list[];
+
+#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index 0583431..8053c8a 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -121,5 +121,6 @@
 int msm_vidc_unsubscribe_event(void *instance,
 		const struct v4l2_event_subscription *sub);
 int msm_vidc_dqevent(void *instance, struct v4l2_event *event);
+int msm_vidc_g_crop(void *instance, struct v4l2_crop *a);
 int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
 #endif
diff --git a/include/net/dst.h b/include/net/dst.h
index 6835d22..ddcff17 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -107,10 +107,16 @@
 	};
 };
 
+struct dst_metrics {
+	u32		metrics[RTAX_MAX];
+	atomic_t	refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY		0x1UL
+#define DST_METRICS_REFCOUNTED		0x2UL
 #define DST_METRICS_FLAGS		0x3UL
 #define __DST_METRICS_PTR(Y)	\
 	((u32 *)((Y) & ~DST_METRICS_FLAGS))
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index f390c3b..aa75828 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -114,11 +114,11 @@
 	__be32			fib_prefsrc;
 	u32			fib_tb_id;
 	u32			fib_priority;
-	u32			*fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+	struct dst_metrics	*fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
 	int			fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			fib_weight;
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
index dbae8e8..50e4b8c 100644
--- a/include/soc/qcom/memory_dump.h
+++ b/include/soc/qcom/memory_dump.h
@@ -62,7 +62,7 @@
 #define MSM_DUMP_MINOR(val)		(val & 0xFFFFF)
 
 
-#define MAX_NUM_ENTRIES		0x140
+#define MAX_NUM_ENTRIES		0x150
 
 enum msm_dump_data_ids {
 	MSM_DUMP_DATA_CPU_CTX = 0x00,
@@ -88,6 +88,7 @@
 	MSM_DUMP_DATA_LOG_BUF = 0x110,
 	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
 	MSM_DUMP_DATA_SCANDUMP_PER_CPU = 0x130,
+	MSM_DUMP_DATA_LLCC_PER_INSTANCE = 0x140,
 	MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
 };
 
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index b54eefc..dc404e4 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -100,8 +100,8 @@
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdxpoorwills")
 #define early_machine_is_sdm845()	\
 	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm845")
-#define early_machine_is_sdm830()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm830")
+#define early_machine_is_sdm670()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,sdm670")
 #else
 #define of_board_is_sim()		0
 #define of_board_is_rumi()		0
@@ -141,7 +141,7 @@
 #define early_machine_is_msmfalcon()	0
 #define early_machine_is_sdxpoorwills()	0
 #define early_machine_is_sdm845()	0
-#define early_machine_is_sdm830()	0
+#define early_machine_is_sdm670()	0
 #endif
 
 #define PLATFORM_SUBTYPE_MDM	1
@@ -203,7 +203,7 @@
 	MSM_CPU_FALCON,
 	SDX_CPU_SDXPOORWILLS,
 	MSM_CPU_SDM845,
-	MSM_CPU_SDM830,
+	MSM_CPU_SDM670,
 };
 
 struct msm_soc_info {
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 5478417..9a0a53e 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -56,6 +56,8 @@
  * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
  * @system_debug: If "set", triggers a device restart when the
  * subsystem's wdog bite handler is invoked.
+ * @ignore_ssr_failure: SSR failures are usually fatal and result in a panic. If
+ * set, the failure will be ignored.
  * @edge: GLINK logical name of the subsystem
  */
 struct subsys_desc {
@@ -91,6 +93,7 @@
 	u32 sysmon_pid;
 	int sysmon_shutdown_ret;
 	bool system_debug;
+	bool ignore_ssr_failure;
 	const char *edge;
 };
 
diff --git a/include/trace/events/pdc.h b/include/trace/events/pdc.h
new file mode 100644
index 0000000..400e959
--- /dev/null
+++ b/include/trace/events/pdc.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pdc
+
+#if !defined(_TRACE_PDC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PDC_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(irq_pin_config,
+
+	TP_PROTO(char *func, u32 pin, u32 hwirq, u32 type, u32 enable),
+
+	TP_ARGS(func, pin, hwirq, type, enable),
+
+	TP_STRUCT__entry(
+		__field(char *, func)
+		__field(u32, pin)
+		__field(u32, hwirq)
+		__field(u32, type)
+		__field(u32, enable)
+	),
+
+	TP_fast_assign(
+		__entry->pin = pin;
+		__entry->func = func;
+		__entry->hwirq = hwirq;
+		__entry->type = type;
+		__entry->enable = enable;
+	),
+
+	TP_printk("%s hwirq:%u pin:%u type:%u enable:%u",
+		__entry->func, __entry->hwirq, __entry->pin, __entry->type,
+		__entry->enable)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE pdc
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4a9c625..8c1746a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -1834,24 +1834,30 @@
 
 TRACE_EVENT(sched_get_nr_running_avg,
 
-	TP_PROTO(int avg, int big_avg, int iowait_avg),
+	TP_PROTO(int avg, int big_avg, int iowait_avg,
+		 unsigned int max_nr, unsigned int big_max_nr),
 
-	TP_ARGS(avg, big_avg, iowait_avg),
+	TP_ARGS(avg, big_avg, iowait_avg, max_nr, big_max_nr),
 
 	TP_STRUCT__entry(
 		__field( int,	avg			)
 		__field( int,	big_avg			)
 		__field( int,	iowait_avg		)
+		__field( unsigned int,	max_nr		)
+		__field( unsigned int,	big_max_nr	)
 	),
 
 	TP_fast_assign(
 		__entry->avg		= avg;
 		__entry->big_avg	= big_avg;
 		__entry->iowait_avg	= iowait_avg;
+		__entry->max_nr		= max_nr;
+		__entry->big_max_nr	= big_max_nr;
 	),
 
-	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
-		__entry->avg, __entry->big_avg, __entry->iowait_avg)
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d max_nr=%u big_max_nr=%u",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg,
+		__entry->max_nr, __entry->big_max_nr)
 );
 
 TRACE_EVENT(core_ctl_eval_need,
diff --git a/include/trace/events/thermal_virtual.h b/include/trace/events/thermal_virtual.h
new file mode 100644
index 0000000..4c9ce51
--- /dev/null
+++ b/include/trace/events/thermal_virtual.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal_virtual
+
+#if !defined(_TRACE_VIRTUAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VIRTUAL_H
+
+#include <linux/thermal.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(virtual_temperature,
+
+	TP_PROTO(struct thermal_zone_device *virt_tz,
+		struct thermal_zone_device *tz, int sens_temp,
+		int est_temp),
+
+	TP_ARGS(virt_tz, tz, sens_temp, est_temp),
+
+	TP_STRUCT__entry(
+		__string(virt_zone, virt_tz->type)
+		__string(therm_zone, tz->type)
+		__field(int, sens_temp)
+		__field(int, est_temp)
+	),
+
+	TP_fast_assign(
+		__assign_str(virt_zone, virt_tz->type);
+		__assign_str(therm_zone, tz->type);
+		__entry->sens_temp = sens_temp;
+		__entry->est_temp = est_temp;
+	),
+
+	TP_printk("virt_zone=%s zone=%s temp=%d virtual zone estimated temp=%d",
+		__get_str(virt_zone), __get_str(therm_zone),
+		__entry->sens_temp,
+		__entry->est_temp)
+);
+
+#endif /* _TRACE_VIRTUAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 67d632f..2d078c2 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -92,4 +92,6 @@
 
 #define SO_CNX_ADVICE		53
 
+#define SO_COOKIE		57
+
 #endif /* __ASM_GENERIC_SOCKET_H */
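
The SO_COOKIE option added above exposes the kernel's per-socket cookie (the
same 8-byte non-decreasing number returned by the bpf_get_socket_cookie()
helper further below) to userspace. A minimal sketch of reading it, assuming
the option is read-only and queried via getsockopt(); the fallback #define is
taken from the value added to this header:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/socket.h>

	#ifndef SO_COOKIE
	#define SO_COOKIE 57		/* matches asm-generic/socket.h above */
	#endif

	/* Sketch: print the kernel-assigned cookie of an existing socket fd. */
	static int print_socket_cookie(int fd)
	{
		uint64_t cookie = 0;
		socklen_t len = sizeof(cookie);

		if (getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &len) < 0) {
			perror("getsockopt(SO_COOKIE)");
			return -1;
		}
		printf("socket cookie: %llu\n", (unsigned long long)cookie);
		return 0;
	}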
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f66ba9c..9c927a5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -77,7 +77,8 @@
 #define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM	(7<<14)
 #define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF	(8<<14)
 #define  DRM_MODE_FLAG_SEAMLESS			(1<<19)
-
+#define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<20)
+#define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<21)
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 7cf7779..cd758c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -525,3 +525,4 @@
 header-y += msm_dsps.h
 header-y += msm-core-interface.h
 header-y += msm_rotator.h
+header-y += nfc/
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f09c70b..b2d5be9 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -73,6 +73,8 @@
 	BPF_PROG_LOAD,
 	BPF_OBJ_PIN,
 	BPF_OBJ_GET,
+	BPF_PROG_ATTACH,
+	BPF_PROG_DETACH,
 };
 
 enum bpf_map_type {
@@ -96,8 +98,23 @@
 	BPF_PROG_TYPE_TRACEPOINT,
 	BPF_PROG_TYPE_XDP,
 	BPF_PROG_TYPE_PERF_EVENT,
+	BPF_PROG_TYPE_CGROUP_SKB,
 };
 
+enum bpf_attach_type {
+	BPF_CGROUP_INET_INGRESS,
+	BPF_CGROUP_INET_EGRESS,
+	__MAX_BPF_ATTACH_TYPE
+};
+
+#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+
+/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
+ * for the given target_fd cgroup, descendant cgroups will be able to
+ * override the effective bpf program that was inherited from this cgroup.
+ */
+#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
+
 #define BPF_PSEUDO_MAP_FD	1
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
@@ -141,6 +158,13 @@
 		__aligned_u64	pathname;
 		__u32		bpf_fd;
 	};
+
+	struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
+		__u32		target_fd;	/* container object to attach to */
+		__u32		attach_bpf_fd;	/* eBPF program to attach */
+		__u32		attach_type;
+		__u32		attach_flags;
+	};
 } __attribute__((aligned(8)));
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -426,6 +450,67 @@
 	 */
 	BPF_FUNC_set_hash_invalid,
 
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head()
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate the skb as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive/negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just size-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success or 0 if the socket
+	 *     field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or 0 if the socket pointer
+	 *     inside sk_buff is NULL
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };
 
diff --git a/include/uapi/linux/nfc/Kbuild b/include/uapi/linux/nfc/Kbuild
new file mode 100644
index 0000000..9071015
--- /dev/null
+++ b/include/uapi/linux/nfc/Kbuild
@@ -0,0 +1,2 @@
+#UAPI export list
+header-y += nfcinfo.h
diff --git a/include/uapi/linux/nfc/nfcinfo.h b/include/uapi/linux/nfc/nfcinfo.h
new file mode 100644
index 0000000..df178e2
--- /dev/null
+++ b/include/uapi/linux/nfc/nfcinfo.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_NFCINFO_H_
+#define _UAPI_NFCINFO_H_
+
+#include <linux/ioctl.h>
+
+#define NFCC_MAGIC 0xE9
+#define NFCC_GET_INFO _IOW(NFCC_MAGIC, 0x09, unsigned int)
+
+struct nqx_devinfo {
+	unsigned char chip_type;
+	unsigned char rom_version;
+	unsigned char fw_major;
+	unsigned char fw_minor;
+};
+
+union nqx_uinfo {
+	unsigned int i;
+	struct nqx_devinfo info;
+};
+
+#endif
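
A minimal sketch of how userspace might use this header, assuming the NFC
controller is exposed as a character device and that the driver packs the
version fields into the ioctl return value, as the nqx_uinfo union suggests;
the /dev/nq-nci path and the helper name are assumptions, not defined here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nfc/nfcinfo.h>

	/* Sketch: query chip and firmware info via NFCC_GET_INFO. */
	static int print_nfcc_info(const char *dev)
	{
		union nqx_uinfo info;
		int fd, ret;

		fd = open(dev, O_RDONLY);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, NFCC_GET_INFO, 0); /* assumed: info packed in return value */
		close(fd);
		if (ret < 0)
			return -1;
		info.i = (unsigned int)ret;

		printf("chip=%u rom=%u fw=%u.%u\n", info.info.chip_type,
		       info.info.rom_version, info.info.fw_major, info.info.fw_minor);
		return 0;
	}

	/* e.g. print_nfcc_info("/dev/nq-nci"); */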
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index d2314be..c6f5b09 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -26,6 +26,7 @@
 /* bInterfaceProtocol values to denote the version of the standard used */
 #define UAC_VERSION_1			0x00
 #define UAC_VERSION_2			0x20
+#define UAC_VERSION_3			0x30
 
 /* A.2 Audio Interface Subclass Codes */
 #define USB_SUBCLASS_AUDIOCONTROL	0x01
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
index 300bd87..c5cbac8 100644
--- a/include/uapi/media/cam_cpas.h
+++ b/include/uapi/media/cam_cpas.h
@@ -11,13 +11,15 @@
  *
  * @camera_family     : Camera family type
  * @reserved          : Reserved field for alignment
- * @camera_version    : Camera version
+ * @camera_version    : Camera platform version
+ * @cpas_version      : Camera CPAS version within camera platform
  *
  */
 struct cam_cpas_query_cap {
 	uint32_t                 camera_family;
 	uint32_t                 reserved;
 	struct cam_hw_version    camera_version;
+	struct cam_hw_version    cpas_version;
 };
 
 #endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index b736755..e6c1a45 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -325,4 +325,55 @@
 	uint32_t mem_cache_ops;
 };
 
+/**
+ * Request Manager : error message type
+ * @CAM_REQ_MGR_ERROR_TYPE_DEVICE: Device error message, fatal to session
+ * @CAM_REQ_MGR_ERROR_TYPE_REQUEST: Error on a single request, not fatal
+ * @CAM_REQ_MGR_ERROR_TYPE_BUFFER: Buffer was not filled, not fatal
+ */
+#define CAM_REQ_MGR_ERROR_TYPE_DEVICE           0
+#define CAM_REQ_MGR_ERROR_TYPE_REQUEST          1
+#define CAM_REQ_MGR_ERROR_TYPE_BUFFER           2
+
+/**
+ * struct cam_req_mgr_error_msg
+ * @error_type: type of error
+ * @request_id: request id of frame
+ * @device_hdl: device handle
+ * @reserved: reserved field
+ * @resource_size: size of the resource
+ */
+struct cam_req_mgr_error_msg {
+	uint32_t error_type;
+	uint32_t request_id;
+	int32_t device_hdl;
+	int32_t reserved;
+	uint64_t resource_size;
+};
+
+/**
+ * struct cam_req_mgr_frame_msg
+ * @request_id: request id of frame
+ * @frame_count: running count of frames
+ * @timestamp: timestamp of frame
+ */
+struct cam_req_mgr_frame_msg {
+	uint64_t request_id;
+	uint64_t frame_count;
+	uint64_t timestamp;
+};
+
+/**
+ * struct cam_req_mgr_message
+ * @session_hdl: session to which the frame belongs
+ * @u: union which can either be error or frame message
+ */
+struct cam_req_mgr_message {
+	int32_t session_hdl;
+	union {
+		struct cam_req_mgr_error_msg err_msg;
+		struct cam_req_mgr_frame_msg frame_msg;
+	} u;
+};
 #endif /* __UAPI_LINUX_CAM_REQ_MGR_H */
diff --git a/init/Kconfig b/init/Kconfig
index bdfcc0f..954de19 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1248,6 +1248,19 @@
 
 	  Say N if unsure.
 
+config CGROUP_BPF
+	bool "Support for eBPF programs attached to cgroups"
+	depends on BPF_SYSCALL
+	select SOCK_CGROUP_DATA
+	help
+	  Allow attaching eBPF programs to a cgroup using the bpf(2)
+	  syscall command BPF_PROG_ATTACH.
+
+	  In which context these programs are accessed depends on the type
+	  of attachment. For instance, programs that are attached using
+	  BPF_CGROUP_INET_INGRESS will be executed on the ingress path of
+	  inet sockets.
+
 config CGROUP_DEBUG
 	bool "Example controller"
 	default n
@@ -1257,6 +1270,10 @@
 
 	  Say N.
 
+config SOCK_CGROUP_DATA
+	bool
+	default n
+
 endif # CGROUPS
 
 config SCHED_HMP
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
index a557c5e..7760705 100644
--- a/init/do_mounts_dm.c
+++ b/init/do_mounts_dm.c
@@ -5,13 +5,17 @@
  *
  * This file is released under the GPL.
  */
+#include <linux/async.h>
+#include <linux/ctype.h>
 #include <linux/device-mapper.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/delay.h>
 
 #include "do_mounts.h"
-#include "../drivers/md/dm.h"
 
+#define DM_MAX_DEVICES 256
+#define DM_MAX_TARGETS 256
 #define DM_MAX_NAME 32
 #define DM_MAX_UUID 129
 #define DM_NO_UUID "none"
@@ -19,14 +23,47 @@
 #define DM_MSG_PREFIX "init"
 
 /* Separators used for parsing the dm= argument. */
-#define DM_FIELD_SEP ' '
-#define DM_LINE_SEP ','
+#define DM_FIELD_SEP " "
+#define DM_LINE_SEP ","
+#define DM_ANY_SEP DM_FIELD_SEP DM_LINE_SEP
 
 /*
  * When the device-mapper and any targets are compiled into the kernel
- * (not a module), one target may be created and used as the root device at
- * boot time with the parameters given with the boot line dm=...
- * The code for that is here.
+ * (not a module), one or more device-mappers may be created and used
+ * as the root device at boot time with the parameters given with the
+ * boot line dm=...
+ *
+ * Multiple device-mappers can be stacked by specifying the number of
+ * devices. A device can have multiple targets if the number of
+ * targets is specified.
+ *
+ * TODO(taysom:defect 32847)
+ * In the future, the <num> field will be mandatory.
+ *
+ * <device>        ::= [<num>] <device-mapper>+
+ * <device-mapper> ::= <head> "," <target>+
+ * <head>          ::= <name> <uuid> <mode> [<num>]
+ * <target>        ::= <start> <length> <type> <options> ","
+ * <mode>          ::= "ro" | "rw"
+ * <uuid>          ::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | "none"
+ * <type>          ::= "verity" | "bootcache" | ...
+ *
+ * Example:
+ * 2 vboot none ro 1,
+ *     0 1768000 bootcache
+ *       device=aa55b119-2a47-8c45-946a-5ac57765011f+1
+ *       signature=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       cache_start=1768000 max_blocks=100000 size_limit=23 max_trace=20000,
+ *   vroot none ro 1,
+ *     0 1740800 verity payload=254:0 hashtree=254:0 hashstart=1740800 alg=sha1
+ *       root_hexdigest=76e9be054b15884a9fa85973e9cb274c93afadb6
+ *       salt=5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe
+ *
+ * Notes:
+ *  1. uuid is a label for the device and we set it to "none".
+ *  2. The <num> field will be optional initially and assumed to be 1.
+ *     Once all the scripts that set these fields have been updated, it will
+ *     be made mandatory.
  */
 
 struct dm_setup_target {
@@ -38,381 +75,388 @@
 	struct dm_setup_target *next;
 };
 
-static struct {
+struct dm_device {
 	int minor;
 	int ro;
 	char name[DM_MAX_NAME];
 	char uuid[DM_MAX_UUID];
-	char *targets;
+	unsigned long num_targets;
 	struct dm_setup_target *target;
 	int target_count;
+	struct dm_device *next;
+};
+
+struct dm_option {
+	char *start;
+	char *next;
+	size_t len;
+	char delim;
+};
+
+static struct {
+	unsigned long num_devices;
+	char *str;
 } dm_setup_args __initdata;
 
 static __initdata int dm_early_setup;
 
-static size_t __init get_dm_option(char *str, char **next, char sep)
+static int __init get_dm_option(struct dm_option *opt, const char *accept)
 {
-	size_t len = 0;
-	char *endp = NULL;
+	char *str = opt->next;
+	char *endp;
 
 	if (!str)
 		return 0;
 
-	endp = strchr(str, sep);
+	str = skip_spaces(str);
+	opt->start = str;
+	endp = strpbrk(str, accept);
 	if (!endp) {  /* act like strchrnul */
-		len = strlen(str);
-		endp = str + len;
+		opt->len = strlen(str);
+		endp = str + opt->len;
 	} else {
-		len = endp - str;
+		opt->len = endp - str;
 	}
-
-	if (endp == str)
-		return 0;
-
-	if (!next)
-		return len;
-
+	opt->delim = *endp;
 	if (*endp == 0) {
 		/* Don't advance past the nul. */
-		*next = endp;
+		opt->next = endp;
 	} else {
-		*next = endp + 1;
+		opt->next = endp + 1;
 	}
-	return len;
+	return opt->len != 0;
 }
 
-static int __init dm_setup_args_init(void)
+static int __init dm_setup_cleanup(struct dm_device *devices)
 {
-	dm_setup_args.minor = 0;
-	dm_setup_args.ro = 0;
-	dm_setup_args.target = NULL;
-	dm_setup_args.target_count = 0;
+	struct dm_device *dev = devices;
+
+	while (dev) {
+		struct dm_device *old_dev = dev;
+		struct dm_setup_target *target = dev->target;
+		while (target) {
+			struct dm_setup_target *old_target = target;
+			kfree(target->type);
+			kfree(target->params);
+			target = target->next;
+			kfree(old_target);
+			dev->target_count--;
+		}
+		BUG_ON(dev->target_count);
+		dev = dev->next;
+		kfree(old_dev);
+	}
 	return 0;
 }
 
-static int __init dm_setup_cleanup(void)
+static char * __init dm_parse_device(struct dm_device *dev, char *str)
 {
-	struct dm_setup_target *target = dm_setup_args.target;
-	struct dm_setup_target *old_target = NULL;
-	while (target) {
-		kfree(target->type);
-		kfree(target->params);
-		old_target = target;
-		target = target->next;
-		kfree(old_target);
-		dm_setup_args.target_count--;
-	}
-	BUG_ON(dm_setup_args.target_count);
-	return 0;
-}
-
-static char * __init dm_setup_parse_device_args(char *str)
-{
-	char *next = NULL;
-	size_t len = 0;
+	struct dm_option opt;
+	size_t len;
 
 	/* Grab the logical name of the device to be exported to udev */
-	len = get_dm_option(str, &next, DM_FIELD_SEP);
-	if (!len) {
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
 		DMERR("failed to parse device name");
 		goto parse_fail;
 	}
-	len = min(len + 1, sizeof(dm_setup_args.name));
-	strlcpy(dm_setup_args.name, str, len);  /* includes nul */
-	str = skip_spaces(next);
+	len = min(opt.len + 1, sizeof(dev->name));
+	strlcpy(dev->name, opt.start, len);  /* includes nul */
 
 	/* Grab the UUID value or "none" */
-	len = get_dm_option(str, &next, DM_FIELD_SEP);
-	if (!len) {
+	if (!get_dm_option(&opt, DM_FIELD_SEP)) {
 		DMERR("failed to parse device uuid");
 		goto parse_fail;
 	}
-	len = min(len + 1, sizeof(dm_setup_args.uuid));
-	strlcpy(dm_setup_args.uuid, str, len);
-	str = skip_spaces(next);
+	len = min(opt.len + 1, sizeof(dev->uuid));
+	strlcpy(dev->uuid, opt.start, len);
 
 	/* Determine if the table/device will be read only or read-write */
-	if (!strncmp("ro,", str, 3)) {
-		dm_setup_args.ro = 1;
-	} else if (!strncmp("rw,", str, 3)) {
-		dm_setup_args.ro = 0;
+	get_dm_option(&opt, DM_ANY_SEP);
+	if (!strncmp("ro", opt.start, opt.len)) {
+		dev->ro = 1;
+	} else if (!strncmp("rw", opt.start, opt.len)) {
+		dev->ro = 0;
 	} else {
 		DMERR("failed to parse table mode");
 		goto parse_fail;
 	}
-	str = skip_spaces(str + 3);
 
-	return str;
+	/* Optional number field */
+	/* XXX: The <num> field will be mandatory in the next round */
+	if (opt.delim == DM_FIELD_SEP[0]) {
+		if (!get_dm_option(&opt, DM_LINE_SEP))
+			return NULL;
+		dev->num_targets = simple_strtoul(opt.start, NULL, 10);
+	} else {
+		dev->num_targets = 1;
+	}
+	if (dev->num_targets > DM_MAX_TARGETS) {
+		DMERR("too many targets %lu > %d",
+			dev->num_targets, DM_MAX_TARGETS);
+	}
+	return opt.next;
 
 parse_fail:
 	return NULL;
 }
 
-static void __init dm_substitute_devices(char *str, size_t str_len)
+static char * __init dm_parse_targets(struct dm_device *dev, char *str)
 {
-	char *candidate = str;
-	char *candidate_end = str;
-	char old_char;
-	size_t len = 0;
-	dev_t dev;
-
-	if (str_len < 3)
-		return;
-
-	while (str && *str) {
-		candidate = strchr(str, '/');
-		if (!candidate)
-			break;
-
-		/* Avoid embedded slashes */
-		if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
-			str = strchr(candidate, DM_FIELD_SEP);
-			continue;
-		}
-
-		len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
-		str = skip_spaces(candidate_end);
-		if (len < 3 || len > 37)  /* name_to_dev_t max; maj:mix min */
-			continue;
-
-		/* Temporarily terminate with a nul */
-		if (*candidate_end)
-			candidate_end--;
-		old_char = *candidate_end;
-		*candidate_end = '\0';
-
-		DMDEBUG("converting candidate device '%s' to dev_t", candidate);
-		/* Use the boot-time specific device naming */
-		dev = name_to_dev_t(candidate);
-		*candidate_end = old_char;
-
-		DMDEBUG(" -> %u", dev);
-		/* No suitable replacement found */
-		if (!dev)
-			continue;
-
-		/* Rewrite the /dev/path as a major:minor */
-		len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
-		if (!len) {
-			DMERR("error substituting device major/minor.");
-			break;
-		}
-		candidate += len;
-		/* Pad out with spaces (fixing our nul) */
-		while (candidate < candidate_end)
-			*(candidate++) = DM_FIELD_SEP;
-	}
-}
-
-static int __init dm_setup_parse_targets(char *str)
-{
-	char *next = NULL;
-	size_t len = 0;
-	struct dm_setup_target **target = NULL;
+	struct dm_option opt;
+	struct dm_setup_target **target = &dev->target;
+	unsigned long num_targets = dev->num_targets;
+	unsigned long i;
 
 	/* Targets are defined as per the table format but with a
 	 * comma as a newline separator. */
-	target = &dm_setup_args.target;
-	while (str && *str) {
+	opt.next = str;
+	for (i = 0; i < num_targets; i++) {
 		*target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
 		if (!*target) {
-			DMERR("failed to allocate memory for target %d",
-			      dm_setup_args.target_count);
+			DMERR("failed to allocate memory for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		dm_setup_args.target_count++;
+		dev->target_count++;
 
-		(*target)->begin = simple_strtoull(str, &next, 10);
-		if (!next || *next != DM_FIELD_SEP) {
-			DMERR("failed to parse starting sector for target %d",
-			      dm_setup_args.target_count - 1);
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse starting sector"
+				" for target %s<%ld>", dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next + 1);
+		(*target)->begin = simple_strtoull(opt.start, NULL, 10);
 
-		(*target)->length = simple_strtoull(str, &next, 10);
-		if (!next || *next != DM_FIELD_SEP) {
-			DMERR("failed to parse length for target %d",
-			      dm_setup_args.target_count - 1);
+		if (!get_dm_option(&opt, DM_FIELD_SEP)) {
+			DMERR("failed to parse length for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next + 1);
+		(*target)->length = simple_strtoull(opt.start, NULL, 10);
 
-		len = get_dm_option(str, &next, DM_FIELD_SEP);
-		if (!len ||
-		    !((*target)->type = kstrndup(str, len, GFP_KERNEL))) {
-			DMERR("failed to parse type for target %d",
-			      dm_setup_args.target_count - 1);
+		if (get_dm_option(&opt, DM_FIELD_SEP))
+			(*target)->type = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->type)) {
+			DMERR("failed to parse type for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next);
-
-		len = get_dm_option(str, &next, DM_LINE_SEP);
-		if (!len ||
-		    !((*target)->params = kstrndup(str, len, GFP_KERNEL))) {
-			DMERR("failed to parse params for target %d",
-			      dm_setup_args.target_count - 1);
+		if (get_dm_option(&opt, DM_LINE_SEP))
+			(*target)->params = kstrndup(opt.start, opt.len,
+							GFP_KERNEL);
+		if (!((*target)->params)) {
+			DMERR("failed to parse params for target %s<%ld>",
+				dev->name, i);
 			goto parse_fail;
 		}
-		str = skip_spaces(next);
-
-		/* Before moving on, walk through the copied target and
-		 * attempt to replace all /dev/xxx with the major:minor number.
-		 * It may not be possible to resolve them traditionally at
-		 * boot-time. */
-		dm_substitute_devices((*target)->params, len);
-
 		target = &((*target)->next);
 	}
-	DMDEBUG("parsed %d targets", dm_setup_args.target_count);
+	DMDEBUG("parsed %d targets", dev->target_count);
 
-	return 0;
+	return opt.next;
 
 parse_fail:
-	return 1;
+	return NULL;
+}
+
+static struct dm_device * __init dm_parse_args(void)
+{
+	struct dm_device *devices = NULL;
+	struct dm_device **tail = &devices;
+	struct dm_device *dev;
+	char *str = dm_setup_args.str;
+	unsigned long num_devices = dm_setup_args.num_devices;
+	unsigned long i;
+
+	if (!str)
+		return NULL;
+	for (i = 0; i < num_devices; i++) {
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev) {
+			DMERR("failed to allocated memory for dev");
+			goto error;
+		}
+		*tail = dev;
+		tail = &dev->next;
+		/*
+		 * devices are given minor numbers 0 - n-1
+		 * in the order they are found in the arg
+		 * string.
+		 */
+		dev->minor = i;
+		str = dm_parse_device(dev, str);
+		if (!str)	/* NULL indicates error in parsing, bail */
+			goto error;
+
+		str = dm_parse_targets(dev, str);
+		if (!str)
+			goto error;
+	}
+	return devices;
+error:
+	dm_setup_cleanup(devices);
+	return NULL;
 }
 
 /*
  * Parse the command-line parameters given our kernel, but do not
  * actually try to invoke the DM device now; that is handled by
- * dm_setup_drive after the low-level disk drivers have initialised.
- * dm format is as follows:
- *  dm="name uuid fmode,[table line 1],[table line 2],..."
- * May be used with root=/dev/dm-0 as it always uses the first dm minor.
+ * dm_setup_drives after the low-level disk drivers have initialised.
+ * dm format is described at the top of the file.
+ *
+ * Because dm minor numbers are assigned in ascending order starting with 0,
+ * you can assume the first device is /dev/dm-0, the next device is /dev/dm-1,
+ * and so forth.
  */
-
 static int __init dm_setup(char *str)
 {
-	dm_setup_args_init();
+	struct dm_option opt;
+	unsigned long num_devices;
 
-	str = dm_setup_parse_device_args(str);
 	if (!str) {
 		DMDEBUG("str is NULL");
 		goto parse_fail;
 	}
-
-	/* Target parsing is delayed until we have dynamic memory */
-	dm_setup_args.targets = str;
-
-	printk(KERN_INFO "dm: will configure '%s' on dm-%d\n",
-	       dm_setup_args.name, dm_setup_args.minor);
-
+	opt.next = str;
+	if (!get_dm_option(&opt, DM_FIELD_SEP))
+		goto parse_fail;
+	if (isdigit(opt.start[0])) {	/* XXX: Optional number field */
+		num_devices = simple_strtoul(opt.start, NULL, 10);
+		str = opt.next;
+	} else {
+		num_devices = 1;
+		/* Don't advance str */
+	}
+	if (num_devices > DM_MAX_DEVICES) {
+		DMDEBUG("too many devices %lu > %d",
+			num_devices, DM_MAX_DEVICES);
+	}
+	dm_setup_args.str = str;
+	dm_setup_args.num_devices = num_devices;
+	DMINFO("will configure %lu devices", num_devices);
 	dm_early_setup = 1;
 	return 1;
 
 parse_fail:
-	printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n");
+	DMWARN("Invalid arguments supplied to dm=.");
 	return 0;
 }
 
-
-static void __init dm_setup_drive(void)
+static void __init dm_setup_drives(void)
 {
 	struct mapped_device *md = NULL;
 	struct dm_table *table = NULL;
 	struct dm_setup_target *target;
-	char *uuid = dm_setup_args.uuid;
+	struct dm_device *dev;
+	char *uuid = NULL;
 	fmode_t fmode = FMODE_READ;
+	struct dm_device *devices;
 
-	/* Finish parsing the targets. */
-	if (dm_setup_parse_targets(dm_setup_args.targets))
-		goto parse_fail;
+	devices = dm_parse_args();
 
-	if (dm_create(dm_setup_args.minor, &md)) {
-		DMDEBUG("failed to create the device");
-		goto dm_create_fail;
-	}
-	DMDEBUG("created device '%s'", dm_device_name(md));
-
-	/* In addition to flagging the table below, the disk must be
-	 * set explicitly ro/rw. */
-	set_disk_ro(dm_disk(md), dm_setup_args.ro);
-
-	if (!dm_setup_args.ro)
-		fmode |= FMODE_WRITE;
-	if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
-		DMDEBUG("failed to create the table");
-		goto dm_table_create_fail;
-	}
-
-	dm_lock_md_type(md);
-	target = dm_setup_args.target;
-	while (target) {
-		DMINFO("adding target '%llu %llu %s %s'",
-		       (unsigned long long) target->begin,
-		       (unsigned long long) target->length, target->type,
-		       target->params);
-		if (dm_table_add_target(table, target->type, target->begin,
-					target->length, target->params)) {
-			DMDEBUG("failed to add the target to the table");
-			goto add_target_fail;
+	for (dev = devices; dev; dev = dev->next) {
+		if (dm_create(dev->minor, &md)) {
+			DMDEBUG("failed to create the device");
+			goto dm_create_fail;
 		}
-		target = target->next;
-	}
+		DMDEBUG("created device '%s'", dm_device_name(md));
 
-	if (dm_table_complete(table)) {
-		DMDEBUG("failed to complete the table");
-		goto table_complete_fail;
-	}
+		/*
+		 * In addition to flagging the table below, the disk must be
+		 * set explicitly ro/rw.
+		 */
+		set_disk_ro(dm_disk(md), dev->ro);
 
-	if (dm_get_md_type(md) == DM_TYPE_NONE) {
+		if (!dev->ro)
+			fmode |= FMODE_WRITE;
+		if (dm_table_create(&table, fmode, dev->target_count, md)) {
+			DMDEBUG("failed to create the table");
+			goto dm_table_create_fail;
+		}
+
+		dm_lock_md_type(md);
+
+		for (target = dev->target; target; target = target->next) {
+			DMINFO("adding target '%llu %llu %s %s'",
+			       (unsigned long long) target->begin,
+			       (unsigned long long) target->length,
+			       target->type, target->params);
+			if (dm_table_add_target(table, target->type,
+						target->begin,
+						target->length,
+						target->params)) {
+				DMDEBUG("failed to add the target"
+					" to the table");
+				goto add_target_fail;
+			}
+		}
+		if (dm_table_complete(table)) {
+			DMDEBUG("failed to complete the table");
+			goto table_complete_fail;
+		}
+
+		/* Suspend the device so that we can bind it to the table. */
+		if (dm_suspend(md, 0)) {
+			DMDEBUG("failed to suspend the device pre-bind");
+			goto suspend_fail;
+		}
+
+		/* Initial table load: acquire type of table. */
 		dm_set_md_type(md, dm_table_get_type(table));
+
+		/* Setup md->queue to reflect md's type. */
 		if (dm_setup_md_queue(md, table)) {
 			DMWARN("unable to set up device queue for new table.");
 			goto setup_md_queue_fail;
 		}
-	} else if (dm_get_md_type(md) != dm_table_get_type(table)) {
-		DMWARN("can't change device type after initial table load.");
-		goto setup_md_queue_fail;
-        }
 
-	/* Suspend the device so that we can bind it to the table. */
-	if (dm_suspend(md, 0)) {
-		DMDEBUG("failed to suspend the device pre-bind");
-		goto suspend_fail;
+		/*
+		 * Bind the table to the device. This is the only way
+		 * to associate md->map with the table and set the disk
+		 * capacity directly.
+		 */
+		if (dm_swap_table(md, table)) {  /* should return NULL. */
+			DMDEBUG("failed to bind the device to the table");
+			goto table_bind_fail;
+		}
+
+		/* Finally, resume and the device should be ready. */
+		if (dm_resume(md)) {
+			DMDEBUG("failed to resume the device");
+			goto resume_fail;
+		}
+
+		/* Export the dm device via the ioctl interface */
+		if (!strcmp(DM_NO_UUID, dev->uuid))
+			uuid = NULL;
+		if (dm_ioctl_export(md, dev->name, uuid)) {
+			DMDEBUG("failed to export device with given"
+				" name and uuid");
+			goto export_fail;
+		}
+
+		dm_unlock_md_type(md);
+
+		DMINFO("dm-%d is ready", dev->minor);
 	}
-
-	/* Bind the table to the device. This is the only way to associate
-	 * md->map with the table and set the disk capacity directly. */
-	if (dm_swap_table(md, table)) {  /* should return NULL. */
-		DMDEBUG("failed to bind the device to the table");
-		goto table_bind_fail;
-	}
-
-	/* Finally, resume and the device should be ready. */
-	if (dm_resume(md)) {
-		DMDEBUG("failed to resume the device");
-		goto resume_fail;
-	}
-
-	/* Export the dm device via the ioctl interface */
-	if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
-		uuid = NULL;
-	if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
-		DMDEBUG("failed to export device with given name and uuid");
-		goto export_fail;
-	}
-	printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);
-
-	dm_unlock_md_type(md);
-	dm_setup_cleanup();
+	dm_setup_cleanup(devices);
 	return;
 
 export_fail:
 resume_fail:
 table_bind_fail:
-suspend_fail:
 setup_md_queue_fail:
+suspend_fail:
 table_complete_fail:
 add_target_fail:
 	dm_unlock_md_type(md);
 dm_table_create_fail:
 	dm_put(md);
 dm_create_fail:
-	dm_setup_cleanup();
-parse_fail:
-	printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
-	       dm_setup_args.minor, dm_setup_args.name);
+	DMWARN("starting dm-%d (%s) failed",
+	       dev->minor, dev->name);
+	dm_setup_cleanup(devices);
 }
 
 __setup("dm=", dm_setup);
@@ -421,6 +465,6 @@
 {
 	if (!dm_early_setup)
 		return;
-	printk(KERN_INFO "dm: attempting early device configuration.\n");
-	dm_setup_drive();
+	DMINFO("attempting early device configuration.");
+	dm_setup_drives();
 }
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d..b22256b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,3 +5,4 @@
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
+obj-$(CONFIG_CGROUP_BPF) += cgroup.o
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
new file mode 100644
index 0000000..a44a7e4
--- /dev/null
+++ b/kernel/bpf/cgroup.c
@@ -0,0 +1,205 @@
+/*
+ * Functions to manage eBPF programs attached to cgroups
+ *
+ * Copyright (c) 2016 Daniel Mack
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup.h>
+#include <net/sock.h>
+
+DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+
+/**
+ * cgroup_bpf_put() - put references of all bpf programs
+ * @cgrp: the cgroup to modify
+ */
+void cgroup_bpf_put(struct cgroup *cgrp)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.prog); type++) {
+		struct bpf_prog *prog = cgrp->bpf.prog[type];
+
+		if (prog) {
+			bpf_prog_put(prog);
+			static_branch_dec(&cgroup_bpf_enabled_key);
+		}
+	}
+}
+
+/**
+ * cgroup_bpf_inherit() - inherit effective programs from parent
+ * @cgrp: the cgroup to modify
+ * @parent: the parent to inherit from
+ */
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+{
+	unsigned int type;
+
+	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.effective); type++) {
+		struct bpf_prog *e;
+
+		e = rcu_dereference_protected(parent->bpf.effective[type],
+					      lockdep_is_held(&cgroup_mutex));
+		rcu_assign_pointer(cgrp->bpf.effective[type], e);
+		cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
+	}
+}
+
+/**
+ * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup whose descendants are to be traversed
+ * @parent: The parent of @cgrp, or %NULL if @cgrp is the root
+ * @prog: A new program to pin
+ * @type: Type of pinning operation (ingress/egress)
+ *
+ * Each cgroup has a set of two pointers for bpf programs; one for eBPF
+ * programs it owns, and one for the program that is effective for execution.
+ *
+ * If @prog is not %NULL, this function attaches a new program to the cgroup
+ * and releases the one that is currently attached, if any. @prog is then made
+ * the effective program of type @type in that cgroup.
+ *
+ * If @prog is %NULL, the currently attached program of type @type is released,
+ * and the effective program of the parent cgroup (if any) is inherited to
+ * @cgrp.
+ *
+ * Then, the descendants of @cgrp are walked and the effective program for
+ * each of them is set to the effective program of @cgrp unless the
+ * descendant has its own program attached, in which case the subbranch is
+ * skipped. This ensures that delegated subcgroups with own programs are left
+ * untouched.
+ *
+ * Must be called with cgroup_mutex held.
+ */
+int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
+			struct bpf_prog *prog, enum bpf_attach_type type,
+			bool new_overridable)
+{
+	struct bpf_prog *old_prog, *effective = NULL;
+	struct cgroup_subsys_state *pos;
+	bool overridable = true;
+
+	if (parent) {
+		overridable = !parent->bpf.disallow_override[type];
+		effective = rcu_dereference_protected(parent->bpf.effective[type],
+						      lockdep_is_held(&cgroup_mutex));
+	}
+
+	if (prog && effective && !overridable)
+		/* if parent has non-overridable prog attached, disallow
+		 * attaching new programs to descendant cgroups
+		 */
+		return -EPERM;
+
+	if (prog && effective && overridable != new_overridable)
+		/* if parent has overridable prog attached, only
+		 * allow overridable programs in descendant cgroups
+		 */
+		return -EPERM;
+
+	old_prog = cgrp->bpf.prog[type];
+
+	if (prog) {
+		overridable = new_overridable;
+		effective = prog;
+		if (old_prog &&
+		    cgrp->bpf.disallow_override[type] == new_overridable)
+			/* disallow attaching non-overridable on top
+			 * of existing overridable in this cgroup
+			 * and vice versa
+			 */
+			return -EPERM;
+	}
+
+	if (!prog && !old_prog)
+		/* report error when trying to detach and nothing is attached */
+		return -ENOENT;
+
+	cgrp->bpf.prog[type] = prog;
+
+	css_for_each_descendant_pre(pos, &cgrp->self) {
+		struct cgroup *desc = container_of(pos, struct cgroup, self);
+
+		/* skip the subtree if the descendant has its own program */
+		if (desc->bpf.prog[type] && desc != cgrp) {
+			pos = css_rightmost_descendant(pos);
+		} else {
+			rcu_assign_pointer(desc->bpf.effective[type],
+					   effective);
+			desc->bpf.disallow_override[type] = !overridable;
+		}
+	}
+
+	if (prog)
+		static_branch_inc(&cgroup_bpf_enabled_key);
+
+	if (old_prog) {
+		bpf_prog_put(old_prog);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+	return 0;
+}
+
+/**
+ * __cgroup_bpf_run_filter() - Run a program for packet filtering
+ * @sk: The socket sending or receiving traffic
+ * @skb: The skb that is being sent or received
+ * @type: The type of program to be executed
+ *
+ * If no socket is passed, or the socket is not of type INET or INET6,
+ * this function does nothing and returns 0.
+ *
+ * The program type passed in via @type must be suitable for network
+ * filtering. No further check is performed to assert that.
+ *
+ * This function will return %-EPERM if an attached program was found
+ * and it returned != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret = 0;
+
+	if (!sk || !sk_fullsock(sk))
+		return 0;
+
+	if (sk->sk_family != AF_INET &&
+	    sk->sk_family != AF_INET6)
+		return 0;
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(cgrp->bpf.effective[type]);
+	if (prog) {
+		unsigned int offset = skb->data - skb_network_header(skb);
+		struct sock *save_sk = skb->sk;
+
+		skb->sk = sk;
+		__skb_push(skb, offset);
+		ret = bpf_prog_run_save_cb(prog, skb) == 1 ? 0 : -EPERM;
+		__skb_pull(skb, offset);
+		skb->sk = save_sk;
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(__cgroup_bpf_run_filter);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 237f3d6..5e668da 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -747,7 +747,9 @@
 	    attr->kern_version != LINUX_VERSION_CODE)
 		return -EINVAL;
 
-	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
+	    type != BPF_PROG_TYPE_CGROUP_SKB &&
+	    !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	/* plain bpf_prog allocation */
@@ -824,6 +826,85 @@
 	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
 }
 
+#ifdef CONFIG_CGROUP_BPF
+
+#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
+
+static int bpf_prog_attach(const union bpf_attr *attr)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_ATTACH))
+		return -EINVAL;
+
+	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		prog = bpf_prog_get_type(attr->attach_bpf_fd,
+					 BPF_PROG_TYPE_CGROUP_SKB);
+		if (IS_ERR(prog))
+			return PTR_ERR(prog);
+
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp)) {
+			bpf_prog_put(prog);
+			return PTR_ERR(cgrp);
+		}
+
+		ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
+					attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
+		if (ret)
+			bpf_prog_put(prog);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+#define BPF_PROG_DETACH_LAST_FIELD attach_type
+
+static int bpf_prog_detach(const union bpf_attr *attr)
+{
+	struct cgroup *cgrp;
+	int ret;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (CHECK_ATTR(BPF_PROG_DETACH))
+		return -EINVAL;
+
+	switch (attr->attach_type) {
+	case BPF_CGROUP_INET_INGRESS:
+	case BPF_CGROUP_INET_EGRESS:
+		cgrp = cgroup_get_from_fd(attr->target_fd);
+		if (IS_ERR(cgrp))
+			return PTR_ERR(cgrp);
+
+		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
+		cgroup_put(cgrp);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
@@ -890,6 +971,16 @@
 	case BPF_OBJ_GET:
 		err = bpf_obj_get(&attr);
 		break;
+
+#ifdef CONFIG_CGROUP_BPF
+	case BPF_PROG_ATTACH:
+		err = bpf_prog_attach(&attr);
+		break;
+	case BPF_PROG_DETACH:
+		err = bpf_prog_detach(&attr);
+		break;
+#endif
+
 	default:
 		err = -EINVAL;
 		break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 44c17f4..fe158bd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2164,6 +2164,7 @@
 	case BPF_PROG_TYPE_SOCKET_FILTER:
 	case BPF_PROG_TYPE_SCHED_CLS:
 	case BPF_PROG_TYPE_SCHED_ACT:
+	case BPF_PROG_TYPE_CGROUP_SKB:
 		return true;
 	default:
 		return false;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 6670008..02e367a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5096,6 +5096,8 @@
 		if (cgrp->kn)
 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
 					 NULL);
+
+		cgroup_bpf_put(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -5308,6 +5310,9 @@
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (parent)
+		cgroup_bpf_inherit(cgrp, parent);
+
 	cgroup_propagate_control(cgrp);
 
 	return cgrp;
@@ -6514,6 +6519,20 @@
 }
 subsys_initcall(cgroup_namespaces_init);
 
+#ifdef CONFIG_CGROUP_BPF
+int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
+		      enum bpf_attach_type type, bool overridable)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+	int ret;
+
+	mutex_lock(&cgroup_mutex);
+	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
+	mutex_unlock(&cgroup_mutex);
+	return ret;
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *
 debug_css_alloc(struct cgroup_subsys_state *parent_css)
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index b7b997f..fb6017e 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -3,6 +3,8 @@
 # CONFIG_DEVMEM is not set
 # CONFIG_FHANDLE is not set
 # CONFIG_INET_LRO is not set
+# CONFIG_NFSD is not set
+# CONFIG_NFS_FS is not set
 # CONFIG_OABI_COMPAT is not set
 # CONFIG_SYSVIPC is not set
 # CONFIG_USELIB is not set
@@ -19,6 +21,7 @@
 CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_BPF=y
 CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
diff --git a/kernel/fork.c b/kernel/fork.c
index cb4faae..33663b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -521,7 +521,7 @@
 	set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-	tsk->stack_canary = get_random_int();
+	tsk->stack_canary = get_random_long();
 #endif
 
 	/*
@@ -1774,11 +1774,13 @@
 	*/
 	recalc_sigpending();
 	if (signal_pending(current)) {
-		spin_unlock(&current->sighand->siglock);
-		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
 		goto bad_fork_cancel_cgroup;
 	}
+	if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+		retval = -ENOMEM;
+		goto bad_fork_cancel_cgroup;
+	}
 
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1829,6 +1831,8 @@
 	return p;
 
 bad_fork_cancel_cgroup:
+	spin_unlock(&current->sighand->siglock);
+	write_unlock_irq(&tasklist_lock);
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
 	threadgroup_change_end(current);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index be3c34e..077c87f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -877,8 +877,8 @@
 	if (!desc)
 		return;
 
-	__irq_do_set_handler(desc, handle, 1, NULL);
 	desc->irq_common_data.handler_data = data;
+	__irq_do_set_handler(desc, handle, 1, NULL);
 
 	irq_put_desc_busunlock(desc, flags);
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d630954..a1a07cf 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -563,7 +563,7 @@
 }
 
 /* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
 {
 	mutex_lock(&kprobe_mutex);
 
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..c61c56f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/osq_lock.h>
+#include <linux/delay.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@
 		 * values at the cost of a few extra spins.
 		 */
 		cpu_relax_lowlatency();
+
+		/*
+		 * On arm systems, we must slow down the waiter's repeated
+		 * acquisition of spin_mlock and atomics on the lock count, or
+		 * we risk starving out a thread attempting to release the
+		 * mutex. The mutex slowpath release must take spin lock
+		 * wait_lock. This spin lock can share a monitor with the
+		 * other waiter atomics in the mutex data structure, so must
+		 * take care to rate limit the waiters.
+		 */
+		udelay(1);
 	}
 
 	osq_unlock(&lock->osq);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index eef2ce9..3976dd5 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -274,7 +274,7 @@
 	 * if reparented.
 	 */
 	for (;;) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (pid_ns->nr_hashed == init_pids)
 			break;
 		schedule();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b6fb796..de1b3b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3355,6 +3355,7 @@
 	curr->sched_class->task_tick(rq, curr, 0);
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
+	sched_freq_tick(cpu);
 	cpufreq_update_util(rq, 0);
 
 	early_notif = early_detection_notify(rq, wallclock);
@@ -3380,8 +3381,6 @@
 
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
-
-	sched_freq_tick(cpu);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 005d15e..b140e55 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -39,11 +39,13 @@
 	cpumask_t cpu_mask;
 	unsigned int need_cpus;
 	unsigned int task_thres;
+	unsigned int max_nr;
 	s64 need_ts;
 	struct list_head lru;
 	bool pending;
 	spinlock_t pending_lock;
 	bool is_big_cluster;
+	bool enable;
 	int nrrun;
 	struct task_struct *core_ctl_thread;
 	unsigned int first_cpu;
@@ -247,6 +249,29 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
 }
 
+static ssize_t store_enable(struct cluster_data *state,
+				const char *buf, size_t count)
+{
+	unsigned int val;
+	bool bval;
+
+	if (sscanf(buf, "%u\n", &val) != 1)
+		return -EINVAL;
+
+	bval = !!val;
+	if (bval != state->enable) {
+		state->enable = bval;
+		apply_need(state);
+	}
+
+	return count;
+}
+
+static ssize_t show_enable(const struct cluster_data *state, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
+}
+
 static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -377,6 +402,7 @@
 core_ctl_attr_ro(active_cpus);
 core_ctl_attr_ro(global_state);
 core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(enable);
 
 static struct attribute *default_attrs[] = {
 	&min_cpus.attr,
@@ -386,6 +412,7 @@
 	&busy_down_thres.attr,
 	&task_thres.attr,
 	&is_big_cluster.attr,
+	&enable.attr,
 	&need_cpus.attr,
 	&active_cpus.attr,
 	&global_state.attr,
@@ -432,47 +459,25 @@
 
 /* ==================== runqueue based core count =================== */
 
-#define NR_RUNNING_TOLERANCE 5
-
 static void update_running_avg(void)
 {
 	int avg, iowait_avg, big_avg;
+	int max_nr, big_max_nr;
 	struct cluster_data *cluster;
 	unsigned int index = 0;
 
-	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
-
-	/*
-	 * Round up to the next integer if the average nr running tasks
-	 * is within NR_RUNNING_TOLERANCE/100 of the next integer.
-	 * If normal rounding up is used, it will allow a transient task
-	 * to trigger online event. By the time core is onlined, the task
-	 * has finished.
-	 * Rounding to closest suffers same problem because scheduler
-	 * might only provide running stats per jiffy, and a transient
-	 * task could skew the number for one jiffy. If core control
-	 * samples every 2 jiffies, it will observe 0.5 additional running
-	 * average which rounds up to 1 task.
-	 */
-	avg = (avg + NR_RUNNING_TOLERANCE) / 100;
-	big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
+	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+				 &max_nr, &big_max_nr);
 
 	for_each_cluster(cluster, index) {
 		if (!cluster->inited)
 			continue;
-		/*
-		 * Big cluster only need to take care of big tasks, but if
-		 * there are not enough big cores, big tasks need to be run
-		 * on little as well. Thus for little's runqueue stat, it
-		 * has to use overall runqueue average, or derive what big
-		 * tasks would have to be run on little. The latter approach
-		 * is not easy to get given core control reacts much slower
-		 * than scheduler, and can't predict scheduler's behavior.
-		 */
 		cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
+		cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
 	}
 }
 
+#define MAX_NR_THRESHOLD	4
 /* adjust needed CPUs based on current runqueue information */
 static unsigned int apply_task_need(const struct cluster_data *cluster,
 				    unsigned int new_need)
@@ -483,7 +488,15 @@
 
 	/* only unisolate more cores if there are tasks to run */
 	if (cluster->nrrun > new_need)
-		return new_need + 1;
+		new_need = new_need + 1;
+
+	/*
+	 * We don't want tasks to be overcrowded in a cluster.
+	 * If any CPU has more than MAX_NR_THRESHOLD tasks in the last
+	 * window, bring another CPU to help out.
+	 */
+	if (cluster->max_nr > MAX_NR_THRESHOLD)
+		new_need = new_need + 1;
 
 	return new_need;
 }
@@ -529,7 +542,7 @@
 
 	spin_lock_irqsave(&state_lock, flags);
 
-	if (cluster->boost) {
+	if (cluster->boost || !cluster->enable) {
 		need_cpus = cluster->max_cpus;
 	} else {
 		cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1020,6 +1033,7 @@
 	cluster->offline_delay_ms = 100;
 	cluster->task_thres = UINT_MAX;
 	cluster->nrrun = cluster->num_cpus;
+	cluster->enable = true;
 	INIT_LIST_HEAD(&cluster->lru);
 	spin_lock_init(&cluster->pending_lock);
 
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 42630ec..c42380a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -24,6 +24,7 @@
 	struct gov_attr_set attr_set;
 	unsigned int rate_limit_us;
 	unsigned int hispeed_freq;
+	bool pl;
 };
 
 struct sugov_policy {
@@ -224,7 +225,8 @@
 	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
 		*util = *max;
 
-	*util = max(*util, sg_cpu->walt_load.pl);
+	if (sg_policy->tunables->pl)
+		*util = max(*util, sg_cpu->walt_load.pl);
 }
 
 static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -450,12 +452,32 @@
 	return count;
 }
 
+static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return sprintf(buf, "%u\n", tunables->pl);
+}
+
+static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
+				   size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtobool(buf, &tunables->pl))
+		return -EINVAL;
+
+	return count;
+}
+
 static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
 static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
+static struct governor_attr pl = __ATTR_RW(pl);
 
 static struct attribute *sugov_attributes[] = {
 	&rate_limit_us.attr,
 	&hispeed_freq.attr,
+	&pl.attr,
 	NULL
 };
 
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 11e9705..ba5e3e2 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -27,6 +27,8 @@
  *  of the License.
  */
 
+#include "sched.h"
+
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -51,6 +53,27 @@
 }
 
 /**
+ * drop_nopreempt_cpus - remove a cpu from the mask if it is likely
+ *			 non-preemptible
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+
+		if (task_may_not_preempt(task, cpu))
+			cpumask_clear_cpu(cpu, lowest_mask);
+
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+
+/**
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
@@ -70,9 +93,11 @@
 {
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+retry:
 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
 		int skip = 0;
@@ -108,7 +133,8 @@
 
 		if (lowest_mask) {
 			cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask);
-
+			if (drop_nopreempts)
+				drop_nopreempt_cpus(lowest_mask);
 			/*
 			 * We have to ensure that we have at least one bit
 			 * still set in the array, since the map could have
@@ -123,7 +149,14 @@
 
 		return 1;
 	}
-
+	/*
+	 * If no CPU was found after dropping non-preemptible CPUs, retry
+	 * without dropping them to find the lowest priority target and
+	 * avoid priority inversion.
+	 */
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
 	return 0;
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6ccd3a7..cd406da 100755
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5553,6 +5553,9 @@
 	for_each_cpu(i, sched_group_cpus(sg))
 		state = min(state, idle_get_state_idx(cpu_rq(i)));
 
+	if (unlikely(state == INT_MAX))
+		return -EINVAL;
+
 	/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
 	state++;
 
@@ -5638,6 +5641,9 @@
 				}
 
 				idle_idx = group_idle_state(sg);
+				if (unlikely(idle_idx < 0))
+					return idle_idx;
+
 				group_util = group_norm_util(eenv, sg);
 				sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2703e0d..ec90319 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -6,6 +6,7 @@
 #include "sched.h"
 #include "walt.h"
 
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
@@ -1489,11 +1490,30 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
+	bool may_not_preempt;
 
 #ifdef CONFIG_SCHED_HMP
 	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
@@ -1509,7 +1529,12 @@
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1530,18 +1560,22 @@
 	 * This test is optimistic, if we get it wrong the load-balancer
 	 * will have to sort it out.
 	 */
-	if (energy_aware() ||
-	    (curr && unlikely(rt_task(curr)) &&
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	if (energy_aware() || may_not_preempt ||
+	     (unlikely(rt_task(curr)) &&
 	     (tsk_nr_cpus_allowed(curr) < 2 ||
 	      curr->prio <= p->prio))) {
 		int target = find_lowest_rq(p);
 
 		/*
-		 * Don't bother moving it if the destination CPU is
-		 * not running a lower priority task.
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the
+		 * destination CPU is not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		   (may_not_preempt ||
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 	rcu_read_unlock();
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5220511..28d660b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2089,6 +2089,11 @@
 		__release(rq2->lock);
 }
 
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+
 #else /* CONFIG_SMP */
 
 /*
@@ -2451,6 +2456,11 @@
 	return max_possible_capacity != min_max_possible_capacity;
 }
 
+static inline bool is_max_capacity_cpu(int cpu)
+{
+	return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
 /*
  * 'load' is in reference to "best cpu" at its best frequency.
  * Scale that in reference to a given cpu, accounting for how bad it is
@@ -2676,6 +2686,15 @@
 extern void clear_ed_task(struct task_struct *p, struct rq *rq);
 extern bool early_detection_notify(struct rq *rq, u64 wallclock);
 
+#ifdef CONFIG_SCHED_HMP
+extern unsigned int power_cost(int cpu, u64 demand);
+#else
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return cpu_max_possible_capacity(cpu);
+}
+#endif
+
 #else	/* CONFIG_SCHED_WALT */
 
 struct hmp_sched_stats;
@@ -2719,6 +2738,8 @@
 	return 0;
 }
 
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
 static inline void
 inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
 
@@ -2828,6 +2849,11 @@
 	return 0;
 }
 
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
 #endif	/* CONFIG_SCHED_WALT */
 
 #ifdef CONFIG_SCHED_HMP
@@ -2842,7 +2868,6 @@
 check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
 extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
 					struct task_struct *p, s64 delta);
-extern unsigned int power_cost(int cpu, u64 demand);
 extern unsigned int cpu_temp(int cpu);
 extern void pre_big_task_count_change(const struct cpumask *cpus);
 extern void post_big_task_count_change(const struct cpumask *cpus);
@@ -2899,11 +2924,6 @@
 static inline void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
 				      struct task_struct *p, s64 delta) { }
 
-static inline unsigned int power_cost(int cpu, u64 demand)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
 static inline unsigned int cpu_temp(int cpu)
 {
 	return 0;
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index f820094..7f86c0b 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -27,11 +27,13 @@
 static DEFINE_PER_CPU(u64, last_time);
 static DEFINE_PER_CPU(u64, nr_big_prod_sum);
 static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
 
 static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
 static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
 static s64 last_get_time;
 
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + ((Y) - 1), (Y))
 /**
  * sched_get_nr_running_avg
  * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
@@ -41,7 +43,8 @@
  * Obtains the average nr_running value since the last poll.
  * This function may not be called concurrently with itself
  */
-void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+			      unsigned int *max_nr, unsigned int *big_max_nr)
 {
 	int cpu;
 	u64 curr_time = sched_clock();
@@ -51,6 +54,8 @@
 	*avg = 0;
 	*iowait_avg = 0;
 	*big_avg = 0;
+	*max_nr = 0;
+	*big_max_nr = 0;
 
 	if (!diff)
 		return;
@@ -79,17 +84,35 @@
 		per_cpu(nr_big_prod_sum, cpu) = 0;
 		per_cpu(iowait_prod_sum, cpu) = 0;
 
+		if (*max_nr < per_cpu(nr_max, cpu))
+			*max_nr = per_cpu(nr_max, cpu);
+
+		if (is_max_capacity_cpu(cpu)) {
+			if (*big_max_nr < per_cpu(nr_max, cpu))
+				*big_max_nr = per_cpu(nr_max, cpu);
+		}
+
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
 		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
 	}
 
 	diff = curr_time - last_get_time;
 	last_get_time = curr_time;
 
-	*avg = (int)div64_u64(tmp_avg * 100, diff);
-	*big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
-	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+	/*
+	 * Any task running on BIG cluster and BIG tasks running on little
+	 * cluster contribute to big_avg. Small or medium tasks can also
+	 * run on BIG cluster when co-location and scheduler boost features
+	 * are activated. We don't want these tasks to downmigrate to little
+	 * cluster when BIG CPUs are available but isolated. Round up the
+	 * average values so that core_ctl aggressively unisolates BIG CPUs.
+	 */
+	*avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+	*big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+	*iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
 
-	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+				       *max_nr, *big_max_nr);
 
 	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
 	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
@@ -122,6 +145,9 @@
 
 	BUG_ON((s64)per_cpu(nr, cpu) < 0);
 
+	if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+		per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
 	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
 	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
 	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
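
The switch from div64_u64() to DIV64_U64_ROUNDUP above turns the averages from
truncating into ceiling division, so any activity at all on the relevant CPUs
keeps the reported value above zero and, as the comment explains, core_ctl
keeps BIG CPUs unisolated. A quick user-space check of that arithmetic (plain
'/' stands in for div64_u64(), which the kernel only needs in order to get
64-bit division on 32-bit architectures):

    #include <assert.h>
    #include <stdint.h>

    /* Same ceiling-division trick as DIV64_U64_ROUNDUP, in plain C. */
    static uint64_t div_round_up(uint64_t x, uint64_t y)
    {
            return (x + (y - 1)) / y;
    }

    int main(void)
    {
            assert(div_round_up(0, 10) == 0);   /* no activity stays zero   */
            assert(div_round_up(1, 10) == 1);   /* 1/10 would truncate to 0 */
            assert(div_round_up(10, 10) == 1);
            assert(div_round_up(11, 10) == 2);
            return 0;
    }
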
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index b89abbd..65f4148 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -152,6 +152,8 @@
  * IMPORTANT: Initialize both copies to same value!!
  */
 
+static __read_mostly bool sched_predl;
+
 __read_mostly unsigned int sched_ravg_hist_size = 5;
 __read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
 
@@ -231,6 +233,16 @@
 
 early_param("sched_ravg_window", set_sched_ravg_window);
 
+static int __init set_sched_predl(char *str)
+{
+	unsigned int predl;
+
+	get_option(&str, &predl);
+	sched_predl = !!predl;
+	return 0;
+}
+early_param("sched_predl", set_sched_predl);
+
 void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
 {
 	inc_nr_big_task(&rq->hmp_stats, p);
@@ -402,7 +414,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+	if (!is_max_capacity_cpu(cpu))
 		return rq->hmp_stats.nr_big_tasks;
 
 	return rq->nr_running;
@@ -923,7 +935,7 @@
 	if (!sync_cpu_available) {
 		rq->window_start = 1;
 		sync_cpu_available = 1;
-		atomic_set(&walt_irq_work_lastq_ws, rq->window_start);
+		atomic64_set(&walt_irq_work_lastq_ws, rq->window_start);
 	} else {
 		struct rq *sync_rq = cpu_rq(cpumask_any(cpu_online_mask));
 
@@ -1096,6 +1108,9 @@
 {
 	u32 new, old;
 
+	if (!sched_predl)
+		return;
+
 	if (is_idle_task(p) || exiting_task(p))
 		return;
 
@@ -1618,6 +1633,9 @@
 	int bidx;
 	u32 pred_demand;
 
+	if (!sched_predl)
+		return 0;
+
 	bidx = busy_to_bucket(runtime);
 	pred_demand = get_pred_busy(rq, p, bidx, runtime);
 	bucket_increase(p->ravg.busy_buckets, bidx);
@@ -1916,7 +1934,7 @@
 	if (old_window_start == rq->window_start)
 		return;
 
-	result = atomic_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
+	result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
 				   rq->window_start);
 	if (result == old_window_start)
 		irq_work_queue(&rq->irq_work);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 744fa61..bde8e33 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * accessed with the expectation that approximate answers are acceptable,
+ * and therefore without synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -264,6 +271,7 @@
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -293,6 +301,7 @@
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	rcu_bh_qs();
 	local_irq_disable();
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index eb6c9f1..8d2b4d8 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1484,6 +1484,11 @@
 
 end:
 	release_all_trace_kprobes();
+	/*
+	 * Wait for the optimizer work to finish. Otherwise it might fiddle
+	 * with probes in already freed __init text.
+	 */
+	wait_for_kprobe_optimizer();
 	if (warn)
 		pr_cont("NG: Some tests are failed. Please check them.\n");
 	else
diff --git a/mm/ksm.c b/mm/ksm.c
index 56e92dc..5f1855b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1020,8 +1020,7 @@
 		goto out;
 
 	if (PageTransCompound(page)) {
-		err = split_huge_page(page);
-		if (err)
+		if (split_huge_page(page))
 			goto out_unlock;
 	}
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 49b7c1e..f1eabcc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1725,6 +1725,29 @@
 	}
 }
 
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+	struct memblock_region *rgn;
+	unsigned long size = 0;
+	int idx;
+
+	for_each_memblock_type((&memblock.reserved), rgn) {
+		phys_addr_t start, end;
+
+		if (rgn->base + rgn->size < start_addr)
+			continue;
+		if (rgn->base > end_addr)
+			continue;
+
+		start = rgn->base;
+		end = start + rgn->size;
+		size += end - start;
+	}
+
+	return size;
+}
+
 void __init_memblock __memblock_dump_all(void)
 {
 	pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 19e796d..4bd4480 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1587,12 +1587,8 @@
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
-		/*
-		 * We know that soft_offline_huge_page() tries to migrate
-		 * only one hugepage pointed to by hpage, so we need not
-		 * run through the pagelist here.
-		 */
-		putback_active_hugepage(hpage);
+		if (!list_empty(&pagelist))
+			putback_movable_pages(&pagelist);
 		if (ret > 0)
 			ret = -EIO;
 	} else {
diff --git a/mm/mlock.c b/mm/mlock.c
index 4feee1d..9cdd063 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -285,7 +285,7 @@
 {
 	int i;
 	int nr = pagevec_count(pvec);
-	int delta_munlocked;
+	int delta_munlocked = -nr;
 	struct pagevec pvec_putback;
 	int pgrescued = 0;
 
@@ -305,6 +305,8 @@
 				continue;
 			else
 				__munlock_isolation_failed(page);
+		} else {
+			delta_munlocked++;
 		}
 
 		/*
@@ -316,7 +318,6 @@
 		pagevec_add(&pvec_putback, pvec->pages[i]);
 		pvec->pages[i] = NULL;
 	}
-	delta_munlocked = -nr + pagevec_count(&pvec_putback);
 	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
 	spin_unlock_irq(zone_lru_lock(zone));
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ca9565..27ddaae 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,6 +286,26 @@
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
+	unsigned long max_initialise;
+	unsigned long reserved_lowmem;
+
+	/*
+	 * Initialise at least 2G of a node, but also take into account
+	 * two large system hashes that can take up 1GB for 0.25TB/node.
+	 */
+	max_initialise = max(2UL << (30 - PAGE_SHIFT),
+		(pgdat->node_spanned_pages >> 8));
+
+	/*
+	 * Compensate for all the memblock reservations (e.g. crash kernel)
+	 * in the initial estimation to make sure we will initialize enough
+	 * memory to boot.
+	 */
+	reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+			pgdat->node_start_pfn + max_initialise);
+	max_initialise += reserved_lowmem;
+
+	pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
 	pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -308,20 +328,11 @@
 				unsigned long pfn, unsigned long zone_end,
 				unsigned long *nr_initialised)
 {
-	unsigned long max_initialise;
-
 	/* Always populate low zones for address-contrained allocations */
 	if (zone_end < pgdat_end_pfn(pgdat))
 		return true;
-	/*
-	 * Initialise at least 2G of a node but also take into account that
-	 * two large system hashes that can take up 1GB for 0.25TB/node.
-	 */
-	max_initialise = max(2UL << (30 - PAGE_SHIFT),
-		(pgdat->node_spanned_pages >> 8));
-
 	(*nr_initialised)++;
-	if ((*nr_initialised > max_initialise) &&
+	if ((*nr_initialised > pgdat->static_init_size) &&
 	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 		pgdat->first_deferred_pfn = pfn;
 		return false;
@@ -5940,7 +5951,6 @@
 	/* pg_data_t should be reset to zero when it's allocated */
 	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
 
-	reset_deferred_meminit(pgdat);
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	pgdat->per_cpu_nodestats = NULL;
@@ -5962,6 +5972,7 @@
 		(unsigned long)pgdat->node_mem_map);
 #endif
 
+	reset_deferred_meminit(pgdat);
 	free_area_init_core(pgdat);
 }
 
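
For scale: with 4 KiB pages (PAGE_SHIFT = 12) the "2UL << (30 - PAGE_SHIFT)"
term is 2UL << 18 = 524288 pages, i.e. 2 GiB, and "node_spanned_pages >> 8"
adds 1/256 of the node, which is where the "1GB for 0.25TB/node" figure in the
comment comes from; the memblock_reserved_memory_within() result is then added
on top so reservations such as the crash kernel do not eat into the initialised
range. A small check of that arithmetic (the PAGE_SHIFT value is assumed here,
not taken from the kernel headers):

    #include <assert.h>

    int main(void)
    {
            const unsigned page_shift = 12;                           /* assume 4 KiB pages */
            unsigned long pages2g = 2UL << (30 - page_shift);         /* "at least 2G" term */
            unsigned long node_spanned = 256UL << (30 - page_shift);  /* a 256 GiB node     */

            assert(pages2g == 524288);                                 /* 2 GiB of pages    */
            assert((node_spanned >> 8) == (1UL << (30 - page_shift))); /* 1 GiB per 0.25 TB */
            return 0;
    }
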
diff --git a/mm/slub.c b/mm/slub.c
index 30be24b..7341005 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@
 	return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 			length, 1);
 	metadata_access_disable();
 }
@@ -636,14 +637,15 @@
 	       p, p - addr, get_freepointer(s, p));
 
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+			      s->red_left_pad);
 	else if (p > addr + 16)
-		print_section("Bytes b4 ", p - 16, 16);
+		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->object_size,
-				PAGE_SIZE));
+	print_section(KERN_ERR, "Object ", p,
+		      min_t(unsigned long, s->object_size, PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->object_size,
+		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 			s->inuse - s->object_size);
 
 	if (s->offset)
@@ -658,7 +660,8 @@
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, size_from_object(s) - off);
+		print_section(KERN_ERR, "Padding ", p + off,
+			      size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -832,7 +835,7 @@
 		end--;
 
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-	print_section("Padding ", end - remainder, remainder);
+	print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
@@ -985,7 +988,7 @@
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object,
+			print_section(KERN_INFO, "Object ", (void *)object,
 					s->object_size);
 
 		dump_stack();
@@ -5466,6 +5469,7 @@
 		char mbuf[64];
 		char *buf;
 		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+		ssize_t len;
 
 		if (!attr || !attr->store || !attr->show)
 			continue;
@@ -5490,8 +5494,9 @@
 			buf = buffer;
 		}
 
-		attr->show(root_cache, buf);
-		attr->store(s, buf, strlen(buf));
+		len = attr->show(root_cache, buf);
+		if (len > 0)
+			attr->store(s, buf, len);
 	}
 
 	if (buffer)
diff --git a/net/Kconfig b/net/Kconfig
index d5ff4f7..0b8c255 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -265,10 +265,6 @@
 config HWBM
        bool
 
-config SOCK_CGROUP_DATA
-	bool
-	default n
-
 config CGROUP_NET_PRIO
 	bool "Network priority cgroup"
 	depends on CGROUPS
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0474106..7625ec8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -776,6 +776,13 @@
 			return -EPROTONOSUPPORT;
 		}
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		if (defpvid >= VLAN_VID_MASK)
+			return -EINVAL;
+	}
 #endif
 
 	return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index d8ad73b3..5a782f5 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -185,6 +185,7 @@
 		br_debug(br, "using kernel STP\n");
 
 		/* To start timers on any ports left in blocking */
+		mod_timer(&br->hello_timer, jiffies + br->hello_time);
 		br_port_state_selection(br);
 	}
 
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index da058b8..15826fd 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
-		if (br->stp_enabled != BR_USER_STP)
+		if (br->stp_enabled == BR_KERNEL_STP)
 			mod_timer(&br->hello_timer,
 				  round_jiffies(jiffies + br->hello_time));
 	}
diff --git a/net/core/dst.c b/net/core/dst.c
index b5cbbe0..656b70d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@
 }
 EXPORT_SYMBOL(dst_discard_out);
 
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
 	/* This initializer is needed to force linker to place this variable
 	 * into const section. Otherwise it might end into bss section.
 	 * We really want to avoid false sharing on this variable, and catch
 	 * any writes on it.
 	 */
-	[RTAX_MAX] = 0xdeadbeef,
+	.refcnt = ATOMIC_INIT(1),
 };
 
 void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@
 	if (dev)
 		dev_hold(dev);
 	dst->ops = ops;
-	dst_init_metrics(dst, dst_default_metrics, true);
+	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
 	dst->from = NULL;
@@ -315,25 +315,30 @@
 
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
-	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
 
 	if (p) {
-		u32 *old_p = __DST_METRICS_PTR(old);
+		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
 		unsigned long prev, new;
 
-		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+		atomic_set(&p->refcnt, 1);
+		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
 
 		new = (unsigned long) p;
 		prev = cmpxchg(&dst->_metrics, old, new);
 
 		if (prev != old) {
 			kfree(p);
-			p = __DST_METRICS_PTR(prev);
+			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
 			if (prev & DST_METRICS_READ_ONLY)
 				p = NULL;
+		} else if (prev & DST_METRICS_REFCOUNTED) {
+			if (atomic_dec_and_test(&old_p->refcnt))
+				kfree(old_p);
 		}
 	}
-	return p;
+	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+	return (u32 *)p;
 }
 EXPORT_SYMBOL(dst_cow_metrics_generic);
 
@@ -342,7 +347,7 @@
 {
 	unsigned long prev, new;
 
-	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
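
The dst_default_metrics change above, together with the fib_semantics.c and
route.c hunks below, moves the FIB metrics to a simple ownership model: one
read-only default object that everybody shares and nobody frees, plus
heap-allocated copies that carry an explicit refcount and are released only by
their last user. A stripped-down sketch of that model (illustrative names, not
the kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <string.h>

    #define NR_METRICS 16

    struct metrics {
            unsigned int vals[NR_METRICS];
            atomic_int refcnt;              /* irrelevant for the shared default */
    };

    /* Shared, read-only default: never freed, so no refcounting needed. */
    static const struct metrics default_metrics;

    static struct metrics *metrics_hold(struct metrics *m)
    {
            if (m != (struct metrics *)&default_metrics)
                    atomic_fetch_add(&m->refcnt, 1);
            return m;
    }

    static void metrics_put(struct metrics *m)
    {
            if (m != (struct metrics *)&default_metrics &&
                atomic_fetch_sub(&m->refcnt, 1) == 1)
                    free(m);                /* last user frees the copy */
    }

    /* Private, refcounted copy of whatever we currently point at. */
    static struct metrics *metrics_cow(const struct metrics *old)
    {
            struct metrics *m = malloc(sizeof(*m));

            if (m) {
                    memcpy(m->vals, old->vals, sizeof(m->vals));
                    atomic_init(&m->refcnt, 1);
            }
            return m;
    }

    int main(void)
    {
            struct metrics *m = metrics_cow(&default_metrics);

            metrics_hold(m);        /* second user, e.g. a cached route */
            metrics_put(m);
            metrics_put(m);         /* dropped to zero: freed here */
            return 0;
    }
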
diff --git a/net/core/filter.c b/net/core/filter.c
index b391209..5e42e0e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/fcntl.h>
 #include <linux/socket.h>
+#include <linux/sock_diag.h>
 #include <linux/in.h>
 #include <linux/inet.h>
 #include <linux/netdevice.h>
@@ -78,6 +79,10 @@
 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
 		return -ENOMEM;
 
+	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
+	if (err)
+		return err;
+
 	err = security_sock_rcv_skb(sk, skb);
 	if (err)
 		return err;
@@ -85,7 +90,12 @@
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
+		struct sock *save_sk = skb->sk;
+		unsigned int pkt_len;
+
+		skb->sk = sk;
+		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
+		skb->sk = save_sk;
 		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
 	}
 	rcu_read_unlock();
@@ -2198,6 +2208,7 @@
 	    func == bpf_skb_change_proto ||
 	    func == bpf_skb_change_tail ||
 	    func == bpf_skb_pull_data ||
+	    func == bpf_clone_redirect ||
 	    func == bpf_l3_csum_replace ||
 	    func == bpf_l4_csum_replace)
 		return true;
@@ -2530,6 +2541,36 @@
 	.arg5_type	= ARG_CONST_STACK_SIZE,
 };
 
+BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
+{
+	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
+}
+
+static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
+	.func           = bpf_get_socket_cookie,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
+{
+	struct sock *sk = sk_to_full_sk(skb->sk);
+	kuid_t kuid;
+
+	if (!sk || !sk_fullsock(sk))
+		return overflowuid;
+	kuid = sock_net_uid(sock_net(sk), sk);
+	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
+}
+
+static const struct bpf_func_proto bpf_get_socket_uid_proto = {
+	.func           = bpf_get_socket_uid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -2551,6 +2592,10 @@
 	case BPF_FUNC_trace_printk:
 		if (capable(CAP_SYS_ADMIN))
 			return bpf_get_trace_printk_proto();
+	case BPF_FUNC_get_socket_cookie:
+		return &bpf_get_socket_cookie_proto;
+	case BPF_FUNC_get_socket_uid:
+		return &bpf_get_socket_uid_proto;
 	default:
 		return NULL;
 	}
@@ -2628,6 +2673,17 @@
 	}
 }
 
+static const struct bpf_func_proto *
+cg_skb_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_skb_load_bytes:
+		return &bpf_skb_load_bytes_proto;
+	default:
+		return sk_filter_func_proto(func_id);
+	}
+}
+
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
 	if (off < 0 || off >= sizeof(struct __sk_buff))
@@ -2990,6 +3046,12 @@
 	.convert_ctx_access	= xdp_convert_ctx_access,
 };
 
+static const struct bpf_verifier_ops cg_skb_ops = {
+	.get_func_proto		= cg_skb_func_proto,
+	.is_valid_access	= sk_filter_is_valid_access,
+	.convert_ctx_access	= sk_filter_convert_ctx_access,
+};
+
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
 	.ops	= &sk_filter_ops,
 	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
@@ -3010,12 +3072,18 @@
 	.type	= BPF_PROG_TYPE_XDP,
 };
 
+static struct bpf_prog_type_list cg_skb_type __read_mostly = {
+	.ops	= &cg_skb_ops,
+	.type	= BPF_PROG_TYPE_CGROUP_SKB,
+};
+
 static int __init register_sk_filter_ops(void)
 {
 	bpf_register_prog_type(&sk_filter_type);
 	bpf_register_prog_type(&sched_cls_type);
 	bpf_register_prog_type(&sched_act_type);
 	bpf_register_prog_type(&xdp_type);
+	bpf_register_prog_type(&cg_skb_type);
 
 	return 0;
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b490af6..1d91607 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1617,13 +1617,13 @@
 					       cb->nlh->nlmsg_seq, 0,
 					       flags,
 					       ext_filter_mask);
-			/* If we ran out of room on the first message,
-			 * we're in trouble
-			 */
-			WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
 
-			if (err < 0)
-				goto out;
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 
 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
@@ -1631,10 +1631,12 @@
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -3413,8 +3415,12 @@
 				err = br_dev->netdev_ops->ndo_bridge_getlink(
 						skb, portid, seq, dev,
 						filter_mask, NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
@@ -3425,16 +3431,22 @@
 							      seq, dev,
 							      filter_mask,
 							      NLM_F_MULTI);
-				if (err < 0 && err != -EOPNOTSUPP)
-					break;
+				if (err < 0 && err != -EOPNOTSUPP) {
+					if (likely(skb->len))
+						break;
+
+					goto out_err;
+				}
 			}
 			idx++;
 		}
 	}
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 	cb->args[0] = idx;
 
-	return skb->len;
+	return err;
 }
 
 static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index 19562f7..f07eaea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -138,10 +138,7 @@
 
 #include <trace/events/sock.h>
 
-#ifdef CONFIG_INET
 #include <net/tcp.h>
-#endif
-
 #include <net/busy_poll.h>
 
 static DEFINE_MUTEX(proto_list_mutex);
@@ -1034,6 +1031,7 @@
 
 	union {
 		int val;
+		u64 val64;
 		struct linger ling;
 		struct timeval tm;
 	} v;
@@ -1264,6 +1262,13 @@
 		v.val = sk->sk_incoming_cpu;
 		break;
 
+
+	case SO_COOKIE:
+		lv = sizeof(u64);
+		if (len < lv)
+			return -EINVAL;
+		v.val64 = sock_gen_cookie(sk);
+		break;
 	default:
 		/* We implement the SO_SNDLOWAT etc to not be settable
 		 * (1003.1g 7).
@@ -1687,28 +1692,24 @@
  * delay queue. We want to allow the owner socket to send more
  * packets, as if they were already TX completed by a typical driver.
  * But we also want to keep skb->sk set because some packet schedulers
- * rely on it (sch_fq for example). So we set skb->truesize to a small
- * amount (1) and decrease sk_wmem_alloc accordingly.
+ * rely on it (sch_fq for example).
  */
 void skb_orphan_partial(struct sk_buff *skb)
 {
-	/* If this skb is a TCP pure ACK or already went here,
-	 * we have nothing to do. 2 is already a very small truesize.
-	 */
-	if (skb->truesize <= 2)
+	if (skb_is_tcp_pure_ack(skb))
 		return;
 
-	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
-	 * so we do not completely orphan skb, but transfert all
-	 * accounted bytes but one, to avoid unexpected reorders.
-	 */
 	if (skb->destructor == sock_wfree
 #ifdef CONFIG_INET
 	    || skb->destructor == tcp_wfree
 #endif
 		) {
-		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
-		skb->truesize = 1;
+		struct sock *sk = skb->sk;
+
+		if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+			atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+			skb->destructor = sock_efree;
+		}
 	} else {
 		skb_orphan(skb);
 	}
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 6b10573..acd2a6c 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(sock_diag_table_mutex);
 static struct workqueue_struct *broadcast_wq;
 
-static u64 sock_gen_cookie(struct sock *sk)
+u64 sock_gen_cookie(struct sock *sk)
 {
 	while (1) {
 		u64 res = atomic64_read(&sk->sk_cookie);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 237d62c..2ac9d2a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@
 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
 		newnp->pktoptions  = NULL;
 		newnp->opt	   = NULL;
+		newnp->ipv6_mc_list = NULL;
+		newnp->ipv6_ac_list = NULL;
+		newnp->ipv6_fl_list = NULL;
 		newnp->mcast_oif   = inet6_iif(skb);
 		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
 
@@ -490,6 +493,9 @@
 	/* Clone RX bits */
 	newnp->rxopt.all = np->rxopt.all;
 
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 	newnp->pktoptions = NULL;
 	newnp->opt	  = NULL;
 	newnp->mcast_oif  = inet6_iif(skb);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 98fd2f7..37f4578 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -759,7 +759,7 @@
 	unsigned int e = 0, s_e;
 	struct fib_table *tb;
 	struct hlist_head *head;
-	int dumped = 0;
+	int dumped = 0, err;
 
 	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
 	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -779,20 +779,27 @@
 			if (dumped)
 				memset(&cb->args[2], 0, sizeof(cb->args) -
 						 2 * sizeof(cb->args[0]));
-			if (fib_table_dump(tb, skb, cb) < 0)
-				goto out;
+			err = fib_table_dump(tb, skb, cb);
+			if (err < 0) {
+				if (likely(skb->len))
+					goto out;
+
+				goto out_err;
+			}
 			dumped = 1;
 next:
 			e++;
 		}
 	}
 out:
+	err = skb->len;
+out_err:
 	rcu_read_unlock();
 
 	cb->args[1] = e;
 	cb->args[0] = h;
 
-	return skb->len;
+	return err;
 }
 
 /* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 6a40680..7563831 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@
 static void free_fib_info_rcu(struct rcu_head *head)
 {
 	struct fib_info *fi = container_of(head, struct fib_info, rcu);
+	struct dst_metrics *m;
 
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
 	} endfor_nexthops(fi);
 
-	if (fi->fib_metrics != (u32 *) dst_default_metrics)
-		kfree(fi->fib_metrics);
+	m = fi->fib_metrics;
+	if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+		kfree(m);
 	kfree(fi);
 }
 
@@ -982,11 +984,11 @@
 			val = 255;
 		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
 			return -EINVAL;
-		fi->fib_metrics[type - 1] = val;
+		fi->fib_metrics->metrics[type - 1] = val;
 	}
 
 	if (ecn_ca)
-		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+		fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 
 	return 0;
 }
@@ -1044,11 +1046,12 @@
 		goto failure;
 	fib_info_cnt++;
 	if (cfg->fc_mx) {
-		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
 		if (!fi->fib_metrics)
 			goto failure;
+		atomic_set(&fi->fib_metrics->refcnt, 1);
 	} else
-		fi->fib_metrics = (u32 *) dst_default_metrics;
+		fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
 
 	fi->fib_net = net;
 	fi->fib_protocol = cfg->fc_protocol;
@@ -1252,7 +1255,7 @@
 	if (fi->fib_priority &&
 	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
 		goto nla_put_failure;
-	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+	if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
 		goto nla_put_failure;
 
 	if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e3665bf..ef40bb6 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1932,6 +1932,8 @@
 
 	/* rcu_read_lock is hold by caller */
 	hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+		int err;
+
 		if (i < s_i) {
 			i++;
 			continue;
@@ -1942,17 +1944,14 @@
 			continue;
 		}
 
-		if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
-				  cb->nlh->nlmsg_seq,
-				  RTM_NEWROUTE,
-				  tb->tb_id,
-				  fa->fa_type,
-				  xkey,
-				  KEYLENGTH - fa->fa_slen,
-				  fa->fa_tos,
-				  fa->fa_info, NLM_F_MULTI) < 0) {
+		err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+				    tb->tb_id, fa->fa_type,
+				    xkey, KEYLENGTH - fa->fa_slen,
+				    fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+		if (err < 0) {
 			cb->args[4] = i;
-			return -1;
+			return err;
 		}
 		i++;
 	}
@@ -1974,10 +1973,13 @@
 	t_key key = cb->args[3];
 
 	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
-		if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+		int err;
+
+		err = fn_trie_dump_leaf(l, tb, skb, cb);
+		if (err < 0) {
 			cb->args[3] = key;
 			cb->args[2] = count;
-			return -1;
+			return err;
 		}
 
 		++count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index d5d3ead..ceae0ea 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -665,6 +665,8 @@
 		/* listeners have SOCK_RCU_FREE, not the children */
 		sock_reset_flag(newsk, SOCK_RCU_FREE);
 
+		inet_sk(newsk)->mc_list = NULL;
+
 		newsk->sk_mark = inet_rsk(req)->ir_mark;
 		atomic64_set(&newsk->sk_cookie,
 			     atomic64_read(&inet_rsk(req)->ir_cookie));
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0bd3efe..2c18bcf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -74,6 +74,7 @@
 #include <net/checksum.h>
 #include <net/inetpeer.h>
 #include <net/lwtunnel.h>
+#include <linux/bpf-cgroup.h>
 #include <linux/igmp.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_bridge.h>
@@ -287,6 +288,13 @@
 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int mtu;
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
 
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
@@ -305,6 +313,20 @@
 	return ip_finish_output2(net, sk, skb);
 }
 
+static int ip_mc_finish_output(struct net *net, struct sock *sk,
+			       struct sk_buff *skb)
+{
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return dev_loopback_xmit(net, sk, skb);
+}
+
 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
@@ -342,7 +364,7 @@
 			if (newskb)
 				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 					net, sk, newskb, NULL, newskb->dev,
-					dev_loopback_xmit);
+					ip_mc_finish_output);
 		}
 
 		/* Multicasts with ttl 0 must not go beyond the host */
@@ -358,7 +380,7 @@
 		if (newskb)
 			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 				net, sk, newskb, NULL, newskb->dev,
-				dev_loopback_xmit);
+				ip_mc_finish_output);
 	}
 
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 70c40ba2..18c6e79 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1370,8 +1370,12 @@
 
 static void ipv4_dst_destroy(struct dst_entry *dst)
 {
+	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
 	struct rtable *rt = (struct rtable *) dst;
 
+	if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+		kfree(p);
+
 	if (!list_empty(&rt->rt_uncached)) {
 		struct uncached_list *ul = rt->rt_uncached_list;
 
@@ -1423,7 +1427,11 @@
 			rt->rt_gateway = nh->nh_gw;
 			rt->rt_uses_gateway = 1;
 		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+		if (fi->fib_metrics != &dst_default_metrics) {
+			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+			atomic_inc(&fi->fib_metrics->refcnt);
+		}
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb142ca..86fbf0f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1078,9 +1078,12 @@
 				int *copied, size_t size)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	struct sockaddr *uaddr = msg->msg_name;
 	int err, flags;
 
-	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+	if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+	     uaddr->sa_family == AF_UNSPEC))
 		return -EOPNOTSUPP;
 	if (tp->fastopen_req)
 		return -EALREADY; /* Another Fast Open is in progress */
@@ -1093,7 +1096,7 @@
 	tp->fastopen_req->size = size;
 
 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
-	err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+	err = __inet_stream_connect(sk->sk_socket, uaddr,
 				    msg->msg_namelen, flags);
 	*copied = tp->fastopen_req->copied;
 	tcp_free_fastopen_req(tp);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e074816..a03f1e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1178,13 +1178,14 @@
 		 */
 		if (pkt_len > mss) {
 			unsigned int new_len = (pkt_len / mss) * mss;
-			if (!in_sack && new_len < pkt_len) {
+			if (!in_sack && new_len < pkt_len)
 				new_len += mss;
-				if (new_len >= skb->len)
-					return 0;
-			}
 			pkt_len = new_len;
 		}
+
+		if (pkt_len >= skb->len && !in_sack)
+			return 0;
+
 		err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
 		if (err < 0)
 			return err;
@@ -3233,7 +3234,7 @@
 			int delta;
 
 			/* Non-retransmitted hole got filled? That's reordering */
-			if (reord < prior_fackets)
+			if (reord < prior_fackets && reord <= tp->fackets_out)
 				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
 			delta = tcp_is_fack(tp) ? pkts_acked :
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 33b04ec..013086b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@
 	const struct net_offload *ops;
 	int proto;
 	struct frag_hdr *fptr;
-	unsigned int unfrag_ip6hlen;
 	unsigned int payload_len;
 	u8 *prevhdr;
 	int offset = 0;
@@ -116,8 +115,10 @@
 		skb->network_header = (u8 *)ipv6h - skb->head;
 
 		if (udpfrag) {
-			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+			int err = ip6_find_1stfragopt(skb, &prevhdr);
+			if (err < 0)
+				return ERR_PTR(err);
+			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
 			fptr->frag_off = htons(offset);
 			if (skb->next)
 				fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index e27b8fd..d472a5f 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 
+#include <linux/bpf-cgroup.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 
@@ -131,6 +132,14 @@
 
 static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	if (ret) {
+		kfree_skb(skb);
+		return ret;
+	}
+
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
@@ -586,7 +595,10 @@
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
 
-	hlen = ip6_find_1stfragopt(skb, &prevhdr);
+	err = ip6_find_1stfragopt(skb, &prevhdr);
+	if (err < 0)
+		goto fail;
+	hlen = err;
 	nexthdr = *prevhdr;
 
 	mtu = ip6_skb_dst_mtu(skb);
@@ -1444,6 +1456,11 @@
 			 */
 			alloclen += sizeof(struct frag_hdr);
 
+			copy = datalen - transhdrlen - fraggap;
+			if (copy < 0) {
+				err = -EINVAL;
+				goto error;
+			}
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
 						alloclen + hh_len,
@@ -1493,13 +1510,9 @@
 				data += fraggap;
 				pskb_trim_unique(skb_prev, maxfraglen);
 			}
-			copy = datalen - transhdrlen - fraggap;
-
-			if (copy < 0) {
-				err = -EINVAL;
-				kfree_skb(skb);
-				goto error;
-			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+			if (copy > 0 &&
+			    getfrag(from, data + transhdrlen, offset,
+				    copy, fraggap, skb) < 0) {
 				err = -EFAULT;
 				kfree_skb(skb);
 				goto error;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd42523..e9065b8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
 	u16 offset = sizeof(struct ipv6hdr);
-	struct ipv6_opt_hdr *exthdr =
-				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 	unsigned int packet_len = skb_tail_pointer(skb) -
 		skb_network_header(skb);
 	int found_rhdr = 0;
 	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 
-	while (offset + 1 <= packet_len) {
+	while (offset <= packet_len) {
+		struct ipv6_opt_hdr *exthdr;
 
 		switch (**nexthdr) {
 
@@ -107,13 +106,16 @@
 			return offset;
 		}
 
-		offset += ipv6_optlen(exthdr);
-		*nexthdr = &exthdr->nexthdr;
+		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+			return -EINVAL;
+
 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 						 offset);
+		offset += ipv6_optlen(exthdr);
+		*nexthdr = &exthdr->nexthdr;
 	}
 
-	return offset;
+	return -EINVAL;
 }
 EXPORT_SYMBOL(ip6_find_1stfragopt);
 
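
The reworked ip6_find_1stfragopt() above only reads an extension header after
checking that a full struct ipv6_opt_hdr still fits inside the packet, and
returns -EINVAL instead of walking past the end of malformed data. The same
"validate the fixed header before trusting the length it declares" pattern in
a self-contained form (a hypothetical TLV layout, not the IPv6 header format):

    #include <stddef.h>
    #include <stdint.h>

    struct tlv {
            uint8_t type;
            uint8_t len;    /* length of the value bytes that follow */
    };

    /* Return the offset of the first TLV of type @want, or -1 if truncated/absent. */
    static int find_tlv(const uint8_t *buf, size_t buf_len, uint8_t want)
    {
            size_t off = 0;

            while (off + sizeof(struct tlv) <= buf_len) {
                    const struct tlv *t = (const struct tlv *)(buf + off);

                    if (t->type == want)
                            return (int)off;
                    /* Only trust t->len once the fixed header is known to fit. */
                    off += sizeof(struct tlv) + t->len;
            }
            return -1;      /* ran out of data without finding it */
    }

    int main(void)
    {
            const uint8_t pkt[] = { 1, 2, 0xaa, 0xbb, 7, 1, 0xcc };

            return find_tlv(pkt, sizeof(pkt), 7) == 4 ? 0 : 1;
    }
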
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1c3bc0a..368c23a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1048,6 +1048,7 @@
 		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
 #endif
 
+		newnp->ipv6_mc_list = NULL;
 		newnp->ipv6_ac_list = NULL;
 		newnp->ipv6_fl_list = NULL;
 		newnp->pktoptions  = NULL;
@@ -1117,6 +1118,7 @@
 	   First: no IPv4 options.
 	 */
 	newinet->inet_opt = NULL;
+	newnp->ipv6_mc_list = NULL;
 	newnp->ipv6_ac_list = NULL;
 	newnp->ipv6_fl_list = NULL;
 
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c4..a2267f8 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@
 	u8 frag_hdr_sz = sizeof(struct frag_hdr);
 	__wsum csum;
 	int tnl_hlen;
+	int err;
 
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@
 		/* Find the unfragmentable header and shift it left by frag_hdr_sz
 		 * bytes to insert fragment header.
 		 */
-		unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+		err = ip6_find_1stfragopt(skb, &prevhdr);
+		if (err < 0)
+			return ERR_PTR(err);
+		unfrag_ip6hlen = err;
 		nexthdr = *prevhdr;
 		*prevhdr = NEXTHDR_FRAGMENT;
 		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 48d0dc89b..e735f78 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1168,11 +1168,10 @@
 		sipx->sipx_network	= ipxif->if_netnum;
 		memcpy(sipx->sipx_node, ipxif->if_node,
 			sizeof(sipx->sipx_node));
-		rc = -EFAULT;
-		if (copy_to_user(arg, &ifr, sizeof(ifr)))
-			break;
-		ipxitf_put(ipxif);
 		rc = 0;
+		if (copy_to_user(arg, &ifr, sizeof(ifr)))
+			rc = -EFAULT;
+		ipxitf_put(ipxif);
 		break;
 	}
 	case SIOCAIPXITFCRT:
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index cb76ff3..6a563e6 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2652,13 +2652,6 @@
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
 	}
 
-	sockc.tsflags = po->sk.sk_tsflags;
-	if (msg->msg_controllen) {
-		err = sock_cmsg_send(&po->sk, msg, &sockc);
-		if (unlikely(err))
-			goto out;
-	}
-
 	err = -ENXIO;
 	if (unlikely(dev == NULL))
 		goto out;
@@ -2666,6 +2659,13 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_put;
 
+	sockc.tsflags = po->sk.sk_tsflags;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(&po->sk, msg, &sockc);
+		if (unlikely(err))
+			goto out_put;
+	}
+
 	if (po->sk.sk_socket->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	size_max = po->tx_ring.frame_size
diff --git a/net/sctp/input.c b/net/sctp/input.c
index a01a56e..6c79915 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@
 			     struct sctp_association **app,
 			     struct sctp_transport **tpp)
 {
+	struct sctp_init_chunk *chunkhdr, _chunkhdr;
 	union sctp_addr saddr;
 	union sctp_addr daddr;
 	struct sctp_af *af;
 	struct sock *sk = NULL;
 	struct sctp_association *asoc;
 	struct sctp_transport *transport = NULL;
-	struct sctp_init_chunk *chunkhdr;
 	__u32 vtag = ntohl(sctphdr->vtag);
-	int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 
 	*app = NULL; *tpp = NULL;
 
@@ -516,13 +515,16 @@
 	 * discard the packet.
 	 */
 	if (vtag == 0) {
-		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
-		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
-			  + sizeof(__be32) ||
+		/* chunk header + first 4 octets of init header */
+		chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+					      sizeof(struct sctphdr),
+					      sizeof(struct sctp_chunkhdr) +
+					      sizeof(__be32), &_chunkhdr);
+		if (!chunkhdr ||
 		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
-		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
 			goto out;
-		}
+
 	} else if (vtag != asoc->c.peer_vtag) {
 		goto out;
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6a2532d..0c09060 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
-	union sctp_addr *baddr = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
 	struct in6_addr *final_p, final;
 	__u8 matchlen = 0;
-	__u8 bmatchlen;
 	sctp_scope_t scope;
 
 	memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-		if (!laddr->valid)
+		struct dst_entry *bdst;
+		__u8 bmatchlen;
+
+		if (!laddr->valid ||
+		    laddr->state != SCTP_ADDR_SRC ||
+		    laddr->a.sa.sa_family != AF_INET6 ||
+		    scope > sctp_scope(&laddr->a))
 			continue;
-		if ((laddr->state == SCTP_ADDR_SRC) &&
-		    (laddr->a.sa.sa_family == AF_INET6) &&
-		    (scope <= sctp_scope(&laddr->a))) {
-			bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
-			if (!baddr || (matchlen < bmatchlen)) {
-				baddr = &laddr->a;
-				matchlen = bmatchlen;
-			}
-		}
-	}
-	if (baddr) {
-		fl6->saddr = baddr->v6.sin6_addr;
-		fl6->fl6_sport = baddr->v6.sin6_port;
+
+		fl6->saddr = laddr->a.v6.sin6_addr;
+		fl6->fl6_sport = laddr->a.v6.sin6_port;
 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
-		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+		if (!IS_ERR(bdst) &&
+		    ipv6_chk_addr(dev_net(bdst->dev),
+				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+			if (!IS_ERR_OR_NULL(dst))
+				dst_release(dst);
+			dst = bdst;
+			break;
+		}
+
+		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+		if (matchlen > bmatchlen)
+			continue;
+
+		if (!IS_ERR_OR_NULL(dst))
+			dst_release(dst);
+		dst = bdst;
+		matchlen = bmatchlen;
 	}
 	rcu_read_unlock();
 
@@ -666,6 +678,9 @@
 	newnp = inet6_sk(newsk);
 
 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+	newnp->ipv6_mc_list = NULL;
+	newnp->ipv6_ac_list = NULL;
+	newnp->ipv6_fl_list = NULL;
 
 	rcu_read_lock();
 	opt = rcu_dereference(np->opt);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 72c5867..b2cdced 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -22,6 +22,7 @@
 hostprogs-y += map_perf_test
 hostprogs-y += test_overhead
 hostprogs-y += test_cgrp2_array_pin
+hostprogs-y += test_cgrp2_attach
 hostprogs-y += xdp1
 hostprogs-y += xdp2
 hostprogs-y += test_current_task_under_cgroup
@@ -50,6 +51,7 @@
 map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
 test_overhead-objs := bpf_load.o libbpf.o test_overhead_user.o
 test_cgrp2_array_pin-objs := libbpf.o test_cgrp2_array_pin.o
+test_cgrp2_attach-objs := libbpf.o test_cgrp2_attach.o
 xdp1-objs := bpf_load.o libbpf.o xdp1_user.o
 # reuse xdp1 source intentionally
 xdp2-objs := bpf_load.o libbpf.o xdp1_user.o
diff --git a/samples/bpf/libbpf.c b/samples/bpf/libbpf.c
index 9969e35..9cbc786 100644
--- a/samples/bpf/libbpf.c
+++ b/samples/bpf/libbpf.c
@@ -104,6 +104,29 @@
 	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
+int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
+		    unsigned int flags)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_bpf_fd = prog_fd,
+		.attach_type = type,
+		.attach_flags = flags,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
+}
+
+int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
+{
+	union bpf_attr attr = {
+		.target_fd = target_fd,
+		.attach_type = type,
+	};
+
+	return syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
+}
+
 int bpf_obj_pin(int fd, const char *pathname)
 {
 	union bpf_attr attr = {
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index ac6edb6..b06cf5a 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -15,6 +15,10 @@
 		  const struct bpf_insn *insns, int insn_len,
 		  const char *license, int kern_version);
 
+int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
+		    unsigned int flags);
+int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
+
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
 
diff --git a/samples/bpf/test_cgrp2_attach.c b/samples/bpf/test_cgrp2_attach.c
new file mode 100644
index 0000000..9de4896
--- /dev/null
+++ b/samples/bpf/test_cgrp2_attach.c
@@ -0,0 +1,147 @@
+/* eBPF example program:
+ *
+ * - Creates arraymap in kernel with 4 bytes keys and 8 byte values
+ *
+ * - Loads eBPF program
+ *
+ *   The eBPF program accesses the map passed in to store two pieces of
+ *   information. The number of invocations of the program, which maps
+ *   to the number of packets received, is stored to key 0. Key 1 is
+ *   incremented on each iteration by the number of bytes stored in
+ *   the skb.
+ *
+ * - Detaches any eBPF program previously attached to the cgroup
+ *
+ * - Attaches the new program to a cgroup using BPF_PROG_ATTACH
+ *
+ * - Every second, reads map[0] and map[1] to see how many bytes and
+ *   packets were seen on any socket of tasks in the given cgroup.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <linux/bpf.h>
+
+#include "libbpf.h"
+
+enum {
+	MAP_KEY_PACKETS,
+	MAP_KEY_BYTES,
+};
+
+static int prog_load(int map_fd, int verdict)
+{
+	struct bpf_insn prog[] = {
+		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), /* save ctx (r1) in r6; r1-r5 are clobbered by BPF_CALL */
+
+		/* Count packets */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_PACKETS), /* r0 = 0 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd), /* load map fd to r1 */
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		/* Count bytes */
+		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
+		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
+		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
+		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
+		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
+		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
+		BPF_EXIT_INSN(),
+	};
+
+	return bpf_prog_load(BPF_PROG_TYPE_CGROUP_SKB,
+			     prog, sizeof(prog), "GPL", 0);
+}
+
+static int usage(const char *argv0)
+{
+	printf("Usage: %s <cg-path> <egress|ingress> [drop]\n", argv0);
+	return EXIT_FAILURE;
+}
+
+int main(int argc, char **argv)
+{
+	int cg_fd, map_fd, prog_fd, key, ret;
+	long long pkt_cnt, byte_cnt;
+	enum bpf_attach_type type;
+	int verdict = 1;
+
+	if (argc < 3)
+		return usage(argv[0]);
+
+	if (strcmp(argv[2], "ingress") == 0)
+		type = BPF_CGROUP_INET_INGRESS;
+	else if (strcmp(argv[2], "egress") == 0)
+		type = BPF_CGROUP_INET_EGRESS;
+	else
+		return usage(argv[0]);
+
+	if (argc > 3 && strcmp(argv[3], "drop") == 0)
+		verdict = 0;
+
+	cg_fd = open(argv[1], O_DIRECTORY | O_RDONLY);
+	if (cg_fd < 0) {
+		printf("Failed to open cgroup path: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
+				sizeof(key), sizeof(byte_cnt),
+				256, 0);
+	if (map_fd < 0) {
+		printf("Failed to create map: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	prog_fd = prog_load(map_fd, verdict);
+	printf("Output from kernel verifier:\n%s\n-------\n", bpf_log_buf);
+
+	if (prog_fd < 0) {
+		printf("Failed to load prog: '%s'\n", strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	ret = bpf_prog_detach(cg_fd, type);
+	printf("bpf_prog_detach() returned '%s' (%d)\n", strerror(errno), errno);
+
+	ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
+	if (ret < 0) {
+		printf("Failed to attach prog to cgroup: '%s'\n",
+		       strerror(errno));
+		return EXIT_FAILURE;
+	}
+
+	while (1) {
+		key = MAP_KEY_PACKETS;
+		assert(bpf_lookup_elem(map_fd, &key, &pkt_cnt) == 0);
+
+		key = MAP_KEY_BYTES;
+		assert(bpf_lookup_elem(map_fd, &key, &byte_cnt) == 0);
+
+		printf("cgroup received %lld packets, %lld bytes\n",
+		       pkt_cnt, byte_cnt);
+		sleep(1);
+	}
+
+	return EXIT_SUCCESS;
+}
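
As a usage sketch (the cgroup path is illustrative), running something like
"./test_cgrp2_attach /sys/fs/cgroup/unified/mygroup egress" detaches any
previously attached program, attaches the counting program to that cgroup, and
then prints the packet and byte counters once per second; passing "drop" as a
third argument loads the same program with a verdict of 0, so traffic from
tasks in the cgroup is counted and then discarded.
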
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 389325a..0974598 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -204,10 +204,11 @@
 
 		cause = "missing-hash";
 		status = INTEGRITY_NOLABEL;
-		if (opened & FILE_CREATED) {
+		if (opened & FILE_CREATED)
 			iint->flags |= IMA_NEW_FILE;
+		if ((iint->flags & IMA_NEW_FILE) &&
+		    !(iint->flags & IMA_DIGSIG_REQUIRED))
 			status = INTEGRITY_PASS;
-		}
 		goto out;
 	}
 
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 0430658..0f41257 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -106,7 +106,11 @@
 	/* disable ringbuffer DMAs */
 	snd_hdac_chip_writeb(bus, RIRBCTL, 0);
 	snd_hdac_chip_writeb(bus, CORBCTL, 0);
+	spin_unlock_irq(&bus->reg_lock);
+
 	hdac_wait_for_cmd_dmas(bus);
+
+	spin_lock_irq(&bus->reg_lock);
 	/* disable unsolicited responses */
 	snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
 	spin_unlock_irq(&bus->reg_lock);
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 37b70f8..0abab79 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1537,6 +1537,8 @@
 		      "Dell Inspiron 1501", STAC_9200_DELL_M26),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
 		      "unknown Dell", STAC_9200_DELL_M26),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+		      "Dell Latitude D430", STAC_9200_DELL_M22),
 	/* Panasonic */
 	SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
 	/* Gateway machines needs EAPD to be set on resume */
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 8c0f3b8..e78b5f0 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -498,7 +498,7 @@
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
 	if (gpio_is_valid(cs4271->gpio_nreset)) {
-		gpio_set_value(cs4271->gpio_nreset, 0);
+		gpio_direction_output(cs4271->gpio_nreset, 0);
 		mdelay(1);
 		gpio_set_value(cs4271->gpio_nreset, 1);
 		mdelay(1);
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 502aa4f..0942d4a 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -1662,6 +1662,7 @@
 	mutex_lock(&msm_sdw->codec_mutex);
 	switch (opcode) {
 	case AUDIO_NOTIFIER_SERVICE_DOWN:
+		msm_sdw->int_mclk1_enabled = false;
 		msm_sdw->dev_up = false;
 		for (i = 0; i < msm_sdw->nr; i++)
 			swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 5f8e3fd..125ce7a 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -37,9 +37,10 @@
 #define DRV_NAME "pmic_analog_codec"
 #define SDM660_CDC_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
 			SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
-			SNDRV_PCM_RATE_48000)
+			SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 |\
+			SNDRV_PCM_RATE_192000)
 #define SDM660_CDC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
-		SNDRV_PCM_FMTBIT_S24_LE)
+		SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_3LE)
 #define MSM_DIG_CDC_STRING_LEN 80
 #define MSM_ANLG_CDC_VERSION_ENTRY_SIZE 32
 
@@ -3796,6 +3797,9 @@
 	msm_anlg_cdc_configure_cap(codec, false, false);
 	wcd_mbhc_stop(&sdm660_cdc_priv->mbhc);
 	wcd_mbhc_deinit(&sdm660_cdc_priv->mbhc);
+	/* Disable mechanical detection and set type to insertion */
+	snd_soc_update_bits(codec, MSM89XX_PMIC_ANALOG_MBHC_DET_CTL_1,
+			    0xA0, 0x20);
 	ret = wcd_mbhc_init(&sdm660_cdc_priv->mbhc, codec, &mbhc_cb,
 			    &intr_ids, wcd_mbhc_registers, true);
 	if (ret)
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index f140b19..b6e0ec6 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -1929,8 +1929,12 @@
 			.stream_name = "AIF1 Playback",
 			.channels_min = 1,
 			.channels_max = 2,
-			.rates = SNDRV_PCM_RATE_8000_48000,
-			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.rates = SNDRV_PCM_RATE_8000_192000,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				SNDRV_PCM_FMTBIT_S24_LE |
+				SNDRV_PCM_FMTBIT_S24_3LE,
 		},
 		 .ops = &msm_dig_dai_ops,
 	},
@@ -2012,7 +2016,7 @@
 const struct regmap_config msm_digital_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
-	.val_bits = 32,
+	.val_bits = 8,
 	.lock = enable_digital_callback,
 	.unlock = disable_digital_callback,
 	.cache_type = REGCACHE_FLAT,
@@ -2085,10 +2089,18 @@
 #ifdef CONFIG_PM
 static int msm_dig_suspend(struct device *dev)
 {
-	struct msm_asoc_mach_data *pdata =
-	snd_soc_card_get_drvdata(registered_digcodec->component.card);
+	struct msm_asoc_mach_data *pdata;
 	struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
 
+	if (!registered_digcodec || !msm_dig_cdc) {
+		pr_debug("%s:digcodec not initialized, return\n", __func__);
+		return 0;
+	}
+	pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+	if (!pdata) {
+		pr_debug("%s:card not initialized, return\n", __func__);
+		return 0;
+	}
 	if (msm_dig_cdc->dapm_bias_off) {
 		pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
 			__func__, atomic_read(&pdata->int_mclk0_rsc_ref),
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
index 7278431..e44eec9 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.c
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -729,7 +729,8 @@
 				 * otherwise report unsupported plug
 				 */
 				if (mbhc->mbhc_cfg->swap_gnd_mic &&
-					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+					mbhc->mbhc_cfg->swap_gnd_mic(codec,
+					true)) {
 					pr_debug("%s: US_EU gpio present,flip switch\n"
 						, __func__);
 					continue;
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
index 83023bc..745e2e8 100644
--- a/sound/soc/codecs/wcd-mbhc-legacy.c
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -633,7 +633,8 @@
 				 * otherwise report unsupported plug
 				 */
 				if (mbhc->mbhc_cfg->swap_gnd_mic &&
-					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
+					mbhc->mbhc_cfg->swap_gnd_mic(codec,
+					true)) {
 					pr_debug("%s: US_EU gpio present,flip switch\n"
 						, __func__);
 					continue;
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index 510a8dc..ebcb413 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -1460,18 +1460,12 @@
 		if (config->usbc_en1_gpio_p)
 			rc = msm_cdc_pinctrl_select_active_state(
 				config->usbc_en1_gpio_p);
-		if (rc == 0 && config->usbc_en2n_gpio_p)
-			rc = msm_cdc_pinctrl_select_active_state(
-				config->usbc_en2n_gpio_p);
 		if (rc == 0 && config->usbc_force_gpio_p)
 			rc = msm_cdc_pinctrl_select_active_state(
 				config->usbc_force_gpio_p);
 		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER;
 	} else {
 		/* no delay is required when disabling GPIOs */
-		if (config->usbc_en2n_gpio_p)
-			msm_cdc_pinctrl_select_sleep_state(
-				config->usbc_en2n_gpio_p);
 		if (config->usbc_en1_gpio_p)
 			msm_cdc_pinctrl_select_sleep_state(
 				config->usbc_en1_gpio_p);
@@ -1490,6 +1484,8 @@
 		}
 
 		mbhc->usbc_mode = POWER_SUPPLY_TYPEC_NONE;
+		if (mbhc->mbhc_cfg->swap_gnd_mic)
+			mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec, false);
 	}
 
 	return rc;
@@ -1675,19 +1671,12 @@
 		dev_dbg(mbhc->codec->dev, "%s: usbc analog enabled\n",
 				__func__);
 		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
-				"qcom,usbc-analog-en1_gpio",
+				"qcom,usbc-analog-en1-gpio",
 				&config->usbc_en1_gpio,
 				&config->usbc_en1_gpio_p);
 		if (rc)
 			goto err;
 
-		rc = wcd_mbhc_init_gpio(mbhc, mbhc_cfg,
-				"qcom,usbc-analog-en2_n_gpio",
-				&config->usbc_en2n_gpio,
-				&config->usbc_en2n_gpio_p);
-		if (rc)
-			goto err;
-
 		if (of_find_property(card->dev->of_node,
 				     "qcom,usbc-analog-force_detect_gpio",
 				     NULL)) {
@@ -1734,12 +1723,6 @@
 		gpio_free(config->usbc_en1_gpio);
 		config->usbc_en1_gpio = 0;
 	}
-	if (config->usbc_en2n_gpio > 0) {
-		dev_dbg(card->dev, "%s free usb_en2 gpio %d\n",
-			__func__, config->usbc_en2n_gpio);
-		gpio_free(config->usbc_en2n_gpio);
-		config->usbc_en2n_gpio = 0;
-	}
 	if (config->usbc_force_gpio > 0) {
 		dev_dbg(card->dev, "%s free usb_force gpio %d\n",
 			__func__, config->usbc_force_gpio);
@@ -1748,8 +1731,6 @@
 	}
 	if (config->usbc_en1_gpio_p)
 		of_node_put(config->usbc_en1_gpio_p);
-	if (config->usbc_en2n_gpio_p)
-		of_node_put(config->usbc_en2n_gpio_p);
 	if (config->usbc_force_gpio_p)
 		of_node_put(config->usbc_force_gpio_p);
 	dev_dbg(mbhc->codec->dev, "%s: leave %d\n", __func__, rc);
@@ -1790,15 +1771,11 @@
 		/* free GPIOs */
 		if (config->usbc_en1_gpio > 0)
 			gpio_free(config->usbc_en1_gpio);
-		if (config->usbc_en2n_gpio > 0)
-			gpio_free(config->usbc_en2n_gpio);
 		if (config->usbc_force_gpio)
 			gpio_free(config->usbc_force_gpio);
 
 		if (config->usbc_en1_gpio_p)
 			of_node_put(config->usbc_en1_gpio_p);
-		if (config->usbc_en2n_gpio_p)
-			of_node_put(config->usbc_en2n_gpio_p);
 		if (config->usbc_force_gpio_p)
 			of_node_put(config->usbc_force_gpio_p);
 	}
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index 4ea4401..7ed06c3 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -404,10 +404,10 @@
 
 struct usbc_ana_audio_config {
 	int usbc_en1_gpio;
-	int usbc_en2n_gpio;
+	int usbc_en2_gpio;
 	int usbc_force_gpio;
 	struct device_node *usbc_en1_gpio_p; /* used by pinctrl API */
-	struct device_node *usbc_en2n_gpio_p; /* used by pinctrl API */
+	struct device_node *usbc_en2_gpio_p; /* used by pinctrl API */
 	struct device_node *usbc_force_gpio_p; /* used by pinctrl API */
 };
 
@@ -416,7 +416,7 @@
 	void *calibration;
 	bool detect_extn_cable;
 	bool mono_stero_detection;
-	bool (*swap_gnd_mic)(struct snd_soc_codec *codec);
+	bool (*swap_gnd_mic)(struct snd_soc_codec *codec, bool active);
 	bool hs_ext_micbias;
 	bool gnd_det_en;
 	int key_code[WCD_MBHC_KEYCODE_NUM];
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 7e217a6..a08b598 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -82,8 +82,15 @@
 #define WCD_SPI_WORD_BYTE_CNT (4)
 #define WCD_SPI_RW_MULTI_MIN_LEN (16)
 
-/* Max size is closest multiple of 16 less than 64Kbytes */
-#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 16)
+/* Max size is 32 bytes less than 64Kbytes */
+#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 32)
+
+/*
+ * Max size for the pre-allocated buffers is the max
+ * possible read/write length + 32 bytes for the SPI
+ * read/write command header itself.
+ */
+#define WCD_SPI_RW_MAX_BUF_SIZE (WCD_SPI_RW_MULTI_MAX_LEN + 32)
 
 /* Alignment requirements */
 #define WCD_SPI_RW_MIN_ALIGN    WCD_SPI_WORD_BYTE_CNT
@@ -149,6 +156,10 @@
 
 	/* Completion object to indicate system resume completion */
 	struct completion resume_comp;
+
+	/* Buffers to hold memory used for transfers */
+	void *tx_buf;
+	void *rx_buf;
 };
 
 enum xfer_request {
@@ -230,17 +241,18 @@
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *tx_xfer = &wcd_spi->xfer2[0];
 	struct spi_transfer *rx_xfer = &wcd_spi->xfer2[1];
-	u8 *tx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
 	u32 frame = 0;
 	int ret;
 
 	dev_dbg(&spi->dev, "%s: remote_addr = 0x%x\n",
 		__func__, remote_addr);
 
-	tx_buf = kzalloc(WCD_SPI_READ_SINGLE_LEN,
-			 GFP_KERNEL | GFP_DMA);
-	if (!tx_buf)
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
 		return -ENOMEM;
+	}
 
 	frame |= WCD_SPI_READ_FRAME_OPCODE;
 	frame |= remote_addr & WCD_CMD_ADDR_MASK;
@@ -256,7 +268,6 @@
 	rx_xfer->len = sizeof(*val);
 
 	ret = spi_sync(spi, &wcd_spi->msg2);
-	kfree(tx_buf);
 
 	return ret;
 }
@@ -267,8 +278,8 @@
 {
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *xfer = &wcd_spi->xfer1;
-	u8 *tx_buf;
-	u8 *rx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
+	u8 *rx_buf = wcd_spi->rx_buf;
 	u32 frame = 0;
 	int ret;
 
@@ -278,15 +289,9 @@
 	frame |= WCD_SPI_FREAD_FRAME_OPCODE;
 	frame |= remote_addr & WCD_CMD_ADDR_MASK;
 
-	tx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
-			 GFP_KERNEL | GFP_DMA);
-	if (!tx_buf)
-		return -ENOMEM;
-
-	rx_buf = kzalloc(WCD_SPI_CMD_FREAD_LEN + len,
-			 GFP_KERNEL | GFP_DMA);
-	if (!rx_buf) {
-		kfree(tx_buf);
+	if (!tx_buf || !rx_buf) {
+		dev_err(&spi->dev, "%s: %s not allocated\n", __func__,
+			(!tx_buf) ? "tx_buf" : "rx_buf");
 		return -ENOMEM;
 	}
 
@@ -306,8 +311,6 @@
 
 	memcpy(data, rx_buf + WCD_SPI_CMD_FREAD_LEN, len);
 done:
-	kfree(tx_buf);
-	kfree(rx_buf);
 	return ret;
 }
 
@@ -344,7 +347,7 @@
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	struct spi_transfer *xfer = &wcd_spi->xfer1;
 	u32 frame = 0;
-	u8 *tx_buf;
+	u8 *tx_buf = wcd_spi->tx_buf;
 	int xfer_len, ret;
 
 	dev_dbg(&spi->dev, "%s: addr = 0x%x len = %zd\n",
@@ -356,9 +359,11 @@
 	frame = cpu_to_be32(frame);
 	xfer_len = len + sizeof(frame);
 
-	tx_buf = kzalloc(xfer_len, GFP_KERNEL);
-	if (!tx_buf)
+	if (!tx_buf) {
+		dev_err(&spi->dev, "%s: tx_buf not allocated\n",
+			__func__);
 		return -ENOMEM;
+	}
 
 	memcpy(tx_buf, &frame, sizeof(frame));
 	memcpy(tx_buf + sizeof(frame), data, len);
@@ -372,8 +377,6 @@
 		dev_err(&spi->dev,
 			"%s: Failed, addr = 0x%x, len = %zd\n",
 			__func__, remote_addr, len);
-	kfree(tx_buf);
-
 	return ret;
 }
 
@@ -1331,6 +1334,23 @@
 	spi_message_init(&wcd_spi->msg2);
 	spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
 	spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+
+	/* Pre-allocate the buffers */
+	wcd_spi->tx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->tx_buf) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	wcd_spi->rx_buf = kzalloc(WCD_SPI_RW_MAX_BUF_SIZE,
+				  GFP_KERNEL | GFP_DMA);
+	if (!wcd_spi->rx_buf) {
+		kfree(wcd_spi->tx_buf);
+		wcd_spi->tx_buf = NULL;
+		ret = -ENOMEM;
+		goto done;
+	}
 done:
 	return ret;
 }
@@ -1348,6 +1368,11 @@
 	spi_transfer_del(&wcd_spi->xfer1);
 	spi_transfer_del(&wcd_spi->xfer2[0]);
 	spi_transfer_del(&wcd_spi->xfer2[1]);
+
+	kfree(wcd_spi->tx_buf);
+	kfree(wcd_spi->rx_buf);
+	wcd_spi->tx_buf = NULL;
+	wcd_spi->rx_buf = NULL;
 }
 
 static const struct component_ops wcd_spi_component_ops = {
diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c
index 0b07393..4278e36 100644
--- a/sound/soc/codecs/wcd9330.c
+++ b/sound/soc/codecs/wcd9330.c
@@ -1536,6 +1536,13 @@
 	tomtom_mad_input = ucontrol->value.integer.value[0];
 	micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias;
 
+	if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) {
+		dev_err(codec->dev,
+			"%s: tomtom_mad_input = %d out of bounds\n",
+			__func__, tomtom_mad_input);
+		return -EINVAL;
+	}
+
 	pr_debug("%s: tomtom_mad_input = %s\n", __func__,
 			tomtom_conn_mad_text[tomtom_mad_input]);
 
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index dedf4dc..f8fa43b 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -188,7 +188,7 @@
 MODULE_PARM_DESC(sido_buck_svs_voltage,
 			"setting for SVS voltage for SIDO BUCK");
 
-#define TASHA_TX_UNMUTE_DELAY_MS	25
+#define TASHA_TX_UNMUTE_DELAY_MS	40
 
 static int tx_unmute_delay = TASHA_TX_UNMUTE_DELAY_MS;
 module_param(tx_unmute_delay, int, 0664);
@@ -5902,8 +5902,6 @@
 					    CF_MIN_3DB_150HZ << 5);
 		/* Enable TX PGA Mute */
 		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
-		/* Enable APC */
-		snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x08);
 		break;
 	case SND_SOC_DAPM_POST_PMU:
 		snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
@@ -5930,7 +5928,6 @@
 		hpf_cut_off_freq =
 			tasha->tx_hpf_work[decimator].hpf_cut_off_freq;
 		snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
-		snd_soc_update_bits(codec, dec_cfg_reg, 0x08, 0x00);
 		if (cancel_delayed_work_sync(
 		    &tasha->tx_hpf_work[decimator].dwork)) {
 			if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index c0a32f3..f7fb325 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -505,7 +505,7 @@
 static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
 static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
 
-#define WCD934X_TX_UNMUTE_DELAY_MS 25
+#define WCD934X_TX_UNMUTE_DELAY_MS 40
 
 static int tx_unmute_delay = WCD934X_TX_UNMUTE_DELAY_MS;
 module_param(tx_unmute_delay, int, 0664);
@@ -795,11 +795,13 @@
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01);
 	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19);
 	regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15);
+	/* Add 1msec delay for VOUT to settle */
+	usleep_range(1000, 1100);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7);
-	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3);
 	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7);
+	regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3);
 
 	return 0;
 }
@@ -8274,6 +8276,9 @@
 
 	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
 	ret = __tavil_cdc_mclk_enable_locked(tavil, enable);
+	if (enable)
+		wcd_resmgr_set_sido_input_src(tavil->resmgr,
+						     SIDO_SOURCE_RCO_BG);
 	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
 
 	return ret;
@@ -8411,6 +8416,8 @@
 					__func__, ret);
 				goto done;
 			}
+			wcd_resmgr_set_sido_input_src(tavil->resmgr,
+							SIDO_SOURCE_RCO_BG);
 			ret = wcd_resmgr_enable_clk_block(tavil->resmgr,
 							   WCD_CLK_RCO);
 			ret |= tavil_cdc_req_mclk_enable(tavil, false);
@@ -9814,18 +9821,23 @@
 {
 	int val, rc;
 
-	__tavil_cdc_mclk_enable(tavil, true);
+	WCD9XXX_V2_BG_CLK_LOCK(tavil->resmgr);
+	__tavil_cdc_mclk_enable_locked(tavil, true);
 
 	regmap_update_bits(tavil->wcd9xxx->regmap,
 			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x1E, 0x10);
 	regmap_update_bits(tavil->wcd9xxx->regmap,
 			WCD934X_CHIP_TIER_CTRL_EFUSE_CTL, 0x01, 0x01);
-
 	/*
 	 * 5ms sleep required after enabling efuse control
 	 * before checking the status.
 	 */
 	usleep_range(5000, 5500);
+	wcd_resmgr_set_sido_input_src(tavil->resmgr,
+					     SIDO_SOURCE_RCO_BG);
+
+	WCD9XXX_V2_BG_CLK_UNLOCK(tavil->resmgr);
+
 	rc = regmap_read(tavil->wcd9xxx->regmap,
 			 WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS, &val);
 	if (rc || (!(val & 0x01)))
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 8780888..825aaee 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -25,8 +25,7 @@
 #define WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
 #define WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
 
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
-					  int sido_src);
+
 static const char *wcd_resmgr_clk_type_to_str(enum wcd_clock_type clk_type)
 {
 	if (clk_type == WCD_CLK_OFF)
@@ -267,8 +266,6 @@
 					0x01, 0x01);
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD934X_CODEC_RPM_CLK_GATE, 0x03, 0x00);
-			wcd_resmgr_set_sido_input_src(resmgr,
-						      SIDO_SOURCE_RCO_BG);
 		} else {
 			wcd_resmgr_codec_reg_update_bits(resmgr,
 					WCD93XX_CDC_CLK_RST_CTRL_FS_CNT_CONTROL,
@@ -515,7 +512,7 @@
 	return ret;
 }
 
-static void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
 					  int sido_src)
 {
 	if (!resmgr)
@@ -553,6 +550,7 @@
 		pr_debug("%s: sido input src to external\n", __func__);
 	}
 }
+EXPORT_SYMBOL(wcd_resmgr_set_sido_input_src);
 
 /*
  * wcd_resmgr_set_sido_input_src_locked:
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.h b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
index f605a24..e831ba6 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.h
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -87,4 +87,7 @@
 void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr);
 void wcd_resmgr_set_sido_input_src_locked(struct wcd9xxx_resmgr_v2 *resmgr,
 					  int sido_src);
+void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr,
+					  int sido_src);
+
 #endif
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 3bef986..6cf637c 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1154,6 +1154,7 @@
 	int ret = 0;
 	struct wsa881x_priv *wsa881x;
 	u8 devnum = 0;
+	bool pin_state_current = false;
 
 	wsa881x = devm_kzalloc(&pdev->dev, sizeof(struct wsa881x_priv),
 			    GFP_KERNEL);
@@ -1184,6 +1185,9 @@
 		if (ret)
 			goto err;
 	}
+	if (wsa881x->wsa_rst_np)
+		pin_state_current = msm_cdc_pinctrl_get_state(
+						wsa881x->wsa_rst_np);
 	wsa881x_gpio_ctrl(wsa881x, true);
 	wsa881x->state = WSA881X_DEV_UP;
 
@@ -1246,6 +1250,8 @@
 	return 0;
 
 dev_err:
+	if (pin_state_current == false)
+		wsa881x_gpio_ctrl(wsa881x, false);
 	swr_remove_device(pdev);
 err:
 	return ret;
@@ -1261,6 +1267,7 @@
 		return -EINVAL;
 	}
 	debugfs_remove_recursive(debugfs_wsa881x_dent);
+	debugfs_wsa881x_dent = NULL;
 	snd_soc_unregister_codec(&pdev->dev);
 	if (wsa881x->pd_gpio)
 		gpio_free(wsa881x->pd_gpio);
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index b75ba98..7bc4051 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -496,6 +496,8 @@
 static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text);
 static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text);
 static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text);
 
 static struct platform_device *spdev;
@@ -2263,6 +2265,54 @@
 	return sample_rate;
 }
 
+static int mi2s_get_format(int value)
+{
+	int format;
+
+	switch (value) {
+	case 0:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	case 1:
+		format = SNDRV_PCM_FORMAT_S24_LE;
+		break;
+	case 2:
+		format = SNDRV_PCM_FORMAT_S24_3LE;
+		break;
+	case 3:
+		format = SNDRV_PCM_FORMAT_S32_LE;
+		break;
+	default:
+		format = SNDRV_PCM_FORMAT_S16_LE;
+		break;
+	}
+	return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+	int value;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		value = 0;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		value = 1;
+		break;
+	case SNDRV_PCM_FORMAT_S24_3LE:
+		value = 2;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		value = 3;
+		break;
+	default:
+		value = 0;
+		break;
+	}
+	return value;
+}
+
 static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
 				   struct snd_ctl_elem_value *ucontrol)
 {
@@ -2395,6 +2445,78 @@
 	return 1;
 }
 
+static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		idx, mi2s_rx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_rx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_rx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	ucontrol->value.enumerated.item[0] =
+		mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		idx, mi2s_tx_cfg[idx].bit_format,
+		ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
+static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	int idx = mi2s_get_port_idx(kcontrol);
+
+	if (idx < 0)
+		return idx;
+
+	mi2s_tx_cfg[idx].bit_format =
+		mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+	pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+		  idx, mi2s_tx_cfg[idx].bit_format,
+		  ucontrol->value.enumerated.item[0]);
+
+	return 0;
+}
+
 static int msm_hifi_ctrl(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
@@ -2647,6 +2769,22 @@
 			msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
 	SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs,
 			msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put),
+	SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format,
+			msm_mi2s_rx_format_get, msm_mi2s_rx_format_put),
+	SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format,
+			msm_mi2s_tx_format_get, msm_mi2s_tx_format_put),
 	SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get,
 			msm_hifi_put),
 };
@@ -3111,48 +3249,64 @@
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_PRI_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[PRIM_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[PRIM_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[SEC_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[SEC_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[TERT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[TERT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_rx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_rx_cfg[QUAT_MI2S].channels;
 		break;
 
 	case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
+		param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+			mi2s_tx_cfg[QUAT_MI2S].bit_format);
 		rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
 		channels->min = channels->max =
 			mi2s_tx_cfg[QUAT_MI2S].channels;
@@ -3980,6 +4134,7 @@
 	u32 bit_per_sample;
 
 	switch (bit_format) {
+	case SNDRV_PCM_FORMAT_S32_LE:
 	case SNDRV_PCM_FORMAT_S24_3LE:
 	case SNDRV_PCM_FORMAT_S24_LE:
 		bit_per_sample = 32;
@@ -4059,6 +4214,13 @@
 		ret = -EINVAL;
 		goto err;
 	}
+
+	if (pinctrl_info->pinctrl == NULL) {
+		pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
 	curr_state = pinctrl_info->curr_state;
 	pinctrl_info->curr_state = new_state;
 	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4327,6 +4489,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -4341,11 +4504,10 @@
 		goto done;
 	}
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
-		if (ret) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret_pinctrl) {
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
-			goto done;
+				__func__, ret_pinctrl);
 		}
 	}
 
@@ -4404,6 +4566,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -4424,10 +4587,10 @@
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
-		if (ret)
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
+				__func__, ret_pinctrl);
 	}
 }
 
@@ -7266,10 +7429,12 @@
 	struct msm_asoc_mach_data *pdata =
 				snd_soc_card_get_drvdata(card);
 
-	gpio_free(pdata->us_euro_gpio);
+	if (gpio_is_valid(pdata->us_euro_gpio))
+		gpio_free(pdata->us_euro_gpio);
 	i2s_auxpcm_deinit();
 
 	snd_soc_unregister_card(card);
+	audio_notifier_deregister("msm8998");
 	return 0;
 }
 
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index d4db55f..36382ba 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,5 +1,5 @@
-snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
-			msm-compress-q6-v2.o msm-compr-q6-v2.o \
+snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o \
+			msm-pcm-routing-v2.o msm-compress-q6-v2.o \
 			msm-pcm-afe-v2.o msm-pcm-voip-v2.o \
 			msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
 			msm-lsm-client.o msm-pcm-host-voice-v2.o \
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index 5d4a0ba..820aa1b 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -646,7 +646,9 @@
 	return cal_block;
 err:
 	kfree(cal_block->cal_info);
+	cal_block->cal_info = NULL;
 	kfree(cal_block->client_info);
+	cal_block->client_info = NULL;
 	kfree(cal_block);
 	cal_block = NULL;
 	return cal_block;
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
deleted file mode 100644
index 449325c..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.c
+++ /dev/null
@@ -1,1714 +0,0 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/time.h>
-#include <linux/wait.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <sound/core.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/pcm.h>
-#include <sound/initval.h>
-#include <sound/control.h>
-#include <sound/q6asm-v2.h>
-#include <sound/pcm_params.h>
-#include <asm/dma.h>
-#include <linux/dma-mapping.h>
-#include <linux/msm_audio_ion.h>
-
-#include <sound/timer.h>
-
-#include "msm-compr-q6-v2.h"
-#include "msm-pcm-routing-v2.h"
-#include <sound/tlv.h>
-
-#define COMPRE_CAPTURE_NUM_PERIODS	16
-/* Allocate the worst case frame size for compressed audio */
-#define COMPRE_CAPTURE_HEADER_SIZE	(sizeof(struct snd_compr_audio_info))
-/* Changing period size to 4032. 4032 will make sure COMPRE_CAPTURE_PERIOD_SIZE
- * is 4096 with meta data size of 64 and MAX_NUM_FRAMES_PER_BUFFER 1
- */
-#define COMPRE_CAPTURE_MAX_FRAME_SIZE	(4032)
-#define COMPRE_CAPTURE_PERIOD_SIZE	((COMPRE_CAPTURE_MAX_FRAME_SIZE + \
-					  COMPRE_CAPTURE_HEADER_SIZE) * \
-					  MAX_NUM_FRAMES_PER_BUFFER)
-#define COMPRE_OUTPUT_METADATA_SIZE	(sizeof(struct output_meta_data_st))
-#define COMPRESSED_LR_VOL_MAX_STEPS	0x20002000
-
-#define MAX_AC3_PARAM_SIZE		(18*2*sizeof(int))
-#define AMR_WB_BAND_MODE 8
-#define AMR_WB_DTX_MODE 0
-
-
-const DECLARE_TLV_DB_LINEAR(compr_rx_vol_gain, 0,
-			    COMPRESSED_LR_VOL_MAX_STEPS);
-
-static struct audio_locks the_locks;
-
-static struct snd_pcm_hardware msm_compr_hardware_capture = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =
-		COMPRE_CAPTURE_PERIOD_SIZE * COMPRE_CAPTURE_NUM_PERIODS,
-	.period_bytes_min =	COMPRE_CAPTURE_PERIOD_SIZE,
-	.period_bytes_max = COMPRE_CAPTURE_PERIOD_SIZE,
-	.periods_min =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.periods_max =	  COMPRE_CAPTURE_NUM_PERIODS,
-	.fifo_size =	    0,
-};
-
-static struct snd_pcm_hardware msm_compr_hardware_playback = {
-	.info =		 (SNDRV_PCM_INFO_MMAP |
-				SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				SNDRV_PCM_INFO_MMAP_VALID |
-				SNDRV_PCM_INFO_INTERLEAVED |
-				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
-	.formats =	      SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
-	.rates =		SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
-	.rate_min =	     8000,
-	.rate_max =	     48000,
-	.channels_min =	 1,
-	.channels_max =	 8,
-	.buffer_bytes_max =     1024 * 1024,
-	.period_bytes_min =	128 * 1024,
-	.period_bytes_max =     256 * 1024,
-	.periods_min =	  4,
-	.periods_max =	  8,
-	.fifo_size =	    0,
-};
-
-/* Conventional and unconventional sample rate supported */
-static unsigned int supported_sample_rates[] = {
-	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
-};
-
-/* Add supported codecs for compress capture path */
-static uint32_t supported_compr_capture_codecs[] = {
-	SND_AUDIOCODEC_AMRWB
-};
-
-static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
-	.count = ARRAY_SIZE(supported_sample_rates),
-	.list = supported_sample_rates,
-	.mask = 0,
-};
-
-static bool msm_compr_capture_codecs(uint32_t req_codec)
-{
-	int i;
-
-	pr_debug("%s req_codec:%d\n", __func__, req_codec);
-	if (req_codec == 0)
-		return false;
-	for (i = 0; i < ARRAY_SIZE(supported_compr_capture_codecs); i++) {
-		if (req_codec == supported_compr_capture_codecs[i])
-			return true;
-	}
-	return false;
-}
-
-static void compr_event_handler(uint32_t opcode,
-		uint32_t token, uint32_t *payload, void *priv)
-{
-	struct compr_audio *compr = priv;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_substream *substream = prtd->substream;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct audio_aio_write_param param;
-	struct audio_aio_read_param read_param;
-	struct audio_buffer *buf = NULL;
-	phys_addr_t temp;
-	struct output_meta_data_st output_meta_data;
-	uint32_t *ptrmem = (uint32_t *)payload;
-	int i = 0;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-	int stop_playback = 0;
-
-	pr_debug("%s opcode =%08x\n", __func__, opcode);
-	switch (opcode) {
-	case ASM_DATA_EVENT_WRITE_DONE_V2: {
-		uint32_t *ptrmem = (uint32_t *)&param;
-
-		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
-		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
-		prtd->pcm_irq_pos += prtd->pcm_count;
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-		else
-			if (substream->timer_running)
-				snd_timer_interrupt(substream->timer, 1);
-		atomic_inc(&prtd->out_count);
-		wake_up(&the_locks.write_wait);
-		if (!atomic_read(&prtd->start)) {
-			atomic_set(&prtd->pending_buffer, 1);
-			break;
-		}
-		atomic_set(&prtd->pending_buffer, 0);
-
-		/*
-		 * check for underrun
-		 */
-		snd_pcm_stream_lock_irq(substream);
-		if (runtime->status->hw_ptr >= runtime->control->appl_ptr) {
-			runtime->render_flag |= SNDRV_RENDER_STOPPED;
-			stop_playback = 1;
-		}
-		snd_pcm_stream_unlock_irq(substream);
-
-		if (stop_playback) {
-			pr_err("underrun! render stopped\n");
-			break;
-		}
-
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		temp = buf[0].phys + (prtd->out_head * prtd->pcm_count);
-		pr_debug("%s:writing buffer[%d] from 0x%pK\n",
-			__func__, prtd->out_head, &temp);
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-		if (buffer_length == 0) {
-			pr_debug("Received a zero length buffer-break out");
-			break;
-		}
-		param.paddr = temp + output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		for (i = 0; i < sizeof(struct audio_aio_write_param)/4;
-					i++, ++ptrmem)
-			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-		break;
-	}
-	case ASM_DATA_EVENT_RENDERED_EOS:
-		pr_debug("ASM_DATA_CMDRSP_EOS\n");
-		if (atomic_read(&prtd->eos)) {
-			pr_debug("ASM_DATA_CMDRSP_EOS wake up\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.eos_wait);
-			atomic_set(&prtd->eos, 0);
-		}
-		break;
-	case ASM_DATA_EVENT_READ_DONE_V2: {
-		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
-		pr_debug("buf = %pK, data = 0x%X, *data = %pK,\n"
-			 "prtd->pcm_irq_pos = %d\n",
-				prtd->audio_client->port[OUT].buf,
-			 *(uint32_t *)prtd->audio_client->port[OUT].buf->data,
-				prtd->audio_client->port[OUT].buf->data,
-				prtd->pcm_irq_pos);
-
-		memcpy(prtd->audio_client->port[OUT].buf->data +
-			   prtd->pcm_irq_pos, (ptrmem + READDONE_IDX_SIZE),
-			   COMPRE_CAPTURE_HEADER_SIZE);
-		pr_debug("buf = %pK, updated data = 0x%X, *data = %pK\n",
-				prtd->audio_client->port[OUT].buf,
-			*(uint32_t *)(prtd->audio_client->port[OUT].buf->data +
-				prtd->pcm_irq_pos),
-				prtd->audio_client->port[OUT].buf->data);
-		if (!atomic_read(&prtd->start))
-			break;
-		pr_debug("frame size=%d, buffer = 0x%X\n",
-				ptrmem[READDONE_IDX_SIZE],
-				ptrmem[READDONE_IDX_BUFADD_LSW]);
-		if (ptrmem[READDONE_IDX_SIZE] > COMPRE_CAPTURE_MAX_FRAME_SIZE) {
-			pr_err("Frame length exceeded the max length");
-			break;
-		}
-		buf = prtd->audio_client->port[OUT].buf;
-
-		pr_debug("pcm_irq_pos=%d, buf[0].phys = 0x%pK\n",
-				prtd->pcm_irq_pos, &buf[0].phys);
-		read_param.len = prtd->pcm_count - COMPRE_CAPTURE_HEADER_SIZE;
-		read_param.paddr = buf[0].phys +
-			prtd->pcm_irq_pos + COMPRE_CAPTURE_HEADER_SIZE;
-		prtd->pcm_irq_pos += prtd->pcm_count;
-
-		if (atomic_read(&prtd->start))
-			snd_pcm_period_elapsed(substream);
-
-		q6asm_async_read(prtd->audio_client, &read_param);
-		break;
-	}
-	case APR_BASIC_RSP_RESULT: {
-		switch (payload[0]) {
-		case ASM_SESSION_CMD_RUN_V2: {
-			if (substream->stream
-				!= SNDRV_PCM_STREAM_PLAYBACK) {
-				atomic_set(&prtd->start, 1);
-				break;
-			}
-			if (!atomic_read(&prtd->pending_buffer))
-				break;
-			pr_debug("%s: writing %d bytes of buffer[%d] to dsp\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-			buf = prtd->audio_client->port[IN].buf;
-			pr_debug("%s: writing buffer[%d] from 0x%pK head %d count %d\n",
-				__func__, prtd->out_head, &buf[0].phys,
-				prtd->pcm_count, prtd->out_head);
-			if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-				time_stamp_flag = SET_TIMESTAMP;
-			else
-				time_stamp_flag = NO_TIMESTAMP;
-			memcpy(&output_meta_data, (char *)(buf->data +
-				prtd->out_head * prtd->pcm_count),
-				COMPRE_OUTPUT_METADATA_SIZE);
-			buffer_length = output_meta_data.frame_size;
-			pr_debug("meta_data_length: %d, frame_length: %d\n",
-				 output_meta_data.meta_data_length,
-				 output_meta_data.frame_size);
-			pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-				 output_meta_data.timestamp_msw,
-				 output_meta_data.timestamp_lsw);
-			param.paddr = buf[prtd->out_head].phys
-					+ output_meta_data.meta_data_length;
-			param.len = buffer_length;
-			param.msw_ts = output_meta_data.timestamp_msw;
-			param.lsw_ts = output_meta_data.timestamp_lsw;
-			param.flags = time_stamp_flag;
-			param.uid = prtd->session_id;
-			param.metadata_len = COMPRE_OUTPUT_METADATA_SIZE;
-			if (q6asm_async_write(prtd->audio_client,
-						&param) < 0)
-				pr_err("%s:q6asm_async_write failed\n",
-					__func__);
-			else
-				prtd->out_head =
-					(prtd->out_head + 1)
-					& (runtime->periods - 1);
-			atomic_set(&prtd->pending_buffer, 0);
-		}
-			break;
-		case ASM_STREAM_CMD_FLUSH:
-			pr_debug("ASM_STREAM_CMD_FLUSH\n");
-			prtd->cmd_ack = 1;
-			wake_up(&the_locks.flush_wait);
-			break;
-		default:
-			break;
-		}
-		break;
-	}
-	default:
-		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
-		break;
-	}
-}
-
-static int msm_compr_send_ddp_cfg(struct audio_client *ac,
-					struct snd_dec_ddp *ddp)
-{
-	int i, rc;
-
-	pr_debug("%s\n", __func__);
-
-	if (ddp->params_length / 2 > SND_DEC_DDP_MAX_PARAMS) {
-		pr_err("%s: Invalid number of params %u, max allowed %u\n",
-			__func__, ddp->params_length / 2,
-			SND_DEC_DDP_MAX_PARAMS);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < ddp->params_length/2; i++) {
-		rc = q6asm_ds1_set_endp_params(ac, ddp->params_id[i],
-						ddp->params_value[i]);
-		if (rc) {
-			pr_err("sending params_id: %d failed\n",
-				ddp->params_id[i]);
-			return rc;
-		}
-	}
-	return 0;
-}
-
-static int msm_compr_playback_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_pcm_hw_params *params;
-	struct asm_aac_cfg aac_cfg;
-	uint16_t bits_per_sample = 16;
-	int ret;
-
-	struct asm_softpause_params softpause = {
-		.enable = SOFT_PAUSE_ENABLE,
-		.period = SOFT_PAUSE_PERIOD,
-		.step = SOFT_PAUSE_STEP,
-		.rampingcurve = SOFT_PAUSE_CURVE_LINEAR,
-	};
-	struct asm_softvolume_params softvol = {
-		.period = SOFT_VOLUME_PERIOD,
-		.step = SOFT_VOLUME_STEP,
-		.rampingcurve = SOFT_VOLUME_CURVE_LINEAR,
-	};
-
-	pr_debug("%s\n", __func__);
-
-	params = &soc_prtd->dpcm[substream->stream].hw_params;
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	ret = q6asm_open_write_v2(prtd->audio_client,
-			compr->codec, bits_per_sample);
-	if (ret < 0) {
-		pr_err("%s: Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-	msm_pcm_routing_reg_phy_stream(
-			soc_prtd->dai_link->id,
-			prtd->audio_client->perf_mode,
-			prtd->session_id,
-			substream->stream);
-	/*
-	 * the number of channels are required to call volume api
-	 * accoridngly. So, get channels from hw params
-	 */
-	if ((params_channels(params) > 0) &&
-			(params_periods(params) <= runtime->hw.channels_max))
-		prtd->channel_mode = params_channels(params);
-
-	ret = q6asm_set_softpause(prtd->audio_client, &softpause);
-	if (ret < 0)
-		pr_err("%s: Send SoftPause Param failed ret=%d\n",
-				__func__, ret);
-	ret = q6asm_set_softvolume(prtd->audio_client, &softvol);
-	if (ret < 0)
-		pr_err("%s: Send SoftVolume Param failed ret=%d\n",
-				__func__, ret);
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-			(COMPRESSED_IO | ASYNC_IO_MODE));
-	if (ret < 0) {
-		pr_err("%s: Set IO mode failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-	prtd->out_head = 0;
-	atomic_set(&prtd->out_count, runtime->periods);
-
-	if (prtd->enabled)
-		return 0;
-
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_MP3:
-		/* No media format block for mp3 */
-		break;
-	case SND_AUDIOCODEC_AAC:
-		pr_debug("%s: SND_AUDIOCODEC_AAC\n", __func__);
-		memset(&aac_cfg, 0x0, sizeof(struct asm_aac_cfg));
-		aac_cfg.aot = AAC_ENC_MODE_EAAC_P;
-		aac_cfg.format = 0x03;
-		aac_cfg.ch_cfg = runtime->channels;
-		aac_cfg.sample_rate =  runtime->rate;
-		ret = q6asm_media_format_block_aac(prtd->audio_client,
-					&aac_cfg);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed\n", __func__);
-		break;
-	case SND_AUDIOCODEC_AC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_AC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	case SND_AUDIOCODEC_EAC3: {
-		struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-		pr_debug("%s: SND_AUDIOCODEC_EAC3\n", __func__);
-		ret = msm_compr_send_ddp_cfg(prtd->audio_client, ddp);
-		if (ret < 0)
-			pr_err("%s: DDP CMD CFG failed\n", __func__);
-		break;
-	}
-	default:
-		return -EINVAL;
-	}
-
-	prtd->enabled = 1;
-	prtd->cmd_ack = 0;
-	prtd->cmd_interrupt = 0;
-
-	return 0;
-}
-
-static int msm_compr_capture_prepare(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_buffer *buf = prtd->audio_client->port[OUT].buf;
-	struct snd_codec *codec = &compr->info.codec_param.codec;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct audio_aio_read_param read_param;
-	uint16_t bits_per_sample = 16;
-	int ret = 0;
-	int i;
-
-	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
-	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
-	prtd->pcm_irq_pos = 0;
-
-	if (runtime->format == SNDRV_PCM_FORMAT_S24_LE)
-		bits_per_sample = 24;
-
-	if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		compr->info.codec_param.codec.id =
-			SND_AUDIOCODEC_AMRWB;
-	}
-	switch (compr->info.codec_param.codec.id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("q6asm_open_read(FORMAT_AMRWB)\n");
-		ret = q6asm_open_read(prtd->audio_client,
-				FORMAT_AMRWB);
-		if (ret < 0) {
-			pr_err("%s: compressed Session out open failed\n",
-					__func__);
-			return -ENOMEM;
-		}
-		pr_debug("msm_pcm_routing_reg_phy_stream\n");
-		msm_pcm_routing_reg_phy_stream(
-				soc_prtd->dai_link->id,
-				prtd->audio_client->perf_mode,
-				prtd->session_id, substream->stream);
-		break;
-	default:
-		pr_debug("q6asm_open_read_compressed(COMPRESSED_META_DATA_MODE)\n");
-		/*
-		 * ret = q6asm_open_read_compressed(prtd->audio_client,
-		 * MAX_NUM_FRAMES_PER_BUFFER,
-		 * COMPRESSED_META_DATA_MODE);
-		 */
-			ret = -EINVAL;
-			break;
-	}
-
-	if (ret < 0) {
-		pr_err("%s: compressed Session out open failed\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	ret = q6asm_set_io_mode(prtd->audio_client,
-		(COMPRESSED_IO | ASYNC_IO_MODE));
-		if (ret < 0) {
-			pr_err("%s: Set IO mode failed\n", __func__);
-				return -ENOMEM;
-		}
-
-	if (!msm_compr_capture_codecs(codec->id)) {
-		/*
-		 * request codec invalid or not supported,
-		 * use default compress format
-		 */
-		codec->id = SND_AUDIOCODEC_AMRWB;
-	}
-	/* rate and channels are sent to audio driver */
-	prtd->samp_rate = runtime->rate;
-	prtd->channel_mode = runtime->channels;
-
-	if (prtd->enabled)
-		return ret;
-	read_param.len = prtd->pcm_count;
-
-	switch (codec->id) {
-	case SND_AUDIOCODEC_AMRWB:
-		pr_debug("SND_AUDIOCODEC_AMRWB\n");
-		ret = q6asm_enc_cfg_blk_amrwb(prtd->audio_client,
-			MAX_NUM_FRAMES_PER_BUFFER,
-			/*
-			 * use fixed band mode and dtx mode
-			 * band mode - 23.85 kbps
-			 */
-			AMR_WB_BAND_MODE,
-			/* dtx mode - disable */
-			AMR_WB_DTX_MODE);
-		if (ret < 0)
-			pr_err("%s: CMD Format block failed: %d\n",
-				__func__, ret);
-		break;
-	default:
-		pr_debug("No config for codec %d\n", codec->id);
-	}
-	pr_debug("%s: Samp_rate = %d, Channel = %d, pcm_size = %d,\n"
-			 "pcm_count = %d, periods = %d\n",
-			 __func__, prtd->samp_rate, prtd->channel_mode,
-			 prtd->pcm_size, prtd->pcm_count, runtime->periods);
-
-	for (i = 0; i < runtime->periods; i++) {
-		read_param.uid = i;
-		switch (codec->id) {
-		case SND_AUDIOCODEC_AMRWB:
-			read_param.len = prtd->pcm_count
-					- COMPRE_CAPTURE_HEADER_SIZE;
-			read_param.paddr = buf[i].phys
-					+ COMPRE_CAPTURE_HEADER_SIZE;
-			pr_debug("Push buffer [%d] to DSP, paddr: %pK, vaddr: %pK\n",
-					i, &read_param.paddr,
-					buf[i].data);
-			q6asm_async_read(prtd->audio_client, &read_param);
-			break;
-		default:
-			read_param.paddr = buf[i].phys;
-			/* q6asm_async_read_compressed(prtd->audio_client,
-			 * &read_param);
-			 */
-			pr_debug("%s: To add support for read compressed\n",
-								__func__);
-			ret = -EINVAL;
-			break;
-		}
-	}
-	prtd->periods = runtime->periods;
-
-	prtd->enabled = 1;
-
-	return ret;
-}
-
-static int msm_compr_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	int ret = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	pr_debug("%s\n", __func__);
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-		prtd->pcm_irq_pos = 0;
-
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			if (!msm_compr_capture_codecs(
-				compr->info.codec_param.codec.id)) {
-				/*
-				 * request codec invalid or not supported,
-				 * use default compress format
-				 */
-				compr->info.codec_param.codec.id =
-				SND_AUDIOCODEC_AMRWB;
-			}
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->pending_buffer, 1);
-		/* fallthrough */
-	case SNDRV_PCM_TRIGGER_RESUME:
-	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		pr_debug("%s: Trigger start\n", __func__);
-		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
-		atomic_set(&prtd->start, 1);
-		break;
-	case SNDRV_PCM_TRIGGER_STOP:
-		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
-		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-			switch (compr->info.codec_param.codec.id) {
-			case SND_AUDIOCODEC_AMRWB:
-				break;
-			default:
-				msm_pcm_routing_reg_psthr_stream(
-					soc_prtd->dai_link->id,
-					prtd->session_id, substream->stream);
-				break;
-			}
-		}
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	case SNDRV_PCM_TRIGGER_SUSPEND:
-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
-		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
-		atomic_set(&prtd->start, 0);
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		break;
-	default:
-		ret = -EINVAL;
-		break;
-	}
-
-	return ret;
-}
-
-static void populate_codec_list(struct compr_audio *compr,
-		struct snd_pcm_runtime *runtime)
-{
-	pr_debug("%s\n", __func__);
-	/* MP3 Block */
-	compr->info.compr_cap.num_codecs = 5;
-	compr->info.compr_cap.min_fragment_size = runtime->hw.period_bytes_min;
-	compr->info.compr_cap.max_fragment_size = runtime->hw.period_bytes_max;
-	compr->info.compr_cap.min_fragments = runtime->hw.periods_min;
-	compr->info.compr_cap.max_fragments = runtime->hw.periods_max;
-	compr->info.compr_cap.codecs[0] = SND_AUDIOCODEC_MP3;
-	compr->info.compr_cap.codecs[1] = SND_AUDIOCODEC_AAC;
-	compr->info.compr_cap.codecs[2] = SND_AUDIOCODEC_AC3;
-	compr->info.compr_cap.codecs[3] = SND_AUDIOCODEC_EAC3;
-	compr->info.compr_cap.codecs[4] = SND_AUDIOCODEC_AMRWB;
-	/* Add new codecs here */
-}
-
-static int msm_compr_open(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr;
-	struct msm_audio *prtd;
-	int ret = 0;
-
-	pr_debug("%s\n", __func__);
-	compr = kzalloc(sizeof(struct compr_audio), GFP_KERNEL);
-	if (compr == NULL) {
-		pr_err("Failed to allocate memory for msm_audio\n");
-		return -ENOMEM;
-	}
-	prtd = &compr->prtd;
-	prtd->substream = substream;
-	runtime->render_flag = SNDRV_DMA_MODE;
-	prtd->audio_client = q6asm_audio_client_alloc(
-				(app_cb)compr_event_handler, compr);
-	if (!prtd->audio_client) {
-		pr_info("%s: Could not allocate memory\n", __func__);
-		kfree(prtd);
-		return -ENOMEM;
-	}
-
-	prtd->audio_client->perf_mode = false;
-	pr_info("%s: session ID %d\n", __func__, prtd->audio_client->session);
-
-	prtd->session_id = prtd->audio_client->session;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		runtime->hw = msm_compr_hardware_playback;
-		prtd->cmd_ack = 1;
-	} else {
-		runtime->hw = msm_compr_hardware_capture;
-	}
-
-
-	ret = snd_pcm_hw_constraint_list(runtime, 0,
-			SNDRV_PCM_HW_PARAM_RATE,
-			&constraints_sample_rates);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_list failed\n");
-	/* Ensure that buffer size is a multiple of period size */
-	ret = snd_pcm_hw_constraint_integer(runtime,
-			    SNDRV_PCM_HW_PARAM_PERIODS);
-	if (ret < 0)
-		pr_info("snd_pcm_hw_constraint_integer failed\n");
-
-	prtd->dsp_cnt = 0;
-	atomic_set(&prtd->pending_buffer, 1);
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		compr->codec = FORMAT_MP3;
-	populate_codec_list(compr, runtime);
-	runtime->private_data = compr;
-	atomic_set(&prtd->eos, 0);
-	return 0;
-}
-
-static int compressed_set_volume(struct msm_audio *prtd, uint32_t volume)
-{
-	int rc = 0;
-	int avg_vol = 0;
-	int lgain = (volume >> 16) & 0xFFFF;
-	int rgain = volume & 0xFFFF;
-
-	if (prtd && prtd->audio_client) {
-		pr_debug("%s: channels %d volume 0x%x\n", __func__,
-			prtd->channel_mode, volume);
-		if ((prtd->channel_mode == 2) &&
-			(lgain != rgain)) {
-			pr_debug("%s: call q6asm_set_lrgain\n", __func__);
-			rc = q6asm_set_lrgain(prtd->audio_client, lgain, rgain);
-		} else {
-			avg_vol = (lgain + rgain)/2;
-			pr_debug("%s: call q6asm_set_volume\n", __func__);
-			rc = q6asm_set_volume(prtd->audio_client, avg_vol);
-		}
-		if (rc < 0) {
-			pr_err("%s: Send Volume command failed rc=%d\n",
-				__func__, rc);
-		}
-	}
-	return rc;
-}
-
-static int msm_compr_playback_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = 0;
-
-	pr_debug("%s\n", __func__);
-
-	dir = IN;
-	atomic_set(&prtd->pending_buffer, 0);
-
-	prtd->pcm_irq_pos = 0;
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-		msm_pcm_routing_dereg_phy_stream(
-			soc_prtd->dai_link->id,
-			SNDRV_PCM_STREAM_PLAYBACK);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_capture_close(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	int dir = OUT;
-
-	pr_debug("%s\n", __func__);
-	atomic_set(&prtd->pending_buffer, 0);
-	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
-	q6asm_audio_client_buf_free_contiguous(dir,
-				prtd->audio_client);
-	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->id,
-				SNDRV_PCM_STREAM_CAPTURE);
-	q6asm_audio_client_free(prtd->audio_client);
-	kfree(prtd);
-	return 0;
-}
-
-static int msm_compr_close(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_close(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_close(substream);
-	return ret;
-}
-
-static int msm_compr_prepare(struct snd_pcm_substream *substream)
-{
-	int ret = 0;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		ret = msm_compr_playback_prepare(substream);
-	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		ret = msm_compr_capture_prepare(substream);
-	return ret;
-}
-
-static snd_pcm_uframes_t msm_compr_pointer(struct snd_pcm_substream *substream)
-{
-
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-
-	if (prtd->pcm_irq_pos >= prtd->pcm_size)
-		prtd->pcm_irq_pos = 0;
-
-	pr_debug("%s: pcm_irq_pos = %d, pcm_size = %d, sample_bits = %d,\n"
-			 "frame_bits = %d\n", __func__, prtd->pcm_irq_pos,
-			 prtd->pcm_size, runtime->sample_bits,
-			 runtime->frame_bits);
-	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
-}
-
-static int msm_compr_mmap(struct snd_pcm_substream *substream,
-				struct vm_area_struct *vma)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct msm_audio *prtd = runtime->private_data;
-	struct audio_client *ac = prtd->audio_client;
-	struct audio_port_data *apd = ac->port;
-	struct audio_buffer *ab;
-	int dir = -1;
-
-	prtd->mmap_flag = 1;
-	runtime->render_flag = SNDRV_NON_DMA_MODE;
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	ab = &(apd[dir].buf[0]);
-
-	return msm_audio_ion_mmap(ab, vma);
-}
-
-static int msm_compr_hw_params(struct snd_pcm_substream *substream,
-				struct snd_pcm_hw_params *params)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
-	struct audio_buffer *buf;
-	int dir, ret;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dir = IN;
-	else
-		dir = OUT;
-	/* Modifying kernel hardware params based on userspace config */
-	if (params_periods(params) > 0 &&
-		(params_periods(params) != runtime->hw.periods_max)) {
-		runtime->hw.periods_max = params_periods(params);
-	}
-	if (params_period_bytes(params) > 0 &&
-		(params_period_bytes(params) != runtime->hw.period_bytes_min)) {
-		runtime->hw.period_bytes_min = params_period_bytes(params);
-	}
-	runtime->hw.buffer_bytes_max =
-			runtime->hw.period_bytes_min * runtime->hw.periods_max;
-	pr_debug("allocate %zd buffers each of size %d\n",
-		runtime->hw.period_bytes_min,
-		runtime->hw.periods_max);
-	ret = q6asm_audio_client_buf_alloc_contiguous(dir,
-			prtd->audio_client,
-			runtime->hw.period_bytes_min,
-			runtime->hw.periods_max);
-	if (ret < 0) {
-		pr_err("Audio Start: Buffer Allocation failed rc = %d\n",
-						ret);
-		return -ENOMEM;
-	}
-	buf = prtd->audio_client->port[dir].buf;
-
-	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
-	dma_buf->dev.dev = substream->pcm->card->dev;
-	dma_buf->private_data = NULL;
-	dma_buf->area = buf[0].data;
-	dma_buf->addr =  buf[0].phys;
-	dma_buf->bytes = runtime->hw.buffer_bytes_max;
-
-	pr_debug("%s: buf[%pK]dma_buf->area[%pK]dma_buf->addr[%pK]\n"
-		 "dma_buf->bytes[%zd]\n", __func__,
-		 (void *)buf, (void *)dma_buf->area,
-		 &dma_buf->addr, dma_buf->bytes);
-	if (!dma_buf->area)
-		return -ENOMEM;
-
-	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-	return 0;
-}
-
-static int msm_compr_ioctl_shared(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int rc = 0;
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	uint64_t timestamp;
-	uint64_t temp;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp *tstamp;
-
-		pr_debug("SNDRV_COMPRESS_TSTAMP\n");
-		tstamp = arg;
-		memset(tstamp, 0x0, sizeof(*tstamp));
-		rc = q6asm_get_session_time(prtd->audio_client, &timestamp);
-		if (rc < 0) {
-			pr_err("%s: Get Session Time return value =%lld\n",
-				__func__, timestamp);
-			return -EAGAIN;
-		}
-		temp = (timestamp * 2 * runtime->channels);
-		temp = temp * (runtime->rate/1000);
-		temp = div_u64(temp, 1000);
-		tstamp->sampling_rate = runtime->rate;
-		tstamp->timestamp = timestamp;
-		pr_debug("%s: bytes_consumed:,timestamp = %lld,\n",
-						__func__,
-			tstamp->timestamp);
-		return 0;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps *caps;
-
-		caps = arg;
-		memset(caps, 0, sizeof(*caps));
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		memcpy(caps, &compr->info.compr_cap, sizeof(*caps));
-		return 0;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS:
-		pr_debug("SNDRV_COMPRESS_SET_PARAMS:\n");
-		memcpy(&compr->info.codec_param, (void *) arg,
-			sizeof(struct snd_compr_params));
-		switch (compr->info.codec_param.codec.id) {
-		case SND_AUDIOCODEC_MP3:
-			/* For MP3 we dont need any other parameter */
-			pr_debug("SND_AUDIOCODEC_MP3\n");
-			compr->codec = FORMAT_MP3;
-			break;
-		case SND_AUDIOCODEC_AAC:
-			pr_debug("SND_AUDIOCODEC_AAC\n");
-			compr->codec = FORMAT_MPEG4_AAC;
-			break;
-		case SND_AUDIOCODEC_AC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/*MAX is 36*sizeof(int) this should not happen*/
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_AC3\n");
-			compr->codec = FORMAT_AC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < params_length/sizeof(int); i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		case SND_AUDIOCODEC_EAC3: {
-			char params_value[MAX_AC3_PARAM_SIZE];
-			int *params_value_data = (int *)params_value;
-			/* 36 is the max param length for ddp */
-			int i;
-			struct snd_dec_ddp *ddp =
-				&compr->info.codec_param.codec.options.ddp;
-			uint32_t params_length = 0;
-
-			memset(params_value, 0, MAX_AC3_PARAM_SIZE);
-			/* check integer overflow */
-			if (ddp->params_length > UINT_MAX/sizeof(int)) {
-				pr_err("%s: Integer overflow ddp->params_length %d\n",
-				__func__, ddp->params_length);
-				return -EINVAL;
-			}
-			params_length = ddp->params_length*sizeof(int);
-			if (params_length > MAX_AC3_PARAM_SIZE) {
-				/*MAX is 36*sizeof(int) this should not happen*/
-				pr_err("%s: params_length(%d) is greater than %zd\n",
-				__func__, params_length, MAX_AC3_PARAM_SIZE);
-				return -EINVAL;
-			}
-			pr_debug("SND_AUDIOCODEC_EAC3\n");
-			compr->codec = FORMAT_EAC3;
-			pr_debug("params_length: %d\n", ddp->params_length);
-			for (i = 0; i < ddp->params_length; i++)
-				pr_debug("params_value[%d]: %x\n", i,
-					params_value_data[i]);
-			for (i = 0; i < ddp->params_length/2; i++) {
-				ddp->params_id[i] = params_value_data[2*i];
-				ddp->params_value[i] = params_value_data[2*i+1];
-			}
-			if (atomic_read(&prtd->start)) {
-				rc = msm_compr_send_ddp_cfg(prtd->audio_client,
-								ddp);
-				if (rc < 0)
-					pr_err("%s: DDP CMD CFG failed\n",
-						__func__);
-			}
-			break;
-		}
-		default:
-			pr_debug("FORMAT_LINEAR_PCM\n");
-			compr->codec = FORMAT_LINEAR_PCM;
-			break;
-		}
-		return 0;
-	case SNDRV_PCM_IOCTL1_RESET:
-		pr_debug("SNDRV_PCM_IOCTL1_RESET\n");
-		/* Flush only when session is started during CAPTURE,
-		 * while PLAYBACK has no such restriction.
-		 */
-		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
-			  (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
-						atomic_read(&prtd->start))) {
-			if (atomic_read(&prtd->eos)) {
-				prtd->cmd_interrupt = 1;
-				wake_up(&the_locks.eos_wait);
-				atomic_set(&prtd->eos, 0);
-			}
-
-			/* A unlikely race condition possible with FLUSH
-			 * DRAIN if ack is set by flush and reset by drain
-			 */
-			prtd->cmd_ack = 0;
-			rc = q6asm_cmd(prtd->audio_client, CMD_FLUSH);
-			if (rc < 0) {
-				pr_err("%s: flush cmd failed rc=%d\n",
-					__func__, rc);
-				return rc;
-			}
-			rc = wait_event_timeout(the_locks.flush_wait,
-				prtd->cmd_ack, 5 * HZ);
-			if (!rc)
-				pr_err("Flush cmd timeout\n");
-			prtd->pcm_irq_pos = 0;
-		}
-		break;
-	case SNDRV_COMPRESS_DRAIN:
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN\n", __func__);
-		if (atomic_read(&prtd->pending_buffer)) {
-			pr_debug("%s: no pending writes, drain would block\n",
-			 __func__);
-			return -EWOULDBLOCK;
-		}
-
-		atomic_set(&prtd->eos, 1);
-		atomic_set(&prtd->pending_buffer, 0);
-		prtd->cmd_ack = 0;
-		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
-		/* Wait indefinitely for  DRAIN. Flush can also signal this*/
-		rc = wait_event_interruptible(the_locks.eos_wait,
-			(prtd->cmd_ack || prtd->cmd_interrupt));
-
-		if (rc < 0)
-			pr_err("EOS cmd interrupted\n");
-		pr_debug("%s: SNDRV_COMPRESS_DRAIN  out of wait\n", __func__);
-
-		if (prtd->cmd_interrupt)
-			rc = -EINTR;
-
-		prtd->cmd_interrupt = 0;
-		return rc;
-	default:
-		break;
-	}
-	return snd_pcm_lib_ioctl(substream, cmd, arg);
-}
-#ifdef CONFIG_COMPAT
-struct snd_enc_wma32 {
-	u32 super_block_align; /* WMA Type-specific data */
-	u32 encodeopt1;
-	u32 encodeopt2;
-};
-
-struct snd_enc_vorbis32 {
-	s32 quality;
-	u32 managed;
-	u32 max_bit_rate;
-	u32 min_bit_rate;
-	u32 downmix;
-};
-
-struct snd_enc_real32 {
-	u32 quant_bits;
-	u32 start_region;
-	u32 num_regions;
-};
-
-struct snd_enc_flac32 {
-	u32 num;
-	u32 gain;
-};
-
-struct snd_enc_generic32 {
-	u32 bw;	/* encoder bandwidth */
-	s32 reserved[15];
-};
-struct snd_dec_ddp32 {
-	u32 params_length;
-	u32 params_id[18];
-	u32 params_value[18];
-};
-
-union snd_codec_options32 {
-	struct snd_enc_wma32 wma;
-	struct snd_enc_vorbis32 vorbis;
-	struct snd_enc_real32 real;
-	struct snd_enc_flac32 flac;
-	struct snd_enc_generic32 generic;
-	struct snd_dec_ddp32 ddp;
-};
-
-struct snd_codec32 {
-	u32 id;
-	u32 ch_in;
-	u32 ch_out;
-	u32 sample_rate;
-	u32 bit_rate;
-	u32 rate_control;
-	u32 profile;
-	u32 level;
-	u32 ch_mode;
-	u32 format;
-	u32 align;
-	union snd_codec_options32 options;
-	u32 reserved[3];
-};
-
-struct snd_compressed_buffer32 {
-	u32 fragment_size;
-	u32 fragments;
-};
-
-struct snd_compr_params32 {
-	struct snd_compressed_buffer32 buffer;
-	struct snd_codec32 codec;
-	u8 no_wake_mode;
-};
-
-struct snd_compr_caps32 {
-	u32 num_codecs;
-	u32 direction;
-	u32 min_fragment_size;
-	u32 max_fragment_size;
-	u32 min_fragments;
-	u32 max_fragments;
-	u32 codecs[MAX_NUM_CODECS];
-	u32 reserved[11];
-};
-struct snd_compr_tstamp32 {
-	u32 byte_offset;
-	u32 copied_total;
-	compat_ulong_t pcm_frames;
-	compat_ulong_t pcm_io_frames;
-	u32 sampling_rate;
-	compat_u64 timestamp;
-};
-enum {
-	SNDRV_COMPRESS_TSTAMP32 = _IOR('C', 0x20, struct snd_compr_tstamp32),
-	SNDRV_COMPRESS_GET_CAPS32 = _IOWR('C', 0x10, struct snd_compr_caps32),
-	SNDRV_COMPRESS_SET_PARAMS32 =
-	_IOW('C', 0x12, struct snd_compr_params32),
-};
-static int msm_compr_compat_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP32: {
-		struct snd_compr_tstamp tstamp;
-		struct snd_compr_tstamp32 tstamp32;
-
-		memset(&tstamp, 0, sizeof(tstamp));
-		memset(&tstamp32, 0, sizeof(tstamp32));
-		cmd = SNDRV_COMPRESS_TSTAMP;
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err) {
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		tstamp32.byte_offset = tstamp.byte_offset;
-		tstamp32.copied_total = tstamp.copied_total;
-		tstamp32.pcm_frames = tstamp.pcm_frames;
-		tstamp32.pcm_io_frames = tstamp.pcm_io_frames;
-		tstamp32.sampling_rate = tstamp.sampling_rate;
-		tstamp32.timestamp = tstamp.timestamp;
-		if (copy_to_user(arg, &tstamp32, sizeof(tstamp32))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS32: {
-		struct snd_compr_caps caps;
-		struct snd_compr_caps32 caps32;
-		u32 i;
-
-		memset(&caps, 0, sizeof(caps));
-		memset(&caps32, 0, sizeof(caps32));
-		cmd = SNDRV_COMPRESS_GET_CAPS;
-		err = msm_compr_ioctl_shared(substream, cmd, &caps);
-		if (err) {
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-			goto bail_out;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS_32\n");
-		if (!err && caps.num_codecs >= MAX_NUM_CODECS) {
-			pr_err("%s: Invalid number of codecs\n", __func__);
-			err = -EINVAL;
-			goto bail_out;
-		}
-		caps32.direction = caps.direction;
-		caps32.max_fragment_size = caps.max_fragment_size;
-		caps32.max_fragments = caps.max_fragments;
-		caps32.min_fragment_size = caps.min_fragment_size;
-		caps32.num_codecs = caps.num_codecs;
-		for (i = 0; i < caps.num_codecs; i++)
-			caps32.codecs[i] = caps.codecs[i];
-		if (copy_to_user(arg, &caps32, sizeof(caps32))) {
-			pr_err("%s: copytouser failed COMPRESS_GETCAPS32\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS32: {
-		struct snd_compr_params32 params32;
-		struct snd_compr_params params;
-
-		memset(&params32, 0, sizeof(params32));
-		memset(&params, 0, sizeof(params));
-		cmd = SNDRV_COMPRESS_SET_PARAMS;
-		if (copy_from_user(&params32, arg, sizeof(params32))) {
-			pr_err("%s: copyfromuser failed SET_PARAMS32\n",
-			__func__);
-			err = -EFAULT;
-			goto bail_out;
-		}
-		params.no_wake_mode = params32.no_wake_mode;
-		params.codec.id = params32.codec.id;
-		params.codec.ch_in = params32.codec.ch_in;
-		params.codec.ch_out = params32.codec.ch_out;
-		params.codec.sample_rate = params32.codec.sample_rate;
-		params.codec.bit_rate = params32.codec.bit_rate;
-		params.codec.rate_control = params32.codec.rate_control;
-		params.codec.profile = params32.codec.profile;
-		params.codec.level = params32.codec.level;
-		params.codec.ch_mode = params32.codec.ch_mode;
-		params.codec.format = params32.codec.format;
-		params.codec.align = params32.codec.align;
-
-		switch (params.codec.id) {
-		case SND_AUDIOCODEC_WMA:
-		case SND_AUDIOCODEC_WMA_PRO:
-			params.codec.options.wma.encodeopt1 =
-			params32.codec.options.wma.encodeopt1;
-			params.codec.options.wma.encodeopt2 =
-			params32.codec.options.wma.encodeopt2;
-			params.codec.options.wma.super_block_align =
-			params32.codec.options.wma.super_block_align;
-		break;
-		case SND_AUDIOCODEC_VORBIS:
-			params.codec.options.vorbis.downmix =
-			params32.codec.options.vorbis.downmix;
-			params.codec.options.vorbis.managed =
-			params32.codec.options.vorbis.managed;
-			params.codec.options.vorbis.max_bit_rate =
-			params32.codec.options.vorbis.max_bit_rate;
-			params.codec.options.vorbis.min_bit_rate =
-			params32.codec.options.vorbis.min_bit_rate;
-			params.codec.options.vorbis.quality =
-			params32.codec.options.vorbis.quality;
-		break;
-		case SND_AUDIOCODEC_REAL:
-			params.codec.options.real.num_regions =
-			params32.codec.options.real.num_regions;
-			params.codec.options.real.quant_bits =
-			params32.codec.options.real.quant_bits;
-			params.codec.options.real.start_region =
-			params32.codec.options.real.start_region;
-		break;
-		case SND_AUDIOCODEC_FLAC:
-			params.codec.options.flac.gain =
-			params32.codec.options.flac.gain;
-			params.codec.options.flac.num =
-			params32.codec.options.flac.num;
-		break;
-		case SND_AUDIOCODEC_DTS:
-		case SND_AUDIOCODEC_DTS_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_LBR:
-		case SND_AUDIOCODEC_DTS_LBR_PASS_THROUGH:
-		case SND_AUDIOCODEC_DTS_TRANSCODE_LOOPBACK:
-		break;
-		case SND_AUDIOCODEC_AC3:
-		case SND_AUDIOCODEC_EAC3:
-			params.codec.options.ddp.params_length =
-			params32.codec.options.ddp.params_length;
-			memcpy(params.codec.options.ddp.params_value,
-			params32.codec.options.ddp.params_value,
-			sizeof(params32.codec.options.ddp.params_value));
-			memcpy(params.codec.options.ddp.params_id,
-			params32.codec.options.ddp.params_id,
-			sizeof(params32.codec.options.ddp.params_id));
-		break;
-		default:
-			params.codec.options.generic.bw =
-			params32.codec.options.generic.bw;
-		break;
-		}
-		if (!err)
-			err = msm_compr_ioctl_shared(substream, cmd, &params);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-bail_out:
-	return err;
-
-}
-#endif
-static int msm_compr_ioctl(struct snd_pcm_substream *substream,
-		unsigned int cmd, void *arg)
-{
-	int err = 0;
-
-	if (!substream) {
-		pr_err("%s: Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	pr_debug("%s called with cmd = %d\n", __func__, cmd);
-	switch (cmd) {
-	case SNDRV_COMPRESS_TSTAMP: {
-		struct snd_compr_tstamp tstamp;
-
-		if (!arg) {
-			pr_err("%s: Invalid params Tstamp\n", __func__);
-			return -EINVAL;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &tstamp);
-		if (err)
-			pr_err("%s: COMPRESS_TSTAMP failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &tstamp, sizeof(tstamp))) {
-			pr_err("%s: copytouser failed COMPRESS_TSTAMP\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_GET_CAPS: {
-		struct snd_compr_caps cap;
-
-		if (!arg) {
-			pr_err("%s: Invalid params getcaps\n", __func__);
-			return -EINVAL;
-		}
-		pr_debug("SNDRV_COMPRESS_GET_CAPS\n");
-		err = msm_compr_ioctl_shared(substream, cmd, &cap);
-		if (err)
-			pr_err("%s: GET_CAPS failed rc %d\n",
-			__func__, err);
-		if (!err && copy_to_user(arg, &cap, sizeof(cap))) {
-			pr_err("%s: copytouser failed GET_CAPS\n",
-			__func__);
-			err = -EFAULT;
-		}
-		break;
-	}
-	case SNDRV_COMPRESS_SET_PARAMS: {
-		struct snd_compr_params params;
-
-		if (!arg) {
-			pr_err("%s: Invalid params setparam\n", __func__);
-			return -EINVAL;
-		}
-		if (copy_from_user(&params, arg,
-			sizeof(struct snd_compr_params))) {
-			pr_err("%s: SET_PARAMS\n", __func__);
-			return -EFAULT;
-		}
-		err = msm_compr_ioctl_shared(substream, cmd, &params);
-		if (err)
-			pr_err("%s: SET_PARAMS failed rc %d\n",
-			__func__, err);
-		break;
-	}
-	default:
-		err = msm_compr_ioctl_shared(substream, cmd, arg);
-	}
-	return err;
-}
-
-static int msm_compr_restart(struct snd_pcm_substream *substream)
-{
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct compr_audio *compr = runtime->private_data;
-	struct msm_audio *prtd = &compr->prtd;
-	struct audio_aio_write_param param;
-	struct audio_buffer *buf = NULL;
-	struct output_meta_data_st output_meta_data;
-	int time_stamp_flag = 0;
-	int buffer_length = 0;
-
-	pr_debug("%s, trigger restart\n", __func__);
-
-	if (runtime->render_flag & SNDRV_RENDER_STOPPED) {
-		buf = prtd->audio_client->port[IN].buf;
-		pr_debug("%s:writing %d bytes of buffer[%d] to dsp 2\n",
-				__func__, prtd->pcm_count, prtd->out_head);
-		pr_debug("%s:writing buffer[%d] from 0x%08x\n",
-				__func__, prtd->out_head,
-				((unsigned int)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)));
-
-		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
-			time_stamp_flag = SET_TIMESTAMP;
-		else
-			time_stamp_flag = NO_TIMESTAMP;
-		memcpy(&output_meta_data, (char *)(buf->data +
-			prtd->out_head * prtd->pcm_count),
-			COMPRE_OUTPUT_METADATA_SIZE);
-
-		buffer_length = output_meta_data.frame_size;
-		pr_debug("meta_data_length: %d, frame_length: %d\n",
-			 output_meta_data.meta_data_length,
-			 output_meta_data.frame_size);
-		pr_debug("timestamp_msw: %d, timestamp_lsw: %d\n",
-			 output_meta_data.timestamp_msw,
-			 output_meta_data.timestamp_lsw);
-
-		param.paddr = (unsigned long)buf[0].phys
-				+ (prtd->out_head * prtd->pcm_count)
-				+ output_meta_data.meta_data_length;
-		param.len = buffer_length;
-		param.msw_ts = output_meta_data.timestamp_msw;
-		param.lsw_ts = output_meta_data.timestamp_lsw;
-		param.flags = time_stamp_flag;
-		param.uid = prtd->session_id;
-		if (q6asm_async_write(prtd->audio_client,
-					&param) < 0)
-			pr_err("%s:q6asm_async_write failed\n",
-				__func__);
-		else
-			prtd->out_head =
-				(prtd->out_head + 1) & (runtime->periods - 1);
-
-		runtime->render_flag &= ~SNDRV_RENDER_STOPPED;
-		return 0;
-	}
-	return 0;
-}
-
-static int msm_compr_volume_ctl_put(struct snd_kcontrol *kcontrol,
-				    struct snd_ctl_elem_value *ucontrol)
-{
-	int rc = 0;
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-	int volume = ucontrol->value.integer.value[0];
-
-	pr_debug("%s: volume : %x\n", __func__, volume);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		rc = compressed_set_volume(prtd, volume);
-
-	return rc;
-}
-
-static int msm_compr_volume_ctl_get(struct snd_kcontrol *kcontrol,
-				  struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_pcm_volume *vol = snd_kcontrol_chip(kcontrol);
-	struct snd_pcm_substream *substream =
-			 vol->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
-	struct msm_audio *prtd;
-
-	pr_debug("%s\n", __func__);
-	if (!substream)
-		return -ENODEV;
-	if (!substream->runtime)
-		return 0;
-	prtd = substream->runtime->private_data;
-	if (prtd)
-		ucontrol->value.integer.value[0] = prtd->volume;
-	return 0;
-}
-
-static int msm_compr_add_controls(struct snd_soc_pcm_runtime *rtd)
-{
-	int ret = 0;
-	struct snd_pcm *pcm = rtd->pcm;
-	struct snd_pcm_volume *volume_info;
-	struct snd_kcontrol *kctl;
-
-	dev_dbg(rtd->dev, "%s, Volume cntrl add\n", __func__);
-	ret = snd_pcm_add_volume_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
-				      NULL, 1, rtd->dai_link->id,
-				      &volume_info);
-	if (ret < 0)
-		return ret;
-	kctl = volume_info->kctl;
-	kctl->put = msm_compr_volume_ctl_put;
-	kctl->get = msm_compr_volume_ctl_get;
-	kctl->tlv.p = compr_rx_vol_gain;
-	return 0;
-}
-
-static const struct snd_pcm_ops msm_compr_ops = {
-	.open	   = msm_compr_open,
-	.hw_params	= msm_compr_hw_params,
-	.close	  = msm_compr_close,
-	.ioctl	  = msm_compr_ioctl,
-	.prepare	= msm_compr_prepare,
-	.trigger	= msm_compr_trigger,
-	.pointer	= msm_compr_pointer,
-	.mmap		= msm_compr_mmap,
-	.restart	= msm_compr_restart,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl   = msm_compr_compat_ioctl,
-#endif
-};
-
-static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_card *card = rtd->card->snd_card;
-	int ret = 0;
-
-	if (!card->dev->coherent_dma_mask)
-		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-
-	ret = msm_compr_add_controls(rtd);
-	if (ret)
-		pr_err("%s, kctl add failed\n", __func__);
-	return ret;
-}
-
-static struct snd_soc_platform_driver msm_soc_platform = {
-	.ops		= &msm_compr_ops,
-	.pcm_new	= msm_asoc_pcm_new,
-};
-
-static int msm_compr_probe(struct platform_device *pdev)
-{
-
-	dev_info(&pdev->dev, "%s: dev name %s\n",
-			 __func__, dev_name(&pdev->dev));
-
-	return snd_soc_register_platform(&pdev->dev,
-				   &msm_soc_platform);
-}
-
-static int msm_compr_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
-}
-
-static const struct of_device_id msm_compr_dt_match[] = {
-	{.compatible = "qcom,msm-compr-dsp"},
-	{}
-};
-MODULE_DEVICE_TABLE(of, msm_compr_dt_match);
-
-static struct platform_driver msm_compr_driver = {
-	.driver = {
-		.name = "msm-compr-dsp",
-		.owner = THIS_MODULE,
-		.of_match_table = msm_compr_dt_match,
-	},
-	.probe = msm_compr_probe,
-	.remove = msm_compr_remove,
-};
-
-static int __init msm_soc_platform_init(void)
-{
-	init_waitqueue_head(&the_locks.enable_wait);
-	init_waitqueue_head(&the_locks.eos_wait);
-	init_waitqueue_head(&the_locks.write_wait);
-	init_waitqueue_head(&the_locks.read_wait);
-	init_waitqueue_head(&the_locks.flush_wait);
-
-	return platform_driver_register(&msm_compr_driver);
-}
-module_init(msm_soc_platform_init);
-
-static void __exit msm_soc_platform_exit(void)
-{
-	platform_driver_unregister(&msm_compr_driver);
-}
-module_exit(msm_soc_platform_exit);
-
-MODULE_DESCRIPTION("PCM module platform driver");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
deleted file mode 100644
index d6e3ec6..0000000
--- a/sound/soc/msm/qdsp6v2/msm-compr-q6-v2.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef _MSM_COMPR_H
-#define _MSM_COMPR_H
-#include <sound/apr_audio-v2.h>
-#include <sound/q6asm-v2.h>
-#include <sound/compress_params.h>
-#include <sound/compress_offload.h>
-#include <sound/compress_driver.h>
-
-#include "msm-pcm-q6-v2.h"
-
-struct compr_info {
-	struct snd_compr_caps compr_cap;
-	struct snd_compr_codec_caps codec_caps;
-	struct snd_compr_params codec_param;
-};
-
-struct compr_audio {
-	struct msm_audio prtd;
-	struct compr_info info;
-	uint32_t codec;
-};
-
-#endif /*_MSM_COMPR_H*/
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 0c46763..dfac5fd 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -4131,12 +4131,13 @@
 			.stream_name = "INT0 MI2S Playback",
 			.aif_name = "INT0_MI2S_RX",
 			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100,
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_44100 |
+			SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
 				SNDRV_PCM_FMTBIT_S24_LE |
 				SNDRV_PCM_FMTBIT_S24_3LE,
 			.rate_min =     8000,
-			.rate_max =     48000,
+			.rate_max =     192000,
 		},
 		.capture = {
 			.stream_name = "INT0 MI2S Capture",
@@ -4235,12 +4236,13 @@
 			.stream_name = "INT4 MI2S Playback",
 			.aif_name = "INT4_MI2S_RX",
 			.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
-			SNDRV_PCM_RATE_16000,
+			SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_96000 |
+			SNDRV_PCM_RATE_192000,
 			.formats = SNDRV_PCM_FMTBIT_S16_LE |
 				SNDRV_PCM_FMTBIT_S24_LE |
 				SNDRV_PCM_FMTBIT_S24_3LE,
 			.rate_min =     8000,
-			.rate_max =     48000,
+			.rate_max =     192000,
 		},
 		.capture = {
 			.stream_name = "INT4 MI2S Capture",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 465634b..cbee6b6 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -5378,6 +5378,57 @@
 	msm_routing_put_audio_mixer),
 };
 
+static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia9", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia10", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA10, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia11", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA11, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia12", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA12, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia13", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA13, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia14", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA14, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia15", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA15, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_TERT_TDM_RX_4,
+	MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
 static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = {
 	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_QUAT_TDM_RX_0,
 	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
@@ -11176,6 +11227,9 @@
 	SND_SOC_DAPM_MIXER("TERT_TDM_RX_3 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				tert_tdm_rx_3_mixer_controls,
 				ARRAY_SIZE(tert_tdm_rx_3_mixer_controls)),
+	SND_SOC_DAPM_MIXER("TERT_TDM_RX_4 Audio Mixer", SND_SOC_NOPM, 0, 0,
+				tert_tdm_rx_4_mixer_controls,
+				ARRAY_SIZE(tert_tdm_rx_4_mixer_controls)),
 	SND_SOC_DAPM_MIXER("QUAT_TDM_RX_0 Audio Mixer", SND_SOC_NOPM, 0, 0,
 				quat_tdm_rx_0_mixer_controls,
 				ARRAY_SIZE(quat_tdm_rx_0_mixer_controls)),
@@ -12200,6 +12254,24 @@
 	{"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"},
 	{"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"},
 
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia1", "MM_DL1"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia2", "MM_DL2"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia3", "MM_DL3"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia4", "MM_DL4"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia5", "MM_DL5"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia6", "MM_DL6"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia8", "MM_DL8"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia9", "MM_DL9"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia10", "MM_DL10"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia11", "MM_DL11"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia12", "MM_DL12"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia13", "MM_DL13"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia14", "MM_DL14"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"},
+	{"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"},
+	{"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"},
+
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"},
 	{"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"},
@@ -12361,6 +12433,7 @@
 	{"MultiMedia2 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"},
 	{"MultiMedia1 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"MultiMedia1 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+	{"MultiMedia2 Mixer", "SEC_MI2S_TX", "SEC_MI2S_TX"},
 	{"MultiMedia6 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
 	{"MultiMedia6 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
 	{"MultiMedia3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
@@ -12954,6 +13027,7 @@
 	{"VOIP_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VoLTE_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICE2_UL", NULL, "VOC_EXT_EC MUX"},
+	{"VoWLAN_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICEMMODE1_UL", NULL, "VOC_EXT_EC MUX"},
 	{"VOICEMMODE2_UL", NULL, "VOC_EXT_EC MUX"},
 
@@ -13907,6 +13981,7 @@
 	{"BE_OUT", NULL, "TERT_TDM_RX_1"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_2"},
 	{"BE_OUT", NULL, "TERT_TDM_RX_3"},
+	{"BE_OUT", NULL, "TERT_TDM_RX_4"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_0"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_1"},
 	{"BE_OUT", NULL, "QUAT_TDM_RX_2"},
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 176b8aa..05d123b 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -6695,8 +6695,6 @@
 	mutex_lock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 	memcpy(&this_afe.prot_cfg, &cal_data->cal_info,
 		sizeof(this_afe.prot_cfg));
-	this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
-	this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
 	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 done:
 	return ret;
@@ -6839,8 +6837,6 @@
 		cal_data->cal_info.r0[SP_V2_SPKR_1] = -1;
 		cal_data->cal_info.r0[SP_V2_SPKR_2] = -1;
 	}
-	this_afe.th_ftm_cfg.mode = this_afe.prot_cfg.mode;
-	this_afe.ex_ftm_cfg.mode = this_afe.prot_cfg.mode;
 	mutex_unlock(&this_afe.cal_data[AFE_FB_SPKR_PROT_CAL]->lock);
 	__pm_relax(&wl.ws);
 done:
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index b603b8a..2c3d7fc 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -1609,6 +1609,9 @@
 		snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
 		snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
+	} else {
+		snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT1");
+		snd_soc_dapm_ignore_suspend(dapm, "MAD_CPE_OUT2");
 	}
 
 	snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c
index 304bf47..22d6e0e 100644
--- a/sound/soc/msm/sdm845.c
+++ b/sound/soc/msm/sdm845.c
@@ -173,7 +173,9 @@
 struct msm_asoc_mach_data {
 	u32 mclk_freq;
 	int us_euro_gpio; /* used by gpio driver API */
+	int usbc_en2_gpio; /* used by gpio driver API */
 	struct device_node *us_euro_gpio_p; /* used by pinctrl API */
+	struct pinctrl *usbc_en2_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en1_gpio_p; /* used by pinctrl API */
 	struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
 	struct snd_info_entry *codec_root;
@@ -3106,27 +3108,126 @@
 	return rc;
 }
 
-static bool msm_swap_gnd_mic(struct snd_soc_codec *codec)
+static bool msm_usbc_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
 {
-	struct snd_soc_card *card = codec->component.card;
-	struct msm_asoc_mach_data *pdata =
-				snd_soc_card_get_drvdata(card);
 	int value = 0;
+	bool ret = 0;
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+	struct pinctrl_state *en2_pinctrl_active;
+	struct pinctrl_state *en2_pinctrl_sleep;
 
-	if (pdata->us_euro_gpio_p) {
-		value = msm_cdc_pinctrl_get_state(pdata->us_euro_gpio_p);
-		if (value)
-			msm_cdc_pinctrl_select_sleep_state(
-							pdata->us_euro_gpio_p);
-		else
-			msm_cdc_pinctrl_select_active_state(
-							pdata->us_euro_gpio_p);
-	} else if (pdata->us_euro_gpio >= 0) {
-		value = gpio_get_value_cansleep(pdata->us_euro_gpio);
-		gpio_set_value_cansleep(pdata->us_euro_gpio, !value);
+	if (!pdata->usbc_en2_gpio_p) {
+		if (active) {
+			/* if active and usbc_en2_gpio undefined, get pin */
+			pdata->usbc_en2_gpio_p = devm_pinctrl_get(card->dev);
+			if (IS_ERR_OR_NULL(pdata->usbc_en2_gpio_p)) {
+				dev_err(card->dev,
+					"%s: Can't get EN2 gpio pinctrl:%ld\n",
+					__func__,
+					PTR_ERR(pdata->usbc_en2_gpio_p));
+				pdata->usbc_en2_gpio_p = NULL;
+				return false;
+			}
+		} else
+			/* if not active and usbc_en2_gpio undefined, return */
+			return false;
 	}
-	pr_debug("%s: swap select switch %d to %d\n", __func__, value, !value);
-	return true;
+
+	pdata->usbc_en2_gpio = of_get_named_gpio(card->dev->of_node,
+				    "qcom,usbc-analog-en2-gpio", 0);
+	if (!gpio_is_valid(pdata->usbc_en2_gpio)) {
+		dev_err(card->dev, "%s, property %s not in node %s",
+			__func__, "qcom,usbc-analog-en2-gpio",
+			card->dev->of_node->full_name);
+		return false;
+	}
+
+	en2_pinctrl_active = pinctrl_lookup_state(
+					pdata->usbc_en2_gpio_p, "aud_active");
+	if (IS_ERR_OR_NULL(en2_pinctrl_active)) {
+		dev_err(card->dev,
+			"%s: Cannot get aud_active pinctrl state:%ld\n",
+			__func__, PTR_ERR(en2_pinctrl_active));
+		ret = false;
+		goto err_lookup_state;
+	}
+
+	en2_pinctrl_sleep = pinctrl_lookup_state(
+					pdata->usbc_en2_gpio_p, "aud_sleep");
+	if (IS_ERR_OR_NULL(en2_pinctrl_sleep)) {
+		dev_err(card->dev,
+			"%s: Cannot get aud_sleep pinctrl state:%ld\n",
+			__func__, PTR_ERR(en2_pinctrl_sleep));
+		ret = false;
+		goto err_lookup_state;
+	}
+
+	/* if active and usbc_en2_gpio_p defined, swap using usbc_en2_gpio_p */
+	if (active) {
+		dev_dbg(codec->dev, "%s: enter\n", __func__);
+		if (pdata->usbc_en2_gpio_p) {
+			value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+			if (value)
+				pinctrl_select_state(pdata->usbc_en2_gpio_p,
+							en2_pinctrl_sleep);
+			else
+				pinctrl_select_state(pdata->usbc_en2_gpio_p,
+							en2_pinctrl_active);
+		} else if (pdata->usbc_en2_gpio >= 0) {
+			value = gpio_get_value_cansleep(pdata->usbc_en2_gpio);
+			gpio_set_value_cansleep(pdata->usbc_en2_gpio, !value);
+		}
+		pr_debug("%s: swap select switch %d to %d\n", __func__,
+			value, !value);
+		ret = true;
+	} else {
+		/* if not active, release usbc_en2_gpio_p pin */
+		pinctrl_select_state(pdata->usbc_en2_gpio_p,
+					en2_pinctrl_sleep);
+	}
+
+err_lookup_state:
+	devm_pinctrl_put(pdata->usbc_en2_gpio_p);
+	pdata->usbc_en2_gpio_p = NULL;
+	return ret;
+}
+
+static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active)
+{
+	int value = 0;
+	int ret = 0;
+	struct snd_soc_card *card = codec->component.card;
+	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
+
+	if (!pdata)
+		return false;
+
+	if (!wcd_mbhc_cfg.enable_usbc_analog) {
+		/* if usbc is not defined, swap using us_euro_gpio_p */
+		if (pdata->us_euro_gpio_p) {
+			value = msm_cdc_pinctrl_get_state(
+						pdata->us_euro_gpio_p);
+			if (value)
+				msm_cdc_pinctrl_select_sleep_state(
+						pdata->us_euro_gpio_p);
+			else
+				msm_cdc_pinctrl_select_active_state(
+						pdata->us_euro_gpio_p);
+		} else if (pdata->us_euro_gpio >= 0) {
+			value = gpio_get_value_cansleep(
+						pdata->us_euro_gpio);
+			gpio_set_value_cansleep(
+					pdata->us_euro_gpio, !value);
+		}
+		pr_debug("%s: swap select switch %d to %d\n", __func__,
+			 value, !value);
+		ret = true;
+	} else {
+		/* if usbc is defined, swap using usbc_en2 */
+		ret = msm_usbc_swap_gnd_mic(codec, active);
+	}
+	return ret;
 }
 
 static int msm_afe_set_config(struct snd_soc_codec *codec)
@@ -3833,6 +3934,13 @@
 		ret = -EINVAL;
 		goto err;
 	}
+
+	if (pinctrl_info->pinctrl == NULL) {
+		pr_err("%s: pinctrl_info->pinctrl is NULL\n", __func__);
+		ret = -EINVAL;
+		goto err;
+	}
+
 	curr_state = pinctrl_info->curr_state;
 	pinctrl_info->curr_state = new_state;
 	pr_debug("%s: curr_state = %s new_state = %s\n", __func__,
@@ -4101,6 +4209,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	dev_dbg(rtd->card->dev,
 		"%s: substream = %s  stream = %d, dai name %s, dai ID %d\n",
@@ -4115,12 +4224,10 @@
 		goto err;
 	}
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
-		if (ret) {
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_MI2S_ACTIVE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
-			goto err;
-		}
+				__func__, ret_pinctrl);
 	}
 	/*
 	 * Muxtex protection in case the same MI2S
@@ -4177,6 +4284,7 @@
 	struct snd_soc_card *card = rtd->card;
 	struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
 	struct msm_pinctrl_info *pinctrl_info = &pdata->pinctrl_info;
+	int ret_pinctrl = 0;
 
 	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
 		 substream->name, substream->stream);
@@ -4197,10 +4305,10 @@
 	mutex_unlock(&mi2s_intf_conf[index].lock);
 
 	if (index == QUAT_MI2S) {
-		ret = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
-		if (ret)
+		ret_pinctrl = msm_set_pinctrl(pinctrl_info, STATE_DISABLE);
+		if (ret_pinctrl)
 			pr_err("%s: MI2S TLMM pinctrl set failed with %d\n",
-				__func__, ret);
+				__func__, ret_pinctrl);
 	}
 }
 
@@ -6454,6 +6562,7 @@
 	char *mclk_freq_prop_name;
 	const struct of_device_id *match;
 	int ret;
+	const char *usb_c_dt = "qcom,msm-mbhc-usbc-audio-supported";
 
 	if (!pdev->dev.of_node) {
 		dev_err(&pdev->dev, "No platform supplied from device tree\n");
@@ -6601,6 +6710,9 @@
 		wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
 	}
 
+	if (of_find_property(pdev->dev.of_node, usb_c_dt, NULL))
+		wcd_mbhc_cfg.swap_gnd_mic = msm_swap_gnd_mic;
+
 	ret = msm_prepare_us_euro(card);
 	if (ret)
 		dev_dbg(&pdev->dev, "msm_prepare_us_euro failed (%d)\n",
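
The reworked ground/mic swap above takes an extra "active" flag: when USB-C analog audio is enabled (wcd_mbhc_cfg.enable_usbc_analog), msm_swap_gnd_mic() defers to msm_usbc_swap_gnd_mic(), which acquires the EN2 pinctrl handle on demand, looks up the "aud_active" and "aud_sleep" states, toggles between them according to the current level of the EN2 GPIO, and releases the pinctrl handle again before returning; on the inactive path it simply parks the switch in "aud_sleep". Reduced to its decision logic, with stubs standing in for the pinctrl and GPIO helpers, the flow is roughly:

#include <stdbool.h>
#include <stdio.h>

/* Stubs for pinctrl_select_state() and gpio_get_value_cansleep();
 * the state names are the ones the machine driver looks up.
 */
static int en2_level;
static void select_state(const char *state) { printf("pinctrl -> %s\n", state); }
static int read_en2_gpio(void) { return en2_level; }

/* Sketch of msm_usbc_swap_gnd_mic()'s decision logic only. */
static bool usbc_swap_gnd_mic(bool active)
{
	if (!active) {
		select_state("aud_sleep");	/* park the switch */
		return false;
	}

	if (read_en2_gpio())			/* switch currently driven */
		select_state("aud_sleep");
	else					/* switch currently parked */
		select_state("aud_active");
	return true;
}

int main(void)
{
	usbc_swap_gnd_mic(true);	/* plug-in: toggle the analog switch */
	usbc_swap_gnd_mic(false);	/* removal: back to the sleep state */
	return 0;
}
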
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index d2ac038..083887b 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -13,7 +13,8 @@
 			pcm.o \
 			proc.o \
 			quirks.o \
-			stream.o
+			stream.o \
+			badd.o
 
 snd-usbmidi-lib-objs := midi.o
 
diff --git a/sound/usb/badd.c b/sound/usb/badd.c
new file mode 100644
index 0000000..cc6c26c
--- /dev/null
+++ b/sound/usb/badd.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
+
+struct uac3_input_terminal_descriptor badd_baif_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_input_terminal_descriptor badd_baof_in_term_desc = {
+	.bLength = UAC3_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_INPUT_TERMINAL,
+	.bTerminalID = BADD_IN_TERM_ID_BAOF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baif_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAIF,
+	.wTerminalType = UAC_TERMINAL_STREAMING,
+	.bAssocTerminal = 0x00,		/* No associated terminal */
+	.bSourceID = BADD_FU_ID_BAIF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000000,	/* No controls */
+	.wExTerminalDescrID = 0x0000,
+	.wConnectorsDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+struct uac3_output_terminal_descriptor badd_baof_out_term_desc = {
+	.bLength = UAC3_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
+	.bTerminalID = BADD_OUT_TERM_ID_BAOF,
+	.bSourceID = BADD_FU_ID_BAOF,
+	.bCSourceID = BADD_CLOCK_SOURCE,
+	.wExTerminalDescrID = 0x0000,
+	.wTerminalDescrStr = 0x0000
+};
+
+__u8 monoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00};
+
+__u8 stereoControls[] = {
+	0x03, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00,
+	0x0c, 0x00, 0x00, 0x00
+};
+
+__u8 badd_mu_src_ids[] = {BADD_IN_TERM_ID_BAOF, BADD_FU_ID_BAIOF};
+
+struct uac3_mixer_unit_descriptor badd_baiof_mu_desc = {
+	.bLength = UAC3_DT_MIXER_UNIT_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_MIXER_UNIT_V3,
+	.bUnitID = BADD_MU_ID_BAIOF,
+	.bNrInPins = 0x02,
+	.baSourceID = badd_mu_src_ids,
+	.bmMixerControls = 0x00,
+	.bmControls = 0x00000000,
+	.wMixerDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baif_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baof_fu_desc = {
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAOF,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_feature_unit_descriptor badd_baiof_fu_desc = {
+	.bLength = 0x0f,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_FEATURE_UNIT_V3,
+	.bUnitID = BADD_FU_ID_BAIOF,
+	.bSourceID = BADD_IN_TERM_ID_BAIF,
+	.bmaControls = monoControls,
+	.wFeatureDescrStr = 0x0000
+};
+
+struct uac3_clock_source_descriptor badd_clock_desc = {
+	.bLength = UAC3_DT_CLOCK_SRC_SIZE,
+	.bDescriptorType = USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype = UAC3_CLOCK_SOURCE,
+	.bClockID = BADD_CLOCK_SOURCE,
+	.bmControls = 0x00000001,
+	.bReferenceTerminal = 0x00,
+	.wClockSourceStr = 0x0000
+};
+
+void *badd_desc_list[] = {
+	&badd_baif_in_term_desc,
+	&badd_baof_in_term_desc,
+	&badd_baiof_mu_desc,
+	&badd_baif_fu_desc,
+	&badd_baof_fu_desc,
+	&badd_baiof_fu_desc,
+	&badd_clock_desc
+};
+
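
badd.c introduces the static topology for UAC 3.0 BADD (Basic Audio Device Definition) functions: because BADD devices expose no class-specific interface descriptors of their own, the input/output terminals, feature units, mixer unit and clock source are hard-coded here and gathered in badd_desc_list[]. The mixer changes later in this patch resolve a unit ID by walking that list instead of scanning the descriptor buffer; condensed, the lookup amounts to the following (relying on the unit/terminal ID sitting at the same offset in every entry):

/* Condensed from the UAC_VERSION_3 branch added to mixer.c below;
 * NUM_BADD_DESCS and badd_desc_list[] come from this patch.
 */
static struct uac_feature_unit_descriptor *find_badd_unit(int unit_id)
{
	struct uac_feature_unit_descriptor *hdr;
	int i;

	for (i = 0; i < NUM_BADD_DESCS; i++) {
		hdr = (void *)badd_desc_list[i];
		if (hdr->bUnitID == unit_id)	/* ID is byte 3 in every entry */
			return hdr;
	}
	return NULL;
}
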
diff --git a/sound/usb/card.c b/sound/usb/card.c
index ccf06de..eaf18aa 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -45,6 +45,7 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
 #include <linux/module.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/control.h>
 #include <sound/core.h>
@@ -285,7 +286,6 @@
 	struct usb_host_interface *host_iface;
 	struct usb_interface_descriptor *altsd;
 	struct usb_interface *usb_iface;
-	void *control_header;
 	int i, protocol;
 
 	usb_iface = usb_ifnum_to_if(dev, ctrlif);
@@ -302,16 +302,13 @@
 		return -EINVAL;
 	}
 
-	control_header = snd_usb_find_csint_desc(host_iface->extra,
-						 host_iface->extralen,
-						 NULL, UAC_HEADER);
 	altsd = get_iface_desc(host_iface);
 	protocol = altsd->bInterfaceProtocol;
 
-	if (!control_header) {
-		dev_err(&dev->dev, "cannot find UAC_HEADER\n");
-		return -EINVAL;
-	}
+	/*
+	 * UAC 1.0 devices use AC HEADER Desc for linking AS interfaces;
+	 * UAC 2.0 and 3.0 devices use IAD for linking AS interfaces
+	 */
 
 	switch (protocol) {
 	default:
@@ -321,8 +318,17 @@
 		/* fall through */
 
 	case UAC_VERSION_1: {
-		struct uac1_ac_header_descriptor *h1 = control_header;
+		void *control_header;
+		struct uac1_ac_header_descriptor *h1;
 
+		control_header = snd_usb_find_csint_desc(host_iface->extra,
+					host_iface->extralen, NULL, UAC_HEADER);
+		if (!control_header) {
+			dev_err(&dev->dev, "cannot find UAC_HEADER\n");
+			return -EINVAL;
+		}
+
+		h1 = control_header;
 		if (!h1->bInCollection) {
 			dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
 			return -EINVAL;
@@ -339,7 +345,8 @@
 		break;
 	}
 
-	case UAC_VERSION_2: {
+	case UAC_VERSION_2:
+	case UAC_VERSION_3: {
 		struct usb_interface_assoc_descriptor *assoc =
 						usb_iface->intf_assoc;
 		if (!assoc) {
@@ -358,7 +365,8 @@
 		}
 
 		if (!assoc) {
-			dev_err(&dev->dev, "Audio class v2 interfaces need an interface association\n");
+			dev_err(&dev->dev, "Audio class V%d interfaces need an interface association\n",
+					protocol);
 			return -EINVAL;
 		}
 
@@ -606,6 +614,15 @@
 	struct usb_host_interface *alts;
 	int ifnum;
 	u32 id;
+	struct usb_interface_assoc_descriptor *assoc;
+
+	assoc = intf->intf_assoc;
+	if (assoc && assoc->bFunctionClass == USB_CLASS_AUDIO &&
+	    assoc->bFunctionProtocol == UAC_VERSION_3 &&
+	    assoc->bFunctionSubClass == FULL_ADC_PROFILE) {
+		dev_info(&dev->dev, "No support for full-fledged ADC 3.0 yet!!\n");
+		return -EINVAL;
+	}
 
 	alts = &intf->altsetting[0];
 	ifnum = get_iface_desc(alts)->bInterfaceNumber;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 26dd5f2..8238180 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -428,6 +428,10 @@
 
 	case UAC_VERSION_2:
 		return set_sample_rate_v2(chip, iface, alts, fmt, rate);
+
+	/* Clock rate is fixed at 48 kHz for BADD devices */
+	case UAC_VERSION_3:
+		return 0;
 	}
 }
 
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 2c44386..eaf2615 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -69,6 +70,34 @@
 		format <<= 1;
 		break;
 	}
+
+	case UAC_VERSION_3: {
+		switch (fp->maxpacksize) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			sample_width = BIT_RES_16_BIT;
+			sample_bytes = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			sample_width = BIT_RES_24_BIT;
+			sample_bytes = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+		default:
+			usb_audio_err(chip, "%u:%d : Invalid wMaxPacketSize\n",
+				fp->iface, fp->altsetting);
+			return pcm_formats;
+		}
+		format = 1 << format;
+		break;
+	}
 	}
 
 	if ((pcm_formats == 0) &&
@@ -364,17 +393,34 @@
 	return ret;
 }
 
+static int badd_set_audio_rate_v3(struct snd_usb_audio *chip,
+		   struct audioformat *fp)
+{
+	unsigned int rate;
+
+	fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+	if (fp->rate_table == NULL)
+		return -ENOMEM;
+
+	fp->nr_rates = 1;
+	rate = BADD_SAMPLING_RATE;
+	fp->rate_min = fp->rate_max = fp->rate_table[0] = rate;
+	fp->rates |= snd_pcm_rate_to_rate_bit(rate);
+	return 0;
+}
+
 /*
  * parse the format type I and III descriptors
  */
 static int parse_audio_format_i(struct snd_usb_audio *chip,
 				struct audioformat *fp, unsigned int format,
+				u8 format_type,
 				struct uac_format_type_i_continuous_descriptor *fmt)
 {
 	snd_pcm_format_t pcm_format;
 	int ret;
 
-	if (fmt->bFormatType == UAC_FORMAT_TYPE_III) {
+	if (format_type == UAC_FORMAT_TYPE_III) {
 		/* FIXME: the format type is really IECxxx
 		 *        but we give normal PCM format to get the existing
 		 *        apps working...
@@ -413,6 +459,9 @@
 		/* fp->channels is already set in this case */
 		ret = parse_audio_format_rates_v2(chip, fp);
 		break;
+	case UAC_VERSION_3:
+		ret = badd_set_audio_rate_v3(chip, fp);
+		break;
 	}
 
 	if (fp->channels < 1) {
@@ -484,11 +533,18 @@
 			       int stream)
 {
 	int err;
+	int format_type = -EINVAL;
 
-	switch (fmt->bFormatType) {
+	if ((fp->protocol == UAC_VERSION_1) ||
+			(fp->protocol == UAC_VERSION_2))
+		format_type = fmt->bFormatType;
+	else
+		format_type = UAC_FORMAT_TYPE_I; /* only BADD is supported */
+
+	switch (format_type) {
 	case UAC_FORMAT_TYPE_I:
 	case UAC_FORMAT_TYPE_III:
-		err = parse_audio_format_i(chip, fp, format, fmt);
+		err = parse_audio_format_i(chip, fp, format, format_type, fmt);
 		break;
 	case UAC_FORMAT_TYPE_II:
 		err = parse_audio_format_ii(chip, fp, format, fmt);
@@ -497,10 +553,10 @@
 		usb_audio_info(chip,
 			 "%u:%d : format type %d is not supported yet\n",
 			 fp->iface, fp->altsetting,
-			 fmt->bFormatType);
+			 format_type);
 		return -ENOTSUPP;
 	}
-	fp->fmt_type = fmt->bFormatType;
+	fp->fmt_type = format_type;
 	if (err < 0)
 		return err;
 #if 1
@@ -511,7 +567,7 @@
 	if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
 	    chip->usb_id == USB_ID(0x041e, 0x3020) ||
 	    chip->usb_id == USB_ID(0x041e, 0x3061)) {
-		if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
+		if (format_type == UAC_FORMAT_TYPE_I &&
 		    fp->rates != SNDRV_PCM_RATE_48000 &&
 		    fp->rates != SNDRV_PCM_RATE_96000)
 			return -ENOTSUPP;
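
Because a BADD streaming interface carries no format-type descriptor, the parser above infers the sample format from the endpoint's wMaxPacketSize and pins the rate table to the single BADD_SAMPLING_RATE (48 kHz, which is also why set_sample_rate becomes a no-op for UAC_VERSION_3 in clock.c). The arithmetic behind the BADD_MAXPSIZE_* constants is just the packet budget at that fixed rate; a small sketch, assuming a nominal 1 ms data interval (asynchronous endpoints allow a slightly larger maximum, and the exact constants live in audio-v3.h):

#include <stdio.h>

/*
 * Nominal payload of one 1 ms packet at the fixed 48 kHz BADD rate:
 * 48 frames * channels * bytes per sample.  This is how the
 * BADD_MAXPSIZE_* values distinguish mono/stereo and 16/24-bit streams.
 */
static unsigned int badd_sync_maxpacket(unsigned int channels,
					unsigned int subslot_bytes)
{
	return 48 * channels * subslot_bytes;
}

int main(void)
{
	printf("mono,   16-bit: %u bytes\n", badd_sync_maxpacket(1, 2));
	printf("stereo, 16-bit: %u bytes\n", badd_sync_maxpacket(2, 2));
	printf("stereo, 24-bit: %u bytes\n", badd_sync_maxpacket(2, 3));
	return 0;
}
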
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 932ce3e..c3bf5ff 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -51,6 +51,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/control.h>
@@ -185,6 +186,17 @@
 	/* we just parse the header */
 	struct uac_feature_unit_descriptor *hdr = NULL;
 
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		int i;
+
+		for (i = 0; i < NUM_BADD_DESCS; i++) {
+			hdr = (void *)badd_desc_list[i];
+			if (hdr->bUnitID == unit)
+				return hdr;
+		}
+
+		return NULL;
+	}
 	while ((hdr = snd_usb_find_desc(state->buffer, state->buflen, hdr,
 					USB_DT_CS_INTERFACE)) != NULL) {
 		if (hdr->bLength >= 4 &&
@@ -718,7 +730,7 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le16_to_cpu(d->wChannelConfig);
 				term->name = d->iTerminal;
-			} else { /* UAC_VERSION_2 */
+			} else if (state->mixer->protocol == UAC_VERSION_2) {
 				struct uac2_input_terminal_descriptor *d = p1;
 
 				/* call recursively to verify that the
@@ -735,6 +747,24 @@
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
+			} else { /* UAC_VERSION_3 */
+				struct uac3_input_terminal_descriptor *d = p1;
+
+				err = check_input_term(state,
+							d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				term->id = id;
+				term->type = d->wTerminalType;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wTerminalDescrStr;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -752,41 +782,81 @@
 			return 0;
 		}
 		case UAC_SELECTOR_UNIT:
-		case UAC2_CLOCK_SELECTOR: {
-			struct uac_selector_unit_descriptor *d = p1;
-			/* call recursively to retrieve the channel info */
-			err = check_input_term(state, d->baSourceID[0], term);
-			if (err < 0)
-				return err;
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
-			term->id = id;
-			term->name = uac_selector_unit_iSelector(d);
+		/* UAC3_MIXER_UNIT_V3 */
+		case UAC2_CLOCK_SELECTOR:
+		/* UAC3_CLOCK_SOURCE */ {
+			if (state->mixer->protocol == UAC_VERSION_3
+				&& hdr[2] == UAC3_CLOCK_SOURCE) {
+				struct uac3_clock_source_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				term->id = id;
+				term->name = d->wClockSourceStr;
+			} else if (state->mixer->protocol == UAC_VERSION_3
+					&& hdr[2] == UAC3_MIXER_UNIT_V3) {
+				struct uac3_mixer_unit_descriptor *d = p1;
+
+				term->type = d->bDescriptorSubtype << 16;
+				if (d->wClusterDescrID == CLUSTER_ID_MONO) {
+					term->channels = NUM_CHANNELS_MONO;
+					term->chconfig = BADD_CH_CONFIG_MONO;
+				} else {
+					term->channels = NUM_CHANNELS_STEREO;
+					term->chconfig = BADD_CH_CONFIG_STEREO;
+				}
+				term->name = d->wMixerDescrStr;
+			} else {
+				struct uac_selector_unit_descriptor *d = p1;
+				/* call recursively to retrieve channel info */
+				err = check_input_term(state,
+							d->baSourceID[0], term);
+				if (err < 0)
+					return err;
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
+				term->id = id;
+				term->name = uac_selector_unit_iSelector(d);
+			}
 			return 0;
 		}
 		case UAC1_PROCESSING_UNIT:
 		case UAC1_EXTENSION_UNIT:
 		/* UAC2_PROCESSING_UNIT_V2 */
 		/* UAC2_EFFECT_UNIT */
+		/* UAC3_FEATURE_UNIT_V3 */
 		case UAC2_EXTENSION_UNIT_V2: {
-			struct uac_processing_unit_descriptor *d = p1;
+			if (state->mixer->protocol == UAC_VERSION_3) {
+				struct uac_feature_unit_descriptor *d = p1;
 
-			if (state->mixer->protocol == UAC_VERSION_2 &&
-				hdr[2] == UAC2_EFFECT_UNIT) {
-				/* UAC2/UAC1 unit IDs overlap here in an
-				 * uncompatible way. Ignore this unit for now.
-				 */
+				id = d->bSourceID;
+			} else {
+				struct uac_processing_unit_descriptor *d = p1;
+
+				if (state->mixer->protocol == UAC_VERSION_2 &&
+					hdr[2] == UAC2_EFFECT_UNIT) {
+					/* UAC2/UAC1 unit IDs overlap here in an
+					 * uncompatible way. Ignore this unit
+					 * for now.
+					 */
+					return 0;
+				}
+
+				if (d->bNrInPins) {
+					id = d->baSourceID[0];
+					break; /* continue to parse */
+				}
+				/* virtual type */
+				term->type = d->bDescriptorSubtype << 16;
+				term->channels =
+					uac_processing_unit_bNrChannels(d);
+				term->chconfig =
+					uac_processing_unit_wChannelConfig(
+						d, state->mixer->protocol);
+				term->name = uac_processing_unit_iProcessing(
+						d, state->mixer->protocol);
 				return 0;
 			}
-
-			if (d->bNrInPins) {
-				id = d->baSourceID[0];
-				break; /* continue to parse */
-			}
-			term->type = d->bDescriptorSubtype << 16; /* virtual type */
-			term->channels = uac_processing_unit_bNrChannels(d);
-			term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
-			term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
-			return 0;
+			break;
 		}
 		case UAC2_CLOCK_SOURCE: {
 			struct uac_clock_source_descriptor *d = p1;
@@ -1233,12 +1303,18 @@
 	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
-	int nameid = uac_feature_unit_iFeature(desc);
+	int nameid;
 	struct snd_kcontrol *kctl;
 	struct usb_mixer_elem_info *cval;
 	const struct usbmix_name_map *map;
 	unsigned int range;
 
+	if (state->mixer->protocol == UAC_VERSION_3)
+		nameid = ((struct uac3_feature_unit_descriptor *)
+				raw_desc)->wFeatureDescrStr;
+	else
+		nameid = uac_feature_unit_iFeature(desc);
+
 	control++; /* change from zero-based to 1-based value */
 
 	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
@@ -1259,7 +1335,7 @@
 	ctl_info = &audio_feature_info[control-1];
 	if (state->mixer->protocol == UAC_VERSION_1)
 		cval->val_type = ctl_info->type;
-	else /* UAC_VERSION_2 */
+	else /* UAC_VERSION_2 or UAC_VERSION_3 */
 		cval->val_type = ctl_info->type_uac2 >= 0 ?
 			ctl_info->type_uac2 : ctl_info->type;
 
@@ -1447,6 +1523,62 @@
 	return snd_usb_mixer_add_control(&cval->head, kctl);
 }
 
+static int find_num_channels(struct mixer_build *state, int dir)
+{
+	int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
+	int ctrlif = get_iface_desc(state->mixer->hostif)->bInterfaceNumber;
+	struct usb_interface *usb_iface =
+			usb_ifnum_to_if(state->mixer->chip->dev, ctrlif);
+	struct usb_interface_assoc_descriptor *assoc = usb_iface->intf_assoc;
+	struct usb_host_interface *alts;
+
+	for (i = 0; i < assoc->bInterfaceCount; i++) {
+		int intf = assoc->bFirstInterface + i;
+
+		if (intf != ctrlif) {
+			struct usb_interface *iface =
+				usb_ifnum_to_if(state->mixer->chip->dev, intf);
+
+			alts = &iface->altsetting[1];
+			if (dir == USB_DIR_OUT &&
+				get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN)
+				continue;
+			if (dir == USB_DIR_IN &&
+				!(get_endpoint(alts, 0)->bEndpointAddress &
+				USB_DIR_IN))
+				continue;
+			num = iface->num_altsetting;
+			for (j = 1; j < num; j++) {
+				num_ch = NUM_CHANNELS_MONO;
+				alts = &iface->altsetting[j];
+				wMaxPacketSize = le16_to_cpu(
+							get_endpoint(alts, 0)->
+							wMaxPacketSize);
+				switch (wMaxPacketSize) {
+				case BADD_MAXPSIZE_SYNC_MONO_16:
+				case BADD_MAXPSIZE_SYNC_MONO_24:
+				case BADD_MAXPSIZE_ASYNC_MONO_16:
+				case BADD_MAXPSIZE_ASYNC_MONO_24:
+					break;
+				case BADD_MAXPSIZE_SYNC_STEREO_16:
+				case BADD_MAXPSIZE_SYNC_STEREO_24:
+				case BADD_MAXPSIZE_ASYNC_STEREO_16:
+				case BADD_MAXPSIZE_ASYNC_STEREO_24:
+					num_ch = NUM_CHANNELS_STEREO;
+					break;
+				}
+				if (num_ch == NUM_CHANNELS_MONO)
+					continue;
+				else
+					break;
+			}
+		}
+	}
+
+	return num_ch;
+}
+
 /*
  * parse a feature unit
  *
@@ -1478,7 +1610,7 @@
 				      unitid);
 			return -EINVAL;
 		}
-	} else {
+	} else if (state->mixer->protocol == UAC_VERSION_2) {
 		struct uac2_feature_unit_descriptor *ftr = _ftr;
 		csize = 4;
 		channels = (hdr->bLength - 6) / 4 - 1;
@@ -1489,11 +1621,118 @@
 				      unitid);
 			return -EINVAL;
 		}
+	} else {
+		struct usb_interface *usb_iface =
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+							usb_iface->intf_assoc;
+
+		csize = 4;
+		switch (unitid) {
+		case BADD_FU_ID_BAIOF:
+			channels = NUM_CHANNELS_MONO;
+			bmaControls = monoControls;
+			badd_baif_in_term_desc.wClusterDescrID =
+						CLUSTER_ID_MONO;
+			break;
+
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADPHONE:
+			case PROF_HEADSET_ADAPTER:
+				channels = NUM_CHANNELS_STEREO;
+				bmaControls = stereoControls;
+				badd_baiof_mu_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state,
+								USB_DIR_OUT);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Can't find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						monoControls : stereoControls;
+				badd_baof_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		case BADD_FU_ID_BAIF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+			case PROF_SPEAKERPHONE:
+				channels = NUM_CHANNELS_MONO;
+				bmaControls = monoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					CLUSTER_ID_MONO;
+				break;
+			default:
+				channels = find_num_channels(state, USB_DIR_IN);
+				if (channels < 0) {
+					usb_audio_err(state->chip,
+						      "unit %u: Can't find num of channels\n",
+						      unitid);
+					return channels;
+				}
+
+				bmaControls = (channels == NUM_CHANNELS_MONO) ?
+						 monoControls : stereoControls;
+				badd_baif_in_term_desc.wClusterDescrID =
+					(channels == NUM_CHANNELS_MONO) ?
+					CLUSTER_ID_MONO : CLUSTER_ID_STEREO;
+				break;
+			}
+			break;
+
+		default:
+			usb_audio_err(state->chip, "Invalid unit %u\n", unitid);
+			return -EINVAL;
+		}
 	}
 
 	/* parse the source unit */
-	if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
-		return err;
+	if (state->mixer->protocol != UAC_VERSION_3) {
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
+			return err;
+	} else {
+		struct usb_interface *usb_iface =
+			usb_ifnum_to_if(state->mixer->chip->dev,
+			get_iface_desc(state->mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
+
+		switch (unitid) {
+		case BADD_FU_ID_BAOF:
+			switch (assoc->bFunctionSubClass) {
+			case PROF_HEADSET:
+			case PROF_HEADSET_ADAPTER:
+				hdr->bSourceID = BADD_MU_ID_BAIOF;
+				break;
+			default:
+				hdr->bSourceID = BADD_IN_TERM_ID_BAOF;
+				break;
+			}
+		}
+		err = parse_audio_unit(state, hdr->bSourceID);
+		if (err < 0)
+			return err;
+	}
 
 	/* determine the input source type and name */
 	err = check_input_term(state, hdr->bSourceID, &iterm);
@@ -1547,7 +1786,7 @@
 				build_feature_ctl(state, _ftr, 0, i, &iterm,
 						  unitid, 0);
 		}
-	} else { /* UAC_VERSION_2 */
+	} else { /* UAC_VERSION_2 or UAC_VERSION_3 */
 		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
 			unsigned int ch_bits = 0;
 			unsigned int ch_read_only = 0;
@@ -1665,12 +1904,20 @@
 	int input_pins, num_ins, num_outs;
 	int pin, ich, err;
 
-	if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
-	    !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
-		usb_audio_err(state->chip,
-			      "invalid MIXER UNIT descriptor %d\n",
-			      unitid);
-		return -EINVAL;
+	if (state->mixer->protocol == UAC_VERSION_3) {
+		input_pins = badd_baiof_mu_desc.bNrInPins;
+		num_outs =
+		   (badd_baiof_mu_desc.wClusterDescrID == CLUSTER_ID_MONO) ?
+		    NUM_CHANNELS_MONO : NUM_CHANNELS_STEREO;
+	} else {
+		input_pins = desc->bNrInPins;
+		num_outs = uac_mixer_unit_bNrChannels(desc);
+		if (desc->bLength < 11 || !input_pins || !num_outs) {
+			usb_audio_err(state->chip,
+				      "invalid MIXER UNIT descriptor %d\n",
+				      unitid);
+			return -EINVAL;
+		}
 	}
 
 	num_ins = 0;
@@ -1690,9 +1937,14 @@
 			int och, ich_has_controls = 0;
 
 			for (och = 0; och < num_outs; och++) {
-				__u8 *c = uac_mixer_unit_bmControls(desc,
-						state->mixer->protocol);
+				__u8 *c = NULL;
 
+				if (state->mixer->protocol == UAC_VERSION_3)
+					c =
+					  &(badd_baiof_mu_desc.bmMixerControls);
+				else
+					c = uac_mixer_unit_bmControls(desc,
+							state->mixer->protocol);
 				if (check_matrix_bitmap(c, ich, och, num_outs)) {
 					ich_has_controls = 1;
 					break;
@@ -2201,16 +2453,28 @@
 	case UAC2_CLOCK_SOURCE:
 		return parse_clock_source_unit(state, unitid, p1);
 	case UAC_SELECTOR_UNIT:
+	/*   UAC3_MIXER_UNIT_V3 has the same value */
 	case UAC2_CLOCK_SELECTOR:
-		return parse_audio_selector_unit(state, unitid, p1);
+	/*   UAC3_CLOCK_SOURCE has the same value */
+		if (state->mixer->protocol == UAC_VERSION_3 &&
+			p1[2] == UAC3_CLOCK_SOURCE)
+			return 0; /* NOP */
+		else if (state->mixer->protocol == UAC_VERSION_3
+			&& p1[2] == UAC3_MIXER_UNIT_V3)
+			return parse_audio_mixer_unit(state, unitid, p1);
+		else
+			return parse_audio_selector_unit(state, unitid, p1);
 	case UAC_FEATURE_UNIT:
 		return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_PROCESSING_UNIT:
 	/*   UAC2_EFFECT_UNIT has the same value */
+	/*   UAC3_FEATURE_UNIT_V3 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
 			return parse_audio_processing_unit(state, unitid, p1);
-		else
+		else if (state->mixer->protocol == UAC_VERSION_2)
 			return 0; /* FIXME - effect units not implemented yet */
+		else
+			return parse_audio_feature_unit(state, unitid, p1);
 	case UAC1_EXTENSION_UNIT:
 	/*   UAC2_PROCESSING_UNIT_V2 has the same value */
 		if (state->mixer->protocol == UAC_VERSION_1)
@@ -2245,6 +2509,23 @@
 	return 0;
 }
 
+static int make_out_term(struct mixer_build state, int wTerminalType)
+{
+	struct uac3_output_terminal_descriptor *desc = NULL;
+
+	if (wTerminalType == UAC_TERMINAL_STREAMING)
+		desc = &badd_baif_out_term_desc;
+	else {
+		desc = &badd_baof_out_term_desc;
+		desc->wTerminalType = wTerminalType;
+	}
+	set_bit(desc->bTerminalID, state.unitbitmap);
+	state.oterm.id = desc->bTerminalID;
+	state.oterm.type = desc->wTerminalType;
+	state.oterm.name = desc->wTerminalDescrStr;
+	return parse_audio_unit(&state, desc->bSourceID);
+}
+
 /*
  * create mixer controls
  *
@@ -2253,9 +2534,8 @@
 static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
 {
 	struct mixer_build state;
-	int err;
+	int err = -EINVAL;
 	const struct usbmix_ctl_map *map;
-	void *p;
 
 	memset(&state, 0, sizeof(state));
 	state.chip = mixer->chip;
@@ -2273,44 +2553,108 @@
 		}
 	}
 
-	p = NULL;
-	while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
-					    mixer->hostif->extralen,
-					    p, UAC_OUTPUT_TERMINAL)) != NULL) {
-		if (mixer->protocol == UAC_VERSION_1) {
-			struct uac1_output_terminal_descriptor *desc = p;
+	if (mixer->protocol == UAC_VERSION_3) {
+		struct usb_interface *usb_iface =
+			usb_ifnum_to_if(mixer->chip->dev,
+			get_iface_desc(mixer->hostif)->bInterfaceNumber);
+		struct usb_interface_assoc_descriptor *assoc =
+			usb_iface->intf_assoc;
 
-			if (desc->bLength < sizeof(*desc))
-				continue; /* invalid descriptor? */
-			/* mark terminal ID as visited */
-			set_bit(desc->bTerminalID, state.unitbitmap);
-			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
-			state.oterm.name = desc->iTerminal;
-			err = parse_audio_unit(&state, desc->bSourceID);
+		switch (assoc->bFunctionSubClass) {
+		case PROF_GENERIC_IO: {
+			if (assoc->bInterfaceCount == 0x02) {
+				if (get_endpoint(mixer->hostif,
+					0)->bEndpointAddress & USB_DIR_IN)
+					err = make_out_term(state,
+							UAC_TERMINAL_STREAMING);
+				else
+					err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+			} else {
+				err = make_out_term(state,
+						UAC_OUTPUT_TERMINAL_UNDEFINED);
+				if (err < 0 && err != -EINVAL)
+					return err;
+				err = make_out_term(state,
+						UAC_TERMINAL_STREAMING);
+			}
+			break;
+		}
+
+		case PROF_HEADPHONE:
+			err = make_out_term(state,
+					UAC_OUTPUT_TERMINAL_HEADPHONES);
+			break;
+		case PROF_SPEAKER:
+			err = make_out_term(state, UAC_OUTPUT_TERMINAL_SPEAKER);
+			break;
+		case PROF_MICROPHONE:
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_HEADSET:
+		case PROF_HEADSET_ADAPTER:
+			err = make_out_term(state, UAC_BIDIR_TERMINAL_HEADSET);
 			if (err < 0 && err != -EINVAL)
 				return err;
-		} else { /* UAC_VERSION_2 */
-			struct uac2_output_terminal_descriptor *desc = p;
-
-			if (desc->bLength < sizeof(*desc))
-				continue; /* invalid descriptor? */
-			/* mark terminal ID as visited */
-			set_bit(desc->bTerminalID, state.unitbitmap);
-			state.oterm.id = desc->bTerminalID;
-			state.oterm.type = le16_to_cpu(desc->wTerminalType);
-			state.oterm.name = desc->iTerminal;
-			err = parse_audio_unit(&state, desc->bSourceID);
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		case PROF_SPEAKERPHONE:
+			err = make_out_term(state,
+					UAC_BIDIR_TERMINAL_SPEAKERPHONE);
 			if (err < 0 && err != -EINVAL)
 				return err;
+			err = make_out_term(state, UAC_TERMINAL_STREAMING);
+			break;
+		}
+		if (err < 0 && err != -EINVAL)
+			return err;
+	} else {
+		void *p;
 
-			/*
-			 * For UAC2, use the same approach to also add the
-			 * clock selectors
-			 */
-			err = parse_audio_unit(&state, desc->bCSourceID);
-			if (err < 0 && err != -EINVAL)
-				return err;
+		p = NULL;
+		while ((p = snd_usb_find_csint_desc(mixer->hostif->extra,
+						mixer->hostif->extralen, p,
+						UAC_OUTPUT_TERMINAL)) != NULL) {
+			if (mixer->protocol == UAC_VERSION_1) {
+				struct uac1_output_terminal_descriptor *desc =
+									      p;
+
+				if (desc->bLength < sizeof(*desc))
+					continue; /* invalid descriptor? */
+				/* mark terminal ID as visited */
+				set_bit(desc->bTerminalID, state.unitbitmap);
+				state.oterm.id = desc->bTerminalID;
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
+				state.oterm.name = desc->iTerminal;
+				err = parse_audio_unit(&state, desc->bSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+			} else { /* UAC_VERSION_2 */
+				struct uac2_output_terminal_descriptor *desc =
+									      p;
+
+				if (desc->bLength < sizeof(*desc))
+					continue; /* invalid descriptor? */
+				/* mark terminal ID as visited */
+				set_bit(desc->bTerminalID, state.unitbitmap);
+				state.oterm.id = desc->bTerminalID;
+				state.oterm.type =
+					le16_to_cpu(desc->wTerminalType);
+				state.oterm.name = desc->iTerminal;
+				err = parse_audio_unit(&state, desc->bSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+
+				/*
+				 * For UAC2, use the same approach to also add
+				 * the clock selectors
+				 */
+				err = parse_audio_unit(&state,
+							desc->bCSourceID);
+				if (err < 0 && err != -EINVAL)
+					return err;
+			}
 		}
 	}
 
@@ -2552,6 +2896,9 @@
 	case UAC_VERSION_2:
 		mixer->protocol = UAC_VERSION_2;
 		break;
+	case UAC_VERSION_3:
+		mixer->protocol = UAC_VERSION_3;
+		break;
 	}
 
 	if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
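
A note on the UAC_VERSION_3 additions above: for a BADD device the mixer
code never reads a class-specific format descriptor; the channel count
(and hence the cluster ID) is inferred from the streaming endpoint's
wMaxPacketSize. The standalone sketch below illustrates that mapping as a
table lookup. The packet sizes and names are placeholders for
illustration, not the real BADD_MAXPSIZE_* constants from
<linux/usb/audio-v3.h>.

	/* Hypothetical illustration only, not part of this patch. */
	#include <stdio.h>

	struct badd_pktmap {
		unsigned short max_packet;	/* placeholder, not a real BADD value */
		int channels;			/* 1 = mono, 2 = stereo */
	};

	/* Placeholder table; the kernel code matches BADD_MAXPSIZE_* instead. */
	static const struct badd_pktmap pktmap[] = {
		{  96, 1 }, { 144, 1 },		/* example mono 16/24-bit sizes */
		{ 192, 2 }, { 288, 2 },		/* example stereo 16/24-bit sizes */
	};

	static int badd_channels(unsigned short wMaxPacketSize)
	{
		unsigned int i;

		for (i = 0; i < sizeof(pktmap) / sizeof(pktmap[0]); i++)
			if (pktmap[i].max_packet == wMaxPacketSize)
				return pktmap[i].channels;
		return -1;	/* not a valid BADD packet size */
	}

	int main(void)
	{
		printf("channels for a 192-byte packet: %d\n", badd_channels(192));
		return 0;
	}
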
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 7437cd5..5bc84b4 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -20,6 +20,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
+#include <linux/usb/audio-v3.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -282,8 +283,6 @@
 		0 /* terminator */
 	};
 	struct snd_pcm_chmap_elem *chmap;
-	const unsigned int *maps;
-	int c;
 
 	if (channels > ARRAY_SIZE(chmap->map))
 		return NULL;
@@ -292,26 +291,41 @@
 	if (!chmap)
 		return NULL;
 
-	maps = protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 	chmap->channels = channels;
-	c = 0;
 
-	if (bits) {
-		for (; bits && *maps; maps++, bits >>= 1)
-			if (bits & 1)
-				chmap->map[c++] = *maps;
+	if (protocol == UAC_VERSION_3) {
+		switch (channels) {
+		case 1:
+			chmap->map[0] = SNDRV_CHMAP_MONO;
+			break;
+		case 2:
+			chmap->map[0] = SNDRV_CHMAP_FL;
+			chmap->map[1] = SNDRV_CHMAP_FR;
+			break;
+		}
 	} else {
-		/* If we're missing wChannelConfig, then guess something
-		    to make sure the channel map is not skipped entirely */
-		if (channels == 1)
-			chmap->map[c++] = SNDRV_CHMAP_MONO;
-		else
-			for (; c < channels && *maps; maps++)
-				chmap->map[c++] = *maps;
-	}
+		int c = 0;
+		const unsigned int *maps =
+			protocol == UAC_VERSION_2 ? uac2_maps : uac1_maps;
 
-	for (; c < channels; c++)
-		chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+		if (bits) {
+			for (; bits && *maps; maps++, bits >>= 1)
+				if (bits & 1)
+					chmap->map[c++] = *maps;
+		} else {
+			/*
+			 * If we're missing wChannelConfig, then guess something
+			 * to make sure the channel map is not skipped entirely
+			 */
+			if (channels == 1)
+				chmap->map[c++] = SNDRV_CHMAP_MONO;
+			else
+				for (; c < channels && *maps; maps++)
+					chmap->map[c++] = *maps;
+		}
+		for (; c < channels; c++)
+			chmap->map[c] = SNDRV_CHMAP_UNKNOWN;
+	}
 
 	return chmap;
 }
@@ -409,6 +423,9 @@
 	struct usb_interface_descriptor *altsd = get_iface_desc(alts);
 	int attributes = 0;
 
+	if (protocol == UAC_VERSION_3)
+		return 0;
+
 	csep = snd_usb_find_desc(alts->endpoint[0].extra, alts->endpoint[0].extralen, NULL, USB_DT_CS_ENDPOINT);
 
 	/* Creamware Noah has this descriptor after the 2nd endpoint */
@@ -492,7 +509,7 @@
 	unsigned int format = 0, num_channels = 0;
 	struct audioformat *fp = NULL;
 	int num, protocol, clock = 0;
-	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_continuous_descriptor *fmt = NULL;
 	unsigned int chconfig;
 
 	dev = chip->dev;
@@ -629,38 +646,78 @@
 				iface_no, altno, as->bTerminalLink);
 			continue;
 		}
+
+		case UAC_VERSION_3: {
+			int wMaxPacketSize;
+
+			format = UAC_FORMAT_TYPE_I_PCM;
+			clock = BADD_CLOCK_SOURCE;
+			wMaxPacketSize = le16_to_cpu(get_endpoint(alts, 0)
+							->wMaxPacketSize);
+			switch (wMaxPacketSize) {
+			case BADD_MAXPSIZE_SYNC_MONO_16:
+			case BADD_MAXPSIZE_SYNC_MONO_24:
+			case BADD_MAXPSIZE_ASYNC_MONO_16:
+			case BADD_MAXPSIZE_ASYNC_MONO_24: {
+				num_channels = NUM_CHANNELS_MONO;
+				chconfig = BADD_CH_CONFIG_MONO;
+				break;
+			}
+
+			case BADD_MAXPSIZE_SYNC_STEREO_16:
+			case BADD_MAXPSIZE_SYNC_STEREO_24:
+			case BADD_MAXPSIZE_ASYNC_STEREO_16:
+			case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+				num_channels = NUM_CHANNELS_STEREO;
+				chconfig = BADD_CH_CONFIG_STEREO;
+				break;
+			}
+			default:
+				dev_err(&dev->dev,
+					"%u:%d: invalid wMaxPacketSize\n",
+					iface_no, altno);
+				continue;
+			}
+		}
 		}
 
-		/* get format type */
-		fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_FORMAT_TYPE);
-		if (!fmt) {
-			dev_err(&dev->dev,
-				"%u:%d : no UAC_FORMAT_TYPE desc\n",
-				iface_no, altno);
-			continue;
-		}
-		if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) ||
-		    ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) {
-			dev_err(&dev->dev,
-				"%u:%d : invalid UAC_FORMAT_TYPE desc\n",
-				iface_no, altno);
-			continue;
-		}
+		if ((protocol == UAC_VERSION_1) ||
+			(protocol == UAC_VERSION_2)) {
+			/* get format type */
+			fmt = snd_usb_find_csint_desc(alts->extra,
+					alts->extralen, NULL, UAC_FORMAT_TYPE);
+			if (!fmt) {
+				dev_err(&dev->dev,
+					"%u:%d : no UAC_FORMAT_TYPE desc\n",
+					iface_no, altno);
+				continue;
+			}
+			if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8))
+			|| ((protocol == UAC_VERSION_2) &&
+				(fmt->bLength < 6))) {
+				dev_err(&dev->dev,
+					"%u:%d: invalid UAC_FORMAT_TYPE desc\n",
+					iface_no, altno);
+				continue;
+			}
 
-		/*
-		 * Blue Microphones workaround: The last altsetting is identical
-		 * with the previous one, except for a larger packet size, but
-		 * is actually a mislabeled two-channel setting; ignore it.
-		 */
-		if (fmt->bNrChannels == 1 &&
-		    fmt->bSubframeSize == 2 &&
-		    altno == 2 && num == 3 &&
-		    fp && fp->altsetting == 1 && fp->channels == 1 &&
-		    fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
-		    protocol == UAC_VERSION_1 &&
-		    le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize) ==
+			/*
+			 * Blue Microphones workaround: The last altsetting is
+			 * identical with the previous one, except for a larger
+			 * packet size, but is actually a mislabeled two-channel
+			 * setting; ignore it.
+			 */
+			if (fmt->bNrChannels == 1 &&
+			    fmt->bSubframeSize == 2 &&
+			    altno == 2 && num == 3 &&
+			    fp && fp->altsetting == 1 && fp->channels == 1 &&
+			    fp->formats == SNDRV_PCM_FMTBIT_S16_LE &&
+			    protocol == UAC_VERSION_1 &&
+			    le16_to_cpu(
+				get_endpoint(alts, 0)->wMaxPacketSize) ==
 							fp->maxpacksize * 2)
-			continue;
+				continue;
+		}
 
 		fp = kzalloc(sizeof(*fp), GFP_KERNEL);
 		if (! fp) {
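
In the UAC_VERSION_3 branch of the channel-map helper above, the channel
map is fixed rather than decoded from wChannelConfig, since BADD
functions only expose mono or stereo. A minimal userspace sketch of the
same idea, using stand-in values instead of the kernel's SNDRV_CHMAP_*
enum and struct snd_pcm_chmap_elem:

	/* Hypothetical illustration only, not part of this patch. */
	#include <stdio.h>

	enum { CH_UNKNOWN, CH_MONO, CH_FL, CH_FR };	/* stand-ins for SNDRV_CHMAP_* */

	struct chmap_elem {
		int channels;
		int map[2];
	};

	/* BADD exposes at most two channels, so no wChannelConfig is needed. */
	static void fill_uac3_chmap(struct chmap_elem *e, int channels)
	{
		e->channels = channels;
		if (channels == 1) {
			e->map[0] = CH_MONO;
		} else {
			e->map[0] = CH_FL;
			e->map[1] = CH_FR;
		}
	}

	int main(void)
	{
		struct chmap_elem e;

		fill_uac3_chmap(&e, 2);
		printf("map[0]=%d map[1]=%d\n", e.map[0], e.map[1]);
		return 0;
	}
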
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5a1974e..801508c 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -27,6 +27,7 @@
 #include <soc/qcom/msm_qmi_interface.h>
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
+#include <linux/usb/audio-v3.h>
 
 #include "usbaudio.h"
 #include "card.h"
@@ -427,12 +428,14 @@
 	protocol = altsd->bInterfaceProtocol;
 
 	/* get format type */
-	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
-			UAC_FORMAT_TYPE);
-	if (!fmt) {
-		pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
-			subs->interface, subs->altset_idx);
-		goto err;
+	if (protocol != UAC_VERSION_3) {
+		fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+				UAC_FORMAT_TYPE);
+		if (!fmt) {
+			pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n",
+				__func__, subs->interface, subs->altset_idx);
+			goto err;
+		}
 	}
 
 	if (!uadev[card_num].ctrl_intf) {
@@ -440,12 +443,15 @@
 		goto err;
 	}
 
-	hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
-					uadev[card_num].ctrl_intf->extralen,
-					NULL, UAC_HEADER);
-	if (!hdr_ptr) {
-		pr_err("%s: no UAC_HEADER desc\n", __func__);
-		goto err;
+	if (protocol != UAC_VERSION_3) {
+		hdr_ptr = snd_usb_find_csint_desc(
+				uadev[card_num].ctrl_intf->extra,
+				uadev[card_num].ctrl_intf->extralen,
+				NULL, UAC_HEADER);
+		if (!hdr_ptr) {
+			pr_err("%s: no UAC_HEADER desc\n", __func__);
+			goto err;
+		}
 	}
 
 	if (protocol == UAC_VERSION_1) {
@@ -473,6 +479,31 @@
 		resp->usb_audio_spec_revision =
 			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
 		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_3) {
+		switch (le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize)) {
+		case BADD_MAXPSIZE_SYNC_MONO_16:
+		case BADD_MAXPSIZE_SYNC_STEREO_16:
+		case BADD_MAXPSIZE_ASYNC_MONO_16:
+		case BADD_MAXPSIZE_ASYNC_STEREO_16: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_16_BIT;
+			break;
+		}
+
+		case BADD_MAXPSIZE_SYNC_MONO_24:
+		case BADD_MAXPSIZE_SYNC_STEREO_24:
+		case BADD_MAXPSIZE_ASYNC_MONO_24:
+		case BADD_MAXPSIZE_ASYNC_STEREO_24: {
+			resp->usb_audio_subslot_size = SUBSLOTSIZE_24_BIT;
+			break;
+		}
+
+		default:
+			pr_err("%d: %u: Invalid wMaxPacketSize\n",
+				subs->interface, subs->altset_idx);
+			ret = -EINVAL;
+			goto err;
+		}
+		resp->usb_audio_subslot_size_valid = 1;
 	} else {
 		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
 		goto err;
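
The QMI service change above derives the sample subslot size the same way
the mixer and stream code derive the channel count: purely from
wMaxPacketSize. A sketch complementing the channel-count example earlier,
again with placeholder packet sizes rather than the real BADD_MAXPSIZE_*
values:

	/* Hypothetical illustration only, not part of this patch. */
	#include <stdio.h>

	/* Placeholder sizes; the kernel code matches BADD_MAXPSIZE_* instead. */
	static int badd_subslot_bytes(unsigned short wMaxPacketSize)
	{
		switch (wMaxPacketSize) {
		case 96:
		case 192:		/* example 16-bit packet sizes */
			return 2;
		case 144:
		case 288:		/* example 24-bit packet sizes */
			return 3;
		default:
			return -1;	/* invalid for a BADD endpoint */
		}
	}

	int main(void)
	{
		printf("subslot bytes for a 288-byte packet: %d\n",
		       badd_subslot_bytes(288));
		return 0;
	}
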
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 9e5fc16..42dfbeb 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -375,6 +375,105 @@
 	 */
 	BPF_FUNC_probe_write_user,
 
+	/**
+	 * int bpf_skb_change_tail(skb, len, flags)
+	 *     The helper will resize the skb to the given new size, to be used e.g.
+	 *     with control messages.
+	 *     @skb: pointer to skb
+	 *     @len: new skb length
+	 *     @flags: reserved
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_tail,
+
+	/**
+	 * int bpf_skb_pull_data(skb, len)
+	 *     The helper will pull in non-linear data in case the skb is non-linear
+	 *     and not all of len is part of the linear section. Only needed for
+	 *     read/write with direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: len to make read/writeable
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_pull_data,
+
+	/**
+	 * s64 bpf_csum_update(skb, csum)
+	 *     Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+	 *     @skb: pointer to skb
+	 *     @csum: csum to add
+	 *     Return: csum on success or negative error
+	 */
+	BPF_FUNC_csum_update,
+
+	/**
+	 * void bpf_set_hash_invalid(skb)
+	 *     Invalidate current skb->hash.
+	 *     @skb: pointer to skb
+	 */
+	BPF_FUNC_set_hash_invalid,
+
+	/**
+	 * int bpf_get_numa_node_id()
+	 *     Return: Id of current NUMA node.
+	 */
+	BPF_FUNC_get_numa_node_id,
+
+	/**
+	 * int bpf_skb_change_head(skb, len, flags)
+	 *     Grows headroom of skb and adjusts MAC header offset accordingly.
+	 *     Will extend/reallocate as required automatically.
+	 *     May change skb data pointer and will thus invalidate any check
+	 *     performed for direct packet access.
+	 *     @skb: pointer to skb
+	 *     @len: length of header to be pushed in front
+	 *     @flags: Flags (unused for now)
+	 *     Return: 0 on success or negative error
+	 */
+	BPF_FUNC_skb_change_head,
+
+	/**
+	 * int bpf_xdp_adjust_head(xdp_md, delta)
+	 *     Adjust the xdp_md.data by delta
+	 *     @xdp_md: pointer to xdp_md
+	 *     @delta: A positive/negative integer to be added to xdp_md.data
+	 *     Return: 0 on success or negative on error
+	 */
+	BPF_FUNC_xdp_adjust_head,
+
+	/**
+	 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+	 *     Copy a NUL terminated string from unsafe address. In case the string
+	 *     length is smaller than size, the target is not padded with further NUL
+	 *     bytes. In case the string length is larger than size, just size-1
+	 *     bytes are copied and the last byte is set to NUL.
+	 *     @dst: destination address
+	 *     @size: maximum number of bytes to copy, including the trailing NUL
+	 *     @unsafe_ptr: unsafe address
+	 *     Return:
+	 *       > 0 length of the string including the trailing NUL on success
+	 *       < 0 error
+	 */
+	BPF_FUNC_probe_read_str,
+
+	/**
+	 * u64 bpf_get_socket_cookie(skb)
+	 *     Get the cookie for the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: 8-byte non-decreasing number on success or 0 if the
+	 *     socket field is missing inside sk_buff
+	 */
+	BPF_FUNC_get_socket_cookie,
+
+	/**
+	 * u32 bpf_get_socket_uid(skb)
+	 *     Get the owner uid of the socket stored inside sk_buff.
+	 *     @skb: pointer to skb
+	 *     Return: uid of the socket owner on success or overflowuid if failed.
+	 */
+	BPF_FUNC_get_socket_uid,
+
 	__BPF_FUNC_MAX_ID,
 };
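
The two socket helpers documented above (bpf_get_socket_cookie and
bpf_get_socket_uid) can be exercised from a cgroup skb program. The
following sketch is illustrative only and not part of this patch; it
assumes libbpf's <bpf/bpf_helpers.h> and a cgroup_skb attach point.

	// SPDX-License-Identifier: GPL-2.0
	/* Illustrative sketch, not part of this patch. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("cgroup_skb/ingress")
	int allow_non_root(struct __sk_buff *skb)
	{
		__u64 cookie = bpf_get_socket_cookie(skb);
		__u32 uid;

		if (!cookie)		/* no socket associated with this skb */
			return 1;	/* allow */

		uid = bpf_get_socket_uid(skb);
		/* a real policy would key a map on uid or cookie here */
		return uid != 0;	/* allow non-root traffic, drop root's */
	}

	char _license[] SEC("license") = "GPL";
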