Merge "ksm: Provide support to use deferred timers for scanner thread" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0450145..baae281 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -263,6 +263,7 @@
 compatible = "qcom,sdm845-cdp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm845-qrd"
 compatible = "qcom,sdm830-sim"
 compatible = "qcom,sdm830-rumi"
 compatible = "qcom,sdm830-cdp"
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
new file mode 100644
index 0000000..dc93b35
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies Camera Clock & Reset Controller Binding
+--------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,cam_cc-sdm845"
+- reg : shall contain base register location and length
+- reg-names: names of registers listed in the same order as in
+	     the reg property.
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 5eee0c9..c38b45c 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -117,6 +117,11 @@
 				type.
 - qcom,sde-highest-bank-bit:	A u32 property to indicate GPU/Camera/Video highest memory
 				bank bit used for tile format buffers.
+- qcom,sde-ubwc-version:	Property to specify the UBWC feature version.
+- qcom,sde-ubwc-static:	Property to specify the default UBWC static
+				configuration value.
+- qcom,sde-ubwc-swizzle:	Property to specify the default UBWC swizzle
+				configuration value.
 - qcom,sde-panic-per-pipe:	Boolean property to indicate if panic signal
 				control feature is available on each source pipe.
 - qcom,sde-has-src-split:	Boolean property to indicate if source split
@@ -388,6 +393,9 @@
     qcom,sde-sspp-linewidth = <2560>;
     qcom,sde-mixer-blendstages = <0x7>;
     qcom,sde-highest-bank-bit = <0x2>;
+    qcom,sde-ubwc-version = <0x100>;
+    qcom,sde-ubwc-static = <0x100>;
+    qcom,sde-ubwc-swizzle = <0>;
     qcom,sde-panic-per-pipe;
     qcom,sde-has-cdp;
     qcom,sde-has-src-split;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 62efecc..3e7fcb7 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -341,6 +341,28 @@
 					2A/2B command.
 - qcom,dcs-cmd-by-left:			Boolean to indicate that dcs command are sent
 					through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled:      Boolean to indicate HDR support in the panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+                                        Array of 8 unsigned integers denoting the chromaticity of the panel.
+                                        The value range is 0 through 50000. To obtain the real chromaticity
+                                        coordinate, divide the value by 50000. The array is laid out in the
+                                        following order:
+                                        value 1: x value of white chromaticity of display panel
+                                        value 2: y value of white chromaticity of display panel
+                                        value 3: x value of red chromaticity of display panel
+                                        value 4: y value of red chromaticity of display panel
+                                        value 5: x value of green chromaticity of display panel
+                                        value 6: y value of green chromaticity of display panel
+                                        value 7: x value of blue chromaticity of display panel
+                                        value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness:  Maximum brightness supported by the panel. In the absence of a maximum
+                                        value, the typical value becomes the peak brightness. The value is
+                                        specified in units of 0.0001 nits; to obtain the real peak brightness
+                                        in nits, divide this value by 10000.
+- qcom,mdss-dsi-panel-blackness-level:  Blackness level supported by the panel. The blackness level is defined
+                                        as the ratio of peak brightness to contrast. The value is specified in
+                                        units of 0.0001 nits; to obtain the real blackness level in nits,
+                                        divide this value by 10000.
 - qcom,mdss-dsi-lp11-init:		Boolean used to enable the DSI clocks and data lanes (low power 11)
 					before issuing hardware reset line.
 - qcom,mdss-dsi-init-delay-us:		Delay in microseconds(us) before performing any DSI activity in lp11
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index f6b7552..a244d6c 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -6,6 +6,10 @@
    * "qcom,i2c-geni.
  - reg: Should contain QUP register address and length.
  - interrupts: Should contain I2C interrupt.
+ - clocks: Serial engine core clock and AHB clocks needed by the device.
+ - pinctrl-names/pinctrl-0/1: The GPIOs assigned to this core. The names
+   should be "active" and "sleep" for the pin configuration when the core is
+   active and when entering the sleep state, respectively.
  - #address-cells: Should be <1> Address cells for i2c device address
  - #size-cells: Should be <0> as i2c addresses have no size component
 
@@ -17,6 +21,13 @@
 	compatible = "qcom,i2c-geni";
 	reg = <0xa94000 0x4000>;
 	interrupts = <GIC_SPI 358 0>;
+	clock-names = "se-clk", "m-ahb", "s-ahb";
+	clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&qup_1_i2c_5_active>;
+	pinctrl-1 = <&qup_1_i2c_5_sleep>;
 	#address-cells = <1>;
 	#size-cells = <0>;
 };
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index bd35d80..b6bc475 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -15,6 +15,8 @@
 - clocks:		List of Phandles for clock device nodes
 			needed by the device.
 - clock-names:		List of clock names needed by the device.
+- #list-cells:		Number of rotator cells, must be 1
+
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing rotator client.
 - qcom,msm-bus,num-cases:	This is the the number of Bus Scaling use cases
@@ -81,6 +83,12 @@
 				  priority for rotator clients.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
+- qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
+- cache-slice-names:		A set of names identifying the use cases of a client that uses a
+				cache slice. These strings are used to look up the cache slice
+				entries by name.
+- cache-slices:			Tuples in which the first argument is a phandle to the LLCC device
+				and the second argument is the use case id of the client.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -102,6 +110,9 @@
 		reg = <0xfd900000 0x22100>,
 			<0xfd925000 0x1000>;
 		reg-names = "mdp_phys", "rot_vbif_phys";
+
+		#list-cells = <1>;
+
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
@@ -131,6 +142,10 @@
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 3>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&mdp_smmu 0xe00>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index b894c31..6d72e8b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -16,25 +16,6 @@
 - interrupts : should contain the vidc interrupt.
 - qcom,platform-version : mask and shift of the platform version bits
     in efuse register.
-- qcom,load-freq-tbl : load (in macroblocks/sec) and corresponding vcodec
-  clock required along with codec's config, which is a bitmap that describes
-  what the clock is used for. The bitmaps are as follows:
-    supports mvc encoder = 0x00000001
-    supports mvc decoder = 0x00000003
-    supports h264 encoder = 0x00000004
-    supports h264 decoder = 0x0000000c
-    supports mpeg1 encoder = 0x00000040
-    supports mpeg1 decoder = 0x000000c0
-    supports mpeg2 encoder = 0x00000100
-    supports mpeg2 decoder = 0x00000300
-    supports vp6 encoder = 0x00100000
-    supports vp6 decoder = 0x00300000
-    supports vp7 encoder = 0x00400000
-    supports vp7 decoder = 0x00c00000
-    supports vp8 encoder = 0x01000000
-    supports vp8 decoder = 0x03000000
-    supports hevc encoder = 0x04000000
-    supports hevc decoder = 0x0c000000
 - qcom,reg-presets : list of offset-value pairs for registers to be written.
   The offsets are from the base offset specified in 'reg'. This is mainly
   used for QoS, VBIF, etc. presets for video.
@@ -57,9 +38,26 @@
 - qcom,clock-freq-tbl = node containing individual domain nodes, each with:
      - qcom,codec-mask: a bitmap of supported codec types, every two bits
        represents a codec type.
+         supports mvc encoder = 0x00000001
+         supports mvc decoder = 0x00000003
+         supports h264 encoder = 0x00000004
+         supports h264 decoder = 0x0000000c
+         supports mpeg1 encoder = 0x00000040
+         supports mpeg1 decoder = 0x000000c0
+         supports mpeg2 encoder = 0x00000100
+         supports mpeg2 decoder = 0x00000300
+         supports vp6 encoder = 0x00100000
+         supports vp6 decoder = 0x00300000
+         supports vp7 encoder = 0x00400000
+         supports vp7 decoder = 0x00c00000
+         supports vp8 encoder = 0x01000000
+         supports vp8 decoder = 0x03000000
+         supports hevc encoder = 0x04000000
+         supports hevc decoder = 0x0c000000
      - qcom,cycles-per-mb: number of cycles required to process each macro
        block.
-     - qcom,low-power-mode-factor: the factor which needs to be multiple with
-       the required frequency to get the final frequency, the factor is
-       represented in Q16 format.
+     - qcom,low-power-cycles-per-mb: number of cycles required to process each
+       macro block in low power mode.
 - qcom,sw-power-collapse = A bool indicating if video hardware core can be
@@ -167,13 +165,6 @@
 		venus-supply = <&gdsc>;
 		venus-core0-supply = <&gdsc1>;
 		venus-core1-supply = <&gdsc2>;
-		qcom,load-freq-tbl =
-			<489600 266670000 0x030fcfff>, /* Legacy decoder 1080p 60fps  */
-			<108000 133330000 0x030fcfff>, /* Legacy decoder 720p 30fps   */
-			<108000 200000000 0x01000414>, /* Legacy encoder 720p 30fps   */
-			<72000 133330000 0x0c000000>, /* HEVC decoder VGA 60fps   */
-			<36000 133330000 0x0c000000>, /* HEVC VGA 30 fps  */
-			<36000 133330000 0x01000414>; /* Legacy encoder VGA 30 fps   */
 		qcom,hfi-version = "3xx";
 		qcom,reg-presets = <0x80004 0x1>,
 			<0x80178 0x00001FFF>;
@@ -190,6 +181,7 @@
 		qcom,use-non-secure-pil;
 		qcom,use_dynamic_bw_update;
 		qcom,fw-bias = <0xe000000>;
+		qcom,allowed-clock-rates = <200000000 300000000 400000000>;
 		msm_vidc_cb1: msm_vidc_cb1 {
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index f842ed6..ea828da 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -87,6 +87,13 @@
 - qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
 - qcom,override-acc-1: Override the default ACC settings with this value if present.
 
+One child node may be specified to represent the MBA image, for the case where
+the MBA image needs to be loaded in a specifically carved-out memory region.
+
+Required properties:
+- compatible: Must be "qcom,pil-mba-mem"
+- memory-region: A phandle that points to a reserved memory where the MBA image will be loaded.
+
 Example:
 	qcom,mss@fc880000 {
 		compatible = "qcom,pil-q6v5-mss";
@@ -126,4 +133,9 @@
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
 		qcom,ssctl-instance-id = <12>;
 		qcom,sysmon-id = <0>;
+
+		qcom,mba-mem@0 {
+			compatible = "qcom,pil-mba-mem";
+			memory-region = <&peripheral_mem>;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 6122f6e..8efa85d 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -185,6 +185,40 @@
 	Definition: The initial temp band considering 0-based index at which
 		    the baseline target quotients are derived and fused.
 
+- qcom,cpr-acd-adj-down-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go down within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 31.
+
+- qcom,cpr-acd-adj-up-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go up within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 7.
+
+- qcom,cpr-acd-adj-down-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD down recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-adj-up-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD up recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-avg-enable
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the CPRh controller
+		    should enable the ACD AVG feature.
+
 =================================================
 Second Level Nodes - CPR Threads for a Controller
 =================================================
diff --git a/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
new file mode 100644
index 0000000..9d3fb78
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
@@ -0,0 +1,26 @@
+QTI USB Audio QMI Device
+
+The USB Audio QMI device is used to attach to the remote processor IOMMU and
+map USB audio driver specific memory to an IOVA that is shared with the
+remote processor.
+
+Required Properties:
+
+- compatible : "qcom,usb-audio-qmi-dev"
+
+- iommus : A list of phandle and IOMMU specifier pairs that describe the
+  IOMMU master interfaces of the device.
+
+- qcom,usb-audio-stream-id : Stream id that is prepended to the IOVA before the
+  IOVA is passed to the remote processor, allowing the remote processor to
+  access the IOVA.
+
+- qcom,usb-audio-intr-num : Interrupter number for the external subsystem
+  destination.
+
+Example:
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&lpass_q6_smmu 12>;
+		qcom,usb-audio-stream-id = <12>;
+		qcom,usb-audio-intr-num = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
new file mode 100644
index 0000000..868a5f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -0,0 +1,53 @@
+GENI based Qualcomm Universal Peripheral (QUP) Serial Peripheral Interface (SPI)
+
+The QUP v3 core is a GENI-based AHB slave that provides a common data path
+(an output FIFO and an input FIFO) for the serial peripheral interface (SPI)
+mini-core.
+
+SPI in master mode supports up to 50 MHz, up to four chip selects, a
+programmable data path from 4 bits to 32 bits, and numerous protocol variants.
+
+Required properties:
+- compatible:	  Should contain "qcom,spi-geni"
+- reg:		  Should contain base register location and length
+- interrupts:	  Interrupt number used by this controller
+- clocks:	  Should contain the serial engine core clock and the AHB clocks.
+- clock-names:	  Should be "se-clk" for the core clock and "m-ahb"/"s-ahb" for
+		  the AHB clocks, as shown in the example below.
+- pinctrl-names:  Should contain "default" and "sleep" for the pin
+		  configurations used during the active use case and during idle.
+- pinctrl-x:	  phandle to the default/sleep pin configurations.
+- #address-cells: Number of cells required to define a chip select
+		  address on the SPI bus. Should be set to 1.
+- #size-cells:	  Should be zero.
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+		     Units - Hz. Definition as per
+		     Documentation/devicetree/bindings/spi/spi-bus.txt
+
+SPI slave nodes must be children of the SPI master node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	qupv3_spi10: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qup_1_spi_2_active>;
+		pinctrl-1 = <&qup_1_spi_2_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		spi-max-frequency = <19200000>;
+
+		dev@0 {
+			compatible = "dummy,slave";
+			reg = <0>;
+			spi-max-frequency = <9600000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index ad4adf0..d20a7cb 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -43,7 +43,7 @@
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk" is an optional clock.
+   property. "cfg_ahb_clk" and "com_aux_clk" are optional clocks.
  - qcom,vbus-valid-override: If present, indicates VBUS pin is not connected to
    the USB PHY and the controller must rely on external VBUS notification in
    order to manually relay the notification to the SSPHY.
@@ -91,6 +91,10 @@
 	"vdd" : vdd supply for digital circuit operation
 	"vdda18" : 1.8v high-voltage analog supply
 	"vdda33" : 3.3v high-voltage analog supply
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. "ref_clk_src" is a mandatory clock.
  - qcom,vdd-voltage-level: This property must be a list of three integer
    values (no, min, max) where each value represents either a voltage in
    microvolts or a value corresponding to voltage corner
@@ -109,7 +113,7 @@
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk", "ref_clk_src" and "ref_clk" are optional clocks.
+   property. "cfg_ahb_clk" and "ref_clk" are optional clocks.
  - qcom,qusb-phy-init-seq: QUSB PHY initialization sequence with value,reg pair.
  - qcom,qusb-phy-host-init-seq: QUSB PHY initialization sequence for host mode
    with value,reg pair.
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
index 54d342c..ab2bbe4 100644
--- a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -43,6 +43,9 @@
 - qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
 			from an external regulator. If omitted, then it is
 			assumed it is connected to VBUS.
+- qcom,default-sink-caps: List of 32-bit values representing the nominal sink
+			capabilities in voltage (millivolts) and current
+			(milliamps) pairs.
 
 Example:
 	qcom,qpnp-pdphy@1700 {
@@ -64,4 +67,8 @@
 				  "msg-tx-failed",
 				  "msg-tx-discarded",
 				  "msg-rx-discarded";
+
+		qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+					 <9000 3000>, /* 9V @ 3A */
+					 <12000 2250>; /* 12V @ 2.25A */
 	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4d6cdcf..9877ebf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -848,6 +848,12 @@
 			seconds. Defaults to 10*60 = 10mins. A value of 0
 			disables the blank timer.
 
+	core_ctl_disable_cpumask= [SMP]
+			Exempt the specified CPUs from being managed by
+			core_ctl. core_ctl operates on a cluster basis, so
+			all the CPUs in a given cluster must be specified
+			to disable core_ctl for that cluster.
+
 	coredump_filter=
 			[KNL] Change the default value for
 			/proc/<pid>/coredump_filter.
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 09b7dc1..f485dc8 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -910,7 +910,7 @@
 CPU. The same applies to nt_curr_runnable_sum and  nt_prev_runnable_sum.
 
 A 'new' task is defined as a task whose number of active windows since fork is
-less than sysctl_sched_new_task_windows. An active window is defined as a window
+less than SCHED_NEW_TASK_WINDOWS. An active window is defined as a window
 where a task was observed to be runnable.
 
 *** 6.2 Per-task window-based stats
diff --git a/Makefile b/Makefile
index 5bb6e42..9ec83a0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 15
+SUBLEVEL = 16
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 30ee6e7..3eea0af 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -8,7 +8,8 @@
 	sdm845-cdp.dtb \
 	sdm845-v2-rumi.dtb \
 	sdm845-v2-mtp.dtb \
-	sdm845-v2-cdp.dtb
+	sdm845-v2-cdp.dtb \
+	sdm845-qrd.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index b589fe5..3497e50 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -32,6 +32,19 @@
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+		attach-impl-defs =
+				<0x6000 0x2378>,
+				<0x6060 0x1055>,
+				<0x678c 0x8>,
+				<0x6794 0x28>,
+				<0x6800 0x6>,
+				<0x6900 0x3ff>,
+				<0x6924 0x204>,
+				<0x6928 0x11000>,
+				<0x6930 0x800>,
+				<0x6960 0xffffffff>,
+				<0x6b64 0x1a5551>,
+				<0x6b68 0x9a82a382>;
 	};
 
 	apps_smmu: apps-smmu@0x15000000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 6d6f775..c5b53b8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -238,6 +238,61 @@
 			};
 		};
 
+		pmx_sde: pmx_sde {
+			sde_dsi_active: sde_dsi_active {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable = <0>;   /* no pull */
+				};
+			};
+			sde_dsi_suspend: sde_dsi_suspend {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
+		pmx_sde_te {
+			sde_te_active: sde_te_active {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+
+			sde_te_suspend: sde_te_suspend {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
new file mode 100644
index 0000000..228b924
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
new file mode 100644
index 0000000..6ea92ee
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -0,0 +1,11 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 228bbb3..fae01ca 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -26,30 +26,253 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
-
-	apc0_pwrcl_vreg: regulator-pwrcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_pwrcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <23>;
-	};
-
-	apc0_l3_vreg: regulator-l3 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_l3_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <19>;
-	};
-
-	apc1_perfcl_vreg: regulator-perfcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc1_perfcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <26>;
-	};
 };
 
 &soc {
+	/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@17dc0000 {
+		compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
+		reg =	<0x17dc0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17840000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x3A00000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17dc3a84 0x17dc3a88 0x17840c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_SILVER_CPRH_STATUS_0",
+			"APSS_SILVER_CPRH_STATUS_1",
+			"SILVER_SAW4_PMIC_STS";
+
+		thread@1 {
+			qcom,cpr-thread-id = <1>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <17>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <17>;
+				qcom,cpr-corners = <17>;
+
+				qcom,cpr-corner-fmax-map = <6 12 17>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  688000
+					 688000  756000  756000  756000  812000
+					 812000  812000  872000  872000  872000
+					 872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
+					 632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_l3_vreg: regulator {
+				regulator-name = "apc0_l3_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <9>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <9>;
+				qcom,cpr-corners = <9>;
+
+				qcom,cpr-corner-fmax-map = <4 7 9>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  756000
+					 812000  812000  872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@17db0000 {
+		compatible = "qcom,cprh-sdm845-kbss-regulator";
+		reg =	<0x17db0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17830000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <800000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,mem-acc-threshold-voltage = <852000>;
+		qcom,mem-acc-crossover-voltage = <852000>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x4200000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17db3a84 0x17830c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <24>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <22>;
+				qcom,cpr-corners = <22>;
+
+				qcom,cpr-corner-fmax-map =
+					<10 17 22>;
+
+				qcom,cpr-voltage-ceiling =
+					<756000  756000  756000  756000  756000
+					 756000  756000  756000  756000  756000
+					 812000  812000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1056000 1056000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
 	/* RPMh regulators: */
 
 	/* PM8998 S1 = VDD_EBI supply */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 221eb38..0f31c0a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -29,7 +29,7 @@
 	};
 };
 
-&usb3 {
+&usb0 {
 	/delete-property/ qcom,usb-dbm;
 	qcom,charging-disabled;
 	dwc3@a600000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 663ff7e..124ed99 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -109,6 +109,14 @@
 	};
 };
 
+&apc0_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
+&apc1_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ca3c4fa..8203ee1 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -77,6 +77,10 @@
 		qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
 		qcom,sde-dsc-size = <0x140>;
 
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>;
+		qcom,sde-dither-version = <0x00010000>;
+		qcom,sde-dither-size = <0x20>;
+
 		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
 
 		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 06879c2..5399e99 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -13,7 +13,8 @@
 
 #include <dt-bindings/clock/qcom,gcc-sdm845.h>
 &soc {
-	usb3: ssusb@a600000 {
+	/* Primary USB port related DWC3 controller */
+	usb0: ssusb@a600000 {
 		compatible = "qcom,dwc-usb3-msm";
 		reg = <0x0a600000 0xf8c00>,
 		      <0x088ee000 0x400>;
@@ -22,8 +23,8 @@
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <0 346 0>, <0 130 0>;
-		interrupt-names = "hs_phy_irq", "pwr_event_irq";
+		interrupts = <0 489 0>, <0 130 0>, <0 486 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
 
 		USB3_GDSC-supply = <&usb30_prim_gdsc>;
 		qcom,usb-dbm = <&dbm_1p5>;
@@ -58,9 +59,11 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			maximum-speed = "high-speed";
 		};
 	};
 
+	/* Primary USB port related QUSB2 PHY */
 	qusb_phy0: qusb@88e2000 {
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>;
@@ -71,21 +74,23 @@
 		vdda33-supply = <&pm8998_l24>;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-init-seq =
-				/* <value reg_offset> */
-					<0x13 0x04
-					0x7c 0x18c
-					0x80 0x2c
-					0x0a 0x184
-					0x00 0x240>;
+				     /* <value reg_offset> */
+					<0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+					0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+					0x80 0x2c  /* PLL_CMODE */
+					0x0a 0x184 /* PLL_LOCK_DELAY */
+					0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+					0xa5 0x240 /* TUNE1 */
+					0x09 0x244 /* TUNE2 */
+					0x00 0x220 /* IMP_CTRL1 */
+					0x58 0x224>; /* IMP_CTRL2 */
 		phy_type= "utmi";
-		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
-			 <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
-		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
 
-		resets = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_BCR>;
+		resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
 		reset-names = "phy_reset";
-
 	};
 
 	dbm_1p5: dbm@a8f8000 {
@@ -97,4 +102,217 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	/* Secondary USB port related DWC3 controller */
+	usb1: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a800000 0xf8c00>,
+		      <0x088ee000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 491 0>, <0 135 0>, <0 487 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
+
+		USB3_GDSC-supply = <&usb30_sec_gdsc>;
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+
+		clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
+			 <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
+
+		qcom,core-clk-rate = <133333333>;
+		qcom,core-clk-rate-hs = <66666667>;
+
+		resets = <&clock_gcc GCC_USB30_SEC_BCR>;
+		reset-names = "core_reset";
+		status = "disabled";
+
+		dwc3@a800000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a800000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 138 0>;
+			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
+			tx-fifo-resize;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+		};
+	};
+
+	/* Secondary USB port related QUSB2 PHY */
+	qusb_phy1: qusb@88e3000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x088e3000 0x400>;
+		reg-names = "qusb_phy_base";
+
+		vdd-supply = <&pm8998_l1>;
+		vdda18-supply = <&pm8998_l12>;
+		vdda33-supply = <&pm8998_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,qusb-phy-init-seq =
+				     /* <value reg_offset> */
+					<0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+					0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+					0x80 0x2c  /* PLL_CMODE */
+					0x0a 0x184 /* PLL_LOCK_DELAY */
+					0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+					0xa5 0x240 /* TUNE1 */
+					0x09 0x244 /* TUNE2 */
+					0x00 0x220 /* IMP_CTRL1 */
+					0x58 0x224>; /* IMP_CTRL2 */
+		phy_type= "utmi";
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
+
+		resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>;
+		reset-names = "phy_reset";
+		status = "disabled";
+	};
+
+	/* Secondary USB port related QMP PHY */
+	usb_qmp_phy: ssphy@88eb000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x88eb000 0x1000>,
+			<0x01fcbff0 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg";
+
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l26>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+		/* <reg_offset, value, delay> */
+			<0x048 0x07 0x00 /* QSERDES_COM_PLL_IVCO */
+			 0x080 0x14 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */
+			 0x034 0x04 0x00 /* QSERDES_COM_BIAS_EN_CLKBUFLR_EN */
+			 0x138 0x30 0x00 /* QSERDES_COM_CLK_SELECT */
+			 0x03c 0x02 0x00 /* QSERDES_COM_SYS_CLK_CTRL */
+			 0x08c 0x08 0x00 /* QSERDES_COM_RESETSM_CNTRL2 */
+			 0x15c 0x06 0x00 /* QSERDES_COM_CMN_CONFIG */
+			 0x164 0x01 0x00 /* QSERDES_COM_SVS_MODE_CLK_SEL */
+			 0x13c 0x80 0x00 /* QSERDES_COM_HSCLK_SEL */
+			 0x0b0 0x82 0x00 /* QSERDES_COM_DEC_START_MODE0 */
+			 0x0b8 0xab 0x00 /* QSERDES_COM_DIV_FRAC_START1_MODE0 */
+			 0x0bc 0xea 0x00 /* QSERDES_COM_DIV_FRAC_START2_MODE0 */
+			 0x0c0 0x02 0x00 /* QSERDES_COM_DIV_FRAC_START3_MODE0 */
+			 0x060 0x06 0x00 /* QSERDES_COM_CP_CTRL_MODE0 */
+			 0x068 0x16 0x00 /* QSERDES_COM_PLL_RCTRL_MODE0 */
+			 0x070 0x36 0x00 /* QSERDES_COM_PLL_CCTRL_MODE0 */
+			 0x0dc 0x00 0x00 /* QSERDES_COM_INTEGLOOP_GAIN1_MODE0 */
+			 0x0d8 0x3f 0x00 /* QSERDES_COM_INTEGLOOP_GAIN0_MODE0 */
+			 0x0f8 0x01 0x00 /* QSERDES_COM_VCO_TUNE2_MODE0 */
+			 0x0f4 0xc9 0x00 /* QSERDES_COM_VCO_TUNE1_MODE0 */
+			 0x148 0x0a 0x00 /* QSERDES_COM_CORECLK_DIV_MODE0 */
+			 0x0a0 0x00 0x00 /* QSERDES_COM_LOCK_CMP3_MODE0 */
+			 0x09c 0x34 0x00 /* QSERDES_COM_LOCK_CMP2_MODE0 */
+			 0x098 0x15 0x00 /* QSERDES_COM_LOCK_CMP1_MODE0 */
+			 0x090 0x04 0x00 /* QSERDES_COM_LOCK_CMP_EN */
+			 0x154 0x00 0x00 /* QSERDES_COM_CORE_CLK_EN */
+			 0x094 0x00 0x00 /* QSERDES_COM_LOCK_CMP_CFG */
+			 0x0f0 0x00 0x00 /* QSERDES_COM_VCO_TUNE_MAP */
+			 0x040 0x0a 0x00 /* QSERDES_COM_SYSCLK_BUF_ENABLE */
+			 0x0d0 0x80 0x00 /* QSERDES_COM_INTEGLOOP_INITVAL */
+			 0x010 0x01 0x00 /* QSERDES_COM_SSC_EN_CENTER */
+			 0x01c 0x31 0x00 /* QSERDES_COM_SSC_PER1 */
+			 0x020 0x01 0x00 /* QSERDES_COM_SSC_PER2 */
+			 0x014 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER1 */
+			 0x018 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER2 */
+			 0x024 0x85 0x00 /* QSERDES_COM_SSC_STEP_SIZE1 */
+			 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2 */
+			 0x4c0 0x0c 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */
+			 0x564 0x50 0x00 /* QSERDES_RX_RX_MODE_00 */
+			 0x430 0x0b 0x00 /* QSERDES_RX_UCDR_FASTLOCK_FO_GAIN */
+			 0x4d4 0x0e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x4d8 0x4e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x4dc 0x18 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x4f8 0x77 0x00 /* RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x4fc 0x80 0x00 /* RX_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x504 0x03 0x00 /* QSERDES_RX_SIGDET_CNTRL */
+			 0x50c 0x1c 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */
+			 0x434 0x75 0x00 /* RX_UCDR_SO_SATURATION_AND_ENABLE */
+			 0x444 0x80 0x00 /* QSERDES_RX_UCDR_PI_CONTROLS */
+			 0x408 0x0a 0x00 /* QSERDES_RX_UCDR_FO_GAIN */
+			 0x40c 0x06 0x00 /* QSERDES_RX_UCDR_SO_GAIN */
+			 0x500 0x00 0x00 /* QSERDES_RX_SIGDET_ENABLES */
+			 0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
+			 0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
+			 0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */
+			 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
+			 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
+			 0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
+			 0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
+			 0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
+			 0x8d4 0x40 0x00 /* USB3_UNI_PCS_FLL_MAN_CODE */
+			 0x8c4 0x02 0x00 /* USB3_UNI_PCS_FLL_CNTRL1 */
+			 0x864 0x1b 0x00 /* USB3_UNI_PCS_POWER_STATE_CONFIG2 */
+			 0x80c 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V0 */
+			 0x810 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V1 */
+			 0x814 0xb5 0x00 /* USB3_UNI_PCS_TXMGN_V2 */
+			 0x818 0x4c 0x00 /* USB3_UNI_PCS_TXMGN_V3 */
+			 0x81c 0x64 0x00 /* USB3_UNI_PCS_TXMGN_V4 */
+			 0x820 0x6a 0x00 /* USB3_UNI_PCS_TXMGN_LS */
+			 0x824 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V0 */
+			 0x828 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V0 */
+			 0x82c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V1 */
+			 0x830 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V1 */
+			 0x834 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V2 */
+			 0x838 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V2 */
+			 0x83c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V3 */
+			 0x840 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V3 */
+			 0x844 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V4 */
+			 0x848 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V4 */
+			 0x84c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_LS */
+			 0x850 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_LS */
+			 0x85c 0x02 0x00 /* USB3_UNI_PCS_RATE_SLEW_CNTRL */
+			 0x8a0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
+			 0x88c 0x44 0x00 /* USB3_UNI_PCS_TSYNC_RSYNC_TIME */
+			 0x880 0xd1 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */
+			 0x884 0x1f 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */
+			 0x888 0x47 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */
+			 0x870 0xe7 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */
+			 0x874 0x03 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */
+			 0x878 0x40 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_L */
+			 0x87c 0x00 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_H */
+			 0x9d8 0xba 0x00 /* USB3_UNI_PCS_RX_SIGDET_LVL */
+			 0x8b8 0x75 0x00 /* RXEQTRAINING_WAIT_TIME */
+			 0x8b0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
+			 0x8bc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+			 0xa0c 0x21 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */
+			 0xa10 0x60 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG2 */
+			 0xffffffff 0xffffffff 0x00>;
+
+		qcom,qmp-phy-reg-offset =
+				<0x974 /* USB3_UNI_PCS_PCS_STATUS */
+				 0x8d8 /* USB3_UNI_PCS_AUTONOMOUS_MODE_CTRL */
+				 0x8dc /* USB3_UNI_PCS_LFPS_RXTERM_IRQ_CLEAR */
+				 0x804 /* USB3_UNI_PCS_POWER_DOWN_CONTROL */
+				 0x800 /* USB3_UNI_PCS_SW_RESET */
+				 0x808>; /* USB3_UNI_PCS_START_CONTROL */
+
+		clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+		status = "disabled";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 9545581..ed4956f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,50 +15,128 @@
 #include <dt-bindings/clock/qcom,videocc-sdm845.h>
 
 &soc {
-	msm_vidc: qcom,vidc@cc00000 {
-		  compatible = "qcom,msm-vidc";
-		  status = "disabled";
-		  reg = <0xcc00000 0x100000>;
-		  interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-		  qcom,debug-timeout;
-		  qcom,reg-presets =
-			 <0x80124 0x00000003>,
-			 <0x80550 0x01111111>,
-			 <0x80560 0x01111111>,
-			 <0x80568 0x01111111>,
-			 <0x80570 0x01111111>,
-			 <0x80580 0x01111111>,
-			 <0x80588 0x01111111>,
-			 <0xe2010 0x00000000>;
-		  vdd-supply = <&venus_gdsc>;
-		  venus-core0-supply = <&vcodec0_gdsc>;
-		  venus-core1-supply = <&vcodec1_gdsc>;
-		  clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
-		  clock-names = "core_clk", "iface_clk", "bus_clk",
+	msm_vidc: qcom,vidc@aa00000 {
+		compatible = "qcom,msm-vidc";
+		status = "disabled";
+		reg = <0xaa00000 0x200000>;
+		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hfi = "venus";
+		qcom,firmware-name = "venus";
+		qcom,max-secure-instances = <5>;
+		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+
+		/* Supply */
+		venus-supply = <&venus_gdsc>;
+		venus-core0-supply = <&vcodec0_gdsc>;
+		venus-core1-supply = <&vcodec1_gdsc>;
+
+		/* Clocks */
+		clock-names = "core_clk", "iface_clk", "bus_clk",
 			"core0_clk", "core1_clk";
-		  qcom,proxy-clock-names = "core_clk", "iface_clk",
+		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
+		qcom,proxy-clock-names = "core_clk", "iface_clk",
 			"bus_clk", "core0_clk", "core1_clk";
-		  qcom,clock-configs = <0x1 0x1 0x1 0x1 0x1>;
-		  qcom,proxy-reg-names = "vdd";
-		  bus_cnoc {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "cnoc";
-			  qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			  qcom,bus-governor = "performance";
-			  qcom,bus-range-kbps = <1 1>;
-		  };
-		  venus_bus_ddr {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "venus-ddr";
-			  qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			  qcom,bus-governor = "msm-vidc-ddr";
-			  qcom,bus-range-kbps = <1000 3388000>;
-		  };
-	  };
+		qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0>;
+		qcom,allowed-clock-rates = <200000000 320000000 380000000
+			444000000 533000000>;
+		qcom,clock-freq-tbl {
+			qcom,profile-enc {
+				qcom,codec-mask = <0x55555555>;
+				qcom,vpp-cycles-per-mb = <675>;
+				qcom,vsp-cycles-per-mb = <125>;
+				qcom,low-power-cycles-per-mb = <320>;
+			};
+			qcom,profile-dec {
+				qcom,codec-mask = <0xffffffff>;
+				qcom,vpp-cycles-per-mb = <200>;
+				qcom,vsp-cycles-per-mb = <50>;
+			};
+		};
+
+		/* Buses */
+		bus_cnoc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "cnoc";
+			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
+
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 3388000>;
+		};
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
+
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus =
+				<&apps_smmu 0x10a0>,
+				<&apps_smmu 0x10a8>,
+				<&apps_smmu 0x10b0>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x70800000 0x6f800000>;
+		};
+
+		firmware_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			qcom,fw-context-bank;
+			iommus =
+				<&apps_smmu 0x10b2>;
+		};
+
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus =
+				<&apps_smmu 0x10a1>,
+				<&apps_smmu 0x10a9>,
+				<&apps_smmu 0x10a5>,
+				<&apps_smmu 0x10ad>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus =
+				<&apps_smmu 0x10a3>,
+				<&apps_smmu 0x10ab>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus =
+				<&apps_smmu 0x10a4>,
+				<&apps_smmu 0x10ac>,
+				<&apps_smmu 0x10b4>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index eb2c066..2edd958 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -42,6 +42,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_0>;
@@ -72,6 +73,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x100>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_1>;
@@ -96,6 +98,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x200>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_2>;
@@ -120,6 +123,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x300>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_3>;
@@ -144,6 +148,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x400>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_4>;
@@ -168,6 +173,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x500>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_5>;
@@ -192,6 +198,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x600>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_6>;
@@ -216,6 +223,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x700>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_7>;
@@ -511,9 +519,12 @@
 		#reset-cells = <1>;
 	};
 
-	clock_camcc: qcom,camcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "camcc_clocks";
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 0f711a0..d07c3a5 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -307,6 +307,7 @@
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -335,6 +336,7 @@
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -385,6 +387,7 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 8554526..36d878f 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -275,6 +275,7 @@
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HVC_DCC=y
+CONFIG_HVC_DCC_SERIALIZE_SMP=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -318,6 +319,7 @@
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -345,6 +347,7 @@
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -403,6 +406,7 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -414,6 +418,8 @@
 CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 5d83ff7..ec8e968 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -67,8 +67,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 2b74aee..e582069 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -133,7 +133,7 @@
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_SCSI_PMCRAID=m
 CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
@@ -205,7 +205,6 @@
 # CONFIG_MLX4_DEBUG is not set
 CONFIG_TEHUTI=m
 CONFIG_BNX2X=m
-CONFIG_QLGE=m
 CONFIG_SFC=m
 CONFIG_BE2NET=m
 CONFIG_LIBERTAS_THINFIRM=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 5da76e0..0cdb431 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -39,7 +39,7 @@
 CONFIG_PM_STD_PARTITION="/dev/hda3"
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEBUG=y
-CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 58d43f3..078ecac 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -59,8 +59,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index c8f7e28..e233f87 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -60,8 +60,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index d2f54e5..fbe085c 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -59,8 +59,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 3d0d9cb..2942610 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -61,8 +61,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index b496c25..07d0182 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -110,7 +110,7 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 8e99ad8..f59969a 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -90,7 +90,7 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
index 4775a11..24d5e31 100644
--- a/arch/mips/include/asm/mach-ip27/spaces.h
+++ b/arch/mips/include/asm/mach-ip27/spaces.h
@@ -12,14 +12,16 @@
 
 /*
  * IP27 uses the R10000's uncached attribute feature.  Attribute 3 selects
- * uncached memory addressing.
+ * uncached memory addressing. Hide these definitions when building the
+ * 32-bit compat-vdso code.
  */
-
+#ifdef CONFIG_64BIT
 #define HSPEC_BASE		0x9000000000000000
 #define IO_BASE			0x9200000000000000
 #define MSPEC_BASE		0x9400000000000000
 #define UNCAC_BASE		0x9600000000000000
 #define CAC_BASE		0xa800000000000000
+#endif
 
 #define TO_MSPEC(x)		(MSPEC_BASE | ((x) & TO_PHYS_MASK))
 #define TO_HSPEC(x)		(HSPEC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 5a73c5e..23198c9 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -30,8 +30,10 @@
 	return soc_info.sys_type;
 }
 
-static __init void prom_init_cmdline(int argc, char **argv)
+static __init void prom_init_cmdline(void)
 {
+	int argc;
+	char **argv;
 	int i;
 
 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
@@ -60,14 +62,11 @@
 
 void __init prom_init(void)
 {
-	int argc;
-	char **argv;
-
 	prom_soc_init(&soc_info);
 
 	pr_info("SoC Type: %s\n", get_system_type());
 
-	prom_init_cmdline(argc, argv);
+	prom_init_cmdline();
 }
 
 void __init prom_free_prom_memory(void)
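The prom.c hunk fixes prom_init_cmdline() being called with uninitialized argc/argv from prom_init(); both values are now locals of prom_init_cmdline() itself. The body that fills them is outside the hunk, but judging from the fw_arg0/fw_arg1 values already printed by the pr_debug() shown, it presumably does something along the lines of:

	argc = fw_arg0;
	argv = (char **)KSEG1ADDR(fw_arg1);
	/* ... then walk argv[0..argc-1] and append each entry to the kernel command line ... */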
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 285796e..2b76e36 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -40,16 +40,6 @@
 	{ 0 }
 };
 
-static void rt288x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_CLKCFG);
-	t |= CLKCFG_SRAM_CS_N_WDT;
-	rt_sysc_w32(t, SYSC_REG_CLKCFG);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, wmac_rate = 40000000;
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index c8a28c4b..e778e0b 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -89,17 +89,6 @@
 	{ 0 }
 };
 
-static void rt305x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
-	t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
-		RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
-	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
-}
-
 static unsigned long rt5350_get_mem_size(void)
 {
 	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 4cef916..3e0aa09 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -63,16 +63,6 @@
 	{ 0 }
 };
 
-static void rt3883_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on GPIO 2 */
-	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
-	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
-	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, sys_rate;
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index 8077ff3..d4469b2 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -71,11 +71,6 @@
 	return err;
 }
 
-static void rt_timer_free(struct rt_timer *rt)
-{
-	free_irq(rt->irq, rt);
-}
-
 static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
 {
 	if (rt->timer_freq < divisor)
@@ -101,15 +96,6 @@
 	return 0;
 }
 
-static void rt_timer_disable(struct rt_timer *rt)
-{
-	u32 t;
-
-	t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
-	t &= ~TMR0CTL_ENABLE;
-	rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
-}
-
 static int rt_timer_probe(struct platform_device *pdev)
 {
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
index b7a4b7e..e8f6b3a 100644
--- a/arch/mips/sgi-ip22/Platform
+++ b/arch/mips/sgi-ip22/Platform
@@ -25,7 +25,7 @@
 # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
 #
 ifdef CONFIG_SGI_IP28
-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
+  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
   endif
 endif
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 3362299..6ca3b90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1807,8 +1807,6 @@
 		goto instr_done;
 
 	case LARX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1832,8 +1830,6 @@
 		goto ldst_done;
 
 	case STCX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1859,8 +1855,6 @@
 		goto ldst_done;
 
 	case LOAD:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
 		if (!err) {
 			if (op.type & SIGNEXT)
@@ -1872,8 +1866,6 @@
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
 		else
@@ -1882,15 +1874,11 @@
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
@@ -1913,8 +1901,6 @@
 		goto instr_done;
 
 	case STORE:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if ((op.type & UPDATE) && size == sizeof(long) &&
 		    op.reg == 1 && op.update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
@@ -1927,8 +1913,6 @@
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
 		else
@@ -1937,15 +1921,11 @@
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index c96c0cb..32c46b4 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@
 
 static void icp_opal_set_cpu_priority(unsigned char cppr)
 {
+	/*
+	 * Here be dragons. The caller has asked to allow only IPIs and not
+	 * external interrupts. But OPAL XIVE doesn't support that. So instead
+	 * of allowing no interrupts, allow all. That's still not right, but
+	 * currently the only caller who does this is xics_migrate_irqs_away()
+	 * and it works in that case.
+	 */
+	if (cppr >= DEFAULT_PRIORITY)
+		cppr = LOWEST_PRIORITY;
+
 	xics_set_base_cppr(cppr);
 	opal_int_set_cppr(cppr);
 	iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e..23efe4e 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/delay.h>
 
 #include <asm/prom.h>
 #include <asm/io.h>
@@ -198,9 +199,6 @@
 	/* Remove ourselves from the global interrupt queue */
 	xics_set_cpu_giq(xics_default_distrib_server, 0);
 
-	/* Allow IPIs again... */
-	icp_ops->set_priority(DEFAULT_PRIORITY);
-
 	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
@@ -255,6 +253,19 @@
 unlock:
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
+
+	/* Allow "sufficient" time to drain any in-flight IRQs */
+	mdelay(5);
+
+	/*
+	 * Allow IPIs again. This is done at the very end, after migrating all
+	 * interrupts; the expectation is that we'll only get woken up by an IPI
+	 * interrupt beyond this point, but leave externals masked just to be
+	 * safe. If we're using icp-opal this may actually allow all
+	 * interrupts anyway, but that should be OK.
+	 */
+	icp_ops->set_priority(DEFAULT_PRIORITY);
+
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d56ef26..7678f79 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -606,12 +606,29 @@
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 	pgste_t pgste;
 	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return false;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return false;
+	/* We can't run guests backed by huge pages, but userspace can
+	 * still set them up and then try to migrate them without any
+	 * migration support.
+	 */
+	if (pmd_large(*pmd))
+		return true;
+
+	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (unlikely(!ptep))
 		return false;
 
diff --git a/crypto/Makefile b/crypto/Makefile
index bd6a029..9e52b3c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,6 +71,7 @@
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
+CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
@@ -94,6 +95,7 @@
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
 obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1bd8401..8e92eea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -406,6 +406,7 @@
 	struct files_struct *files = proc->files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
 	if (files == NULL)
 		return -ESRCH;
@@ -416,7 +417,11 @@
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_enable_no_resched();
+	ret = __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_disable();
+
+	return ret;
 }
 
 /*
@@ -425,8 +430,11 @@
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-	if (proc->files)
+	if (proc->files) {
+		preempt_enable_no_resched();
 		__fd_install(proc->files, fd, file);
+		preempt_disable();
+	}
 }
 
 /*
@@ -454,6 +462,7 @@
 {
 	trace_binder_lock(tag);
 	mutex_lock(&binder_main_lock);
+	preempt_disable();
 	trace_binder_locked(tag);
 }
 
@@ -461,8 +470,62 @@
 {
 	trace_binder_unlock(tag);
 	mutex_unlock(&binder_main_lock);
+	preempt_enable();
 }
 
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+	void *ptr;
+
+	ptr = kzalloc(size, GFP_NOWAIT);
+	if (ptr)
+		return ptr;
+
+	preempt_enable_no_resched();
+	ptr = kzalloc(size, GFP_KERNEL);
+	preempt_disable();
+
+	return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_to_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_from_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+#define get_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = get_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
+#define put_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = put_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
 static void binder_set_nice(long nice)
 {
 	long min_nice;
@@ -595,6 +658,8 @@
 	else
 		mm = get_task_mm(proc->tsk);
 
+	preempt_enable_no_resched();
+
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = proc->vma;
@@ -649,6 +714,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+
 	return 0;
 
 free_range:
@@ -671,6 +739,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+
 	return -ENOMEM;
 }
 
@@ -939,7 +1010,7 @@
 			return NULL;
 	}
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc_preempt_disabled(sizeof(*node));
 	if (node == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_NODE);
@@ -1083,7 +1154,7 @@
 		else
 			return ref;
 	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
 	if (new_ref == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_REF);
@@ -1955,14 +2026,14 @@
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	t = kzalloc_preempt_disabled(sizeof(*t));
 	if (t == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 
-	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
 	if (tcomplete == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_tcomplete_failed;
@@ -2023,14 +2094,14 @@
 				      ALIGN(tr->data_size, sizeof(void *)));
 	offp = off_start;
 
-	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
 			   tr->data.ptr.buffer, tr->data_size)) {
 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
 		goto err_copy_data_failed;
 	}
-	if (copy_from_user(offp, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
 			   tr->data.ptr.offsets, tr->offsets_size)) {
 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 				proc->pid, thread->pid);
@@ -2148,9 +2219,10 @@
 				return_error = BR_FAILED_REPLY;
 				goto err_bad_offset;
 			}
-			if (copy_from_user(sg_bufp,
-					   (const void __user *)(uintptr_t)
-					   bp->buffer, bp->length)) {
+			if (copy_from_user_preempt_disabled(
+					sg_bufp,
+					(const void __user *)(uintptr_t)
+					bp->buffer, bp->length)) {
 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
@@ -2257,7 +2329,7 @@
 	void __user *end = buffer + size;
 
 	while (ptr < end && thread->return_error == BR_OK) {
-		if (get_user(cmd, (uint32_t __user *)ptr))
+		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		trace_binder_command(cmd);
@@ -2275,7 +2347,7 @@
 			struct binder_ref *ref;
 			const char *debug_string;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			if (target == 0 && context->binder_context_mgr_node &&
@@ -2327,10 +2399,10 @@
 			binder_uintptr_t cookie;
 			struct binder_node *node;
 
-			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			node = binder_get_node(proc, node_ptr);
@@ -2388,7 +2460,7 @@
 			binder_uintptr_t data_ptr;
 			struct binder_buffer *buffer;
 
-			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 
@@ -2430,7 +2502,8 @@
 		case BC_REPLY_SG: {
 			struct binder_transaction_data_sg tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr,
+							    sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr.transaction_data,
@@ -2441,7 +2514,7 @@
 		case BC_REPLY: {
 			struct binder_transaction_data tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr,
@@ -2492,10 +2565,10 @@
 			struct binder_ref *ref;
 			struct binder_ref_death *death;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			ref = binder_get_ref(proc, target, false);
@@ -2524,7 +2597,7 @@
 						proc->pid, thread->pid);
 					break;
 				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				death = kzalloc_preempt_disabled(sizeof(*death));
 				if (death == NULL) {
 					thread->return_error = BR_ERROR;
 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2578,8 +2651,7 @@
 			struct binder_work *w;
 			binder_uintptr_t cookie;
 			struct binder_ref_death *death = NULL;
-
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 
 			ptr += sizeof(cookie);
@@ -2611,7 +2683,8 @@
 					wake_up_interruptible(&proc->wait);
 				}
 			}
-		} break;
+		}
+		break;
 
 		default:
 			pr_err("%d:%d unknown command %d\n",
@@ -2660,7 +2733,7 @@
 	int wait_for_proc_work;
 
 	if (*consumed == 0) {
-		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 	}
@@ -2671,7 +2744,7 @@
 
 	if (thread->return_error != BR_OK && ptr < end) {
 		if (thread->return_error2 != BR_OK) {
-			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			binder_stat_br(proc, thread, thread->return_error2);
@@ -2679,7 +2752,7 @@
 				goto done;
 			thread->return_error2 = BR_OK;
 		}
-		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		binder_stat_br(proc, thread, thread->return_error);
@@ -2757,7 +2830,7 @@
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			cmd = BR_TRANSACTION_COMPLETE;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 
@@ -2799,14 +2872,14 @@
 				node->has_weak_ref = 0;
 			}
 			if (cmd != BR_NOOP) {
-				if (put_user(cmd, (uint32_t __user *)ptr))
+				if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(uint32_t);
-				if (put_user(node->ptr,
+				if (put_user_preempt_disabled(node->ptr,
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
-				if (put_user(node->cookie,
+				if (put_user_preempt_disabled(node->cookie,
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
@@ -2850,11 +2923,10 @@
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
+			if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			binder_stat_br(proc, thread, cmd);
@@ -2921,10 +2993,10 @@
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
-		if (put_user(cmd, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
+		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
 			return -EFAULT;
 		ptr += sizeof(tr);
 
@@ -2966,7 +3038,7 @@
 		binder_debug(BINDER_DEBUG_THREADS,
 			     "%d:%d BR_SPAWN_LOOPER\n",
 			     proc->pid, thread->pid);
-		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
 			return -EFAULT;
 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
 	}
@@ -3041,7 +3113,7 @@
 			break;
 	}
 	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		thread = kzalloc_preempt_disabled(sizeof(*thread));
 		if (thread == NULL)
 			return NULL;
 		binder_stats_created(BINDER_STAT_THREAD);
@@ -3145,7 +3217,7 @@
 		ret = -EINVAL;
 		goto out;
 	}
-	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3163,7 +3235,7 @@
 		trace_binder_write_done(ret);
 		if (ret < 0) {
 			bwr.read_consumed = 0;
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3177,7 +3249,7 @@
 		if (!list_empty(&proc->todo))
 			wake_up_interruptible(&proc->wait);
 		if (ret < 0) {
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3187,7 +3259,7 @@
 		     proc->pid, thread->pid,
 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
-	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+	if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3271,7 +3343,7 @@
 			goto err;
 		break;
 	case BINDER_SET_MAX_THREADS:
-		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
 			ret = -EINVAL;
 			goto err;
 		}
@@ -3294,9 +3366,8 @@
 			ret = -EINVAL;
 			goto err;
 		}
-		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
-			     &ver->protocol_version)) {
-			ret = -EINVAL;
+		if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
+			ret = -EINVAL;
 			goto err;
 		}
 		break;
@@ -3357,6 +3428,7 @@
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int ret;
+
 	struct vm_struct *area;
 	struct binder_proc *proc = filp->private_data;
 	const char *failure_string;
@@ -3417,7 +3489,11 @@
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	/* binder_update_page_range assumes preemption is disabled */
+	preempt_disable();
+	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+	preempt_enable_no_resched();
+	if (ret) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
@@ -3703,8 +3779,12 @@
 	int defer;
 
 	do {
-		binder_lock(__func__);
+		trace_binder_lock(__func__);
+		mutex_lock(&binder_main_lock);
+		trace_binder_locked(__func__);
+
 		mutex_lock(&binder_deferred_lock);
+		preempt_disable();
 		if (!hlist_empty(&binder_deferred_list)) {
 			proc = hlist_entry(binder_deferred_list.first,
 					struct binder_proc, deferred_work_node);
@@ -3730,7 +3810,9 @@
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
 
-		binder_unlock(__func__);
+		trace_binder_unlock(__func__);
+		mutex_unlock(&binder_main_lock);
+		preempt_enable_no_resched();
 		if (files)
 			put_files_struct(files);
 	} while (proc);
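The binder.c changes make binder_lock() disable preemption for as long as the main lock is held, so every sleeping operation done under it (GFP_KERNEL allocation, get_user/put_user, copy_to/from_user, fd-table updates) is routed through a *_preempt_disabled wrapper that briefly re-enables preemption around the call. Restating the pattern from the helpers added above:

	preempt_enable_no_resched();       /* step out of the preempt-off region  */
	ret = copy_from_user(to, from, n); /* ...so the copy may fault and sleep  */
	preempt_disable();                 /* re-enter it before returning        */

kzalloc_preempt_disabled() additionally tries GFP_NOWAIT first, so the common allocation case never has to open this window at all.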
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97..f95593a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -201,22 +201,43 @@
 	driver_deferred_probe_trigger();
 }
 
+static void enable_trigger_defer_cycle(void)
+{
+	driver_deferred_probe_enable = true;
+	driver_deferred_probe_trigger();
+	/*
+	 * Sort as many dependencies as possible before the next initcall
+	 * level
+	 */
+	flush_work(&deferred_probe_work);
+}
+
 /**
  * deferred_probe_initcall() - Enable probing of deferred devices
  *
  * We don't want to get in the way when the bulk of drivers are getting probed.
  * Instead, this initcall makes sure that deferred probing is delayed until
- * late_initcall time.
+ * all the registered initcall functions at a particular level are completed.
+ * This function is invoked at every *_initcall_sync level.
  */
 static int deferred_probe_initcall(void)
 {
-	driver_deferred_probe_enable = true;
-	driver_deferred_probe_trigger();
-	/* Sort as many dependencies as possible before exiting initcalls */
-	flush_work(&deferred_probe_work);
+	enable_trigger_defer_cycle();
+	driver_deferred_probe_enable = false;
 	return 0;
 }
-late_initcall(deferred_probe_initcall);
+arch_initcall_sync(deferred_probe_initcall);
+subsys_initcall_sync(deferred_probe_initcall);
+fs_initcall_sync(deferred_probe_initcall);
+device_initcall_sync(deferred_probe_initcall);
+
+static int deferred_probe_enable_fn(void)
+{
+	/* Enable deferred probing for all time */
+	enable_trigger_defer_cycle();
+	return 0;
+}
+late_initcall(deferred_probe_enable_fn);
 
 /**
  * device_is_bound() - Check if device is bound to a driver
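With the change above, the deferred-probe queue is flushed at every *_initcall_sync level and probing is left permanently enabled only from late_initcall onward, so a driver that returns -EPROBE_DEFER because a dependency is missing is retried much earlier in boot. A minimal sketch of the driver-side contract this relies on (the foo_probe() driver and the "core" clock name are illustrative, not taken from this patch):

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/*
		 * devm_clk_get() returns ERR_PTR(-EPROBE_DEFER) while the
		 * clock provider has not probed yet; passing that error back
		 * asks the driver core to retry this probe, which now happens
		 * at the next *_initcall_sync flush instead of late_initcall.
		 */
		clk = devm_clk_get(&pdev->dev, "core");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_prepare_enable(clk);
	}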
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 26cf6b9..a95e1e5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -294,8 +294,7 @@
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware",
-	"/firmware/image"
+	"/lib/firmware"
 };
 
 /*
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 16d307b..270cdd4 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1040,7 +1040,7 @@
 		active_time = ktime_set(0, 0);
 	}
 
-	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+	seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
 		   ws->name, active_count, ws->event_count,
 		   ws->wakeup_count, ws->expire_count,
 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
@@ -1060,7 +1060,7 @@
 {
 	struct wakeup_source *ws;
 
-	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+	seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
 		"last_change\tprevent_suspend_time\n");
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 13f747a..34a7d97 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -169,6 +169,15 @@
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
 
+config MSM_CAMCC_SDM845
+	tristate "SDM845 Camera Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc
+	  sdm845 devices.
+	  Say Y if you want to support camera devices and functionality such as
+	  capturing pictures.
+
 config CLOCK_QPNP_DIV
 	tristate "QPNP PMIC clkdiv driver"
 	depends on COMMON_CLK_QCOM && SPMI
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 321587b..b97efe4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
new file mode 100644
index 0000000..c49eddf
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -0,0 +1,1935 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
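+/*
+ * The pre-divider field stores 2 * divider - 1 so that half-integer
+ * dividers can be encoded: a divider of 3 is stored as 5, a divider of
+ * 2.5 as 4.  For example, with cam_cc_pll0 at 600 MHz (L = 0x1f,
+ * frac = 0x4000 from the 19.2 MHz bi_tcxo used throughout the tables
+ * below), F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0) selects
+ * 600 MHz / 3.
+ */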
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CAM_CC_PLL0_OUT_EVEN,
+	P_CAM_CC_PLL1_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_ODD,
+	P_CAM_CC_PLL3_OUT_EVEN,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL2_OUT_ODD, 4 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll2_out_odd",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
+static const struct pll_config cam_cc_pll0_config = {
+	.l = 0x1f,
+	.frac = 0x4000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 600000000),
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll1_config = {
+	.l = 0x2a,
+	.frac = 0x1556,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+	.offset = 0x1000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOW, 808000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+	.offset = 0x1000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll1" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll2_config = {
+	.l = 0x32,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+	.offset = 0x2000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll2",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_MX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 960000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_odd[] = {
+	{ 0x0, 1 },
+	{ 0x3, 3 },
+	{ 0x5, 5 },
+	{ 0x7, 7 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_odd = {
+	.offset = 0x2000,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_fabia_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_odd),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_odd",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll3_config = {
+	.l = 0x14,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+	.offset = 0x3000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 384000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+	.offset = 0x3000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll3_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll3" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+	.cmd_rcgr = 0x600c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_bps_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+	.cmd_rcgr = 0xb0d8,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 37500000,
+			LOW, 50000000,
+			NOMINAL, 100000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+	.cmd_rcgr = 0x9060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cphy_rx_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 300000000,
+			LOW, 320000000,
+			HIGH, 384000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_EVEN, 2, 0, 0),
+	F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x5004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi0phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x5028,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi1phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x504c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi2phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+	.cmd_rcgr = 0x6038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fast_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 100000000,
+			LOW, 200000000,
+			LOW_L1, 300000000,
+			NOMINAL, 400000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+	.cmd_rcgr = 0xb0b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fd_core_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+	.cmd_rcgr = 0xb088,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_icp_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+	.cmd_rcgr = 0x900c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+	.cmd_rcgr = 0x9038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+	.cmd_rcgr = 0xa00c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+	.cmd_rcgr = 0xa030,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+	.cmd_rcgr = 0xb004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+	.cmd_rcgr = 0xb024,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+	.cmd_rcgr = 0x700c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ipe_1_clk_src = {
+	.cmd_rcgr = 0x800c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+	.cmd_rcgr = 0xb04c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_jpeg_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+	.cmd_rcgr = 0xb0f8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.enable_safe_config = true,
+	.freq_tbl = ftbl_cam_cc_lrme_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_lrme_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 384000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
+	F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+	.cmd_rcgr = 0x4004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+	.cmd_rcgr = 0x4024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+	.cmd_rcgr = 0x4044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk2_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+	.cmd_rcgr = 0x4064,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk3_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_CAM_CC_PLL0_OUT_EVEN, 10, 0, 0),
+	F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+	F(73846154, P_CAM_CC_PLL2_OUT_EVEN, 6.5, 0, 0),
+	F(80000000, P_CAM_CC_PLL2_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+	.cmd_rcgr = 0x6054,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_slow_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 60000000,
+			LOW, 66666667,
+			LOW_L1, 73846154,
+			NOMINAL, 80000000),
+	},
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+	.halt_reg = 0x606c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x606c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+	.halt_reg = 0x6050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x6050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+	.halt_reg = 0x6034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+	.halt_reg = 0x6024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_bps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+	.halt_reg = 0xb12c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb12c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+	.halt_reg = 0xb124,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+	.halt_reg = 0xb0f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+	.halt_reg = 0xb11c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb11c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cpas_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+	.halt_reg = 0x501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+	.halt_reg = 0x5040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+	.halt_reg = 0x5064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+	.halt_reg = 0x5020,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+	.halt_reg = 0x5044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+	.halt_reg = 0x5068,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_debug_clk = {
+	.halt_reg = 0xc008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+	.halt_reg = 0xb0c8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+	.halt_reg = 0xb0d0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_uar_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_apb_clk = {
+	.halt_reg = 0xb084,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+	.halt_reg = 0xb078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+	.halt_reg = 0xb0a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_icp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+	.halt_reg = 0xb07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_cti_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+	.halt_reg = 0xb080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_ts_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+	.halt_reg = 0x907c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x907c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+	.halt_reg = 0x9024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+	.halt_reg = 0x9078,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x9078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+	.halt_reg = 0x9050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+	.halt_reg = 0x9034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+	.halt_reg = 0xa054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+	.halt_reg = 0xa024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+	.halt_reg = 0xa050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xa050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+	.halt_reg = 0xa048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+	.halt_reg = 0xa02c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa02c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+	.halt_reg = 0xb01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+	.halt_reg = 0xb044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+	.halt_reg = 0xb03c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb03c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+	.halt_reg = 0x703c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x703c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+	.halt_reg = 0x7038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x7038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+	.halt_reg = 0x7034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+	.halt_reg = 0x7024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+	.halt_reg = 0x803c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x803c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+	.halt_reg = 0x8038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x8038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+	.halt_reg = 0x8034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+	.halt_reg = 0x8024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+	.halt_reg = 0xb064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_jpeg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_jpeg_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+	.halt_reg = 0xb110,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_lrme_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_lrme_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+	.halt_reg = 0x401c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x401c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+	.halt_reg = 0x403c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x403c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+	.halt_reg = 0x405c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x405c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+	.halt_reg = 0x407c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x407c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_pll_test_clk = {
+	.halt_reg = 0xc014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll_test_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+	.halt_reg = 0xb13c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb13c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_soc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+	.halt_reg = 0xb0a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_sys_tmr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_regmap *cam_cc_sdm845_clocks[] = {
+	[CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+	[CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+	[CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+	[CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+	[CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+	[CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+	[CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+	[CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+	[CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+	[CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_DEBUG_CLK] = &cam_cc_debug_clk.clkr,
+	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+	[CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+	[CAM_CC_ICP_APB_CLK] = &cam_cc_icp_apb_clk.clkr,
+	[CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+	[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+	[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+	[CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+	[CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+	[CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+	[CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+	[CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+	[CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+	[CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+	[CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+	[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+	[CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+	[CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+	[CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+	[CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+	[CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+	[CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+	[CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+	[CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+	[CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+	[CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+	[CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+	[CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+	[CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+	[CAM_CC_IPE_1_CLK_SRC] = &cam_cc_ipe_1_clk_src.clkr,
+	[CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+	[CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+	[CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+	[CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+	[CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+	[CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+	[CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+	[CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+	[CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+	[CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+	[CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+	[CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+	[CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+	[CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+	[CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+	[CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+	[CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+	[CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+	[CAM_CC_PLL2_OUT_ODD] = &cam_cc_pll2_out_odd.clkr,
+	[CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+	[CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+	[CAM_CC_PLL_TEST_CLK] = &cam_cc_pll_test_clk.clkr,
+	[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+	[CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+	[CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static const struct qcom_reset_map cam_cc_sdm845_resets[] = {
+	[TITAN_CAM_CC_BPS_BCR] = { 0x6000 },
+	[TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 },
+	[TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 },
+	[TITAN_CAM_CC_CPAS_BCR] = { 0xb118 },
+	[TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+	[TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+	[TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+	[TITAN_CAM_CC_FD_BCR] = { 0xb0ac },
+	[TITAN_CAM_CC_ICP_BCR] = { 0xb074 },
+	[TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 },
+	[TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 },
+	[TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+	[TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 },
+	[TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 },
+	[TITAN_CAM_CC_JPEG_BCR] = { 0xb048 },
+	[TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 },
+	[TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 },
+	[TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 },
+	[TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 },
+	[TITAN_CAM_CC_MCLK3_BCR] = { 0x4060 },
+	[TITAN_CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
+static const struct regmap_config cam_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0xd004,
+	.fast_io	= true,
+};
+
+static const struct qcom_cc_desc cam_cc_sdm845_desc = {
+	.config = &cam_cc_sdm845_regmap_config,
+	.clks = cam_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(cam_cc_sdm845_clocks),
+	.resets = cam_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(cam_cc_sdm845_resets),
+};
+
+static const struct of_device_id cam_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,cam_cc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table);
+
+static int cam_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &cam_cc_sdm845_desc);
+	if (IS_ERR(regmap)) {
+		pr_err("Failed to map the Camera CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+	clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+	clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+	clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+
+	ret = qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Camera CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Camera CC clocks\n");
+	return ret;
+}
+
+static struct platform_driver cam_cc_sdm845_driver = {
+	.probe		= cam_cc_sdm845_probe,
+	.driver		= {
+		.name	= "cam_cc-sdm845",
+		.of_match_table = cam_cc_sdm845_match_table,
+	},
+};
+
+static int __init cam_cc_sdm845_init(void)
+{
+	return platform_driver_register(&cam_cc_sdm845_driver);
+}
+core_initcall(cam_cc_sdm845_init);
+
+static void __exit cam_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&cam_cc_sdm845_driver);
+}
+module_exit(cam_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cam_cc-sdm845");
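For context, a minimal consumer-side sketch (illustrative only, not part of this change) of how a camera client could use one of the branch clocks registered by this driver through the common clock framework. The connection id "cam_mclk0" and the probe function are assumptions; the real lookup depends on the client's clocks/clock-names properties.

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_sensor_probe(struct platform_device *pdev)
{
	struct clk *mclk;
	int ret;

	/* Assumed clock-names entry pointing at <&clock_camcc CAM_CC_MCLK0_CLK>. */
	mclk = devm_clk_get(&pdev->dev, "cam_mclk0");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* 33.33 MHz matches an entry in ftbl_cam_cc_mclk0_clk_src above. */
	ret = clk_set_rate(mclk, 33333333);
	if (ret)
		return ret;

	return clk_prepare_enable(mclk);
}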
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index adf2f7f..53f736c 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -218,11 +218,20 @@
 		unsigned long *parent_rate)
 {
 	struct clk_hw *parent = clk_hw_get_parent(hw);
+	unsigned long rrate = 0;
 
 	if (!parent)
 		return -EPERM;
 
-	return clk_hw_round_rate(parent, rate);
+	rrate = clk_hw_round_rate(parent, rate);
+	/*
+	 * If the rounded rate that's returned is valid, update the parent_rate
+	 * field so that the set_rate() call can be propagated to the parent.
+	 */
+	if (rrate > 0)
+		*parent_rate = rrate;
+
+	return rrate;
 }
 
 static unsigned long clk_branch2_recalc_rate(struct clk_hw *hw,
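The round_rate change above writes the rounded value back through parent_rate, so a set_rate on a branch marked CLK_SET_RATE_PARENT actually reaches its RCG parent. A hedged consumer-level sketch of the resulting behaviour (function name and requested rate are illustrative):

#include <linux/clk.h>
#include <linux/errno.h>

/*
 * With the change above, setting a rate on e.g. cam_cc_ife_0_clk drives its
 * parent RCG to the rounded frequency instead of leaving it untouched.
 */
static int example_scale_branch(struct clk *branch)
{
	long rate;

	rate = clk_round_rate(branch, 404000000);
	if (rate <= 0)
		return rate ? (int)rate : -EINVAL;

	return clk_set_rate(branch, rate);
}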
diff --git a/drivers/clk/qcom/vdd-level-sdm845.h b/drivers/clk/qcom/vdd-level-sdm845.h
index 5be7a28..1771c15 100644
--- a/drivers/clk/qcom/vdd-level-sdm845.h
+++ b/drivers/clk/qcom/vdd-level-sdm845.h
@@ -90,6 +90,13 @@
 	},					\
 	.num_rate_max = VDD_CX_NUM
 
+#define VDD_MX_FMAX_MAP2(l1, f1, l2, f2) \
+	.vdd_class = &vdd_mx,			\
+	.rate_max = (unsigned long[VDD_CX_NUM]) {	\
+		[VDD_CX_##l1] = (f1),		\
+		[VDD_CX_##l2] = (f2),		\
+	},					\
+	.num_rate_max = VDD_CX_NUM
 
 enum vdd_cx_levels {
 	VDD_CX_NONE,
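A hypothetical usage sketch for the new VDD_MX_FMAX_MAP2 macro, mirroring how the VDD_CX_FMAX_MAP* macros are used in cam_cc-sdm845.c above; the clock name and register offset below are illustrative, not part of this patch.

static struct clk_rcg2 example_mx_clk_src = {
	.cmd_rcgr = 0x1000,		/* placeholder offset */
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = cam_cc_parent_map_0,
	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_mx_clk_src",
		.parent_names = cam_cc_parent_names_0,
		.num_parents = 6,
		.ops = &clk_rcg2_ops,
		/* Same shape as VDD_CX_FMAX_MAP2, but votes on the MX rail. */
		VDD_MX_FMAX_MAP2(
			MIN, 19200000,
			LOWER, 34285714),
	},
};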
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7309c08..fd9ada6f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1133,19 +1133,26 @@
 	struct devfreq *df = to_devfreq(d);
 	struct device *dev = df->dev.parent;
 	struct dev_pm_opp *opp;
+	unsigned int i = 0, max_state = df->profile->max_state;
+	bool use_opp;
 	ssize_t count = 0;
 	unsigned long freq = 0;
 
 	rcu_read_lock();
-	do {
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp))
-			break;
+	use_opp = dev_pm_opp_get_opp_count(dev) > 0;
+	while (use_opp || i < max_state) {
+		if (use_opp) {
+			opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+			if (IS_ERR(opp))
+				break;
+		} else {
+			freq = df->profile->freq_table[i++];
+		}
 
 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 				   "%lu ", freq);
 		freq++;
-	} while (1);
+	}
 	rcu_read_unlock();
 
 	/* Truncate the trailing space */
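The devfreq change above makes available_frequencies fall back to the profile's freq_table when no OPPs are registered for the device. A minimal sketch of that case (device, rates and governor choice are assumptions):

#include <linux/devfreq.h>
#include <linux/kernel.h>

static unsigned long example_freqs[] = { 200000000, 300000000, 400000000 };

static int example_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* Program the hardware to *freq here. */
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 50,
	.target		= example_target,
	.freq_table	= example_freqs,
	.max_state	= ARRAY_SIZE(example_freqs),
};

/*
 * In the driver's probe:
 *	devm_devfreq_add_device(dev, &example_profile, "simple_ondemand", NULL);
 * With no OPP table, the sysfs available_frequencies node now lists
 * "200000000 300000000 400000000" instead of being empty.
 */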
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index 0efca1e..a56c7e0 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -61,4 +61,12 @@
 	  by command engine to the external modem. Also allows masking
 	  of certain notifications being sent to the external modem.
 
+config MDM_DBG_REQ_ENG
+	tristate "manual request engine for 4x series external modems"
+	depends on ESOC_MDM_DBG_ENG
+	help
+	  Provides a user interface to handle incoming requests from
+	  the external modem. Allows for debugging of the IPC mechanism
+	  between the external modem and the primary SoC.
+
 endif
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index b1834e2..6c42f54 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -435,11 +435,12 @@
 {
 	int value;
 	struct esoc_clink *esoc;
+	struct device *dev;
 	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
-	struct device *dev = mdm->dev;
 
 	if (!mdm)
 		return IRQ_HANDLED;
+	dev = mdm->dev;
 	esoc = mdm->esoc;
 	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
 	if (value == 0 && mdm->ready) {
@@ -500,7 +501,7 @@
 	struct device_node *node = mdm->dev->of_node;
 
 	addr = of_iomap(node, 0);
-	if (IS_ERR(addr)) {
+	if (IS_ERR_OR_NULL(addr)) {
 		dev_err(mdm->dev, "failed to get debug base address\n");
 		return;
 	}
@@ -509,7 +510,7 @@
 	if (val == MDM_DBG_MODE) {
 		mdm->dbg_mode = true;
 		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
-		if (IS_ERR(mdm->cti)) {
+		if (IS_ERR_OR_NULL(mdm->cti)) {
 			dev_err(mdm->dev, "unable to get cti handle\n");
 			goto cti_get_err;
 		}
@@ -743,7 +744,7 @@
 	mdm->dev = &pdev->dev;
 	mdm->pon_ops = pon_ops;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -813,7 +814,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -901,7 +902,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -1001,11 +1002,11 @@
 	struct mdm_ctrl *mdm;
 
 	match = of_match_node(mdm_dt_match, node);
-	if (IS_ERR(match))
+	if (IS_ERR_OR_NULL(match))
 		return PTR_ERR(match);
 	mdm_ops = match->data;
 	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
-	if (IS_ERR(mdm))
+	if (IS_ERR_OR_NULL(mdm))
 		return PTR_ERR(mdm);
 	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
 }
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
index a186ea8..309c820 100644
--- a/drivers/esoc/esoc-mdm-dbg-eng.c
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,165 @@
 }
 static DRIVER_ATTR(notifier_mask, 00200, NULL, notifier_mask_store);
 
-int mdm_dbg_eng_init(struct esoc_drv *esoc_drv)
+#ifdef CONFIG_MDM_DBG_REQ_ENG
+static struct esoc_clink *dbg_clink;
+/* Last recorded request from esoc */
+static enum esoc_req last_req;
+static DEFINE_SPINLOCK(req_lock);
+/*
+ * esoc_to_user: Conversion of esoc ids to user visible strings
+ * id: esoc request, command, notifier, event id
+ * str: string equivalent of the above
+ */
+struct esoc_to_user {
+	unsigned int id;
+	char str[20];
+};
+
+static struct esoc_to_user in_to_resp[] = {
+	{
+		.id = ESOC_IMG_XFER_DONE,
+		.str = "XFER_DONE",
+	},
+	{
+		.id = ESOC_BOOT_DONE,
+		.str = "BOOT_DONE",
+	},
+	{
+		.id = ESOC_BOOT_FAIL,
+		.str = "BOOT_FAIL",
+	},
+	{
+		.id = ESOC_IMG_XFER_RETRY,
+		.str = "XFER_RETRY",
+	},
+	{
+		.id = ESOC_IMG_XFER_FAIL,
+		.str = "XFER_FAIL",
+	},
+	{
+		.id = ESOC_UPGRADE_AVAILABLE,
+		.str = "UPGRADE",
+	},
+	{
+		.id = ESOC_DEBUG_DONE,
+		.str = "DEBUG_DONE",
+	},
+	{
+		.id = ESOC_DEBUG_FAIL,
+		.str = "DEBUG_FAIL",
+	},
+};
+
+static struct esoc_to_user req_to_str[] = {
+	{
+		.id = ESOC_REQ_IMG,
+		.str = "REQ_IMG",
+	},
+	{
+		.id = ESOC_REQ_DEBUG,
+		.str = "REQ_DEBUG",
+	},
+	{
+		.id = ESOC_REQ_SHUTDOWN,
+		.str = "REQ_SHUTDOWN",
+	},
+};
+
+static ssize_t req_eng_resp_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int i;
+	const struct esoc_clink_ops *const clink_ops = dbg_clink->clink_ops;
+
+	dev_dbg(&dbg_clink->dev, "user input req eng response %s\n", buf);
+	for (i = 0; i < ARRAY_SIZE(in_to_resp); i++) {
+		size_t len1 = strlen(buf);
+		size_t len2 = strlen(in_to_resp[i].str);
+
+		if (len1 == len2 && !strcmp(buf, in_to_resp[i].str)) {
+			clink_ops->notify(in_to_resp[i].id, dbg_clink);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(in_to_resp))
+		dev_err(&dbg_clink->dev, "Invalid resp %s, specified\n", buf);
+	return count;
+}
+
+static DRIVER_ATTR(req_eng_resp, 0200, NULL, req_eng_resp_store);
+
+static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
+{
+	unsigned int i;
+	unsigned long flags;
+	size_t count = 0;
+
+	spin_lock_irqsave(&req_lock, flags);
+	for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
+		if (last_req == req_to_str[i].id) {
+			count = snprintf(buf, PAGE_SIZE, "%s\n",
+					req_to_str[i].str);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&req_lock, flags);
+	return count;
+}
+static DRIVER_ATTR(last_esoc_req, 0400, last_esoc_req_show, NULL);
+
+static void esoc_handle_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&req_lock, flags);
+	last_req = req;
+	spin_unlock_irqrestore(&req_lock, flags);
+}
+
+static void esoc_handle_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+}
+
+static struct esoc_eng dbg_req_eng = {
+	.handle_clink_req = esoc_handle_req,
+	.handle_clink_evt = esoc_handle_evt,
+};
+
+int register_dbg_req_eng(struct esoc_clink *clink,
+					struct device_driver *drv)
+{
+	int ret;
+
+	dbg_clink = clink;
+	ret = driver_create_file(drv, &driver_attr_req_eng_resp);
+	if (ret)
+		return ret;
+	ret = driver_create_file(drv, &driver_attr_last_esoc_req);
+	if (ret) {
+		dev_err(&clink->dev, "Unable to create last esoc req\n");
+		goto last_req_err;
+	}
+	ret = esoc_clink_register_req_eng(clink, &dbg_req_eng);
+	if (ret) {
+		pr_err("Unable to register req eng\n");
+		goto req_eng_fail;
+	}
+	return 0;
+req_eng_fail:
+	driver_remove_file(drv, &driver_attr_last_esoc_req);
+last_req_err:
+	driver_remove_file(drv, &driver_attr_req_eng_resp);
+	return ret;
+}
+#else
+int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *d)
+{
+	return 0;
+}
+#endif
+
+int mdm_dbg_eng_init(struct esoc_drv *esoc_drv,
+			struct esoc_clink *clink)
 {
 	int ret;
 	struct device_driver *drv = &esoc_drv->driver;
@@ -194,7 +352,14 @@
 		pr_err("Unable to create notify mask file\n");
 		goto notify_mask_err;
 	}
+	ret = register_dbg_req_eng(clink, drv);
+	if (ret) {
+		pr_err("Failed to register esoc dbg req eng\n");
+		goto dbg_req_fail;
+	}
 	return 0;
+dbg_req_fail:
+	driver_remove_file(drv, &driver_attr_notifier_mask);
 notify_mask_err:
 	driver_remove_file(drv, &driver_attr_command_mask);
 cmd_mask_err:
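A hedged user-space sketch of driving the new debug request engine attributes. The sysfs directory is an assumption (it depends on the esoc bus and driver names on the target); the response string must match the req_eng_resp_store table exactly, so it is written without a trailing newline.

#include <stdio.h>
#include <string.h>

#define DBG_DIR "/sys/bus/esoc/drivers/mdm-4x"	/* assumed location */

int main(void)
{
	char req[32] = "";
	FILE *f;

	f = fopen(DBG_DIR "/last_esoc_req", "r");
	if (!f) {
		perror("last_esoc_req");
		return 1;
	}
	if (!fgets(req, sizeof(req), f)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	req[strcspn(req, "\n")] = '\0';

	/* If the modem asked for an image transfer, report it as complete. */
	if (!strcmp(req, "REQ_IMG")) {
		f = fopen(DBG_DIR "/req_eng_resp", "w");
		if (!f)
			return 1;
		fputs("XFER_DONE", f);
		fclose(f);
	}
	return 0;
}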
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 473a9c7..31cd8c4 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -233,7 +233,7 @@
 	struct esoc_eng *esoc_eng;
 
 	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
-	if (IS_ERR(mdm_drv))
+	if (IS_ERR_OR_NULL(mdm_drv))
 		return PTR_ERR(mdm_drv);
 	esoc_eng = &mdm_drv->cmd_eng;
 	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
@@ -261,7 +261,7 @@
 	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
 	if (ret)
 		dev_err(&esoc_clink->dev, "register for reboot failed\n");
-	ret = mdm_dbg_eng_init(drv);
+	ret = mdm_dbg_eng_init(drv, esoc_clink);
 	if (ret) {
 		debug_init_done = false;
 		dev_err(&esoc_clink->dev, "dbg engine failure\n");
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
index 4807e2b..dc94742 100644
--- a/drivers/esoc/esoc_bus.c
+++ b/drivers/esoc/esoc_bus.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -129,7 +129,7 @@
 	struct device *dev;
 
 	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -143,7 +143,7 @@
 
 	dev = bus_find_device(&esoc_bus_type, NULL, node,
 						esoc_clink_match_node);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -175,14 +175,14 @@
 
 	len = strlen("esoc") + sizeof(esoc_clink->id);
 	subsys_name = kzalloc(len, GFP_KERNEL);
-	if (IS_ERR(subsys_name))
+	if (IS_ERR_OR_NULL(subsys_name))
 		return PTR_ERR(subsys_name);
 	snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
 	esoc_clink->subsys.name = subsys_name;
 	esoc_clink->dev.of_node = esoc_clink->np;
 	esoc_clink->subsys.dev = &esoc_clink->dev;
 	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
-	if (IS_ERR(esoc_clink->subsys_dev)) {
+	if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
 		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
 		ret = PTR_ERR(esoc_clink->subsys_dev);
 		goto subsys_err;
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
index 5b194e31..b9d6833 100644
--- a/drivers/esoc/esoc_client.c
+++ b/drivers/esoc/esoc_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -48,6 +48,8 @@
 
 	for (index = 0;; index++) {
 		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		if (IS_ERR_OR_NULL(esoc_prop))
+			return ERR_PTR(-ENOMEM);
 		parp = of_get_property(np, esoc_prop, NULL);
 		if (parp == NULL) {
 			dev_err(dev, "esoc device not present\n");
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 17a30b8..39090dc 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -259,7 +259,16 @@
 	unsigned int minor = iminor(inode);
 
 	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev) {
+		pr_err("failed to get udev\n");
+		return -ENOMEM;
+	}
+
 	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+	if (!esoc_clink) {
+		pr_err("failed to get clink\n");
+		return -ENOMEM;
+	}
 
 	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
 	if (!uhandle) {
@@ -304,12 +313,12 @@
 	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
 
 	esoc_udev = get_free_esoc_udev(esoc_clink);
-	if (IS_ERR(esoc_udev))
+	if (IS_ERR_OR_NULL(esoc_udev))
 		return PTR_ERR(esoc_udev);
 	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
 					MKDEV(esoc_major, esoc_clink->id),
 					esoc_clink, "esoc-%d", esoc_clink->id);
-	if (IS_ERR(esoc_udev->dev)) {
+	if (IS_ERR_OR_NULL(esoc_udev->dev)) {
 		pr_err("failed to create user device\n");
 		goto dev_err;
 	}
@@ -357,8 +366,7 @@
 	int ret = 0;
 
 	esoc_class = class_create(THIS_MODULE, "esoc-dev");
-
-	if (IS_ERR(esoc_class)) {
+	if (IS_ERR_OR_NULL(esoc_class)) {
 		pr_err("coudn't create class");
 		return PTR_ERR(esoc_class);
 	}
diff --git a/drivers/esoc/mdm-dbg.h b/drivers/esoc/mdm-dbg.h
index ae31339..ffba87c 100644
--- a/drivers/esoc/mdm-dbg.h
+++ b/drivers/esoc/mdm-dbg.h
@@ -24,7 +24,8 @@
 	return false;
 }
 
-static inline int mdm_dbg_eng_init(struct esoc_drv *drv)
+static inline int mdm_dbg_eng_init(struct esoc_drv *drv,
+						struct esoc_clink *clink)
 {
 	return 0;
 }
@@ -32,7 +33,8 @@
 #else
 extern bool dbg_check_cmd_mask(unsigned int cmd);
 extern bool dbg_check_notify_mask(unsigned int notify);
-extern int mdm_dbg_eng_init(struct esoc_drv *drv);
+extern int mdm_dbg_eng_init(struct esoc_drv *drv,
+				struct esoc_clink *clink);
 #endif
 
 static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d..6bdf39e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -65,6 +65,7 @@
 	bool systab_found;
 
 	efi_mm.pgd = pgd_alloc(&efi_mm);
+	mm_init_cpumask(&efi_mm);
 	init_new_context(NULL, &efi_mm);
 
 	systab_found = false;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 4da92ee..bda9c2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1970,6 +1970,52 @@
 	return rc;
 }
 
+static int dsi_panel_parse_hdr_config(struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+	int rc = 0;
+	struct drm_panel_hdr_properties *hdr_prop;
+
+	hdr_prop = &panel->hdr_props;
+	hdr_prop->hdr_enabled = of_property_read_bool(of_node,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_MAX);
+		if (rc) {
+			pr_err("%s:%d, Unable to read color primaries,rc:%u",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr brightness, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr brightness, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+	}
+	return 0;
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node)
 {
@@ -2071,6 +2117,10 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
+	rc = dsi_panel_parse_hdr_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse hdr config, rc=%d\n", rc);
+
 	panel->panel_of_node = of_node;
 	drm_panel_init(&panel->drm_panel);
 	mutex_init(&panel->panel_lock);
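Illustrative helper (not part of this patch) showing how the HDR properties parsed above could be inspected after dsi_panel_get(); it only touches fields referenced by dsi_panel_parse_hdr_config().

#include <linux/printk.h>
#include "dsi_panel.h"

static void example_dump_hdr_props(struct dsi_panel *panel)
{
	struct drm_panel_hdr_properties *hdr = &panel->hdr_props;
	int i;

	if (!hdr->hdr_enabled)
		return;

	pr_debug("HDR panel: peak=%u blackness=%u\n",
		 hdr->peak_brightness, hdr->blackness_level);
	for (i = 0; i < DISPLAY_PRIMARIES_MAX; i++)
		pr_debug("primary[%d] = %u\n", i, hdr->display_primaries[i]);
}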
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index ab30e16..57226ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -22,6 +22,7 @@
 #include <linux/leds.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
+#include <drm/msm_drm.h>
 
 #include "dsi_defs.h"
 #include "dsi_ctrl_hw.h"
@@ -173,6 +174,7 @@
 	struct dsi_backlight_config bl_config;
 	struct dsi_panel_reset_config reset_config;
 	struct dsi_pinctrl_info pinctrl;
+	struct drm_panel_hdr_properties hdr_props;
 
 	bool lp11_init;
 	bool ulps_enabled;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 48bccd9..aa11a36 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -45,10 +45,20 @@
 		drm_fb_helper_hotplug_event(priv->fbdev);
 }
 
+int msm_atomic_check(struct drm_device *dev,
+			    struct drm_atomic_state *state)
+{
+	if (msm_is_suspend_blocked(dev)) {
+		DRM_DEBUG("rejecting commit during suspend\n");
+		return -EBUSY;
+	}
+	return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = msm_fb_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
+	.atomic_check = msm_atomic_check,
 	.atomic_commit = msm_atomic_commit,
 };
 
@@ -1136,8 +1146,86 @@
 #ifdef CONFIG_PM_SLEEP
 static int msm_pm_suspend(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_connector *conn;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct msm_drm_private *priv;
+	int ret = 0;
 
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+	SDE_EVT32(0);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+	if (ret)
+		goto unlock;
+
+	/* save current state for resume */
+	if (priv->suspend_state)
+		drm_atomic_state_free(priv->suspend_state);
+	priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+	if (IS_ERR_OR_NULL(priv->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		priv->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = &ctx;
+	drm_for_each_connector(conn, ddev) {
+		if (!conn->state || !conn->state->crtc ||
+				conn->dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		/* force CRTC to be inactive */
+		crtc_state = drm_atomic_get_crtc_state(state,
+				conn->state->crtc);
+		if (IS_ERR_OR_NULL(crtc_state)) {
+			DRM_ERROR("failed to get crtc %d state\n",
+					conn->state->crtc->base.id);
+			drm_atomic_state_free(state);
+			goto unlock;
+		}
+		crtc_state->active = false;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_free(state);
+	} else {
+		priv->suspend_block = true;
+	}
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
 
 	return 0;
@@ -1145,8 +1233,40 @@
 
 static int msm_pm_resume(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	int ret;
 
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+
+	SDE_EVT32(priv->suspend_state != NULL);
+
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	priv->suspend_block = false;
+
+	if (priv->suspend_state) {
+		priv->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(priv->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_free(priv->suspend_state);
+		}
+		priv->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
 	drm_kms_helper_poll_enable(ddev);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d6ba63c..6a63bfd 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -137,6 +137,7 @@
 enum msm_mdp_conn_property {
 	/* blob properties, always put these first */
 	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_HDR_INFO,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,
@@ -472,6 +473,10 @@
 	 */
 	struct task_struct *struct_mutex_task;
 
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+	bool suspend_block;
+
 	/* list of clients waiting for events */
 	struct list_head client_event_list;
 
@@ -499,6 +504,25 @@
 		(_cb)->func = _func;                         \
 	} while (0)
 
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+	if (!dev || !dev->dev_private)
+		return false;
+
+	return ((struct msm_drm_private *)dev->dev_private)->suspend_state != NULL;
+}
+
+static inline bool msm_is_suspend_blocked(struct drm_device *dev)
+{
+	if (!dev || !dev->dev_private)
+		return false;
+
+	if (!msm_is_suspend_state(dev))
+		return false;
+
+	return ((struct msm_drm_private *)dev->dev_private)->suspend_block != 0;
+}
+
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 92b7e5d..9caadca 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -17,6 +17,7 @@
 #include "sde_connector.h"
 #include <linux/backlight.h>
 #include "dsi_drm.h"
+#include "dsi_display.h"
 
 #define BL_NODE_NAME_SIZE 32
 
@@ -227,6 +228,8 @@
 
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
@@ -666,6 +669,7 @@
 	struct sde_kms *sde_kms;
 	struct sde_kms_info *info;
 	struct sde_connector *c_conn = NULL;
+	struct dsi_display *dsi_display;
 	int rc;
 
 	if (!dev || !dev->dev_private || !encoder) {
@@ -781,6 +785,23 @@
 		kfree(info);
 	}
 
+	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+		dsi_display = (struct dsi_display *)(display);
+		if (dsi_display && dsi_display->panel &&
+			dsi_display->panel->hdr_props.hdr_enabled) {
+			msm_property_install_blob(&c_conn->property_info,
+				"hdr_properties",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_HDR_INFO);
+
+			msm_property_set_blob(&c_conn->property_info,
+				&c_conn->blob_hdr,
+				&dsi_display->panel->hdr_props,
+				sizeof(dsi_display->panel->hdr_props),
+				CONNECTOR_PROP_HDR_INFO);
+		}
+	}
+
 	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
 			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
 
@@ -810,6 +831,8 @@
 error_destroy_property:
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
 	sde_fence_deinit(&c_conn->retire_fence);
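User-space counterpart (hedged sketch, not part of this change) for the immutable "hdr_properties" blob installed above: standard libdrm calls are enough to locate and fetch it; fd and connector_id are assumed to come from the usual drmModeGetResources() walk.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static drmModePropertyBlobPtr get_hdr_blob(int fd, uint32_t connector_id)
{
	drmModeObjectPropertiesPtr props;
	drmModePropertyBlobPtr blob = NULL;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return NULL;

	for (i = 0; i < props->count_props && !blob; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "hdr_properties"))
			blob = drmModeGetPropertyBlob(fd,
					(uint32_t)props->prop_values[i]);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return blob;	/* caller frees with drmModeFreePropertyBlob() */
}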
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 8be359d..0ece0d2 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -177,6 +177,7 @@
  * @property_info: Private structure for generic property handling
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
+ * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
  * @fb_kmap: true if kernel mapping of framebuffer is requested
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
@@ -200,6 +201,7 @@
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
 	struct drm_property_blob *blob_caps;
+	struct drm_property_blob *blob_hdr;
 
 	bool fb_kmap;
 	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 853f8e9..a037250b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -53,7 +53,17 @@
 
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
-	struct msm_drm_private *priv = crtc->dev->dev_private;
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
 
 	return to_sde_kms(priv->kms);
 }
@@ -84,11 +94,11 @@
 	msm_property_destroy(&sde_crtc->property_info);
 	sde_cp_crtc_destroy_properties(crtc);
 
-	mutex_destroy(&sde_crtc->crtc_lock);
 	sde_fence_deinit(&sde_crtc->output_fence);
 	_sde_crtc_deinit_events(sde_crtc);
 
 	drm_crtc_cleanup(crtc);
+	mutex_destroy(&sde_crtc->crtc_lock);
 	kfree(sde_crtc);
 }
 
@@ -1042,6 +1052,112 @@
 }
 
 /**
+ * _sde_crtc_vblank_enable_nolock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ */
+static void _sde_crtc_vblank_enable_nolock(
+		struct sde_crtc *sde_crtc, bool enable)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = &sde_crtc->base;
+	dev = crtc->dev;
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	if (enable) {
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true);
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc,
+					sde_crtc_vblank_cb, (void *)crtc);
+		}
+	} else {
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc, NULL, NULL);
+		}
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+	}
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid crtc kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	/*
+	 * Update CP on suspend/resume transitions
+	 */
+	if (enable && !sde_crtc->suspend)
+		sde_cp_crtc_suspend(crtc);
+	else if (!enable && sde_crtc->suspend)
+		sde_cp_crtc_resume(crtc);
+
+	/*
+	 * If the vblank refcount != 0, release a power reference on suspend
+	 * and take it back during resume (if it is still != 0).
+	 */
+	if (sde_crtc->suspend == enable)
+		SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+				crtc->base.id, enable);
+	else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+		_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
+
+	sde_crtc->suspend = enable;
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
  * sde_crtc_duplicate_state - state duplicate hook
  * @crtc: Pointer to drm crtc structure
  * @Returns: Pointer to new drm_crtc_state structure
@@ -1091,6 +1207,10 @@
 		return;
 	}
 
+	/* revert suspend actions, if necessary */
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, false);
+
 	/* remove previous state, if present */
 	if (crtc->state) {
 		sde_crtc_destroy_state(crtc, crtc->state);
@@ -1116,27 +1236,26 @@
 
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
-	struct msm_drm_private *priv;
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
-	struct sde_kms *sde_kms;
 
-	if (!crtc) {
+	if (!crtc || !crtc->dev || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	sde_kms = _sde_crtc_get_kms(crtc);
-	priv = sde_kms->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, true);
+
 	mutex_lock(&sde_crtc->crtc_lock);
 	SDE_EVT32(DRMID(crtc));
 
-	if (atomic_read(&sde_crtc->vblank_refcount)) {
+	if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
 		SDE_ERROR("crtc%d invalid vblank refcount\n",
 				crtc->base.id);
 		SDE_EVT32(DRMID(crtc));
@@ -1487,40 +1606,36 @@
 
 int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
+	struct sde_crtc *sde_crtc;
+	int rc = 0;
 
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
 	if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
 		SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, true);
 	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
 		SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
-		return -EINVAL;
+		rc = -EINVAL;
 	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
 		SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, false);
 	} else {
 		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
 				crtc->base.id,
 				en ? "enable" : "disable",
 				atomic_read(&sde_crtc->vblank_refcount));
-		return 0;
 	}
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		SDE_EVT32(DRMID(crtc), en);
-
-		if (en)
-			sde_encoder_register_vblank_callback(encoder,
-					sde_crtc_vblank_cb, (void *)crtc);
-		else
-			sde_encoder_register_vblank_callback(encoder, NULL,
-					NULL);
-	}
-
-	return 0;
+	mutex_unlock(&sde_crtc->crtc_lock);
+	return rc;
 }
 
 void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -2094,6 +2209,7 @@
 	crtc->dev = dev;
 	atomic_set(&sde_crtc->vblank_refcount, 0);
 
+	mutex_init(&sde_crtc->crtc_lock);
 	spin_lock_init(&sde_crtc->spin_lock);
 	atomic_set(&sde_crtc->frame_pending, 0);
 
@@ -2124,7 +2240,6 @@
 	}
 
 	/* initialize output fence support */
-	mutex_init(&sde_crtc->crtc_lock);
 	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
 
 	/* create CRTC properties */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 8a4908c..286d9e6 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -120,6 +120,7 @@
  * @vblank_cb_count : count of vblank callback since last reset
  * @vblank_cb_time  : ktime at vblank count reset
  * @vblank_refcount : reference count for vblank enable request
+ * @suspend         : whether or not a suspend operation is in progress
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
@@ -159,6 +160,7 @@
 	u32 vblank_cb_count;
 	ktime_t vblank_cb_time;
 	atomic_t vblank_refcount;
+	bool suspend;
 
 	struct list_head feature_list;
 	struct list_head active_list;
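
With the crtc_lock/suspend rework above, power-resource handling now lives in _sde_crtc_vblank_enable_nolock() and sde_crtc_vblank() only touches hardware on the 0 <-> 1 refcount transitions (and skips it entirely while the crtc is suspended). The sequence below is an illustrative sketch of the expected pairing, not part of the patch:

/* Illustrative only: refcount behaviour of the reworked sde_crtc_vblank().
 * The vblank callback and power vote change only on the first enable and
 * the last disable; nested requests are pure refcounting.
 */
static void example_vblank_pairing(struct drm_crtc *crtc)
{
	sde_crtc_vblank(crtc, true);	/* 0 -> 1: register cb, enable power */
	sde_crtc_vblank(crtc, true);	/* 1 -> 2: refcount only */
	sde_crtc_vblank(crtc, false);	/* 2 -> 1: refcount only */
	sde_crtc_vblank(crtc, false);	/* 1 -> 0: unregister cb, drop power */
}
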
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index c2b3064..7db44d3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -903,6 +903,7 @@
 	enum sde_rsc_state rsc_state;
 	struct sde_rsc_cmd_config rsc_config;
 	int ret;
+	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -910,18 +911,25 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->disp_info.is_primary)
-		return NULL;
+	disp_info = &sde_enc->disp_info;
 
+	/*
+	 * Only the primary command mode panel can request CMD state.
+	 * All other panels/displays can only request VID state, including
+	 * a secondary command mode panel.
+	 */
 	rsc_state = enable ?
-		(sde_enc->disp_info.capabilities & MSM_DISPLAY_CAP_CMD_MODE ?
-		SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
+		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
 
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update) {
-		rsc_config.fps = sde_enc->disp_info.frame_rate;
-		rsc_config.vtotal = sde_enc->disp_info.vtotal;
-		rsc_config.prefill_lines = sde_enc->disp_info.prefill_lines;
-		rsc_config.jitter = sde_enc->disp_info.jitter;
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
+					&& disp_info->is_primary) {
+		rsc_config.fps = disp_info->frame_rate;
+		rsc_config.vtotal = disp_info->vtotal;
+		rsc_config.prefill_lines = disp_info->prefill_lines;
+		rsc_config.jitter = disp_info->jitter;
+		/* update it only once */
 		sde_enc->rsc_state_update = true;
 
 		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
@@ -936,7 +944,7 @@
 	if (ret)
 		SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
 
-	return sde_enc->rsc_client;
+	return sde_enc->disp_info.is_primary ? sde_enc->rsc_client : NULL;
 }
 
 void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 34bf2d2..afc21ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -182,7 +182,7 @@
 				atomic_read(&phys_enc->pending_kickoff_cnt));
 
 		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
-				"dsi1_phy", "vbif", "vbif_nrt", "dbg_bus",
+				"dsi1_phy", "vbif", "dbg_bus",
 				"vbif_dbg_bus", "panic");
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index bdab758..5187627 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -379,7 +379,7 @@
 	}
 
 	if (SDE_FORMAT_IS_UBWC(fmt) &&
-			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
+			!(wb_cfg->features & BIT(SDE_WB_UBWC))) {
 		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 9018581..369d5d1 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -43,6 +43,15 @@
 /* max bank bit for macro tile and ubwc format */
 #define DEFAULT_SDE_HIGHEST_BANK_BIT 15
 
+/* default ubwc version */
+#define DEFAULT_SDE_UBWC_VERSION SDE_HW_UBWC_VER_10
+
+/* default ubwc static config register value */
+#define DEFAULT_SDE_UBWC_STATIC 0x0
+
+/* default ubwc swizzle register value */
+#define DEFAULT_SDE_UBWC_SWIZZLE 0x0
+
 /* default hardware block size if dtsi entry is not present */
 #define DEFAULT_SDE_HW_BLOCK_LEN 0x100
 
@@ -97,6 +106,9 @@
 	MIXER_BLEND,
 	WB_LINEWIDTH,
 	BANK_BIT,
+	UBWC_VERSION,
+	UBWC_STATIC,
+	UBWC_SWIZZLE,
 	QSEED_TYPE,
 	CSC_TYPE,
 	PANIC_PER_PIPE,
@@ -287,6 +299,9 @@
 	{MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
 	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
 	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+	{UBWC_VERSION, "qcom,sde-ubwc-version", false, PROP_TYPE_U32},
+	{UBWC_STATIC, "qcom,sde-ubwc-static", false, PROP_TYPE_U32},
+	{UBWC_SWIZZLE, "qcom,sde-ubwc-swizzle", false, PROP_TYPE_U32},
 	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
 	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
 	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
@@ -809,6 +824,8 @@
 		sblk->pcc_blk.len = 0;
 		set_bit(SDE_SSPP_PCC, &sspp->features);
 	}
+
+	sblk->format_list = sde_cfg->vig_formats;
 }
 
 static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
@@ -856,15 +873,21 @@
 		sblk->pcc_blk.len = 0;
 		set_bit(SDE_SSPP_PCC, &sspp->features);
 	}
+
+	sblk->format_list = sde_cfg->dma_formats;
 }
 
 static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
 	struct sde_prop_value *prop_value, u32 *cursor_count)
 {
+	if (!IS_SDE_MAJOR_MINOR_SAME(sde_cfg->hwversion, SDE_HW_VER_300))
+		SDE_ERROR("invalid sspp type %d, xin id %d\n",
+				sspp->type, sspp->xin_id);
 	set_bit(SDE_SSPP_CURSOR, &sspp->features);
 	sblk->maxupscale = SSPP_UNITY_SCALE;
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = sde_cfg->cursor_formats;
 	sspp->id = SSPP_CURSOR0 + *cursor_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
 	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
@@ -878,6 +901,7 @@
 {
 	sblk->maxupscale = SSPP_UNITY_SCALE;
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = sde_cfg->dma_formats;
 	sspp->id = SSPP_DMA0 + *dma_count;
 	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
@@ -1414,6 +1438,9 @@
 		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
 		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
 
+		if (sde_cfg->has_wb_ubwc)
+			set_bit(SDE_WB_UBWC, &wb->features);
+
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
 				PROP_BITVALUE_ACCESS(prop_value,
@@ -1423,6 +1450,8 @@
 						WB_CLK_CTRL, i, 1);
 		}
 
+		wb->format_list = sde_cfg->wb_formats;
+
 		SDE_DEBUG(
 			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
 			wb->id - WB_0,
@@ -2037,6 +2066,19 @@
 	if (!prop_exists[BANK_BIT])
 		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
 
+	cfg->ubwc_version = PROP_VALUE_ACCESS(prop_value, UBWC_VERSION, 0);
+	if (!prop_exists[UBWC_VERSION])
+		cfg->ubwc_version = DEFAULT_SDE_UBWC_VERSION;
+
+	cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
+	if (!prop_exists[UBWC_STATIC])
+		cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
+
+	cfg->mdp[0].ubwc_swizzle = PROP_VALUE_ACCESS(prop_value,
+			UBWC_SWIZZLE, 0);
+	if (!prop_exists[UBWC_SWIZZLE])
+		cfg->mdp[0].ubwc_swizzle = DEFAULT_SDE_UBWC_SWIZZLE;
+
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
 	if (!rc && !strcmp(type, "qseedv3")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
@@ -2154,10 +2196,9 @@
 static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
 	uint32_t hw_rev)
 {
-	int i, rc = 0;
+	int rc = 0;
 	uint32_t dma_list_size, vig_list_size, wb2_list_size;
 	uint32_t cursor_list_size = 0;
-	struct sde_sspp_sub_blks *sblk;
 	uint32_t index = 0;
 
 	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) {
@@ -2239,43 +2280,17 @@
 	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
 		index, tp10_ubwc_formats,
 		ARRAY_SIZE(tp10_ubwc_formats));
-
-	for (i = 0; i < sde_cfg->sspp_count; ++i) {
-		struct sde_sspp_cfg *sspp = &sde_cfg->sspp[i];
-
-		sblk = (struct sde_sspp_sub_blks *)sspp->sblk;
-		switch (sspp->type) {
-		case SSPP_TYPE_VIG:
-			sblk->format_list = sde_cfg->vig_formats;
-			break;
-		case SSPP_TYPE_CURSOR:
-			if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300))
-				sblk->format_list = sde_cfg->cursor_formats;
-			else
-				SDE_ERROR("invalid sspp type %d, xin id %d\n",
-					sspp->type, sspp->xin_id);
-			break;
-		case SSPP_TYPE_DMA:
-			sblk->format_list = sde_cfg->dma_formats;
-			break;
-		default:
-			SDE_ERROR("invalid sspp type %d\n", sspp->type);
-			rc = -EINVAL;
-			goto end;
-		}
-	}
-
-	for (i = 0; i < sde_cfg->wb_count; ++i)
-		sde_cfg->wb[i].format_list = sde_cfg->wb_formats;
-
 end:
 	return rc;
 }
 
-static int sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+static int _sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 {
 	int rc = 0;
 
+	if (!sde_cfg)
+		return -EINVAL;
+
 	switch (hw_rev) {
 	case SDE_HW_VER_170:
 	case SDE_HW_VER_171:
@@ -2287,6 +2302,7 @@
 	case SDE_HW_VER_400:
 		/* update msm8998 and sdm845 target here */
 		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+		sde_cfg->has_wb_ubwc = true;
 		break;
 	}
 
@@ -2343,6 +2359,10 @@
 
 	sde_cfg->hwversion = hw_rev;
 
+	rc = _sde_hardware_caps(sde_cfg, hw_rev);
+	if (rc)
+		goto end;
+
 	rc = sde_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -2397,10 +2417,6 @@
 	if (rc)
 		goto end;
 
-	rc = sde_hardware_caps(sde_cfg, hw_rev);
-	if (rc)
-		goto end;
-
 	return sde_cfg;
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 23640bb..2b34016 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -61,6 +61,17 @@
 #define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
 
 /**
+ * Supported UBWC feature versions
+ */
+enum {
+	SDE_HW_UBWC_VER_10 = 0x100,
+	SDE_HW_UBWC_VER_20 = 0x200,
+	SDE_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= SDE_HW_UBWC_VER_20)
+
+/**
  * MDP TOP BLOCK features
 * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
  * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
@@ -207,9 +218,7 @@
  * @SDE_WB_DOWNSCALE,       Writeback integer downscaler,
  * @SDE_WB_DITHER,          Dither block
 * @SDE_WB_TRAFFIC_SHAPER,  Writeback traffic shaper block
- * @SDE_WB_UBWC_1_0,        Writeback Universal bandwidth compression 1.0
- *                          support
- * @SDE_WB_UBWC_1_5         UBWC 1.5 support
+ * @SDE_WB_UBWC,            Writeback Universal bandwidth compression
  * @SDE_WB_YUV_CONFIG       Writeback supports output of YUV colorspace
  * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
  * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of out ROI in
@@ -225,7 +234,7 @@
 	SDE_WB_DOWNSCALE,
 	SDE_WB_DITHER,
 	SDE_WB_TRAFFIC_SHAPER,
-	SDE_WB_UBWC_1_0,
+	SDE_WB_UBWC,
 	SDE_WB_YUV_CONFIG,
 	SDE_WB_PIPE_ALPHA,
 	SDE_WB_XY_ROI_OFFSET,
@@ -447,11 +456,15 @@
  * @base:              register base offset to mdss
  * @features           bit mask identifying sub-blocks/features
  * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
  * @clk_ctrls          clock control register definition
  */
 struct sde_mdp_cfg {
 	SDE_HW_BLK_INFO;
 	u32 highest_bank_bit;
+	u32 ubwc_static;
+	u32 ubwc_swizzle;
 	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
 };
 
@@ -660,12 +673,13 @@
  * @max_mixer_blendstages max layer mixer blend stages or
  *                       supported z order
  * @max_wb_linewidth   max writeback line width support.
- * @highest_bank_bit   highest memory bit setting for tile buffers.
  * @qseed_type         qseed2 or qseed3 support.
  * @csc_type           csc or csc_10bit support.
  * @smart_dma_rev      Supported version of SmartDMA feature.
  * @has_src_split      source split feature status
  * @has_cdp            Client driver prefetch feature status
+ * @has_wb_ubwc        UBWC feature supported on WB
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
@@ -678,13 +692,14 @@
 	u32 max_mixer_width;
 	u32 max_mixer_blendstages;
 	u32 max_wb_linewidth;
-	u32 highest_bank_bit;
 	u32 qseed_type;
 	u32 csc_type;
 	u32 smart_dma_rev;
 	bool has_src_split;
 	bool has_cdp;
 	bool has_dim_layer;
+	bool has_wb_ubwc;
+	u32 ubwc_version;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
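
The catalog now carries ubwc_version plus the per-mdp ubwc_static and ubwc_swizzle values parsed from the new device tree properties. The sketch below illustrates how a hardware block is expected to consume these fields; it mirrors the sde_hw_sspp/sde_hw_wb/sde_hw_top changes further down in this patch and is not additional driver code:

/* Illustrative sketch only: gate UBWC 2.0 programming on the catalog
 * version and use the per-mdp static/swizzle values.  The register name
 * and bit layout mirror the SSPP change later in this patch.
 */
static void example_setup_ubwc(struct sde_hw_blk_reg_map *c,
		const struct sde_mdss_cfg *catalog)
{
	const struct sde_mdp_cfg *mdp = &catalog->mdp[0];

	if (!IS_UBWC_20_SUPPORTED(catalog->ubwc_version))
		return;	/* UBWC 1.0 parts keep the legacy programming */

	/* swizzle in the low bits, highest bank bit at bit 4 */
	SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
			BIT(31) | mdp->ubwc_swizzle |
			(mdp->highest_bank_bit << 4));
}
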
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 14230c27..71c3855 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -62,6 +62,7 @@
 
 #define SSPP_SRC_CONSTANT_COLOR            0x3c
 #define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
 #define SSPP_FETCH_CONFIG                  0x048
 #define SSPP_DANGER_LUT                    0x60
 #define SSPP_SAFE_LUT                      0x64
@@ -366,7 +367,11 @@
 		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
 		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
 			SDE_FETCH_CONFIG_RESET_VALUE |
-			ctx->highest_bank_bit << 18);
+			ctx->mdp->highest_bank_bit << 18);
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+					BIT(31) | (ctx->mdp->ubwc_swizzle) |
+					(ctx->mdp->highest_bank_bit << 4));
 	}
 
 	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
@@ -1074,6 +1079,9 @@
 	struct sde_hw_pipe *hw_pipe;
 	struct sde_sspp_cfg *cfg;
 
+	if (!addr || !catalog)
+		return ERR_PTR(-EINVAL);
+
 	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
 	if (!hw_pipe)
 		return ERR_PTR(-ENOMEM);
@@ -1085,10 +1093,11 @@
 	}
 
 	/* Assign ops */
+	hw_pipe->catalog = catalog;
+	hw_pipe->mdp = &catalog->mdp[0];
 	hw_pipe->idx = idx;
 	hw_pipe->cap = cfg;
 	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
-	hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
 			hw_pipe->hw.blk_off,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index a224234..2fa01e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -496,21 +496,23 @@
  * @blk_off:      pipe offset relative to mdss offset
  * @length        length of register block offset
  * @hwversion     mdss hw version number
+ * @catalog:      back pointer to catalog
+ * @mdp:          pointer to associated mdp portion of the catalog
  * @idx:          pipe index
  * @type :        pipe type, VIG/DMA/RGB/CURSOR, certain operations are not
  *                supported for each pipe type
  * @pipe_hw_cap:  pointer to layer_cfg
- * @highest_bank_bit:
  * @ops:          pointer to operations possible for this pipe
  */
 struct sde_hw_pipe {
 	/* base */
-	 struct sde_hw_blk_reg_map hw;
+	struct sde_hw_blk_reg_map hw;
+	struct sde_mdss_cfg *catalog;
+	struct sde_mdp_cfg *mdp;
 
 	/* Pipe */
 	enum sde_sspp idx;
 	const struct sde_sspp_cfg *cap;
-	u32 highest_bank_bit;
 
 	/* Ops */
 	struct sde_hw_sspp_ops ops;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index b3fb379..a7bebc2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -16,6 +16,7 @@
 #include "sde_dbg.h"
 
 #define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
 
 #define FLD_SPLIT_DISPLAY_CMD             BIT(1)
 #define FLD_SMART_PANEL_FREE_RUN          BIT(2)
@@ -242,6 +243,25 @@
 	return ERR_PTR(-EINVAL);
 }
 
+static inline void _sde_hw_mdptop_init_ubwc(void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_blk_reg_map hw;
+
+	if (!addr || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
+		return;
+
+	memset(&hw, 0, sizeof(hw));
+	hw.base_off = addr;
+	hw.blk_off = 0x0;
+	hw.hwversion = m->hwversion;
+	hw.log_mask = SDE_DBG_MASK_TOP;
+	SDE_REG_WRITE(&hw, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
 struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
 		void __iomem *addr,
 		const struct sde_mdss_cfg *m)
@@ -249,6 +269,9 @@
 	struct sde_hw_mdp *mdp;
 	const struct sde_mdp_cfg *cfg;
 
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
 	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
 	if (!mdp)
 		return ERR_PTR(-ENOMEM);
@@ -270,6 +293,8 @@
 			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
 			mdp->hw.xin_id);
 
+	_sde_hw_mdptop_init_ubwc(addr, m);
+
 	return mdp;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 320b05f..98aff0f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -41,6 +41,7 @@
 #define WB_N16_INIT_PHASE_Y_C12		0x06C
 #define WB_OUT_SIZE			0x074
 #define WB_ALPHA_X_VALUE		0x078
+#define WB_UBWC_STATIC_CTRL		0x144
 #define WB_CSC_BASE			0x260
 #define WB_DST_ADDR_SW_STATUS		0x2B0
 #define WB_CDP_CTRL			0x2B4
@@ -135,10 +136,13 @@
 	if (SDE_FORMAT_IS_UBWC(fmt)) {
 		opmode |= BIT(0);
 		dst_format |= BIT(31);
-		if (ctx->highest_bank_bit)
-			write_config |= (ctx->highest_bank_bit << 8);
+		write_config |= (ctx->mdp->highest_bank_bit << 8);
 		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
 			write_config |= 0x8;
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+			SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
+					(ctx->mdp->ubwc_swizzle << 0) |
+					(ctx->mdp->highest_bank_bit << 4));
 	}
 
 	if (data->is_secure)
@@ -199,6 +203,9 @@
 	struct sde_hw_wb *c;
 	struct sde_wb_cfg *cfg;
 
+	if (!addr || !m || !hw_mdp)
+		return ERR_PTR(-EINVAL);
+
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
@@ -211,10 +218,11 @@
 	}
 
 	/* Assign ops */
+	c->catalog = m;
+	c->mdp = &m->mdp[0];
 	c->idx = idx;
 	c->caps = cfg;
 	_setup_wb_ops(&c->ops, c->caps->features);
-	c->highest_bank_bit = m->mdp[0].highest_bank_bit;
 	c->hw_mdp = hw_mdp;
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index 52a5ee5..9d17fb3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -62,15 +62,18 @@
 /**
  * struct sde_hw_wb : WB driver object
  * @struct sde_hw_blk_reg_map *hw;
+ * @catalog: back pointer to catalog
+ * @mdp:          pointer to associated mdp portion of the catalog
  * @idx
  * @wb_hw_caps
  * @ops
- * @highest_bank_bit: GPU highest memory bank bit used
  * @hw_mdp: MDP top level hardware block
  */
 struct sde_hw_wb {
 	/* base */
 	struct sde_hw_blk_reg_map hw;
+	struct sde_mdss_cfg *catalog;
+	struct sde_mdp_cfg *mdp;
 
 	/* wb path */
 	int idx;
@@ -79,8 +82,6 @@
 	/* ops */
 	struct sde_hw_wb_ops ops;
 
-	u32 highest_bank_bit;
-
 	struct sde_hw_mdp *hw_mdp;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index d72c0b3..c7cb190 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -339,24 +339,12 @@
 
 static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-
 	return sde_crtc_vblank(crtc, true);
 }
 
 static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-
 	sde_crtc_vblank(crtc, false);
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 }
 
 static void sde_kms_prepare_commit(struct msm_kms *kms,
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 908926c..78c596d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -136,6 +136,7 @@
 	struct sde_debugfs_regset32 debugfs_src;
 	struct sde_debugfs_regset32 debugfs_scaler;
 	struct sde_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
 };
 
 #define to_sde_plane(x) container_of(x, struct sde_plane, base)
@@ -666,6 +667,7 @@
 	}
 
 	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&psde->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
 
 	decimated = DECIMATED_DIMENSION(src_w,
 			psde->pipe_cfg.horz_decimation);
@@ -1007,7 +1009,8 @@
 		int error;
 
 		error = _sde_plane_setup_scaler3_lut(psde, pstate);
-		if (error || !psde->pixel_ext_usr) {
+		if (error || !psde->pixel_ext_usr ||
+				psde->debugfs_default_scale) {
 			/* calculate default config for QSEED3 */
 			_sde_plane_setup_scaler3(psde,
 					psde->pipe_cfg.src_rect.w,
@@ -1017,7 +1020,8 @@
 					psde->scaler3_cfg, fmt,
 					chroma_subsmpl_h, chroma_subsmpl_v);
 		}
-	} else if (!psde->pixel_ext_usr || !pstate) {
+	} else if (!psde->pixel_ext_usr || !pstate ||
+			psde->debugfs_default_scale) {
 		uint32_t deci_dim, i;
 
 		/* calculate default configuration for QSEED2 */
@@ -2549,6 +2553,10 @@
 		sde_debugfs_create_regset32("scaler_blk", 0444,
 				psde->debugfs_root,
 				&psde->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0644,
+				psde->debugfs_root,
+				&psde->debugfs_default_scale);
 	}
 
 	if (cfg->features & BIT(SDE_SSPP_CSC) ||
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index d72b7cd..eb6e711 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1130,14 +1130,6 @@
 	}
 
 	kfree(rsvp);
-
-	/* if no remaining reservation, then clear the topology name */
-	if (!_sde_rm_get_rsvp(rm, conn->encoder))
-		(void) msm_property_set_property(
-				sde_connector_get_propinfo(conn),
-				sde_connector_get_property_values(conn->state),
-				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
 }
 
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1173,6 +1165,12 @@
 		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
 				rsvp->enc_id);
 		_sde_rm_release_rsvp(rm, rsvp, conn);
+
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(conn),
+				sde_connector_get_property_values(conn->state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 }
 
@@ -1190,8 +1188,12 @@
 			sde_connector_get_property_values(conn_state),
 			CONNECTOR_PROP_TOPOLOGY_NAME,
 			rsvp->topology);
-	if (ret)
+	if (ret) {
+		SDE_ERROR("failed to set topology name property, ret %d\n",
+				ret);
 		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+		return ret;
+	}
 
 	/* Swap next rsvp to be the active */
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1284,6 +1286,12 @@
 		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 		rsvp_cur = NULL;
 		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(
+						conn_state->connector),
+				sde_connector_get_property_values(conn_state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 4479e5e..2220925 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -341,7 +341,7 @@
 			wb_dev->wb_cfg->sblk->maxlinewidth);
 
 	sde_kms_info_start(info, "features");
-	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC_1_0))
+	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC))
 		sde_kms_info_append(info, "wb_ubwc");
 	sde_kms_info_stop(info);
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 4c1260b..697b7f7 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -1974,6 +1974,9 @@
 	char *end_addr;
 	int i;
 
+	if (!len_bytes)
+		return;
+
 	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
 	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
 
@@ -2446,8 +2449,12 @@
 			sizeof(sde_dbg_base.req_dump_blks));
 
 	va_start(args, name);
-	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
-		blk_name = va_arg(args, char*);
+	i = 0;
+	while ((blk_name = va_arg(args, char*))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dump arguments\n");
+			break;
+		}
 		if (IS_ERR_OR_NULL(blk_name))
 			break;
 
@@ -2471,9 +2478,6 @@
 		if (!strcmp(blk_name, "panic"))
 			do_panic = true;
 	}
-	blk_name = va_arg(args, char*);
-	if (!IS_ERR_OR_NULL(blk_name))
-		pr_err("could not parse all dump arguments\n");
 	va_end(args);
 
 	if (queue_work) {
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index b36e17c..2464551 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -48,6 +48,8 @@
 #define MAX_BUFFER_SIZE 256
 
 #define TRY_CMD_MODE_SWITCH		0xFFFF
+#define TRY_CLK_MODE_SWITCH		0xFFFE
+#define STATE_UPDATE_NOT_ALLOWED	0xFFFD
 
 static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
 
@@ -140,6 +142,111 @@
 	return;
 }
 
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	struct sde_rsc_event *evt;
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_func || !event_type) {
+		pr_err("no event or cb func\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	evt = kzalloc(sizeof(struct sde_rsc_event), GFP_KERNEL);
+	if (!evt)
+		return ERR_PTR(-ENOMEM);
+
+	evt->event_type = event_type;
+	evt->rsc_index = rsc_index;
+	evt->usr = usr;
+	evt->cb_func = cb_func;
+	pr_debug("event register type:%d rsc index:%d\n",
+						event_type, rsc_index);
+
+	mutex_lock(&rsc->client_lock);
+	list_add(&evt->list, &rsc->event_list);
+	mutex_unlock(&rsc->client_lock);
+
+	return evt;
+}
+
+void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (!event) {
+		pr_debug("invalid event client\n");
+		goto end;
+	} else if (event->rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index\n");
+		goto end;
+	}
+
+	pr_debug("event client destroyed\n");
+	rsc = rsc_prv_list[event->rsc_index];
+	if (!rsc)
+		goto end;
+
+	mutex_lock(&rsc->client_lock);
+	list_del_init(&event->list);
+	mutex_unlock(&rsc->client_lock);
+
+	kfree(event);
+end:
+	return;
+}
+
+static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mp = &phandle->mp;
+
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	if (phandle->current_usecase_ndx == pclient->usecase_ndx)
+		goto end;
+
+	if (enable) {
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto end;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+	}
+
+	phandle->current_usecase_ndx = pclient->usecase_ndx;
+
+end:
+	return rc;
+}
+
 static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *cmd_config)
 {
@@ -237,24 +344,50 @@
 static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+	bool idle_switch = true;
 
 	list_for_each_entry(client, &rsc->client_list, list)
-		if (client->current_state != SDE_RSC_IDLE_STATE)
-			return TRY_CMD_MODE_SWITCH;
+		if (client->current_state != SDE_RSC_IDLE_STATE) {
+			idle_switch = false;
+			break;
+		}
 
-	if (rsc->hw_ops.state_update)
+	if (!idle_switch) {
+		/*
+		 * The following checks must loop through each client
+		 * because the clients may appear in any order; sorting
+		 * is not possible, only a preference order is available.
+		 */
+
+		/* first check if any vid client active */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_VID_STATE)
+				return rc;
+
+		/* now try cmd state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CMD_STATE)
+				return TRY_CMD_MODE_SWITCH;
+
+		/* now try clk state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CLK_STATE)
+				return TRY_CLK_MODE_SWITCH;
+
+	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+	}
 
 	return rc;
 }
 
-static bool sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
+static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
 
 	if (!rsc->primary_client) {
 		pr_err("primary client not available for cmd state switch\n");
@@ -276,6 +409,12 @@
 		if (client->current_state == SDE_RSC_VID_STATE)
 			goto end;
 
+	/* no need to enable solver again */
+	if (rsc->current_state == SDE_RSC_CLK_STATE) {
+		rc = 0;
+		goto end;
+	}
+
 	if (rsc->hw_ops.state_update)
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
 
@@ -287,6 +426,28 @@
 	return rc;
 }
 
+static int sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
+{
+	struct sde_rsc_client *client;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+
+	list_for_each_entry(client, &rsc->client_list, list)
+		if ((client->current_state == SDE_RSC_VID_STATE) ||
+		    (client->current_state == SDE_RSC_CMD_STATE))
+			goto end;
+
+	/* no need to enable the solver again */
+	if (rsc->current_state == SDE_RSC_CMD_STATE) {
+		rc = 0;
+		goto end;
+	}
+
+	if (rsc->hw_ops.state_update)
+		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+end:
+	return rc;
+}
+
 static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
@@ -310,7 +471,7 @@
 
 /**
  * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
  * set this property during panel config time. A switching client can set the
  * property to change the state
  *
@@ -350,8 +511,7 @@
 		pr_err("invalid master component binding\n");
 		rc = -EINVAL;
 		goto end;
-	} else if ((rsc->current_state == state) &&
-				(state != SDE_RSC_CMD_UPDATE_STATE)) {
+	} else if ((rsc->current_state == state) && !config) {
 		pr_debug("no state change: %d\n", state);
 		goto end;
 	}
@@ -360,22 +520,33 @@
 		__builtin_return_address(0), rsc->current_state,
 		caller_client->name, state);
 
-	wait_requested = (rsc->current_state != SDE_RSC_IDLE_STATE);
+	/* only switch state needs vsync wait */
+	wait_requested = (rsc->current_state == SDE_RSC_VID_STATE) ||
+			(rsc->current_state == SDE_RSC_CMD_STATE);
 
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	switch (state) {
 	case SDE_RSC_IDLE_STATE:
 		rc = sde_rsc_switch_to_idle(rsc);
+
 		/* video state client might be exiting; try cmd state switch */
-		if (rc == TRY_CMD_MODE_SWITCH)
+		if (rc == TRY_CMD_MODE_SWITCH) {
 			rc = sde_rsc_switch_to_cmd(rsc, NULL,
 					rsc->primary_client, wait_requested);
+			if (!rc)
+				state = SDE_RSC_CMD_STATE;
+
+		/* cmd state client might be exiting; try clk state switch */
+		} else if (rc == TRY_CLK_MODE_SWITCH) {
+			rc = sde_rsc_switch_to_clk(rsc);
+			if (!rc)
+				state = SDE_RSC_CLK_STATE;
+		}
 		break;
 
 	case SDE_RSC_CMD_STATE:
-	case SDE_RSC_CMD_UPDATE_STATE:
 		rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
 								wait_requested);
 		break;
@@ -385,21 +556,29 @@
 								wait_requested);
 		break;
 
+	case SDE_RSC_CLK_STATE:
+		rc = sde_rsc_switch_to_clk(rsc);
+		break;
+
 	default:
 		pr_err("invalid state handling %d\n", state);
 		break;
 	}
 
-	if (rc) {
+	if (rc == STATE_UPDATE_NOT_ALLOWED) {
+		rc = 0;
+		goto clk_disable;
+	} else if (rc) {
 		pr_err("state update failed rc:%d\n", rc);
-		goto end;
+		goto clk_disable;
 	}
 
 	pr_debug("state switch successfully complete: %d\n", state);
 	rsc->current_state = state;
 
+clk_disable:
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 end:
 	mutex_unlock(&rsc->client_lock);
 	return rc;
@@ -518,7 +697,7 @@
 		seq_printf(s, "\t client:%s state:%d\n",
 				client->name, client->current_state);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (rsc->hw_ops.debug_show) {
 		ret = rsc->hw_ops.debug_show(s, rsc);
@@ -526,7 +705,7 @@
 			pr_err("sde rsc: hw debug failed ret:%d\n", ret);
 	}
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	return 0;
@@ -555,12 +734,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
 							MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -594,7 +773,7 @@
 	input[count - 1] = '\0';
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	mode = strnstr(input, "mode0=", strlen("mode0="));
 	if (mode) {
@@ -620,7 +799,7 @@
 	}
 
 end:
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
@@ -647,12 +826,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
 						MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -692,7 +871,7 @@
 	}
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (vsync_state)
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -700,7 +879,7 @@
 	else
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	kfree(input);
@@ -750,7 +929,7 @@
 		return;
 
 	if (rsc->pclient)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	if (rsc->fs)
 		devm_regulator_put(rsc->fs);
 	if (rsc->wrapper_io.base)
@@ -890,8 +1069,7 @@
 		goto sde_rsc_fail;
 	}
 
-	/* these clocks are always on */
-	if (sde_power_resource_enable(&rsc->phandle, rsc->pclient, true)) {
+	if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) {
 		pr_err("failed to enable sde rsc power resources\n");
 		goto sde_rsc_fail;
 	}
@@ -899,6 +1077,8 @@
 	if (sde_rsc_timer_calculate(rsc, NULL))
 		goto sde_rsc_fail;
 
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+
 	INIT_LIST_HEAD(&rsc->client_list);
 	mutex_init(&rsc->client_lock);
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.h b/drivers/gpu/drm/msm/sde_rsc.h
index e9a55b6..2775d21 100644
--- a/drivers/gpu/drm/msm/sde_rsc.h
+++ b/drivers/gpu/drm/msm/sde_rsc.h
@@ -33,6 +33,37 @@
 struct sde_rsc_priv;
 
 /**
+ * event will be triggered before sde core power collapse,
+ * mdss gdsc is still on
+ */
+#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
+/**
+ * event will be triggered after sde core collapse complete,
+ * mdss gdsc is off now
+ */
+#define SDE_RSC_EVENT_POST_CORE_PC 0x2
+/**
+ * event will be triggered before restoring the sde core from power collapse,
+ * mdss gdsc is still off
+ */
+#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
+/**
+ * event will be triggered after restoring the sde core from power collapse,
+ * mdss gdsc is on now
+ */
+#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
+/**
+ * event attached with solver state enabled
+ * all clients in clk_state or cmd_state
+ */
+#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
+/**
+ * event attached with solver state disabled
+ * one of the client requested for vid state
+ */
+#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
+
+/**
  * rsc_mode_req: sde rsc mode request information
  * MODE_READ: read vsync status
  * MODE0_UPDATE: mode0 status , this should be 0x0
@@ -60,23 +91,22 @@
 
 /**
  * sde_rsc_state: sde rsc state information
- * SDE_RSC_MODE_IDLE: A client requests for idle state when there is no
+ * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
  *                    pixel or cmd transfer expected. An idle vote from
  *                    all clients lead to power collapse state.
- * SDE_RSC_MODE_CMD:  A client requests for cmd state when it wants to
+ * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
+ *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
+ *                    sde power handle, etc.
+ * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
  *                    enable the solver mode.
- * SDE_RSC_MODE_CMD_UPDATE: A clients requests for cmd_update state when
- *                    it wants to update the backoff time during solver
- *                    enable state. Inline-rotation is one good example
- *                    use case. It increases the prefill lines by 128 lines.
- * SDE_RSC_MODE_VID:  A client requests for vid state it wants to avoid
+ * SDE_RSC_VID_STATE:  A client requests for vid state when it wants to avoid
  *                    solver enable because client is fetching data from
  *                    continuously.
  */
 enum sde_rsc_state {
 	SDE_RSC_IDLE_STATE,
+	SDE_RSC_CLK_STATE,
 	SDE_RSC_CMD_STATE,
-	SDE_RSC_CMD_UPDATE_STATE,
 	SDE_RSC_VID_STATE,
 };
 
@@ -86,7 +116,7 @@
  * @current_state:   current client state
  * @crtc_id:		crtc_id associated with this rsc client.
  * @rsc_index:	rsc index of a client - only index "0" valid.
- * @list:	list to attach power handle master list
+ * @list:	list to attach client master list
  */
 struct sde_rsc_client {
 	char name[MAX_RSC_CLIENT_NAME_LEN];
@@ -97,6 +127,22 @@
 };
 
 /**
+ * struct sde_rsc_event: local event registration entry structure
+ * @cb_func:	Pointer to desired callback function
+ * @usr:	User pointer to pass to callback on event trigger
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @event_type:	refer comments in event_register
+ * @list:	list to attach event master list
+ */
+struct sde_rsc_event {
+	void (*cb_func)(uint32_t event_type, void *usr);
+	void *usr;
+	u32 rsc_index;
+	uint32_t event_type;
+	struct list_head list;
+};
+
+/**
  * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
  * @init:			Initialize the sequencer, solver, qtimer,
 				etc. hardware blocks on RSC.
@@ -183,6 +229,7 @@
  * @wrapper_io:		wrapper io data mapping
  *
  * @client_list:	current rsc client list handle
+ * @event_list:		current rsc event list handle
  * @client_lock:	current rsc client synchronization lock
  *
  * timer_config:	current rsc timer configuration
@@ -212,6 +259,7 @@
 	struct dss_io_data wrapper_io;
 
 	struct list_head client_list;
+	struct list_head event_list;
 	struct mutex client_lock;
 
 	struct sde_rsc_timer_config timer_config;
@@ -261,7 +309,7 @@
 
 /**
  * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
  * set this property during panel time. A switching client can set the
  * property to change the state
  *
@@ -298,5 +346,23 @@
  */
 int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
 
+/**
+ * sde_rsc_register_event - register a callback function for an event
+ * @rsc_index:   rsc index on which to register the event. As of now only
+ *               SDE_RSC_INDEX is a valid rsc index.
+ * @event_type:  event type(s) to register; for example, a client sets 0x3
+ *               to register for both PRE_CORE_PC and POST_CORE_PC events.
+ * @cb_func:     Pointer to desired callback function
+ * @usr:         User pointer to pass to callback on event trigger
+ * Returns: sde_rsc_event pointer on success
+ */
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
+
+/**
+ * sde_rsc_unregister_event - unregister callback for an event
+ * @sde_rsc_event: event returned by sde_rsc_register_event
+ */
+void sde_rsc_unregister_event(struct sde_rsc_event *event);
 
 #endif /* _SDE_RSC_H_ */
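
A minimal usage sketch for the event API declared above; my_pc_cb and my_data are hypothetical names, while SDE_RSC_INDEX and the event masks come from this header:

/* Illustrative sketch: register for both core power-collapse events
 * (SDE_RSC_EVENT_PRE_CORE_PC | SDE_RSC_EVENT_POST_CORE_PC == 0x3).
 */
static void my_pc_cb(uint32_t event_type, void *usr)
{
	if (event_type == SDE_RSC_EVENT_PRE_CORE_PC)
		pr_debug("sde core about to power collapse\n");
	else if (event_type == SDE_RSC_EVENT_POST_CORE_PC)
		pr_debug("sde core power collapse complete\n");
}

static int example_register_pc_events(void *my_data)
{
	struct sde_rsc_event *evt;

	evt = sde_rsc_register_event(SDE_RSC_INDEX,
			SDE_RSC_EVENT_PRE_CORE_PC | SDE_RSC_EVENT_POST_CORE_PC,
			my_pc_cb, my_data);
	if (IS_ERR_OR_NULL(evt))
		return -EINVAL;

	/* ... on teardown ... */
	sde_rsc_unregister_event(evt);
	return 0;
}
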
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 8dd04bd..dd7f37a 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -93,15 +93,17 @@
 #define SDE_RSCC_F0_QTMR_V1_CNTP_CTL			0x202C
 #define SDE_RSCC_F1_QTMR_V1_CNTP_CTL			0x302C
 
-/* mdp and dsi clocks in clock gate state */
-#define DISP_MDP_DSI_CLK_GATE		0x7f0
-
-/* mdp and dsi clocks in clock ungate state */
-#define MDSS_CORE_GDSCR			0x0
-#define DISP_MDP_DSI_CLK_UNGATE		0x5000
-
 #define MAX_CHECK_LOOPS			500
 
+static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
+{
+	struct sde_rsc_event *event;
+
+	list_for_each_entry(event, &rsc->event_list, list)
+		if (event->event_type & event_type)
+			event->cb_func(event_type, event->usr);
+}
+
 static int rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
 {
 	pr_debug("rsc hardware qtimer init\n");
@@ -182,31 +184,33 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
 						0x888babec, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0xaaa8a020, rsc->debug_mode);
+						0xa806a020, rsc->debug_mode);
 
 	/* Mode - 2 sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xe1a138eb, rsc->debug_mode);
+						0xa138ebaa, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xa2ede081, rsc->debug_mode);
+						0xe0a581e1, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x8a3982e2, rsc->debug_mode);
+						0x82e2a2ed, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xa92088ea, rsc->debug_mode);
+						0x88ea8a39, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x89e6a6e9, rsc->debug_mode);
+						0xa6e9a920, rsc->debug_mode);
 
 	/* tcs sleep sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0xa7e9a920, rsc->debug_mode);
+						0xa92089e6, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0x002089e7, rsc->debug_mode);
+						0x89e7a7e9, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
+						0x00000020, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x27, rsc->debug_mode);
+						0x29, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x2d, rsc->debug_mode);
+						0x2f, rsc->debug_mode);
 
 	return 0;
 }
@@ -297,10 +301,13 @@
 	rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
 	if (rc) {
 		pr_err("vdd reg fast mode set failed rc:%d\n", rc);
-		goto end;
+		return rc;
 	}
 
 	rc = -EBUSY;
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+
 	wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 				rsc->debug_mode);
 	wrapper_status |= BIT(3);
@@ -319,10 +326,20 @@
 		usleep_range(1, 2);
 	}
 
-	if (rc)
+	if (rc) {
 		pr_err("vdd fs is still enabled\n");
+		goto end;
+	}
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
+
+	return 0;
 
 end:
+	regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -331,6 +348,8 @@
 	int rc = -EBUSY;
 	int count, reg;
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
+
 	// needs review with HPG sequence
 	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
 					0x0, rsc->debug_mode);
@@ -374,6 +393,8 @@
 	if (rc)
 		pr_err("vdd reg normal mode set failed rc:%d\n", rc);
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -407,6 +428,8 @@
 							reg, rsc->debug_mode);
 		/* make sure that solver is enabled */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_ENABLED);
 		break;
 
 	case SDE_RSC_VID_STATE:
@@ -424,6 +447,8 @@
 							0x1, rsc->debug_mode);
 		/* make sure that solver mode is override */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
 		break;
 
 	case SDE_RSC_IDLE_STATE:
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 9f7300a..f513207 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -35,6 +35,7 @@
 	adreno_a3xx_snapshot.o \
 	adreno_a4xx_snapshot.o \
 	adreno_a5xx_snapshot.o \
+	adreno_a6xx_snapshot.o \
 	adreno_a4xx_preempt.o \
 	adreno_a5xx_preempt.o \
 	adreno_sysfs.o \
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 3907e24..1d42797 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -70,11 +70,44 @@
 #define A6XX_CP_ADDR_MODE_CNTL           0x842
 #define A6XX_CP_PROTECT_CNTL             0x84F
 #define A6XX_CP_PROTECT_REG              0x850
+#define A6XX_CP_PERFCTR_CP_SEL_0         0x8D0
+#define A6XX_CP_PERFCTR_CP_SEL_1         0x8D1
+#define A6XX_CP_PERFCTR_CP_SEL_2         0x8D2
+#define A6XX_CP_PERFCTR_CP_SEL_3         0x8D3
+#define A6XX_CP_PERFCTR_CP_SEL_4         0x8D4
+#define A6XX_CP_PERFCTR_CP_SEL_5         0x8D5
+#define A6XX_CP_PERFCTR_CP_SEL_6         0x8D6
+#define A6XX_CP_PERFCTR_CP_SEL_7         0x8D7
+#define A6XX_CP_PERFCTR_CP_SEL_8         0x8D8
+#define A6XX_CP_PERFCTR_CP_SEL_9         0x8D9
+#define A6XX_CP_PERFCTR_CP_SEL_10        0x8DA
+#define A6XX_CP_PERFCTR_CP_SEL_11        0x8DB
+#define A6XX_CP_PERFCTR_CP_SEL_12        0x8DC
+#define A6XX_CP_PERFCTR_CP_SEL_13        0x8DD
+#define A6XX_CP_CRASH_SCRIPT_BASE_LO     0x900
+#define A6XX_CP_CRASH_SCRIPT_BASE_HI     0x901
+#define A6XX_CP_CRASH_DUMP_CNTL          0x902
+#define A6XX_CP_CRASH_DUMP_STATUS        0x903
 #define A6XX_CP_SQE_STAT_ADDR            0x908
 #define A6XX_CP_SQE_STAT_DATA            0x909
+#define A6XX_CP_DRAW_STATE_ADDR          0x90A
+#define A6XX_CP_DRAW_STATE_DATA          0x90B
+#define A6XX_CP_ROQ_DBG_ADDR             0x90C
+#define A6XX_CP_ROQ_DBG_DATA             0x90D
+#define A6XX_CP_MEM_POOL_DBG_ADDR        0x90E
+#define A6XX_CP_MEM_POOL_DBG_DATA        0x90F
+#define A6XX_CP_SQE_UCODE_DBG_ADDR       0x910
+#define A6XX_CP_SQE_UCODE_DBG_DATA       0x911
+#define A6XX_CP_IB1_BASE                 0x928
+#define A6XX_CP_IB1_BASE_HI              0x929
+#define A6XX_CP_IB1_REM_SIZE             0x92A
+#define A6XX_CP_IB2_BASE                 0x92B
+#define A6XX_CP_IB2_BASE_HI              0x92C
+#define A6XX_CP_IB2_REM_SIZE             0x92D
 #define A6XX_CP_ALWAYS_ON_COUNTER_LO     0x980
 #define A6XX_CP_ALWAYS_ON_COUNTER_HI     0x981
 #define A6XX_CP_AHB_CNTL                 0x98D
+#define A6XX_CP_APERTURE_CNTL_HOST       0xA00
 #define A6XX_VSC_ADDR_MODE_CNTL          0xC01
 
 /* RBBM registers */
@@ -89,28 +122,401 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
+#define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
+#define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
+#define A6XX_RBBM_PERFCTR_CP_1_HI                0x403
+#define A6XX_RBBM_PERFCTR_CP_2_LO                0x404
+#define A6XX_RBBM_PERFCTR_CP_2_HI                0x405
+#define A6XX_RBBM_PERFCTR_CP_3_LO                0x406
+#define A6XX_RBBM_PERFCTR_CP_3_HI                0x407
+#define A6XX_RBBM_PERFCTR_CP_4_LO                0x408
+#define A6XX_RBBM_PERFCTR_CP_4_HI                0x409
+#define A6XX_RBBM_PERFCTR_CP_5_LO                0x40a
+#define A6XX_RBBM_PERFCTR_CP_5_HI                0x40b
+#define A6XX_RBBM_PERFCTR_CP_6_LO                0x40c
+#define A6XX_RBBM_PERFCTR_CP_6_HI                0x40d
+#define A6XX_RBBM_PERFCTR_CP_7_LO                0x40e
+#define A6XX_RBBM_PERFCTR_CP_7_HI                0x40f
+#define A6XX_RBBM_PERFCTR_CP_8_LO                0x410
+#define A6XX_RBBM_PERFCTR_CP_8_HI                0x411
+#define A6XX_RBBM_PERFCTR_CP_9_LO                0x412
+#define A6XX_RBBM_PERFCTR_CP_9_HI                0x413
+#define A6XX_RBBM_PERFCTR_CP_10_LO               0x414
+#define A6XX_RBBM_PERFCTR_CP_10_HI               0x415
+#define A6XX_RBBM_PERFCTR_CP_11_LO               0x416
+#define A6XX_RBBM_PERFCTR_CP_11_HI               0x417
+#define A6XX_RBBM_PERFCTR_CP_12_LO               0x418
+#define A6XX_RBBM_PERFCTR_CP_12_HI               0x419
+#define A6XX_RBBM_PERFCTR_CP_13_LO               0x41a
+#define A6XX_RBBM_PERFCTR_CP_13_HI               0x41b
+#define A6XX_RBBM_PERFCTR_RBBM_0_LO              0x41c
+#define A6XX_RBBM_PERFCTR_RBBM_0_HI              0x41d
+#define A6XX_RBBM_PERFCTR_RBBM_1_LO              0x41e
+#define A6XX_RBBM_PERFCTR_RBBM_1_HI              0x41f
+#define A6XX_RBBM_PERFCTR_RBBM_2_LO              0x420
+#define A6XX_RBBM_PERFCTR_RBBM_2_HI              0x421
+#define A6XX_RBBM_PERFCTR_RBBM_3_LO              0x422
+#define A6XX_RBBM_PERFCTR_RBBM_3_HI              0x423
+#define A6XX_RBBM_PERFCTR_PC_0_LO                0x424
+#define A6XX_RBBM_PERFCTR_PC_0_HI                0x425
+#define A6XX_RBBM_PERFCTR_PC_1_LO                0x426
+#define A6XX_RBBM_PERFCTR_PC_1_HI                0x427
+#define A6XX_RBBM_PERFCTR_PC_2_LO                0x428
+#define A6XX_RBBM_PERFCTR_PC_2_HI                0x429
+#define A6XX_RBBM_PERFCTR_PC_3_LO                0x42a
+#define A6XX_RBBM_PERFCTR_PC_3_HI                0x42b
+#define A6XX_RBBM_PERFCTR_PC_4_LO                0x42c
+#define A6XX_RBBM_PERFCTR_PC_4_HI                0x42d
+#define A6XX_RBBM_PERFCTR_PC_5_LO                0x42e
+#define A6XX_RBBM_PERFCTR_PC_5_HI                0x42f
+#define A6XX_RBBM_PERFCTR_PC_6_LO                0x430
+#define A6XX_RBBM_PERFCTR_PC_6_HI                0x431
+#define A6XX_RBBM_PERFCTR_PC_7_LO                0x432
+#define A6XX_RBBM_PERFCTR_PC_7_HI                0x433
+#define A6XX_RBBM_PERFCTR_VFD_0_LO               0x434
+#define A6XX_RBBM_PERFCTR_VFD_0_HI               0x435
+#define A6XX_RBBM_PERFCTR_VFD_1_LO               0x436
+#define A6XX_RBBM_PERFCTR_VFD_1_HI               0x437
+#define A6XX_RBBM_PERFCTR_VFD_2_LO               0x438
+#define A6XX_RBBM_PERFCTR_VFD_2_HI               0x439
+#define A6XX_RBBM_PERFCTR_VFD_3_LO               0x43a
+#define A6XX_RBBM_PERFCTR_VFD_3_HI               0x43b
+#define A6XX_RBBM_PERFCTR_VFD_4_LO               0x43c
+#define A6XX_RBBM_PERFCTR_VFD_4_HI               0x43d
+#define A6XX_RBBM_PERFCTR_VFD_5_LO               0x43e
+#define A6XX_RBBM_PERFCTR_VFD_5_HI               0x43f
+#define A6XX_RBBM_PERFCTR_VFD_6_LO               0x440
+#define A6XX_RBBM_PERFCTR_VFD_6_HI               0x441
+#define A6XX_RBBM_PERFCTR_VFD_7_LO               0x442
+#define A6XX_RBBM_PERFCTR_VFD_7_HI               0x443
+#define A6XX_RBBM_PERFCTR_HLSQ_0_LO              0x444
+#define A6XX_RBBM_PERFCTR_HLSQ_0_HI              0x445
+#define A6XX_RBBM_PERFCTR_HLSQ_1_LO              0x446
+#define A6XX_RBBM_PERFCTR_HLSQ_1_HI              0x447
+#define A6XX_RBBM_PERFCTR_HLSQ_2_LO              0x448
+#define A6XX_RBBM_PERFCTR_HLSQ_2_HI              0x449
+#define A6XX_RBBM_PERFCTR_HLSQ_3_LO              0x44a
+#define A6XX_RBBM_PERFCTR_HLSQ_3_HI              0x44b
+#define A6XX_RBBM_PERFCTR_HLSQ_4_LO              0x44c
+#define A6XX_RBBM_PERFCTR_HLSQ_4_HI              0x44d
+#define A6XX_RBBM_PERFCTR_HLSQ_5_LO              0x44e
+#define A6XX_RBBM_PERFCTR_HLSQ_5_HI              0x44f
+#define A6XX_RBBM_PERFCTR_VPC_0_LO               0x450
+#define A6XX_RBBM_PERFCTR_VPC_0_HI               0x451
+#define A6XX_RBBM_PERFCTR_VPC_1_LO               0x452
+#define A6XX_RBBM_PERFCTR_VPC_1_HI               0x453
+#define A6XX_RBBM_PERFCTR_VPC_2_LO               0x454
+#define A6XX_RBBM_PERFCTR_VPC_2_HI               0x455
+#define A6XX_RBBM_PERFCTR_VPC_3_LO               0x456
+#define A6XX_RBBM_PERFCTR_VPC_3_HI               0x457
+#define A6XX_RBBM_PERFCTR_VPC_4_LO               0x458
+#define A6XX_RBBM_PERFCTR_VPC_4_HI               0x459
+#define A6XX_RBBM_PERFCTR_VPC_5_LO               0x45a
+#define A6XX_RBBM_PERFCTR_VPC_5_HI               0x45b
+#define A6XX_RBBM_PERFCTR_CCU_0_LO               0x45c
+#define A6XX_RBBM_PERFCTR_CCU_0_HI               0x45d
+#define A6XX_RBBM_PERFCTR_CCU_1_LO               0x45e
+#define A6XX_RBBM_PERFCTR_CCU_1_HI               0x45f
+#define A6XX_RBBM_PERFCTR_CCU_2_LO               0x460
+#define A6XX_RBBM_PERFCTR_CCU_2_HI               0x461
+#define A6XX_RBBM_PERFCTR_CCU_3_LO               0x462
+#define A6XX_RBBM_PERFCTR_CCU_3_HI               0x463
+#define A6XX_RBBM_PERFCTR_CCU_4_LO               0x464
+#define A6XX_RBBM_PERFCTR_CCU_4_HI               0x465
+#define A6XX_RBBM_PERFCTR_TSE_0_LO               0x466
+#define A6XX_RBBM_PERFCTR_TSE_0_HI               0x467
+#define A6XX_RBBM_PERFCTR_TSE_1_LO               0x468
+#define A6XX_RBBM_PERFCTR_TSE_1_HI               0x469
+#define A6XX_RBBM_PERFCTR_TSE_2_LO               0x46a
+#define A6XX_RBBM_PERFCTR_TSE_2_HI               0x46b
+#define A6XX_RBBM_PERFCTR_TSE_3_LO               0x46c
+#define A6XX_RBBM_PERFCTR_TSE_3_HI               0x46d
+#define A6XX_RBBM_PERFCTR_RAS_0_LO               0x46e
+#define A6XX_RBBM_PERFCTR_RAS_0_HI               0x46f
+#define A6XX_RBBM_PERFCTR_RAS_1_LO               0x470
+#define A6XX_RBBM_PERFCTR_RAS_1_HI               0x471
+#define A6XX_RBBM_PERFCTR_RAS_2_LO               0x472
+#define A6XX_RBBM_PERFCTR_RAS_2_HI               0x473
+#define A6XX_RBBM_PERFCTR_RAS_3_LO               0x474
+#define A6XX_RBBM_PERFCTR_RAS_3_HI               0x475
+#define A6XX_RBBM_PERFCTR_UCHE_0_LO              0x476
+#define A6XX_RBBM_PERFCTR_UCHE_0_HI              0x477
+#define A6XX_RBBM_PERFCTR_UCHE_1_LO              0x478
+#define A6XX_RBBM_PERFCTR_UCHE_1_HI              0x479
+#define A6XX_RBBM_PERFCTR_UCHE_2_LO              0x47a
+#define A6XX_RBBM_PERFCTR_UCHE_2_HI              0x47b
+#define A6XX_RBBM_PERFCTR_UCHE_3_LO              0x47c
+#define A6XX_RBBM_PERFCTR_UCHE_3_HI              0x47d
+#define A6XX_RBBM_PERFCTR_UCHE_4_LO              0x47e
+#define A6XX_RBBM_PERFCTR_UCHE_4_HI              0x47f
+#define A6XX_RBBM_PERFCTR_UCHE_5_LO              0x480
+#define A6XX_RBBM_PERFCTR_UCHE_5_HI              0x481
+#define A6XX_RBBM_PERFCTR_UCHE_6_LO              0x482
+#define A6XX_RBBM_PERFCTR_UCHE_6_HI              0x483
+#define A6XX_RBBM_PERFCTR_UCHE_7_LO              0x484
+#define A6XX_RBBM_PERFCTR_UCHE_7_HI              0x485
+#define A6XX_RBBM_PERFCTR_UCHE_8_LO              0x486
+#define A6XX_RBBM_PERFCTR_UCHE_8_HI              0x487
+#define A6XX_RBBM_PERFCTR_UCHE_9_LO              0x488
+#define A6XX_RBBM_PERFCTR_UCHE_9_HI              0x489
+#define A6XX_RBBM_PERFCTR_UCHE_10_LO             0x48a
+#define A6XX_RBBM_PERFCTR_UCHE_10_HI             0x48b
+#define A6XX_RBBM_PERFCTR_UCHE_11_LO             0x48c
+#define A6XX_RBBM_PERFCTR_UCHE_11_HI             0x48d
+#define A6XX_RBBM_PERFCTR_TP_0_LO                0x48e
+#define A6XX_RBBM_PERFCTR_TP_0_HI                0x48f
+#define A6XX_RBBM_PERFCTR_TP_1_LO                0x490
+#define A6XX_RBBM_PERFCTR_TP_1_HI                0x491
+#define A6XX_RBBM_PERFCTR_TP_2_LO                0x492
+#define A6XX_RBBM_PERFCTR_TP_2_HI                0x493
+#define A6XX_RBBM_PERFCTR_TP_3_LO                0x494
+#define A6XX_RBBM_PERFCTR_TP_3_HI                0x495
+#define A6XX_RBBM_PERFCTR_TP_4_LO                0x496
+#define A6XX_RBBM_PERFCTR_TP_4_HI                0x497
+#define A6XX_RBBM_PERFCTR_TP_5_LO                0x498
+#define A6XX_RBBM_PERFCTR_TP_5_HI                0x499
+#define A6XX_RBBM_PERFCTR_TP_6_LO                0x49a
+#define A6XX_RBBM_PERFCTR_TP_6_HI                0x49b
+#define A6XX_RBBM_PERFCTR_TP_7_LO                0x49c
+#define A6XX_RBBM_PERFCTR_TP_7_HI                0x49d
+#define A6XX_RBBM_PERFCTR_TP_8_LO                0x49e
+#define A6XX_RBBM_PERFCTR_TP_8_HI                0x49f
+#define A6XX_RBBM_PERFCTR_TP_9_LO                0x4a0
+#define A6XX_RBBM_PERFCTR_TP_9_HI                0x4a1
+#define A6XX_RBBM_PERFCTR_TP_10_LO               0x4a2
+#define A6XX_RBBM_PERFCTR_TP_10_HI               0x4a3
+#define A6XX_RBBM_PERFCTR_TP_11_LO               0x4a4
+#define A6XX_RBBM_PERFCTR_TP_11_HI               0x4a5
+#define A6XX_RBBM_PERFCTR_SP_0_LO                0x4a6
+#define A6XX_RBBM_PERFCTR_SP_0_HI                0x4a7
+#define A6XX_RBBM_PERFCTR_SP_1_LO                0x4a8
+#define A6XX_RBBM_PERFCTR_SP_1_HI                0x4a9
+#define A6XX_RBBM_PERFCTR_SP_2_LO                0x4aa
+#define A6XX_RBBM_PERFCTR_SP_2_HI                0x4ab
+#define A6XX_RBBM_PERFCTR_SP_3_LO                0x4ac
+#define A6XX_RBBM_PERFCTR_SP_3_HI                0x4ad
+#define A6XX_RBBM_PERFCTR_SP_4_LO                0x4ae
+#define A6XX_RBBM_PERFCTR_SP_4_HI                0x4af
+#define A6XX_RBBM_PERFCTR_SP_5_LO                0x4b0
+#define A6XX_RBBM_PERFCTR_SP_5_HI                0x4b1
+#define A6XX_RBBM_PERFCTR_SP_6_LO                0x4b2
+#define A6XX_RBBM_PERFCTR_SP_6_HI                0x4b3
+#define A6XX_RBBM_PERFCTR_SP_7_LO                0x4b4
+#define A6XX_RBBM_PERFCTR_SP_7_HI                0x4b5
+#define A6XX_RBBM_PERFCTR_SP_8_LO                0x4b6
+#define A6XX_RBBM_PERFCTR_SP_8_HI                0x4b7
+#define A6XX_RBBM_PERFCTR_SP_9_LO                0x4b8
+#define A6XX_RBBM_PERFCTR_SP_9_HI                0x4b9
+#define A6XX_RBBM_PERFCTR_SP_10_LO               0x4ba
+#define A6XX_RBBM_PERFCTR_SP_10_HI               0x4bb
+#define A6XX_RBBM_PERFCTR_SP_11_LO               0x4bc
+#define A6XX_RBBM_PERFCTR_SP_11_HI               0x4bd
+#define A6XX_RBBM_PERFCTR_SP_12_LO               0x4be
+#define A6XX_RBBM_PERFCTR_SP_12_HI               0x4bf
+#define A6XX_RBBM_PERFCTR_SP_13_LO               0x4c0
+#define A6XX_RBBM_PERFCTR_SP_13_HI               0x4c1
+#define A6XX_RBBM_PERFCTR_SP_14_LO               0x4c2
+#define A6XX_RBBM_PERFCTR_SP_14_HI               0x4c3
+#define A6XX_RBBM_PERFCTR_SP_15_LO               0x4c4
+#define A6XX_RBBM_PERFCTR_SP_15_HI               0x4c5
+#define A6XX_RBBM_PERFCTR_SP_16_LO               0x4c6
+#define A6XX_RBBM_PERFCTR_SP_16_HI               0x4c7
+#define A6XX_RBBM_PERFCTR_SP_17_LO               0x4c8
+#define A6XX_RBBM_PERFCTR_SP_17_HI               0x4c9
+#define A6XX_RBBM_PERFCTR_SP_18_LO               0x4ca
+#define A6XX_RBBM_PERFCTR_SP_18_HI               0x4cb
+#define A6XX_RBBM_PERFCTR_SP_19_LO               0x4cc
+#define A6XX_RBBM_PERFCTR_SP_19_HI               0x4cd
+#define A6XX_RBBM_PERFCTR_SP_20_LO               0x4ce
+#define A6XX_RBBM_PERFCTR_SP_20_HI               0x4cf
+#define A6XX_RBBM_PERFCTR_SP_21_LO               0x4d0
+#define A6XX_RBBM_PERFCTR_SP_21_HI               0x4d1
+#define A6XX_RBBM_PERFCTR_SP_22_LO               0x4d2
+#define A6XX_RBBM_PERFCTR_SP_22_HI               0x4d3
+#define A6XX_RBBM_PERFCTR_SP_23_LO               0x4d4
+#define A6XX_RBBM_PERFCTR_SP_23_HI               0x4d5
+#define A6XX_RBBM_PERFCTR_RB_0_LO                0x4d6
+#define A6XX_RBBM_PERFCTR_RB_0_HI                0x4d7
+#define A6XX_RBBM_PERFCTR_RB_1_LO                0x4d8
+#define A6XX_RBBM_PERFCTR_RB_1_HI                0x4d9
+#define A6XX_RBBM_PERFCTR_RB_2_LO                0x4da
+#define A6XX_RBBM_PERFCTR_RB_2_HI                0x4db
+#define A6XX_RBBM_PERFCTR_RB_3_LO                0x4dc
+#define A6XX_RBBM_PERFCTR_RB_3_HI                0x4dd
+#define A6XX_RBBM_PERFCTR_RB_4_LO                0x4de
+#define A6XX_RBBM_PERFCTR_RB_4_HI                0x4df
+#define A6XX_RBBM_PERFCTR_RB_5_LO                0x4e0
+#define A6XX_RBBM_PERFCTR_RB_5_HI                0x4e1
+#define A6XX_RBBM_PERFCTR_RB_6_LO                0x4e2
+#define A6XX_RBBM_PERFCTR_RB_6_HI                0x4e3
+#define A6XX_RBBM_PERFCTR_RB_7_LO                0x4e4
+#define A6XX_RBBM_PERFCTR_RB_7_HI                0x4e5
+#define A6XX_RBBM_PERFCTR_VSC_0_LO               0x4e6
+#define A6XX_RBBM_PERFCTR_VSC_0_HI               0x4e7
+#define A6XX_RBBM_PERFCTR_VSC_1_LO               0x4e8
+#define A6XX_RBBM_PERFCTR_VSC_1_HI               0x4e9
+#define A6XX_RBBM_PERFCTR_LRZ_0_LO               0x4ea
+#define A6XX_RBBM_PERFCTR_LRZ_0_HI               0x4eb
+#define A6XX_RBBM_PERFCTR_LRZ_1_LO               0x4ec
+#define A6XX_RBBM_PERFCTR_LRZ_1_HI               0x4ed
+#define A6XX_RBBM_PERFCTR_LRZ_2_LO               0x4ee
+#define A6XX_RBBM_PERFCTR_LRZ_2_HI               0x4ef
+#define A6XX_RBBM_PERFCTR_LRZ_3_LO               0x4f0
+#define A6XX_RBBM_PERFCTR_LRZ_3_HI               0x4f1
+#define A6XX_RBBM_PERFCTR_CMP_0_LO               0x4f2
+#define A6XX_RBBM_PERFCTR_CMP_0_HI               0x4f3
+#define A6XX_RBBM_PERFCTR_CMP_1_LO               0x4f4
+#define A6XX_RBBM_PERFCTR_CMP_1_HI               0x4f5
+#define A6XX_RBBM_PERFCTR_CMP_2_LO               0x4f6
+#define A6XX_RBBM_PERFCTR_CMP_2_HI               0x4f7
+#define A6XX_RBBM_PERFCTR_CMP_3_LO               0x4f8
+#define A6XX_RBBM_PERFCTR_CMP_3_HI               0x4f9
+#define A6XX_RBBM_PERFCTR_CNTL                   0x500
+#define A6XX_RBBM_PERFCTR_LOAD_CMD0              0x501
+#define A6XX_RBBM_PERFCTR_LOAD_CMD1              0x502
+#define A6XX_RBBM_PERFCTR_LOAD_CMD2              0x503
+#define A6XX_RBBM_PERFCTR_LOAD_CMD3              0x504
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_LO          0x505
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_HI          0x506
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_0             0x507
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_1             0x508
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_2             0x509
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_3             0x50A
+
 #define A6XX_RBBM_SECVID_TRUST_CNTL              0xF400
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
+/* DBGC_CFG registers */
+#define A6XX_DBGC_CFG_DBGBUS_SEL_A                  0x600
+#define A6XX_DBGC_CFG_DBGBUS_SEL_B                  0x601
+#define A6XX_DBGC_CFG_DBGBUS_SEL_C                  0x602
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D                  0x603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT   0x0
+#define A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT 0x8
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT                  0x604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT    0x0
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT      0xC
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT       0x1C
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM                  0x605
+#define A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT     0x18
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_0                 0x608
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_1                 0x609
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_2                 0x60a
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_3                 0x60b
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_0                0x60c
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_1                0x60d
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_2                0x60e
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_3                0x60f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0                0x610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1                0x611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT           0x0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT           0x4
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT           0x8
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT           0xC
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT           0x10
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT           0x14
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT           0x18
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT           0x1C
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT           0x0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT           0x4
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT          0x8
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT          0xC
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT          0x10
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT          0x14
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT          0x18
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT          0x1C
+#define A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1             0x62f
+#define A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2             0x630
+
 /* VSC registers */
+#define A6XX_VSC_PERFCTR_VSC_SEL_0          0xCD8
+#define A6XX_VSC_PERFCTR_VSC_SEL_1          0xCD9
+
+/* GRAS registers */
 #define A6XX_GRAS_ADDR_MODE_CNTL            0x8601
+#define A6XX_GRAS_PERFCTR_TSE_SEL_0         0x8610
+#define A6XX_GRAS_PERFCTR_TSE_SEL_1         0x8611
+#define A6XX_GRAS_PERFCTR_TSE_SEL_2         0x8612
+#define A6XX_GRAS_PERFCTR_TSE_SEL_3         0x8613
+#define A6XX_GRAS_PERFCTR_RAS_SEL_0         0x8614
+#define A6XX_GRAS_PERFCTR_RAS_SEL_1         0x8615
+#define A6XX_GRAS_PERFCTR_RAS_SEL_2         0x8616
+#define A6XX_GRAS_PERFCTR_RAS_SEL_3         0x8617
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_0         0x8618
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_1         0x8619
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_2         0x861A
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_3         0x861B
 
 /* RB registers */
 #define A6XX_RB_ADDR_MODE_CNTL              0x8E05
 #define A6XX_RB_NC_MODE_CNTL                0x8E08
+#define A6XX_RB_PERFCTR_RB_SEL_0            0x8E10
+#define A6XX_RB_PERFCTR_RB_SEL_1            0x8E11
+#define A6XX_RB_PERFCTR_RB_SEL_2            0x8E12
+#define A6XX_RB_PERFCTR_RB_SEL_3            0x8E13
+#define A6XX_RB_PERFCTR_RB_SEL_4            0x8E14
+#define A6XX_RB_PERFCTR_RB_SEL_5            0x8E15
+#define A6XX_RB_PERFCTR_RB_SEL_6            0x8E16
+#define A6XX_RB_PERFCTR_RB_SEL_7            0x8E17
+#define A6XX_RB_PERFCTR_CCU_SEL_0           0x8E18
+#define A6XX_RB_PERFCTR_CCU_SEL_1           0x8E19
+#define A6XX_RB_PERFCTR_CCU_SEL_2           0x8E1A
+#define A6XX_RB_PERFCTR_CCU_SEL_3           0x8E1B
+#define A6XX_RB_PERFCTR_CCU_SEL_4           0x8E1C
+#define A6XX_RB_PERFCTR_CMP_SEL_0           0x8E2C
+#define A6XX_RB_PERFCTR_CMP_SEL_1           0x8E2D
+#define A6XX_RB_PERFCTR_CMP_SEL_2           0x8E2E
+#define A6XX_RB_PERFCTR_CMP_SEL_3           0x8E2F
 
 /* PC registers */
 #define A6XX_PC_DBG_ECO_CNTL                0x9E00
 #define A6XX_PC_ADDR_MODE_CNTL              0x9E01
+#define A6XX_PC_PERFCTR_PC_SEL_0            0x9E34
+#define A6XX_PC_PERFCTR_PC_SEL_1            0x9E35
+#define A6XX_PC_PERFCTR_PC_SEL_2            0x9E36
+#define A6XX_PC_PERFCTR_PC_SEL_3            0x9E37
+#define A6XX_PC_PERFCTR_PC_SEL_4            0x9E38
+#define A6XX_PC_PERFCTR_PC_SEL_5            0x9E39
+#define A6XX_PC_PERFCTR_PC_SEL_6            0x9E3A
+#define A6XX_PC_PERFCTR_PC_SEL_7            0x9E3B
 
 /* HLSQ registers */
 #define A6XX_HLSQ_ADDR_MODE_CNTL            0xBE05
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_0        0xBE10
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_1        0xBE11
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_2        0xBE12
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_3        0xBE13
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_4        0xBE14
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_5        0xBE15
+#define A6XX_HLSQ_DBG_AHB_READ_APERTURE     0xC800
+#define A6XX_HLSQ_DBG_READ_SEL              0xD000
 
 /* VFD registers */
 #define A6XX_VFD_ADDR_MODE_CNTL             0xA601
+#define A6XX_VFD_PERFCTR_VFD_SEL_0          0xA610
+#define A6XX_VFD_PERFCTR_VFD_SEL_1          0xA611
+#define A6XX_VFD_PERFCTR_VFD_SEL_2          0xA612
+#define A6XX_VFD_PERFCTR_VFD_SEL_3          0xA613
+#define A6XX_VFD_PERFCTR_VFD_SEL_4          0xA614
+#define A6XX_VFD_PERFCTR_VFD_SEL_5          0xA615
+#define A6XX_VFD_PERFCTR_VFD_SEL_6          0xA616
+#define A6XX_VFD_PERFCTR_VFD_SEL_7          0xA617
 
 /* VPC registers */
 #define A6XX_VPC_ADDR_MODE_CNTL             0x9601
+#define A6XX_VPC_PERFCTR_VPC_SEL_0          0x9604
+#define A6XX_VPC_PERFCTR_VPC_SEL_1          0x9605
+#define A6XX_VPC_PERFCTR_VPC_SEL_2          0x9606
+#define A6XX_VPC_PERFCTR_VPC_SEL_3          0x9607
+#define A6XX_VPC_PERFCTR_VPC_SEL_4          0x9608
+#define A6XX_VPC_PERFCTR_VPC_SEL_5          0x9609
 
 /* UCHE registers */
 #define A6XX_UCHE_ADDR_MODE_CNTL            0xE00
@@ -127,20 +533,89 @@
 #define A6XX_UCHE_GMEM_RANGE_MAX_HI         0xE0E
 #define A6XX_UCHE_CACHE_WAYS                0xE17
 #define A6XX_UCHE_FILTER_CNTL               0xE18
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_0        0xE1C
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_1        0xE1D
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_2        0xE1E
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_3        0xE1F
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_4        0xE20
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_5        0xE21
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_6        0xE22
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_7        0xE23
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_8        0xE24
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_9        0xE25
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_10       0xE26
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_11       0xE27
 
 /* SP registers */
 #define A6XX_SP_ADDR_MODE_CNTL              0xAE01
 #define A6XX_SP_NC_MODE_CNTL                0xAE02
+#define A6XX_SP_PERFCTR_SP_SEL_0            0xAE10
+#define A6XX_SP_PERFCTR_SP_SEL_1            0xAE11
+#define A6XX_SP_PERFCTR_SP_SEL_2            0xAE12
+#define A6XX_SP_PERFCTR_SP_SEL_3            0xAE13
+#define A6XX_SP_PERFCTR_SP_SEL_4            0xAE14
+#define A6XX_SP_PERFCTR_SP_SEL_5            0xAE15
+#define A6XX_SP_PERFCTR_SP_SEL_6            0xAE16
+#define A6XX_SP_PERFCTR_SP_SEL_7            0xAE17
+#define A6XX_SP_PERFCTR_SP_SEL_8            0xAE18
+#define A6XX_SP_PERFCTR_SP_SEL_9            0xAE19
+#define A6XX_SP_PERFCTR_SP_SEL_10           0xAE1A
+#define A6XX_SP_PERFCTR_SP_SEL_11           0xAE1B
+#define A6XX_SP_PERFCTR_SP_SEL_12           0xAE1C
+#define A6XX_SP_PERFCTR_SP_SEL_13           0xAE1D
+#define A6XX_SP_PERFCTR_SP_SEL_14           0xAE1E
+#define A6XX_SP_PERFCTR_SP_SEL_15           0xAE1F
+#define A6XX_SP_PERFCTR_SP_SEL_16           0xAE20
+#define A6XX_SP_PERFCTR_SP_SEL_17           0xAE21
+#define A6XX_SP_PERFCTR_SP_SEL_18           0xAE22
+#define A6XX_SP_PERFCTR_SP_SEL_19           0xAE23
+#define A6XX_SP_PERFCTR_SP_SEL_20           0xAE24
+#define A6XX_SP_PERFCTR_SP_SEL_21           0xAE25
+#define A6XX_SP_PERFCTR_SP_SEL_22           0xAE26
+#define A6XX_SP_PERFCTR_SP_SEL_23           0xAE27
 
 /* TP registers */
 #define A6XX_TPL1_ADDR_MODE_CNTL            0xB601
 #define A6XX_TPL1_NC_MODE_CNTL              0xB604
+#define A6XX_TPL1_PERFCTR_TP_SEL_0          0xB610
+#define A6XX_TPL1_PERFCTR_TP_SEL_1          0xB611
+#define A6XX_TPL1_PERFCTR_TP_SEL_2          0xB612
+#define A6XX_TPL1_PERFCTR_TP_SEL_3          0xB613
+#define A6XX_TPL1_PERFCTR_TP_SEL_4          0xB614
+#define A6XX_TPL1_PERFCTR_TP_SEL_5          0xB615
+#define A6XX_TPL1_PERFCTR_TP_SEL_6          0xB616
+#define A6XX_TPL1_PERFCTR_TP_SEL_7          0xB617
+#define A6XX_TPL1_PERFCTR_TP_SEL_8          0xB618
+#define A6XX_TPL1_PERFCTR_TP_SEL_9          0xB619
+#define A6XX_TPL1_PERFCTR_TP_SEL_10         0xB61A
+#define A6XX_TPL1_PERFCTR_TP_SEL_11         0xB61B
 
 /* VBIF registers */
 #define A6XX_VBIF_VERSION                       0x3000
 #define A6XX_VBIF_GATE_OFF_WRREQ_EN             0x302A
 #define A6XX_VBIF_XIN_HALT_CTRL0                0x3080
 #define A6XX_VBIF_XIN_HALT_CTRL1                0x3081
+#define A6XX_VBIF_PERF_CNT_SEL0                 0x30d0
+#define A6XX_VBIF_PERF_CNT_SEL1                 0x30d1
+#define A6XX_VBIF_PERF_CNT_SEL2                 0x30d2
+#define A6XX_VBIF_PERF_CNT_SEL3                 0x30d3
+#define A6XX_VBIF_PERF_CNT_LOW0                 0x30d8
+#define A6XX_VBIF_PERF_CNT_LOW1                 0x30d9
+#define A6XX_VBIF_PERF_CNT_LOW2                 0x30da
+#define A6XX_VBIF_PERF_CNT_LOW3                 0x30db
+#define A6XX_VBIF_PERF_CNT_HIGH0                0x30e0
+#define A6XX_VBIF_PERF_CNT_HIGH1                0x30e1
+#define A6XX_VBIF_PERF_CNT_HIGH2                0x30e2
+#define A6XX_VBIF_PERF_CNT_HIGH3                0x30e3
+#define A6XX_VBIF_PERF_PWR_CNT_EN0              0x3100
+#define A6XX_VBIF_PERF_PWR_CNT_EN1              0x3101
+#define A6XX_VBIF_PERF_PWR_CNT_EN2              0x3102
+#define A6XX_VBIF_PERF_PWR_CNT_LOW0             0x3110
+#define A6XX_VBIF_PERF_PWR_CNT_LOW1             0x3111
+#define A6XX_VBIF_PERF_PWR_CNT_LOW2             0x3112
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH0            0x3118
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
 
 /* GMU control registers */
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 52639e3..876ff0c 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -334,7 +334,7 @@
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
 		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a540_gpmu.fw2",
+		.gpmufw_name = "a630_gmu.bin",
 		.gpmu_major = 0x0,
 		.gpmu_minor = 0x005,
 		.gpmu_tsens = 0x000C000D,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index a8ebf59..bea5707a 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -15,6 +15,7 @@
 
 #include "adreno.h"
 #include "a6xx_reg.h"
+#include "adreno_a6xx.h"
 #include "adreno_cp_parser.h"
 #include "adreno_trace.h"
 #include "adreno_pm4types.h"
@@ -101,6 +102,11 @@
 	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
 }
 
+static void a6xx_init(struct adreno_device *adreno_dev)
+{
+	a6xx_crashdump_init(adreno_dev);
+}
+
 /**
  * a6xx_protect_init() - Initializes register protection on a6xx
  * @device: Pointer to the device structure
@@ -110,34 +116,29 @@
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	struct kgsl_protected_registers *mmu_prot = NULL;
-	int i;
-	int num_sets;
-	int num_sets_array;
-	unsigned int mmu_base;
-	unsigned int mmu_range;
+	struct kgsl_protected_registers *mmu_prot =
+		kgsl_mmu_get_prot_regs(&device->mmu);
+	int i, num_sets;
+	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
+	int max_sets = adreno_dev->gpucore->num_protected_regs;
+	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
 
 	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);
 
-	num_sets = ARRAY_SIZE(a6xx_protected_regs_group);
-
-	mmu_prot = kgsl_mmu_get_prot_regs(&device->mmu);
-
 	if (mmu_prot) {
 		mmu_base = mmu_prot->base;
 		mmu_range = 1 << mmu_prot->range;
-		num_sets += DIV_ROUND_UP(mmu_range, 0x2000);
+		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
 	}
 
-	if (num_sets > adreno_dev->gpucore->num_protected_regs) {
+	if (req_sets > max_sets)
 		WARN(1, "Size exceeds the num of protection regs available\n");
-		num_sets = adreno_dev->gpucore->num_protected_regs;
-	}
 
-	num_sets_array = min_t(unsigned int,
-		ARRAY_SIZE(a6xx_protected_regs_group), num_sets);
-	for (i = 0; i < num_sets_array; i++) {
+	/* Protect GPU registers */
+	num_sets = min_t(unsigned int,
+		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
+	for (i = 0; i < num_sets; i++) {
 		struct a6xx_protected_regs *regs =
 					&a6xx_protected_regs_group[i];
 
@@ -146,15 +147,18 @@
 				(regs->read_protect << 31));
 	}
 
-	for (; i < num_sets; i++) {
-		unsigned int cur_range = min_t(unsigned int, mmu_range,
+	/* Protect MMU registers */
+	if (mmu_prot) {
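+		/*
+		 * Walk the MMU register space in 0x2000-register chunks,
+		 * using one of the remaining protect slots per chunk.
+		 */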
+		while ((i < max_sets) && (mmu_range > 0)) {
+			cur_range = min_t(unsigned int, mmu_range,
 						0x2000);
+			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
+				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
 
-		kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
-			mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
-		mmu_base += cur_range;
-		mmu_range -= cur_range;
+			mmu_base += cur_range;
+			mmu_range -= cur_range;
+			i++;
+		}
 	}
 }
 
@@ -1522,10 +1526,381 @@
 	.mask = A6XX_INT_MASK,
 };
 
+static struct adreno_snapshot_sizes a6xx_snap_sizes = {
+	.cp_pfp = 0x33,
+	.roq = 0x400,
+};
+
+static struct adreno_snapshot_data a6xx_snapshot_data = {
+	.sect_sizes = &a6xx_snap_sizes,
+};
+
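+/*
+ * The perfcounter tables below follow struct adreno_perfcount_register:
+ * current countable (unused at init), kernel and user refcounts, the
+ * RBBM_PERFCTR_*_LO/_HI value registers, the load bit used by the RBBM
+ * perfcounter load commands (-1 where not loadable) and the per-block
+ * select register.
+ */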
+static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
+		A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
+		A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
+		A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
+		A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
+		A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
+		A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
+		A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
+		A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
+		A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
+		A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
+		A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
+		A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
+		A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
+		A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
+		A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
+		A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
+		A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
+		A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
+		A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
+		A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
+		A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
+		A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
+		A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
+		A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
+		A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
+		A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
+		A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
+		A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
+		A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
+		A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
+		A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
+		A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
+		A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
+		A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
+		A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
+		A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
+		A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
+		A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
+		A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
+		A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
+		A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
+		A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
+		A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
+		A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
+		A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
+		A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
+		A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
+		A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
+		A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
+		A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
+		A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
+		A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
+		A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
+		A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
+		A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
+		A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
+		A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
+		A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
+		A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
+		A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
+		A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
+		A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
+		A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
+		A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
+		A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
+		A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
+		A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
+		A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
+		A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
+		A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
+		A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
+		A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
+		A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
+		A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
+		A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
+		A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
+		A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
+		A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
+		A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
+		A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
+		A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
+		A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
+		A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
+		A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
+		A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
+		A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
+		A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
+		A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
+		A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
+		A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
+		A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
+		A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
+		A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
+		A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
+		A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
+		A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
+		A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
+		A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
+		A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
+		A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
+		A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
+		A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
+		A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
+		A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
+		A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
+		A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
+		A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
+		A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
+		A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
+		A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
+		A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
+		A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
+		A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
+		A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
+		A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
+		A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
+		A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
+		A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
+		A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
+		A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
+		A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
+		A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
+		A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
+		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
+};
+
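+/*
+ * Tie each KGSL perfcounter group ID to its a6xx_perfcounters_<name>
+ * table above; the VBIF power and always-on groups are flagged as
+ * fixed-function counters.
+ */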
+#define A6XX_PERFCOUNTER_GROUP(offset, name) \
+	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
+
+#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
+	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
+
+static struct adreno_perfcount_group a6xx_perfcounter_groups
+				[KGSL_PERFCOUNTER_GROUP_MAX] = {
+	A6XX_PERFCOUNTER_GROUP(CP, cp),
+	A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
+	A6XX_PERFCOUNTER_GROUP(PC, pc),
+	A6XX_PERFCOUNTER_GROUP(VFD, vfd),
+	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
+	A6XX_PERFCOUNTER_GROUP(VPC, vpc),
+	A6XX_PERFCOUNTER_GROUP(CCU, ccu),
+	A6XX_PERFCOUNTER_GROUP(CMP, cmp),
+	A6XX_PERFCOUNTER_GROUP(TSE, tse),
+	A6XX_PERFCOUNTER_GROUP(RAS, ras),
+	A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
+	A6XX_PERFCOUNTER_GROUP(UCHE, uche),
+	A6XX_PERFCOUNTER_GROUP(TP, tp),
+	A6XX_PERFCOUNTER_GROUP(SP, sp),
+	A6XX_PERFCOUNTER_GROUP(RB, rb),
+	A6XX_PERFCOUNTER_GROUP(VSC, vsc),
+	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+};
+
+static struct adreno_perfcounters a6xx_perfcounters = {
+	a6xx_perfcounter_groups,
+	ARRAY_SIZE(a6xx_perfcounter_groups),
+};
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
 				A6XX_CP_RB_RPTR_ADDR_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
@@ -1533,10 +1908,28 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+					A6XX_RBBM_PERFCTR_LOAD_CMD0),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+					A6XX_RBBM_PERFCTR_LOAD_CMD1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
+					A6XX_RBBM_PERFCTR_LOAD_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
+					A6XX_RBBM_PERFCTR_LOAD_CMD3),
 
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
@@ -1548,6 +1941,10 @@
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_CP_ALWAYS_ON_COUNTER_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
@@ -1597,13 +1994,17 @@
 struct adreno_gpudev adreno_a6xx_gpudev = {
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
+	.snapshot = a6xx_snapshot,
 	.irq = &a6xx_irq,
+	.snapshot_data = &a6xx_snapshot_data,
 	.irq_trace = trace_kgsl_a5xx_irq_status,
 	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
 	.platform_setup = a6xx_platform_setup,
+	.init = a6xx_init,
 	.rb_start = a6xx_rb_start,
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
+	.perfcounters = &a6xx_perfcounters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
new file mode 100644
index 0000000..4b96f56
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ADRENO_A6XX_H_
+#define _ADRENO_A6XX_H_
+
+#include "a6xx_reg.h"
+
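+/* Register cluster IDs used when dumping per-context state in snapshots */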
+#define CP_CLUSTER_FE		0x0
+#define CP_CLUSTER_SP_VS	0x1
+#define CP_CLUSTER_PC_VS	0x2
+#define CP_CLUSTER_GRAS		0x3
+#define CP_CLUSTER_SP_PS	0x4
+#define CP_CLUSTER_PS		0x5
+
+
+void a6xx_snapshot(struct adreno_device *adreno_dev,
+		struct kgsl_snapshot *snapshot);
+
+void a6xx_crashdump_init(struct adreno_device *adreno_dev);
+
+#endif
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
new file mode 100644
index 0000000..7d87096
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -0,0 +1,1294 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+#include "adreno_snapshot.h"
+#include "a6xx_reg.h"
+#include "adreno_a6xx.h"
+#include "kgsl_gmu.h"
+
+#define A6XX_NUM_CTXTS 2
+
+static const unsigned int a6xx_gras_cluster[] = {
+	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
+	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+	0x8400, 0x840B,
+};
+
+static const unsigned int a6xx_ps_cluster[] = {
+	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
+	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
+	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
+	0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const unsigned int a6xx_fe_cluster[] = {
+	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
+	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
+};
+
+static const unsigned int a6xx_pc_vs_cluster[] = {
+	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
+};
+
+static struct a6xx_cluster_registers {
+	unsigned int id;
+	const unsigned int *regs;
+	unsigned int num_sets;
+	unsigned int offset0;
+	unsigned int offset1;
+} a6xx_clusters[] = {
+	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
+	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
+	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
+	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
+					ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
+};
+
+struct a6xx_cluster_regs_info {
+	struct a6xx_cluster_registers *cluster;
+	unsigned int ctxt_id;
+};
+
+static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
+	0xB800, 0xB803, 0xB820, 0xB822,
+};
+
+static const unsigned int a6xx_sp_vs_sp_cluster[] = {
+	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
+	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
+};
+
+static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
+	0xBB10, 0xBB11, 0xBB20, 0xBB29,
+};
+
+static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
+	0xBD80, 0xBD80,
+};
+
+static const unsigned int a6xx_sp_duplicate_cluster[] = {
+	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
+};
+
+static const unsigned int a6xx_tp_duplicate_cluster[] = {
+	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
+};
+
+static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
+	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
+	0xB9C0, 0xB9C9,
+};
+
+static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
+	0xBD80, 0xBD80,
+};
+
+static const unsigned int a6xx_sp_ps_sp_cluster[] = {
+	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
+	0xAA00, 0xAA00, 0xAA30, 0xAA31,
+};
+
+static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
+	0xACC0, 0xACC0,
+};
+
+static const unsigned int a6xx_sp_ps_tp_cluster[] = {
+	0xB180, 0xB183, 0xB190, 0xB191,
+};
+
+static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
+	0xB4C0, 0xB4D1,
+};
+
+static struct a6xx_cluster_dbgahb_registers {
+	unsigned int id;
+	unsigned int regbase;
+	unsigned int statetype;
+	const unsigned int *regs;
+	unsigned int num_sets;
+} a6xx_dbgahb_ctx_clusters[] = {
+	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
+		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
+		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002EC00, 0x41, a6xx_hlsq_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002AC00, 0x21, a6xx_sp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002CC00, 0x1, a6xx_tp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002E600, 0x42, a6xx_sp_ps_hlsq_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002F300, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002A600, 0x22, a6xx_sp_ps_sp_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002B300, 0x26, a6xx_sp_ps_sp_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002C600, 0x2, a6xx_sp_ps_tp_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002D300, 0x6, a6xx_sp_ps_tp_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002EC00, 0x42, a6xx_hlsq_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002AC00, 0x22, a6xx_sp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002CC00, 0x2, a6xx_tp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
+};
+
+struct a6xx_cluster_dbgahb_regs_info {
+	struct a6xx_cluster_dbgahb_registers *cluster;
+	unsigned int ctxt_id;
+};
+
+static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
+	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
+	0xBE20, 0xBE23,
+};
+
+static const unsigned int a6xx_sp_non_ctx_registers[] = {
+	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
+	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
+};
+
+static const unsigned int a6xx_tp_non_ctx_registers[] = {
+	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
+};
+
+static struct a6xx_non_ctx_dbgahb_registers {
+	unsigned int regbase;
+	unsigned int statetype;
+	const unsigned int *regs;
+	unsigned int num_sets;
+} a6xx_non_ctx_dbgahb[] = {
+	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
+		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
+	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
+		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
+	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
+		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
+};
+
+static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
+	/* VBIF */
+	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
+	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
+	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
+	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
+	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
+	0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const unsigned int a6xx_gmu_registers[] = {
+	/* GMU */
+	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+};
+
+static const struct adreno_vbif_snapshot_registers
+a6xx_vbif_snapshot_registers[] = {
+	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
+				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
+};
+
+/*
+ * Set of registers to dump for A6XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a6xx_registers[] = {
+	/* RBBM */
+	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
+	0x0018, 0x001B, 0x001E, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
+	0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
+	0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
+	0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
+	0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
+	/* CP */
+	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
+	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
+	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
+	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
+	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
+	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
+	/* VSC */
+	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
+	/* UCHE */
+	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
+	0x0E38, 0x0E39,
+	/* GRAS */
+	0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
+	0x8628, 0x862B, 0x8630, 0x8637,
+	/* RB */
+	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
+	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
+	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
+	/* VPC */
+	0x9600, 0x9604, 0x9624, 0x9637,
+	/* PC */
+	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
+	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
+	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
+	/* VFD */
+	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
+	0xA630, 0xA630, 0xD200, 0xD263,
+};
+
+enum a6xx_debugbus_id {
+	A6XX_DBGBUS_CP           = 0x1,
+	A6XX_DBGBUS_RBBM         = 0x2,
+	A6XX_DBGBUS_VBIF         = 0x3,
+	A6XX_DBGBUS_HLSQ         = 0x4,
+	A6XX_DBGBUS_UCHE         = 0x5,
+	A6XX_DBGBUS_DPM          = 0x6,
+	A6XX_DBGBUS_TESS         = 0x7,
+	A6XX_DBGBUS_PC           = 0x8,
+	A6XX_DBGBUS_VFDP         = 0x9,
+	A6XX_DBGBUS_VPC          = 0xa,
+	A6XX_DBGBUS_TSE          = 0xb,
+	A6XX_DBGBUS_RAS          = 0xc,
+	A6XX_DBGBUS_VSC          = 0xd,
+	A6XX_DBGBUS_COM          = 0xe,
+	A6XX_DBGBUS_LRZ          = 0x10,
+	A6XX_DBGBUS_A2D          = 0x11,
+	A6XX_DBGBUS_CCUFCHE      = 0x12,
+	A6XX_DBGBUS_GMU          = 0x13,
+	A6XX_DBGBUS_RBP          = 0x14,
+	A6XX_DBGBUS_DCS          = 0x15,
+	A6XX_DBGBUS_RBBM_CFG     = 0x16,
+	A6XX_DBGBUS_CX           = 0x17,
+	A6XX_DBGBUS_TPFCHE       = 0x19,
+	A6XX_DBGBUS_GPC          = 0x1d,
+	A6XX_DBGBUS_LARC         = 0x1e,
+	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
+	A6XX_DBGBUS_RB_0         = 0x20,
+	A6XX_DBGBUS_RB_1         = 0x21,
+	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
+	A6XX_DBGBUS_CCU_0        = 0x28,
+	A6XX_DBGBUS_CCU_1        = 0x29,
+	A6XX_DBGBUS_VFD_0        = 0x38,
+	A6XX_DBGBUS_VFD_1        = 0x39,
+	A6XX_DBGBUS_VFD_2        = 0x3a,
+	A6XX_DBGBUS_VFD_3        = 0x3b,
+	A6XX_DBGBUS_SP_0         = 0x40,
+	A6XX_DBGBUS_SP_1         = 0x41,
+	A6XX_DBGBUS_TPL1_0       = 0x48,
+	A6XX_DBGBUS_TPL1_1       = 0x49,
+	A6XX_DBGBUS_TPL1_2       = 0x4a,
+	A6XX_DBGBUS_TPL1_3       = 0x4b,
+};
+
+static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_CP, 0x100, },
+	{ A6XX_DBGBUS_RBBM, 0x100, },
+	{ A6XX_DBGBUS_HLSQ, 0x100, },
+	{ A6XX_DBGBUS_UCHE, 0x100, },
+	{ A6XX_DBGBUS_DPM, 0x100, },
+	{ A6XX_DBGBUS_TESS, 0x100, },
+	{ A6XX_DBGBUS_PC, 0x100, },
+	{ A6XX_DBGBUS_VFDP, 0x100, },
+	{ A6XX_DBGBUS_VPC, 0x100, },
+	{ A6XX_DBGBUS_TSE, 0x100, },
+	{ A6XX_DBGBUS_RAS, 0x100, },
+	{ A6XX_DBGBUS_VSC, 0x100, },
+	{ A6XX_DBGBUS_COM, 0x100, },
+	{ A6XX_DBGBUS_LRZ, 0x100, },
+	{ A6XX_DBGBUS_A2D, 0x100, },
+	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
+	{ A6XX_DBGBUS_RBP, 0x100, },
+	{ A6XX_DBGBUS_DCS, 0x100, },
+	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
+	{ A6XX_DBGBUS_TPFCHE, 0x100, },
+	{ A6XX_DBGBUS_GPC, 0x100, },
+	{ A6XX_DBGBUS_LARC, 0x100, },
+	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
+	{ A6XX_DBGBUS_RB_0, 0x100, },
+	{ A6XX_DBGBUS_RB_1, 0x100, },
+	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
+	{ A6XX_DBGBUS_CCU_0, 0x100, },
+	{ A6XX_DBGBUS_CCU_1, 0x100, },
+	{ A6XX_DBGBUS_VFD_0, 0x100, },
+	{ A6XX_DBGBUS_VFD_1, 0x100, },
+	{ A6XX_DBGBUS_VFD_2, 0x100, },
+	{ A6XX_DBGBUS_VFD_3, 0x100, },
+	{ A6XX_DBGBUS_SP_0, 0x100, },
+	{ A6XX_DBGBUS_SP_1, 0x100, },
+	{ A6XX_DBGBUS_TPL1_0, 0x100, },
+	{ A6XX_DBGBUS_TPL1_1, 0x100, },
+	{ A6XX_DBGBUS_TPL1_2, 0x100, },
+	{ A6XX_DBGBUS_TPL1_3, 0x100, },
+};
+
+#define A6XX_NUM_SHADER_BANKS 3
+#define A6XX_SHADER_STATETYPE_SHIFT 8
+
+enum a6xx_shader_obj {
+	A6XX_TP0_TMO_DATA               = 0x9,
+	A6XX_TP0_SMO_DATA               = 0xa,
+	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
+	A6XX_TP1_TMO_DATA               = 0x19,
+	A6XX_TP1_SMO_DATA               = 0x1a,
+	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
+	A6XX_SP_INST_DATA               = 0x29,
+	A6XX_SP_LB_0_DATA               = 0x2a,
+	A6XX_SP_LB_1_DATA               = 0x2b,
+	A6XX_SP_LB_2_DATA               = 0x2c,
+	A6XX_SP_LB_3_DATA               = 0x2d,
+	A6XX_SP_LB_4_DATA               = 0x2e,
+	A6XX_SP_LB_5_DATA               = 0x2f,
+	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
+	A6XX_SP_CB_LEGACY_DATA          = 0x31,
+	A6XX_SP_UAV_DATA                = 0x32,
+	A6XX_SP_INST_TAG                = 0x33,
+	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
+	A6XX_SP_TMO_UMO_TAG             = 0x35,
+	A6XX_SP_SMO_TAG                 = 0x36,
+	A6XX_SP_STATE_DATA              = 0x37,
+	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
+	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
+	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
+	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
+	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
+	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
+	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
+	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
+	A6XX_HLSQ_INST_RAM              = 0x52,
+	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
+	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
+	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
+	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
+	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
+	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
+	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
+	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
+	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
+	A6XX_HLSQ_DATAPATH_META         = 0x60,
+	A6XX_HLSQ_FRONTEND_META         = 0x61,
+	A6XX_HLSQ_INDIRECT_META         = 0x62,
+	A6XX_HLSQ_BACKEND_META          = 0x63
+};
+
+struct a6xx_shader_block {
+	unsigned int statetype;
+	unsigned int sz;
+	uint64_t offset;
+};
+
+struct a6xx_shader_block_info {
+	struct a6xx_shader_block *block;
+	unsigned int bank;
+	uint64_t offset;
+};
+
+static struct a6xx_shader_block a6xx_shader_blocks[] = {
+	{A6XX_TP0_TMO_DATA,               0x200},
+	{A6XX_TP0_SMO_DATA,               0x80},
+	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
+	{A6XX_TP1_TMO_DATA,               0x200},
+	{A6XX_TP1_SMO_DATA,               0x80},
+	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
+	{A6XX_SP_INST_DATA,               0x800},
+	{A6XX_SP_LB_0_DATA,               0x800},
+	{A6XX_SP_LB_1_DATA,               0x800},
+	{A6XX_SP_LB_2_DATA,               0x800},
+	{A6XX_SP_LB_3_DATA,               0x800},
+	{A6XX_SP_LB_4_DATA,               0x800},
+	{A6XX_SP_LB_5_DATA,               0x200},
+	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
+	{A6XX_SP_CB_LEGACY_DATA,          0x280},
+	{A6XX_SP_UAV_DATA,                0x80},
+	{A6XX_SP_INST_TAG,                0x80},
+	{A6XX_SP_CB_BINDLESS_TAG,         0x80},
+	{A6XX_SP_TMO_UMO_TAG,             0x80},
+	{A6XX_SP_SMO_TAG,                 0x80},
+	{A6XX_SP_STATE_DATA,              0x3F},
+	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
+	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
+	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40},
+	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40},
+	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4},
+	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4},
+	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
+	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
+	{A6XX_HLSQ_INST_RAM,              0x800},
+	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
+	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
+	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8},
+	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4},
+	{A6XX_HLSQ_INST_RAM_TAG,          0x80},
+	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC},
+	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
+	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
+	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
+	{A6XX_HLSQ_DATAPATH_META,         0x40},
+	{A6XX_HLSQ_FRONTEND_META,         0x40},
+	{A6XX_HLSQ_INDIRECT_META,         0x40},
+};
+
+static struct kgsl_memdesc a6xx_capturescript;
+static struct kgsl_memdesc a6xx_crashdump_registers;
+static bool crash_dump_valid;
+
+static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
+		u8 *buf, size_t remain)
+{
+	struct kgsl_snapshot_registers regs = {
+		.regs = a6xx_registers,
+		.count = ARRAY_SIZE(a6xx_registers) / 2,
+	};
+
+	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
+}
+
+static struct cdregs {
+	const unsigned int *regs;
+	unsigned int size;
+} _a6xx_cd_registers[] = {
+	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
+};
+
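+/*
+ * Register lists store inclusive [start, end] pairs; the pair
+ * { 0x8000, 0x8006 }, for example, covers seven registers, so
+ * REG_PAIR_COUNT() returns 7 for that entry.
+ */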
+#define REG_PAIR_COUNT(_a, _i) \
+	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
+
+static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
+		size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
+	unsigned int i, j, k;
+	unsigned int count = 0;
+
+	if (!crash_dump_valid)
+		return a6xx_legacy_snapshot_registers(device, buf, remain);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		for (j = 0; j < regs->size / 2; j++) {
+			unsigned int start = regs->regs[2 * j];
+			unsigned int end = regs->regs[(2 * j) + 1];
+
+			if (remain < ((end - start) + 1) * 8) {
+				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+				goto out;
+			}
+
+			remain -= ((end - start) + 1) * 8;
+
+			for (k = start; k <= end; k++, count++) {
+				*data++ = k;
+				*data++ = *src++;
+			}
+		}
+	}
+
+out:
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_shader *header =
+		(struct kgsl_snapshot_shader *) buf;
+	struct a6xx_shader_block_info *info =
+		(struct a6xx_shader_block_info *) priv;
+	struct a6xx_shader_block *block = info->block;
+	unsigned int *data = (unsigned int *) (buf + sizeof(*header));
+
+	if (remain < SHADER_SECTION_SZ(block->sz)) {
+		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
+		return 0;
+	}
+
+	header->type = block->statetype;
+	header->index = info->bank;
+	header->size = block->sz;
+
+	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
+		block->sz);
+
+	return SHADER_SECTION_SZ(block->sz);
+}
+
+static void a6xx_snapshot_shader(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	unsigned int i, j;
+	struct a6xx_shader_block_info info;
+
+	/* Shader blocks can only be read by the crash dumper */
+	if (!crash_dump_valid)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
+			info.block = &a6xx_shader_blocks[i];
+			info.bank = j;
+			info.offset = a6xx_shader_blocks[i].offset +
+				(j * a6xx_shader_blocks[i].sz);
+
+			/* Shader working/shadow memory */
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_SHADER,
+				snapshot, a6xx_snapshot_shader_memory, &info);
+		}
+	}
+}
+
+static void a6xx_snapshot_mempool(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	unsigned int pool_size;
+
+	/* Save the mempool size and set it to 0 to stabilize it while dumping */
+	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
+	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
+
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
+		0, 0x2060);
+
+	/* Restore the saved mempool size */
+	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
+}
+
+static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
+				unsigned int regbase, unsigned int reg)
+{
+	unsigned int read_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+				reg - regbase / 4;
+	unsigned int val;
+
+	kgsl_regread(device, read_reg, &val);
+	return val;
+}
+
+static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+				(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_dbgahb_regs_info *info =
+				(struct a6xx_cluster_dbgahb_regs_info *)priv;
+	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
+	unsigned int read_sel;
+	unsigned int data_size = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int i, j;
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cur_cluster->id;
+
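+	/*
+	 * Each context has its own statetype slot: context N selects
+	 * statetype + 2 * N, placed in bits [15:8] of the HLSQ debug
+	 * read select register.
+	 */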
+	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8;
+	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
+
+	for (i = 0; i < cur_cluster->num_sets; i++) {
+		unsigned int start = cur_cluster->regs[2 * i];
+		unsigned int end = cur_cluster->regs[2 * i + 1];
+
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		*data++ = start | (1 << 31);
+		*data++ = end;
+
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
+			*data++ = val;
+
+		}
+	}
+
+out:
+	return data_size + sizeof(*header);
+}
+
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header =
+				(struct kgsl_snapshot_regs *)buf;
+	struct a6xx_non_ctx_dbgahb_registers *regs =
+				(struct a6xx_non_ctx_dbgahb_registers *)priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0;
+	unsigned int read_sel;
+	int i, j;
+
+	/* Figure out how many registers we are going to dump */
+	for (i = 0; i < regs->num_sets; i++) {
+		int start = regs->regs[i * 2];
+		int end = regs->regs[i * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	header->count = count;
+
+	read_sel = (regs->statetype & 0xff) << 8;
+	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
+
+	for (i = 0; i < regs->num_sets; i++) {
+		unsigned int start = regs->regs[2 * i];
+		unsigned int end = regs->regs[2 * i + 1];
+
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			val = a6xx_read_dbgahb(device, regs->regbase, j);
+			*data++ = j;
+			*data++ = val;
+
+		}
+	}
+	return (count * 8) + sizeof(*header);
+}
+
+static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); i++) {
+		struct a6xx_cluster_dbgahb_registers *cluster =
+				&a6xx_dbgahb_ctx_clusters[i];
+		struct a6xx_cluster_dbgahb_regs_info info;
+
+		info.cluster = cluster;
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+			info.ctxt_id = j;
+
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
+				a6xx_snapshot_cluster_dbgahb, &info);
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_non_ctx_dbgahb); i++) {
+		kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[i]);
+	}
+}
+
+static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+					(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_regs_info *info =
+					(struct a6xx_cluster_regs_info *)priv;
+	struct a6xx_cluster_registers *cur_cluster = info->cluster;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int ctxt = info->ctxt_id;
+	unsigned int start, end, i, j, aperture_cntl = 0;
+	unsigned int data_size = 0;
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cur_cluster->id;
+
+	/*
+	 * Set the AHB control for the Host to read from the
+	 * cluster/context for this iteration.
+	 */
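+	/*
+	 * The aperture value packs the cluster id into bits [10:8] and
+	 * mirrors the context id into bits [7:4] and [3:0].
+	 */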
+	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
+	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
+
+	for (i = 0; i < cur_cluster->num_sets; i++) {
+		start = cur_cluster->regs[2 * i];
+		end = cur_cluster->regs[2 * i + 1];
+
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		*data++ = start | (1 << 31);
+		*data++ = end;
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			kgsl_regread(device, j, &val);
+			*data++ = val;
+		}
+	}
+out:
+	return data_size + sizeof(*header);
+}
+
+static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+				(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_regs_info *info =
+				(struct a6xx_cluster_regs_info *)priv;
+	struct a6xx_cluster_registers *cluster = info->cluster;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int *src;
+	int i, j;
+	unsigned int start, end;
+	size_t data_size = 0;
+
+	if (!crash_dump_valid)
+		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cluster->id;
+
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
+		(header->ctxt_id ? cluster->offset1 : cluster->offset0));
+
+	for (i = 0; i < cluster->num_sets; i++) {
+		start = cluster->regs[2 * i];
+		end = cluster->regs[2 * i + 1];
+
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		*data++ = start | (1 << 31);
+		*data++ = end;
+		for (j = start; j <= end; j++)
+			*data++ = *src++;
+	}
+
+out:
+	return data_size + sizeof(*header);
+
+}
+
+static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	int i, j;
+	struct a6xx_cluster_regs_info info;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
+		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+
+		info.cluster = cluster;
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+			info.ctxt_id = j;
+
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
+				a6xx_snapshot_mvc, &info);
+		}
+	}
+}
+
+/* a6xx_dbgc_debug_bus_read() - Read data from trace bus */
+static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
+	unsigned int block_id, unsigned int index, unsigned int *val)
+{
+	unsigned int reg;
+
+	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
+			(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+	val++;
+	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+}
+
+/* a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a GPU block */
+static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
+	u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_debugbus *header =
+		(struct kgsl_snapshot_debugbus *)buf;
+	struct adreno_debugbus_block *block = priv;
+	int i;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int dwords;
+	size_t size;
+
+	dwords = block->dwords;
+
+	/* For a6xx each debug bus data unit is 2 DWORDS */
+	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
+
+	if (remain < size) {
+		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+		return 0;
+	}
+
+	header->id = block->block_id;
+	header->count = dwords * 2;
+
+	for (i = 0; i < dwords; i++)
+		a6xx_dbgc_debug_bus_read(device, block->block_id, i,
+					&data[i*2]);
+
+	return size;
+}
+
+/* a6xx_snapshot_debugbus() - Capture debug bus data */
+static void a6xx_snapshot_debugbus(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	int i;
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
+		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
+		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
+		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
+		kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+			snapshot, a6xx_snapshot_dbgc_debugbus_block,
+			(void *) &a6xx_dbgc_debugbus_blocks[i]);
+	}
+}
+
+static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0, j, k;
+
+	/* Figure out how many registers we are going to dump */
+	for (j = 0; j < regs->count; j++) {
+		int start = regs->regs[j * 2];
+		int end = regs->regs[j * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[j * 2];
+		unsigned int end = regs->regs[j * 2 + 1];
+
+		for (k = start; k <= end; k++) {
+			unsigned int val;
+
+			kgsl_gmu_regread(device, k, &val);
+			*data++ = k;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+static void a6xx_snapshot_gmu(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	struct kgsl_snapshot_registers gmu_regs = {
+		.regs = a6xx_gmu_registers,
+		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
+	};
+
+	if (!kgsl_gmu_isenabled(device))
+		return;
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+}
+
+static void _a6xx_do_crashdump(struct kgsl_device *device)
+{
+	unsigned long wait_time;
+	unsigned int reg = 0;
+	unsigned int val;
+
+	crash_dump_valid = false;
+
+	if (a6xx_capturescript.gpuaddr == 0 ||
+		a6xx_crashdump_registers.gpuaddr == 0)
+		return;
+
+	/* If the SMMU is stalled we cannot do a crash dump */
+	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
+	if (val & BIT(24))
+		return;
+
+	/* Turn on APRIV so we can access the buffers */
+	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
+
+	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
+			lower_32_bits(a6xx_capturescript.gpuaddr));
+	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
+			upper_32_bits(a6xx_capturescript.gpuaddr));
+	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
+	while (!time_after(jiffies, wait_time)) {
+		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
+		if (reg & 0x2)
+			break;
+		cpu_relax();
+	}
+
+	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
+
+	if (!(reg & 0x2)) {
+		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
+		return;
+	}
+
+	crash_dump_valid = true;
+}
+
+/*
+ * a6xx_snapshot() - A6XX GPU snapshot function
+ * @adreno_dev: Device being snapshotted
+ * @snapshot: Pointer to the snapshot instance
+ *
+ * Collect all of the A6XX-specific register and memory state into the
+ * snapshot memory.
+ */
+void a6xx_snapshot(struct adreno_device *adreno_dev,
+		struct kgsl_snapshot *snapshot)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
+
+	/* Try to run the crash dumper */
+	_a6xx_do_crashdump(device);
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+		snapshot, a6xx_snapshot_registers, NULL);
+
+	adreno_snapshot_vbif_registers(device, snapshot,
+		a6xx_vbif_snapshot_registers,
+		ARRAY_SIZE(a6xx_vbif_snapshot_registers));
+
+	/* CP_SQE indexed registers */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
+		0, snap_data->sect_sizes->cp_pfp);
+
+	/* CP_DRAW_STATE */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
+		0, 0x100);
+
+	/* SQE_UCODE Cache */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
+		0, 0x6000);
+
+	/* CP ROQ */
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
+		snapshot, adreno_snapshot_cp_roq,
+		&snap_data->sect_sizes->roq);
+
+	/* Mempool debug data */
+	a6xx_snapshot_mempool(device, snapshot);
+
+	/* Shader memory */
+	a6xx_snapshot_shader(device, snapshot);
+
+	/* MVC register section */
+	a6xx_snapshot_mvc_regs(device, snapshot);
+
+	/* registers dumped through DBG AHB */
+	a6xx_snapshot_dbgahb_regs(device, snapshot);
+
+	a6xx_snapshot_debugbus(device, snapshot);
+
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(device, snapshot);
+}
+
+static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, j, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
+		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+
+		cluster->offset0 = *offset;
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+			if (j == 1)
+				cluster->offset1 = *offset;
+
+			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
+			ptr[qwords++] =
+				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
+				(1 << 21) | 1;
+
+			for (k = 0; k < cluster->num_sets; k++) {
+				count = REG_PAIR_COUNT(cluster->regs, k);
+				ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+				ptr[qwords++] =
+				(((uint64_t)cluster->regs[2 * k]) << 44) |
+						count;
+
+				*offset += count * sizeof(unsigned int);
+			}
+		}
+	}
+
+	return qwords;
+}
+
+static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
+		uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int j;
+
+	/* Capture each bank in the block */
+	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
+		/* Program the aperture */
+		ptr[qwords++] =
+			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
+		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
+			(1 << 21) | 1;
+
+		/* Read all the data in one chunk */
+		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
+		ptr[qwords++] =
+			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
+			block->sz;
+
+		/* Remember the offset of the first bank for easy access */
+		if (j == 0)
+			block->offset = *offset;
+
+		*offset += block->sz * sizeof(unsigned int);
+	}
+
+	return qwords;
+}
+
+void a6xx_crashdump_init(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int script_size = 0;
+	unsigned int data_size = 0;
+	unsigned int i, j, k;
+	uint64_t *ptr;
+	uint64_t offset = 0;
+
+	if (a6xx_capturescript.gpuaddr != 0 &&
+		a6xx_crashdump_registers.gpuaddr != 0)
+		return;
+
+	/*
+	 * We need to allocate two buffers:
+	 * 1 - the buffer to hold the capture script
+	 * 2 - the buffer to hold the data
+	 */
+
+	/*
+	 * To save the registers, we need 16 bytes per register pair for the
+	 * script and a dword for each register in the data
+	 */
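+	/*
+	 * Each script entry is two qwords: the destination address in the
+	 * data buffer and (start register << 44) | count. A pair covering
+	 * 16 registers, for example, adds 16 bytes of script and 64 bytes
+	 * (16 dwords) of data.
+	 */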
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		/* Each pair needs 16 bytes (2 qwords) */
+		script_size += (regs->size / 2) * 16;
+
+		/* Each register needs a dword in the data */
+		for (j = 0; j < regs->size / 2; j++)
+			data_size += REG_PAIR_COUNT(regs->regs, j) *
+				sizeof(unsigned int);
+
+	}
+
+	/*
+	 * To save the shader blocks for each block in each type we need 32
+	 * bytes for the script (16 bytes to program the aperture and 16 to
+	 * read the data) and then a block specific number of bytes to hold
+	 * the data
+	 */
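+	/*
+	 * With A6XX_NUM_SHADER_BANKS == 3, for example, each block adds
+	 * 3 * 32 = 96 bytes of script and 3 * sz dwords of data.
+	 */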
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		script_size += 32 * A6XX_NUM_SHADER_BANKS;
+		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
+			A6XX_NUM_SHADER_BANKS;
+	}
+
+	/* Calculate the script and data size for MVC registers */
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
+		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+			/* 16 bytes for programming the aperture */
+			script_size += 16;
+
+			/* Reading each pair of registers takes 16 bytes */
+			script_size += 16 * cluster->num_sets;
+
+			/* A dword per register read from the cluster list */
+			for (k = 0; k < cluster->num_sets; k++)
+				data_size += REG_PAIR_COUNT(cluster->regs, k) *
+						sizeof(unsigned int);
+		}
+	}
+
+	/* Now allocate the script and data buffers */
+
+	/* The script buffer needs 2 extra qwords on the end */
+	if (kgsl_allocate_global(device, &a6xx_capturescript,
+		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
+		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
+		return;
+
+	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
+		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
+		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
+		return;
+	}
+
+	/* Build the crash script */
+
+	ptr = (uint64_t *)a6xx_capturescript.hostptr;
+
+	/* For the registers, program a read command for each pair */
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		for (j = 0; j < regs->size / 2; j++) {
+			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
+			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+			offset += r * sizeof(unsigned int);
+		}
+	}
+
+	/* Program each shader block */
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
+							&offset);
+	}
+
+	/* Program the capture script for the MVC registers */
+	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
+
+	*ptr++ = 0;
+	*ptr++ = 0;
+}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index b0e9292..4d38794 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -26,6 +26,7 @@
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
+#include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
 #define KGSL_PWRFLAGS_POWER_ON 0
@@ -65,7 +66,8 @@
 	"alwayson_clk",
 	"isense_clk",
 	"rbcpr_clk",
-	"iref_clk"
+	"iref_clk",
+	"gmu_clk"
 };
 
 static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
@@ -214,6 +216,69 @@
 #endif
 
 /**
+ * kgsl_bus_scale_request() - set GPU BW vote
+ * @device: Pointer to the kgsl_device struct
+ * @buslevel: index into the bus bandwidth vector table
+ */
+static int kgsl_bus_scale_request(struct kgsl_device *device,
+		unsigned int buslevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret;
+
+	/* GMU scales BW */
+	if (kgsl_gmu_isenabled(device)) {
+		if (!(gmu->flags & GMU_HFI_ON))
+			return 0;
+
+		ret = gmu_dcvs_set(gmu, INVALID_DCVS_IDX, buslevel);
+	} else {
+		/* Linux bus driver scales BW */
+		ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU BW scaling failure\n");
+
+	return ret;
+}
+
+/**
+ * kgsl_clk_set_rate() - set GPU clock rate
+ * @device: Pointer to the kgsl_device struct
+ * @pwrlevel: index into the pwrlevels[] table
+ */
+static int kgsl_clk_set_rate(struct kgsl_device *device,
+		unsigned int pwrlevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret = 0;
+
+	/* GMU scales GPU freq */
+	if (kgsl_gmu_isenabled(device)) {
+		/* GMU not started yet; save the level to apply at wakeup */
+		if (!(gmu->flags & GMU_HFI_ON)) {
+			gmu->wakeup_pwrlevel = pwrlevel;
+			return 0;
+		}
+
+		ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
+	} else {
+		/* Linux clock driver scales GPU freq */
+		struct kgsl_pwrlevel *pl = &pwr->pwrlevels[pwrlevel];
+
+		ret = clk_set_rate(pwr->grp_clks[0], pl->gpu_freq);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU clk freq set failure\n");
+
+	return ret;
+}
+
+/**
  * kgsl_pwrctrl_buslevel_update() - Recalculate the bus vote and send it
  * @device: Pointer to the kgsl_device struct
  * @on: true for setting and active bus vote, false to turn off the vote
@@ -259,7 +324,7 @@
 
 	/* vote for bus if gpubw-dev support is not enabled */
 	if (pwr->pcl)
-		msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+		kgsl_bus_scale_request(device, buslevel);
 
 	kgsl_pwrctrl_vbif_update(ab);
 }
@@ -388,7 +453,7 @@
 	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
 	/* Change register settings if any  BEFORE pwrlevel change*/
 	kgsl_pwrctrl_pwrlevel_change_settings(device, 0);
-	clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
+	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
 	_isense_clk_set_rate(pwr, pwr->active_pwrlevel);
 
 	trace_kgsl_pwrlevel(device,
@@ -1631,9 +1696,8 @@
 				(requested_state != KGSL_STATE_NAP)) {
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					clk_unprepare(pwr->grp_clks[i]);
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1645,9 +1709,8 @@
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 				clk_unprepare(pwr->grp_clks[i]);
 			if ((pwr->pwrlevels[0].gpu_freq > 0)) {
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1660,10 +1723,8 @@
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
 				if (pwr->pwrlevels[0].gpu_freq > 0) {
-					clk_set_rate(pwr->grp_clks[0],
-						pwr->pwrlevels
-						[pwr->active_pwrlevel].
-						gpu_freq);
+					kgsl_clk_set_rate(device,
+							pwr->active_pwrlevel);
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
@@ -2101,11 +2162,11 @@
 		if (freq > 0)
 			freq = clk_round_rate(pwr->grp_clks[0], freq);
 
-		pwr->pwrlevels[i].gpu_freq = freq;
+		if (freq >= pwr->pwrlevels[i].gpu_freq)
+			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	clk_set_rate(pwr->grp_clks[0],
-		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
 	clk_set_rate(pwr->grp_clks[6],
 		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
@@ -2362,8 +2423,12 @@
 	/* In order to touch a register you must hold the device mutex */
 	WARN_ON(!mutex_is_locked(&device->mutex));
 
-	/* A register access without device power will cause a fatal timeout */
-	BUG_ON(!kgsl_pwrctrl_isenabled(device));
+	/*
+	 * A register access without device power will cause a fatal timeout.
+	 * This check does not apply to targets with a GMU.
+	 */
+	if (!kgsl_gmu_isenabled(device))
+		WARN_ON(!kgsl_pwrctrl_isenabled(device));
 }
 EXPORT_SYMBOL(kgsl_pre_hwaccess);
 
@@ -2383,6 +2448,9 @@
 
 	kgsl_pwrctrl_pwrlevel_change(device, level);
 
+	if (kgsl_gmu_isenabled(device))
+		return gmu_start(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	status = kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
 	if (status)
@@ -2394,6 +2462,9 @@
 
 static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 {
+	if (kgsl_gmu_isenabled(device))
+		return gmu_stop(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	device->ftbl->regulator_disable(device);
 	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 58f16e8..62ee597 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -25,7 +25,7 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
-#define KGSL_MAX_CLKS 14
+#define KGSL_MAX_CLKS 15
 #define KGSL_MAX_REGULATORS 2
 
 #define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 2cb8b8f..d2ff8f1 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -58,6 +58,7 @@
 #define KGSL_SNAPSHOT_SECTION_MEMLIST      0x0E01
 #define KGSL_SNAPSHOT_SECTION_MEMLIST_V2   0x0E02
 #define KGSL_SNAPSHOT_SECTION_SHADER       0x1201
+#define KGSL_SNAPSHOT_SECTION_MVC          0x1501
 
 #define KGSL_SNAPSHOT_SECTION_END          0xFFFF
 
@@ -196,6 +197,12 @@
 	int count;     /* Number of dwords in the data */
 } __packed;
 
+/* MVC register sub-section header */
+struct kgsl_snapshot_mvc_regs {
+	int ctxt_id;
+	int cluster_id;
+} __packed;
+
 /* Istore sub-section header */
 struct kgsl_snapshot_istore {
 	int count;   /* Number of instructions in the istore */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 8e38a24..d7b4363 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/qcom-geni-se.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
@@ -57,6 +58,7 @@
 	struct i2c_adapter adap;
 	struct completion xfer;
 	struct i2c_msg *cur;
+	struct se_geni_rsc i2c_rsc;
 	int cur_wr;
 	int cur_rd;
 };
@@ -153,7 +155,15 @@
 	gi2c->err = 0;
 	gi2c->cur = &msgs[0];
 	reinit_completion(&gi2c->xfer);
-	enable_irq(gi2c->irq);
+	ret = pm_runtime_get_sync(gi2c->dev);
+	if (ret < 0) {
+		dev_err(gi2c->dev, "error turning on SE resources: %d\n", ret);
+		pm_runtime_put_noidle(gi2c->dev);
+		/* Mark the device as suspended since resume failed */
+		pm_runtime_set_suspended(gi2c->dev);
+		return ret;
+	}
+	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 	qcom_geni_i2c_conf(gi2c->base, 0, 2);
 	se_config_packing(gi2c->base, 8, 4, true);
 	dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
@@ -206,7 +216,7 @@
 	}
 	if (ret == 0)
 		ret = i;
-	disable_irq(gi2c->irq);
+	pm_runtime_put_sync(gi2c->dev);
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -239,10 +249,54 @@
 	if (!res)
 		return -EINVAL;
 
+	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.m_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.s_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		return ret;
+	}
+
 	gi2c->base = devm_ioremap_resource(gi2c->dev, res);
 	if (IS_ERR(gi2c->base))
 		return PTR_ERR(gi2c->base);
 
+	gi2c->i2c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_pinctrl);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_active =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_active);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_sleep =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_sleep);
+		return ret;
+	}
+
 	gi2c->irq = platform_get_irq(pdev, 0);
 	if (gi2c->irq < 0) {
 		dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
@@ -266,8 +320,9 @@
 
 	strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
 
+	pm_runtime_set_suspended(gi2c->dev);
+	pm_runtime_enable(gi2c->dev);
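+	/*
+	 * The controller starts runtime-suspended; each transfer takes a
+	 * runtime PM reference, which switches the SE resources back on
+	 * via geni_i2c_runtime_resume().
+	 */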
 	i2c_add_adapter(&gi2c->adap);
-	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 
 	return 0;
 }
@@ -276,27 +331,67 @@
 {
 	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
 
-	disable_irq(gi2c->irq);
+	pm_runtime_disable(gi2c->dev);
 	i2c_del_adapter(&gi2c->adap);
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int geni_i2c_suspend(struct device *device)
+static int geni_i2c_resume_noirq(struct device *device)
 {
 	return 0;
 }
 
-static int geni_i2c_resume(struct device *device)
+#ifdef CONFIG_PM
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	disable_irq(gi2c->irq);
+	se_geni_resources_off(&gi2c->i2c_rsc);
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	ret = se_geni_resources_on(&gi2c->i2c_rsc);
+	if (ret)
+		return ret;
+
+	enable_irq(gi2c->irq);
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
+{
+	if (!pm_runtime_status_suspended(device))
+		return -EBUSY;
+	return 0;
+}
+#else
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
 {
 	return 0;
 }
 #endif
 
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(
-		geni_i2c_suspend,
-		geni_i2c_resume)
+	.suspend_noirq		= geni_i2c_suspend_noirq,
+	.resume_noirq		= geni_i2c_resume_noirq,
+	.runtime_suspend	= geni_i2c_runtime_suspend,
+	.runtime_resume		= geni_i2c_runtime_resume,
 };
 
 static const struct of_device_id geni_i2c_dt_match[] = {
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 83768e8..2178266 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -429,6 +429,7 @@
 	while (muxc->num_adapters) {
 		struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
 		struct i2c_mux_priv *priv = adap->algo_data;
+		struct device_node *np = adap->dev.of_node;
 
 		muxc->adapter[muxc->num_adapters] = NULL;
 
@@ -438,6 +439,7 @@
 
 		sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
 		i2c_del_adapter(adap);
+		of_node_put(np);
 		kfree(priv);
 	}
 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4cab29e..11bfa27 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3141,9 +3141,11 @@
 	if (err)
 		goto err_rsrc;
 
-	err = mlx5_ib_alloc_q_counters(dev);
-	if (err)
-		goto err_odp;
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
+		err = mlx5_ib_alloc_q_counters(dev);
+		if (err)
+			goto err_odp;
+	}
 
 	err = ib_register_device(&dev->ib_dev, NULL);
 	if (err)
@@ -3171,7 +3173,8 @@
 	ib_unregister_device(&dev->ib_dev);
 
 err_q_cnt:
-	mlx5_ib_dealloc_q_counters(dev);
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		mlx5_ib_dealloc_q_counters(dev);
 
 err_odp:
 	mlx5_ib_odp_remove_one(dev);
@@ -3201,7 +3204,8 @@
 
 	mlx5_remove_roce_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
-	mlx5_ib_dealloc_q_counters(dev);
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);
 	mlx5_ib_odp_remove_one(dev);
 	destroy_dev_resources(&dev->devr);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c782305..37dfe0a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -576,6 +576,17 @@
 	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
 }
 
+static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
+{
+	if (smmu_domain->attributes &
+			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
+		return true;
+	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
+		return smmu_domain->smmu->dev->archdata.dma_coherent;
+	else
+		return false;
+}
+
 static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
 {
 	return (smmu_domain->secure_vmid != VMID_INVAL);
@@ -1603,6 +1614,8 @@
 
 	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
 		quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
+	if (is_iommu_pt_coherent(smmu_domain))
+		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
 
 	/* Dynamic domains must set cbndx through domain attribute */
 	if (!dynamic) {
@@ -2590,7 +2603,12 @@
 	case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
 		if (!smmu_domain->smmu)
 			return -ENODEV;
-		*((int *)data) = smmu_domain->smmu->dev->archdata.dma_coherent;
+		*((int *)data) = is_iommu_pt_coherent(smmu_domain);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
+		*((int *)data) = !!(smmu_domain->attributes
+			& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
 		ret = 0;
 		break;
 	default:
@@ -2749,6 +2767,27 @@
 		}
 		break;
 	}
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
+		int force_coherent = *((int *)data);
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change force coherent attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (force_coherent)
+			smmu_domain->attributes |=
+			    1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
+		else
+			smmu_domain->attributes &=
+			    ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
+
+		ret = 0;
+		break;
+	}
+
 	default:
 		ret = -ENODEV;
 	}
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 6a6ee40..85fe317 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -437,7 +437,7 @@
 		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
-	else if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+	else if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
 		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
@@ -583,6 +583,7 @@
 	av8l_fast_iopte *pmds;
 
 	cfg = (struct io_pgtable_cfg) {
+		.quirks = 0,
 		.tlb = &dummy_tlb_ops,
 		.ias = 32,
 		.oas = 32,
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 08809a9..01e553c 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -677,7 +677,8 @@
 #define VIN_FLASH_MIN_UV	3300000LL
 static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
 {
-	int ocv_uv, rbatt_uohm, ibat_now, voltage_hdrm_mv, rc;
+	int ocv_uv, ibat_now, voltage_hdrm_mv, rc;
+	int rbatt_uohm = 0;
 	int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
 	int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv, vph_flash_vdip;
 
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 16ce734..3b19017 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -115,6 +115,12 @@
 	return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
 }
 
+static inline bool is_unlocked(void)
+{
+	static const char unlocked[] = "orange";
+
+	return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked));
+}
 
 static int table_extract_mpi_array(struct public_key_signature *pks,
 				const void *data, size_t len)
@@ -651,6 +657,28 @@
 	return err;
 }
 
+static int create_linear_device(struct dm_target *ti, dev_t dev,
+				char *target_device)
+{
+	u64 device_size = 0;
+	int err = find_size(dev, &device_size);
+
+	if (err) {
+		DMERR("error finding bdev size");
+		handle_error();
+		return err;
+	}
+
+	ti->len = device_size;
+	err = add_as_linear_device(ti, target_device);
+	if (err) {
+		handle_error();
+		return err;
+	}
+	verity_enabled = false;
+	return 0;
+}
+
 /*
  * Target parameters:
  *	<key id>	Key id of the public key in the system keyring.
@@ -674,7 +702,6 @@
 	struct fec_ecc_metadata uninitialized_var(ecc);
 	char buf[FEC_ARG_LENGTH], *buf_ptr;
 	unsigned long long tmpll;
-	u64  uninitialized_var(device_size);
 
 	if (argc == 1) {
 		/* Use the default keyid */
@@ -702,23 +729,8 @@
 		return -EINVAL;
 	}
 
-	if (is_eng()) {
-		err = find_size(dev, &device_size);
-		if (err) {
-			DMERR("error finding bdev size");
-			handle_error();
-			return err;
-		}
-
-		ti->len = device_size;
-		err = add_as_linear_device(ti, target_device);
-		if (err) {
-			handle_error();
-			return err;
-		}
-		verity_enabled = false;
-		return 0;
-	}
+	if (is_eng())
+		return create_linear_device(ti, dev, target_device);
 
 	strreplace(key_id, '#', ' ');
 
@@ -733,6 +745,11 @@
 	err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
 
 	if (err) {
+		/* Allow invalid metadata when the device is unlocked */
+		if (is_unlocked()) {
+			DMWARN("Allow invalid metadata when unlocked");
+			return create_linear_device(ti, dev, target_device);
+		}
 		DMERR("Error while extracting metadata");
 		handle_error();
 		goto free_metadata;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1d..628ba00 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -972,10 +972,61 @@
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
+/*
+ * Flush current->bio_list when the target map method blocks.
+ * This fixes deadlocks in snapshot and possibly in other targets.
+ */
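+/*
+ * dm_offload_start() registers a plug callback so that, if the map method
+ * blocks, bios queued on current->bio_list are punted to their bio_set's
+ * rescue workqueue by flush_current_bio_list() instead of waiting behind
+ * the blocked thread.
+ */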
+struct dm_offload {
+	struct blk_plug plug;
+	struct blk_plug_cb cb;
+};
+
+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+	struct bio_list list;
+	struct bio *bio;
+
+	INIT_LIST_HEAD(&o->cb.list);
+
+	if (unlikely(!current->bio_list))
+		return;
+
+	list = *current->bio_list;
+	bio_list_init(current->bio_list);
+
+	while ((bio = bio_list_pop(&list))) {
+		struct bio_set *bs = bio->bi_pool;
+		if (unlikely(!bs) || bs == fs_bio_set) {
+			bio_list_add(current->bio_list, bio);
+			continue;
+		}
+
+		spin_lock(&bs->rescue_lock);
+		bio_list_add(&bs->rescue_list, bio);
+		queue_work(bs->rescue_workqueue, &bs->rescue_work);
+		spin_unlock(&bs->rescue_lock);
+	}
+}
+
+static void dm_offload_start(struct dm_offload *o)
+{
+	blk_start_plug(&o->plug);
+	o->cb.callback = flush_current_bio_list;
+	list_add(&o->cb.list, &current->plug->cb_list);
+}
+
+static void dm_offload_end(struct dm_offload *o)
+{
+	list_del(&o->cb.list);
+	blk_finish_plug(&o->plug);
+}
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
+	struct dm_offload o;
 	struct bio *clone = &tio->clone;
 	struct dm_target *ti = tio->ti;
 
@@ -988,7 +1039,11 @@
 	 */
 	atomic_inc(&tio->io->io_count);
 	sector = clone->bi_iter.bi_sector;
+
+	dm_offload_start(&o);
 	r = ti->type->map(ti, clone);
+	dm_offload_end(&o);
+
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index bd925f4..faba819 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -1,3 +1,5 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
new file mode 100644
index 0000000..6f9525e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
new file mode 100644
index 0000000..78cd9d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int cam_io_w(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	writel_relaxed(data, addr);
+
+	return 0;
+}
+
+int cam_io_w_mb(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	/* Ensure previous writes are done */
+	wmb();
+	writel_relaxed(data, addr);
+
+	return 0;
+}
+
+uint32_t cam_io_r(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	data = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, data);
+
+	return data;
+}
+
+uint32_t cam_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	/* Ensure previous read is done */
+	rmb();
+	data = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, data);
+
+	return data;
+}
+
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	for (i = 0; i < len/4; i++) {
+		CDBG("0x%pK %08x\n", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+int  cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	/*
+	 * Do not use cam_io_w_mb to avoid double wmb() after a write
+	 * and before the next write.
+	 */
+	wmb();
+	for (i = 0; i < (len / 4); i++) {
+		CDBG("0x%pK %08x\n", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while ((tmp != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		pr_debug("Poll failed by value\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		pr_debug("Poll failed with mask\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			i, len, data[i], addr);
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			i, len, data[i], addr);
+		/* Ensure previous writes are done */
+		wmb();
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+#define __OFFSET(__i)   (data[__i][0])
+#define __VAL(__i)      (data[__i][1])
+int cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	/* Ensure write is done */
+	wmb();
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    (__start + (__i * BYTES_PER_REGISTER))
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			snprintf(p_str, 12, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 12;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		snprintf(p_str, 9, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
new file mode 100644
index 0000000..e4f73ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
@@ -0,0 +1,239 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IO_UTIL_H_
+#define _CAM_IO_UTIL_H_
+
+#include <linux/types.h>
+
+/**
+ * cam_io_w()
+ *
+ * @brief:              Camera IO util for register write
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_w_mb()
+ *
+ * @brief:              Camera IO util for register write with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If this write also needs to be flushed,
+ *                      call wmb() independently in the caller.
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_r()
+ *
+ * @brief:              Camera IO util for register read
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r(void __iomem *addr);
+
+/**
+ * cam_io_r_mb()
+ *
+ * @brief:              Camera IO util for register read with memory barrier.
+ *                      Memory Barrier is only before the read to ensure the
+ *                      order. If ordering after this read is also needed,
+ *                      call rmb() independently in the caller.
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r_mb(void __iomem *addr);
+
+/**
+ * cam_io_memcpy()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+		void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_memcpy_mb()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *                      with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If this write also needs to be flushed,
+ *                      call wmb() independently in the caller.
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_poll_value_wmask()
+ *
+ * @brief:              Poll register value with bitmask.
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until @bmask read from @addr matches this data
+ * @bmask:              Bit mask
+ * @retry:              Number of retries
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs);
+
+/**
+ * cam_io_poll_value()
+ *
+ * @brief:              Poll register value
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until value read from @addr matches this data
+ * @retry:              Number of retries
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs);
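+
+/*
+ * Minimal usage sketch for cam_io_poll_value(). The status register
+ * offset, expected value and retry/delay numbers below are illustrative
+ * only, not taken from any real hardware block:
+ *
+ *	rc = cam_io_poll_value(base + 0x0fc, 0x1, 10, 100, 1000);
+ *	if (rc)
+ *		pr_err("status register never reached expected value\n");
+ */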
+
+/**
+ * cam_io_w_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address
+ *
+ * @data:               Block data to be written
+ * @addr:               Register address to be written
+ * @len:                Number of 32-bit words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
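+
+/*
+ * Minimal usage sketch for cam_io_w_same_offset_block(): push a payload
+ * word-by-word into a single register (for example a command FIFO). The
+ * register offset and payload values are hypothetical:
+ *
+ *	static const uint32_t payload[] = { 0x1, 0x2, 0x3 };
+ *
+ *	cam_io_w_same_offset_block(payload, base + 0x60, ARRAY_SIZE(payload));
+ */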
+
+/**
+ * cam_io_w_mb_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If this write also needs to be flushed,
+ *                      call wmb() independently in the caller.
+ *
+ * @data:               Block data to be written
+ * @addr:               Register address to be written
+ * @len:                Number of 32-bit words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Number of offset-value pairs to be written
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
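+
+/*
+ * Minimal usage sketch for cam_io_w_offset_val_block(). Each row holds an
+ * offset from addr_base and the value to write there; the offsets and
+ * values shown are hypothetical:
+ *
+ *	static const uint32_t init_regs[][2] = {
+ *		{ 0x004, 0x00000001 },
+ *		{ 0x010, 0x000000ff },
+ *		{ 0x014, 0x00000000 },
+ *	};
+ *
+ *	cam_io_w_offset_val_block(init_regs, base, ARRAY_SIZE(init_regs));
+ */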
+
+/**
+ * cam_io_w_mb_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If this write also needs to be flushed,
+ *                      call wmb() independently in the caller.
+ *                      The OFFSETS NEED to be different because of the way
+ *                      barrier is used here.
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Number of offset-value pairs to be written
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_dump()
+ *
+ * @brief:              Camera IO util for dumping a range of register
+ *
+ * @base_addr:          Start register address for the dumping
+ * @start_offset:       Start register offset for the dump
+ * @size:               Size specifying the range for dumping
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size);
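+
+/*
+ * Minimal usage sketch for cam_io_dump(): dump 16 registers starting at
+ * offset 0x100 of an already mapped block (offset and count are
+ * illustrative):
+ *
+ *	cam_io_dump(base, 0x100, 16);
+ */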
+
+#endif /* _CAM_IO_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
new file mode 100644
index 0000000..d396d4f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -0,0 +1,598 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	enable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	disable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+	long clk_rate_round;
+
+	if (!clk || !clk_name || !clk_rate)
+		return -EINVAL;
+
+	CDBG("enable %s, clk %pK rate %d\n",
+		clk_name, clk, clk_rate);
+	if (clk_rate > 0) {
+		clk_rate_round = clk_round_rate(clk, clk_rate);
+		CDBG("new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round < 0) {
+			pr_err("%s: round failed for clock %s rc = %ld\n",
+				__func__, clk_name, clk_rate_round);
+			return clk_rate_round;
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	} else if (clk_rate == INIT_RATE) {
+		clk_rate_round = clk_get_rate(clk);
+		CDBG("init new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round == 0) {
+			clk_rate_round = clk_round_rate(clk, 0);
+			if (clk_rate_round <= 0) {
+				pr_err("round rate failed on %s\n", clk_name);
+				return clk_rate_round;
+			}
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	}
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		pr_err("enable failed for %s\n", clk_name);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
+{
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CDBG("disable %s\n", clk_name);
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_clk_enable_default()
+ *
+ * @brief:              This function enables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           device soc information
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i, rc = 0;
+
+	if (soc_info->num_clk == 0)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_clk_enable(soc_info->clk[i],
+			soc_info->clk_name[i], soc_info->clk_rate[i]);
+		if (rc)
+			goto clk_disable;
+	}
+
+	return rc;
+
+clk_disable:
+	for (i--; i >= 0; i--) {
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_clk_disable_default()
+ *
+ * @brief:              This function disables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           device soc information
+ *
+ * @return:             none
+ */
+static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (soc_info->num_clk == 0)
+		return;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		CDBG("disable %s\n", soc_info->clk_name[i]);
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+}
+
+/**
+ * cam_soc_util_get_dt_clk_info()
+ *
+ * @brief:              Parse the DT and populate the Clock properties
+ *
+ * @soc_info:           device soc struct to be populated
+ *                      (the rate-controlled src clock is identified via the
+ *                      "src-clock-name" DT property)
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count;
+	int i, rc;
+	struct platform_device *pdev = NULL;
+	const char *src_clk_str = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CDBG("count = %d\n", count);
+	if (count > CAM_SOC_MAX_CLK) {
+		pr_err("invalid count of clocks, count=%d", count);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (count <= 0) {
+		CDBG("No clock-names found\n");
+		count = 0;
+		soc_info->num_clk = count;
+		return 0;
+	}
+	soc_info->num_clk = count;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(soc_info->clk_name[i]));
+		CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+		if (rc) {
+			pr_err("i= %d count= %d reading clock-names failed\n",
+				i, count);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32_array(of_node, "clock-rates",
+		soc_info->clk_rate, count);
+	if (rc) {
+		pr_err("reading clock-rates failed");
+		return rc;
+	}
+
+	rc = of_property_read_string_index(of_node, "src-clock-name",
+				i, &src_clk_str);
+	if (rc) {
+		CDBG("No src_clk_str found\n");
+		soc_info->src_clk_idx = -1;
+		rc = 0;
+		/* Bottom loop is dependent on src_clk_str. So return here */
+		return rc;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
+			(long)-1 : soc_info->clk_rate[i];
+		if (src_clk_str &&
+			(strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+			soc_info->src_clk_idx = i;
+		}
+		CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count = 0, i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	if (rc) {
+		pr_err("device %s failed to read cell-index\n", pdev->name);
+		return rc;
+	}
+
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count <= 0) {
+		pr_err("no regulators found\n");
+		count = 0;
+	}
+	soc_info->num_rgltr = count;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		if (rc) {
+			pr_err("no regulator resource at cnt=%d\n", i);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	count = of_property_count_strings(of_node, "reg-names");
+	if (count <= 0) {
+		pr_err("no reg-names found\n");
+		count = 0;
+	}
+	soc_info->num_mem_block = count;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		rc = of_property_read_string_index(of_node, "reg-names", i,
+			&soc_info->mem_block_name[i]);
+		if (rc) {
+			pr_err("failed to read reg-names at %d\n", i);
+			return rc;
+		}
+		soc_info->mem_block[i] =
+			platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			soc_info->mem_block_name[i]);
+
+		if (!soc_info->mem_block[i]) {
+			pr_err("no mem resource by name %s\n",
+				soc_info->mem_block_name[i]);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32_array(of_node, "reg-cam-base",
+		soc_info->mem_block_cam_base, soc_info->num_mem_block);
+	if (rc) {
+		pr_err("Error reading register offsets\n");
+		return rc;
+	}
+
+	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
+		&soc_info->irq_name);
+	if (rc) {
+		pr_warn("No interrupt line present\n");
+	} else {
+		soc_info->irq_line = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, soc_info->irq_name);
+		if (!soc_info->irq_line) {
+			pr_err("no irq resource\n");
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = cam_soc_util_get_dt_clk_info(soc_info);
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_regulator()
+ *
+ * @brief:              Get regulator resource named vdd
+ *
+ * @pdev:               Platform device associated with regulator
+ * @reg:                Return pointer to be filled with regulator on success
+ * @rgltr_name:         Name of regulator to get
+ *
+ * @return:             0 for Success, negative value for failure
+ */
+static int cam_soc_util_get_regulator(struct platform_device *pdev,
+	struct regulator **reg, const char *rgltr_name)
+{
+	int rc = 0;
+	*reg = regulator_get(&pdev->dev, rgltr_name);
+	if (IS_ERR_OR_NULL(*reg)) {
+		rc = PTR_ERR(*reg);
+		rc = rc ? rc : -EINVAL;
+		pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+		*reg = NULL;
+	}
+	return rc;
+}
+
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data)
+{
+	int i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		soc_info->reg_map[i].mem_base = ioremap(
+			soc_info->mem_block[i]->start,
+			resource_size(soc_info->mem_block[i]));
+		if (!soc_info->reg_map[i].mem_base) {
+			pr_err("i= %d base NULL\n", i);
+			rc = -ENOMEM;
+			goto unmap_base;
+		}
+		soc_info->reg_map[i].mem_cam_base =
+			soc_info->mem_block_cam_base[i];
+		soc_info->reg_map[i].size =
+			resource_size(soc_info->mem_block[i]);
+		soc_info->num_reg_map++;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
+			soc_info->rgltr_name[i]);
+		if (rc)
+			goto put_regulator;
+	}
+
+	if (soc_info->irq_line) {
+		rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
+			handler, IRQF_TRIGGER_RISING,
+			soc_info->irq_name, irq_data);
+		if (rc < 0) {
+			pr_err("irq request fail\n");
+			rc = -EBUSY;
+			goto put_regulator;
+		}
+		disable_irq(soc_info->irq_line->start);
+	}
+
+	/* Get Clock */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
+			soc_info->clk_name[i]);
+		if (!soc_info->clk[i]) {
+			pr_err("get failed for %s\n", soc_info->clk_name[i]);
+			rc = -ENOENT;
+			goto put_clk;
+		}
+	}
+
+	return rc;
+
+put_clk:
+	if (i == -1)
+		i = soc_info->num_clk;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->clk[i]) {
+			clk_put(soc_info->clk[i]);
+			soc_info->clk[i] = NULL;
+		}
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+put_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_disable(soc_info->rgltr[i]);
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+unmap_base:
+	if (i == -1)
+		i = soc_info->num_reg_map;
+	for (i = i - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		clk_put(soc_info->clk[i]);
+		soc_info->clk[i] = NULL;
+	}
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+	for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+	return 0;
+}
+
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq)
+{
+	int i, rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_enable(soc_info->rgltr[i]);
+		if (rc) {
+			pr_err("Regulator enable %s failed\n",
+				soc_info->rgltr_name[i]);
+			goto disable_regulator;
+		}
+	}
+
+	if (enable_clocks) {
+		rc = cam_soc_util_clk_enable_default(soc_info);
+		if (rc)
+			goto disable_regulator;
+	}
+
+	if (enable_irq) {
+		rc  = cam_soc_util_irq_enable(soc_info);
+		if (rc)
+			goto disable_clk;
+	}
+
+	return rc;
+
+disable_clk:
+	if (enable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+disable_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i])
+			regulator_disable(soc_info->rgltr[i]);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int i, rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	if (disable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		rc |= regulator_disable(soc_info->rgltr[i]);
+		if (rc) {
+			pr_err("Regulator disable %s failed\n",
+				soc_info->rgltr_name[i]);
+			continue;
+		}
+	}
+
+	if (disable_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
+	return rc;
+}
+
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size)
+{
+	void __iomem     *base_addr = NULL;
+
+	CDBG("base_idx %u size=%d\n", base_index, size);
+
+	if (!soc_info || base_index >= soc_info->num_reg_map ||
+		size <= 0 || (offset + size) >=
+		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
+		return -EINVAL;
+
+	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/*
+	 * All error checking already done above,
+	 * hence ignoring the return value below.
+	 */
+	cam_io_dump(base_addr, offset, size);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
new file mode 100644
index 0000000..0baa9e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -0,0 +1,386 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SOC_UTIL_H_
+#define _CAM_SOC_UTIL_H_
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "cam_io_util.h"
+
+#define NO_SET_RATE  -1
+#define INIT_RATE    -2
+
+/* maximum number of device block */
+#define CAM_SOC_MAX_BLOCK           4
+
+/* maximum number of device base */
+#define CAM_SOC_MAX_BASE            CAM_SOC_MAX_BLOCK
+
+/* maximum number of device regulator */
+#define CAM_SOC_MAX_REGULATOR       4
+
+/* maximum number of device clock */
+#define CAM_SOC_MAX_CLK             32
+
+/**
+ * struct cam_soc_reg_map:   Information about the mapped register space
+ *
+ * @mem_base:               Starting location of MAPPED register space
+ * @mem_cam_base:           Starting offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @size:                   Size of register space
+ **/
+struct cam_soc_reg_map {
+	void __iomem                   *mem_base;
+	uint32_t                        mem_cam_base;
+	resource_size_t                 size;
+};
+
+/**
+ * struct cam_hw_soc_info:  Soc information pertaining to specific instance of
+ *                          Camera hardware driver module
+ *
+ * @pdev:                   Platform device pointer
+ * @hw_version:             Camera device version
+ * @index:                  Instance id for the camera device
+ * @irq_name:               Name of the irq associated with the device
+ * @irq_line:               Irq resource
+ * @num_mem_block:          Number of entries in the "reg-names"
+ * @mem_block_name:         Array of the reg block name
+ * @mem_block_cam_base:     Array of offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @mem_block:              Associated resource structs
+ * @reg_map:                Array of Mapped register info for the "reg-names"
+ * @num_reg_map:            Number of mapped register space associated
+ *                          with mem_block. num_reg_map = num_mem_block in
+ *                          most cases
+ * @num_rgltr:              Number of regulators
+ * @rgltr_name:             Array of regulator names
+ * @rgltr:                  Array of associated regulator resources
+ * @num_clk:                Number of clocks
+ * @clk_name:               Array of clock names
+ * @clk:                    Array of associated clock resources
+ * @clk_rate:               Array of default clock rates
+ * @src_clk_idx:            Source clock index that is rate-controllable
+ * @soc_private:            Soc private data
+ *
+ */
+struct cam_hw_soc_info {
+	struct platform_device         *pdev;
+	uint32_t                        hw_version;
+	uint32_t                        index;
+
+	const char                     *irq_name;
+	struct resource                *irq_line;
+
+	uint32_t                        num_mem_block;
+	const char                     *mem_block_name[CAM_SOC_MAX_BLOCK];
+	uint32_t                        mem_block_cam_base[CAM_SOC_MAX_BLOCK];
+	struct resource                *mem_block[CAM_SOC_MAX_BLOCK];
+	struct cam_soc_reg_map          reg_map[CAM_SOC_MAX_BASE];
+	uint32_t                        num_reg_map;
+
+	uint32_t                        num_rgltr;
+	const char                     *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	struct regulator               *rgltr[CAM_SOC_MAX_REGULATOR];
+
+	uint32_t                        num_clk;
+	const char                     *clk_name[CAM_SOC_MAX_CLK];
+	struct clk                     *clk[CAM_SOC_MAX_CLK];
+	int32_t                         clk_rate[CAM_SOC_MAX_CLK];
+	int32_t                         src_clk_idx;
+
+	void                           *soc_private;
+};
+
+/*
+ * CAM_SOC_GET_REG_MAP_START
+ *
+ * @brief:              This MACRO will get the mapped starting address
+ *                      where the register space can be accessed
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a pointer to the mapped register memory
+ */
+#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index)          \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		NULL : __soc_info->reg_map[__base_index].mem_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_CAM_BASE
+ *
+ * @brief:              This MACRO will get the cam_base of the
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns an int32_t value.
+ *                        Failure: -1
+ *                        Success: Starting offset of register space compared
+ *                                 to entire Camera Register Map
+ */
+#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index)       \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		-1 : __soc_info->reg_map[__base_index].mem_cam_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_SIZE
+ *
+ * @brief:              This MACRO will get the size of the mapped
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a uint32_t value.
+ *                        Failure: 0
+ *                        Success: Non-zero size of mapped register space
+ */
+#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index)           \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		0 : __soc_info->reg_map[__base_index].size)
+
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief:              Parse the DT and populate the common properties that
+ *                      are part of the soc_info structure - register map,
+ *                      clocks, regulators, irq, etc.
+ *
+ * @soc_info:           Device soc struct to be populated
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
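+
+/*
+ * Sketch of the device tree properties this helper parses. Node name,
+ * addresses, clock and regulator names below are illustrative only:
+ *
+ *	qcom,cam-dev@acb3000 {
+ *		cell-index = <0>;
+ *		reg = <0xacb3000 0x1000>;
+ *		reg-names = "cam_dev_base";
+ *		reg-cam-base = <0xb3000>;
+ *		interrupt-names = "cam_dev_irq";
+ *		regulator-names = "camss";
+ *		clock-names = "core_clk", "ahb_clk";
+ *		clock-rates = <384000000 0>;
+ *		src-clock-name = "core_clk";
+ *	};
+ */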
+
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief:              Request regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ * @handler:            Irq handler function pointer
+ * @irq_data:           Irq handler function CB data
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data);
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief:              Release regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief:              Enable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @enable_clocks:      Boolean flag:
+ *                          TRUE: Enable all clocks in soc_info now.
+ *                          FALSE: Don't enable clocks now. Driver will
+ *                                 enable independently.
+ * @enable_irq:         Boolean flag:
+ *                          TRUE: Enable IRQ in soc_info now.
+ *                          FALSE: Don't enable IRQ now. Driver will
+ *                                 enable independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq);
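+
+/*
+ * Typical probe-time ordering of these helpers (sketch only; the handler
+ * name cam_dev_irq_handler and the irq_data pointer are placeholders, and
+ * error handling is omitted):
+ *
+ *	soc_info->pdev = pdev;
+ *	rc = cam_soc_util_get_dt_properties(soc_info);
+ *	if (!rc)
+ *		rc = cam_soc_util_request_platform_resource(soc_info,
+ *			cam_dev_irq_handler, irq_data);
+ *	if (!rc)
+ *		rc = cam_soc_util_enable_platform_resource(soc_info,
+ *			true, true);
+ */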
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Disable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     Boolean flag:
+ *                          TRUE: Disable all clocks in soc_info now.
+ *                          FALSE: Don't disable clocks now. Driver will
+ *                                 disable independently.
+ * @disable_irq:        Boolean flag:
+ *                          TRUE: Disable IRQ in soc_info now.
+ *                          FALSE: Don't disable IRQ now. Driver will
+ *                                 disable independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
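+
+/*
+ * Matching teardown sketch (ordering assumed, not mandated by this API:
+ * disable the resources that were enabled, then release what was
+ * requested):
+ *
+ *	cam_soc_util_disable_platform_resource(soc_info, true, true);
+ *	cam_soc_util_release_platform_resource(soc_info);
+ */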
+
+/**
+ * cam_soc_util_clk_enable()
+ *
+ * @brief:              Enable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned ON
+ * @clk_name:           Clocks name associated with clk
+ * @clk_rate:           Clocks rate associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate);
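+
+/*
+ * Usage sketch for cam_soc_util_clk_enable(): a positive clk_rate sets
+ * that rate before enabling, NO_SET_RATE enables without touching the
+ * rate, and INIT_RATE re-applies the clock's current rate. The clock name
+ * and rate below are illustrative:
+ *
+ *	rc = cam_soc_util_clk_enable(clk, "core_clk", 384000000);
+ *	rc = cam_soc_util_clk_enable(clk, "ahb_clk", NO_SET_RATE);
+ */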
+
+/**
+ * cam_soc_util_clk_disable()
+ *
+ * @brief:              Disable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned OFF
+ * @clk_name:           Clocks name associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name);
+
+/**
+ * cam_soc_util_irq_enable()
+ *
+ * @brief:              Enable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_irq_disable()
+ *
+ * @brief:              Disable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_w()
+ *
+ * @brief:              Camera SOC util for register write
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_w_mb()
+ *
+ * @brief:              Camera SOC util for register write with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If this write also needs to be flushed,
+ *                      call wmb() independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w_mb(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r()
+ *
+ * @brief:              Camera SOC util for register read
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r_mb()
+ *
+ * @brief:              Camera SOC util for register read with memory barrier.
+ *                      Memory Barrier is only before the read to ensure the
+ *                      order. If ordering after this read is also needed,
+ *                      call rmb() independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r_mb(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief:              Camera SOC util for dumping a range of register
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Start register offset for the dump
+ * @size:               Size specifying the range for dump
+ *
+ * @return:             Success or Failure
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size);
+
+#endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index ef3846c..dd70794 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,12 +96,14 @@
  * @SDE_CAPS_R1_WB: MDSS V1.x WB block
  * @SDE_CAPS_R3_WB: MDSS V3.x WB block
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
+ * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
 	SDE_CAPS_R3_WB,
 	SDE_CAPS_R3_1P5_DOWNSCALE,
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
+	SDE_CAPS_SBUF_1,
 	SDE_CAPS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 10858b3..a3603da 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -532,6 +532,10 @@
 	if (!input)
 		dir = DMA_FROM_DEVICE;
 
+	data->sbuf = buffer->sbuf;
+	data->scid = buffer->scid;
+	data->writeback = buffer->writeback;
+
 	memset(planes, 0, sizeof(planes));
 
 	for (i = 0; i < buffer->plane_count; i++) {
@@ -539,6 +543,8 @@
 		planes[i].offset = buffer->planes[i].offset;
 		planes[i].buffer = buffer->planes[i].buffer;
 		planes[i].handle = buffer->planes[i].handle;
+		planes[i].addr = buffer->planes[i].addr;
+		planes[i].len = buffer->planes[i].len;
 	}
 
 	ret =  sde_mdp_data_get_and_validate_size(data, planes,
@@ -760,6 +766,9 @@
 	if (entry->item.flags & SDE_ROTATION_EXT_DMA_BUF)
 		flag |= SDE_ROT_EXT_DMA_BUF;
 
+	if (entry->item.flags & SDE_ROTATION_EXT_IOVA)
+		flag |= SDE_ROT_EXT_IOVA;
+
 	if (entry->item.flags & SDE_ROTATION_SECURE_CAMERA)
 		flag |= SDE_SECURE_CAMERA_SESSION;
 
@@ -800,6 +809,10 @@
 			entry->perf->wrot_limit != mgr->wrot_limit))
 		return true;
 
+	/* sbuf mode is exclusive and may impact queued entries */
+	if (!mgr->sbuf_ctx && entry->perf && entry->perf->config.output.sbuf)
+		return true;
+
 	return false;
 }
 
@@ -855,6 +868,9 @@
 				entry->item.session_id,
 				entry->item.sequence_id);
 		return sde_rotator_is_hw_idle(mgr, hw);
+	} else if (mgr->sbuf_ctx && mgr->sbuf_ctx != entry->private) {
+		SDEROT_DBG("wait until sbuf mode is off\n");
+		return false;
 	} else {
 		return (atomic_read(&hw->num_active) < hw->max_active);
 	}
@@ -907,6 +923,14 @@
 			entry->item.session_id, entry->item.sequence_id);
 	mgr->rdot_limit = entry->perf->rdot_limit;
 	mgr->wrot_limit = entry->perf->wrot_limit;
+
+	if (!mgr->sbuf_ctx && entry->perf->config.output.sbuf) {
+		SDEROT_DBG("acquire sbuf s:%d.%d\n", entry->item.session_id,
+				entry->item.sequence_id);
+		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id);
+		mgr->sbuf_ctx = entry->private;
+	}
+
 	return hw;
 }
 
@@ -1233,8 +1257,12 @@
 				(mgr->overhead.denom - max_fps *
 				mgr->overhead.numer));
 
+	/* use client provided clock if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->clk_rate = config->clk_rate;
+
 	/*
-	 * check for Override clock calcualtion
+	 * check for Override clock calculation
 	 */
 	if (rot_dev->min_rot_clk > perf->clk_rate)
 		perf->clk_rate = rot_dev->min_rot_clk;
@@ -1258,6 +1286,10 @@
 	if (rot_dev->min_bw > perf->bw)
 		perf->bw = rot_dev->min_bw;
 
+	/* use client provided bandwidth if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->bw = config->data_bw;
+
 	perf->rdot_limit = sde_mdp_get_ot_limit(
 			config->input.width, config->input.height,
 			config->input.format, config->frame_rate, true);
@@ -1560,7 +1592,11 @@
 	if ((in_fmt->is_yuv != out_fmt->is_yuv) ||
 		(in_fmt->pixel_mode != out_fmt->pixel_mode) ||
 		(in_fmt->unpack_tight != out_fmt->unpack_tight)) {
-		SDEROT_ERR("Rotator does not support CSC\n");
+		SDEROT_ERR(
+			"Rotator does not support CSC yuv:%d/%d pm:%d/%d ut:%d/%d\n",
+			in_fmt->is_yuv, out_fmt->is_yuv,
+			in_fmt->pixel_mode, out_fmt->pixel_mode,
+			in_fmt->unpack_tight, out_fmt->unpack_tight);
 		goto verify_error;
 	}
 
@@ -1887,8 +1923,9 @@
 
 		INIT_WORK(&entry->commit_work, sde_rotator_commit_handler);
 		INIT_WORK(&entry->done_work, sde_rotator_done_handler);
-		SDEROT_DBG("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
-			"dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
+		SDEROT_DBG(
+			"Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%x dst{%u,%u,%u,%u}f=%x session_id=%u\n",
+			item->wb_idx,
 			item->src_rect.x, item->src_rect.y,
 			item->src_rect.w, item->src_rect.h, item->input.format,
 			item->dst_rect.x, item->dst_rect.y,
@@ -1967,8 +2004,7 @@
 	struct sde_rot_entry_container *req, *req_next;
 
 	list_for_each_entry_safe(req, req_next, &private->req_list, list) {
-		if ((atomic_read(&req->pending_count) == 0) &&
-				(!req->retire_work && !req->retireq)) {
+		if ((atomic_read(&req->pending_count) == 0) && req->finished) {
 			list_del_init(&req->list);
 			devm_kfree(&mgr->pdev->dev, req);
 		}
@@ -2029,6 +2065,34 @@
 	return ret;
 }
 
+/*
+ * sde_rotator_commit_request - commit the request to hardware
+ * @mgr: pointer to rotator manager
+ * @private: pointer to per file context
+ * @req: pointer to rotation request
+ *
+ * This differs from sde_rotator_queue_request in that this
+ * function will wait until request is committed to hardware.
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
+	struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!mgr || !ctx || !req || !req->entries) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	sde_rotator_queue_request(mgr, ctx, req);
+
+	sde_rot_mgr_unlock(mgr);
+	for (i = 0; i < req->count; i++)
+		flush_work(&req->entries[i].commit_work);
+	sde_rot_mgr_lock(mgr);
+}
+
 static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private, u32 session_id)
 {
@@ -2139,7 +2203,13 @@
 	sde_rotator_update_clk(mgr);
 	sde_rotator_resource_ctrl(mgr, false);
 done:
-	SDEROT_DBG("Closed session id:%u", id);
+	if (mgr->sbuf_ctx == private) {
+		SDEROT_DBG("release sbuf session id:%u\n", id);
+		SDEROT_EVTLOG(id);
+		mgr->sbuf_ctx = NULL;
+	}
+
+	SDEROT_DBG("Closed session id:%u\n", id);
 	return 0;
 }
 
@@ -2183,6 +2253,11 @@
 		goto done;
 	}
 
+	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
+		SDEROT_ERR("too many sbuf sessions\n");
+		goto done;
+	}
+
 	SDEROT_DBG(
 		"reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u fps:%d clk:%lu, bw:%llu\n",
 		config->session_id, config->input.width, config->input.height,
@@ -2230,14 +2305,25 @@
 	return req;
 }
 
+void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rot_entry_container *req)
+{
+	if (!mgr || !private || !req) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	req->finished = true;
+}
+
 int sde_rotator_handle_request_common(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req,
-	struct sde_rotation_item *items)
+	struct sde_rot_entry_container *req)
 {
 	int ret;
 
-	if (!mgr || !private || !req || !items) {
+	if (!mgr || !private || !req) {
 		SDEROT_ERR("null parameters\n");
 		return -EINVAL;
 	}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 16eaae1..be36f42 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -62,22 +62,49 @@
 /* secure camera operation*/
 #define SDE_ROTATION_SECURE_CAMERA	0x40000
 
+/* use client mapped i/o virtual address */
+#define SDE_ROTATION_EXT_IOVA		0x80000
+
+/* use client provided clock/bandwidth parameters */
+#define SDE_ROTATION_EXT_PERF		0x100000
+
 /**********************************************************************
  * configuration structures
  **********************************************************************/
 
+/*
+ * struct sde_rotation_buf_info - input/output buffer configuration
+ * @width: width of buffer region to be processed
+ * @height: height of buffer region to be processed
+ * @format: pixel format of buffer
+ * @comp_ratio: compression ratio for the session
+ * @sbuf: true if buffer is streaming buffer
+ */
 struct sde_rotation_buf_info {
 	uint32_t width;
 	uint32_t height;
 	uint32_t format;
 	struct sde_mult_factor comp_ratio;
+	bool sbuf;
 };
 
+/*
+ * struct sde_rotation_config - rotation configuration for given session
+ * @session_id: identifier of the given session
+ * @input: input buffer information
+ * @output: output buffer information
+ * @frame_rate: session frame rate in fps
+ * @clk_rate: requested rotator clock rate if SDE_ROTATION_EXT_PERF is set
+ * @data_bw: requested data bus bandwidth if SDE_ROTATION_EXT_PERF is set
+ * @flags: configuration flags, e.g. rotation angle, flip, etc...
+ */
 struct sde_rotation_config {
 	uint32_t	session_id;
 	struct sde_rotation_buf_info	input;
 	struct sde_rotation_buf_info	output;
 	uint32_t	frame_rate;
+	uint64_t	clk_rate;
+	uint64_t	data_bw;
 	uint32_t	flags;
 };
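+
+/*
+ * Illustrative client-side use of SDE_ROTATION_EXT_PERF (values are
+ * hypothetical): the client supplies its own clock and bandwidth numbers
+ * instead of the rotator-calculated ones.
+ *
+ *	config.flags |= SDE_ROTATION_EXT_PERF;
+ *	config.clk_rate = 200000000;
+ *	config.data_bw = 800000000;
+ */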
 
@@ -106,10 +133,22 @@
 	SDE_ROTATOR_CLK_MAX
 };
 
+enum sde_rotator_trigger {
+	SDE_ROTATOR_TRIGGER_IMMEDIATE,
+	SDE_ROTATOR_TRIGGER_VIDEO,
+	SDE_ROTATOR_TRIGGER_COMMAND,
+};
+
 struct sde_rotation_item {
 	/* rotation request flag */
 	uint32_t	flags;
 
+	/* rotation trigger mode */
+	uint32_t	trigger;
+
+	/* prefill bandwidth in Bps */
+	uint64_t	prefill_bw;
+
 	/* Source crop rectangle */
 	struct sde_rect	src_rect;
 
@@ -191,6 +230,18 @@
 	struct sde_rot_hw_resource *hw;
 };
 
+/*
+ * struct sde_rot_entry_container - rotation request
+ * @list: list of active requests managed by rotator manager
+ * @flags: reserved
+ * @count: size of rotation entries
+ * @pending_count: count of entries pending completion
+ * @failed_count: count of entries failed completion
+ * @finished: true if client is finished with the request
+ * @retireq: workqueue to post completion notification
+ * @retire_work: work for completion notification
+ * @entries: array of rotation entries
+ */
 struct sde_rot_entry_container {
 	struct list_head list;
 	u32 flags;
@@ -199,12 +250,33 @@
 	atomic_t failed_count;
 	struct workqueue_struct *retireq;
 	struct work_struct *retire_work;
+	bool finished;
 	struct sde_rot_entry *entries;
 };
 
 struct sde_rot_mgr;
 struct sde_rot_file_private;
 
+/*
+ * struct sde_rot_entry - rotation entry
+ * @item: rotation item
+ * @commit_work: work descriptor for commit handler
+ * @done_work: work descriptor for done handler
+ * @commitq: pointer to commit handler rotator queue
+ * @fenceq: pointer to fence signaling rotator queue
+ * @doneq: pointer to done handler rotator queue
+ * @request: pointer to containing request
+ * @src_buf: descriptor of source buffer
+ * @dst_buf: descriptor of destination buffer
+ * @input_fence: pointer to input fence for when input content is available
+ * @output_fence: pointer to output fence for when output content is available
+ * @output_signaled: true if output fence of this entry has been signaled
+ * @dnsc_factor_w: calculated width downscale factor for this entry
+ * @dnsc_factor_h: calculated height downscale factor for this entry
+ * @perf: pointer to performance configuration associated with this entry
+ * @work_assigned: true if this item is assigned to h/w queue/unit
+ * @private: pointer to controlling session context
+ */
 struct sde_rot_entry {
 	struct sde_rotation_item item;
 	struct work_struct commit_work;
@@ -230,6 +302,18 @@
 	struct sde_rot_file_private *private;
 };
 
+/*
+ * struct sde_rot_perf - rotator session performance configuration
+ * @list: list of performance configuration under one session
+ * @config: current rotation configuration
+ * @clk_rate: current clock rate in Hz
+ * @bw: current bandwidth in byte per second
+ * @work_dis_lock: serialization lock for updating work distribution (not used)
+ * @work_distribution: work distribution among multiple hardware queue/unit
+ * @last_wb_idx: last queue/unit index, used to account for pre-distributed work
+ * @rdot_limit: read OT limit of this session
+ * @wrot_limit: write OT limit of this session
+ */
 struct sde_rot_perf {
 	struct list_head list;
 	struct sde_rotation_config config;
@@ -242,6 +326,14 @@
 	u32 wrot_limit;
 };
 
+/*
+ * struct sde_rot_file_private - rotator manager per session context
+ * @list: list of all session context
+ * @req_list: list of rotation request for this session
+ * @perf_list: list of performance configuration for this session (only one)
+ * @mgr: pointer to the controlling rotator manager
+ * @fenceq: pointer to rotator queue to signal when entry is done
+ */
 struct sde_rot_file_private {
 	struct list_head list;
 	struct list_head req_list;
@@ -250,6 +342,13 @@
 	struct sde_rot_queue *fenceq;
 };
 
+/*
+ * struct sde_rot_bus_data_type - rotator bus scaling configuration
+ * @bus_scale_pdata: pointer to bus scaling configuration table
+ * @bus_hdl: msm bus scaling handle
+ * @curr_bw_uc_idx: current usecase index into configuration table
+ * @curr_quota_val: current bandwidth request in byte per second
+ */
 struct sde_rot_bus_data_type {
 	struct msm_bus_scale_pdata *bus_scale_pdata;
 	u32 bus_hdl;
@@ -257,6 +356,35 @@
 	u64 curr_quota_val;
 };
 
+/*
+ * struct sde_rot_mgr - core rotator manager
+ * @lock: serialization lock to rotator manager functions
+ * @device_suspended: 0 if device is not suspended; non-zero suspended
+ * @pdev: pointer to controlling platform device
+ * @device: pointer to controlling device
+ * @queue_count: number of hardware queue/unit available
+ * @commitq: array of rotator commit queue corresponding to hardware queue
+ * @doneq: array of rotator done queue corresponding to hardware queue
+ * @file_list: list of all sessions managed by rotator manager
+ * @pending_close_bw_vote: bandwidth of closed sessions with pending work
+ * @data_bus: data bus configuration state
+ * @reg_bus: register bus configuration state
+ * @module_power: power/clock configuration state
+ * @regulator_enable: true if foot switch is enabled; false otherwise
+ * @res_ref_cnt: reference count of how many times resource is requested
+ * @rot_enable_clk_cnt: reference count of how many times clock is requested
+ * @rot_clk: array of rotator and periphery clocks
+ * @num_rot_clk: size of the rotator clock array
+ * @rdot_limit: current read OT limit
+ * @wrot_limit: current write OT limit
+ * @hwacquire_timeout: maximum wait time for hardware availability in msec
+ * @pixel_per_clk: rotator hardware performance in pixels per clock
+ * @fudge_factor: fudge factor for clock calculation
+ * @overhead: software overhead for offline rotation in msec
+ * @sbuf_ctx: pointer to sbuf session context
+ * @ops_xxx: function pointers of rotator HAL layer
+ * @hw_data: private handle of rotator HAL layer
+ */
 struct sde_rot_mgr {
 	struct mutex lock;
 	atomic_t device_suspended;
@@ -297,6 +425,8 @@
 	struct sde_mult_factor fudge_factor;
 	struct sde_mult_factor overhead;
 
+	struct sde_rot_file_private *sbuf_ctx;
+
 	int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
 			struct sde_rot_entry *entry);
 	int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
@@ -323,6 +453,8 @@
 			bool input);
 	int (*ops_hw_is_valid_pixfmt)(struct sde_rot_mgr *mgr, u32 pixfmt,
 			bool input);
+	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
+			int len);
 
 	void *hw_data;
 };
@@ -345,6 +477,15 @@
 	return 0;
 }
 
+static inline int sde_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	if (mgr && mgr->ops_hw_get_downscale_caps)
+		return mgr->ops_hw_get_downscale_caps(mgr, caps, len);
+
+	return 0;
+}
+
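The ops_hw_* members of sde_rot_mgr are optional HAL hooks; static inline wrappers such as sde_rotator_get_downscale_caps() above check for a missing implementation and fall back to returning 0, so callers never need to. As a minimal sketch of how a hardware layer might install the new downscale hook (the r3-style helper names below are hypothetical and not part of this change; only the ops_hw_get_downscale_caps member is real):

	/* Hypothetical HAL-side sketch: report supported downscale ratios. */
	static int sde_rotator_r3_get_downscale_caps(struct sde_rot_mgr *mgr,
			char *caps, int len)
	{
		/* capability string consumed by the inline rotator client */
		return snprintf(caps, len, "LINEAR/2/4/8");
	}

	static void sde_rotator_r3_init_ops(struct sde_rot_mgr *mgr)
	{
		/* install the optional hook; the wrappers tolerate it being NULL */
		mgr->ops_hw_get_downscale_caps = sde_rotator_r3_get_downscale_caps;
	}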
 static inline int __compare_session_item_rect(
 	struct sde_rotation_buf_info *s_rect,
 	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
@@ -380,61 +521,179 @@
 	return 0;
 }
 
+/*
+ * sde_rotator_core_init - initialize rotator manager for the given platform
+ *	device
+ * @pmgr: Pointer to pointer of the newly initialized rotator manager
+ * @pdev: Pointer to platform device
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 		struct platform_device *pdev);
 
+/*
+ * sde_rotator_core_destroy - destroy given rotator manager
+ * @mgr: Pointer to rotator manager
+ * return: none
+ */
 void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
 
+/*
+ * sde_rotator_session_open - open a new rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @pprivate: Pointer to pointer of the newly initialized per file session
+ * @session_id: identifier of the newly created session
+ * @queue: Pointer to fence queue of the new session
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_session_open(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private **pprivate, int session_id,
 	struct sde_rot_queue *queue);
 
+/*
+ * sde_rotator_session_close - close the given rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per file session
+ * @session_id: identifier of the session
+ * return: none
+ */
 void sde_rotator_session_close(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private, int session_id);
 
+/*
+ * sde_rotator_session_config - configure the given rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per file session
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_session_config(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_req_init - allocate a new request and initialize it with the
+ *	given array of rotation items
+ * @rot_dev: Pointer to rotator device
+ * @private: Pointer to rotator manager per file context
+ * @items: Pointer to array of rotation item
+ * @count: size of rotation item array
+ * @flags: rotation request flags
+ * return: Pointer to new rotation request if success; ERR_PTR otherwise
+ */
 struct sde_rot_entry_container *sde_rotator_req_init(
 	struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *private,
 	struct sde_rotation_item *items,
 	u32 count, u32 flags);
 
+/*
+ * sde_rotator_req_finish - notify manager that client is finished with the
+ *	given request and manager can release the request as required
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
+void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rot_entry_container *req);
+
+/*
+ * sde_rotator_handle_request_common - add the given request to rotator
+ *	manager and clean up completed requests
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_handle_request_common(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
-	struct sde_rot_entry_container *req,
-	struct sde_rotation_item *items);
+	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_queue_request - queue/schedule the given request for h/w commit
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
 void sde_rotator_queue_request(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
-void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
+/*
+ * sde_rotator_commit_request - queue/schedule the given request and wait
+ *	until h/w commit
+ * @mgr: Pointer to rotator manager
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_verify_config_all - verify given rotation configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_all(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_verify_config_input - verify rotation input configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_input(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_verify_config_output - verify rotation output configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_output(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_validate_request - validates given rotation request with
+ *	previous rotator configuration
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_clk_ctrl - enable/disable rotator clock with reference counting
+ * @mgr: Pointer to rotator manager
+ * @enable: true to enable clock; false to disable clock
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
 
+/*
+ * sde_rot_mgr_lock - serialization lock prior to rotator manager calls
+ * @mgr: Pointer to rotator manager
+ */
 static inline void sde_rot_mgr_lock(struct sde_rot_mgr *mgr)
 {
 	mutex_lock(&mgr->lock);
 }
 
+/*
+ * sde_rot_mgr_unlock - serialization unlock after rotator manager calls
+ * @mgr: Pointer to rotator manager
+ */
 static inline void sde_rot_mgr_unlock(struct sde_rot_mgr *mgr)
 {
 	mutex_unlock(&mgr->lock);
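Taken together, the kernel-doc above defines the core manager contract: hold the manager lock, open and configure a session, initialize a request from rotation items, hand it to the manager, then finish the request and close the session. A condensed, illustrative call sequence follows (the example_* wrapper and its arguments are assumptions, not part of this patch):

	static int example_core_session(struct sde_rot_mgr *mgr,
			struct sde_rotation_config *cfg,
			struct sde_rotation_item *items, u32 count,
			struct sde_rot_queue *queue)
	{
		struct sde_rot_file_private *priv;
		struct sde_rot_entry_container *req;
		int ret;

		sde_rot_mgr_lock(mgr);
		ret = sde_rotator_session_open(mgr, &priv, cfg->session_id, queue);
		if (ret)
			goto unlock;
		ret = sde_rotator_session_config(mgr, priv, cfg);
		if (ret)
			goto close;
		req = sde_rotator_req_init(mgr, priv, items, count, 0);
		if (IS_ERR_OR_NULL(req)) {
			ret = PTR_ERR(req);
			goto close;
		}
		ret = sde_rotator_handle_request_common(mgr, priv, req);
		if (!ret)
			sde_rotator_commit_request(mgr, priv, req);
		/* release the request back to the manager once the client is done */
		sde_rotator_req_finish(mgr, priv, req);
	close:
		sde_rotator_session_close(mgr, priv, cfg->session_id);
	unlock:
		sde_rot_mgr_unlock(mgr);
		return ret;
	}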
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index a41c450..da2705a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1283,6 +1283,13 @@
 		return NULL;
 	}
 
+	if (!debugfs_create_u32("disable_syscache", 0644,
+			debugfs_root, &rot_dev->disable_syscache)) {
+		SDEROT_ERR("fail create disable_syscache\n");
+		debugfs_remove_recursive(debugfs_root);
+		return NULL;
+	}
+
 	if (!debugfs_create_u32("streamoff_timeout", 0644,
 			debugfs_root, &rot_dev->streamoff_timeout)) {
 		SDEROT_ERR("fail create streamoff_timeout\n");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 6db0923..47f4cb0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -27,6 +27,7 @@
 #include <media/videobuf2-v4l2.h>
 #include <media/v4l2-mem2mem.h>
 
+#include "sde_rotator_inline.h"
 #include "sde_rotator_base.h"
 #include "sde_rotator_core.h"
 #include "sde_rotator_dev.h"
@@ -413,16 +414,15 @@
 	SDEDEV_DBG(rot_dev->dev, "start streaming s:%d t:%d\n",
 			ctx->session_id, q->type);
 
-	if (!IS_ERR_OR_NULL(ctx->request) ||
-				atomic_read(&ctx->command_pending))
+	if (!list_empty(&ctx->pending_list)) {
 		SDEDEV_ERR(rot_dev->dev,
 				"command pending error s:%d t:%d p:%d\n",
 				ctx->session_id, q->type,
-				atomic_read(&ctx->command_pending));
+				!list_empty(&ctx->pending_list));
+		return -EINVAL;
+	}
 
-	ctx->request = NULL;
 	ctx->abort_pending = 0;
-	atomic_set(&ctx->command_pending, 0);
 
 	return 0;
 }
@@ -443,18 +443,18 @@
 
 	SDEDEV_DBG(rot_dev->dev, "stop streaming s:%d t:%d p:%d\n",
 			ctx->session_id, q->type,
-			atomic_read(&ctx->command_pending));
+			!list_empty(&ctx->pending_list));
 	ctx->abort_pending = 1;
 	mutex_unlock(q->lock);
 	ret = wait_event_timeout(ctx->wait_queue,
-			(atomic_read(&ctx->command_pending) == 0),
+			list_empty(&ctx->pending_list),
 			msecs_to_jiffies(rot_dev->streamoff_timeout));
 	mutex_lock(q->lock);
 	if (!ret)
 		SDEDEV_ERR(rot_dev->dev,
 				"timeout to stream off s:%d t:%d p:%d\n",
 				ctx->session_id, q->type,
-				atomic_read(&ctx->command_pending));
+				!list_empty(&ctx->pending_list));
 
 	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
 
@@ -737,9 +737,7 @@
 			ctx->format_cap.fmt.pix.bytesperline,
 			ctx->format_cap.fmt.pix.sizeimage);
 	SPRINT("abort_pending=%d\n", ctx->abort_pending);
-	SPRINT("command_pending=%d\n", atomic_read(&ctx->command_pending));
-	SPRINT("submit_work=%d\n", work_busy(&ctx->submit_work));
-	SPRINT("retire_work=%d\n", work_busy(&ctx->retire_work));
+	SPRINT("command_pending=%d\n", !list_empty(&ctx->pending_list));
 	SPRINT("sequence=%u\n",
 		sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline));
 	SPRINT("timestamp=%u\n",
@@ -848,24 +846,26 @@
 }
 
 /*
- * sde_rotator_open - Rotator device open method.
- * @file: Pointer to file struct.
+ * sde_rotator_ctx_open - Rotator device open method.
+ * @rot_dev: Pointer to rotator device structure
+ * @file: Pointer to file struct (optional)
+ * return: Pointer to rotator context if success; error pointer otherwise.
  */
-static int sde_rotator_open(struct file *file)
+struct sde_rotator_ctx *sde_rotator_ctx_open(
+		struct sde_rotator_device *rot_dev, struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct video_device *video = video_devdata(file);
+	struct video_device *video = file ? video_devdata(file) : NULL;
 	struct sde_rotator_ctx *ctx;
 	struct v4l2_ctrl_handler *ctrl_handler;
 	char name[32];
-	int ret;
+	int i, ret;
 
 	if (atomic_read(&rot_dev->mgr->device_suspended))
-		return -EPERM;
+		return ERR_PTR(-EPERM);
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (mutex_lock_interruptible(&rot_dev->lock)) {
 		ret = -ERESTARTSYS;
@@ -873,6 +873,7 @@
 	}
 
 	ctx->rot_dev = rot_dev;
+	ctx->file = file;
 
 	/* Set context defaults */
 	ctx->session_id = rot_dev->session_id++;
@@ -883,7 +884,6 @@
 	ctx->vflip = 0;
 	ctx->rotate = 0;
 	ctx->secure = 0;
-	atomic_set(&ctx->command_pending, 0);
 	ctx->abort_pending = 0;
 	ctx->format_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	ctx->format_cap.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
@@ -898,18 +898,33 @@
 	ctx->crop_out.width = 640;
 	ctx->crop_out.height = 480;
 	init_waitqueue_head(&ctx->wait_queue);
-	INIT_WORK(&ctx->submit_work, sde_rotator_submit_handler);
-	INIT_WORK(&ctx->retire_work, sde_rotator_retire_handler);
+	spin_lock_init(&ctx->list_lock);
+	INIT_LIST_HEAD(&ctx->pending_list);
+	INIT_LIST_HEAD(&ctx->retired_list);
 
-	v4l2_fh_init(&ctx->fh, video);
-	file->private_data = &ctx->fh;
-	v4l2_fh_add(&ctx->fh);
+	for (i = 0 ; i < ARRAY_SIZE(ctx->requests); i++) {
+		struct sde_rotator_request *request = &ctx->requests[i];
 
-	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
-		ctx, sde_rotator_queue_init);
-	if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
-		ret = PTR_ERR(ctx->fh.m2m_ctx);
-		goto error_m2m_init;
+		INIT_WORK(&request->submit_work,
+				sde_rotator_submit_handler);
+		INIT_WORK(&request->retire_work,
+				sde_rotator_retire_handler);
+		request->ctx = ctx;
+		INIT_LIST_HEAD(&request->list);
+		list_add_tail(&request->list, &ctx->retired_list);
+	}
+
+	if (ctx->file) {
+		v4l2_fh_init(&ctx->fh, video);
+		file->private_data = &ctx->fh;
+		v4l2_fh_add(&ctx->fh);
+
+		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
+			ctx, sde_rotator_queue_init);
+		if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
+			ret = PTR_ERR(ctx->fh.m2m_ctx);
+			goto error_m2m_init;
+		}
 	}
 
 	ret = kobject_init_and_add(&ctx->kobj, &sde_rotator_fs_ktype,
@@ -954,33 +969,34 @@
 	sde_rot_mgr_unlock(rot_dev->mgr);
 
 	/* Create control */
-	ctrl_handler = &ctx->ctrl_handler;
-	v4l2_ctrl_handler_init(ctrl_handler, 4);
-	v4l2_ctrl_new_std(ctrl_handler,
+	if (ctx->file) {
+		ctrl_handler = &ctx->ctrl_handler;
+		v4l2_ctrl_handler_init(ctrl_handler, 4);
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure, NULL);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure_camera, NULL);
-	if (ctrl_handler->error) {
-		ret = ctrl_handler->error;
-		v4l2_ctrl_handler_free(ctrl_handler);
-		goto error_ctrl_handler;
+		if (ctrl_handler->error) {
+			ret = ctrl_handler->error;
+			v4l2_ctrl_handler_free(ctrl_handler);
+			goto error_ctrl_handler;
+		}
+		ctx->fh.ctrl_handler = ctrl_handler;
+		v4l2_ctrl_handler_setup(ctrl_handler);
 	}
-	ctx->fh.ctrl_handler = ctrl_handler;
-	v4l2_ctrl_handler_setup(ctrl_handler);
-
 	mutex_unlock(&rot_dev->lock);
 
 	SDEDEV_DBG(ctx->rot_dev->dev, "SDE v4l2 rotator open success\n");
 
 	ATRACE_BEGIN(ctx->kobj.name);
 
-	return 0;
+	return ctx;
 error_ctrl_handler:
 error_open_session:
 	sde_rot_mgr_unlock(rot_dev->mgr);
@@ -992,11 +1008,655 @@
 	kobject_put(&ctx->kobj);
 error_kobj_init:
 error_m2m_init:
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
+	if (ctx->file) {
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
 	mutex_unlock(&rot_dev->lock);
 error_lock:
 	kfree(ctx);
+	return ERR_PTR(ret);
+}
+
+/*
+ * sde_rotator_ctx_release - Rotator device release method.
+ * @ctx: Pointer to rotator context.
+ * @file: Pointer to file struct (optional)
+ * return: 0 if success; error code otherwise
+ */
+static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
+		struct file *file)
+{
+	struct sde_rotator_device *rot_dev = ctx->rot_dev;
+	u32 session_id = ctx->session_id;
+	struct list_head *curr, *next;
+
+	ATRACE_END(ctx->kobj.name);
+
+	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
+	mutex_lock(&rot_dev->lock);
+	if (ctx->file) {
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	}
+	mutex_unlock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id);
+	list_for_each_safe(curr, next, &ctx->pending_list) {
+		struct sde_rotator_request *request =
+			container_of(curr, struct sde_rotator_request, list);
+
+		SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n",
+				session_id);
+		cancel_work_sync(&request->submit_work);
+	}
+	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
+	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
+	list_for_each_safe(curr, next, &ctx->pending_list) {
+		struct sde_rotator_request *request =
+			container_of(curr, struct sde_rotator_request, list);
+
+		SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n",
+				session_id);
+		cancel_work_sync(&request->retire_work);
+	}
+	mutex_lock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
+	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
+	destroy_workqueue(ctx->work_queue.rot_work_queue);
+	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
+	kobject_put(&ctx->kobj);
+	if (ctx->file) {
+		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
+	kfree(ctx->vbinfo_out);
+	kfree(ctx->vbinfo_cap);
+	kfree(ctx);
+	mutex_unlock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
+	return 0;
+}
+
+/*
+ * sde_rotator_update_retire_sequence - update retired sequence of the context
+ *	referenced in the request, and wake up anyone waiting for the update event
+ * @request: Pointer to rotator request
+ */
+static void sde_rotator_update_retire_sequence(
+		struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rot_entry_container *req;
+
+	if (!request || !request->ctx) {
+		SDEROT_ERR("invalid parameters\n");
+		return;
+	}
+
+	ctx = request->ctx;
+	req = request->req;
+
+	if (req && req->entries && req->count)
+		ctx->retired_sequence_id =
+				req->entries[req->count - 1].item.sequence_id;
+
+	wake_up(&ctx->wait_queue);
+
+	SDEROT_DBG("update sequence s:%d.%d\n",
+				ctx->session_id, ctx->retired_sequence_id);
+}
+
+/*
+ * sde_rotator_retire_request - retire the given rotator request with
+ *	device mutex locked
+ * @request: Pointer to rotator request
+ */
+static void sde_rotator_retire_request(struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+
+	if (!request || !request->ctx) {
+		SDEROT_ERR("invalid parameters\n");
+		return;
+	}
+
+	ctx = request->ctx;
+
+	request->req = NULL;
+	request->committed = false;
+	spin_lock(&ctx->list_lock);
+	list_del_init(&request->list);
+	list_add_tail(&request->list, &ctx->retired_list);
+	spin_unlock(&ctx->list_lock);
+
+	SDEROT_DBG("retire request s:%d.%d\n",
+				ctx->session_id, ctx->retired_sequence_id);
+}
+
+/*
+ * sde_rotator_is_request_retired - Return true if given request has already retired
+ * @request: Pointer to rotator request
+ */
+static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rot_entry_container *req;
+	u32 sequence_id;
+	s32 retire_delta;
+
+	if (!request || !request->ctx || !request->req ||
+			!request->req->entries || !request->req->count)
+		return true;
+
+	ctx = request->ctx;
+	req = request->req;
+	sequence_id = req->entries[req->count - 1].item.sequence_id;
+
+	retire_delta = (s32) (ctx->retired_sequence_id - sequence_id);
+
+	SDEROT_DBG("sequence:%u/%u\n", sequence_id, ctx->retired_sequence_id);
+
+	return retire_delta >= 0;
+}
+
+/*
+ * sde_rotator_inline_open - open inline rotator session
+ * @pdev: Pointer to rotator platform device
+ * return: Pointer to new rotator session context if success; error pointer
+ *	otherwise
+ */
+void *sde_rotator_inline_open(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = sde_rotator_ctx_open(rot_dev, NULL);
+	if (IS_ERR_OR_NULL(ctx)) {
+		rc = PTR_ERR(ctx);
+		SDEROT_ERR("failed to open rotator context %d\n", rc);
+		goto rotator_open_error;
+	}
+
+	ctx->slice = llcc_slice_getd(rot_dev->dev, "rotator");
+	if (IS_ERR(ctx->slice)) {
+		rc = PTR_ERR(ctx->slice);
+		SDEROT_ERR("failed to get system cache %d\n", rc);
+		goto slice_getd_error;
+	}
+
+	if (!rot_dev->disable_syscache) {
+		rc = llcc_slice_activate(ctx->slice);
+		if (rc) {
+			SDEROT_ERR("failed to activate slice %d\n", rc);
+			goto activate_error;
+		}
+		SDEROT_DBG("scid %d size %zukb\n",
+				llcc_get_slice_id(ctx->slice),
+				llcc_get_slice_size(ctx->slice));
+	} else {
+		SDEROT_DBG("syscache bypassed\n");
+	}
+
+	SDEROT_EVTLOG(ctx->session_id, llcc_get_slice_id(ctx->slice),
+			llcc_get_slice_size(ctx->slice),
+			rot_dev->disable_syscache);
+
+	return ctx;
+
+activate_error:
+	llcc_slice_putd(ctx->slice);
+	ctx->slice = NULL;
+slice_getd_error:
+	sde_rotator_ctx_release(ctx, NULL);
+rotator_open_error:
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(sde_rotator_inline_open);
+
+int sde_rotator_inline_release(void *handle)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+
+	if (!handle) {
+		SDEROT_ERR("invalid rotator ctx\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	if (ctx->slice) {
+		if (!rot_dev->disable_syscache)
+			llcc_slice_deactivate(ctx->slice);
+		llcc_slice_putd(ctx->slice);
+		ctx->slice = NULL;
+	}
+
+	SDEROT_EVTLOG(ctx->session_id);
+
+	return sde_rotator_ctx_release(ctx, NULL);
+}
+EXPORT_SYMBOL(sde_rotator_inline_release);
+
+/*
+ * sde_rotator_inline_get_dst_pixfmt - determine output pixel format
+ * @pdev: Pointer to platform device
+ * @src_pixfmt: input pixel format
+ * @dst_pixfmt: Pointer to output pixel format (output)
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	return sde_rot_get_base_tilea5x_pixfmt(src_pixfmt, dst_pixfmt);
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_dst_pixfmt);
+
+/*
+ * sde_rotator_inline_get_downscale_caps - get scaling capability
+ * @pdev: Pointer to platform device
+ * @caps: string buffer for capability
+ * @len: length of string buffer
+ * return: length of capability string if success; error code otherwise
+ */
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *caps, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	rc = sde_rotator_get_downscale_caps(rot_dev->mgr, caps, len);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
+
+/*
+ * sde_rotator_inline_get_pixfmt_caps - get pixel format capability
+ * @pdev: Pointer to platform device
+ * @input: true to query input pixel formats; false for output pixel formats
+ * @pixfmts: array of pixel format buffer
+ * @len: length of pixel format buffer
+ * return: length of pixel format capability if success; error code otherwise
+ */
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmts, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	u32 i, pixfmt;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	for (i = 0;; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, input);
+		if (!pixfmt)
+			break;
+		if (pixfmts && i < len)
+			pixfmts[i] = pixfmt;
+	}
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return i;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
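Because the loop above only writes into pixfmts[] when a buffer is supplied, a caller can probe the count first with a NULL array and then allocate exactly enough space. A hedged caller-side sketch (the example_* helper is hypothetical, not part of this patch):

	static int example_query_output_pixfmts(struct platform_device *rot_pdev)
	{
		u32 *fmts;
		int count;

		/* first pass: count supported output formats */
		count = sde_rotator_inline_get_pixfmt_caps(rot_pdev, false, NULL, 0);
		if (count <= 0)
			return count;

		fmts = kcalloc(count, sizeof(*fmts), GFP_KERNEL);
		if (!fmts)
			return -ENOMEM;

		/* second pass: fill the array */
		count = sde_rotator_inline_get_pixfmt_caps(rot_pdev, false, fmts, count);
		/* ... consume fmts[0..count-1] ... */
		kfree(fmts);
		return count;
	}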
+
+/*
+ * sde_rotator_inline_commit - commit given rotator command
+ * @handle: Pointer to rotator context
+ * @cmd: Pointer to rotator command
+ * @cmd_type: command type - validate/prepare/commit/cleanup
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request = NULL;
+	struct sde_rot_entry_container *req = NULL;
+	ktime_t *ts;
+	u32 flags = 0;
+	int i, ret;
+
+	if (!handle || !cmd) {
+		SDEROT_ERR("invalid rotator handle/cmd\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	SDEROT_DBG(
+		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu wb:%d vid:%d cmd:%d\n",
+		ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt >> 0, cmd->src_pixfmt >> 8,
+		cmd->src_pixfmt >> 16, cmd->src_pixfmt >> 24,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt >> 0, cmd->dst_pixfmt >> 8,
+		cmd->dst_pixfmt >> 16, cmd->dst_pixfmt >> 24,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+	SDEROT_EVTLOG(ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
+			cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		struct sde_rotation_item item;
+		struct sde_rotator_statistics *stats = &rot_dev->stats;
+		int scid = llcc_get_slice_id(ctx->slice);
+
+		/* allocate slot for timestamp */
+		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+
+		if (cmd->rot90)
+			flags |= SDE_ROTATION_90;
+		if (cmd->hflip)
+			flags |= SDE_ROTATION_FLIP_LR;
+		if (cmd->vflip)
+			flags |= SDE_ROTATION_FLIP_UD;
+		if (cmd->secure)
+			flags |= SDE_ROTATION_SECURE;
+
+		flags |= SDE_ROTATION_EXT_PERF;
+
+		/* fill in item work structure */
+		memset(&item, 0, sizeof(struct sde_rotation_item));
+		item.flags = flags | SDE_ROTATION_EXT_IOVA;
+		item.trigger = cmd->video_mode ? SDE_ROTATOR_TRIGGER_VIDEO :
+				SDE_ROTATOR_TRIGGER_COMMAND;
+		item.prefill_bw = cmd->prefill_bw;
+		item.session_id = ctx->session_id;
+		item.sequence_id = cmd->sequence_id;
+		item.src_rect.x = cmd->src_rect_x;
+		item.src_rect.y = cmd->src_rect_y;
+		item.src_rect.w = cmd->src_rect_w;
+		item.src_rect.h = cmd->src_rect_h;
+		item.input.width = cmd->src_width;
+		item.input.height = cmd->src_height;
+		item.input.format = cmd->src_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.input.planes[i].addr = cmd->src_addr[i];
+			item.input.planes[i].len = cmd->src_len[i];
+			item.input.planes[i].fd = -1;
+		}
+		item.input.plane_count = cmd->src_planes;
+		item.input.comp_ratio.numer = 1;
+		item.input.comp_ratio.denom = 1;
+
+		item.output.width = cmd->dst_rect_x + cmd->dst_rect_w;
+		item.output.height = cmd->dst_rect_y + cmd->dst_rect_h;
+		item.dst_rect.x = cmd->dst_rect_x;
+		item.dst_rect.y = cmd->dst_rect_y;
+		item.dst_rect.w = cmd->dst_rect_w;
+		item.dst_rect.h = cmd->dst_rect_h;
+		item.output.sbuf = true;
+		item.output.scid = scid;
+		item.output.writeback = cmd->dst_writeback;
+		item.output.format = cmd->dst_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.output.planes[i].addr = cmd->dst_addr[i];
+			item.output.planes[i].len = cmd->dst_len[i];
+			item.output.planes[i].fd = -1;
+		}
+		item.output.plane_count = cmd->dst_planes;
+		item.output.comp_ratio.numer = 1;
+		item.output.comp_ratio.denom = 1;
+		item.sequence_id = ++(ctx->commit_sequence_id);
+		item.ts = ts;
+
+		req = sde_rotator_req_init(rot_dev->mgr, ctx->private,
+				&item, 1, 0);
+		if (IS_ERR_OR_NULL(req)) {
+			SDEROT_ERR("fail allocate request s:%d\n",
+					ctx->session_id);
+			ret = -ENOMEM;
+			goto error_init_request;
+		}
+	}
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
+		struct sde_rotation_config rotcfg;
+
+		memset(&rotcfg, 0, sizeof(struct sde_rotation_config));
+		rotcfg.flags = flags;
+		rotcfg.frame_rate = cmd->fps;
+		rotcfg.clk_rate = cmd->clkrate;
+		rotcfg.data_bw = cmd->data_bw;
+		rotcfg.session_id = ctx->session_id;
+		rotcfg.input.width = cmd->src_rect_w;
+		rotcfg.input.height = cmd->src_rect_h;
+		rotcfg.input.format = cmd->src_pixfmt;
+		rotcfg.input.comp_ratio.numer = 1;
+		rotcfg.input.comp_ratio.denom = 1;
+		rotcfg.output.width = cmd->dst_rect_w;
+		rotcfg.output.height = cmd->dst_rect_h;
+		rotcfg.output.format = cmd->dst_pixfmt;
+		rotcfg.output.comp_ratio.numer = 1;
+		rotcfg.output.comp_ratio.denom = 1;
+		rotcfg.output.sbuf = true;
+
+		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
+			ret = sde_rotator_session_config(rot_dev->mgr,
+					ctx->private, &rotcfg);
+			if (ret) {
+				SDEROT_ERR("fail session config s:%d\n",
+						ctx->session_id);
+				goto error_session_config;
+			}
+
+			ctx->rotcfg = rotcfg;
+		}
+
+		ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private,
+				req);
+		if (ret) {
+			SDEROT_ERR("fail validate request s:%d\n",
+					ctx->session_id);
+			goto error_validate_request;
+		}
+
+		devm_kfree(rot_dev->dev, req);
+		req = NULL;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			/* should not happen */
+			ret = -ENOMEM;
+			SDEROT_ERR("no free request s:%d\n", ctx->session_id);
+			goto error_retired_list;
+		}
+
+		request->req = req;
+
+		spin_lock(&ctx->list_lock);
+		list_del_init(&request->list);
+		list_add_tail(&request->list, &ctx->pending_list);
+		spin_unlock(&ctx->list_lock);
+
+		ts = req->entries[0].item.ts;
+		if (ts) {
+			ts[SDE_ROTATOR_TS_SRCQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_DSTQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
+		} else {
+			SDEROT_ERR("invalid stats timestamp\n");
+		}
+		req->retireq = ctx->work_queue.rot_work_queue;
+		req->retire_work = &request->retire_work;
+
+		trace_rot_entry_fence(
+			ctx->session_id, cmd->sequence_id,
+			req->entries[0].item.wb_idx,
+			req->entries[0].item.flags,
+			req->entries[0].item.input.format,
+			req->entries[0].item.input.width,
+			req->entries[0].item.input.height,
+			req->entries[0].item.src_rect.x,
+			req->entries[0].item.src_rect.y,
+			req->entries[0].item.src_rect.w,
+			req->entries[0].item.src_rect.h,
+			req->entries[0].item.output.format,
+			req->entries[0].item.output.width,
+			req->entries[0].item.output.height,
+			req->entries[0].item.dst_rect.x,
+			req->entries[0].item.dst_rect.y,
+			req->entries[0].item.dst_rect.w,
+			req->entries[0].item.dst_rect.h);
+
+		ret = sde_rotator_handle_request_common(
+				rot_dev->mgr, ctx->private, req);
+		if (ret) {
+			SDEROT_ERR("fail handle request s:%d\n",
+					ctx->session_id);
+			goto error_handle_request;
+		}
+
+		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
+
+		request->committed = true;
+
+		/* save request in private handle */
+		cmd->priv_handle = request;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		req = request->req;
+
+		if (request->committed) {
+			/* wait until request is finished */
+			sde_rot_mgr_unlock(rot_dev->mgr);
+			ret = wait_event_timeout(ctx->wait_queue,
+				sde_rotator_is_request_retired(request),
+				msecs_to_jiffies(rot_dev->streamoff_timeout));
+			if (!ret)
+				SDEROT_ERR("timeout w/o retire s:%d\n",
+						ctx->session_id);
+			else if (ret == 1)
+				SDEROT_ERR("timeout w/ retire s:%d\n",
+						ctx->session_id);
+
+			sde_rot_mgr_lock(rot_dev->mgr);
+		}
+
+		sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
+		sde_rotator_retire_request(request);
+	}
+
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return 0;
+
+error_handle_request:
+	sde_rotator_update_retire_sequence(request);
+	sde_rotator_retire_request(request);
+error_retired_list:
+error_validate_request:
+error_session_config:
+	devm_kfree(rot_dev->dev, req);
+error_invalid_handle:
+error_init_request:
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return ret;
+}
+EXPORT_SYMBOL(sde_rotator_inline_commit);
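The inline API is driven per frame in three steps: VALIDATE checks the configuration without committing, COMMIT queues the rotation and stores the request handle in cmd->priv_handle, and CLEANUP waits for retirement and releases the request. An illustrative caller flow (a display driver, for example; cmd is assumed to be populated already and the example_* helper is not part of this patch):

	static int example_inline_frame(struct platform_device *rot_pdev,
			struct sde_rotator_inline_cmd *cmd)
	{
		void *hndl;
		int ret;

		hndl = sde_rotator_inline_open(rot_pdev);
		if (IS_ERR_OR_NULL(hndl))
			return PTR_ERR(hndl);

		ret = sde_rotator_inline_commit(hndl, cmd,
				SDE_ROTATOR_INLINE_CMD_VALIDATE);
		if (!ret)
			ret = sde_rotator_inline_commit(hndl, cmd,
					SDE_ROTATOR_INLINE_CMD_COMMIT);
		if (!ret)
			ret = sde_rotator_inline_commit(hndl, cmd,
					SDE_ROTATOR_INLINE_CMD_CLEANUP);

		sde_rotator_inline_release(hndl);
		return ret;
	}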
+
+/*
+ * sde_rotator_open - Rotator device open method.
+ * @file: Pointer to file struct.
+ */
+static int sde_rotator_open(struct file *file)
+{
+	struct sde_rotator_device *rot_dev = video_drvdata(file);
+	struct sde_rotator_ctx *ctx;
+	int ret = 0;
+
+	ctx = sde_rotator_ctx_open(rot_dev, file);
+	if (IS_ERR_OR_NULL(ctx)) {
+		ret = PTR_ERR(ctx);
+		SDEDEV_DBG(rot_dev->dev, "failed to open %d\n", ret);
+	}
+
 	return ret;
 }
 
@@ -1006,45 +1666,10 @@
  */
 static int sde_rotator_release(struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
 	struct sde_rotator_ctx *ctx =
 			sde_rotator_ctx_from_fh(file->private_data);
-	u32 session_id = ctx->session_id;
 
-	ATRACE_END(ctx->kobj.name);
-
-	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
-	mutex_lock(&rot_dev->lock);
-	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
-	SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d w:%x\n",
-			session_id, work_busy(&ctx->submit_work));
-	cancel_work_sync(&ctx->submit_work);
-	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d w:%x\n",
-			session_id, work_busy(&ctx->retire_work));
-	cancel_work_sync(&ctx->retire_work);
-	mutex_lock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
-	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	destroy_workqueue(ctx->work_queue.rot_work_queue);
-	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
-	kobject_put(&ctx->kobj);
-	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
-	kfree(ctx->vbinfo_out);
-	kfree(ctx->vbinfo_cap);
-	kfree(ctx);
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
-	return 0;
+	return sde_rotator_ctx_release(ctx, file);
 }
 
 /*
@@ -1109,14 +1734,30 @@
 	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_mdp_format_params *fmt;
-	u32 pixfmt;
+	u32 i, index, pixfmt;
+	bool found = false;
 
-	pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, f->index, false);
-	if (!pixfmt)
-		return -EINVAL;
+	for (i = 0, index = 0; index <= f->index; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, false);
+		if (!pixfmt)
+			return -EINVAL;
 
-	fmt = sde_get_format_params(pixfmt);
-	if (!fmt)
+		fmt = sde_get_format_params(pixfmt);
+		if (!fmt)
+			return -EINVAL;
+
+		if (sde_mdp_is_private_format(fmt))
+			continue;
+
+		if (index == f->index) {
+			found = true;
+			break;
+		}
+
+		index++;
+	}
+
+	if (!found)
 		return -EINVAL;
 
 	f->pixelformat = pixfmt;
@@ -1137,14 +1778,30 @@
 	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_mdp_format_params *fmt;
-	u32 pixfmt;
+	u32 i, index, pixfmt;
+	bool found = false;
 
-	pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, f->index, true);
-	if (!pixfmt)
-		return -EINVAL;
+	for (i = 0, index = 0; index <= f->index; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, true);
+		if (!pixfmt)
+			return -EINVAL;
 
-	fmt = sde_get_format_params(pixfmt);
-	if (!fmt)
+		fmt = sde_get_format_params(pixfmt);
+		if (!fmt)
+			return -EINVAL;
+
+		if (sde_mdp_is_private_format(fmt))
+			continue;
+
+		if (index == f->index) {
+			found = true;
+			break;
+		}
+
+		index++;
+	}
+
+	if (!found)
 		return -EINVAL;
 
 	f->pixelformat = pixfmt;
@@ -1516,6 +2173,7 @@
 				ctx->session_id, buf_type, ret);
 			return ret;
 		}
+		ctx->rotcfg = config;
 	}
 
 	ret = v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, buf_type);
@@ -1992,8 +2650,10 @@
 	struct vb2_v4l2_buffer *dst_buf;
 	struct sde_rotator_ctx *ctx;
 	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request;
 
-	ctx = container_of(work, struct sde_rotator_ctx, retire_work);
+	request = container_of(work, struct sde_rotator_request, retire_work);
+	ctx = request->ctx;
 
 	if (!ctx || !ctx->rot_dev) {
 		SDEROT_ERR("null context/device\n");
@@ -2008,15 +2668,16 @@
 	if (ctx->abort_pending) {
 		SDEDEV_DBG(rot_dev->dev, "abort command in retire s:%d\n",
 				ctx->session_id);
-		ctx->request = ERR_PTR(-EINTR);
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		mutex_unlock(&rot_dev->lock);
 		return;
 	}
 
-	if (rot_dev->early_submit) {
-		if (IS_ERR_OR_NULL(ctx->request)) {
+	if (!ctx->file) {
+		sde_rotator_update_retire_sequence(request);
+	} else if (rot_dev->early_submit) {
+		if (IS_ERR_OR_NULL(request->req)) {
 			/* fail pending request or something wrong */
 			SDEDEV_ERR(rot_dev->dev,
 					"pending request fail in retire s:%d\n",
@@ -2037,9 +2698,8 @@
 				src_buf, dst_buf);
 		}
 
-		ctx->request = NULL;
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
 		v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
@@ -2052,9 +2712,11 @@
  * @ctx: Pointer rotator context.
  * @src_buf: Pointer to Vb2 source buffer.
  * @dst_buf: Pointer to Vb2 destination buffer.
+ * @request: Pointer to rotator request
  */
 static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx,
-	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf)
+	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf,
+	struct sde_rotator_request *request)
 {
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_rotation_item item;
@@ -2173,17 +2835,17 @@
 	}
 
 	req->retireq = ctx->work_queue.rot_work_queue;
-	req->retire_work = &ctx->retire_work;
+	req->retire_work = &request->retire_work;
 
 	ret = sde_rotator_handle_request_common(
-			rot_dev->mgr, ctx->private, req, &item);
+			rot_dev->mgr, ctx->private, req);
 	if (ret) {
 		SDEDEV_ERR(rot_dev->dev, "fail handle request\n");
 		goto error_handle_request;
 	}
 
 	sde_rotator_queue_request(rot_dev->mgr, ctx->private, req);
-	ctx->request = req;
+	request->req = req;
 
 	return 0;
 error_handle_request:
@@ -2191,7 +2853,7 @@
 error_init_request:
 error_fence_wait:
 error_null_buffer:
-	ctx->request = ERR_PTR(ret);
+	request->req = NULL;
 	return ret;
 }
 
@@ -2207,11 +2869,13 @@
 	struct sde_rotator_device *rot_dev;
 	struct vb2_v4l2_buffer *src_buf;
 	struct vb2_v4l2_buffer *dst_buf;
+	struct sde_rotator_request *request;
 	int ret;
 
-	ctx = container_of(work, struct sde_rotator_ctx, submit_work);
+	request = container_of(work, struct sde_rotator_request, submit_work);
+	ctx = request->ctx;
 
-	if (!ctx->rot_dev) {
+	if (!ctx || !ctx->rot_dev) {
 		SDEROT_ERR("null device\n");
 		return;
 	}
@@ -2223,9 +2887,8 @@
 	if (ctx->abort_pending) {
 		SDEDEV_DBG(rot_dev->dev, "abort command in submit s:%d\n",
 				ctx->session_id);
-		ctx->request = ERR_PTR(-EINTR);
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		mutex_unlock(&rot_dev->lock);
 		return;
 	}
@@ -2235,7 +2898,7 @@
 	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	sde_rot_mgr_lock(rot_dev->mgr);
 	ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-			&dst_buf->vb2_buf);
+			&dst_buf->vb2_buf, request);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	if (ret) {
 		SDEDEV_ERR(rot_dev->dev,
@@ -2258,6 +2921,7 @@
 	struct sde_rotator_device *rot_dev;
 	struct vb2_v4l2_buffer *src_buf;
 	struct vb2_v4l2_buffer *dst_buf;
+	struct sde_rotator_request *request;
 	int ret;
 
 	if (!ctx || !ctx->rot_dev) {
@@ -2269,8 +2933,11 @@
 	SDEDEV_DBG(rot_dev->dev, "device run s:%d\n", ctx->session_id);
 
 	if (rot_dev->early_submit) {
+		request = list_first_entry_or_null(&ctx->pending_list,
+				struct sde_rotator_request, list);
+
 		/* pending request mode, check for completion */
-		if (IS_ERR_OR_NULL(ctx->request)) {
+		if (!request || IS_ERR_OR_NULL(request->req)) {
 			/* pending request fails or something wrong. */
 			SDEDEV_ERR(rot_dev->dev,
 				"pending request fail in device run s:%d\n",
@@ -2278,19 +2945,19 @@
 			rot_dev->stats.fail_count++;
 			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
 			goto error_process_buffers;
-		} else if (!atomic_read(&ctx->request->pending_count)) {
+
+		} else if (!atomic_read(&request->req->pending_count)) {
 			/* pending request completed. signal done. */
 			int failed_count =
-				atomic_read(&ctx->request->failed_count);
+				atomic_read(&request->req->failed_count);
 			SDEDEV_DBG(rot_dev->dev,
 				"pending request completed in device run s:%d\n",
 				ctx->session_id);
 
 			/* disconnect request (will be freed by core layer) */
 			sde_rot_mgr_lock(rot_dev->mgr);
-			ctx->request->retireq = NULL;
-			ctx->request->retire_work = NULL;
-			ctx->request = NULL;
+			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
+					request->req);
 			sde_rot_mgr_unlock(rot_dev->mgr);
 
 			if (failed_count) {
@@ -2314,8 +2981,8 @@
 				goto error_process_buffers;
 			}
 
-			atomic_dec(&ctx->command_pending);
-			wake_up(&ctx->wait_queue);
+			sde_rotator_update_retire_sequence(request);
+			sde_rotator_retire_request(request);
 			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
 			v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
@@ -2327,22 +2994,33 @@
 
 			/* disconnect request (will be freed by core layer) */
 			sde_rot_mgr_lock(rot_dev->mgr);
-			ctx->request->retireq = NULL;
-			ctx->request->retire_work = NULL;
-			ctx->request = ERR_PTR(-EIO);
+			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
+					request->req);
 			sde_rot_mgr_unlock(rot_dev->mgr);
 
 			goto error_process_buffers;
 		}
 	} else {
-		/* no pending request. submit buffer the usual way. */
-		atomic_inc(&ctx->command_pending);
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			SDEDEV_ERR(rot_dev->dev,
+				"no free request in device run s:%d\n",
+				ctx->session_id);
+			goto error_retired_list;
+		}
 
+		spin_lock(&ctx->list_lock);
+		list_del_init(&request->list);
+		list_add_tail(&request->list, &ctx->pending_list);
+		spin_unlock(&ctx->list_lock);
+
+		/* no pending request. submit buffer the usual way. */
 		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 		if (!src_buf || !dst_buf) {
 			SDEDEV_ERR(rot_dev->dev,
-				"null buffer in device run s:%d sb:%p db:%p\n",
+				"null buffer in device run s:%d sb:%pK db:%pK\n",
 				ctx->session_id,
 				src_buf, dst_buf);
 			goto error_empty_buffer;
@@ -2350,13 +3028,12 @@
 
 		sde_rot_mgr_lock(rot_dev->mgr);
 		ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-				&dst_buf->vb2_buf);
+				&dst_buf->vb2_buf, request);
 		sde_rot_mgr_unlock(rot_dev->mgr);
 		if (ret) {
 			SDEDEV_ERR(rot_dev->dev,
 				"fail process buffer in device run s:%d\n",
 				ctx->session_id);
-			ctx->request = ERR_PTR(ret);
 			rot_dev->stats.fail_count++;
 			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
 			goto error_process_buffers;
@@ -2366,8 +3043,9 @@
 	return;
 error_process_buffers:
 error_empty_buffer:
-	atomic_dec(&ctx->command_pending);
-	wake_up(&ctx->wait_queue);
+error_retired_list:
+	sde_rotator_update_retire_sequence(request);
+	sde_rotator_retire_request(request);
 	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 	if (src_buf)
@@ -2406,6 +3084,7 @@
 {
 	struct sde_rotator_ctx *ctx = priv;
 	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request;
 	int ret = 0;
 
 	if (!ctx || !ctx->rot_dev) {
@@ -2416,26 +3095,43 @@
 	rot_dev = ctx->rot_dev;
 	SDEDEV_DBG(rot_dev->dev, "job ready s:%d\n", ctx->session_id);
 
+	request = list_first_entry_or_null(&ctx->pending_list,
+			struct sde_rotator_request, list);
+
 	if (!rot_dev->early_submit) {
 		/* always ready in normal mode. */
 		ret = 1;
-	} else if (IS_ERR(ctx->request)) {
+	} else if (request && IS_ERR_OR_NULL(request->req)) {
 		/* if pending request fails, forward to device run state. */
 		SDEDEV_DBG(rot_dev->dev,
 				"pending request fail in job ready s:%d\n",
 				ctx->session_id);
 		ret = 1;
-	} else if (!ctx->request) {
+	} else if (list_empty(&ctx->pending_list)) {
 		/* if no pending request, submit a new request. */
 		SDEDEV_DBG(rot_dev->dev,
 				"submit job s:%d sc:%d dc:%d p:%d\n",
 				ctx->session_id,
 				v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
 				v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx),
-				atomic_read(&ctx->command_pending));
-		atomic_inc(&ctx->command_pending);
-		queue_work(ctx->work_queue.rot_work_queue, &ctx->submit_work);
-	} else if (!atomic_read(&ctx->request->pending_count)) {
+				!list_empty(&ctx->pending_list));
+
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			/* should not happen */
+			SDEDEV_ERR(rot_dev->dev,
+					"no free request in job ready s:%d\n",
+					ctx->session_id);
+		} else {
+			spin_lock(&ctx->list_lock);
+			list_del_init(&request->list);
+			list_add_tail(&request->list, &ctx->pending_list);
+			spin_unlock(&ctx->list_lock);
+			queue_work(ctx->work_queue.rot_work_queue,
+					&request->submit_work);
+		}
+	} else if (request && !atomic_read(&request->req->pending_count)) {
 		/* if pending request completed, forward to device run state */
 		SDEDEV_DBG(rot_dev->dev,
 				"pending request completed in job ready s:%d\n",
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index a46c0b5..100ce27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,8 @@
 #include <linux/iommu.h>
 #include <linux/dma-buf.h>
 #include <linux/msm-bus.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-ctrls.h>
@@ -36,6 +38,9 @@
 #define SDE_ROTATOR_NUM_EVENTS		4096
 #define SDE_ROTATOR_NUM_TIMESTAMPS	SDE_ROTATOR_TS_MAX
 
+/* maximum number of outstanding requests per ctx session */
+#define SDE_ROTATOR_REQUEST_MAX		2
+
 struct sde_rotator_device;
 struct sde_rotator_ctx;
 
@@ -80,9 +85,28 @@
 };
 
 /*
+ * struct sde_rotator_request - device layer rotation request
+ * @list: list head for submit/retire list
+ * @submit_work: submit work structure
+ * @retire_work: retire work structure
+ * @req: Pointer to core layer rotator manager request
+ * @ctx: Pointer to parent context
+ * @committed: true if request committed to hardware
+ */
+struct sde_rotator_request {
+	struct list_head list;
+	struct work_struct submit_work;
+	struct work_struct retire_work;
+	struct sde_rot_entry_container *req;
+	struct sde_rotator_ctx *ctx;
+	bool committed;
+};
+
+/*
  * struct sde_rotator_ctx - Structure contains per open file handle context.
  * @kobj: kernel object of this context
  * @rot_dev: Pointer to rotator device.
+ * @file: Pointer to device file handle
  * @fh: V4l2 file handle.
  * @ctrl_handler: control handler
  * @format_cap: Current capture format.
@@ -95,22 +119,27 @@
  * @vflip: vertical flip (1-flip)
  * @rotate: rotation angle (0,90,180,270)
  * @secure: Non-secure (0) / Secure processing
- * @command_pending: Number of pending transaction in h/w
  * @abort_pending: True if abort is requested for async handling.
  * @nbuf_cap: Number of requested buffer for capture queue
  * @nbuf_out: Number of requested buffer for output queue
  * @fence_cap: Fence info for each requested capture buffer
  * @fence_out: Fence info for each requested output buffer
  * @wait_queue: Wait queue for signaling end of job
- * @submit_work: Work structure for submitting work
- * @retire_work: Work structure for retiring work
  * @work_queue: work queue for submit and retire processing
- * @request: current service request
  * @private: Pointer to session private information
+ * @slice: Pointer to system cache slice descriptor
+ * @commit_sequence_id: last committed sequence id
+ * @retired_sequence_id: last retired sequence id
+ * @list_lock: lock for pending/retired list
+ * @pending_list: list of pending request
+ * @retired_list: list of retired/free request
+ * @requests: static allocation of free requests
+ * @rotcfg: current core rotation configuration
  */
 struct sde_rotator_ctx {
 	struct kobject kobj;
 	struct sde_rotator_device *rot_dev;
+	struct file *file;
 	struct v4l2_fh fh;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_format format_cap;
@@ -124,18 +153,22 @@
 	s32 rotate;
 	s32 secure;
 	s32 secure_camera;
-	atomic_t command_pending;
 	int abort_pending;
 	int nbuf_cap;
 	int nbuf_out;
 	struct sde_rotator_vbinfo *vbinfo_cap;
 	struct sde_rotator_vbinfo *vbinfo_out;
 	wait_queue_head_t wait_queue;
-	struct work_struct submit_work;
-	struct work_struct retire_work;
 	struct sde_rot_queue work_queue;
-	struct sde_rot_entry_container *request;
 	struct sde_rot_file_private *private;
+	struct llcc_slice_desc *slice;
+	u32 commit_sequence_id;
+	u32 retired_sequence_id;
+	spinlock_t list_lock;
+	struct list_head pending_list;
+	struct list_head retired_list;
+	struct sde_rotator_request requests[SDE_ROTATOR_REQUEST_MAX];
+	struct sde_rotation_config rotcfg;
 };
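Each context pre-allocates SDE_ROTATOR_REQUEST_MAX request slots in requests[] and recycles them between retired_list (free) and pending_list (in flight) under list_lock. A minimal sketch of the acquire step, mirroring the logic in sde_rotator_device_run() and sde_rotator_inline_commit() (the helper name is illustrative only):

	static struct sde_rotator_request *example_acquire_request(
			struct sde_rotator_ctx *ctx)
	{
		struct sde_rotator_request *request;

		request = list_first_entry_or_null(&ctx->retired_list,
				struct sde_rotator_request, list);
		if (!request)
			return NULL;	/* all SDE_ROTATOR_REQUEST_MAX slots are busy */

		spin_lock(&ctx->list_lock);
		list_del_init(&request->list);
		list_add_tail(&request->list, &ctx->pending_list);
		spin_unlock(&ctx->list_lock);

		return request;
	}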
 
 /*
@@ -160,6 +193,7 @@
  * @pdev: Pointer to platform device.
  * @drvdata: Pointer to driver data.
  * @early_submit: flag enable job submission in ready state.
+ * @disable_syscache: true to disable system cache
  * @mgr: Pointer to core rotator manager.
  * @mdata: Pointer to common rotator data/resource.
  * @session_id: Next context session identifier
@@ -180,6 +214,7 @@
 	struct platform_device *pdev;
 	const void *drvdata;
 	u32 early_submit;
+	u32 disable_syscache;
 	struct sde_rot_mgr *mgr;
 	struct sde_rot_data_type *mdata;
 	u32 session_id;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 3b36b6b..c78c513 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,9 +31,9 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2) },		\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_5BIT,			\
-			[C0_G_Y] = COLOR_6BIT,			\
-			[C1_B_Cb] = COLOR_5BIT,			\
+			[C2_R_Cr] = SDE_COLOR_5BIT,		\
+			[C0_G_Y] = SDE_COLOR_6BIT,		\
+			[C1_B_Cb] = SDE_COLOR_5BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -53,9 +53,9 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2) },		\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -76,10 +76,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_8BIT,		\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C3_ALPHA] = SDE_COLOR_8BIT,		\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -88,9 +88,9 @@
 		.format = (fmt),				\
 		.is_yuv = 1,					\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.alpha_enable = 0,				\
 		.unpack_tight = 1,				\
@@ -143,10 +143,10 @@
 		.frame_format = SDE_MDP_FMT_LINEAR,		\
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_ALPHA_1BIT,		\
-			[C2_R_Cr] = COLOR_5BIT,			\
-			[C0_G_Y] = COLOR_5BIT,			\
-			[C1_B_Cb] = COLOR_5BIT,			\
+			[C3_ALPHA] = SDE_COLOR_ALPHA_1BIT,	\
+			[C2_R_Cr] = SDE_COLOR_5BIT,		\
+			[C0_G_Y] = SDE_COLOR_5BIT,		\
+			[C1_B_Cb] = SDE_COLOR_5BIT,		\
 		},						\
 		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
 	}
@@ -166,10 +166,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_ALPHA_4BIT,		\
-			[C2_R_Cr] = COLOR_4BIT,			\
-			[C0_G_Y] = COLOR_4BIT,			\
-			[C1_B_Cb] = COLOR_4BIT,			\
+			[C3_ALPHA] = SDE_COLOR_ALPHA_4BIT,	\
+			[C2_R_Cr] = SDE_COLOR_4BIT,		\
+			[C0_G_Y] = SDE_COLOR_4BIT,		\
+			[C1_B_Cb] = SDE_COLOR_4BIT,		\
 		},						\
 		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
 	}
@@ -190,10 +190,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_10BIT,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_8BIT,		\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C3_ALPHA] = SDE_COLOR_8BIT,		\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -283,6 +283,240 @@
 			.tile_width = 48,
 		},
 	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
+			"SDE/RGBA_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_TILE,
+			"SDE/RGBX_1010102102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102_TILE,
+			"SDE/BGRA_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102_TILE,
+			"SDE/BGRX_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010_TILE,
+			"SDE/ARGB_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010_TILE,
+			"SDE/XRGB_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010_TILE,
+			"SDE/ABGR_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010_TILE,
+			"SDE/XBGR_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+			"Y_CRCB_H2V2_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 8,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+			"Y_CBCR_H2V2_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 8,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_ABGR_8888_TILE,
+			"SDE/ABGR_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_XRGB_8888_TILE,
+			"SDE/XRGB_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_ARGB_8888_TILE,
+			"SDE/ARGB_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_TILE,
+			"SDE/RGBA_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_TILE,
+			"SDE/RGBX_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_BGRA_8888_TILE,
+			"SDE/BGRA_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_BGRX_8888_TILE,
+			"SDE/BGRX_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_XBGR_8888_TILE,
+			"SDE/XBGR_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
 };
 
 static struct sde_mdp_format_params sde_mdp_format_map[] = {
@@ -543,3 +777,93 @@
 
 	return 0;
 }
+
+/*
+ * sde_rot_get_base_tilea5x_pixfmt - get base a5x tile format of given source format
+ * @src_pixfmt: source pixel format to be converted
+ * @dst_pixfmt: pointer to base a5x tile pixel format
+ * return: 0 on success; error code otherwise
+ */
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	int rc = 0;
+
+	if (!dst_pixfmt) {
+		SDEROT_ERR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	switch (src_pixfmt) {
+	case SDE_PIX_FMT_Y_CBCR_H2V2:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CRCB_H2V2:
+	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_8888:
+	case SDE_PIX_FMT_RGBA_8888_UBWC:
+	case SDE_PIX_FMT_RGBA_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_8888:
+	case SDE_PIX_FMT_RGBX_8888_UBWC:
+	case SDE_PIX_FMT_RGBX_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_8888:
+	case SDE_PIX_FMT_ARGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_8888:
+	case SDE_PIX_FMT_XRGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_8888:
+	case SDE_PIX_FMT_ABGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_8888:
+	case SDE_PIX_FMT_XBGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_2101010:
+	case SDE_PIX_FMT_ARGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_2101010:
+	case SDE_PIX_FMT_XRGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_2101010:
+	case SDE_PIX_FMT_ABGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010:
+	case SDE_PIX_FMT_XBGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102:
+	case SDE_PIX_FMT_BGRA_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102:
+	case SDE_PIX_FMT_BGRX_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+		break;
+	default:
+		SDEROT_ERR("invalid src pixel format %c%c%c%c\n",
+				src_pixfmt >> 0, src_pixfmt >> 8,
+				src_pixfmt >> 16, src_pixfmt >> 24);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
index bdd16a9..5bb6198 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
@@ -17,6 +17,27 @@
 #include <linux/types.h>
 #include <media/msm_sde_rotator.h>
 
+/* Internal rotator pixel formats */
+#define SDE_PIX_FMT_RGBA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '0')
+#define SDE_PIX_FMT_RGBX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '1')
+#define SDE_PIX_FMT_BGRA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '2')
+#define SDE_PIX_FMT_BGRX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '3')
+#define SDE_PIX_FMT_ARGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '4')
+#define SDE_PIX_FMT_XRGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '5')
+#define SDE_PIX_FMT_ABGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '6')
+#define SDE_PIX_FMT_XBGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '7')
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '8')
+#define SDE_PIX_FMT_Y_CRCB_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '9')
+#define SDE_PIX_FMT_ARGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'A')
+#define SDE_PIX_FMT_XRGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'B')
+#define SDE_PIX_FMT_ABGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'C')
+#define SDE_PIX_FMT_XBGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'D')
+#define SDE_PIX_FMT_BGRA_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'E')
+#define SDE_PIX_FMT_BGRX_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'F')
+#define SDE_PIX_FMT_RGBA_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '0')
+#define SDE_PIX_FMT_RGBX_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '1')
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE	v4l2_fourcc('Q', 'T', '1', '2')
+
 #define SDE_ROT_MAX_PLANES		4
 
 #define UBWC_META_MACRO_W_H		16
@@ -27,12 +48,12 @@
  * expected by the HW programming.
  */
 enum {
-	COLOR_4BIT,
-	COLOR_5BIT,
-	COLOR_6BIT,
-	COLOR_8BIT,
-	COLOR_ALPHA_1BIT = 0,
-	COLOR_ALPHA_4BIT = 1,
+	SDE_COLOR_4BIT,
+	SDE_COLOR_5BIT,
+	SDE_COLOR_6BIT,
+	SDE_COLOR_8BIT,
+	SDE_COLOR_ALPHA_1BIT = 0,
+	SDE_COLOR_ALPHA_4BIT = 1,
 };
 
 #define C3_ALPHA	3	/* alpha */
@@ -69,6 +90,10 @@
 	SDE_MDP_CHROMA_420
 };
 
+enum sde_mdp_format_flag_type {
+	SDE_MDP_FORMAT_FLAG_PRIVATE = BIT(0)
+};
+
 struct sde_mdp_format_params {
 	u32 format;
 	const char *description;
@@ -104,6 +129,8 @@
 
 int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
 
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt);
+
 static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
 {
 	return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A4X);
@@ -158,4 +185,19 @@
 {
 	return fmt && fmt->is_yuv;
 }
+
+static inline bool sde_mdp_is_rgb_format(struct sde_mdp_format_params *fmt)
+{
+	return !sde_mdp_is_yuv_format(fmt);
+}
+
+static inline bool sde_mdp_is_private_format(struct sde_mdp_format_params *fmt)
+{
+	return fmt && (fmt->flag & SDE_MDP_FORMAT_FLAG_PRIVATE);
+}
+
+static inline int sde_mdp_format_blk_size(struct sde_mdp_format_params *fmt)
+{
+	return sde_mdp_is_tp10_format(fmt) ? 96 : 128;
+}
 #endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
new file mode 100644
index 0000000..ec89785
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_INLINE_H__
+#define __SDE_ROTATOR_INLINE_H__
+
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/platform_device.h>
+
+#include "sde_rotator_formats.h"
+
+#define SDE_ROTATOR_INLINE_PLANE_MAX	4
+
+/*
+ * enum sde_rotator_inline_cmd_type - inline rotator command stages
+ * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
+ * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
+ */
+enum sde_rotator_inline_cmd_type {
+	SDE_ROTATOR_INLINE_CMD_VALIDATE,
+	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_CLEANUP,
+};
+
+/**
+ * sde_rotator_inline_cmd - inline rotation command
+ * @sequence_id: unique command sequence identifier
+ * @video_mode: true if video interface is connected
+ * @fps: frame rate in frame-per-second
+ * @rot90: rotate 90 counterclockwise
+ * @hflip: horizontal flip prior to rotation
+ * @vflip: vertical flip prior to rotation
+ * @secure: true if buffer is in secure domain
+ * @prefill_bw: prefill bandwidth in Bps
+ * @clkrate: clock rate in Hz
+ * @data_bw: data bus bandwidth in Bps
+ * @src_addr: source buffer i/o virtual address
+ * @src_len: source i/o buffer length
+ * @src_planes: source plane number
+ * @src_pixfmt: v4l2 fourcc pixel format of source buffer
+ * @src_width: width of source buffer
+ * @src_height: height of source buffer
+ * @src_rect_x: roi x coordinate of source buffer
+ * @src_rect_y: roi y coordinate of source buffer
+ * @src_rect_w: roi width of source buffer
+ * @src_rect_h: roi height of source buffer
+ * @dst_addr: destination buffer i/o virtual address
+ * @dst_len: destination i/o buffer length
+ * @dst_planes: destination plane number
+ * @dst_pixfmt: v4l2 fourcc pixel format of destination buffer
+ * @dst_rect_x: roi x coordinate of destination buffer
+ * @dst_rect_y: roi y coordinate of destination buffer
+ * @dst_rect_w: roi width of destination buffer
+ * @dst_rect_h: roi height of destination buffer
+ * @dst_writeback: true if cache writeback is required
+ * @priv_handle: private handle of rotator session
+ */
+struct sde_rotator_inline_cmd {
+	u32 sequence_id;
+	bool video_mode;
+	u32 fps;
+	bool rot90;
+	bool hflip;
+	bool vflip;
+	bool secure;
+	u64 prefill_bw;
+	u64 clkrate;
+	u64 data_bw;
+	dma_addr_t src_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_planes;
+	u32 src_pixfmt;
+	u32 src_width;
+	u32 src_height;
+	u32 src_rect_x;
+	u32 src_rect_y;
+	u32 src_rect_w;
+	u32 src_rect_h;
+	dma_addr_t dst_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_planes;
+	u32 dst_pixfmt;
+	u32 dst_rect_x;
+	u32 dst_rect_y;
+	u32 dst_rect_w;
+	u32 dst_rect_h;
+	bool dst_writeback;
+	void *priv_handle;
+};
+
+void *sde_rotator_inline_open(struct platform_device *pdev);
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt);
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *downscale_caps, int len);
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmt, int len);
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type);
+int sde_rotator_inline_release(void *handle);
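+
+/*
+ * Typical call sequence for an inline rotation client (a sketch only;
+ * the exact sequencing around display kickoff is assumed):
+ *
+ *	void *hdl = sde_rotator_inline_open(pdev);
+ *	... populate struct sde_rotator_inline_cmd cmd ...
+ *	sde_rotator_inline_commit(hdl, &cmd, SDE_ROTATOR_INLINE_CMD_VALIDATE);
+ *	sde_rotator_inline_commit(hdl, &cmd, SDE_ROTATOR_INLINE_CMD_COMMIT);
+ *	... display commit / frame done ...
+ *	sde_rotator_inline_commit(hdl, &cmd, SDE_ROTATOR_INLINE_CMD_CLEANUP);
+ *	sde_rotator_inline_release(hdl);
+ */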
+
+#endif /* __SDE_ROTATOR_INLINE_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 0512083..645baea 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -41,17 +41,23 @@
 /* traffic shaping clock ticks = finish_time x 19.2MHz */
 #define TRAFFIC_SHAPE_CLKTICK_14MS   268800
 #define TRAFFIC_SHAPE_CLKTICK_12MS   230400
+#define TRAFFIC_SHAPE_VSYNC_CLK      19200000
 
 /* XIN mapping */
 #define XIN_SSPP		0
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
+#define KOFF_TIMEOUT		(42 * 32)
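+/* timeout is now in milliseconds; converted via msecs_to_jiffies() at wait sites */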
+
+/* default stream buffer headroom in lines */
+#define DEFAULT_SBUF_HEADROOM	20
 
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
 	do { \
+		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGWRITE | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (data); \
@@ -59,6 +65,8 @@
 
 #define SDE_REGDMA_MODIFY(p, off, mask, data) \
 	do { \
+		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGMODIFY | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (mask); \
@@ -67,6 +75,8 @@
 
 #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
 	do { \
+		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
+				(u32)(len));\
 		*p++ = REGDMA_OP_BLKWRITE_INC | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (len); \
@@ -74,18 +84,23 @@
 
 #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
 	do { \
+		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
 		*(p) = (data); \
 		(p)++; \
 	} while (0)
 
 /* Macro for directly accessing mapped registers */
 #define SDE_ROTREG_WRITE(base, off, data) \
-	writel_relaxed(data, (base + (off)))
+	do { \
+		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
+				, (u32)(data));\
+		writel_relaxed(data, (base + (off))); \
+	} while (0)
 
 #define SDE_ROTREG_READ(base, off) \
 	readl_relaxed(base + (off))
 
-static u32 sde_hw_rotator_input_pixfmts[] = {
+static u32 sde_hw_rotator_v3_inpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -145,7 +160,7 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
-static u32 sde_hw_rotator_output_pixfmts[] = {
+static u32 sde_hw_rotator_v3_outpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -205,6 +220,162 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
+static u32 sde_hw_rotator_v4_inpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	SDE_PIX_FMT_Y_CB_CR_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_GH2V2,
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	SDE_PIX_FMT_YCBYCR_H2V1,
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	SDE_PIX_FMT_ARGB_2101010,
+	SDE_PIX_FMT_XRGB_2101010,
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	SDE_PIX_FMT_ABGR_2101010,
+	SDE_PIX_FMT_XBGR_2101010,
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
+static u32 sde_hw_rotator_v4_outpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	/* SDE_PIX_FMT_YCBYCR_H2V1 */
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	/* SDE_PIX_FMT_ARGB_2101010 */
+	/* SDE_PIX_FMT_XRGB_2101010 */
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	/* SDE_PIX_FMT_ABGR_2101010 */
+	/* SDE_PIX_FMT_XBGR_2101010 */
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
 static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
 	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
 	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
@@ -284,6 +455,30 @@
 }
 
 /**
+ * sde_hw_rotator_update_swts - update software timestamp with given value
+ * @rot: Pointer to hw rotator
+ * @ctx: Pointer to rotator context
+ * @swts: new software timestamp
+ * @return: new combined swts
+ */
+static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
+		struct sde_hw_rotator_context *ctx, u32 swts)
+{
+	u32 mask = SDE_REGDMA_SWTS_MASK;
+
+	swts &= SDE_REGDMA_SWTS_MASK;
+	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
+		swts <<= SDE_REGDMA_SWTS_SHIFT;
+		mask <<= SDE_REGDMA_SWTS_SHIFT;
+	}
+
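+	/* read-modify-write so the other queue's timestamp field is preserved */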
+	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
+	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
+
+	return swts;
+}
+
+/**
  * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
  *				Also, clear rotator/regdma irq status.
  * @rot: Pointer to hw rotator
@@ -376,6 +571,13 @@
 	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
 		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
 		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
+
+	SDEROT_ERR(
+		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE0),
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE1));
 }
 
 /**
@@ -540,6 +742,17 @@
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
+	/*
+	 * initialize start control trigger selection first
+	 */
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (ctx->sbuf_mode)
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
+					ctx->start_ctrl);
+		else
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
+	}
+
 	/* source image setup */
 	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
 			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
@@ -618,6 +831,9 @@
 	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
 		src_format |= BIT(14); /* UNPACK_DX_FORMAT */
 
+	if (rot->solid_fill)
+		src_format |= BIT(22); /* SOLID_FILL */
+
 	/* SRC_FORMAT */
 	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
 
@@ -652,6 +868,10 @@
 			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
 	}
 
+	if (rot->solid_fill)
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
+				rot->constant_color);
+
 	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
 			fetch_blocksize |
 			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
@@ -697,6 +917,7 @@
 		struct sde_hw_rot_wb_cfg *cfg,
 		u32 flags)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
 	u32 *wrptr;
 	u32 pack = 0;
@@ -784,17 +1005,19 @@
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
-	/* write config setup for bank configration */
+	/* write config setup for bank configuration */
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
 			(ctx->rot->highest_bank & 0x3) << 8);
 
-	if (flags & SDE_ROT_FLAG_ROT_90)
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
-	else
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
+				ctx->sys_cache_mode);
 
-	/* setup traffic shaper for 4k 30fps content */
-	if (ctx->is_traffic_shaping) {
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
+			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
+
+	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
+	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
 		u32 bw;
 
 		/*
@@ -813,10 +1036,16 @@
 			bw *= fmt->bpp;
 
 		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
+
+		/* use prefill bandwidth instead if specified */
+		if (cfg->prefill_bw)
+			bw = DIV_ROUND_UP(cfg->prefill_bw,
+					TRAFFIC_SHAPE_VSYNC_CLK);
+
 		if (bw > 0xFF)
 			bw = 0xFF;
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
-				BIT(31) | bw);
+				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
 		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
 	} else {
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
@@ -852,7 +1081,7 @@
 		sde_hw_rotator_enable_irq(rot);
 	}
 
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
 
 	/* Update command queue write ptr */
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
@@ -942,13 +1171,15 @@
 	u32  enableInt;
 	u32  swts = 0;
 	u32  mask = 0;
+	u32  trig_sel;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/*
 	 * Last ROT command must be ROT_START before REGDMA start
 	 */
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
+
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
 
 	/*
@@ -959,6 +1190,8 @@
 	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
 				REGDMA_RAM_REGDMA_CMD_RAM));
 	enableInt = ((ctx->timestamp & 1) + 1) << 30;
+	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
+			REGDMA_CMD_TRIG_SEL_SW_START;
 
 	SDEROT_DBG(
 		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
@@ -972,34 +1205,39 @@
 	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp;
 		mask = ~SDE_REGDMA_SWTS_MASK;
 	} else {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
 		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
 	}
 
-	/* Write timestamp after previous rotator job finished */
-	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
-	offset += length;
-	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
-	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
+	/* timestamp update can only be used in offline multi-context mode */
+	if (!ctx->sbuf_mode) {
+		/* Write timestamp after previous rotator job finished */
+		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
+		offset += length;
+		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
+		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
 
-	/* ensure command packet is issue before the submit command */
-	wmb();
+		/* ensure command packet is issued before the submit command */
+		wmb();
 
-	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
-	} else {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
+		if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		} else {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		}
 	}
 
 	/* Update command queue write ptr */
@@ -1027,7 +1265,7 @@
 	if (rot->irq_num >= 0) {
 		SDEROT_DBG("Wait for Rotator completion\n");
 		rc = wait_for_completion_timeout(&ctx->rot_comp,
-					KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
 		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
@@ -1098,7 +1336,7 @@
 				ctx, ctx->timestamp);
 		rc = wait_event_timeout(ctx->regdma_waitq,
 				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
-				KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		ATRACE_INT("sde_rot_done", 0);
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
@@ -1506,13 +1744,15 @@
  * @rot: Pointer to rotator hw
  * @hw: Pointer to rotator resource
  * @session_id: Session identifier of this context
+ * @sbuf_mode: true if stream buffer is requested
  *
  * This function allocates a new rotator context for the given session id.
  */
 static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
 		struct sde_hw_rotator *rot,
 		struct sde_rot_hw_resource *hw,
-		u32    session_id)
+		u32    session_id,
+		bool   sbuf_mode)
 {
 	struct sde_hw_rotator_context *ctx;
 
@@ -1530,6 +1770,8 @@
 	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
 	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
 	ctx->is_secure  = false;
+	ctx->sbuf_mode  = sbuf_mode;
+	INIT_LIST_HEAD(&ctx->list);
 
 	ctx->regdma_base  = rot->cmd_wr_ptr[ctx->q_id]
 		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
@@ -1547,10 +1789,11 @@
 	sde_hw_rotator_put_ctx(ctx);
 
 	SDEROT_DBG(
-		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	return ctx;
 }
@@ -1567,10 +1810,11 @@
 		return;
 
 	SDEROT_DBG(
-		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	/* Clear rotator context from lookup purpose */
 	sde_hw_rotator_clr_ctx(ctx);
@@ -1599,6 +1843,7 @@
 	u32 safe_lut = 0;	/* applicable for realtime client only */
 	u32 flags = 0;
 	struct sde_rotation_item *item;
+	int ret;
 
 	if (!hw || !entry) {
 		SDEROT_ERR("null hw resource/entry\n");
@@ -1609,12 +1854,65 @@
 	rot = resinfo->rot;
 	item = &entry->item;
 
-	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
+	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
+			item->output.sbuf);
 	if (!ctx) {
 		SDEROT_ERR("Failed allocating rotator context!!\n");
 		return -EINVAL;
 	}
 
+	/* save entry for debugging purposes */
+	ctx->last_entry = entry;
+
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (entry->dst_buf.sbuf) {
+			u32 op_mode;
+
+			if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_COMMAND)
+				ctx->start_ctrl = (rot->cmd_trigger << 4);
+			else if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_VIDEO)
+				ctx->start_ctrl = (rot->vid_trigger << 4);
+			else
+				ctx->start_ctrl = 0;
+
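+			/*
+			 * enable system cache usage with the requested SCID;
+			 * 0x5 additionally requests cache writeback (bit
+			 * layout assumed from ROT_WB_SYS_CACHE_MODE usage)
+			 */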
+			ctx->sys_cache_mode = BIT(15) |
+					((item->output.scid & 0x1f) << 8) |
+					(item->output.writeback ? 0x5 : 0);
+
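+			/*
+			 * assumed: BIT(4) selects stream buffer (inline) mode
+			 * in ROTTOP_OP_MODE; headroom is programmed in lines
+			 */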
+			ctx->op_mode = BIT(4) |
+				((ctx->rot->sbuf_headroom & 0xff) << 8);
+
+			/* detect transition to inline mode */
+			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
+					ROTTOP_OP_MODE) >> 4) & 0x3;
+			if (!op_mode) {
+				u32 status;
+
+				status = SDE_ROTREG_READ(rot->mdss_base,
+						ROTTOP_STATUS);
+				if (status & BIT(0)) {
+					SDEROT_ERR("rotator busy 0x%x\n",
+							status);
+					sde_hw_rotator_dump_status(rot);
+					SDEROT_EVTLOG_TOUT_HANDLER("rot",
+							"vbif_dbg_bus",
+							"panic");
+				}
+			}
+
+		} else {
+			ctx->start_ctrl = BIT(0);
+			ctx->sys_cache_mode = 0;
+			ctx->op_mode = 0;
+		}
+	} else {
+		ctx->start_ctrl = BIT(0);
+	}
+
+	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
+
 	if (rot->reset_hw_ts) {
 		SDEROT_EVTLOG(rot->last_hw_ts);
 		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
@@ -1645,7 +1943,8 @@
 	sspp_cfg.fmt = sde_get_format_params(item->input.format);
 	if (!sspp_cfg.fmt) {
 		SDEROT_ERR("null format\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto error;
 	}
 	sspp_cfg.src_rect = &item->src_rect;
 	sspp_cfg.data = &entry->src_buf;
@@ -1673,6 +1972,7 @@
 
 	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
 	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
+	wb_cfg.prefill_bw = item->prefill_bw;
 
 	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
 
@@ -1778,6 +2078,10 @@
 			BIT(XIN_WRITEBACK));
 
 	return 0;
+
+error:
+	sde_hw_rotator_free_rotctx(rot, ctx);
+	return ret;
 }
 
 /*
@@ -1887,6 +2191,7 @@
 
 	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
 
+	/* features exposed via rotator top h/w version */
 	if (hw_version != SDE_ROT_TYPE_V1_0) {
 		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
 		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE,  mdata->sde_caps_map);
@@ -1901,6 +2206,27 @@
 	mdata->regdump = sde_rot_r3_regdump;
 	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
 	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
+
+	/* features exposed via mdss h/w version */
+	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
+		SDEROT_DBG("Supporting sys cache inline rotation\n");
+		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
+		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
+		rot->downscale_caps =
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	} else {
+		rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
+		rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
+			"LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	}
+
 	return 0;
 }
 
@@ -1989,6 +2315,23 @@
 			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
 			goto done_isr_handle;
 		}
+
+		/*
+		 * Timestamp packet is not available in sbuf mode.
+		 * Simulate timestamp update in the handler instead.
+		 */
+		if (!list_empty(&rot->sbuf_ctx[q_id])) {
+			ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
+					struct sde_hw_rotator_context, list);
+			if (ctx) {
+				ts = ctx->timestamp;
+				sde_hw_rotator_update_swts(rot, ctx, ts);
+				SDEROT_DBG("update swts:0x%X\n", ts);
+			} else {
+				SDEROT_ERR("invalid swts ctx\n");
+			}
+		}
+
 		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
 
 		/*
@@ -2076,6 +2419,12 @@
 	entry->dnsc_factor_w = 0;
 	entry->dnsc_factor_h = 0;
 
+	if (item->output.sbuf &&
+			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		SDEROT_ERR("stream buffer not supported\n");
+		return -EINVAL;
+	}
+
 	if ((src_w != dst_w) || (src_h != dst_h)) {
 		if ((src_w % dst_w) || (src_h % dst_h)) {
 			SDEROT_DBG("non integral scale not support\n");
@@ -2183,6 +2532,9 @@
 
 	SPRINT("downscale_compression=1\n");
 
+	if (hw_data->downscale_caps)
+		SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
+
 #undef SPRINT
 	return cnt;
 }
@@ -2253,14 +2605,23 @@
 static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
 		int index, bool input)
 {
+	struct sde_hw_rotator *rot;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return 0;
+	}
+
+	rot = mgr->hw_data;
+
 	if (input) {
-		if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
-			return sde_hw_rotator_input_pixfmts[index];
+		if ((index < rot->num_inpixfmt) && rot->inpixfmts)
+			return rot->inpixfmts[index];
 		else
 			return 0;
 	} else {
-		if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
-			return sde_hw_rotator_output_pixfmts[index];
+		if ((index < rot->num_outpixfmt) && rot->outpixfmts)
+			return rot->outpixfmts[index];
 		else
 			return 0;
 	}
@@ -2275,22 +2636,69 @@
 static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
 		bool input)
 {
+	struct sde_hw_rotator *rot;
+	u32 *pixfmts;
+	u32 num_pixfmt;
 	int i;
 
-	if (input) {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
-			if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
-				return true;
-	} else {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
-			if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
-				return true;
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return false;
 	}
 
+	rot = mgr->hw_data;
+
+	if (input) {
+		pixfmts = rot->inpixfmts;
+		num_pixfmt = rot->num_inpixfmt;
+	} else {
+		pixfmts = rot->outpixfmts;
+		num_pixfmt = rot->num_outpixfmt;
+	}
+
+	if (!pixfmts || !num_pixfmt) {
+		SDEROT_ERR("invalid pixel format tables\n");
+		return false;
+	}
+
+	for (i = 0; i < num_pixfmt; i++)
+		if (pixfmts[i] == pixfmt)
+			return true;
+
 	return false;
 }
 
 /*
+ * sde_hw_rotator_get_downscale_caps - get scaling capability string
+ * @mgr: Pointer to rotator manager
+ * @caps: Pointer to capability string buffer; NULL to query required length
+ * @len: length of capability string buffer
+ * return: length of capability string
+ */
+static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	struct sde_hw_rotator *rot;
+	int rc = 0;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	rot = mgr->hw_data;
+
+	if (rot->downscale_caps) {
+		if (caps)
+			rc = snprintf(caps, len, "%s", rot->downscale_caps);
+		else
+			rc = strlen(rot->downscale_caps);
+	}
+
+	return rc;
+}
+
+/*
  * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  * @hw_data: Pointer to rotator hw
  * @dev: Pointer to platform device
@@ -2329,6 +2737,16 @@
 		hw_data->highest_bank = data;
 	}
 
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-sbuf-headroom", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+	} else {
+		SDEROT_DBG("set sbuf headroom to %d\n", data);
+		hw_data->sbuf_headroom = data;
+	}
+
 	return ret;
 }
 
@@ -2356,6 +2774,9 @@
 
 	rot->mdss_base = mdata->sde_io.base;
 	rot->pdev      = mgr->pdev;
+	rot->koff_timeout = KOFF_TIMEOUT;
+	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
+	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
 
 	/* Assign ops */
 	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
@@ -2372,6 +2793,7 @@
 	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
 	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
 	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
+	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
 
 	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
 	if (ret)
@@ -2425,8 +2847,10 @@
 					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
 	}
 
-	atomic_set(&rot->timestamp[0], 0);
-	atomic_set(&rot->timestamp[1], 0);
+	for (i = 0; i < ROT_QUEUE_MAX; i++) {
+		atomic_set(&rot->timestamp[i], 0);
+		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
+	}
 
 	ret = sde_rotator_hw_rev_init(rot);
 	if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
index 987e61c..da67527 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,5 +44,41 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u32("koff_timeout", 0644,
+			debugfs_root, &hw_data->koff_timeout)) {
+		SDEROT_ERR("fail create koff_timeout\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("vid_trigger", 0644,
+			debugfs_root, &hw_data->vid_trigger)) {
+		SDEROT_ERR("fail create vid_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("cmd_trigger", 0644,
+			debugfs_root, &hw_data->cmd_trigger)) {
+		SDEROT_ERR("fail create cmd_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("sbuf_headroom", 0644,
+			debugfs_root, &hw_data->sbuf_headroom)) {
+		SDEROT_ERR("fail create sbuf_headroom\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("solid_fill", 0644,
+			debugfs_root, &hw_data->solid_fill)) {
+		SDEROT_ERR("fail create solid_fill\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("constant_color", 0644,
+			debugfs_root, &hw_data->constant_color)) {
+		SDEROT_ERR("fail create constant_color\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index fedade1..f86f54b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -45,6 +45,11 @@
 #define ROTTOP_ROT_UBWC_DEC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x58)
 #define ROTTOP_ROT_UBWC_ENC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x5C)
 
+#define ROTTOP_START_CTRL_TRIG_SEL_SW           0
+#define ROTTOP_START_CTRL_TRIG_SEL_DONE         1
+#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA       2
+#define ROTTOP_START_CTRL_TRIG_SEL_MDP          3
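+/* trigger select is programmed into ROTTOP_START_CTRL as (trig_sel << 4) */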
+
 /* SDE_ROT_SSPP:
  * OFFSET=0x0A8900
  */
@@ -160,6 +165,10 @@
 #define ROT_WB_SAFE_LUT                         (SDE_ROT_WB_OFFSET+0x088)
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
+#define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
+#define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
+#define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
 #define ROT_WB_CSC_MATRIX_COEFF_0               (SDE_ROT_WB_OFFSET+0x260)
 #define ROT_WB_CSC_MATRIX_COEFF_1               (SDE_ROT_WB_OFFSET+0x264)
 #define ROT_WB_CSC_MATRIX_COEFF_2               (SDE_ROT_WB_OFFSET+0x268)
@@ -251,6 +260,10 @@
 /* REGDMA ADDR offset Mask */
 #define REGDMA_ADDR_OFFSET_MASK         0xFFFFF
 
+/* REGDMA command trigger select */
+#define REGDMA_CMD_TRIG_SEL_SW_START    (0 << 27)
+#define REGDMA_CMD_TRIG_SEL_MDP_FLUSH   (1 << 27)
+
 /* General defines */
 #define ROT_DONE_MASK                   0x1
 #define ROT_DONE_CLEAR                  0x1
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 5502cc0..c011d7a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -85,6 +85,7 @@
  *  @dest_rect: dest ROI, caller takes into account the different operations
  *              such as decimation, flip etc to program this field
  *  @addr:      destination surface address
+ *  @prefill_bw: prefill bandwidth in Bps
  */
 struct sde_hw_rot_wb_cfg {
 	struct sde_mdp_format_params   *fmt;
@@ -97,6 +98,7 @@
 	u32                             h_downscale_factor;
 	u32                             fps;
 	u64                             bw;
+	u64                             prefill_bw;
 };
 
 
@@ -200,9 +202,16 @@
  * struct sde_hw_rotator_context : Each rotator context ties to each priority
  * queue. Max number of concurrent contexts in regdma is limited to regdma
  * ram segment size allocation. Each rotator context can be any priority. A
- * incrementatl timestamp is used to identify and assigne to each context.
+ * incremental timestamp is used to identify and is assigned to each context.
+ * @list: list of pending context
+ * @sbuf_mode: true if stream buffer is requested
+ * @start_ctrl: start control register update value
+ * @sys_cache_mode: sys cache mode register update value
+ * @op_mode: rot top op mode selection
+ * @last_entry: pointer to last configured entry (for debugging purposes)
  */
 struct sde_hw_rotator_context {
+	struct list_head list;
 	struct sde_hw_rotator *rot;
 	struct sde_rot_hw_resource *hwres;
 	enum   sde_rot_queue_prio q_id;
@@ -219,6 +228,11 @@
 	dma_addr_t ts_addr;
 	bool   is_secure;
 	bool   is_traffic_shaping;
+	bool   sbuf_mode;
+	u32    start_ctrl;
+	u32    sys_cache_mode;
+	u32    op_mode;
+	struct sde_rot_entry *last_entry;
 };
 
 /**
@@ -234,6 +248,17 @@
  * struct sde_hw_rotator : Rotator description
  * @hw:           mdp register mapped offset
  * @ops:          pointer to operations possible for the rotator HW
+ * @sbuf_headroom: stream buffer headroom in lines
+ * @solid_fill: true if solid fill is requested
+ * @constant_color: solid fill constant color
+ * @sbuf_ctx: list of active sbuf contexts in FIFO order
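+ * @koff_timeout: kickoff timeout in milliseconds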
+ * @vid_trigger: video mode trigger select
+ * @cmd_trigger: command mode trigger select
+ * @inpixfmts: array of supported input pixel formats in fourcc
+ * @num_inpixfmt: size of the supported input pixel format array
+ * @outpixfmts: array of supported output pixel formats in fourcc
+ * @num_outpixfmt: size of the supported output pixel formats array
+ * @downscale_caps: scaling capability string
  */
 struct sde_hw_rotator {
 	/* base */
@@ -271,6 +296,9 @@
 	void *swts_buffer;
 
 	u32    highest_bank;
+	u32    sbuf_headroom;
+	u32    solid_fill;
+	u32    constant_color;
 
 	spinlock_t rotctx_lock;
 	spinlock_t rotisr_lock;
@@ -278,6 +306,17 @@
 	bool    dbgmem;
 	bool reset_hw_ts;
 	u32 last_hw_ts;
+	u32 koff_timeout;
+	u32 vid_trigger;
+	u32 cmd_trigger;
+
+	struct list_head sbuf_ctx[ROT_QUEUE_MAX];
+
+	u32 *inpixfmts;
+	u32 num_inpixfmt;
+	u32 *outpixfmts;
+	u32 num_outpixfmt;
+	const char *downscale_caps;
 };
 
 /**
@@ -349,15 +388,17 @@
  */
 static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = ctx;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = ctx;
+	if (ctx->sbuf_mode)
+		list_add_tail(&ctx->list, &rot->sbuf_ctx[ctx->q_id]);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
 			 ctx->q_id, idx, ctx, ctx->session_id);
 }
 
@@ -367,15 +408,17 @@
  */
 static inline void sde_hw_rotator_clr_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = NULL;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = NULL;
+	if (ctx->sbuf_mode)
+		list_del_init(&ctx->list);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
 			 ctx->q_id, idx, ctx->session_id);
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 20d578f..4cf9dfc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -179,13 +179,13 @@
 	return 0;
 }
 
-static int sde_mdp_get_ubwc_plane_size(struct sde_mdp_format_params *fmt,
+static int sde_mdp_get_a5x_plane_size(struct sde_mdp_format_params *fmt,
 	u32 width, u32 height, struct sde_mdp_plane_sizes *ps)
 {
 	int rc = 0;
 
-	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
-		ps->num_planes = 4;
+	if (sde_mdp_is_nv12_8b_format(fmt)) {
+		ps->num_planes = 2;
 		/* Y bitstream stride and plane size */
 		ps->ystride[0] = ALIGN(width, 128);
 		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 32),
@@ -196,6 +196,11 @@
 		ps->plane_size[1] = ALIGN(ps->ystride[1] *
 			ALIGN(height / 2, 32), 4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
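+		/* UBWC adds Y and CbCr meta data planes */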
+		ps->num_planes += 2;
+
 		/* Y meta data stride and plane size */
 		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
 		ps->plane_size[2] = ALIGN(ps->ystride[2] *
@@ -205,13 +210,13 @@
 		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
-	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+	} else if (sde_mdp_is_tp10_format(fmt)) {
 		u32 yWidth   = sde_mdp_general_align(width, 192);
 		u32 yHeight  = ALIGN(height, 16);
 		u32 uvWidth  = sde_mdp_general_align(width, 192);
 		u32 uvHeight = ALIGN(height, 32);
 
-		ps->num_planes = 4;
+		ps->num_planes = 2;
 
 		/* Y bitstream stride and plane size */
 		ps->ystride[0]    = yWidth * TILEWIDTH_SIZE / Y_TILEWIDTH;
@@ -225,6 +230,11 @@
 				(uvHeight * TILEHEIGHT_SIZE / UV_TILEHEIGHT),
 				4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
 		/* Y meta data stride and plane size */
 		ps->ystride[2]    = ALIGN(yWidth / Y_TILEWIDTH, 64);
 		ps->plane_size[2] = ALIGN(ps->ystride[2] *
@@ -234,11 +244,7 @@
 		ps->ystride[3]    = ALIGN(uvWidth / UV_TILEWIDTH, 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 				ALIGN((uvHeight / UV_TILEHEIGHT), 16), 4096);
-	} else if (fmt->format == SDE_PIX_FMT_RGBA_8888_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGBX_8888_UBWC    ||
-		fmt->format == SDE_PIX_FMT_RGBA_1010102_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGBX_1010102_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
+	} else if (sde_mdp_is_rgb_format(fmt)) {
 		uint32_t stride_alignment, bpp, aligned_bitstream_width;
 
 		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
@@ -248,7 +254,8 @@
 			stride_alignment = 64;
 			bpp = 4;
 		}
-		ps->num_planes = 2;
+
+		ps->num_planes = 1;
 
 		/* RGB bitstream stride and plane size */
 		aligned_bitstream_width = ALIGN(width, stride_alignment);
@@ -256,6 +263,11 @@
 		ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
 			ALIGN(height, 16), 4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 1;
+
 		/* RGB meta data stride and plane size */
 		ps->ystride[2] = ALIGN(DIV_ROUND_UP(aligned_bitstream_width,
 			16), 64);
@@ -266,7 +278,7 @@
 			__func__, fmt->format);
 		rc = -EINVAL;
 	}
-
+done:
 	return rc;
 }
 
@@ -285,8 +297,8 @@
 	bpp = fmt->bpp;
 	memset(ps, 0, sizeof(struct sde_mdp_plane_sizes));
 
-	if (sde_mdp_is_ubwc_format(fmt)) {
-		rc = sde_mdp_get_ubwc_plane_size(fmt, w, h, ps);
+	if (sde_mdp_is_tilea5x_format(fmt)) {
+		rc = sde_mdp_get_a5x_plane_size(fmt, w, h, ps);
 	} else if (bwc_mode) {
 		u32 height, meta_size;
 
@@ -394,7 +406,7 @@
 	return rc;
 }
 
-static int sde_mdp_ubwc_data_check(struct sde_mdp_data *data,
+static int sde_mdp_a5x_data_check(struct sde_mdp_data *data,
 			struct sde_mdp_plane_sizes *ps,
 			struct sde_mdp_format_params *fmt)
 {
@@ -416,8 +428,7 @@
 
 	base_addr = data->p[0].addr;
 
-	if ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ||
-		(fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC)) {
+	if (sde_mdp_is_yuv_format(fmt)) {
 		/************************************************/
 		/*      UBWC            **                      */
 		/*      buffer          **      MDP PLANE       */
@@ -447,6 +458,9 @@
 			+ ps->plane_size[2] + ps->plane_size[3];
 		data->p[1].len = ps->plane_size[1];
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
 		/* configure Y metadata plane */
 		data->p[2].addr = base_addr;
 		data->p[2].len = ps->plane_size[2];
@@ -477,10 +491,14 @@
 		data->p[0].addr = base_addr + ps->plane_size[2];
 		data->p[0].len = ps->plane_size[0];
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
 		/* configure RGB metadata plane */
 		data->p[2].addr = base_addr;
 		data->p[2].len = ps->plane_size[2];
 	}
+done:
 	data->num_planes = ps->num_planes;
 
 end:
@@ -490,7 +508,7 @@
 		return -EINVAL;
 	}
 
-	inc = ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ? 1 : 2);
+	inc = (sde_mdp_is_yuv_format(fmt) ? 1 : 2);
 	for (i = 0; i < SDE_ROT_MAX_PLANES; i += inc) {
 		if (data->p[i].len != ps->plane_size[i]) {
 			SDEROT_ERR(
@@ -517,8 +535,8 @@
 	if (!data || data->num_planes == 0)
 		return -ENOMEM;
 
-	if (sde_mdp_is_ubwc_format(fmt))
-		return sde_mdp_ubwc_data_check(data, ps, fmt);
+	if (sde_mdp_is_tilea5x_format(fmt))
+		return sde_mdp_a5x_data_check(data, ps, fmt);
 
 	SDEROT_DBG("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
 		data->p[0].len, ps->total_size);
@@ -574,7 +592,7 @@
 	return ret;
 }
 
-/* x and y are assumednt to be valid, expected to line up with start of tiles */
+/* x and y are assumed to be valid, expected to line up with start of tiles */
 void sde_rot_ubwc_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
 	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
 {
@@ -589,7 +607,7 @@
 	}
 	macro_w = 4 * micro_w;
 
-	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
+	if (sde_mdp_is_nv12_8b_format(fmt)) {
 		u16 chroma_macro_w = macro_w / 2;
 		u16 chroma_micro_w = micro_w / 2;
 
@@ -631,9 +649,11 @@
 			ret = 4;
 			goto done;
 		}
-	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+	} else if (sde_mdp_is_nv12_10b_format(fmt)) {
 		/* TODO: */
-		SDEROT_ERR("UBWC TP10 format not implemented yet");
+		SDEROT_ERR("%c%c%c%c format not implemented yet",
+				fmt->format >> 0, fmt->format >> 8,
+				fmt->format >> 16, fmt->format >> 24);
 		ret = 1;
 		goto done;
 	} else {
@@ -670,7 +690,7 @@
 	if ((x == 0) && (y == 0))
 		return;
 
-	if (sde_mdp_is_ubwc_format(fmt)) {
+	if (sde_mdp_is_tilea5x_format(fmt)) {
 		sde_rot_ubwc_data_calc_offset(data, x, y, ps, fmt);
 		return;
 	}
@@ -715,6 +735,12 @@
 {
 	u32 domain;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		SDEROT_DBG("ion hdl=%p buf=0x%pa\n", data->srcp_dma_buf,
 							&data->addr);
@@ -767,9 +793,14 @@
 	len = &data->len;
 	data->flags |= img->flags;
 	data->offset = img->offset;
-	if (data->flags & SDE_ROT_EXT_DMA_BUF)
+	if (data->flags & SDE_ROT_EXT_DMA_BUF) {
 		data->srcp_dma_buf = img->buffer;
-	else if (IS_ERR(data->srcp_dma_buf)) {
+	} else if (data->flags & SDE_ROT_EXT_IOVA) {
+		data->addr = img->addr;
+		data->len = img->len;
+		SDEROT_DBG("use client %pad/%lx\n", &data->addr, data->len);
+		return 0;
+	} else if (IS_ERR(data->srcp_dma_buf)) {
 		SDEROT_ERR("error on ion_import_fd\n");
 		ret = PTR_ERR(data->srcp_dma_buf);
 		data->srcp_dma_buf = NULL;
@@ -871,6 +902,12 @@
 	if (data->addr && data->len)
 		return 0;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		if (sde_mdp_is_map_needed(data)) {
 			domain = sde_smmu_get_domain_type(data->flags,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
index 3f94a15..cc367cd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,7 @@
 #define SDE_SECURE_OVERLAY_SESSION	0x00008000
 #define SDE_ROT_EXT_DMA_BUF		0x00010000
 #define SDE_SECURE_CAMERA_SESSION	0x00020000
+#define SDE_ROT_EXT_IOVA			0x00040000
 
 struct sde_rot_data_type;
 
@@ -77,7 +78,8 @@
 	int id;
 	uint32_t flags;
 	uint32_t priv;
-	uint32_t iova;
+	dma_addr_t addr;
+	u32 len;
 };
 
 struct sde_layer_plane {
@@ -86,6 +88,10 @@
 	struct dma_buf *buffer;
 	struct ion_handle *handle;
 
+	/* i/o virtual address & length */
+	dma_addr_t addr;
+	u32 len;
+
 	/* Pixel offset in the dma buffer. */
 	uint32_t offset;
 
@@ -127,6 +133,15 @@
 	 * for new content.
 	 */
 	struct sde_rot_sync_fence *fence;
+
+	/* indicate if this is a stream (inline) buffer */
+	bool sbuf;
+
+	/* specify the system cache id in stream buffer mode */
+	int scid;
+
+	/* indicate if system cache writeback is required */
+	bool writeback;
 };
 
 struct sde_mdp_plane_sizes {
@@ -151,22 +166,12 @@
 	struct sg_table *srcp_table;
 };
 
-enum sde_data_state {
-	SDE_BUF_STATE_UNUSED,
-	SDE_BUF_STATE_READY,
-	SDE_BUF_STATE_ACTIVE,
-	SDE_BUF_STATE_CLEANUP,
-};
-
 struct sde_mdp_data {
-	enum sde_data_state state;
 	u8 num_planes;
 	struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
-	struct list_head buf_list;
-	struct list_head pipe_list;
-	struct list_head chunk_list;
-	u64 last_alloc;
-	u64 last_freed;
+	bool sbuf;
+	int scid;
+	bool writeback;
 };
 
 void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 90ec313..6b3ddfa 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1069,6 +1069,7 @@
 		hfi = (struct hfi_buffer_count_actual *)
 			&pkt->rg_property_data[1];
 		hfi->buffer_count_actual = prop->buffer_count_actual;
+		hfi->buffer_count_min_host = prop->buffer_count_min_host;
 
 		buffer_type = get_hfi_buffer(prop->buffer_type);
 		if (buffer_type)
@@ -1720,14 +1721,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_NAL_SVC_EXT:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_CONFIG_VENC_PERF_MODE:
 	{
 		u32 hfi_perf_mode = 0;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 33821c7..abc6cc8 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,10 +20,7 @@
 #include "msm_vidc_clocks.h"
 
 #define MSM_VDEC_DVC_NAME "msm_vdec_8974"
-#define MIN_NUM_OUTPUT_BUFFERS 4
-#define MIN_NUM_CAPTURE_BUFFERS 6
-#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS 1
-#define MAX_NUM_OUTPUT_BUFFERS VB2_MAX_FRAME
+#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
 #define MB_SIZE_IN_PIXEL (16 * 16)
 #define MAX_OPERATING_FRAME_RATE (300 << 16)
@@ -323,6 +320,30 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+		.name = "CAPTURE Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_CAPTURE_BUFFERS,
+		.maximum = MAX_NUM_CAPTURE_BUFFERS,
+		.default_value = MIN_NUM_CAPTURE_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+		.name = "OUTPUT Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_OUTPUT_BUFFERS,
+		.maximum = MAX_NUM_OUTPUT_BUFFERS,
+		.default_value = MIN_NUM_OUTPUT_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT,
 		.name = "Video decoder dpb color format",
 		.type = V4L2_CTRL_TYPE_MENU,
@@ -477,7 +498,6 @@
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12,
 		.type = CAPTURE_PORT,
 	},
@@ -485,7 +505,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0",
 		.description = "UBWC Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12_ubwc,
 		.type = CAPTURE_PORT,
 	},
@@ -493,7 +512,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
 		.description = "UBWC Y/CbCr 4:2:0 10bit",
 		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12_ubwc_10bit,
 		.type = CAPTURE_PORT,
 	},
@@ -501,7 +519,6 @@
 		.name = "Mpeg4",
 		.description = "Mpeg4 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG4,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -510,7 +527,6 @@
 		.name = "Mpeg2",
 		.description = "Mpeg2 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG2,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -519,7 +535,6 @@
 		.name = "H263",
 		.description = "H263 compressed format",
 		.fourcc = V4L2_PIX_FMT_H263,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -528,7 +543,6 @@
 		.name = "VC1",
 		.description = "VC-1 compressed format",
 		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -537,7 +551,6 @@
 		.name = "VC1 SP",
 		.description = "VC-1 compressed format G",
 		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -546,7 +559,6 @@
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -555,7 +567,6 @@
 		.name = "H264_MVC",
 		.description = "H264_MVC compressed format",
 		.fourcc = V4L2_PIX_FMT_H264_MVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -564,7 +575,6 @@
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -573,7 +583,6 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -582,122 +591,12 @@
 		.name = "VP9",
 		.description = "VP9 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP9,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed_full_yuv,
 		.type = OUTPUT_PORT,
 		.defer_outputs = true,
 	},
 };
 
-int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
-{
-	const struct msm_vidc_format *fmt = NULL;
-	struct hfi_device *hdev;
-	int rc = 0, i = 0, stride = 0, scanlines = 0, color_format = 0;
-	unsigned int *plane_sizes = NULL, extra_idx = 0;
-
-	if (!inst || !f || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR,
-			"Invalid input, inst = %pK, format = %pK\n", inst, f);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		fmt = &inst->fmts[CAPTURE_PORT];
-	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		fmt = &inst->fmts[OUTPUT_PORT];
-	else
-		return -ENOTSUPP;
-
-	f->fmt.pix_mp.pixelformat = fmt->fourcc;
-	f->fmt.pix_mp.num_planes = fmt->num_planes;
-	if (inst->in_reconfig) {
-		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
-		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
-
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"%s: unsupported session\n", __func__);
-			goto exit;
-		}
-	}
-
-	f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
-	f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
-	stride = inst->prop.width[CAPTURE_PORT];
-	scanlines = inst->prop.height[CAPTURE_PORT];
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		plane_sizes = &inst->bufq[OUTPUT_PORT].plane_sizes[0];
-		for (i = 0; i < fmt->num_planes; ++i) {
-			if (!plane_sizes[i]) {
-				f->fmt.pix_mp.plane_fmt[i].sizeimage =
-					get_frame_size(inst, fmt, f->type, i);
-				plane_sizes[i] = f->fmt.pix_mp.plane_fmt[i].
-					sizeimage;
-			} else
-				f->fmt.pix_mp.plane_fmt[i].sizeimage =
-					plane_sizes[i];
-		}
-		f->fmt.pix_mp.height = inst->prop.height[OUTPUT_PORT];
-		f->fmt.pix_mp.width = inst->prop.width[OUTPUT_PORT];
-		f->fmt.pix_mp.plane_fmt[0].bytesperline =
-			(__u16)inst->prop.width[OUTPUT_PORT];
-		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
-			(__u16)inst->prop.height[OUTPUT_PORT];
-	} else {
-		switch (fmt->fourcc) {
-		case V4L2_PIX_FMT_NV12:
-			color_format = COLOR_FMT_NV12;
-			break;
-		case V4L2_PIX_FMT_NV12_UBWC:
-			color_format = COLOR_FMT_NV12_UBWC;
-			break;
-		case V4L2_PIX_FMT_NV12_TP10_UBWC:
-			color_format = COLOR_FMT_NV12_BPP10_UBWC;
-			break;
-		default:
-			dprintk(VIDC_WARN, "Color format not recognized\n");
-			rc = -ENOTSUPP;
-			goto exit;
-		}
-
-		stride = VENUS_Y_STRIDE(color_format,
-				inst->prop.width[CAPTURE_PORT]);
-		scanlines = VENUS_Y_SCANLINES(color_format,
-				inst->prop.height[CAPTURE_PORT]);
-
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			fmt->get_frame_size(0,
-			inst->prop.height[CAPTURE_PORT],
-			inst->prop.width[CAPTURE_PORT]);
-
-		extra_idx = EXTRADATA_IDX(fmt->num_planes);
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-				VENUS_EXTRADATA_SIZE(
-					inst->prop.height[CAPTURE_PORT],
-					inst->prop.width[CAPTURE_PORT]);
-		}
-
-		for (i = 0; i < fmt->num_planes; ++i)
-			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-
-		f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
-		f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
-		f->fmt.pix_mp.plane_fmt[0].bytesperline =
-			(__u16)stride;
-		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
-			(__u16)scanlines;
-	}
-
-exit:
-	return rc;
-}
-
 int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 {
 	struct msm_vidc_format *fmt = NULL;
@@ -724,6 +623,14 @@
 			rc = -EINVAL;
 			goto err_invalid_fmt;
 		}
+
+		if (inst->fmts[fmt->type].fourcc == f->fmt.pix_mp.pixelformat &&
+			inst->prop.width[CAPTURE_PORT] == f->fmt.pix_mp.width &&
+			inst->prop.height[CAPTURE_PORT] ==
+				f->fmt.pix_mp.height) {
+			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			return 0;
+		}
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
@@ -750,7 +657,7 @@
 			inst->fmts[fmt->type].get_frame_size(0,
 			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
 
-		extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
 		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
 			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
 				VENUS_EXTRADATA_SIZE(
@@ -758,14 +665,12 @@
 					inst->prop.width[CAPTURE_PORT]);
 		}
 
-		f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
 			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
-		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
 		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
 				ARRAY_SIZE(vdec_formats),
@@ -781,34 +686,21 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
-		if (rc) {
-			dprintk(VIDC_ERR, "Failed to initialize instance\n");
-			goto err_invalid_fmt;
-		}
-
-		if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) &
-			inst->core->dec_codec_supported)) {
-			dprintk(VIDC_ERR,
-				"Codec(%#x) is not present in the supported codecs list(%#x)\n",
-				get_hal_codec(inst->fmts[fmt->type].fourcc),
-				inst->core->dec_codec_supported);
-			rc = -EINVAL;
-			goto err_invalid_fmt;
-		}
-
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
 			goto err_invalid_fmt;
 		}
 
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto err_invalid_fmt;
+		if (inst->fmts[fmt->type].fourcc == f->fmt.pix_mp.pixelformat &&
+			inst->prop.width[OUTPUT_PORT] == f->fmt.pix_mp.width &&
+			inst->prop.height[OUTPUT_PORT] ==
+				f->fmt.pix_mp.height) {
+			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			return 0;
 		}
+		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width[OUTPUT_PORT];
@@ -820,18 +712,19 @@
 		msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
 
 		max_input_size = get_frame_size(
-inst, &inst->fmts[fmt->type], f->type, 0);
+			inst, &inst->fmts[fmt->type], f->type, 0);
 		if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size ||
 			!f->fmt.pix_mp.plane_fmt[0].sizeimage) {
 			f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size;
 		}
 
-		f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; ++i) {
 			inst->bufq[OUTPUT_PORT].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 
+		rc = msm_comm_try_get_bufreqs(inst);
 	}
 err_invalid_fmt:
 	return rc;
@@ -868,516 +761,6 @@
 	return rc;
 }
 
-static int set_actual_buffer_count(struct msm_vidc_inst *inst,
-			int count, enum hal_buffer type)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct hal_buffer_count_actual buf_count;
-
-	hdev = inst->core->device;
-
-	buf_count.buffer_type = type;
-	buf_count.buffer_count_actual = count;
-	rc = call_hfi_op(hdev, session_set_property,
-		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to set actual buffer count %d for buffer type %d\n",
-			count, type);
-	return rc;
-}
-
-static int msm_vdec_queue_setup(
-	struct vb2_queue *q,
-	unsigned int *num_buffers, unsigned int *num_planes,
-	unsigned int sizes[], struct device *alloc_devs[])
-{
-	int i, rc = 0;
-	struct msm_vidc_inst *inst;
-	struct hal_buffer_requirements *bufreq;
-	int extra_idx = 0;
-	int min_buff_count = 0;
-
-	if (!q || !num_buffers || !num_planes
-		|| !sizes || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK, %pK, %pK\n",
-			q, num_buffers, num_planes);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"%s: Failed : Buffer requirements\n", __func__);
-		goto exit;
-	}
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		*num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
-				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
-			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
-		for (i = 0; i < *num_planes; i++) {
-			sizes[i] = get_frame_size(inst,
-					&inst->fmts[OUTPUT_PORT], q->type, i);
-		}
-		rc = set_actual_buffer_count(inst, *num_buffers,
-			HAL_BUFFER_INPUT);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n");
-		*num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-		if (rc) {
-			dprintk(VIDC_ERR, "Failed to open instance\n");
-			break;
-		}
-		rc = msm_comm_try_get_bufreqs(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to get buffer requirements: %d\n", rc);
-			break;
-		}
-
-		bufreq = get_buff_req_buffer(inst,
-			msm_comm_get_hal_output_buffer(inst));
-		if (!bufreq) {
-			dprintk(VIDC_ERR,
-				"No buffer requirement for buffer type %x\n",
-				HAL_BUFFER_OUTPUT);
-			rc = -EINVAL;
-			break;
-		}
-
-		/* Pretend as if FW itself is asking for
-		 * additional buffers.
-		 * *num_buffers += MSM_VIDC_ADDITIONAL_BUFS_FOR_DCVS
-		 * is wrong since it will end up increasing the count
-		 * on every call to reqbufs if *num_bufs is larger
-		 * than min requirement.
-		 */
-		*num_buffers = max(*num_buffers, bufreq->buffer_count_min
-			+ msm_dcvs_get_extra_buff_count(inst));
-
-		min_buff_count = (!!(inst->flags & VIDC_THUMBNAIL)) ?
-			MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS :
-				MIN_NUM_CAPTURE_BUFFERS;
-
-		*num_buffers = clamp_val(*num_buffers,
-			min_buff_count, VB2_MAX_FRAME);
-
-		dprintk(VIDC_DBG, "Set actual output buffer count: %d\n",
-				*num_buffers);
-		rc = set_actual_buffer_count(inst, *num_buffers,
-					msm_comm_get_hal_output_buffer(inst));
-		if (rc)
-			break;
-
-		if (*num_buffers != bufreq->buffer_count_actual) {
-			rc = msm_comm_try_get_bufreqs(inst);
-			if (rc) {
-				dprintk(VIDC_WARN,
-					"Failed to get buf req, %d\n", rc);
-				break;
-			}
-		}
-		dprintk(VIDC_DBG, "count =  %d, size = %d, alignment = %d\n",
-				inst->buff_req.buffer[1].buffer_count_actual,
-				inst->buff_req.buffer[1].buffer_size,
-				inst->buff_req.buffer[1].buffer_alignment);
-		sizes[0] = inst->bufq[CAPTURE_PORT].plane_sizes[0];
-
-		/*
-		 * Set actual buffer count to firmware for DPB buffers.
-		 * Firmware mandates setting of minimum buffer size
-		 * and actual buffer count for both OUTPUT and OUTPUT2.
-		 * Hence we are setting back the same buffer size
-		 * information back to firmware.
-		 */
-		if (msm_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-			bufreq = get_buff_req_buffer(inst,
-					HAL_BUFFER_OUTPUT);
-			if (!bufreq) {
-				rc = -EINVAL;
-				break;
-			}
-
-			rc = set_actual_buffer_count(inst,
-				bufreq->buffer_count_actual,
-				HAL_BUFFER_OUTPUT);
-			if (rc)
-				break;
-		}
-
-		extra_idx =
-			EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			sizes[extra_idx] =
-				VENUS_EXTRADATA_SIZE(
-					inst->prop.height[CAPTURE_PORT],
-					inst->prop.width[CAPTURE_PORT]);
-		}
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
-		rc = -EINVAL;
-	}
-exit:
-	return rc;
-}
-
-static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct {
-		enum hal_buffer type;
-		struct hal_buffer_requirements *req;
-		size_t size;
-	} internal_buffers[] = {
-		{ HAL_BUFFER_INTERNAL_SCRATCH, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_SCRATCH_1, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_SCRATCH_2, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_PERSIST, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_PERSIST_1, NULL, 0},
-	};
-
-	struct hal_frame_size frame_sz;
-	int i;
-
-	frame_sz.buffer_type = HAL_BUFFER_INPUT;
-	frame_sz.width = inst->capability.width.max;
-	frame_sz.height =
-		(inst->capability.mbs_per_frame.max * 256) /
-		inst->capability.width.max;
-
-	dprintk(VIDC_DBG,
-		"Max buffer reqs, buffer type = %d width = %d, height = %d, max_mbs_per_frame = %d\n",
-		frame_sz.buffer_type, frame_sz.width,
-		frame_sz.height, inst->capability.mbs_per_frame.max);
-
-	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"%s Failed to get max buf req, %d\n", __func__, rc);
-		return 0;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
-		internal_buffers[i].req =
-			get_buff_req_buffer(inst, internal_buffers[i].type);
-		internal_buffers[i].size = internal_buffers[i].req ?
-			internal_buffers[i].req->buffer_size : 0;
-	}
-
-	frame_sz.buffer_type = HAL_BUFFER_INPUT;
-	frame_sz.width = inst->prop.width[OUTPUT_PORT];
-	frame_sz.height = inst->prop.height[OUTPUT_PORT];
-
-	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"%s Failed to get back old buf req, %d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	dprintk(VIDC_DBG,
-			"Old buffer reqs, buffer type = %d width = %d, height = %d\n",
-			frame_sz.buffer_type, frame_sz.width,
-			frame_sz.height);
-
-	for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
-		if (internal_buffers[i].req) {
-			internal_buffers[i].req->buffer_size =
-				internal_buffers[i].size;
-			dprintk(VIDC_DBG,
-				"Changing buffer type : %d size to : %zd\n",
-				internal_buffers[i].type,
-				internal_buffers[i].size);
-		}
-	}
-	return 0;
-}
-
-static inline int start_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	bool slave_side_cp = inst->core->resources.slave_side_cp;
-	struct hal_buffer_size_minimum b;
-	unsigned int buffer_size;
-	struct msm_vidc_format *fmt = NULL;
-
-	fmt = &inst->fmts[CAPTURE_PORT];
-	buffer_size = fmt->get_frame_size(0,
-		inst->prop.height[CAPTURE_PORT],
-		inst->prop.width[CAPTURE_PORT]);
-	hdev = inst->core->device;
-
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_vidc_check_scaling_supported(inst);
-		b.buffer_type = HAL_BUFFER_OUTPUT2;
-	} else {
-		b.buffer_type = HAL_BUFFER_OUTPUT;
-	}
-
-	b.buffer_size = buffer_size;
-	rc = call_hfi_op(hdev, session_set_property,
-		 inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
-		 &b);
-	if (rc) {
-		dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
-		return -EINVAL;
-	}
-	if ((inst->flags & VIDC_SECURE) && !inst->in_reconfig &&
-		!slave_side_cp) {
-		rc = set_max_internal_buffers_size(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set max scratch buffer size: %d\n",
-				rc);
-			goto fail_start;
-		}
-	}
-	rc = msm_comm_set_scratch_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to set scratch buffers: %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_persist_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to set persist buffers: %d\n", rc);
-		goto fail_start;
-	}
-
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_comm_set_output_buffers(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set output buffers: %d\n", rc);
-			goto fail_start;
-		}
-	}
-
-	/*
-	 * For seq_changed_insufficient, driver should set session_continue
-	 * to firmware after the following sequence
-	 * - driver raises insufficient event to v4l2 client
-	 * - all output buffers have been flushed and freed
-	 * - v4l2 client queries buffer requirements and splits/combines OPB-DPB
-	 * - v4l2 client sets new set of buffers to firmware
-	 * - v4l2 client issues CONTINUE to firmware to resume decoding of
-	 *   submitted ETBs.
-	 */
-	if (inst->in_reconfig) {
-		dprintk(VIDC_DBG, "send session_continue after reconfig\n");
-		rc = call_hfi_op(hdev, session_continue,
-			(void *) inst->session);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s - failed to send session_continue\n",
-				__func__);
-			goto fail_start;
-		}
-	}
-	inst->in_reconfig = false;
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-		goto fail_start;
-	}
-	msm_dcvs_init_load(inst);
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_comm_queue_output_buffers(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to queue output buffers: %d\n", rc);
-			goto fail_start;
-		}
-	}
-
-fail_start:
-	return rc;
-}
-
-static inline int stop_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-	return rc;
-}
-
-static int msm_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
-		q->type, inst);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		goto stream_start_failed;
-	}
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Streamon failed on: %d capability for inst: %pK\n",
-			q->type, inst);
-		goto stream_start_failed;
-	}
-
-	rc = msm_comm_qbuf(inst, NULL);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
-				rc);
-		goto stream_start_failed;
-	}
-
-stream_start_failed:
-	return rc;
-}
-
-static void msm_vdec_stop_streaming(struct vb2_queue *q)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return;
-	}
-
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (!inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = stop_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = stop_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR,
-			"Q-type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK, cap = %d to state: %d\n",
-			inst, q->type, MSM_VIDC_RELEASE_RESOURCES_DONE);
-}
-
-static void msm_vdec_buf_queue(struct vb2_buffer *vb)
-{
-	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
-
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
-}
-
-static void msm_vdec_buf_cleanup(struct vb2_buffer *vb)
-{
-	int rc = 0;
-	struct buf_queue *q = NULL;
-	struct msm_vidc_inst *inst = NULL;
-
-	if (!vb) {
-		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
-			__func__, vb);
-		return;
-	}
-
-	inst = vb2_get_drv_priv(vb->vb2_queue);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
-			__func__);
-		return;
-	}
-
-	q = msm_comm_get_vb2q(inst, vb->type);
-	if (!q) {
-		dprintk(VIDC_ERR,
-			"%s : Failed to find buffer queue for type = %d\n",
-				__func__, vb->type);
-		return;
-	}
-
-	if (q->vb2_bufq.streaming) {
-		dprintk(VIDC_DBG, "%d PORT is streaming\n",
-			vb->type);
-		return;
-	}
-
-	rc = msm_vidc_release_buffers(inst, vb->type);
-	if (rc)
-		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
-			__func__, rc);
-}
-
-static const struct vb2_ops msm_vdec_vb2q_ops = {
-	.queue_setup = msm_vdec_queue_setup,
-	.start_streaming = msm_vdec_start_streaming,
-	.buf_queue = msm_vdec_buf_queue,
-	.buf_cleanup = msm_vdec_buf_cleanup,
-	.stop_streaming = msm_vdec_stop_streaming,
-};
-
-const struct vb2_ops *msm_vdec_get_vb2q_ops(void)
-{
-	return &msm_vdec_vb2q_ops;
-}
-
 int msm_vdec_inst_init(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -1398,6 +781,9 @@
 	inst->capability.secure_output2_threshold.max = 0;
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_DYNAMIC;
+	/* To start with, both ports are 1 plane each */
+	inst->bufq[OUTPUT_PORT].num_planes = 1;
+	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->prop.fps = DEFAULT_FPS;
 	inst->operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
@@ -1497,6 +883,7 @@
 		property_id = HAL_PARAM_VDEC_SYNC_FRAME_DECODE;
 		hal_property.enable = ctrl->val;
 		pdata = &hal_property;
+		msm_dcvs_try_enable(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
 		inst->flags |= VIDC_SECURE;
@@ -1506,6 +893,36 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
 		property_id = HAL_PARAM_INDEX_EXTRADATA;
 		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+		case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+		case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+		case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+		case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+		case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+		case V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP:
+		case V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
+		case V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP:
+		case V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
+		case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+			inst->bufq[CAPTURE_PORT].num_planes = 2;
+			inst->bufq[CAPTURE_PORT].plane_sizes[EXTRADATA_IDX(2)] =
+				VENUS_EXTRADATA_SIZE(
+				inst->prop.height[CAPTURE_PORT],
+				inst->prop.width[CAPTURE_PORT]);
+			break;
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
 		extra.enable = 1;
 		pdata = &extra;
 		break;
@@ -1606,6 +1023,7 @@
 				V4L2_CID_MPEG_VIDEO_H264_LEVEL,
 				temp_ctrl->val);
 		pdata = &profile_level;
+		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
@@ -1617,6 +1035,7 @@
 				V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 				temp_ctrl->val);
 		pdata = &profile_level;
+		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT:
 		dprintk(VIDC_DBG,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.h b/drivers/media/platform/msm/vidc/msm_vdec.h
index a209dd5..44ba4fd 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.h
+++ b/drivers/media/platform/msm/vidc/msm_vdec.h
@@ -22,10 +22,8 @@
 	const struct v4l2_ctrl_ops *ctrl_ops);
 int msm_vdec_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
 int msm_vdec_s_fmt(void *instance, struct v4l2_format *f);
-int msm_vdec_g_fmt(void *instance, struct v4l2_format *f);
 int msm_vdec_s_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_vdec_g_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_vdec_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-struct vb2_ops *msm_vdec_get_vb2q_ops(void);
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 8956b0e..ff7204c 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -18,8 +18,6 @@
 #include "msm_vidc_clocks.h"
 
 #define MSM_VENC_DVC_NAME "msm_venc_8974"
-#define MIN_NUM_OUTPUT_BUFFERS 4
-#define MIN_NUM_CAPTURE_BUFFERS 4
 #define MIN_BIT_RATE 32000
 #define MAX_BIT_RATE 300000000
 #define DEFAULT_BIT_RATE 64000
@@ -326,6 +324,31 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+		.name = "CAPTURE Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_CAPTURE_BUFFERS,
+		.maximum = MAX_NUM_CAPTURE_BUFFERS,
+		.default_value = MIN_NUM_CAPTURE_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+		.name = "OUTPUT Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_OUTPUT_BUFFERS,
+		.maximum = MAX_NUM_OUTPUT_BUFFERS,
+		.default_value = MIN_NUM_OUTPUT_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME,
 		.name = "Request I Frame",
 		.type = V4L2_CTRL_TYPE_BUTTON,
@@ -1098,7 +1121,6 @@
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv12,
 		.type = OUTPUT_PORT,
 	},
@@ -1106,7 +1128,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0",
 		.description = "UBWC Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv12_ubwc,
 		.type = OUTPUT_PORT,
 	},
@@ -1114,7 +1135,6 @@
 		.name = "RGBA 8:8:8:8",
 		.description = "RGBA 8:8:8:8",
 		.fourcc = V4L2_PIX_FMT_RGB32,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_rgba,
 		.type = OUTPUT_PORT,
 	},
@@ -1122,7 +1142,6 @@
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1130,7 +1149,6 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1138,7 +1156,6 @@
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1146,222 +1163,13 @@
 		.name = "YCrCb Semiplanar 4:2:0",
 		.description = "Y/CrCb 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV21,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv21,
 		.type = OUTPUT_PORT,
 	},
 };
 
-static void msm_venc_update_plane_count(struct msm_vidc_inst *inst, int type)
-{
-	struct v4l2_ctrl *ctrl = NULL;
-	u32 extradata = 0;
-
-	if (!inst)
-		return;
-
-	inst->fmts[type].num_planes = 1;
-
-	ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
-		V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
-
-	if (ctrl)
-		extradata = v4l2_ctrl_g_ctrl(ctrl);
-
-	if (type == CAPTURE_PORT) {
-		switch (extradata) {
-		case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
-		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
-		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
-		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
-		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
-			inst->fmts[CAPTURE_PORT].num_planes = 2;
-		default:
-			break;
-		}
-	} else if (type == OUTPUT_PORT) {
-		switch (extradata) {
-		case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
-		case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
-		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
-		case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
-		case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
-		case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
-			inst->fmts[OUTPUT_PORT].num_planes = 2;
-			break;
-		default:
-			break;
-		}
-	}
-}
-
 static int msm_venc_set_csc(struct msm_vidc_inst *inst);
 
-static int msm_venc_queue_setup(struct vb2_queue *q,
-	unsigned int *num_buffers, unsigned int *num_planes,
-	unsigned int sizes[], struct device *alloc_devs[])
-{
-	int i, temp, rc = 0;
-	struct msm_vidc_inst *inst;
-	struct hal_buffer_count_actual new_buf_count;
-	enum hal_property property_id;
-	struct hfi_device *hdev;
-	struct hal_buffer_requirements *buff_req;
-	u32 extra_idx = 0;
-	struct hal_buffer_requirements *buff_req_buffer = NULL;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input\n");
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to open instance\n");
-		return rc;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to get buffer requirements: %d\n", rc);
-		return rc;
-	}
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		*num_planes = 1;
-
-		buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-		if (buff_req) {
-			/*
-			 * Pretend as if the FW itself is asking for additional
-			 * buffers, which are required for DCVS
-			 */
-			unsigned int min_req_buffers =
-				buff_req->buffer_count_min +
-				msm_dcvs_get_extra_buff_count(inst);
-			*num_buffers = max(*num_buffers, min_req_buffers);
-		}
-
-		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
-				*num_buffers > VB2_MAX_FRAME) {
-			int temp = *num_buffers;
-
-			*num_buffers = clamp_val(*num_buffers,
-					MIN_NUM_CAPTURE_BUFFERS,
-					VB2_MAX_FRAME);
-			dprintk(VIDC_INFO,
-				"Changing buffer count on CAPTURE_MPLANE from %d to %d for best effort encoding\n",
-				temp, *num_buffers);
-		}
-
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		*num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-
-		for (i = 0; i < *num_planes; i++) {
-			int extra_idx = EXTRADATA_IDX(*num_planes);
-
-			buff_req_buffer = get_buff_req_buffer(inst,
-					HAL_BUFFER_OUTPUT);
-
-			sizes[i] = buff_req_buffer ?
-				buff_req_buffer->buffer_size : 0;
-
-			if (extra_idx && i == extra_idx &&
-					extra_idx < VIDEO_MAX_PLANES) {
-				buff_req_buffer = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_OUTPUT);
-				if (!buff_req_buffer) {
-					dprintk(VIDC_ERR,
-						"%s: failed - invalid buffer req\n",
-						__func__);
-					return -EINVAL;
-				}
-
-				sizes[i] = buff_req_buffer->buffer_size;
-			}
-		}
-
-		dprintk(VIDC_DBG, "actual output buffer count set to fw = %d\n",
-				*num_buffers);
-		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
-		new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
-		new_buf_count.buffer_count_actual = *num_buffers;
-		rc = call_hfi_op(hdev, session_set_property, inst->session,
-			property_id, &new_buf_count);
-
-		break;
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		*num_planes = 1;
-
-		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
-			max(*num_buffers, inst->buff_req.buffer[0].
-				buffer_count_min);
-
-		temp = *num_buffers;
-
-		*num_buffers = clamp_val(*num_buffers,
-				MIN_NUM_OUTPUT_BUFFERS,
-				VB2_MAX_FRAME);
-		dprintk(VIDC_INFO,
-			"Changing buffer count on OUTPUT_MPLANE from %d to %d for best effort encoding\n",
-			temp, *num_buffers);
-
-		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
-		new_buf_count.buffer_type = HAL_BUFFER_INPUT;
-		new_buf_count.buffer_count_actual = *num_buffers;
-
-		dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
-				*num_buffers);
-
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		*num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-
-		rc = call_hfi_op(hdev, session_set_property, inst->session,
-					property_id, &new_buf_count);
-		if (rc)
-			dprintk(VIDC_ERR, "failed to set count to fw\n");
-
-		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
-				inst->buff_req.buffer[0].buffer_size,
-				inst->buff_req.buffer[0].buffer_alignment,
-				inst->buff_req.buffer[0].buffer_count_actual);
-		sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size(
-				0, inst->prop.height[OUTPUT_PORT],
-				inst->prop.width[OUTPUT_PORT]);
-
-		extra_idx =
-			EXTRADATA_IDX(inst->fmts[OUTPUT_PORT].num_planes);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			buff_req_buffer = get_buff_req_buffer(inst,
-				HAL_BUFFER_EXTRADATA_INPUT);
-			if (!buff_req_buffer) {
-				dprintk(VIDC_ERR,
-					"%s: failed - invalid buffer req\n",
-					__func__);
-				return -EINVAL;
-			}
-
-			sizes[extra_idx] = buff_req_buffer->buffer_size;
-		}
-
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-	return rc;
-}
-
 static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers)
 {
 	int num_enh_layers = 0;
@@ -1436,190 +1244,6 @@
 	return rc;
 }
 
-static inline int start_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	msm_venc_power_save_mode_enable(inst);
-	if (inst->capability.pixelprocess_capabilities &
-		HAL_VIDEO_ENCODER_SCALING_CAPABILITY)
-		rc = msm_vidc_check_scaling_supported(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
-		return -EINVAL;
-	}
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to get Buffer Requirements : %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_scratch_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_persist_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to set persist buffers: %d\n", rc);
-		goto fail_start;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-		goto fail_start;
-	}
-	msm_dcvs_init_load(inst);
-
-fail_start:
-	return rc;
-}
-
-static int msm_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
-		q->type, inst);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		goto stream_start_failed;
-	}
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Streamon failed on: %d capability for inst: %pK\n",
-			q->type, inst);
-		goto stream_start_failed;
-	}
-
-	rc = msm_comm_qbuf(inst, NULL);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
-				rc);
-		goto stream_start_failed;
-	}
-
-stream_start_failed:
-	return rc;
-}
-
-static void msm_venc_stop_streaming(struct vb2_queue *q)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "%s - Invalid input, q = %pK\n", __func__, q);
-		return;
-	}
-
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Q-type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK, cap = %d to state: %d\n",
-			inst, q->type, MSM_VIDC_CLOSE_DONE);
-}
-
-static void msm_venc_buf_queue(struct vb2_buffer *vb)
-{
-	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
-
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
-}
-
-static void msm_venc_buf_cleanup(struct vb2_buffer *vb)
-{
-	int rc = 0;
-	struct buf_queue *q = NULL;
-	struct msm_vidc_inst *inst = NULL;
-
-	if (!vb) {
-		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
-			__func__, vb);
-		return;
-	}
-
-	inst = vb2_get_drv_priv(vb->vb2_queue);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
-			__func__);
-		return;
-	}
-
-	q = msm_comm_get_vb2q(inst, vb->type);
-	if (!q) {
-		dprintk(VIDC_ERR,
-			"%s : Failed to find buffer queue for type = %d\n",
-				__func__, vb->type);
-		return;
-	}
-
-	if (q->vb2_bufq.streaming) {
-		dprintk(VIDC_DBG, "%d PORT is streaming\n",
-			vb->type);
-		return;
-	}
-
-	rc = msm_vidc_release_buffers(inst, vb->type);
-	if (rc)
-		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
-			__func__, rc);
-}
-
-static const struct vb2_ops msm_venc_vb2q_ops = {
-	.queue_setup = msm_venc_queue_setup,
-	.start_streaming = msm_venc_start_streaming,
-	.buf_queue = msm_venc_buf_queue,
-	.buf_cleanup = msm_venc_buf_cleanup,
-	.stop_streaming = msm_venc_stop_streaming,
-};
-
-const struct vb2_ops *msm_venc_get_vb2q_ops(void)
-{
-	return &msm_venc_vb2q_ops;
-}
-
 static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
 		struct v4l2_ctrl **cluster, int ncontrols)
 {
@@ -1823,6 +1447,7 @@
 		bitrate.bit_rate = ctrl->val;
 		bitrate.layer_id = 0;
 		pdata = &bitrate;
+		inst->bitrate = ctrl->val;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -2129,11 +1754,63 @@
 		dprintk(VIDC_INFO, "Setting secure mode to: %d\n",
 				!!(inst->flags & VIDC_SECURE));
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA: {
+		struct hal_buffer_requirements *buff_req_buffer = NULL;
+		int extra_idx = 0;
+
 		property_id = HAL_PARAM_INDEX_EXTRADATA;
 		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
 		extra.enable = 1;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+		case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+		case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+		case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+		case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+			inst->bufq[OUTPUT_PORT].num_planes = 2;
+			break;
+		case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+			inst->bufq[CAPTURE_PORT].num_planes = 2;
+			break;
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
+
 		pdata = &extra;
+		rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session, property_id, pdata);
+
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+			break;
+		}
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_EXTRADATA_INPUT);
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+
+		inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+			buff_req_buffer ?
+			buff_req_buffer->buffer_size : 0;
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_EXTRADATA_OUTPUT);
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
+		inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
+			buff_req_buffer ?
+			buff_req_buffer->buffer_size : 0;
+
+		property_id = 0;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
 		property_id = HAL_PARAM_VENC_GENERATE_AUDNAL;
@@ -2694,6 +2371,9 @@
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
 	inst->capability.pixelprocess_capabilities = 0;
+	/* To start with, both ports are 1 plane each */
+	inst->bufq[OUTPUT_PORT].num_planes = 1;
+	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->operating_rate = 0;
 
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
@@ -2763,8 +2443,9 @@
 {
 	struct msm_vidc_format *fmt = NULL;
 	int rc = 0;
-	int i;
 	struct hfi_device *hdev;
+	int extra_idx = 0, i = 0;
+	struct hal_buffer_requirements *buff_req_buffer;
 
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
@@ -2779,6 +2460,7 @@
 	hdev = inst->core->device;
 
 	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
 		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
 			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
 			CAPTURE_PORT);
@@ -2793,9 +2475,6 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		fmt->num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -2804,11 +2483,45 @@
 
 		inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
 		inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
-		rc = msm_vidc_check_session_supported(inst);
+
+		rc = msm_comm_try_get_bufreqs(inst);
 		if (rc) {
 			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto exit;
+				"Failed to get buffer requirements: %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Get CAPTURE plane size from HW. This may change based on
+		 * settings like Slice delivery mode. HW should decide how
+		 * much it needs.
+		 */
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_OUTPUT);
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage = buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+
+		/*
+		 * Get CAPTURE plane Extradata size from HW. This may change
+		 * with the number of extradata types enabled. HW should
+		 * decide how much it needs.
+		 */
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_OUTPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
+
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
+			inst->bufq[fmt->type].plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		struct hal_frame_size frame_sz;
@@ -2816,13 +2529,6 @@
 		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
 		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto exit;
-		}
-
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width[OUTPUT_PORT];
 		frame_sz.height = inst->prop.height[OUTPUT_PORT];
@@ -2849,8 +2555,38 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		fmt->num_planes = inst->fmts[OUTPUT_PORT].num_planes;
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			inst->fmts[fmt->type].get_frame_size(0,
+			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Get OUTPUT plane Extradata size from HW. This may change
+		 * with the number of extradata types enabled. HW should
+		 * decide how much it needs.
+		 */
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_INPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
+
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
+			inst->bufq[fmt->type].plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
 
 		msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
 	} else {
@@ -2859,137 +2595,10 @@
 		rc = -EINVAL;
 		goto exit;
 	}
-
-	f->fmt.pix_mp.num_planes = fmt->num_planes;
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		struct hal_frame_size frame_sz = {0};
-		struct hal_buffer_requirements *bufreq = NULL;
-
-		frame_sz.width = inst->prop.width[CAPTURE_PORT];
-		frame_sz.height = inst->prop.height[CAPTURE_PORT];
-		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
-		rc = call_hfi_op(hdev, session_set_property, (void *)
-				inst->session, HAL_PARAM_FRAME_SIZE,
-				&frame_sz);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"Failed to set OUTPUT framesize\n");
-			goto exit;
-		}
-		rc = msm_comm_try_get_bufreqs(inst);
-		if (rc) {
-			dprintk(VIDC_WARN,
-				"%s : Getting buffer reqs failed: %d\n",
-					__func__, rc);
-			goto exit;
-		}
-		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		struct hal_buffer_requirements *bufreq = NULL;
-		int extra_idx = 0;
-
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
-			f->fmt.pix_mp.plane_fmt[i].sizeimage =
-				inst->fmts[fmt->type].get_frame_size(i,
-				f->fmt.pix_mp.height, f->fmt.pix_mp.width);
-		}
-		extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			bufreq = get_buff_req_buffer(inst,
-					HAL_BUFFER_EXTRADATA_INPUT);
-			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-				bufreq ? bufreq->buffer_size : 0;
-		}
-	}
 exit:
 	return rc;
 }
 
-int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
-{
-	const struct msm_vidc_format *fmt = NULL;
-	int rc = 0;
-	int i;
-	u32 height, width, num_planes;
-	unsigned int extra_idx = 0;
-	struct hal_buffer_requirements *bufreq = NULL;
-
-	if (!inst || !f) {
-		dprintk(VIDC_ERR,
-			"Invalid input, inst = %pK, format = %pK\n", inst, f);
-		return -EINVAL;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_WARN, "Getting buffer requirements failed: %d\n",
-				rc);
-		return rc;
-	}
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		fmt = &inst->fmts[CAPTURE_PORT];
-		height = inst->prop.height[CAPTURE_PORT];
-		width = inst->prop.width[CAPTURE_PORT];
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		fmt = &inst->fmts[OUTPUT_PORT];
-		height = inst->prop.height[OUTPUT_PORT];
-		width = inst->prop.width[OUTPUT_PORT];
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-	} else {
-		dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
-		return -ENOTSUPP;
-	}
-
-	f->fmt.pix_mp.pixelformat = fmt->fourcc;
-	f->fmt.pix_mp.height = height;
-	f->fmt.pix_mp.width = width;
-	f->fmt.pix_mp.num_planes = num_planes;
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		for (i = 0; i < num_planes; ++i) {
-			f->fmt.pix_mp.plane_fmt[i].sizeimage =
-				fmt->get_frame_size(i, height, width);
-		}
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		bufreq = get_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	}
-	extra_idx = EXTRADATA_IDX(num_planes);
-	if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-		if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-			bufreq = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_OUTPUT);
-		else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-			bufreq = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_INPUT);
-
-		f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	}
-
-	for (i = 0; i < num_planes; ++i) {
-		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-			inst->bufq[OUTPUT_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-		} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-		}
-	}
-
-	return rc;
-}
-
 int msm_venc_ctrl_init(struct msm_vidc_inst *inst,
 	const struct v4l2_ctrl_ops *ctrl_ops)
 {
diff --git a/drivers/media/platform/msm/vidc/msm_venc.h b/drivers/media/platform/msm/vidc/msm_venc.h
index 0bb7de77a..6fe1db3 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.h
+++ b/drivers/media/platform/msm/vidc/msm_venc.h
@@ -22,9 +22,7 @@
 	const struct v4l2_ctrl_ops *ctrl_ops);
 int msm_venc_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
 int msm_venc_s_fmt(void *instance, struct v4l2_format *f);
-int msm_venc_g_fmt(void *instance, struct v4l2_format *f);
 int msm_venc_s_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_venc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-struct vb2_ops *msm_venc_get_vb2q_ops(void);
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index e93b771..2d803bb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -187,15 +187,55 @@
 int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
 {
 	struct msm_vidc_inst *inst = instance;
+	int i, rc = 0, color_format = 0;
+	enum vidc_ports port;
+	u32 num_planes;
 
-	if (!inst || !f)
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, format = %pK\n", inst, f);
 		return -EINVAL;
+	}
+	if (inst->in_reconfig) {
+		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
+		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
+	}
 
-	if (inst->session_type == MSM_VIDC_DECODER)
-		return msm_vdec_g_fmt(instance, f);
-	else if (inst->session_type == MSM_VIDC_ENCODER)
-		return msm_venc_g_fmt(instance, f);
-	return -EINVAL;
+	port = f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+
+	f->fmt.pix_mp.pixelformat = inst->fmts[port].fourcc;
+	f->fmt.pix_mp.height = inst->prop.height[port];
+	f->fmt.pix_mp.width = inst->prop.width[port];
+	num_planes = f->fmt.pix_mp.num_planes = inst->bufq[port].num_planes;
+	for (i = 0; i < num_planes; ++i)
+		f->fmt.pix_mp.plane_fmt[i].sizeimage =
+			inst->bufq[port].plane_sizes[i];
+	switch (inst->fmts[port].fourcc) {
+	case V4L2_PIX_FMT_NV12:
+		color_format = COLOR_FMT_NV12;
+		break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+		color_format = COLOR_FMT_NV12_UBWC;
+		break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+		color_format = COLOR_FMT_NV12_BPP10_UBWC;
+		break;
+	default:
+		dprintk(VIDC_DBG,
+			"Invalid: g_fmt called on %s port with invalid fourcc 0x%x\n",
+			port == OUTPUT_PORT ? "OUTPUT" : "CAPTURE",
+			inst->fmts[port].fourcc);
+		goto exit;
+	}
+
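+	/*
+	 * Report the Venus Y-plane stride in bytesperline and the Y-plane
+	 * scanline count in reserved[0] so clients can derive the full
+	 * buffer layout for the selected color format.
+	 */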
+	f->fmt.pix_mp.plane_fmt[0].bytesperline = VENUS_Y_STRIDE(color_format,
+			inst->prop.width[port]);
+	f->fmt.pix_mp.plane_fmt[0].reserved[0] = VENUS_Y_SCANLINES(color_format,
+			inst->prop.height[port]);
+
+exit:
+	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_g_fmt);
 
@@ -457,11 +497,12 @@
 		return -EINVAL;
 }
 
-static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
+static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b,
 				struct msm_vidc_inst *inst)
 {
-	return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-		inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
+	enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+	return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC;
 }
 
 
@@ -518,7 +559,7 @@
 		}
 		mutex_lock(&inst->registeredbufs.lock);
 		temp = get_registered_buf(inst, b, i, &plane);
-		if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
+		if (temp && !is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_DBG,
 				"This memory region has already been prepared\n");
 			rc = 0;
@@ -526,7 +567,7 @@
 			goto exit;
 		}
 
-		if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
+		if (temp && is_dynamic_buffer_mode(b, inst) && !i) {
 			/*
 			 * Buffer is already present in registered list
 			 * increment ref_count, populate new values of v4l2
@@ -559,7 +600,7 @@
 		if (rc == 1) {
 			rc = 0;
 			goto exit;
-		} else if (rc == 2) {
+		} else if (rc >= 2) {
 			rc = -EEXIST;
 			goto exit;
 		}
@@ -589,7 +630,7 @@
 		}
 
 		/* We maintain one ref count for all planes*/
-		if (!i && is_dynamic_output_buffer_mode(b, inst)) {
+		if (!i && is_dynamic_buffer_mode(b, inst)) {
 			rc = buf_ref_get(inst, binfo);
 			if (rc < 0)
 				goto exit;
@@ -769,7 +810,7 @@
 								MAX_PORT_NUM;
 
 	return port != MAX_PORT_NUM &&
-		inst->fmts[port].num_planes == b->length;
+		inst->bufq[port].num_planes == b->length;
 }
 
 int msm_vidc_release_buffers(void *instance, int buffer_type)
@@ -834,7 +875,7 @@
 	rc = map_and_register_buf(inst, b);
 	if (rc == -EEXIST) {
 		if (atomic_read(&inst->in_flush) &&
-			is_dynamic_output_buffer_mode(b, inst)) {
+			is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_ERR,
 				"Flush in progress, do not hold any buffers in driver\n");
 			msm_comm_flush_dynamic_buffers(inst);
@@ -958,7 +999,7 @@
 		return rc;
 
 
-	if (is_dynamic_output_buffer_mode(b, inst)) {
+	if (is_dynamic_buffer_mode(b, inst)) {
 		buffer_info->dequeued = true;
 
 		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
@@ -1062,6 +1103,435 @@
 	.put_userptr = vidc_put_userptr,
 };
 
+
+static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb)
+{
+	int rc = 0;
+	struct buf_queue *q = NULL;
+	struct msm_vidc_inst *inst = NULL;
+
+	if (!vb) {
+		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK\n",
+			__func__, vb);
+		return;
+	}
+
+	inst = vb2_get_drv_priv(vb->vb2_queue);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s : Invalid inst pointer\n",
+			__func__);
+		return;
+	}
+
+	q = msm_comm_get_vb2q(inst, vb->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"%s : Failed to find buffer queue for type = %d\n",
+			__func__, vb->type);
+		return;
+	}
+
+	if (q->vb2_bufq.streaming) {
+		dprintk(VIDC_DBG, "%d PORT is streaming\n",
+			vb->type);
+		return;
+	}
+
+	rc = msm_vidc_release_buffers(inst,
+		vb->type);
+	if (rc)
+		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
+			__func__, rc);
+}
+
+static int set_buffer_count(struct msm_vidc_inst *inst,
+	int host_count, int act_count, enum hal_buffer type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_count_actual buf_count;
+
+	hdev = inst->core->device;
+
+	buf_count.buffer_type = type;
+	buf_count.buffer_count_actual = act_count;
+	buf_count.buffer_count_min_host = host_count;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set actual buffer count %d for buffer type %d\n",
+			act_count, type);
+	return rc;
+}
+
+static int msm_vidc_queue_setup(struct vb2_queue *q,
+	unsigned int *num_buffers, unsigned int *num_planes,
+	unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct msm_vidc_inst *inst;
+	int i, rc = 0;
+	struct hal_buffer_requirements *bufreq;
+	enum hal_buffer buffer_type;
+
+	if (!q || !num_buffers || !num_planes
+		|| !sizes || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK, %pK, %pK\n",
+			q, num_buffers, num_planes);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: {
+		bufreq = get_buff_req_buffer(inst,
+			HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+				HAL_BUFFER_INPUT);
+			return -EINVAL;
+		}
+		if (*num_buffers < bufreq->buffer_count_actual) {
+			dprintk(VIDC_ERR,
+				"Invalid parameters : Req = %d Act = %d\n",
+				*num_buffers, bufreq->buffer_count_actual);
+			return -EINVAL;
+		}
+		*num_planes = inst->bufq[OUTPUT_PORT].num_planes;
+		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
+			*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		for (i = 0; i < *num_planes; i++)
+			sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
+
+		bufreq->buffer_count_actual = *num_buffers;
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			*num_buffers, HAL_BUFFER_INPUT);
+		}
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
+		buffer_type = msm_comm_get_hal_output_buffer(inst);
+		bufreq = get_buff_req_buffer(inst,
+			buffer_type);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+				buffer_type);
+			return -EINVAL;
+		}
+		if (*num_buffers < bufreq->buffer_count_actual) {
+			dprintk(VIDC_ERR,
+				"Invalid parameters : Req = %d Act = %d\n",
+				*num_buffers, bufreq->buffer_count_actual);
+			return -EINVAL;
+		}
+		*num_planes = inst->bufq[CAPTURE_PORT].num_planes;
+		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
+			*num_buffers > MAX_NUM_CAPTURE_BUFFERS)
+			*num_buffers = MIN_NUM_CAPTURE_BUFFERS;
+
+		for (i = 0; i < *num_planes; i++)
+			sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
+
+		bufreq->buffer_count_actual = *num_buffers;
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			*num_buffers, buffer_type);
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static inline int msm_vidc_decide_core_and_power_mode(
+	struct msm_vidc_inst *inst)
+{
+	dprintk(VIDC_DBG,
+		"Core selection is not yet implemented for inst = %pK\n",
+			inst);
+	return 0;
+}
+static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		struct hal_buffer_requirements *req = &inst->buff_req.buffer[i];
+
+		dprintk(VIDC_DBG, "Verifying Buffer : %d\n", req->buffer_type);
+		if (!req ||
+			req->buffer_count_actual < req->buffer_count_min_host ||
+			req->buffer_count_min_host < req->buffer_count_min) {
+			dprintk(VIDC_ERR, "Invalid data : Counts mismatch\n");
+			dprintk(VIDC_ERR,
+				"Min Count = %d ", req->buffer_count_min);
+			dprintk(VIDC_ERR,
+				"Min Host Count = %d ",
+					req->buffer_count_min_host);
+			dprintk(VIDC_ERR,
+				"Actual Count = %d\n",
+					req->buffer_count_actual);
+			rc = -EINVAL;
+			break;
+		}
+	}
+	return rc;
+}
+
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_size_minimum b;
+	struct vb2_buf_entry *temp, *next;
+
+	hdev = inst->core->device;
+
+	/* Check if current session is under HW capability */
+	rc = msm_vidc_check_session_supported(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session is not supported %pK\n", inst);
+		goto fail_start;
+	}
+
+	/* Assign Core and LP mode for current session */
+	rc = msm_vidc_decide_core_and_power_mode(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session can't be submitted to HW %pK\n", inst);
+		goto fail_start;
+	}
+
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		b.buffer_type = HAL_BUFFER_OUTPUT2;
+	} else {
+		b.buffer_type = HAL_BUFFER_OUTPUT;
+	}
+
+	b.buffer_size = inst->bufq[CAPTURE_PORT].plane_sizes[0];
+	rc = call_hfi_op(hdev, session_set_property,
+			inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
+			&b);
+
+	rc = msm_comm_try_get_bufreqs(inst);
+
+	/* Check if current session is under HW capability */
+	rc = msm_vidc_verify_buffer_counts(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session has mismatched buffer counts %pK\n", inst);
+		goto fail_start;
+	}
+
+	rc = msm_comm_set_scratch_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_set_persist_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to set persist buffers: %d\n", rc);
+		goto fail_start;
+	}
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_set_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+	/*
+	 * For seq_changed_insufficient, driver should set session_continue
+	 * to firmware after the following sequence
+	 * - driver raises insufficient event to v4l2 client
+	 * - all output buffers have been flushed and freed
+	 * - v4l2 client queries buffer requirements and splits/combines OPB-DPB
+	 * - v4l2 client sets new set of buffers to firmware
+	 * - v4l2 client issues CONTINUE to firmware to resume decoding of
+	 *   submitted ETBs.
+	 */
+	if (inst->in_reconfig) {
+		dprintk(VIDC_DBG, "send session_continue after reconfig\n");
+		rc = call_hfi_op(hdev, session_continue,
+				(void *) inst->session);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s - failed to send session_continue\n",
+				__func__);
+			goto fail_start;
+		}
+	}
+	inst->in_reconfig = false;
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to start done state\n", inst);
+		goto fail_start;
+	}
+	msm_dcvs_init(inst);
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_queue_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to queue output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+fail_start:
+	if (rc) {
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(temp, next, &inst->pendingq.list,
+				list) {
+			vb2_buffer_done(temp->vb,
+					VB2_BUF_STATE_QUEUED);
+			list_del(&temp->list);
+			kfree(temp);
+		}
+		mutex_unlock(&inst->pendingq.lock);
+	}
+	return rc;
+}
+
+
+static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
+		q->type, inst);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		goto stream_start_failed;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Streamon failed on: %d capability for inst: %pK\n",
+			q->type, inst);
+		goto stream_start_failed;
+	}
+
+	rc = msm_comm_qbuf(inst, NULL);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
+				rc);
+		goto stream_start_failed;
+	}
+
+stream_start_failed:
+	return rc;
+}
+
+static inline int stop_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to state %d\n",
+				inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	return rc;
+}
+
+static void msm_vidc_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return;
+	}
+
+	inst = q->drv_priv;
+	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (!inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR,
+			"Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed STOP Streaming inst = %pK on cap = %d\n",
+			inst, q->type);
+}
+
+static void msm_vidc_buf_queue(struct vb2_buffer *vb)
+{
+	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+}
+
+static const struct vb2_ops msm_vidc_vb2q_ops = {
+	.queue_setup = msm_vidc_queue_setup,
+	.start_streaming = msm_vidc_start_streaming,
+	.buf_queue = msm_vidc_buf_queue,
+	.buf_cleanup = msm_vidc_cleanup_buffer,
+	.stop_streaming = msm_vidc_stop_streaming,
+};
+
 static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
 		enum v4l2_buf_type type, enum session_type sess)
 {
@@ -1079,11 +1549,8 @@
 	q->type = type;
 	q->io_modes = VB2_MMAP | VB2_USERPTR;
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	q->ops = &msm_vidc_vb2q_ops;
 
-	if (sess == MSM_VIDC_DECODER)
-		q->ops = msm_vdec_get_vb2q_ops();
-	else if (sess == MSM_VIDC_ENCODER)
-		q->ops = msm_venc_get_vb2q_ops();
 	q->mem_ops = &msm_vidc_vb2_mem_ops;
 	q->drv_priv = inst;
 	q->allow_zero_bytesused = 1;
@@ -1214,9 +1681,152 @@
 	return rc;
 }
 
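+/*
+ * Unlike set_buffer_count(), this helper programs the same value as both the
+ * host minimum and the actual count; it is used from the
+ * V4L2_CID_MIN_BUFFERS_FOR_OUTPUT/CAPTURE control handlers below.
+ */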
+static int set_actual_buffer_count(struct msm_vidc_inst *inst,
+	int count, enum hal_buffer type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_count_actual buf_count;
+
+	hdev = inst->core->device;
+
+	buf_count.buffer_type = type;
+	buf_count.buffer_count_min_host = count;
+	buf_count.buffer_count_actual = count;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL,
+		&buf_count);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set actual count %d for buffer type %d\n",
+			count, type);
+	return rc;
+}
+
+
+static int msm_vdec_get_count(struct msm_vidc_inst *inst,
+	struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hal_buffer_requirements *bufreq, *newreq;
+	enum hal_buffer buffer_type;
+
+	if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) {
+		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					HAL_BUFFER_INPUT);
+			return 0;
+		}
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming) {
+			ctrl->val = bufreq->buffer_count_min_host;
+			return 0;
+		}
+		if (ctrl->val > bufreq->buffer_count_min_host) {
+			dprintk(VIDC_DBG,
+				"Interesting : Usually shouldn't happen\n");
+			bufreq->buffer_count_min_host = ctrl->val;
+		}
+		rc = set_actual_buffer_count(inst, ctrl->val,
+			HAL_BUFFER_INPUT);
+		return rc;
+
+	} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
+		int count = 0;
+
+		buffer_type = msm_comm_get_hal_output_buffer(inst);
+		bufreq = get_buff_req_buffer(inst,
+			buffer_type);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					buffer_type);
+			return 0;
+		}
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming) {
+			if (ctrl->val != bufreq->buffer_count_min_host)
+				return -EINVAL;
+			else
+				return 0;
+		}
+		count = bufreq->buffer_count_min_host;
+
+		if (inst->in_reconfig) {
+			rc = msm_comm_try_get_bufreqs(inst);
+			newreq = get_buff_req_buffer(inst,
+				buffer_type);
+			if (!newreq) {
+				dprintk(VIDC_ERR,
+					"Failed to find new bufreqs = %d\n",
+					buffer_type);
+				return 0;
+			}
+			newreq->buffer_count_min_host = count =
+				newreq->buffer_count_min +
+				msm_dcvs_get_extra_buff_count(inst);
+		}
+		if (!inst->in_reconfig &&
+			inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
+			dprintk(VIDC_DBG, "Clients will correct this\n");
+			rc = set_actual_buffer_count(inst, ctrl->val,
+				buffer_type);
+			bufreq->buffer_count_min_host = ctrl->val;
+			return 0;
+		}
+		bufreq->buffer_count_min_host = ctrl->val = count;
+		rc = set_actual_buffer_count(inst, ctrl->val,
+			buffer_type);
+
+		return rc;
+	}
+	return -EINVAL;
+}
+
 static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
-	return 0;
+	int rc = 0;
+
+	/*
+	 * HACK: unlock the control prior to querying the hardware.  Otherwise
+	 * lower level code that attempts to do g_ctrl() will end up deadlocking
+	 * us.
+	 */
+	v4l2_ctrl_unlock(ctrl);
+
+	switch (ctrl->id) {
+
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+		ctrl->val = inst->profile;
+	break;
+
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+		ctrl->val = inst->level;
+	break;
+
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		ctrl->val = inst->entropy_mode;
+	break;
+
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		rc = msm_vdec_get_count(inst, ctrl);
+		break;
+	default:
+		/*
+		 * Other controls aren't really volatile, shouldn't need to
+		 * modify ctrl->value
+		 */
+		break;
+	}
+	v4l2_ctrl_lock(ctrl);
+
+	return rc;
 }
 
 static int msm_vidc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -1301,6 +1911,7 @@
 
 	INIT_MSM_VIDC_LIST(&inst->pendingq);
 	INIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	INIT_MSM_VIDC_LIST(&inst->freqs);
 	INIT_MSM_VIDC_LIST(&inst->persistbufs);
 	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
@@ -1311,8 +1922,9 @@
 	inst->session_type = session_type;
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
+	inst->freq = 0;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
-	inst->instant_bitrate = 0;
+	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
 	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -1341,7 +1953,6 @@
 	if (rc)
 		goto fail_bufq_capture;
 
-	msm_dcvs_init(inst);
 	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 			session_type);
 	if (rc) {
@@ -1427,6 +2038,8 @@
 		}
 		mutex_unlock(&inst->pendingq.lock);
 
+		msm_comm_free_freq_table(inst);
+
 		if (msm_comm_release_scratch_buffers(inst, false)) {
 			dprintk(VIDC_ERR,
 				"Failed to release scratch buffers\n");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index d891644..70427d3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,32 +16,250 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
-#define IS_VALID_DCVS_SESSION(__cur_mbpf, __min_mbpf) \
-		((__cur_mbpf) >= (__min_mbpf))
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst);
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst);
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd);
-
-int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+int msm_comm_vote_bus(struct msm_vidc_core *core)
 {
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
+	int rc = 0, vote_data_count = 0, i = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+	struct vidc_bus_vote_data *vote_data = NULL;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
-	inst->dcvs_mode = msm_dcvs_check_supported(inst);
-	return 0;
+
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
+				__func__, hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list)
+		++vote_data_count;
+
+	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
+			GFP_TEMPORARY);
+	if (!vote_data) {
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	list_for_each_entry(inst, &core->instances, list) {
+		int codec = 0, yuv = 0;
+
+		codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+		yuv = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[CAPTURE_PORT].fourcc :
+			inst->fmts[OUTPUT_PORT].fourcc;
+
+		vote_data[i].domain = get_hal_domain(inst->session_type);
+		vote_data[i].codec = get_hal_codec(codec);
+		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+
+		if (inst->operating_rate)
+			vote_data[i].fps = (inst->operating_rate >> 16) ?
+				inst->operating_rate >> 16 : 1;
+		else
+			vote_data[i].fps = inst->prop.fps;
+
+		/*
+		 * TODO: support for OBP-DBP split mode hasn't yet been
+		 * implemented; once it is, this part of the code needs to be
+		 * revisited, since passing accurate information to the bus
+		 * governor will drastically reduce bandwidth
+		 */
+		//vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
+		vote_data[i].num_formats = 1;
+		i++;
+	}
+	mutex_unlock(&core->lock);
+
+	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
+			vote_data_count);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
+
+	kfree(vote_data);
+	return rc;
+
+fail_alloc:
+	mutex_unlock(&core->lock);
+	return rc;
 }
 
+static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
+{
+	int fw_out_qsize = 0, buffers_in_driver = 0;
+
+	/*
+	 * DCVS always operates on uncompressed buffers:
+	 * FTBs for decoders and ETBs for encoders.
+	 */
+
+	if (inst->state >= MSM_VIDC_OPEN_DONE &&
+			inst->state < MSM_VIDC_STOP_DONE) {
+		if (inst->session_type == MSM_VIDC_DECODER)
+			fw_out_qsize = inst->count.ftb - inst->count.fbd;
+		else
+			fw_out_qsize = inst->count.etb - inst->count.ebd;
+
+		buffers_in_driver = inst->buffers_held_in_driver;
+	}
+
+	return fw_out_qsize + buffers_in_driver;
+}
+
+static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int fw_pending_bufs = 0;
+	int total_output_buf = 0;
+	int buffers_outside_fw = 0;
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!inst->dcvs_mode) {
+		dprintk(VIDC_DBG, "DCVS is not enabled\n");
+		return 0;
+	}
+
+	dcvs = &inst->dcvs;
+
+	core = inst->core;
+	mutex_lock(&inst->lock);
+	fw_pending_bufs = get_pending_bufs_fw(inst);
+
+	output_buf_req = get_buff_req_buffer(inst,
+			dcvs->buffer_type);
+	mutex_unlock(&inst->lock);
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR,
+				"%s: No buffer requirement for buffer type %x\n",
+				__func__, dcvs->buffer_type);
+		return -EINVAL;
+	}
+
+	/* Total number of output buffers */
+	total_output_buf = output_buf_req->buffer_count_actual;
+
+	/* Buffers outside FW are with display */
+	buffers_outside_fw = total_output_buf - fw_pending_bufs;
+	dprintk(VIDC_DBG,
+		"Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+		total_output_buf, fw_pending_bufs, buffers_outside_fw);
+
+	if (buffers_outside_fw >=  dcvs->min_threshold &&
+			dcvs->load > dcvs->load_low) {
+		dcvs->load = dcvs->load_low;
+	} else if (buffers_outside_fw < dcvs->min_threshold &&
+			dcvs->load == dcvs->load_low) {
+		dcvs->load = dcvs->load_high;
+	}
+	return rc;
+}
+
+static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
+	unsigned long freq, ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+	bool found = false;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr) {
+			temp->freq = freq;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+		if (temp) {
+			temp->freq = freq;
+			temp->device_addr = device_addr;
+			list_add_tail(&temp->list, &inst->freqs.list);
+		} else {
+			dprintk(VIDC_ERR,
+				"%s: failed to allocate freq entry\n", __func__);
+		}
+	}
+	mutex_unlock(&inst->freqs.lock);
+}
+
+// TODO: remove this later and use the queued_list instead
+
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr)
+			temp->freq = 0;
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	inst->dcvs.buffer_counter++;
+}
+
+
+static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp;
+	unsigned long freq = 0;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry(temp, &inst->freqs.list, list) {
+		freq = max(freq, temp->freq);
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	/* If current requirement is within DCVS limits, try DCVS. */
+
+	if (freq < inst->dcvs.load_high) {
+		dprintk(VIDC_DBG, "Calling DCVS now\n");
+		// TODO: calling DCVS here may reduce residency; revisit.
+		msm_dcvs_scale_clocks(inst);
+		freq = inst->dcvs.load;
+	}
+
+	return freq;
+}
+
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp, *next;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		list_del(&temp->list);
+		kfree(temp);
+	}
+	INIT_LIST_HEAD(&inst->freqs.list);
+	mutex_unlock(&inst->freqs.lock);
+}
+
+
 static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
 {
 	int height, width;
 
 	if (!inst->in_reconfig) {
 		height = max(inst->prop.height[CAPTURE_PORT],
-				inst->prop.height[OUTPUT_PORT]);
+			inst->prop.height[OUTPUT_PORT]);
 		width = max(inst->prop.width[CAPTURE_PORT],
-				inst->prop.width[OUTPUT_PORT]);
+			inst->prop.width[OUTPUT_PORT]);
 	} else {
 		height = inst->reconfig_height;
 		width = inst->reconfig_width;
@@ -50,31 +268,174 @@
 	return NUM_MBS_PER_FRAME(height, width);
 }
 
-static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core,
-	enum session_type session_type)
+static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
+	u32 filled_len)
 {
-	int active_instances = 0;
-	struct msm_vidc_inst *temp = NULL;
+	unsigned long freq = 0;
+	unsigned long vpp_cycles = 0, vsp_cycles = 0;
+	u32 vpp_cycles_per_mb;
+	u32 mbs_per_frame;
 
-	if (!core) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, core);
+	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+
+	/*
+	 * Calculate vpp and vsp cycles separately for encoder and decoder.
+	 * Even though most of this is common now, it may diverge between
+	 * them in the future.
+	 */
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			inst->entry->vpp_cycles;
+		vpp_cycles = mbs_per_frame * vpp_cycles_per_mb;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->bitrate * 10) / 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
+
+	} else {
+		// TODO return Min or Max ?
+		dprintk(VIDC_ERR, "%s: Unknown session type\n", __func__);
+		return freq;
+	}
+
+	freq = max(vpp_cycles, vsp_cycles);
+
+	return freq;
+}
+
+static int msm_vidc_set_clocks(struct msm_vidc_core *core)
+{
+	struct hfi_device *hdev;
+	unsigned long freq = 0, rate = 0;
+	struct msm_vidc_inst *temp = NULL;
+	int rc = 0, i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+
+	hdev = core->device;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	if (!hdev || !allowed_clks_tbl) {
+		dprintk(VIDC_ERR,
+			"%s Invalid parameters\n", __func__);
 		return -EINVAL;
 	}
 
-	/* DCVS condition is as following
-	 * Decoder DCVS : Only for ONE decoder session.
-	 * Encoder DCVS : Only for ONE encoder session + ONE decoder session
-	 */
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->state >= MSM_VIDC_OPEN_DONE &&
-			temp->state < MSM_VIDC_STOP_DONE &&
-			(temp->session_type == session_type ||
-			 temp->session_type == MSM_VIDC_ENCODER))
-			active_instances++;
+		freq += temp->freq;
+	}
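+	/*
+	 * Pick the lowest allowed rate that covers the aggregate frequency
+	 * requested by all instances; the table is assumed to be sorted from
+	 * the highest rate (index 0) down to the lowest.
+	 */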
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= freq)
+			break;
 	}
 	mutex_unlock(&core->lock);
-	return active_instances;
+
+	core->freq = rate;
+	dprintk(VIDC_PROF, "Voting for freq = %lu\n", freq);
+	rc = call_hfi_op(hdev, scale_clocks,
+			hdev->hfi_device_data, rate);
+
+	return rc;
+}
+
+static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu\n", freq);
+
+	return freq;
+}
+
+int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
+{
+	struct vb2_buf_entry *temp, *next;
+	unsigned long freq = 0;
+	u32 filled_len = 0;
+	ion_phys_addr_t device_addr = 0;
+
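+	/*
+	 * Run at the maximum rate until DCVS_FTB_WINDOW input buffers have
+	 * completed; after that, derive the requirement from the largest
+	 * pending input buffer and the per-buffer entries in inst->freqs.
+	 */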
+	if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
+		freq = msm_vidc_max_freq(inst);
+		goto decision_done;
+	}
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			filled_len = max(filled_len,
+				temp->vb->planes[0].bytesused);
+			device_addr = temp->vb->planes[0].m.userptr;
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
+	if (!filled_len || !device_addr) {
+		freq = inst->freq;
+		goto decision_done;
+	}
+
+	freq = msm_vidc_calc_freq(inst, filled_len);
+
+	msm_vidc_update_freq_entry(inst, freq, device_addr);
+
+	freq = msm_vidc_adjust_freq(inst);
+
+decision_done:
+	inst->freq = freq;
+	msm_vidc_set_clocks(inst->core);
+	return 0;
+}
+
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	hdev = core->device;
+
+	if (msm_comm_scale_clocks(inst)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale clocks. Performance might be impacted\n");
+	}
+	if (msm_comm_vote_bus(core)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale DDR bus. Performance might be impacted\n");
+	}
+	return 0;
+}
+
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+	if (inst->flags & VIDC_THUMBNAIL) {
+		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
+			inst);
+		return false;
+	}
+	inst->dcvs_mode = true;
+
+	// TODO : Update with proper number based on on-target tuning.
+	inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	return true;
 }
 
 static bool msm_dcvs_check_codec_supported(int fourcc,
@@ -104,90 +465,41 @@
 	return codec_type && session_type;
 }
 
-static void msm_dcvs_update_dcvs_params(int idx, struct msm_vidc_inst *inst)
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst)
 {
-	struct dcvs_stats *dcvs = NULL;
-	struct msm_vidc_platform_resources *res = NULL;
-	struct dcvs_table *table = NULL;
+	int rc = 0, j = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct clock_profile_entry *entry = NULL;
+	int fourcc;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
+	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
+	fourcc = inst->session_type == MSM_VIDC_DECODER ?
+		inst->fmts[OUTPUT_PORT].fourcc :
+		inst->fmts[CAPTURE_PORT].fourcc;
 
-	dcvs = &inst->dcvs;
-	res = &inst->core->resources;
-	table = res->dcvs_tbl;
+	for (j = 0; j < clk_freq_tbl->count; j++) {
+		bool matched = false;
 
-	dcvs->load_low = table[idx].load_low;
-	dcvs->load_high = table[idx].load_high;
-	dcvs->supported_codecs = table[idx].supported_codecs;
-}
+		entry = &clk_freq_tbl->clk_prof_entries[j];
 
-static void msm_dcvs_enc_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
+		matched = msm_dcvs_check_codec_supported(
+				fourcc,
+				entry->codec_mask,
+				inst->session_type);
 
-	if (inst->session_type == MSM_VIDC_ENCODER &&
-		msm_vidc_enc_dcvs_mode) {
-		rc = msm_dcvs_enc_scale_clocks(inst);
-		if (rc) {
-			dprintk(VIDC_DBG,
-				"ENC_DCVS: error while scaling clocks\n");
+		if (matched) {
+			inst->entry = entry;
+			break;
 		}
 	}
-}
 
-static void msm_dcvs_dec_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	if (inst->session_type == MSM_VIDC_DECODER &&
-		msm_vidc_dec_dcvs_mode) {
-		msm_dcvs_monitor_buffer(inst);
-		rc = msm_dcvs_dec_scale_clocks(inst, false);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"%s: Failed to scale clocks in DCVS: %d\n",
-					__func__, rc);
-		}
-	}
-}
-
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb)
-{
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	msm_dcvs_try_enable(inst);
-	if (!inst->dcvs_mode) {
-		dprintk(VIDC_DBG, "DCVS is not enabled\n");
-		return;
+	if (j == clk_freq_tbl->count) {
+		dprintk(VIDC_ERR,
+			"Failed : No matching clock entry found\n");
+		rc = -EINVAL;
 	}
 
-	if (is_etb)
-		msm_dcvs_enc_check_and_scale_clocks(inst);
-	else
-		msm_dcvs_dec_check_and_scale_clocks(inst);
-}
-
-static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
-{
-	int fw_out_qsize = 0, buffers_in_driver = 0;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state >= MSM_VIDC_OPEN_DONE &&
-		inst->state < MSM_VIDC_STOP_DONE) {
-		fw_out_qsize = inst->count.ftb - inst->count.fbd;
-		buffers_in_driver = inst->buffers_held_in_driver;
-	}
-
-	return fw_out_qsize + buffers_in_driver;
+	return rc;
 }
 
 static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
@@ -198,23 +510,18 @@
 		dcvs->load_high);
 
 	dprintk(VIDC_DBG,
-		"DCVS: ThrDispBufLow %d, ThrDispBufHigh %d\n",
-		dcvs->threshold_disp_buf_low,
-		dcvs->threshold_disp_buf_high);
-
-	dprintk(VIDC_DBG,
 		"DCVS: min_threshold %d, max_threshold %d\n",
 		dcvs->min_threshold, dcvs->max_threshold);
 }
 
-void msm_dcvs_init_load(struct msm_vidc_inst *inst)
+void msm_dcvs_init(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
+	int i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u64 total_freq = 0, rate = 0, load;
+	int cycles;
 	struct dcvs_stats *dcvs;
-	struct dcvs_table *table;
-	struct msm_vidc_platform_resources *res = NULL;
-	int i, num_rows, fourcc;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
@@ -225,414 +532,38 @@
 
 	core = inst->core;
 	dcvs = &inst->dcvs;
-	res = &core->resources;
-	dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-
-	num_rows = res->dcvs_tbl_size;
-	table = res->dcvs_tbl;
-
-	if (!num_rows || !table) {
-		dprintk(VIDC_ERR,
-				"%s: Dcvs table entry not found.\n", __func__);
-		return;
-	}
-
-	fourcc = inst->session_type == MSM_VIDC_DECODER ?
-				inst->fmts[OUTPUT_PORT].fourcc :
-				inst->fmts[CAPTURE_PORT].fourcc;
-
-	for (i = 0; i < num_rows; i++) {
-		bool matches = msm_dcvs_check_codec_supported(
-					fourcc,
-					table[i].supported_codecs,
-					inst->session_type);
-		if (!matches)
-			continue;
-
-		if (dcvs->load > table[i].load) {
-			msm_dcvs_update_dcvs_params(i, inst);
-			break;
-		}
-	}
-
-	if (inst->session_type == MSM_VIDC_ENCODER)
-		goto print_stats;
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return;
-	}
-
-	dcvs->transition_turbo = false;
-
-	/* calculating the min and max threshold */
-	if (output_buf_req->buffer_count_actual) {
-		dcvs->min_threshold = output_buf_req->buffer_count_actual -
-			output_buf_req->buffer_count_min -
-			msm_dcvs_get_extra_buff_count(inst) + 1;
-		dcvs->max_threshold = output_buf_req->buffer_count_actual;
-		if (dcvs->max_threshold <= dcvs->min_threshold)
-			dcvs->max_threshold =
-				dcvs->min_threshold + DCVS_BUFFER_SAFEGUARD;
-		dcvs->threshold_disp_buf_low = dcvs->min_threshold;
-		dcvs->threshold_disp_buf_high = dcvs->max_threshold;
-	}
-
-print_stats:
-	msm_dcvs_print_dcvs_stats(dcvs);
-}
-
-void msm_dcvs_init(struct msm_vidc_inst *inst)
-{
-	dprintk(VIDC_DBG, "Init DCVS Struct\n");
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-
-	inst->dcvs = (struct dcvs_stats){ {0} };
-	inst->dcvs.threshold_disp_buf_high = DCVS_NOMINAL_THRESHOLD;
-	inst->dcvs.threshold_disp_buf_low = DCVS_TURBO_THRESHOLD;
-}
-
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst)
-{
-	int new_ftb, i, prev_buf_count;
-	int fw_pending_bufs, total_output_buf, buffers_outside_fw;
-	struct dcvs_stats *dcvs;
-	struct hal_buffer_requirements *output_buf_req;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR, "%s : Get output buffer req failed %pK\n",
-			__func__, inst);
-		mutex_unlock(&inst->lock);
-		return;
-	}
-
-	total_output_buf = output_buf_req->buffer_count_actual;
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-	mutex_unlock(&inst->lock);
-
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-	dcvs->num_ftb[dcvs->ftb_index] = buffers_outside_fw;
-	dcvs->ftb_index = (dcvs->ftb_index + 1) % DCVS_FTB_WINDOW;
-
-	if (dcvs->ftb_counter < DCVS_FTB_WINDOW)
-		dcvs->ftb_counter++;
-
-	dprintk(VIDC_PROF,
-		"DCVS: ftb_counter %d\n", dcvs->ftb_counter);
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW) {
-		new_ftb = 0;
-		for (i = 0; i < dcvs->ftb_counter; i++) {
-			if (dcvs->num_ftb[i] > new_ftb)
-				new_ftb = dcvs->num_ftb[i];
-		}
-
-		dcvs->threshold_disp_buf_high = new_ftb;
-		if (dcvs->threshold_disp_buf_high <=
-			dcvs->threshold_disp_buf_low +
-			DCVS_BUFFER_SAFEGUARD) {
-			dcvs->threshold_disp_buf_high =
-				dcvs->threshold_disp_buf_low +
-				DCVS_BUFFER_SAFEGUARD
-				+ (DCVS_BUFFER_SAFEGUARD == 0);
-		}
-
-		dcvs->threshold_disp_buf_high =
-			clamp(dcvs->threshold_disp_buf_high,
-				dcvs->min_threshold,
-				dcvs->max_threshold);
-	}
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW &&
-			dcvs->load == dcvs->load_low) {
-		prev_buf_count =
-			dcvs->num_ftb[((dcvs->ftb_index - 2 +
-				DCVS_FTB_WINDOW) % DCVS_FTB_WINDOW)];
-		if (prev_buf_count == dcvs->threshold_disp_buf_low &&
-			buffers_outside_fw <= dcvs->threshold_disp_buf_low) {
-			dcvs->transition_turbo = true;
-		} else if (buffers_outside_fw > dcvs->threshold_disp_buf_low &&
-			(buffers_outside_fw -
-			 (prev_buf_count - buffers_outside_fw))
-			< dcvs->threshold_disp_buf_low){
-			dcvs->transition_turbo = true;
-		}
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_output_buf %d buffers_outside_fw %d load %d transition_turbo %d\n",
-		total_output_buf, buffers_outside_fw, dcvs->load_low,
-		dcvs->transition_turbo);
-}
-
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0, fw_pending_bufs = 0, total_input_buf = 0;
-	struct msm_vidc_core *core;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	total_input_buf = inst->buff_req.buffer[0].buffer_count_actual;
-	fw_pending_bufs = (inst->count.etb - inst->count.ebd);
-	mutex_unlock(&inst->lock);
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d\n",
-		total_input_buf, fw_pending_bufs);
-
-	if (dcvs->etb_counter < total_input_buf) {
-		dcvs->etb_counter++;
-		if (dcvs->etb_counter != total_input_buf)
-			return rc;
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d etb_counter %d  dcvs->load %d\n",
-		total_input_buf, fw_pending_bufs,
-		dcvs->etb_counter, dcvs->load);
-
-	if (fw_pending_bufs <= DCVS_ENC_LOW_THR &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-	} else {
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (fw_pending_bufs >= DCVS_ENC_HIGH_THR &&
-		dcvs->load <= dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-	} else {
-		dcvs->prev_freq_increased = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: (Scaling Clock %s)  etb clock set = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->prev_freq_lowered ? "Lower" : "Higher",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_PROF,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: etb clock load_old = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-	}
-
-	return rc;
-}
-
-
-/*
- * In DCVS scale_clocks will be done both in qbuf and FBD
- * 1 indicates call made from fbd that lowers clock
- * 0 indicates call made from qbuf that increases clock
- * based on DCVS algorithm
- */
-
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd)
-{
-	int rc = 0;
-	int fw_pending_bufs = 0;
-	int total_output_buf = 0;
-	int buffers_outside_fw = 0;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	mutex_lock(&inst->lock);
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-	mutex_unlock(&inst->lock);
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return -EINVAL;
-	}
-
-	/* Total number of output buffers */
-	total_output_buf = output_buf_req->buffer_count_actual;
-
-	/* Buffers outside FW are with display */
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-
-	if (buffers_outside_fw >= dcvs->threshold_disp_buf_high &&
-		!dcvs->prev_freq_increased &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-		dcvs->prev_freq_increased = false;
-	} else if (dcvs->transition_turbo && dcvs->load == dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-		dcvs->prev_freq_lowered = false;
-		dcvs->transition_turbo = false;
-	} else {
-		dcvs->prev_freq_increased = false;
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: clock set = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: clock old = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-	}
-	return rc;
-}
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
-{
-	int num_mbs_per_frame = 0, instance_count = 0;
-	long int instance_load = 0;
-	long int dcvs_limit = 0;
-	struct msm_vidc_inst *temp = NULL;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-	bool is_codec_supported = false;
-	bool is_dcvs_supported = true;
-	struct msm_vidc_platform_resources *res = NULL;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	res = &core->resources;
-
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_WARN,
-				"%s: dcvs limit table not found\n", __func__);
-		return false;
-	}
-	instance_count = msm_dcvs_count_active_instances(core,
-		inst->session_type);
-	num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
-	instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-	dcvs_limit =
-		(long int)res->dcvs_limit[inst->session_type].min_mbpf *
-		res->dcvs_limit[inst->session_type].fps;
-	inst->dcvs.extra_buffer_count = 0;
-
-	if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
-				res->dcvs_limit[inst->session_type].min_mbpf)) {
-		inst->dcvs.extra_buffer_count = 0;
-		is_dcvs_supported = false;
-		goto dcvs_decision_done;
-
-	}
-
-	if (inst->session_type == MSM_VIDC_DECODER) {
-		inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
-		output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-		if (!output_buf_req) {
-			dprintk(VIDC_ERR,
-					"%s: No buffer requirement for buffer type %x\n",
-					__func__, HAL_BUFFER_OUTPUT);
-			return false;
-		}
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[OUTPUT_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_dec_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-			instance_count > 1)
-			is_dcvs_supported = false;
-	}
+	inst->dcvs = (struct dcvs_stats){0};
+	load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
+	cycles = inst->entry->vpp_cycles;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	if (inst->session_type == MSM_VIDC_ENCODER) {
-		inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[CAPTURE_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_enc_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-				instance_count > 1)
-			is_dcvs_supported = false;
+		cycles = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			cycles;
+
+		dcvs->buffer_type = HAL_BUFFER_INPUT;
+		// TODO: update with the proper number based on buffer count changes.
+		dcvs->min_threshold = 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst);
+		// TODO: update with the proper number based on buffer count changes.
+		dcvs->min_threshold = 4;
+	} else {
+		return;
 	}
-dcvs_decision_done:
-	if (!is_dcvs_supported) {
-		msm_comm_scale_clocks(core);
-		if (instance_count > 1) {
-			mutex_lock(&core->lock);
-			list_for_each_entry(temp, &core->instances, list)
-				temp->dcvs_mode = false;
-			mutex_unlock(&core->lock);
-		}
+
+	total_freq = cycles * load;
+
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= total_freq)
+			break;
 	}
-	return is_dcvs_supported;
+
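+	/*
+	 * load_high is the lowest allowed rate that covers the session's
+	 * estimated cycle requirement; load_low is the next lower entry,
+	 * which DCVS falls back to while enough output buffers are held
+	 * outside the firmware.
+	 */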
+	dcvs->load = dcvs->load_high = rate;
+	dcvs->load_low = allowed_clks_tbl[i+1].clock_rate;
+
+	msm_dcvs_print_dcvs_stats(dcvs);
 }
 
 int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 383c27e1..0229ccbb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -32,9 +32,12 @@
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
 void msm_dcvs_init(struct msm_vidc_inst *inst);
-void msm_dcvs_init_load(struct msm_vidc_inst *inst);
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst);
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb);
 int  msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst);
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr);
+
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index a6490d1..4aaa525 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -773,103 +773,6 @@
 	return format;
 }
 
-static int msm_comm_vote_bus(struct msm_vidc_core *core)
-{
-	int rc = 0, vote_data_count = 0, i = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	struct vidc_bus_vote_data *vote_data = NULL;
-	unsigned long core_freq = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list)
-		++vote_data_count;
-
-	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
-			GFP_TEMPORARY);
-	if (!vote_data) {
-		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-		rc = -ENOMEM;
-		goto fail_alloc;
-	}
-
-	core_freq = call_hfi_op(hdev, get_core_clock_rate,
-			hdev->hfi_device_data, 0);
-
-	list_for_each_entry(inst, &core->instances, list) {
-		int codec = 0, yuv = 0;
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		yuv = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[CAPTURE_PORT].fourcc :
-			inst->fmts[OUTPUT_PORT].fourcc;
-
-		vote_data[i].domain = get_hal_domain(inst->session_type);
-		vote_data[i].codec = get_hal_codec(codec);
-		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-
-		if (inst->operating_rate)
-			vote_data[i].fps = (inst->operating_rate >> 16) ?
-				inst->operating_rate >> 16 : 1;
-		else
-			vote_data[i].fps = inst->prop.fps;
-
-		if (msm_comm_turbo_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_LOW;
-		else
-			vote_data[i].power_mode = VIDC_POWER_NORMAL;
-		if (i == 0) {
-			vote_data[i].imem_ab_tbl = core->resources.imem_ab_tbl;
-			vote_data[i].imem_ab_tbl_size =
-				core->resources.imem_ab_tbl_size;
-			vote_data[i].core_freq = core_freq;
-		}
-
-		/*
-		 * TODO: support for OBP-DBP split mode hasn't been yet
-		 * implemented, once it is, this part of code needs to be
-		 * revisited since passing in accurate information to the bus
-		 * governor will drastically reduce bandwidth
-		 */
-		vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
-		vote_data[i].num_formats = 1;
-		i++;
-	}
-	mutex_unlock(&core->lock);
-
-	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
-			vote_data_count);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
-
-	kfree(vote_data);
-	return rc;
-
-fail_alloc:
-	mutex_unlock(&core->lock);
-	return rc;
-}
-
 struct msm_vidc_core *get_vidc_core(int core_id)
 {
 	struct msm_vidc_core *core;
@@ -1622,9 +1525,6 @@
 		inst->prop.width[OUTPUT_PORT] = event_notify->width;
 	}
 
-	if (inst->session_type == MSM_VIDC_DECODER)
-		msm_dcvs_init_load(inst);
-
 	rc = msm_vidc_check_session_supported(inst);
 	if (!rc) {
 		seq_changed_event.type = event;
@@ -2163,6 +2063,43 @@
 	return vb;
 }
 
+static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
+		ion_phys_addr_t device_addr, u32 flags)
+{
+	struct buffer_info *binfo = NULL, *temp = NULL;
+
+	/*
+	 * Update reference count and release OR queue back the buffer,
+	 * only when firmware is not holding a reference.
+	 */
+	binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
+	if (!binfo) {
+		dprintk(VIDC_ERR,
+			"%s buffer not found in registered list\n",
+			__func__);
+		return;
+	}
+	if (flags & HAL_BUFFERFLAG_READONLY) {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+	} else {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry(temp, &inst->registeredbufs.list,
+				list) {
+			if (temp == binfo) {
+				buf_ref_put(inst, binfo);
+				break;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
+}
+
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
@@ -2182,6 +2119,9 @@
 		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
 		return;
 	}
+	if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC)
+		handle_dynamic_buffer(inst,
+			response->input_done.packet_buffer, 0);
 
 	vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
 			response->input_done.packet_buffer);
@@ -2221,6 +2161,8 @@
 			empty_buf_done->alloc_len, empty_buf_done->status,
 			empty_buf_done->picture_type, empty_buf_done->flags);
 
+		msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer);
+
 		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
@@ -2239,11 +2181,7 @@
 
 	atomic_inc(&binfo->ref_count);
 	cnt = atomic_read(&binfo->ref_count);
-	if (cnt > 2) {
-		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
-		cnt = -EINVAL;
-	}
-	if (cnt == 2)
+	if (cnt >= 2)
 		inst->buffers_held_in_driver++;
 
 	dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
@@ -2266,7 +2204,7 @@
 	dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
 	if (!cnt)
 		release_buf = true;
-	else if (cnt == 1)
+	else if (cnt >= 1)
 		qbuf_again = true;
 	else {
 		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
@@ -2297,45 +2235,6 @@
 	return cnt;
 }
 
-static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
-		ion_phys_addr_t device_addr, u32 flags)
-{
-	struct buffer_info *binfo = NULL, *temp = NULL;
-
-	/*
-	 * Update reference count and release OR queue back the buffer,
-	 * only when firmware is not holding a reference.
-	 */
-	if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC) {
-		binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
-		if (!binfo) {
-			dprintk(VIDC_ERR,
-				"%s buffer not found in registered list\n",
-				__func__);
-			return;
-		}
-		if (flags & HAL_BUFFERFLAG_READONLY) {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-		} else {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-
-			mutex_lock(&inst->registeredbufs.lock);
-			list_for_each_entry(temp, &inst->registeredbufs.list,
-							list) {
-				if (temp == binfo) {
-					buf_ref_put(inst, binfo);
-					break;
-				}
-			}
-			mutex_unlock(&inst->registeredbufs.lock);
-		}
-	}
-}
-
 static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
 		ion_phys_addr_t dev_addr)
 {
@@ -2386,7 +2285,7 @@
 	struct vidc_hal_fbd *fill_buf_done;
 	enum hal_buffer buffer_type;
 	int extra_idx = 0;
-	u64 time_nsec = 0;
+	u64 time_usec = 0;
 	struct vb2_v4l2_buffer *vbuf = NULL;
 
 	if (!response) {
@@ -2434,11 +2333,11 @@
 				vb->planes[0].length);
 		if (!(fill_buf_done->flags1 &
 			HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
-			time_nsec = fill_buf_done->timestamp_hi;
-			time_nsec = (time_nsec << 32) |
+			time_usec = fill_buf_done->timestamp_hi;
+			time_usec = (time_usec << 32) |
 				fill_buf_done->timestamp_lo;
 		} else {
-			time_nsec = 0;
+			time_usec = 0;
 			dprintk(VIDC_DBG,
 					"Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
 					&fill_buf_done->packet_buffer1,
@@ -2447,10 +2346,10 @@
 					fill_buf_done->timestamp_lo);
 		}
 		vbuf->flags = 0;
-		vb->timestamp = time_nsec;
+		vb->timestamp = (time_usec * NSEC_PER_USEC);
 
 		extra_idx =
-			EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
+			EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
 		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
 			vb->planes[extra_idx].m.userptr =
 				(unsigned long)fill_buf_done->extra_data_buffer;
@@ -2459,6 +2358,8 @@
 			vb->planes[extra_idx].data_offset = 0;
 		}
 
+		if (inst->buffer_mode_set[CAPTURE_PORT] ==
+			HAL_BUFFER_MODE_DYNAMIC)
 		handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
 					fill_buf_done->flags1);
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
@@ -2513,7 +2414,7 @@
 		dprintk(VIDC_DBG,
 		"Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n",
 		&fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1,
-		fill_buf_done->filled_len1, fill_buf_done->offset1, time_nsec,
+		fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec,
 		fill_buf_done->flags1, fill_buf_done->start_x_coord,
 		fill_buf_done->start_y_coord, fill_buf_done->frame_width,
 		fill_buf_done->frame_height, fill_buf_done->picture_type,
@@ -2641,127 +2542,6 @@
 	}
 }
 
-int msm_comm_scale_clocks(struct msm_vidc_core *core)
-{
-	int num_mbs_per_sec, enc_mbs_per_sec, dec_mbs_per_sec;
-
-	enc_mbs_per_sec =
-		msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS);
-	dec_mbs_per_sec	=
-		msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
-
-	if (enc_mbs_per_sec >= dec_mbs_per_sec) {
-	/*
-	 * If Encoder load is higher, use that load. Encoder votes for higher
-	 * clock. Since Encoder and Deocder run on parallel cores, this clock
-	 * should suffice decoder usecases.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec;
-	} else {
-	/*
-	 * If Decoder load is higher, it's tricky to decide clock. Decoder
-	 * higher load might results less clocks than Encoder smaller load.
-	 * At this point driver doesn't know which clock to vote. Hence use
-	 * total load.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec + dec_mbs_per_sec;
-	}
-
-	return msm_comm_scale_clocks_load(core, num_mbs_per_sec,
-			LOAD_CALC_NO_QUIRKS);
-}
-
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	unsigned long instant_bitrate = 0;
-	int num_sessions = 0;
-	struct vidc_clk_scale_data clk_scale_data = { {0} };
-	int codec = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		if (msm_comm_turbo_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_LOW;
-		else
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_NORMAL;
-
-		if (inst->dcvs_mode)
-			clk_scale_data.load[num_sessions] = inst->dcvs.load;
-		else
-			clk_scale_data.load[num_sessions] =
-				msm_comm_get_inst_load(inst, quirks);
-
-		clk_scale_data.session[num_sessions] =
-				VIDC_VOTE_DATA_SESSION_VAL(
-				get_hal_codec(codec),
-				get_hal_domain(inst->session_type));
-		num_sessions++;
-
-		if (inst->instant_bitrate > instant_bitrate)
-			instant_bitrate = inst->instant_bitrate;
-
-	}
-	clk_scale_data.num_sessions = num_sessions;
-	mutex_unlock(&core->lock);
-
-
-	rc = call_hfi_op(hdev, scale_clocks,
-		hdev->hfi_device_data, num_mbs_per_sec,
-		&clk_scale_data, instant_bitrate);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
-
-	return rc;
-}
-
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
-{
-	struct msm_vidc_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return;
-	}
-	core = inst->core;
-	hdev = core->device;
-
-	if (msm_comm_scale_clocks(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-	}
-	if (msm_comm_vote_bus(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale DDR bus. Performance might be impacted\n");
-	}
-}
-
 static inline enum msm_vidc_thermal_level msm_comm_vidc_thermal_level(int level)
 {
 	switch (level) {
@@ -2776,33 +2556,16 @@
 	}
 }
 
-static unsigned long msm_comm_get_clock_rate(struct msm_vidc_core *core)
-{
-	struct hfi_device *hdev;
-	unsigned long freq = 0;
-
-	if (!core || !core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = core->device;
-
-	freq = call_hfi_op(hdev, get_core_clock_rate, hdev->hfi_device_data, 1);
-	dprintk(VIDC_DBG, "clock freq %ld\n", freq);
-
-	return freq;
-}
-
 static bool is_core_turbo(struct msm_vidc_core *core, unsigned long freq)
 {
 	int i = 0;
-	struct msm_vidc_platform_resources *res = &core->resources;
-	struct load_freq_table *table = res->load_freq_tbl;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u32 max_freq = 0;
 
-	for (i = 0; i < res->load_freq_tbl_size; i++) {
-		if (max_freq < table[i].freq)
-			max_freq = table[i].freq;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
+		if (max_freq < allowed_clks_tbl[i].clock_rate)
+			max_freq = allowed_clks_tbl[i].clock_rate;
 	}
 	return freq >= max_freq;
 }
@@ -2824,7 +2587,7 @@
 	}
 
 	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
-	freq = msm_comm_get_clock_rate(core);
+	freq = core->freq;
 
 	is_turbo = is_core_turbo(core, freq);
 	dprintk(VIDC_DBG,
@@ -3042,6 +2805,8 @@
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT);
 	mutex_unlock(&core->lock);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
 	return rc;
 
 fail_core_init:
@@ -3135,6 +2900,8 @@
 		return -EINVAL;
 	}
 
+	msm_comm_init_clocks_and_bus_data(inst);
+
 	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
 			inst, get_hal_domain(inst->session_type),
 			get_hal_codec(fourcc),
@@ -3961,15 +3728,19 @@
 static void populate_frame_data(struct vidc_frame_data *data,
 		const struct vb2_buffer *vb, struct msm_vidc_inst *inst)
 {
+	u64 time_usec;
 	int extra_idx;
 	enum v4l2_buf_type type = vb->type;
 	enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
 		OUTPUT_PORT : CAPTURE_PORT;
 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 
+	time_usec = vb->timestamp;
+	do_div(time_usec, NSEC_PER_USEC);
+
 	data->alloc_len = vb->planes[0].length;
 	data->device_addr = vb->planes[0].m.userptr;
-	data->timestamp = vb->timestamp;
+	data->timestamp = time_usec;
 	data->flags = 0;
 	data->clnt_data = data->device_addr;
 
@@ -4005,7 +3776,7 @@
 		data->buffer_type = msm_comm_get_hal_output_buffer(inst);
 	}
 
-	extra_idx = EXTRADATA_IDX(inst->fmts[port].num_planes);
+	extra_idx = EXTRADATA_IDX(inst->bufq[port].num_planes);
 	if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
 			vb->planes[extra_idx].m.userptr) {
 		data->extradata_addr = vb->planes[extra_idx].m.userptr;
@@ -4061,6 +3832,7 @@
 static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data,
 		enum v4l2_buf_type type)
 {
+
 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
@@ -4068,13 +3840,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
 
-		if (msm_vidc_bitrate_clock_scaling &&
-			inst->session_type == MSM_VIDC_DECODER &&
-			!inst->dcvs_mode)
-			inst->instant_bitrate =
-				data->filled_len * 8 * inst->prop.fps;
-		else
-			inst->instant_bitrate = 0;
 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
@@ -4082,20 +3847,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
 	}
-
-	msm_dcvs_check_and_scale_clocks(inst,
-			type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
-	if (msm_vidc_bitrate_clock_scaling && !inst->dcvs_mode &&
-		type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
-		inst->session_type == MSM_VIDC_DECODER)
-		if (msm_comm_scale_clocks(inst->core))
-			dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-
-	if (msm_comm_vote_bus(inst->core))
-		dprintk(VIDC_WARN,
-			"Failed to scale bus. Performance might be impacted\n");
 }
 
 /*
@@ -4178,6 +3929,8 @@
 		return 0;
 	}
 
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
 	dprintk(VIDC_DBG, "%sing %d etbs and %d ftbs\n",
 			batch_mode ? "Batch" : "Process",
 			output_count, capture_count);
@@ -4850,20 +4603,22 @@
 	 * driver should not queue any new buffer it has been holding.
 	 *
 	 * Each dynamic o/p buffer can have one of following ref_count:
-	 * ref_count : 0 - f/w has released reference and sent fbd back.
-	 *		  The buffer has been returned back to client.
+	 * ref_count : 0   - f/w has released reference and sent dynamic
+	 *                   buffer back. The buffer has been returned
+	 *                   back to client.
 	 *
-	 * ref_count : 1 - f/w is holding reference. f/w may have released
-	 *                 fbd as read_only OR fbd is pending. f/w will
-	 *		  release reference before sending flush_done.
+	 * ref_count : 1   - f/w is holding reference. f/w may have released
+	 *                   dynamic buffer as read_only OR dynamic buffer is
+	 *                   pending. f/w will release reference before sending
+	 *                   flush_done.
 	 *
-	 * ref_count : 2 - f/w is holding reference, f/w has released fbd as
-	 *                 read_only, which client has queued back to driver.
-	 *                 driver holds this buffer and will queue back
-	 *                 only when f/w releases the reference. During
-	 *		  flush_done, f/w will release the reference but driver
-	 *		  should not queue back the buffer to f/w.
-	 *		  Flush all buffers with ref_count 2.
+	 * ref_count : >=2 - f/w is holding reference, f/w has released dynamic
+	 *                   buffer as read_only, which client has queued back
+	 *                   to driver. Driver holds this buffer and will queue
+	 *                   back only when f/w releases the reference. During
+	 *                   flush_done, f/w will release the reference but
+	 *                   driver should not queue back the buffer to f/w.
+	 *                   Flush all buffers with ref_count >= 2.
 	 */
 	mutex_lock(&inst->registeredbufs.lock);
 	if (!list_empty(&inst->registeredbufs.list)) {
@@ -4872,7 +4627,7 @@
 
 		list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
 			if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-				atomic_read(&binfo->ref_count) == 2) {
+				atomic_read(&binfo->ref_count) >= 2) {
 
 				atomic_dec(&binfo->ref_count);
 				buf_event.type =
@@ -4964,6 +4719,10 @@
 		return 0;
 	}
 
+	/* Finish the flush as soon as possible. */
+	inst->dcvs.buffer_counter = 0;
+	msm_comm_scale_clocks_and_bus(inst);
+
 	msm_comm_flush_dynamic_buffers(inst);
 
 	if (inst->state == MSM_VIDC_CORE_INVALID ||
@@ -5299,9 +5058,6 @@
 		return -ENOTSUPP;
 	}
 
-	if (!rc)
-		msm_dcvs_try_enable(inst);
-
 	if (!rc) {
 		if (inst->prop.width[CAPTURE_PORT] < capability->width.min ||
 			inst->prop.height[CAPTURE_PORT] <
@@ -5618,11 +5374,7 @@
 			if (rc)
 				dprintk(VIDC_WARN,
 					"Failed to set frame rate %d\n", rc);
-		} else {
-			msm_dcvs_init_load(inst);
 		}
-		msm_comm_scale_clocks_and_bus(inst);
-		msm_dcvs_try_enable(inst);
 	}
 exit:
 	return rc;
@@ -5706,8 +5458,7 @@
 	}
 	core = inst->core;
 
-	dprintk(VIDC_ERR, "Venus core frequency = %lu",
-		msm_comm_get_clock_rate(core));
+	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
 	mutex_lock(&core->lock);
 	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
 	msm_comm_print_inst_info(inst);
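Note on the timestamp hunks above: vb2 keeps buffer timestamps in nanoseconds, while the renamed time_usec variables indicate the firmware interface works in microseconds, so the patch divides with do_div() on the way down to the firmware and multiplies by NSEC_PER_USEC on the way back to vb2. A minimal sketch of that conversion pair (illustration only, not part of the patch):

	#include <linux/types.h>
	#include <linux/time64.h>
	#include <asm/div64.h>

	/* vb2 timestamp (ns) -> firmware timestamp (us) */
	static u64 example_ns_to_us(u64 ns)
	{
		do_div(ns, NSEC_PER_USEC);	/* in-place u64 / u32 divide */
		return ns;
	}

	/* firmware timestamp (us) -> vb2 timestamp (ns) */
	static u64 example_us_to_ns(u64 us)
	{
		return us * NSEC_PER_USEC;
	}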
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index d898682..39a28b3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -46,10 +46,6 @@
 int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
-int msm_comm_scale_clocks(struct msm_vidc_core *core);
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks);
 void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
@@ -101,5 +97,4 @@
 void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
-
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index f418260..15ee8a8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -50,7 +50,7 @@
 })
 
 #define DYNAMIC_BUF_OWNER(__binfo) ({ \
-	atomic_read(&__binfo->ref_count) == 2 ? "video driver" : "firmware";\
+	atomic_read(&__binfo->ref_count) >= 2 ? "video driver" : "firmware";\
 })
 
 static int core_info_open(struct inode *inode, struct file *file)
@@ -296,7 +296,7 @@
 		write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ?
 			"Output" : "Capture");
 		write_str(&dbg_buf, "name : %s\n", inst->fmts[i].name);
-		write_str(&dbg_buf, "planes : %d\n", inst->fmts[i].num_planes);
+		write_str(&dbg_buf, "planes : %d\n", inst->bufq[i].num_planes);
 		write_str(
 		&dbg_buf, "type: %s\n", inst->fmts[i].type == OUTPUT_PORT ?
 		"Output" : "Capture");
@@ -314,7 +314,7 @@
 		write_str(&dbg_buf, "count: %u\n",
 				inst->bufq[i].vb2_bufq.num_buffers);
 
-		for (j = 0; j < inst->fmts[i].num_planes; j++)
+		for (j = 0; j < inst->bufq[i].num_planes; j++)
 			write_str(&dbg_buf, "size for plane %d: %u\n", j,
 			inst->bufq[i].plane_sizes[j]);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 4a14ca3..8562e8f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -45,6 +45,11 @@
 #define MIN_SUPPORTED_WIDTH 32
 #define MIN_SUPPORTED_HEIGHT 32
 #define DEFAULT_FPS 15
+#define MIN_NUM_OUTPUT_BUFFERS 1
+#define MIN_NUM_CAPTURE_BUFFERS 1
+#define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME /* same as VB2_MAX_FRAME */
+#define MAX_NUM_CAPTURE_BUFFERS VIDEO_MAX_FRAME /* same as VB2_MAX_FRAME */
+
 
 /* Maintains the number of FTB's between each FBD over a window */
 #define DCVS_FTB_WINDOW 32
@@ -134,6 +139,12 @@
 	MAX_OWNER
 };
 
+struct vidc_freq_data {
+	struct list_head list;
+	ion_phys_addr_t device_addr;
+	unsigned long freq;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -145,7 +156,6 @@
 	char name[MAX_NAME_LENGTH];
 	u8 description[32];
 	u32 fourcc;
-	int num_planes;
 	int type;
 	u32 (*get_frame_size)(int plane, u32 height, u32 width);
 	bool defer_outputs;
@@ -175,7 +185,8 @@
 struct buf_queue {
 	struct vb2_queue vb2_bufq;
 	struct mutex lock;
-	unsigned int	plane_sizes[VB2_MAX_PLANES];
+	unsigned int plane_sizes[VB2_MAX_PLANES];
+	int num_planes;
 };
 
 enum profiling_points {
@@ -195,23 +206,14 @@
 };
 
 struct dcvs_stats {
-	int num_ftb[DCVS_FTB_WINDOW];
-	bool transition_turbo;
-	int ftb_index;
-	int ftb_counter;
-	bool prev_freq_lowered;
-	bool prev_freq_increased;
-	int threshold_disp_buf_high;
-	int threshold_disp_buf_low;
+	int buffer_counter;
 	int load;
 	int load_low;
 	int load_high;
 	int min_threshold;
 	int max_threshold;
-	int etb_counter;
-	bool is_power_save_mode;
 	unsigned int extra_buffer_count;
-	u32 supported_codecs;
+	enum hal_buffer buffer_type;
 };
 
 struct profile_data {
@@ -256,6 +258,7 @@
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	bool smmu_fault_handled;
+	unsigned long freq;
 };
 
 struct msm_vidc_inst {
@@ -269,6 +272,7 @@
 	struct msm_vidc_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct msm_vidc_list pendingq;
+	struct msm_vidc_list freqs;
 	struct msm_vidc_list scratchbufs;
 	struct msm_vidc_list persistbufs;
 	struct msm_vidc_list pending_getpropq;
@@ -297,7 +301,8 @@
 	bool dcvs_mode;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	unsigned long instant_bitrate;
+	unsigned long bitrate;
+	unsigned long freq;
 	u32 buffers_held_in_driver;
 	atomic_t in_flush;
 	u32 pic_struct;
@@ -306,6 +311,7 @@
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	struct clock_profile_entry *entry;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
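Note: the new inst->freqs list of struct vidc_freq_data entries presumably tracks a clock requirement per device address, and msm_vidc_clear_freq_entry(), called from the EBD path in msm_vidc_common.c above, is expected to drop the matching entry once the firmware returns the buffer. Its implementation lives outside these hunks; a minimal sketch of such a helper, with an illustrative name, could look like this:

	#include <linux/list.h>
	#include <linux/slab.h>

	/* hypothetical helper; the real one lives in the clock-scaling code */
	static void example_clear_freq_entry(struct msm_vidc_list *freqs,
			ion_phys_addr_t device_addr)
	{
		struct vidc_freq_data *temp, *next;

		mutex_lock(&freqs->lock);
		list_for_each_entry_safe(temp, next, &freqs->list, list) {
			if (temp->device_addr == device_addr) {
				list_del(&temp->list);
				kfree(temp);
			}
		}
		mutex_unlock(&freqs->lock);
	}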
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 97a625b..8b9018c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -90,24 +90,6 @@
 	res->pf_ver_tbl = NULL;
 }
 
-static inline void msm_vidc_free_freq_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->load_freq_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_limit(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_limit = NULL;
-}
-
 static inline void msm_vidc_free_imem_ab_table(
 		struct msm_vidc_platform_resources *res)
 {
@@ -168,10 +150,7 @@
 {
 	msm_vidc_free_clock_table(res);
 	msm_vidc_free_regulator_table(res);
-	msm_vidc_free_freq_table(res);
 	msm_vidc_free_platform_version_table(res);
-	msm_vidc_free_dcvs_table(res);
-	msm_vidc_free_dcvs_limit(res);
 	msm_vidc_free_cycles_per_mb_table(res);
 	msm_vidc_free_allowed_clocks_table(res);
 	msm_vidc_free_reg_table(res);
@@ -411,6 +390,14 @@
 	int rc = 0;
 	struct platform_device *pdev = res->pdev;
 
+	/* A comparator to compare clock rates (needed for sorting below) */
+	int cmp(const void *a, const void *b)
+	{
+		/* want to sort in reverse so flip the comparison */
+		return ((struct allowed_clock_rates_table *)b)->clock_rate -
+			((struct allowed_clock_rates_table *)a)->clock_rate;
+	}
+
 	if (!of_find_property(pdev->dev.of_node,
 			"qcom,allowed-clock-rates", NULL)) {
 		dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n");
@@ -428,6 +415,9 @@
 		return rc;
 	}
 
+	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+		 sizeof(*res->allowed_clks_tbl), cmp, NULL);
+
 	return 0;
 }
 
@@ -490,34 +480,51 @@
 		}
 		dprintk(VIDC_DBG, "codec_mask %#x\n", entry->codec_mask);
 
-		if (of_find_property(child_node, "qcom,cycles-per-mb", NULL)) {
+		if (of_find_property(child_node,
+				"qcom,vsp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,cycles-per-mb", &entry->cycles);
+					"qcom,vsp-cycles-per-mb",
+					&entry->vsp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,cycles-per-mb not found\n");
+					"qcom,vsp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->cycles = 0;
+			entry->vsp_cycles = 0;
 		}
-		dprintk(VIDC_DBG, "cycles_per_mb %d\n", entry->cycles);
+		dprintk(VIDC_DBG, "vsp cycles_per_mb %d\n", entry->vsp_cycles);
 
 		if (of_find_property(child_node,
-				"qcom,low-power-mode-factor", NULL)) {
+				"qcom,vpp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,low-power-mode-factor",
-					&entry->low_power_factor);
+					"qcom,vpp-cycles-per-mb",
+					&entry->vpp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,low-power-mode-factor not found\n");
+					"qcom,vpp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->low_power_factor = 0;
+			entry->vpp_cycles = 0;
+		}
+		dprintk(VIDC_DBG, "vpp cycles_per_mb %d\n", entry->vpp_cycles);
+
+		if (of_find_property(child_node,
+				"qcom,low-power-cycles-per-mb", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,low-power-cycles-per-mb",
+					&entry->low_power_cycles);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,low-power-cycles-per-mb not found\n");
+				goto error;
+			}
+		} else {
+			entry->low_power_cycles = 0;
 		}
 		dprintk(VIDC_DBG, "low_power_factor %d\n",
-				entry->low_power_factor);
+				entry->low_power_cycles);
 
 		i++;
 	}
@@ -526,155 +533,6 @@
 	return rc;
 }
 
-static int msm_vidc_load_freq_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	/* A comparator to compare loads (needed later on) */
-	int cmp(const void *a, const void *b)
-	{
-		/* want to sort in reverse so flip the comparison */
-		return ((struct load_freq_table *)b)->load -
-			((struct load_freq_table *)a)->load;
-	}
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,load-freq-tbl", NULL)) {
-		/*
-		 * qcom,load-freq-tbl is an optional property.  It likely won't
-		 * be present on cores that we can't clock scale on.
-		 */
-		dprintk(VIDC_DBG, "qcom,load-freq-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,load-freq-tbl");
-	num_elements /= sizeof(*res->load_freq_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in frequency table\n");
-		return rc;
-	}
-
-	res->load_freq_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->load_freq_tbl), GFP_KERNEL);
-	if (!res->load_freq_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc load_freq_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,load-freq-tbl", (u32 *)res->load_freq_tbl,
-		num_elements * sizeof(*res->load_freq_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read frequency table\n");
-		msm_vidc_free_freq_table(res);
-		return -EINVAL;
-	}
-
-	res->load_freq_tbl_size = num_elements;
-
-	/* The entries in the DT might not be sorted (for aesthetic purposes).
-	 * Given that we expect the loads in descending order for our scaling
-	 * logic to work, just sort it ourselves
-	 */
-	sort(res->load_freq_tbl, res->load_freq_tbl_size,
-			sizeof(*res->load_freq_tbl), cmp, NULL);
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-tbl", NULL)) {
-		/*
-		 * qcom,dcvs-tbl is an optional property. Incase qcom,dcvs-limit
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-tbl");
-	num_elements /= sizeof(*res->dcvs_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs table\n");
-		return rc;
-	}
-
-	res->dcvs_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_tbl), GFP_KERNEL);
-	if (!res->dcvs_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-tbl", (u32 *)res->dcvs_tbl,
-		num_elements * sizeof(*res->dcvs_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs table\n");
-		msm_vidc_free_dcvs_table(res);
-		return -EINVAL;
-	}
-	res->dcvs_tbl_size = num_elements;
-
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_limit(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-limit", NULL)) {
-		/*
-		 * qcom,dcvs-limit is an optional property. Incase qcom,dcvs-tbl
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-limit not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-limit");
-	num_elements /= sizeof(*res->dcvs_limit) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs limit\n");
-		res->dcvs_limit = NULL;
-		return rc;
-	}
-
-	res->dcvs_limit = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_limit), GFP_KERNEL);
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_limit\n",
-				__func__);
-		return -ENOMEM;
-	}
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-limit", (u32 *)res->dcvs_limit,
-		num_elements * sizeof(*res->dcvs_limit) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs limit\n");
-		msm_vidc_free_dcvs_limit(res);
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-
 static int msm_vidc_populate_bus(struct device *dev,
 		struct msm_vidc_platform_resources *res)
 {
@@ -952,11 +810,8 @@
 
 		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
 			vc->has_scaling = true;
-			vc->count = res->load_freq_tbl_size;
-			vc->load_freq_tbl = res->load_freq_tbl;
 		} else {
 			vc->count = 0;
-			vc->load_freq_tbl = NULL;
 			vc->has_scaling = false;
 		}
 
@@ -1016,7 +871,7 @@
 			&res->fw_name);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to read firmware name: %d\n", rc);
-		goto err_load_freq_table;
+		goto err_load_reg_table;
 	}
 	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
 
@@ -1029,20 +884,6 @@
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
 
-	rc = msm_vidc_load_freq_table(res);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to load freq table: %d\n", rc);
-		goto err_load_freq_table;
-	}
-
-	rc = msm_vidc_load_dcvs_table(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs table: %d\n", rc);
-
-	rc = msm_vidc_load_dcvs_limit(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs limit: %d\n", rc);
-
 	rc = msm_vidc_load_imem_ab_table(res);
 	if (rc)
 		dprintk(VIDC_WARN, "Failed to load freq table: %d\n", rc);
@@ -1157,8 +998,6 @@
 err_load_buffer_usage_table:
 	msm_vidc_free_reg_table(res);
 err_load_reg_table:
-	msm_vidc_free_freq_table(res);
-err_load_freq_table:
 	return rc;
 }
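Note on the allowed-clock-rates hunk above: the cmp() comparator is declared as a nested function, which depends on a GCC extension. A file-scope equivalent (a suggestion for illustration, not part of the patch) would be:

	/* descending order, same as the nested cmp() above */
	static int example_clock_rate_cmp_desc(const void *a, const void *b)
	{
		return ((struct allowed_clock_rates_table *)b)->clock_rate -
			((struct allowed_clock_rates_table *)a)->clock_rate;
	}

With the table sorted this way, index 0 holds the highest allowed rate, which the fallback in the reworked __scale_clocks() in venus_hfi.c picks up.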
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 4f152fb..8fd43006 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -24,12 +24,6 @@
 	u32 version_shift;
 };
 
-struct load_freq_table {
-	u32 load;
-	u32 freq;
-	u32 supported_codecs;
-};
-
 struct dcvs_table {
 	u32 load;
 	u32 load_low;
@@ -101,7 +95,6 @@
 struct clock_info {
 	const char *name;
 	struct clk *clk;
-	struct load_freq_table *load_freq_tbl;
 	u32 count;
 	bool has_scaling;
 	bool has_mem_retention;
@@ -142,8 +135,9 @@
 
 struct clock_profile_entry {
 	u32 codec_mask;
-	u32 cycles;
-	u32 low_power_factor;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
 };
 
 struct clock_freq_table {
@@ -160,8 +154,6 @@
 	struct allowed_clock_rates_table *allowed_clks_tbl;
 	u32 allowed_clks_tbl_size;
 	struct clock_freq_table clock_freq_tbl;
-	struct load_freq_table *load_freq_tbl;
-	uint32_t load_freq_tbl_size;
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index aabf2d3..74e360e 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -85,13 +85,11 @@
 static int __enable_regulators(struct venus_hfi_device *device);
 static inline int __prepare_enable_clks(struct venus_hfi_device *device);
 static inline void __disable_unprepare_clks(struct venus_hfi_device *device);
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate);
 static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet);
 static int __initialize_packetization(struct venus_hfi_device *device);
 static struct hal_session *__get_session(struct venus_hfi_device *device,
 		u32 session_id);
+static int __set_clocks(struct venus_hfi_device *device, u32 freq);
 static int __iface_cmdq_write(struct venus_hfi_device *device,
 					void *pkt);
 static int __load_fw(struct venus_hfi_device *device);
@@ -1137,162 +1135,6 @@
 	return rc;
 }
 
-static struct clock_info *__get_clock(struct venus_hfi_device *device,
-		char *name)
-{
-	struct clock_info *vc;
-
-	venus_hfi_for_each_clock(device, vc) {
-		if (!strcmp(vc->name, name))
-			return vc;
-	}
-
-	dprintk(VIDC_WARN, "%s Clock %s not found\n", __func__, name);
-
-	return NULL;
-}
-
-static unsigned long __get_clock_rate(struct clock_info *clock,
-	int num_mbs_per_sec, struct vidc_clk_scale_data *data)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	int i = 0, j = 0;
-	unsigned long instance_freq[VIDC_MAX_SESSIONS] = {0};
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-
-		/* When no data is given, vote for the highest frequency. */
-
-		freq = table[0].freq;
-		goto print_clk;
-	}
-
-	for (i = 0; i < num_rows; i++) {
-		if (num_mbs_per_sec > table[i].load)
-			break;
-		for (j = 0; j < data->num_sessions; j++) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			instance_freq[j] = table[i].freq;
-		}
-	}
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(instance_freq[i], max_freq);
-
-	freq = max_freq ? : freq;
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long __get_clock_rate_with_bitrate(struct clock_info *clock,
-		int num_mbs_per_sec, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	unsigned long base_freq, supported_clk[VIDC_MAX_SESSIONS] = {0};
-	int i, j;
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-		freq = table[num_rows - 1].freq;
-		goto print_clk;
-	}
-
-	/* Get clock rate based on current load only */
-	base_freq = __get_clock_rate(clock, num_mbs_per_sec, data);
-
-	/*
-	 * Supported bitrate = 40% of clock frequency
-	 * Check if the instant bitrate is supported by the base frequency.
-	 * If not, move on to the next frequency which supports the bitrate.
-	 */
-
-	for (j = 0; j < data->num_sessions; j++) {
-		unsigned long supported_bitrate = 0;
-
-		for (i = num_rows - 1; i >= 0; i--) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			freq = table[i].freq;
-
-			supported_bitrate = freq * 40/100;
-			/*
-			 * Store this frequency for each instance, we need
-			 * to select the maximum freq among all the instances.
-			 */
-			if (freq >= base_freq &&
-				supported_bitrate >= instant_bitrate) {
-				supported_clk[j] = freq;
-				break;
-			}
-		}
-
-		/*
-		 * Current bitrate is higher than max supported load.
-		 * Select max frequency to handle this load.
-		 */
-		if (i < 0)
-			supported_clk[j] = table[0].freq;
-	}
-
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(supported_clk[i], max_freq);
-
-	freq = max_freq ? : base_freq;
-
-	if (base_freq == freq)
-		dprintk(VIDC_DBG, "Stay at base freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Move up clock freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long venus_hfi_get_core_clock_rate(void *dev, bool actual_rate)
-{
-	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
-	struct clock_info *vc;
-
-	if (!device) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, device);
-		return -EINVAL;
-	}
-
-	if (actual_rate) {
-		vc = __get_clock(device, "core_clk");
-		if (vc)
-			return clk_get_rate(vc->clk);
-		else
-			return 0;
-	} else {
-		return device->scaled_rate;
-	}
-}
-
 static int venus_hfi_suspend(void *dev)
 {
 	int rc = 0;
@@ -1390,167 +1232,31 @@
 	return rc;
 }
 
-static int __scale_clocks_cycles_per_mb(struct venus_hfi_device *device,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
-{
-	int rc = 0, i = 0, j = 0;
-	struct clock_info *cl;
-	struct clock_freq_table *clk_freq_tbl = NULL;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	struct clock_profile_entry *entry = NULL;
-	u64 total_freq = 0, rate = 0;
-
-	clk_freq_tbl = &device->res->clock_freq_tbl;
-	allowed_clks_tbl = device->res->allowed_clks_tbl;
-
-	if (!data) {
-		dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
-		total_freq = device->clk_freq;
-		goto get_clock_freq;
-	}
-
-	device->clk_bitrate = instant_bitrate;
-
-	for (i = 0; i < data->num_sessions; i++) {
-		/*
-		 * for each active session iterate through all possible
-		 * sessions and get matching session's cycles per mb
-		 * from dtsi and multiply with the session's load to
-		 * get the frequency required for the session.
-		 * accumulate all session's frequencies to get the
-		 * total clock frequency.
-		 */
-		for (j = 0; j < clk_freq_tbl->count; j++) {
-			bool matched = false;
-			u64 freq = 0;
-
-			entry = &clk_freq_tbl->clk_prof_entries[j];
-
-			matched = __is_session_supported(entry->codec_mask,
-					data->session[i]);
-			if (!matched)
-				continue;
-
-			freq = entry->cycles * data->load[i];
-
-			if (data->power_mode[i] == VIDC_POWER_LOW &&
-					entry->low_power_factor) {
-				/* low_power_factor is in Q16 format */
-				freq = (freq * entry->low_power_factor) >> 16;
-			}
-
-			total_freq += freq;
-
-			dprintk(VIDC_DBG,
-				"session[%d] %#x: cycles (%d), load (%d), freq (%llu), factor (%d)\n",
-				i, data->session[i], entry->cycles,
-				data->load[i], freq,
-				entry->low_power_factor);
-		}
-	}
-
-get_clock_freq:
-	/*
-	 * get required clock rate from allowed clock rates table
-	 */
-	for (i = device->res->allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= total_freq)
-			break;
-	}
-
-	venus_hfi_for_each_clock(device, cl) {
-		if (!cl->has_scaling)
-			continue;
-
-		device->clk_freq = rate;
-		rc = clk_set_rate(cl->clk, rate);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: Failed to set clock rate %llu %s: %d\n",
-				__func__, rate, cl->name, rc);
-			return rc;
-		}
-		if (!strcmp(cl->name, "core_clk"))
-			device->scaled_rate = rate;
-
-		dprintk(VIDC_DBG,
-			"scaling clock %s to %llu (required freq %llu)\n",
-			cl->name, rate, total_freq);
-	}
-
-	return rc;
-}
-
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
+static int __set_clocks(struct venus_hfi_device *device, u32 freq)
 {
 	struct clock_info *cl;
-
-	device->clk_bitrate = instant_bitrate;
+	int rc = 0;
 
 	venus_hfi_for_each_clock(device, cl) {
-		if (cl->has_scaling) {
-
-			unsigned long rate = 0;
-			int rc;
-			/*
-			 * load_fw and power_on needs to be addressed.
-			 * differently. Below check enforces the same.
-			 */
-			if (!device->clk_bitrate && !data && !load &&
-				device->clk_freq)
-				rate = device->clk_freq;
-
-			if (!rate) {
-				if (!device->clk_bitrate)
-					rate = __get_clock_rate(cl, load,
-							data);
-				else
-					rate = __get_clock_rate_with_bitrate(cl,
-							load, data,
-							instant_bitrate);
-			}
-			device->clk_freq = rate;
-			rc = clk_set_rate(cl->clk, rate);
+		if (cl->has_scaling) {
+			device->clk_freq = freq;
+			rc = clk_set_rate(cl->clk, freq);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"Failed to set clock rate %lu %s: %d\n",
-					rate, cl->name, rc);
+					"%s: Failed to set clock rate %u %s: %d\n",
+					__func__, freq, cl->name, rc);
 				return rc;
 			}
 
-			if (!strcmp(cl->name, "core_clk"))
-				device->scaled_rate = rate;
-
-			dprintk(VIDC_PROF, "Scaling clock %s to %lu\n",
-					cl->name, rate);
+			dprintk(VIDC_PROF, "Scaling clock %s to %u\n",
+					cl->name, freq);
 		}
 	}
 
 	return 0;
 }
 
-static int __scale_clocks(struct venus_hfi_device *device,
-		int load, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int rc = 0;
-
-	if (device->res->clock_freq_tbl.clk_prof_entries &&
-			device->res->allowed_clks_tbl)
-		rc = __scale_clocks_cycles_per_mb(device,
-				data, instant_bitrate);
-	else if (device->res->load_freq_tbl)
-		rc = __scale_clocks_load(device, load, data, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Clock scaling is not supported\n");
-
-	return rc;
-}
-static int venus_hfi_scale_clocks(void *dev, int load,
-					struct vidc_clk_scale_data *data,
-					unsigned long instant_bitrate)
+static int venus_hfi_scale_clocks(void *dev, u32 freq)
 {
 	int rc = 0;
 	struct venus_hfi_device *device = dev;
@@ -1568,9 +1274,28 @@
 		goto exit;
 	}
 
-	rc = __scale_clocks(device, load, data, instant_bitrate);
+	rc = __set_clocks(device, freq);
 exit:
 	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __scale_clocks(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	clk_freq_tbl = &device->res->clock_freq_tbl;
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	dprintk(VIDC_DBG, "%s: using saved or default clock rate\n", __func__);
+	rate = device->clk_freq ? device->clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	rc = __set_clocks(device, rate);
 	return rc;
 }
 
@@ -3672,17 +3397,9 @@
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
-		int i = 0;
 
 		dprintk(VIDC_DBG, "%s: scalable? %d, count %d\n",
 				cl->name, cl->has_scaling, cl->count);
-		for (i = 0; i < cl->count; ++i) {
-			dprintk(VIDC_DBG,
-				"\tload = %d, freq = %d codecs supported %#x\n",
-				cl->load_freq_tbl[i].load,
-				cl->load_freq_tbl[i].freq,
-				cl->load_freq_tbl[i].supported_codecs);
-		}
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
@@ -4141,7 +3858,7 @@
 		goto fail_enable_clks;
 	}
 
-	rc = __scale_clocks(device, 0, NULL, 0);
+	rc = __scale_clocks(device);
 	if (rc) {
 		dprintk(VIDC_WARN,
 				"Failed to scale clocks, performance might be affected\n");
@@ -4624,7 +4341,6 @@
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
 	hdev->suspend = venus_hfi_suspend;
 	hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
-	hdev->get_core_clock_rate = venus_hfi_get_core_clock_rate;
 	hdev->get_default_properties = venus_hfi_get_default_properties;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 3267999..7caff53 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -251,6 +251,7 @@
 struct hfi_buffer_count_actual {
 	u32 buffer_type;
 	u32 buffer_count_actual;
+	u32 buffer_count_min_host;
 };
 
 struct hfi_buffer_size_minimum {
@@ -262,8 +263,8 @@
 	u32 buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
-	u32 buffer_hold_count;
 	u32 buffer_count_min;
+	u32 buffer_count_min_host;
 	u32 buffer_count_actual;
 	u32 contiguous;
 	u32 buffer_alignment;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 1bbb730..8375c2f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -220,7 +220,6 @@
 	HAL_PARAM_VENC_SEARCH_RANGE,
 	HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
 	HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
-	HAL_PARAM_VENC_H264_NAL_SVC_EXT,
 	HAL_CONFIG_VENC_PERF_MODE,
 	HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
 	HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
@@ -608,6 +607,7 @@
 struct hal_buffer_count_actual {
 	enum hal_buffer buffer_type;
 	u32 buffer_count_actual;
+	u32 buffer_count_min_host;
 };
 
 struct hal_buffer_size_minimum {
@@ -827,8 +827,8 @@
 	enum hal_buffer buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
-	u32 buffer_hold_count;
 	u32 buffer_count_min;
+	u32 buffer_count_min_host;
 	u32 buffer_count_actual;
 	u32 contiguous;
 	u32 buffer_alignment;
@@ -1529,9 +1529,7 @@
 	int (*session_set_property)(void *sess, enum hal_property ptype,
 			void *pdata);
 	int (*session_get_property)(void *sess, enum hal_property ptype);
-	int (*scale_clocks)(void *dev, int load,
-			struct vidc_clk_scale_data *data,
-			unsigned long instant_bitrate);
+	int (*scale_clocks)(void *dev, u32 freq);
 	int (*vote_bus)(void *dev, struct vidc_bus_vote_data *data,
 			int num_data);
 	int (*get_fw_info)(void *dev, struct hal_fw_info *fw_info);
@@ -1539,7 +1537,6 @@
 	int (*get_core_capabilities)(void *dev);
 	int (*suspend)(void *dev);
 	int (*flush_debug_queue)(void *dev);
-	unsigned long (*get_core_clock_rate)(void *dev, bool actual_rate);
 	enum hal_default_properties (*get_default_properties)(void *dev);
 };
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 8ea5472..ad2a336 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -273,8 +273,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
 #define HFI_PROPERTY_PARAM_VENC_NUMREF					\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
-#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT		\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
 #define HFI_PROPERTY_PARAM_VENC_LTRMODE		\
 	 (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
 #define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO	\
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d9c1f2f..aba7735 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1411,6 +1411,7 @@
 	int attr = 0;
 	int minor;
 	int rc;
+	u64 rc_type;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1496,14 +1497,18 @@
 			goto out_input;
 	}
 
+	rc_type = BIT_ULL(rc_map->rc_type);
+
 	if (dev->change_protocol) {
-		u64 rc_type = (1ll << rc_map->rc_type);
 		rc = dev->change_protocol(dev, &rc_type);
 		if (rc < 0)
 			goto out_raw;
 		dev->enabled_protocols = rc_type;
 	}
 
+	if (dev->driver_type == RC_DRIVER_IR_RAW)
+		ir_raw_load_modules(&rc_type);
+
 	/* Allow the RC sysfs nodes to be accessible */
 	atomic_set(&dev->initialized, 1);
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2c720cb..c3e6734 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -68,6 +68,7 @@
 struct dw2102_state {
 	u8 initialized;
 	u8 last_lock;
+	u8 data[MAX_XFER_SIZE + 4];
 	struct i2c_client *i2c_client_demod;
 	struct i2c_client *i2c_client_tuner;
 
@@ -662,62 +663,72 @@
 								int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
-	u8 obuf[0x40], ibuf[0x40];
+	struct dw2102_state *state;
 
 	if (!d)
 		return -ENODEV;
+
+	state = d->priv;
+
 	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
 		return -EAGAIN;
+	if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+		mutex_unlock(&d->i2c_mutex);
+		return -EAGAIN;
+	}
 
 	switch (num) {
 	case 1:
 		switch (msg[0].addr) {
 		case SU3000_STREAM_CTRL:
-			obuf[0] = msg[0].buf[0] + 0x36;
-			obuf[1] = 3;
-			obuf[2] = 0;
-			if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+			state->data[0] = msg[0].buf[0] + 0x36;
+			state->data[1] = 3;
+			state->data[2] = 0;
+			if (dvb_usb_generic_rw(d, state->data, 3,
+					state->data, 0, 0) < 0)
 				err("i2c transfer failed.");
 			break;
 		case DW2102_RC_QUERY:
-			obuf[0] = 0x10;
-			if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+			state->data[0] = 0x10;
+			if (dvb_usb_generic_rw(d, state->data, 1,
+					state->data, 2, 0) < 0)
 				err("i2c transfer failed.");
-			msg[0].buf[1] = ibuf[0];
-			msg[0].buf[0] = ibuf[1];
+			msg[0].buf[1] = state->data[0];
+			msg[0].buf[0] = state->data[1];
 			break;
 		default:
 			/* always i2c write*/
-			obuf[0] = 0x08;
-			obuf[1] = msg[0].addr;
-			obuf[2] = msg[0].len;
+			state->data[0] = 0x08;
+			state->data[1] = msg[0].addr;
+			state->data[2] = msg[0].len;
 
-			memcpy(&obuf[3], msg[0].buf, msg[0].len);
+			memcpy(&state->data[3], msg[0].buf, msg[0].len);
 
-			if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
-						ibuf, 1, 0) < 0)
+			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+						state->data, 1, 0) < 0)
 				err("i2c transfer failed.");
 
 		}
 		break;
 	case 2:
 		/* always i2c read */
-		obuf[0] = 0x09;
-		obuf[1] = msg[0].len;
-		obuf[2] = msg[1].len;
-		obuf[3] = msg[0].addr;
-		memcpy(&obuf[4], msg[0].buf, msg[0].len);
+		state->data[0] = 0x09;
+		state->data[1] = msg[0].len;
+		state->data[2] = msg[1].len;
+		state->data[3] = msg[0].addr;
+		memcpy(&state->data[4], msg[0].buf, msg[0].len);
 
-		if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
-					ibuf, msg[1].len + 1, 0) < 0)
+		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+					state->data, msg[1].len + 1, 0) < 0)
 			err("i2c transfer failed.");
 
-		memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+		memcpy(msg[1].buf, &state->data[1], msg[1].len);
 		break;
 	default:
 		warn("more than 2 i2c messages at a time is not handled yet.");
 		break;
 	}
+	mutex_unlock(&d->data_mutex);
 	mutex_unlock(&d->i2c_mutex);
 	return num;
 }
@@ -845,17 +856,23 @@
 static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
 {
 	struct dw2102_state *state = (struct dw2102_state *)d->priv;
-	u8 obuf[] = {0xde, 0};
+	int ret = 0;
 
 	info("%s: %d, initialized %d", __func__, i, state->initialized);
 
 	if (i && !state->initialized) {
+		mutex_lock(&d->data_mutex);
+
+		state->data[0] = 0xde;
+		state->data[1] = 0;
+
 		state->initialized = 1;
 		/* reset board */
-		return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+		ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+		mutex_unlock(&d->data_mutex);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1310,49 +1327,57 @@
 	return 0;
 }
 
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[3] = { 0xe, 0x80, 0 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x02;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x02;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 	msleep(300);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-					&d->dev->i2c_adap);
-	if (d->fe_adap[0].fe == NULL)
+	mutex_unlock(&d->data_mutex);
+
+	adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+					&d->i2c_adap);
+	if (adap->fe_adap[0].fe == NULL)
 		return -EIO;
 
-	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
 				&dw2104_ts2020_config,
-				&d->dev->i2c_adap)) {
+				&d->i2c_adap)) {
 		info("Attached DS3000/TS2020!");
 		return 0;
 	}
@@ -1361,47 +1386,55 @@
 	return -EIO;
 }
 
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[3] = { 0xe, 0x87, 0 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x87;
+	state->data[2] = 0x0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x86;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x86;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x80;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
 	msleep(50);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x80;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-					&d->dev->i2c_adap, NULL);
-	if (d->fe_adap[0].fe != NULL) {
-		if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
-					&d->dev->i2c_adap, &tda18271_config)) {
+	mutex_unlock(&d->data_mutex);
+
+	adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+					&d->i2c_adap, NULL);
+	if (adap->fe_adap[0].fe != NULL) {
+		if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+					&d->i2c_adap, &tda18271_config)) {
 			info("Attached TDA18271HD/CXD2820R!");
 			return 0;
 		}
@@ -1411,23 +1444,30 @@
 	return -EIO;
 }
 
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[] = { 0x51 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0x51;
+
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-					&d->dev->i2c_adap);
+	mutex_unlock(&d->data_mutex);
 
-	if (d->fe_adap[0].fe == NULL)
+	adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+					&s421_m88rs2000_config,
+					&d->i2c_adap);
+
+	if (adap->fe_adap[0].fe == NULL)
 		return -EIO;
 
-	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
 				&dw2104_ts2020_config,
-				&d->dev->i2c_adap)) {
+				&d->i2c_adap)) {
 		info("Attached RS2000/TS2020!");
 		return 0;
 	}
@@ -1440,44 +1480,50 @@
 {
 	struct dvb_usb_device *d = adap->dev;
 	struct dw2102_state *state = d->priv;
-	u8 obuf[3] = { 0xe, 0x80, 0 };
-	u8 ibuf[] = { 0 };
 	struct i2c_adapter *i2c_adapter;
 	struct i2c_client *client;
 	struct i2c_board_info board_info;
 	struct m88ds3103_platform_data m88ds3103_pdata = {};
 	struct ts2020_config ts2020_config = {};
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0x0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x02;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x02;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 	msleep(300);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
+	mutex_unlock(&d->data_mutex);
+
 	/* attach demod */
 	m88ds3103_pdata.clk = 27000000;
 	m88ds3103_pdata.i2c_wr_max = 33;
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f9fa3fa..2051f28 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -139,15 +139,13 @@
 		}
 
 		msp_maps[i].bankwidth = 1;
-		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+		msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
 		if (!msp_maps[i].name) {
 			iounmap(msp_maps[i].virt);
 			kfree(msp_parts[i]);
 			goto cleanup_loop;
 		}
 
-		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
-
 		for (j = 0; j < pcnt; j++) {
 			part_name[5] = '0' + i;
 			part_name[7] = '0' + j;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 5370909..08d91ef 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@
 		priv->old_link = 0;
 		priv->old_duplex = -1;
 		priv->old_pause = -1;
+	} else {
+		phydev = NULL;
 	}
 
 	/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@
 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
 			 ENETDMAC_IRMASK, priv->tx_chan);
 
-	if (priv->has_phy)
+	if (phydev)
 		phy_start(phydev);
 	else
 		bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@
 	free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-	if (priv->has_phy)
+	if (phydev)
 		phy_disconnect(phydev);
 
 	return ret;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 28097be..5127b7e 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1211,7 +1211,7 @@
 		goto fail_alloc;
 	}
 
-#warning FIXME: unhardcode gpio&reset bits
+	/* FIXME: unhardcode gpio&reset bits */
 	ar7_gpio_disable(26);
 	ar7_gpio_disable(27);
 	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3a035e07..087a218 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2173,6 +2173,7 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
 		quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 069f0a2..51c930a 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -74,6 +74,10 @@
 
 static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
 
+static int ipa_uc_ntn_cons_release(void);
+static int ipa_uc_ntn_cons_request(void);
+static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
+
 static int ipa_commit_partial_hdr(
 	struct ipa_ioc_add_hdr *hdr,
 	const char *netdev_name,
@@ -115,16 +119,37 @@
 	struct ipa_uc_offload_out_params *outp,
 	struct ipa_uc_offload_ctx *ntn_ctx)
 {
-	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_ioc_add_hdr *hdr = NULL;
 	struct ipa_tx_intf tx;
 	struct ipa_rx_intf rx;
 	struct ipa_ioc_tx_intf_prop tx_prop[2];
 	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	struct ipa_rm_create_params param;
 	u32 len;
 	int ret = 0;
 
 	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
 					 inp->netdev_name);
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	param.reg_params.user_data = ntn_ctx;
+	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+	param.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&param);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	param.request_resource = ipa_uc_ntn_cons_request;
+	param.release_resource = ipa_uc_ntn_cons_release;
+	ret = ipa_rm_create_resource(&param);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+		goto fail_create_rm_cons;
+	}
 
 	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
 	ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
@@ -136,7 +161,8 @@
 	hdr = kzalloc(len, GFP_KERNEL);
 	if (hdr == NULL) {
 		IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail_alloc;
 	}
 
 	if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
@@ -197,8 +223,15 @@
 	init_completion(&ntn_ctx->ntn_completion);
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
 
+	kfree(hdr);
+	return ret;
+
 fail:
 	kfree(hdr);
+fail_alloc:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 	return ret;
 }
 
@@ -301,9 +334,10 @@
 			struct ipa_ntn_conn_out_params *outp,
 			struct ipa_uc_offload_ctx *ntn_ctx)
 {
-	struct ipa_rm_create_params param;
 	int result = 0;
+	enum ipa_uc_offload_state prev_state;
 
+	prev_state = ntn_ctx->state;
 	if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
 		inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
 		IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
@@ -315,42 +349,13 @@
 		return -EINVAL;
 	}
 
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
-	param.reg_params.user_data = ntn_ctx;
-	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
-	param.floor_voltage = IPA_VOLTAGE_SVS;
-	result = ipa_rm_create_resource(&param);
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
 	if (result) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
-		return -EFAULT;
+		IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result);
+		return result;
 	}
 
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
-	param.request_resource = ipa_uc_ntn_cons_request;
-	param.release_resource = ipa_uc_ntn_cons_release;
-	result = ipa_rm_create_resource(&param);
-	if (result) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
-		goto fail_create_rm_cons;
-	}
-
-	if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
-		ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
-		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
 	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 	if (result == -EINPROGRESS) {
 		if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
@@ -365,13 +370,22 @@
 		goto fail;
 	}
 
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+	result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+		ntn_ctx->priv, ntn_ctx->hdr_len, outp);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
+				result);
+		ntn_ctx->state = prev_state;
+		result = -EFAULT;
+		goto fail;
+	}
+
 	return 0;
 
 fail:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-fail_create_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
 	return result;
 }
 
@@ -399,7 +413,8 @@
 		return -EINVAL;
 	}
 
-	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED &&
+		offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
 		IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
 		return -EPERM;
 	}
@@ -454,32 +469,34 @@
 static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 {
 	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+	int ret = 0;
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
-	if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-				IPA_RM_RESOURCE_APPS_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to release ODU_ADAPT_PROD res: %d\n",
+						  ret);
 		return -EFAULT;
 	}
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
-		return -EFAULT;
-	}
-
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to del dep ODU->APPS, %d\n", ret);
 		return -EFAULT;
 	}
 
 	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
 	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
-	if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
-		IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+	ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
+						 ret);
 		return -EFAULT;
 	}
 
-	return 0;
+	return ret;
 }
 
 int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
@@ -524,6 +541,16 @@
 	int len, result = 0;
 	struct ipa_ioc_del_hdr *hdr;
 
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+		return -EFAULT;
+	}
+
 	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
 	hdr = kzalloc(len, GFP_KERNEL);
 	if (hdr == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
index 54cad88..e10c75a 100644
--- a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -227,7 +227,7 @@
 	if (ipa_rm_dep_graph_get_resource(graph,
 					  resource_name,
 					  &dependent)) {
-		IPA_RM_ERR("%s does not exist\n",
+		IPA_RM_DBG("%s does not exist\n",
 					ipa_rm_resource_str(resource_name));
 		result = -EINVAL;
 		goto bail;
@@ -236,7 +236,7 @@
 	if (ipa_rm_dep_graph_get_resource(graph,
 					  depends_on_name,
 					  &dependency)) {
-		IPA_RM_ERR("%s does not exist\n",
+		IPA_RM_DBG("%s does not exist\n",
 					ipa_rm_resource_str(depends_on_name));
 		result = -EINVAL;
 		goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index ad5b799..e474a40 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1463,18 +1463,18 @@
 
 	mutex_lock(&ipa_ctx->lock);
 	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
 			&ep->dflt_flt4_rule_hdl);
 	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+	tbl->sticky_rear = true;
 
 	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
 			&ep->dflt_flt6_rule_hdl);
 	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+	tbl->sticky_rear = true;
 	mutex_unlock(&ipa_ctx->lock);
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 00d52d0..6f59ebd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -299,12 +299,6 @@
 	/* setup ul ep cfg */
 	ep_ul->valid = 1;
 	ep_ul->client = in->ul.client;
-	result = ipa_enable_data_path(ipa_ep_idx_ul);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_ul);
-		return -EFAULT;
-	}
 	ep_ul->client_notify = notify;
 	ep_ul->priv = priv;
 
@@ -333,14 +327,6 @@
 	/* setup dl ep cfg */
 	ep_dl->valid = 1;
 	ep_dl->client = in->dl.client;
-	result = ipa_enable_data_path(ipa_ep_idx_dl);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_dl);
-		result = -EFAULT;
-		goto fail;
-	}
-
 	memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
 	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
 	ep_dl->cfg.hdr.hdr_len = hdr_len;
@@ -359,9 +345,16 @@
 	}
 	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
 	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+
+	result = ipa_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("Enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
 	IPAERR("client %d (ep: %d) connected\n", in->dl.client,
 		ipa_ep_idx_dl);
-	ipa_inc_acquire_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
 
 fail:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -403,11 +396,31 @@
 	}
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	/* teardown the UL pipe */
 	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
 	cmd_data->protocol = IPA_HW_FEATURE_NTN;
-
 	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+
+	/* teardown the DL pipe */
+	ipa_disable_data_path(ipa_ep_idx_dl);
+	/*
+	 * Reset the ep before sending the cmd; otherwise a disconnect
+	 * during data transfer will result in an enormous number of
+	 * suspend interrupts
+	 */
+	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* teardown the UL pipe */
 	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
 	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
@@ -418,27 +431,11 @@
 		result = -EFAULT;
 		goto fail;
 	}
-	ipa_disable_data_path(ipa_ep_idx_ul);
+
 	ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
 	memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
 	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
 
-	/* teardown the DL pipe */
-	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
-	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
-				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		IPAERR("fail to tear down ul pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa_disable_data_path(ipa_ep_idx_dl);
-	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
-	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
-	ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
-
 fail:
 	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 560ffda..db732c5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1072,6 +1072,8 @@
 		IPAWANDBG
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1113,6 +1115,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1128,7 +1132,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 57d31ee..20b73d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2934,14 +2934,17 @@
 	}
 
 	/* LAN OUT (AP->IPA) */
-	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
-	sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
-	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
-	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
-	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_out)) {
-		IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
-		result = -EPERM;
-		goto fail_lan_data_out;
+	if (!ipa3_ctx->ipa_config_is_mhi) {
+		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
+		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+		if (ipa3_setup_sys_pipe(&sys_in,
+			&ipa3_ctx->clnt_hdl_data_out)) {
+			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
+			result = -EPERM;
+			goto fail_lan_data_out;
+		}
 	}
 
 	return 0;
@@ -2962,7 +2965,8 @@
 
 static void ipa3_teardown_apps_pipes(void)
 {
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+	if (!ipa3_ctx->ipa_config_is_mhi)
+		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
 	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
@@ -3889,6 +3893,10 @@
  * @pdev:	The platform device structure representing the IPA driver
  *
  * Function initialization process:
+ * - Initialize endpoints bitmaps
+ * - Initialize resource groups min and max values
+ * - Initialize filtering lists heads and idr
+ * - Initialize interrupts
  * - Register GSI
  * - Setup APPS pipes
  * - Initialize tethering bridge
@@ -3906,6 +3914,61 @@
 	int result;
 	struct gsi_per_props gsi_props;
 	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+	struct ipa3_flt_tbl *flt_tbl;
+	int i;
+
+	/*
+	 * The indication of whether we are working in MHI or non-MHI config
+	 * is given in ipa3_write, which runs before ipa3_post_init, i.e. from
+	 * this point it is safe to use the ipa3_ep_mapping array and the
+	 * correct entry will be returned by ipa3_get_hw_type_index()
+	 */
+	ipa_init_ep_flt_bitmap();
+	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+	/* Assign resource limitation to each group */
+	ipa3_set_resorce_groups_min_max_limits();
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_init_interrupts();
+		if (result) {
+			IPAERR("ipa initialization of interrupts failed\n");
+			result = -ENODEV;
+			goto fail_register_device;
+		}
+	} else {
+		IPADBG("Initialization of ipa interrupts skipped\n");
+	}
+
+	/*
+	 * IPAv3.5 and above requires prefetch to be disabled for USB in
+	 * order to allow MBIM to work; currently MBIM is not needed in
+	 * MHI mode.
+	 */
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
+		(!ipa3_ctx->ipa_config_is_mhi))
+		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
 
 	memset(&gsi_props, 0, sizeof(gsi_props));
 	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
@@ -4103,10 +4166,19 @@
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	if (ipa3_is_msm_device())
+	if (ipa3_is_msm_device()) {
 		result = ipa3_trigger_fw_loading_msms();
-	else
+	} else {
+		if (!strcasecmp(dbg_buff, "MHI")) {
+			ipa3_ctx->ipa_config_is_mhi = true;
+			pr_info(
+			"IPA is loading with MHI configuration\n");
+		} else {
+			pr_info(
+			"IPA is loading with non MHI configuration\n");
+		}
 		result = ipa3_trigger_fw_loading_mdms();
+	}
 	/* No IPAv3.x chipsets that don't support FW loading */
 
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -4176,35 +4248,34 @@
 * @pdev:	The platform device structure representing the IPA driver
 *
 * Function initialization process:
-* - Allocate memory for the driver context data struct
-* - Initializing the ipa3_ctx with:
+* Allocate memory for the driver context data struct
+* Initializing the ipa3_ctx with :
 *    1)parsed values from the dts file
 *    2)parameters passed to the module initialization
 *    3)read HW values(such as core memory size)
-* - Map IPA core registers to CPU memory
-* - Restart IPA core(HW reset)
-* - Initialize the look-aside caches(kmem_cache/slab) for filter,
+* Map IPA core registers to CPU memory
+* Restart IPA core(HW reset)
+* Initialize the look-aside caches(kmem_cache/slab) for filter,
 *   routing and IPA-tree
-* - Create memory pool with 4 objects for DMA operations(each object
+* Create memory pool with 4 objects for DMA operations(each object
 *   is 512Bytes long), this object will be use for tx(A5->IPA)
-* - Initialize lists head(routing,filter,hdr,system pipes)
-* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
-* - Initialize spinlocks (for list related to A5<->IPA pipes)
-* - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
-* - Initialize Red-Black-Tree(s) for handles of header,routing rule,
-*   routing table ,filtering rule
-* - Initialize the filter block by committing IPV4 and IPV6 default rules
-* - Create empty routing table in system memory(no committing)
-* - Create a char-device for IPA
-* - Initialize IPA RM (resource manager)
-* - Configure GSI registers (in GSI case)
+* Initialize lists head(routing, hdr, system pipes)
+* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+* Initialize spinlocks (for list related to A5<->IPA pipes)
+* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
+* Initialize Red-Black-Tree(s) for handles of header,routing rule,
+*  routing table ,filtering rule
+* Initialize the filter block by committing IPV4 and IPV6 default rules
+* Create empty routing table in system memory(no committing)
+* Create a char-device for IPA
+* Initialize IPA RM (resource manager)
+* Configure GSI registers (in GSI case)
 */
 static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		struct device *ipa_dev)
 {
 	int result = 0;
 	int i;
-	struct ipa3_flt_tbl *flt_tbl;
 	struct ipa3_rt_tbl_set *rset;
 	struct ipa_active_client_logging_info log_info;
 
@@ -4362,10 +4433,6 @@
 		goto fail_init_hw;
 	}
 
-	ipa_init_ep_flt_bitmap();
-	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
-		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
-
 	ipa3_ctx->ctrl->ipa_sram_read_settings();
 	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
 		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
@@ -4398,9 +4465,6 @@
 	ipa3_active_clients_log_inc(&log_info, false);
 	ipa3_ctx->ipa3_active_clients.cnt = 1;
 
-	/* Assign resource limitation to each group */
-	ipa3_set_resorce_groups_min_max_limits();
-
 	/* Create workqueues for power management */
 	ipa3_ctx->power_mgmt_wq =
 		create_singlethread_workqueue("ipa_power_mgmt");
@@ -4503,26 +4567,6 @@
 	}
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
-	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
-		if (!ipa_is_ep_support_flt(i))
-			continue;
-
-		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
-		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
-		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
-			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
-		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
-			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
-
-		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
-		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
-		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
-			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
-		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
-			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
-	}
 
 	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
 	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
@@ -4604,17 +4648,6 @@
 		goto fail_create_apps_resource;
 	}
 
-	if (!ipa3_ctx->apply_rg10_wa) {
-		result = ipa3_init_interrupts();
-		if (result) {
-			IPAERR("ipa initialization of interrupts failed\n");
-			result = -ENODEV;
-			goto fail_ipa_init_interrupts;
-		}
-	} else {
-		IPADBG("Initialization of ipa interrupts skipped\n");
-	}
-
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
 		ipa3_enable_dcd();
 
@@ -4653,7 +4686,6 @@
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
 fail_alloc_chrdev_region:
-	ipa3_destroy_flt_tbl_idrs();
 	idr_destroy(&ipa3_ctx->ipa_idr);
 	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
 fail_rx_pkt_wrapper_cache:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 81d7d2e..244c80c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1141,6 +1141,7 @@
 	wait_queue_head_t msg_waitq;
 	enum ipa_hw_type ipa_hw_type;
 	enum ipa3_hw_mode ipa3_hw_mode;
+	bool ipa_config_is_mhi;
 	bool use_ipa_teth_bridge;
 	bool modem_cfg_emb_pipe_flt;
 	bool ipa_wdi2;
@@ -1961,4 +1962,5 @@
 bool ipa3_is_msm_device(void);
 struct device *ipa3_get_pdev(void);
 void ipa3_enable_dcd(void);
+void ipa3_disable_prefetch(enum ipa_client_type client);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 7b89184..30243da 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -265,12 +265,6 @@
 	/* setup ul ep cfg */
 	ep_ul->valid = 1;
 	ep_ul->client = in->ul.client;
-	result = ipa3_enable_data_path(ipa_ep_idx_ul);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_ul);
-		return -EFAULT;
-	}
 	ep_ul->client_notify = notify;
 	ep_ul->priv = priv;
 
@@ -299,14 +293,6 @@
 	/* setup dl ep cfg */
 	ep_dl->valid = 1;
 	ep_dl->client = in->dl.client;
-	result = ipa3_enable_data_path(ipa_ep_idx_dl);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_dl);
-		result = -EFAULT;
-		goto fail;
-	}
-
 	memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
 	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
 	ep_dl->cfg.hdr.hdr_len = hdr_len;
@@ -325,6 +311,14 @@
 	}
 	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
 	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("Enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
 	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
 		ipa_ep_idx_dl);
 
@@ -368,11 +362,31 @@
 	}
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	/* teardown the UL pipe */
 	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
 	cmd_data->protocol = IPA_HW_FEATURE_NTN;
-
 	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+
+	/* teardown the DL pipe */
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	/*
+	 * Reset the ep before sending the cmd; otherwise a disconnect
+	 * during data transfer will result in an enormous number of
+	 * suspend interrupts
+	 */
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* teardown the UL pipe */
 	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
@@ -383,25 +397,9 @@
 		result = -EFAULT;
 		goto fail;
 	}
-	ipa3_disable_data_path(ipa_ep_idx_ul);
 	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
-	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
-	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
-
-	/* teardown the DL pipe */
-	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
-	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
-				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		IPAERR("fail to tear down ul pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa3_disable_data_path(ipa_ep_idx_dl);
 	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
-	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
 
 fail:
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 836e3e8..4811425 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -112,11 +112,12 @@
 #define IPA_v3_0_GROUP_Q6ZIP_ENGINE	IPA_v3_0_GROUP_UC_RX_Q
 #define IPA_v3_0_GROUP_MAX		(6)
 
-#define IPA_v3_5_GROUP_LWA_DL		(0)
-#define IPA_v3_5_GROUP_PCIE		(0)
+#define IPA_v3_5_GROUP_LWA_DL		(0) /* currently not used */
+#define IPA_v3_5_MHI_GROUP_PCIE	IPA_v3_5_GROUP_LWA_DL
 #define IPA_v3_5_GROUP_UL_DL		(1)
-#define IPA_v3_5_GROUP_DMA		(2)
-#define IPA_v3_5_GROUP_UC_RX_Q		(3)
+#define IPA_v3_5_MHI_GROUP_DDR		IPA_v3_5_GROUP_UL_DL
+#define IPA_v3_5_MHI_GROUP_DMA		(2)
+#define IPA_v3_5_GROUP_UC_RX_Q		(3) /* currently not used */
 #define IPA_v3_5_SRC_GROUP_MAX		(4)
 #define IPA_v3_5_DST_GROUP_MAX		(3)
 
@@ -167,6 +168,7 @@
 enum ipa_ver {
 	IPA_3_0,
 	IPA_3_5,
+	IPA_3_5_MHI,
 	IPA_3_5_1,
 	IPA_VER_MAX,
 };
@@ -195,6 +197,19 @@
 	[IPA_3_5] = {
 		/* LWA_DL  UL_DL    not used  UC_RX_Q, other are invalid */
 		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{0, 0}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{0, 0}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{0, 0}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{0, 0}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA  not used, others are invalid */
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
 		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
 		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
@@ -223,7 +238,7 @@
 static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
 	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
 	[IPA_3_0] = {
-		/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
+		/* UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng */
 		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
 		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
@@ -232,14 +247,21 @@
 		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
 	},
 	[IPA_3_5] = {
-		/*LWA_DL UL/DL/DPL not used, other are invalid */
+		/* unused UL/DL/DPL unused N/A    N/A     N/A */
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA     N/A     N/A     N/A */
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
 		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_3_5_1] = {
-		/*LWA_DL UL/DL/DPL not used, other are invalid */
+		/* LWA_DL UL/DL/DPL unused N/A   N/A     N/A */
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
@@ -250,17 +272,22 @@
 static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
 	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
 	[IPA_3_0] = {
-		/*UL	DL	DIAG	DMA	Unused	uC Rx*/
+		/* UL	DL	DIAG	DMA	Unused	uC Rx */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
 	},
 	[IPA_3_5] = {
-		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		/* unused UL_DL	unused UC_RX_Q   N/A     N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
-		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
 	},
+	[IPA_3_5_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{ 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
+	},
 	[IPA_3_5_1] = {
-		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
 	},
@@ -519,15 +546,11 @@
 
 	/* IPA_3_5 */
 	[IPA_3_5][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
-	/*
-	 * for WLAN1_PROD this configuration is temporal and needs to be updated
-	 * according to documentation.
-	 */
 	[IPA_3_5][IPA_CLIENT_WLAN1_PROD]          = {
 			6, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 6, 1, 12, 30, IPA_EE_UC } },
+			{ 6, 1, 8, 16, IPA_EE_UC } },
 	[IPA_3_5][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
@@ -563,22 +586,14 @@
 			1, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MHI_PROD]            = {
-			1, IPA_v3_5_GROUP_PCIE, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_PCIE,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_UC } },
+	[IPA_3_5][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v3_5_GROUP_UL_DL, false,
+			3, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
-	[IPA_3_5][IPA_CLIENT_Q6_WAN_PROD]         = {
-			6, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR,
-			{ 6, 4, 10, 30, IPA_EE_Q6 } },
+	[IPA_3_5][IPA_CLIENT_Q6_WAN_PROD]         = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			4, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
@@ -586,16 +601,8 @@
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
-			7, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
-			QMB_MASTER_SELECT_PCIE,
-			{ 7, 8, 8, 16, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
-			8, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
-			QMB_MASTER_SELECT_PCIE,
-			{ 8, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5][IPA_CLIENT_TEST_PROD]           = {
 			0, IPA_v3_5_GROUP_UL_DL, true,
@@ -650,7 +657,7 @@
 	[IPA_3_5][IPA_CLIENT_USB_CONS]            = {
 			17, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_USB_DPL_CONS]        = {
 			14, IPA_v3_5_GROUP_UL_DL, false,
@@ -676,11 +683,7 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MHI_CONS]            = {
-			15, IPA_v3_5_GROUP_PCIE, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_CONS]         = {
 			13, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
@@ -694,22 +697,15 @@
 	[IPA_3_5][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_CONS]	  = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
-			18, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 18, 12, 8, 8, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
-			19, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 19, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_3_5][IPA_CLIENT_TEST_CONS]           = {
 			15, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST1_CONS]           = {
 			15, IPA_v3_5_GROUP_UL_DL, false,
@@ -719,7 +715,7 @@
 	[IPA_3_5][IPA_CLIENT_TEST2_CONS]          = {
 			17, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST3_CONS]          = {
 			18, IPA_v3_5_GROUP_UL_DL, false,
@@ -729,9 +725,195 @@
 	[IPA_3_5][IPA_CLIENT_TEST4_CONS]          = {
 			19, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
 
+	/* IPA_3_5_MHI */
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
+			2, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			5, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 23, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_PROD]            = {
+			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
+			3, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
+			6, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 4, 10, 30, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			4, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			7, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			8, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD]          = {
+			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD]          = {
+			7, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD]          = {
+			8, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS]          = {
+			16, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 3, 8, 8, IPA_EE_UC } },
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB_CONS]            = {
+			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			14, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 10, 4, 6, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
+			9, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
+			10, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 6, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_CONS]            = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
+			13, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
+			12, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DUN_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]	= IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS]           = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS]           = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS]          = {
+			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS]          = {
+			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS]          = {
+			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_1 */
 	[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
@@ -1383,6 +1565,11 @@
 	int qsb_max_writes[2] = { 8, 2 };
 	int qsb_max_reads[2] = { 8, 8 };
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
+		qsb_max_writes[1] = 4;
+		qsb_max_reads[1] = 12;
+	}
+
 	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
 	ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
 }
@@ -1441,6 +1628,12 @@
 		break;
 	case IPA_HW_v3_5:
 		hw_type_index = IPA_3_5;
+		/*
+		 * This flag is initialized only after the fw load trigger
+		 * from user space (ipa3_write)
+		 */
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_3_5_MHI;
 		break;
 	case IPA_HW_v3_5_1:
 		hw_type_index = IPA_3_5_1;
@@ -3729,6 +3922,7 @@
 		}
 		break;
 	case IPA_3_5:
+	case IPA_3_5_MHI:
 	case IPA_3_5_1:
 		if (src) {
 			switch (group_index) {
@@ -3738,7 +3932,7 @@
 					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
 				break;
-			case IPA_v3_5_GROUP_DMA:
+			case IPA_v3_5_MHI_GROUP_DMA:
 			case IPA_v3_5_GROUP_UC_RX_Q:
 				ipahal_write_reg_n_fields(
 					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
@@ -3758,7 +3952,7 @@
 					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
 				break;
-			case IPA_v3_5_GROUP_DMA:
+			case IPA_v3_5_MHI_GROUP_DMA:
 				ipahal_write_reg_n_fields(
 					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
 					n, val);
@@ -3838,6 +4032,7 @@
 		dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
 		break;
 	case IPA_3_5:
+	case IPA_3_5_MHI:
 	case IPA_3_5_1:
 		src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX;
 		dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX;
@@ -4266,6 +4461,33 @@
 }
 
 /**
+* ipa3_disable_prefetch() - disable tx prefetch
+*
+* @client: the client which is related to the TX where prefetch will be
+*          disabled
+*
+* Return value: None
+*
+*/
+void ipa3_disable_prefetch(enum ipa_client_type client)
+{
+	struct ipahal_reg_tx_cfg cfg;
+	u8 qmb;
+
+	qmb = ipa3_get_qmb_master_sel(client);
+
+	IPADBG("disabling prefetch for qmb %d\n", (int)qmb);
+
+	ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+	/* QMB0 (DDR) correlates with TX0, QMB1(PCIE) correlates with TX1 */
+	if (qmb == QMB_MASTER_SELECT_DDR)
+		cfg.tx0_prefetch_disable = true;
+	else
+		cfg.tx1_prefetch_disable = true;
+	ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+}
+
+/**
  * ipa3_get_pdev() - return a pointer to IPA dev struct
  *
  * Return value: a pointer to IPA dev struct
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 2a780b6..3c8688e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -901,6 +901,26 @@
 			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
 }
 
+static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->tx0_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->tx1_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
 static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1174,7 +1194,7 @@
 
 	/* IPAv3.5 */
 	[IPA_HW_v3_5][IPA_TX_CFG] = {
-		ipareg_construct_tx_cfg, ipareg_parse_dummy,
+		ipareg_construct_tx_cfg, ipareg_parse_tx_cfg,
 		0x000001FC, 0},
 	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
 		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 9a400d9..d747771 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1067,6 +1067,8 @@
 		IPAWANDBG_LOW
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1078,7 +1080,8 @@
 			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
 			goto send;
 		} else {
-			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			pr_err("[%s]fatal: ipa3_wwan_xmit stopped\n",
+				  dev->name);
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -1108,6 +1111,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1124,7 +1129,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 4752653..07a0aef 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -280,6 +280,18 @@
 	((vband) == 0 ? CPR4_REG_MARGIN_TEMP_CORE(core) \
 			: 0x3AB0 + 0x40 * ((vband) - 1) + 0x4 * (core))
 
+#define CPRH_REG_MISC_REG2	0x3AAC
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK	GENMASK(31, 29)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT	29
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK	GENMASK(28, 24)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT	24
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK	GENMASK(23, 22)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
+#define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
+
 /* SAW module registers */
 #define SAW_REG_AVS_CTL				0x904
 #define SAW_REG_AVS_LIMIT			0x908
@@ -1399,6 +1411,33 @@
 	}
 
 	/*
+	 * Configure CPRh ACD AVG registers on controllers
+	 * that support this feature.
+	 */
+	if (ctrl->cpr_hw_version >= CPRH_CPR_VERSION_4P5
+	    && ctrl->acd_avg_enabled) {
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK,
+				  ctrl->acd_adj_up_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK,
+				  ctrl->acd_adj_down_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK,
+				  ctrl->acd_adj_up_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK,
+				  ctrl->acd_adj_down_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
+				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
+	}
+
+	/*
 	 * Program base voltage and voltage multiplier values which
 	 * are used for floor and initial voltage calculations by the
 	 * CPRh controller.
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 31d737ca..570ddfc 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -649,6 +649,20 @@
  *			defines the maximum number of VDD supply regulator steps
  *			that the voltage may be increased as the result of a
  *			single CPR measurement.
+ * @acd_adj_down_step_limit: Limits the number of PMIC steps to go down within
+ *			a given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_up_step_limit: Limits the number of PMIC steps to go up within a
+ *			given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_down_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD down
+ *			recommendation.
+ * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD up
+ *			recommendation.
+ * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
+ *			feature.
  * @count_mode:		CPR controller count mode
  * @count_repeat:	Number of times to perform consecutive sensor
  *			measurements when using all-at-once count modes.
@@ -804,6 +818,11 @@
 	int			step_volt;
 	u32			down_error_step_limit;
 	u32			up_error_step_limit;
+	u32			acd_adj_down_step_limit;
+	u32			acd_adj_up_step_limit;
+	u32			acd_adj_down_step_size;
+	u32			acd_adj_up_step_size;
+	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
 	u32			proc_clock_throttle;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 84fc703..a93e7d8 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -2221,6 +2221,46 @@
 		return rc;
 	}
 
+	ctrl->acd_avg_enabled = of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-enable");
+	if (ctrl->acd_avg_enabled) {
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-limit",
+					  &ctrl->acd_adj_down_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-down-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-limit",
+					  &ctrl->acd_adj_up_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-up-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-size",
+					  &ctrl->acd_adj_down_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-down-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-size",
+					  &ctrl->acd_adj_up_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-up-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
 	rc = of_property_read_u32(ctrl->dev->of_node,
 				  "qcom,voltage-base",
 				  &ctrl->base_volt);
diff --git a/drivers/soc/qcom/gladiator_hang_detect.c b/drivers/soc/qcom/gladiator_hang_detect.c
index 7fc2825..b0940ad 100644
--- a/drivers/soc/qcom/gladiator_hang_detect.c
+++ b/drivers/soc/qcom/gladiator_hang_detect.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,7 +71,7 @@
 		hang_dev->M1_threshold = threshold_val;
 	else if (offset == hang_dev->M2_offset)
 		hang_dev->M2_threshold = threshold_val;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		hang_dev->PCIO_threshold = threshold_val;
 }
 
@@ -86,7 +86,7 @@
 		*reg_value = hang_dev->M1_threshold;
 	else if (offset == hang_dev->M2_offset)
 		*reg_value = hang_dev->M2_threshold;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		*reg_value = hang_dev->PCIO_threshold;
 }
 
@@ -101,7 +101,7 @@
 		hang_dev->M1_enable = enabled;
 	else if (offset == hang_dev->M2_offset)
 		hang_dev->M2_enable = enabled;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		hang_dev->PCIO_enable = enabled;
 }
 
@@ -116,7 +116,7 @@
 		*reg_value = hang_dev->M1_enable;
 	else if (offset == hang_dev->M2_offset)
 		*reg_value = hang_dev->M2_enable;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		*reg_value = hang_dev->PCIO_enable;
 }
 
@@ -475,7 +475,7 @@
 	struct device_node *node = pdev->dev.of_node;
 	struct hang_detect *hang_det = NULL;
 	int i = 0, ret;
-	u32 NR_GLA_REG;
+	u32 NR_GLA_REG = 0;
 	u32 *treg;
 	u32 creg;
 
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index 8c6deb1..c977d1b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -572,7 +572,7 @@
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ab[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].sum_query_ab *
 					(uint64_t)bcm_dev->bcmdev->width,
-				cur_dev->node_info->agg_params.num_aggports,
+				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
 		for (i = 0; i < bcm_dev->num_lnodes; i++) {
@@ -1298,7 +1298,7 @@
 					struct msm_bus_tcs_usecase *tcs_usecase)
 {
 	int lnode, src, dest, cur_idx;
-	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw;
 	int i, ret = 0;
 	struct msm_bus_scale_pdata *pdata;
 	struct device *src_dev;
@@ -1339,8 +1339,8 @@
 					curr_bw, curr_clk);
 		}
 
-		ret = query_path(src_dev, dest, req_clk, req_bw, slp_clk,
-			slp_bw, curr_clk, curr_bw, lnode);
+		ret = query_path(src_dev, dest, req_clk, req_bw, 0,
+			0, curr_clk, curr_bw, lnode);
 
 		if (ret) {
 			MSM_BUS_ERR("%s: Query path failed! %d ctx %d\n",
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
index 7a0fbc5..4911cf2 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_core.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -288,7 +288,7 @@
 	struct device **src_devs;
 };
 
-uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+uint64_t msm_bus_div64(uint64_t num, unsigned int base);
 int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
 void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
 struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
index 5b5159d..03042fa 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -588,7 +588,7 @@
 static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
 					struct notifier_block *nb)
 {
-	int i;
+	int i = 0;
 	struct rule_node_info *node = NULL;
 	struct rule_node_info *node_tmp = NULL;
 	struct rules_def *node_rule;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 03a6204..11e1b4d 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -928,7 +928,8 @@
 					desc->attrs);
 			priv->region = NULL;
 		}
-		pil_clear_segment(desc);
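+		/* Only scrub the firmware region when the descriptor requests it */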
+		if (desc->clear_fw_region && priv->region_start)
+			pil_clear_segment(desc);
 		pil_release_mmap(desc);
 	}
 	return ret;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 752a6ce..af7249b 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
  * This defaults to iounmap if not specified.
  * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
  * @modem_ssr: true if modem is restarting, false if booting for first time.
+ * @clear_fw_region: Clear the firmware region if loading fails.
  * @subsys_vmid: memprot id for the subsystem.
  */
 struct pil_desc {
@@ -54,6 +55,7 @@
 	void *map_data;
 	bool shutdown_fail;
 	bool modem_ssr;
+	bool clear_fw_region;
 	u32 subsys_vmid;
 };
 
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index ffe72e6..fb3d7d9 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -320,6 +320,7 @@
 	struct modem_data *drv = dev_get_drvdata(pil->dev);
 	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
 	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	s32 status;
 	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
 
@@ -349,7 +350,7 @@
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
-		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
 				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
 				drv->attrs_dma);
 		drv->q6->mba_dp_virt = NULL;
@@ -542,6 +543,7 @@
 	dma_addr_t mba_dp_phys, mba_dp_phys_end;
 	int ret, count;
 	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
 	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
 	ret = request_firmware(&fw, fw_name_p, pil->dev);
@@ -560,11 +562,12 @@
 
 	drv->mba_dp_size = SZ_1M;
 
-	arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
 
-	md->mba_mem_dev.coherent_dma_mask =
-		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
 	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
 	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
 
 	ret = request_firmware(&dp_fw, dp_name, pil->dev);
@@ -581,10 +584,11 @@
 		drv->mba_dp_size += drv->dp_size;
 	}
 
-	mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
-			&mba_dp_phys, GFP_KERNEL, md->attrs_dma);
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, md->attrs_dma);
 	if (!mba_dp_virt) {
-		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
 		ret = -ENOMEM;
 		goto err_invalid_fw;
 	}
@@ -640,7 +644,7 @@
 		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
 							drv->mba_dp_size);
 err_mba_data:
-	dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
 				drv->mba_dp_phys, md->attrs_dma);
 err_invalid_fw:
 	if (dp_fw)
@@ -659,13 +663,14 @@
 	s32 status;
 	int ret;
 	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	unsigned long attrs = 0;
 
-	drv->mba_mem_dev.coherent_dma_mask =
-		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_SKIP_ZEROING;
 	attrs |= DMA_ATTR_STRONGLY_ORDERED;
 	/* Make metadata physically contiguous and 4K aligned. */
-	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
 					GFP_KERNEL, attrs);
 	if (!mdata_virt) {
 		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
@@ -682,8 +687,8 @@
 		if (ret) {
 			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
 									ret);
-			dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
-							mdata_phys, attrs);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									attrs);
 			goto fail;
 		}
 	}
@@ -709,7 +714,7 @@
 	if (pil->subsys_vmid > 0)
 		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
 
-	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, attrs);
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);
 
 	if (!ret)
 		return ret;
@@ -721,7 +726,7 @@
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
-		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
 				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
 				drv->attrs_dma);
 		drv->q6->mba_dp_virt = NULL;
@@ -773,6 +778,7 @@
 	struct modem_data *drv = dev_get_drvdata(pil->dev);
 	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
 	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	s32 status;
 	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
 
@@ -794,9 +800,9 @@
 				pil_assign_mem_to_linux(pil,
 					drv->q6->mba_dp_phys,
 					drv->q6->mba_dp_size);
-			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
-					drv->q6->mba_dp_virt,
-					drv->q6->mba_dp_phys, drv->attrs_dma);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
 
 			drv->q6->mba_dp_virt = NULL;
 		}
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 3af6368..1789ba3 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
 	struct clk *xo;
 	struct pil_desc desc;
 	struct device mba_mem_dev;
+	struct device *mba_mem_dev_fixed;
 	unsigned long attrs_dma;
 };
 
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index ec0187a..2cbbe2e 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/ioport.h>
@@ -379,6 +380,11 @@
 	}
 	init_completion(&drv->stop_ack);
 
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
 	return pil_subsys_init(drv, pdev);
 }
 
@@ -392,6 +398,33 @@
 	return 0;
 }
 
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
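+	/* The parent is the MSS PIL device; its drvdata is the modem_data */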
+	drv = dev_get_drvdata(pdev->dev.parent);
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
 static const struct of_device_id mss_match_table[] = {
 	{ .compatible = "qcom,pil-q6v5-mss" },
 	{ .compatible = "qcom,pil-q6v55-mss" },
@@ -411,7 +444,12 @@
 
 static int __init pil_mss_init(void)
 {
-	return platform_driver_register(&pil_mss_driver);
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (!ret)
+		ret = platform_driver_register(&pil_mss_driver);
+	return ret;
 }
 module_init(pil_mss_init);
 
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index fb4d0ea..d9d6c72 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -637,6 +637,7 @@
 	if (ret)
 		return ERR_PTR(ret);
 
+	desc->clear_fw_region = false;
 	desc->dev = &pdev->dev;
 
 	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/qdsp6v2/adsp-loader.c b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
index 1bde1bf..d90267e 100644
--- a/drivers/soc/qcom/qdsp6v2/adsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
 #include <linux/qdsp6v2/apr.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define Q6_PIL_GET_DELAY_MS 100
@@ -44,12 +46,13 @@
 	NULL,
 };
 
+static struct work_struct adsp_ldr_work;
 static struct platform_device *adsp_private;
 static void adsp_loader_unload(struct platform_device *pdev);
 
-static void adsp_loader_do(struct platform_device *pdev)
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
 {
-
+	struct platform_device *pdev = adsp_private;
 	struct adsp_loader_private *priv = NULL;
 
 	const char *adsp_dt = "qcom,adsp-state";
@@ -146,6 +149,10 @@
 	dev_err(&pdev->dev, "%s: Q6 image loading failed\n", __func__);
 }
 
+static void adsp_loader_do(struct platform_device *pdev)
+{
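+	/* Defer the actual image loading to workqueue context */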
+	schedule_work(&adsp_ldr_work);
+}
 
 static ssize_t adsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -272,6 +279,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
index 9bb4eb0..70977d3 100644
--- a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -19,6 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define BOOT_CMD 1
@@ -47,10 +49,12 @@
 
 static u32 cdsp_state = CDSP_SUBSYS_DOWN;
 static struct platform_device *cdsp_private;
+static struct work_struct cdsp_ldr_work;
 static void cdsp_loader_unload(struct platform_device *pdev);
 
-static int cdsp_loader_do(struct platform_device *pdev)
+static void cdsp_load_fw(struct work_struct *cdsp_ldr_work)
 {
+	struct platform_device *pdev = cdsp_private;
 	struct cdsp_loader_private *priv = NULL;
 
 	int rc = 0;
@@ -99,14 +103,17 @@
 		}
 
 		dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
-		return rc;
+		return;
 	}
 
 fail:
 	dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
-	return rc;
 }
 
+static void cdsp_loader_do(struct platform_device *pdev)
+{
+	schedule_work(&cdsp_ldr_work);
+}
 
 static ssize_t cdsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -124,7 +131,7 @@
 		pr_debug("%s: going to call cdsp_loader_do\n", __func__);
 		cdsp_loader_do(cdsp_private);
 	} else if (boot == IMAGE_UNLOAD_CMD) {
-		pr_debug("%s: going to call adsp_unloader\n", __func__);
+		pr_debug("%s: going to call cdsp_unloader\n", __func__);
 		cdsp_loader_unload(cdsp_private);
 	}
 	return count;
@@ -236,6 +243,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&cdsp_ldr_work, cdsp_load_fw);
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index bcd00b4..fca1c68 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -99,6 +99,7 @@
  */
 struct qmi_client_info {
 	int instance_id;
+	enum pd_subsys_state subsys_state;
 	struct work_struct svc_arrive;
 	struct work_struct svc_exit;
 	struct work_struct svc_rcv_msg;
@@ -436,7 +437,7 @@
 {
 	struct qmi_client_info *data = container_of(work,
 					struct qmi_client_info, svc_exit);
-	root_service_service_exit(data, ROOT_PD_DOWN);
+	root_service_service_exit(data, data->subsys_state);
 }
 
 static int service_event_notify(struct notifier_block *this,
@@ -453,6 +454,7 @@
 		break;
 	case QMI_SERVER_EXIT:
 		pr_debug("Root PD service DOWN\n");
+		data->subsys_state = ROOT_PD_DOWN;
 		queue_work(data->svc_event_wq, &data->svc_exit);
 		break;
 	default:
@@ -468,7 +470,6 @@
 	struct qmi_client_info *info = container_of(this,
 					struct qmi_client_info, ssr_notifier);
 	struct notif_data *notif = data;
-	enum pd_subsys_state state;
 
 	switch (code) {
 	case	SUBSYS_BEFORE_SHUTDOWN:
@@ -476,16 +477,16 @@
 						notif->crashed);
 		switch (notif->crashed) {
 		case CRASH_STATUS_ERR_FATAL:
-			state = ROOT_PD_ERR_FATAL;
+			info->subsys_state = ROOT_PD_ERR_FATAL;
 			break;
 		case CRASH_STATUS_WDOG_BITE:
-			state = ROOT_PD_WDOG_BITE;
+			info->subsys_state = ROOT_PD_WDOG_BITE;
 			break;
 		default:
-			state = ROOT_PD_SHUTDOWN;
+			info->subsys_state = ROOT_PD_SHUTDOWN;
 			break;
 		}
-		root_service_service_exit(info, state);
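+		/* Handle the exit from the svc_exit work using subsys_state */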
+		queue_work(info->svc_event_wq, &info->svc_exit);
 		break;
 	default:
 		break;
@@ -635,7 +636,13 @@
 		return rc;
 	}
 
-	/* Check the response */
+	/* Check whether the response indicates PD restart is disabled */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) == QMI_ERR_DISABLED_V01) {
+		pr_err("PD restart is disabled 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		return -EOPNOTSUPP;
+	}
+	/* Check the response for other error cases */
 	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
 		pr_err("QMI request for PD restart failed 0x%x\n",
 					QMI_RESP_BIT_SHIFT(resp.resp.error));
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 0063ae1..982dfae 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1035,6 +1035,7 @@
 	d->desc.ops = &pil_ops_trusted;
 
 	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+	d->desc.clear_fw_region = true;
 
 	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
 					&proxy_timeout);
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index a087ad6..f4c7779 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -153,10 +153,12 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_arrive);
 
+	mutex_lock(&sysmon_lock);
 	/* Create a Local client port for QMI communication */
 	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
 	if (!data->clnt_handle) {
 		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 
@@ -167,6 +169,7 @@
 								data->name);
 		qmi_handle_destroy(data->clnt_handle);
 		data->clnt_handle = NULL;
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
@@ -177,6 +180,7 @@
 	if (rc < 0)
 		pr_warn("%s: Could not register the indication callback\n",
 								data->name);
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_svc_exit(struct work_struct *work)
@@ -184,8 +188,10 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_exit);
 
+	mutex_lock(&sysmon_lock);
 	qmi_handle_destroy(data->clnt_handle);
 	data->clnt_handle = NULL;
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_recv_msg(struct work_struct *work)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b799547..fc96f62 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -533,6 +533,18 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called spi_qup.
 
+config SPI_QCOM_GENI
+	tristate "Qualcomm Technologies Inc.'s GENI based SPI controller"
+	depends on ARCH_QCOM
+	help
+	  SPI driver for Qualcomm Technologies Inc's GENI based controller.
+	  The controller can run at up to 50 MHz, supports up to 4 CS lines,
+	  programmable word sizes from 4 to 32 bits, and the various SPI
+	  modes. It can operate in FIFO mode (SW driven I/O) and DMA mode.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called spi-geni-qcom.
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C24XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aa939d9..9d72f37 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
+obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
 obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
 obj-$(CONFIG_SPI_RB4XX)			+= spi-rb4xx.o
 obj-$(CONFIG_SPI_RSPI)			+= spi-rspi.o
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 0000000..4c86197
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spi/spi.h>
+
+#define SPI_NUM_CHIPSELECT	(4)
+#define SPI_XFER_TIMEOUT_MS	(250)
+#define SPI_OVERSAMPLING	(2)
+/* SPI SE specific registers */
+#define SE_SPI_CPHA		(0x224)
+#define SE_SPI_LOOPBACK		(0x22C)
+#define SE_SPI_CPOL		(0x230)
+#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
+#define SE_SPI_DEMUX_SEL	(0x250)
+#define SE_SPI_TRANS_CFG	(0x25C)
+#define SE_SPI_WORD_LEN		(0x268)
+#define SE_SPI_TX_TRANS_LEN	(0x26C)
+#define SE_SPI_RX_TRANS_LEN	(0x270)
+#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
+#define SE_SPI_DELAY_COUNTERS	(0x278)
+
+/* SE_SPI_CPHA register fields */
+#define CPHA			(BIT(0))
+
+/* SE_SPI_LOOPBACK register fields */
+#define LOOPBACK_ENABLE		(0x1)
+#define NORMAL_MODE		(0x0)
+#define LOOPBACK_MSK		(GENMASK(1, 0))
+
+/* SE_SPI_CPOL register fields */
+#define CPOL			(BIT(2))
+
+/* SE_SPI_DEMUX_OUTPUT_INV register fields */
+#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))
+
+/* SE_SPI_DEMUX_SEL register fields */
+#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))
+
+/* SE_SPI_TX_TRANS_CFG register fields */
+#define CS_TOGGLE		(BIT(0))
+
+/* SE_SPI_WORD_LEN register fields */
+#define WORD_LEN_MSK		(GENMASK(9, 0))
+#define MIN_WORD_LEN		(4)
+
+/* SPI_TX/SPI_RX_TRANS_LEN fields */
+#define TRANS_LEN_MSK		(GENMASK(23, 0))
+
+/* M_CMD OP codes for SPI */
+#define SPI_TX_ONLY		(1)
+#define SPI_RX_ONLY		(2)
+#define SPI_FULL_DUPLEX		(3)
+#define SPI_TX_RX		(7)
+#define SPI_CS_ASSERT		(8)
+#define SPI_CS_DEASSERT		(9)
+#define SPI_SCK_ONLY		(10)
+/* M_CMD params for SPI */
+#define SPI_PRE_CMD_DELAY	(0)
+#define TIMESTAMP_BEFORE	(1)
+#define FRAGMENTATION		(2)
+#define TIMESTAMP_AFTER		(3)
+#define POST_CMD_DELAY		(4)
+
+struct spi_geni_master {
+	struct se_geni_rsc spi_rsc;
+	resource_size_t phys_addr;
+	resource_size_t size;
+	void __iomem *base;
+	int irq;
+	struct device *dev;
+	int rx_fifo_depth;
+	int tx_fifo_depth;
+	int tx_fifo_width;
+	int tx_wm;
+	bool setup;
+	u32 cur_speed_hz;
+	int cur_word_len;
+	unsigned int tx_rem_bytes;
+	unsigned int rx_rem_bytes;
+	struct spi_transfer *cur_xfer;
+	struct completion xfer_done;
+};
+
+static struct spi_master *get_spi_master(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct spi_master *spi = platform_get_drvdata(pdev);
+
+	return spi;
+}
+
+static int get_sclk(u32 speed_hz, unsigned long *sclk_freq)
+{
+	u32 root_freq[] = { 19200000 };
+
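+	/* Only a single 19.2 MHz source clock is listed; always pick index 0 */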
+	*sclk_freq = root_freq[0];
+	return 0;
+}
+
+static int do_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas)
+{
+	unsigned long sclk_freq;
+	int div = 0;
+	int idx;
+	struct se_geni_rsc *rsc = &mas->spi_rsc;
+	int ret = 0;
+	u32 clk_sel = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
+	u32 m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
+
+	clk_sel &= ~CLK_SEL_MSK;
+	m_clk_cfg &= ~CLK_DIV_MSK;
+
+	idx = get_sclk(speed_hz, &sclk_freq);
+	if (idx < 0) {
+		ret = -EINVAL;
+		goto spi_clk_cfg_exit;
+	}
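+	/* Divide the source clock down to the requested SPI rate */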
+	div = ((sclk_freq / SPI_OVERSAMPLING) / speed_hz);
+
+	clk_sel |= (idx & CLK_SEL_MSK);
+	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
+	ret = clk_set_rate(rsc->se_clk, sclk_freq);
+	if (ret)
+		goto spi_clk_cfg_exit;
+
+	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
+	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+spi_clk_cfg_exit:
+	return ret;
+}
+
+static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
+						int bits_per_word)
+{
+	int pack_words = mas->tx_fifo_width / bits_per_word;
+	bool msb_first = !(mode & SPI_LSB_FIRST);
+	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
+
+	word_len &= ~WORD_LEN_MSK;
+	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
+	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
+	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
+}
+
+static int spi_geni_prepare_message(struct spi_master *spi_mas,
+					struct spi_message *spi_msg)
+{
+	struct spi_device *spi_slv = spi_msg->spi;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+	u16 mode = spi_slv->mode;
+	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
+	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
+	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
+	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
+	u32 demux_output_inv =
+			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	int ret = 0;
+
+	loopback_cfg &= ~LOOPBACK_MSK;
+	cpol &= ~CPOL;
+	cpha &= ~CPHA;
+	demux_output_inv &= ~BIT(spi_slv->chip_select);
+
+	if (mode & SPI_LOOP)
+		loopback_cfg |= LOOPBACK_ENABLE;
+
+	if (mode & SPI_CPOL)
+		cpol |= CPOL;
+
+	if (mode & SPI_CPHA)
+		cpha |= CPHA;
+
+	if (spi_slv->mode & SPI_CS_HIGH)
+		demux_output_inv |= BIT(spi_slv->chip_select);
+
+	demux_sel |= BIT(spi_slv->chip_select);
+	mas->cur_speed_hz = spi_slv->max_speed_hz;
+	mas->cur_word_len = spi_slv->bits_per_word;
+
+	ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
+	if (ret) {
+		dev_err(&spi_mas->dev, "Err setting clks ret %d\n", ret);
+		goto prepare_message_exit;
+	}
+	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
+	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
+	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
+	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
+	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
+	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
+	/* Ensure message level attributes are written before returning */
+	mb();
+prepare_message_exit:
+	return ret;
+}
+
+static int spi_geni_unprepare_message(struct spi_master *spi_mas,
+					struct spi_message *spi_msg)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+
+	mas->cur_speed_hz = 0;
+	mas->cur_word_len = 0;
+	return 0;
+}
+
+static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret = 0;
+
+	ret = pm_runtime_get_sync(mas->dev);
+	if (ret < 0) {
+		dev_err(mas->dev, "Error enabling SE resources\n");
+		pm_runtime_put_noidle(mas->dev);
+		goto exit_prepare_transfer_hardware;
+	} else {
+		ret = 0;
+	}
+
+	if (unlikely(!mas->setup)) {
+		int proto = get_se_proto(mas->base);
+
+		if (unlikely(proto != SPI)) {
+			dev_err(mas->dev, "Invalid proto %d\n", proto);
+			return -ENXIO;
+		}
+		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
+		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
+		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
+		geni_se_init(mas->base, FIFO_MODE, 0x0,
+						(mas->tx_fifo_depth - 2));
+		/* Transmit an entire FIFO worth of data per IRQ */
+		mas->tx_wm = 1;
+		dev_dbg(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
+			mas->tx_fifo_depth, mas->rx_fifo_depth,
+			mas->tx_fifo_width);
+		mas->setup = true;
+	}
+exit_prepare_transfer_hardware:
+	return ret;
+}
+
+static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	pm_runtime_put_sync(mas->dev);
+	return 0;
+}
+
+static void setup_fifo_xfer(struct spi_transfer *xfer,
+				struct spi_geni_master *mas, u16 mode,
+				struct spi_master *spi)
+{
+	u32 m_cmd = 0;
+	u32 m_param = 0;
+	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
+	u32 trans_len = 0;
+
+	if (xfer->bits_per_word != mas->cur_word_len) {
+		spi_setup_word_len(mas, mode, xfer->bits_per_word);
+		mas->cur_word_len = xfer->bits_per_word;
+	}
+
+	if (xfer->tx_buf && xfer->rx_buf)
+		m_cmd = SPI_FULL_DUPLEX;
+	else if (xfer->tx_buf)
+		m_cmd = SPI_TX_ONLY;
+	else if (xfer->rx_buf)
+		m_cmd = SPI_RX_ONLY;
+
+	spi_tx_cfg &= ~CS_TOGGLE;
+	if (xfer->cs_change)
+		spi_tx_cfg |= CS_TOGGLE;
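+	/* Transfer length is programmed in word-size units, not bytes */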
+	trans_len = ((xfer->len / (mas->cur_word_len >> 3)) & TRANS_LEN_MSK);
+	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
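+	/* Pack the buffer bytes into FIFO-word-sized writes */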
+		m_param |= FRAGMENTATION;
+
+	mas->cur_xfer = xfer;
+	if (m_cmd & SPI_TX_ONLY) {
+		mas->tx_rem_bytes = xfer->len;
+		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
+	}
+
+	if (m_cmd & SPI_RX_ONLY) {
+		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
+		mas->rx_rem_bytes = xfer->len;
+	}
+	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
+	geni_setup_m_cmd(mas->base, m_cmd, m_param);
+	geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
+	/* Ensure all writes are done before the WM interrupt */
+	mb();
+}
+
+static void handle_fifo_timeout(struct spi_geni_master *mas)
+{
+	unsigned long timeout;
+	u32 tx_trans_len = geni_read_reg(mas->base, SE_SPI_TX_TRANS_LEN);
+	u32 rx_trans_len = geni_read_reg(mas->base, SE_SPI_RX_TRANS_LEN);
+	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
+	u32 m_cmd = geni_read_reg(mas->base, SE_GENI_M_CMD0);
+
+	/* Timed out on a FIFO xfer; print the relevant register info. */
+	dev_err(mas->dev, "tx_rem_bytes %d rx_rem_bytes %d\n",
+			mas->tx_rem_bytes, mas->rx_rem_bytes);
+	dev_err(mas->dev, "tx_trans_len %d rx_trans_len %d\n", tx_trans_len,
+								rx_trans_len);
+	dev_err(mas->dev, "spi_tx_cfg 0x%x m_cmd 0x%x\n", spi_tx_cfg, m_cmd);
+	reinit_completion(&mas->xfer_done);
+	geni_cancel_m_cmd(mas->base);
+	/* Ensure cmd cancel is written */
+	mb();
+	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
+	if (!timeout) {
+		reinit_completion(&mas->xfer_done);
+		geni_abort_m_cmd(mas->base);
+		/* Ensure cmd abort is written */
+		mb();
+		timeout = wait_for_completion_timeout(&mas->xfer_done,
+								HZ);
+		if (!timeout)
+			dev_err(mas->dev,
+				"Failed to cancel/abort m_cmd\n");
+	}
+}
+
+static int spi_geni_transfer_one(struct spi_master *spi,
+				struct spi_device *slv,
+				struct spi_transfer *xfer)
+{
+	int ret = 0;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	unsigned long timeout;
+
+	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
+		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&mas->xfer_done);
+	/* Speed and bits per word can be overridden per transfer */
+	if (xfer->speed_hz != mas->cur_speed_hz) {
+		ret = do_spi_clk_cfg(xfer->speed_hz, mas);
+		if (ret) {
+			dev_err(mas->dev, "%s:Err setting clks:%d\n",
+								__func__, ret);
+			goto geni_transfer_one_exit;
+		}
+		mas->cur_speed_hz = xfer->speed_hz;
+	}
+
+	setup_fifo_xfer(xfer, mas, slv->mode, spi);
+	timeout = wait_for_completion_timeout(&mas->xfer_done,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+	if (!timeout) {
+		dev_err(mas->dev, "Xfer[len %d tx %p rx %p n %d] timed out.\n",
+						xfer->len, xfer->tx_buf,
+						xfer->rx_buf,
+						xfer->bits_per_word);
+		ret = -ETIMEDOUT;
+		handle_fifo_timeout(mas);
+	}
+geni_transfer_one_exit:
+	return ret;
+}
+
+static void geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+	int i = 0;
+	int tx_fifo_width = (mas->tx_fifo_width >> 3);
+	int max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
+	const u8 *tx_buf = mas->cur_xfer->tx_buf;
+
+	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
+	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
+	while (i < max_bytes) {
+		int j;
+		u32 fifo_word = 0;
+		u8 *fifo_byte;
+		int bytes_to_write = min_t(int, (max_bytes - i), tx_fifo_width);
+
+		fifo_byte = (u8 *)&fifo_word;
+		for (j = 0; j < bytes_to_write; j++)
+			fifo_byte[j] = tx_buf[i++];
+		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
+		/* Ensure FIFO writes are written in order */
+		mb();
+	}
+	mas->tx_rem_bytes -= max_bytes;
+	if (!mas->tx_rem_bytes) {
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+		/* Barrier here before return to prevent further ISRs */
+		mb();
+	}
+}
+
+static void geni_spi_handle_rx(struct spi_geni_master *mas)
+{
+	int i = 0;
+	int fifo_width = (mas->tx_fifo_width >> 3);
+	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
+	int rx_bytes = 0;
+	int rx_wc = 0;
+	u8 *rx_buf = mas->cur_xfer->rx_buf;
+
+	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
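+	/* The last FIFO word may contain fewer than fifo_width valid bytes */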
+	if (rx_fifo_status & RX_LAST) {
+		int rx_last_byte_valid =
+			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
+					>> RX_LAST_BYTE_VALID_SHFT;
+		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
+			rx_wc -= 1;
+			rx_bytes += rx_last_byte_valid;
+		}
+	}
+	rx_bytes += rx_wc * fifo_width;
+	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
+	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
+	while (i < rx_bytes) {
+		u32 fifo_word = 0;
+		u8 *fifo_byte;
+		int read_bytes = min_t(int, (rx_bytes - i), fifo_width);
+		int j;
+
+		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
+		fifo_byte = (u8 *)&fifo_word;
+		for (j = 0; j < read_bytes; j++)
+			rx_buf[i++] = fifo_byte[j];
+	}
+	mas->rx_rem_bytes -= rx_bytes;
+}
+
+static irqreturn_t geni_spi_irq(int irq, void *dev)
+{
+	struct spi_geni_master *mas = dev;
+	u32 m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
+
+	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+		geni_spi_handle_rx(mas);
+
+	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
+		geni_spi_handle_tx(mas);
+
+	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
+		(m_irq & M_CMD_ABORT_EN)) {
+		complete(&mas->xfer_done);
+	}
+	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
+	return IRQ_HANDLED;
+}
+
+static int spi_geni_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spi_master *spi;
+	struct spi_geni_master *geni_mas;
+	struct se_geni_rsc *rsc;
+	struct resource *res;
+
+	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
+	if (!spi) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
+		goto spi_geni_probe_err;
+	}
+
+	platform_set_drvdata(pdev, spi);
+	geni_mas = spi_master_get_devdata(spi);
+	rsc = &geni_mas->spi_rsc;
+	geni_mas->dev = &pdev->dev;
+	spi->dev.of_node = pdev->dev.of_node;
+	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified!\n");
+		ret = PTR_ERR(rsc->geni_pinctrl);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_active);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_sleep);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(rsc->se_clk)) {
+		ret = PTR_ERR(rsc->se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(rsc->m_ahb_clk)) {
+		ret = PTR_ERR(rsc->m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(rsc->s_ahb_clk)) {
+		ret = PTR_ERR(rsc->s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+				&spi->max_speed_hz)) {
+		dev_err(&pdev->dev, "Max frequency not specified.\n");
+		ret = -ENXIO;
+		goto spi_geni_probe_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
+	if (!res) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "Err getting IO region\n");
+		goto spi_geni_probe_err;
+	}
+
+	geni_mas->phys_addr = res->start;
+	geni_mas->size = resource_size(res);
+	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!geni_mas->base) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Err IO mapping iomem\n");
+		goto spi_geni_probe_err;
+	}
+
+	geni_mas->irq = platform_get_irq(pdev, 0);
+	if (geni_mas->irq < 0) {
+		dev_err(&pdev->dev, "Err getting IRQ\n");
+		ret = geni_mas->irq;
+		goto spi_geni_probe_unmap;
+	}
+	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
+			       IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
+	if (ret) {
+		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+				   geni_mas->irq, ret);
+		goto spi_geni_probe_unmap;
+	}
+
+	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
+	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+	spi->num_chipselect = SPI_NUM_CHIPSELECT;
+	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
+	spi->prepare_message = spi_geni_prepare_message;
+	spi->unprepare_message = spi_geni_unprepare_message;
+	spi->transfer_one = spi_geni_transfer_one;
+	spi->unprepare_transfer_hardware
+			= spi_geni_unprepare_transfer_hardware;
+	spi->auto_runtime_pm = false;
+
+	init_completion(&geni_mas->xfer_done);
+	pm_runtime_enable(&pdev->dev);
+	ret = spi_register_master(spi);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register SPI master\n");
+		goto spi_geni_probe_unmap;
+	}
+	return ret;
+spi_geni_probe_unmap:
+	devm_iounmap(&pdev->dev, geni_mas->base);
+spi_geni_probe_err:
+	spi_master_put(spi);
+	return ret;
+}
+
+static int spi_geni_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);
+
+	spi_unregister_master(master);
+	se_geni_resources_off(&geni_mas->spi_rsc);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_geni_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct spi_master *spi = get_spi_master(dev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+	ret = se_geni_resources_off(&geni_mas->spi_rsc);
+	return ret;
+}
+
+static int spi_geni_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct spi_master *spi = get_spi_master(dev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+
+	ret = se_geni_resources_on(&geni_mas->spi_rsc);
+	return ret;
+}
+
+static int spi_geni_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_suspend(struct device *dev)
+{
+	if (!pm_runtime_status_suspended(dev))
+		return -EBUSY;
+	return 0;
+}
+#else
+static int spi_geni_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int spi_geni_suspend(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops spi_geni_pm_ops = {
+	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
+					spi_geni_runtime_resume, NULL)
+	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
+};
+
+static const struct of_device_id spi_geni_dt_match[] = {
+	{ .compatible = "qcom,spi-geni" },
+	{}
+};
+
+static struct platform_driver spi_geni_driver = {
+	.probe  = spi_geni_probe,
+	.remove = spi_geni_remove,
+	.driver = {
+		.name = "spi_geni",
+		.pm = &spi_geni_pm_ops,
+		.of_match_table = spi_geni_dt_match,
+	},
+};
+module_platform_driver(spi_geni_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_geni");
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 7e6f8d8..9ea4a9f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -504,9 +504,10 @@
 	return 0;
 }
 
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-			     size_t align, unsigned int heap_id_mask,
-			     unsigned int flags)
+static struct ion_handle *__ion_alloc(
+		struct ion_client *client, size_t len,
+		size_t align, unsigned int heap_id_mask,
+		unsigned int flags, bool grab_handle)
 {
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
@@ -605,6 +606,8 @@
 		return handle;
 
 	mutex_lock(&client->lock);
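+	/* Take an extra ref for the ioctl path; dropped after copy_to_user */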
+	if (grab_handle)
+		ion_handle_get(handle);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
@@ -614,6 +617,13 @@
 
 	return handle;
 }
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
+}
 EXPORT_SYMBOL(ion_alloc);
 
 static void ion_free_nolock(struct ion_client *client,
@@ -1524,10 +1534,10 @@
 	{
 		struct ion_handle *handle;
 
-		handle = ion_alloc(client, data.allocation.len,
-				   data.allocation.align,
-				   data.allocation.heap_id_mask,
-				   data.allocation.flags);
+		handle = __ion_alloc(client, data.allocation.len,
+				     data.allocation.align,
+				     data.allocation.heap_id_mask,
+				     data.allocation.flags, true);
 		if (IS_ERR(handle))
 			return PTR_ERR(handle);
 
@@ -1605,11 +1615,15 @@
 
 	if (dir & _IOC_READ) {
 		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
-			if (cleanup_handle)
+			if (cleanup_handle) {
 				ion_free(client, cleanup_handle);
+				ion_handle_put(cleanup_handle);
+			}
 			return -EFAULT;
 		}
 	}
+	if (cleanup_handle)
+		ion_handle_put(cleanup_handle);
 	return ret;
 }
 
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 574da15..7a2d45b 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -88,6 +88,27 @@
 	 driver. This console is used through a JTAG only on ARM. If you don't have
 	 a JTAG then you probably don't want this option.
 
+config HVC_DCC_SERIALIZE_SMP
+	bool "Use DCC only on core 0"
+	depends on SMP && HVC_DCC
+	help
+	  Some debuggers, such as Trace32 from Lauterbach GmbH, do not handle
+	  reads/writes from/to DCC on more than one core.  Each core has its
+	  own DCC device registers, so when a core reads or writes from/to DCC,
+	  it only accesses its own DCC device.  Since kernel code can run on
+	  any core, every time the kernel wants to write to the console, it
+	  might write to a different DCC.
+
+	  In SMP mode, Trace32 only uses the DCC on core 0.  In AMP mode, it
+	  creates multiple windows, and each window shows the DCC output
+	  only from that core's DCC.  The result is that console output is
+	  either lost or scattered across windows.
+
+	  Selecting this option will enable code that serializes all console
+	  input and output to core 0.  The DCC driver will create input and
+	  output FIFOs that all cores will use.  Reads and writes from/to DCC
+	  are handled by a workqueue that runs only on core 0.
+
 config HVC_BFIN_JTAG
 	bool "Blackfin JTAG console"
 	depends on BLACKFIN
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 82f240f..c987697 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, 2014, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,10 @@
  */
 
 #include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/console.h>
 
 #include <asm/dcc.h>
 #include <asm/processor.h>
@@ -48,6 +52,12 @@
 	return i;
 }
 
+/*
+ * Check if the DCC is enabled.  If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core 0.  That
+ * way, dcc_core0_available will be true only if it's available on core 0.
+ */
+#ifndef CONFIG_HVC_DCC_SERIALIZE_SMP
 static bool hvc_dcc_check(void)
 {
 	unsigned long time = jiffies + (HZ / 10);
@@ -62,12 +72,173 @@
 
 	return false;
 }
+#endif
+
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+static bool hvc_dcc_check(void)
+{
+	unsigned long time = jiffies + (HZ / 10);
+
+	static bool dcc_core0_available;
+
+	/*
+	 * If we're not on core 0, but we previously confirmed that DCC is
+	 * active, then just return true.
+	 */
+	if (smp_processor_id() && dcc_core0_available)
+		return true;
+
+	/* Write a test character to check if it is handled */
+	__dcc_putchar('\n');
+
+	while (time_is_after_jiffies(time)) {
+		if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+			dcc_core0_available = true;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void dcc_put_work_fn(struct work_struct *work);
+static void dcc_get_work_fn(struct work_struct *work);
+static DECLARE_WORK(dcc_pwork, dcc_put_work_fn);
+static DECLARE_WORK(dcc_gwork, dcc_get_work_fn);
+static DEFINE_SPINLOCK(dcc_lock);
+static DEFINE_KFIFO(inbuf, unsigned char, 128);
+static DEFINE_KFIFO(outbuf, unsigned char, 1024);
+
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	/* While there's data in the output FIFO, write it to the DCC */
+	while (kfifo_get(&outbuf, &ch))
+		hvc_dcc_put_chars(0, &ch, 1);
+
+	/* While we're at it, check for any input characters */
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	/*
+	 * Read characters from DCC and put them into the input FIFO, as
+	 * long as there is room and we have characters to read.
+	 */
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(uint32_t vt, const char *buf,
+					     int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+	if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+		len = kfifo_in(&outbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+		/*
+		 * We just push data to the output FIFO, so schedule the
+		 * workqueue that will actually write that data to DCC.
+		 */
+		schedule_work_on(0, &dcc_pwork);
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * write the data to DCC.
+	 */
+	len = hvc_dcc_put_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(uint32_t vt, char *buf, int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+		len = kfifo_out(&inbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+		/*
+		 * If the FIFO was empty, there may be characters in the DCC
+		 * that we haven't read yet.  Schedule a workqueue to fill
+		 * the input FIFO, so that the next time this function is
+		 * called, we'll have data.
+		 */
+		if (!len)
+			schedule_work_on(0, &dcc_gwork);
+
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * read the data from DCC.
+	 */
+	len = hvc_dcc_get_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+	.get_chars = hvc_dcc0_get_chars,
+	.put_chars = hvc_dcc0_put_chars,
+};
+
+#else
 
 static const struct hv_ops hvc_dcc_get_put_ops = {
 	.get_chars = hvc_dcc_get_chars,
 	.put_chars = hvc_dcc_put_chars,
 };
 
+#endif
+
 static int __init hvc_dcc_console_init(void)
 {
 	int ret;
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f44615f..3e2ef4f 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1036,8 +1036,10 @@
 	if (ourport->dma) {
 		ret = s3c24xx_serial_request_dma(ourport);
 		if (ret < 0) {
-			dev_warn(port->dev, "DMA request failed\n");
-			return ret;
+			dev_warn(port->dev,
+				 "DMA request failed, DMA will not be used\n");
+			devm_kfree(port->dev, ourport->dma);
+			ourport->dma = NULL;
 		}
 	}
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 479e223..f270ee9 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2228,8 +2228,65 @@
 	return hcd->driver->get_frame_number (hcd);
 }
 
+int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_setup(hcd, intr_num);
+}
+
+int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
+}
+
 /*-------------------------------------------------------------------------*/
 
+dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+}
+
+dma_addr_t
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_dcba_dma_addr(hcd, udev);
+}
+
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+}
+
 #ifdef	CONFIG_PM
 
 int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5921514..9d1bcba 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -685,6 +685,54 @@
 }
 EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
 
+int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_sec_event_ring_setup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_setup);
+
+int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
+
+dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_dcba_dma_addr(dev);
+}
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+}
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+
 /*-------------------------------------------------------------------*/
 /*
  * __usb_get_extra_descriptor() finds a descriptor of specific type in the
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 29e80cc..5dd1832 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -249,6 +249,7 @@
 		val = dwc3_omap_read_utmi_ctrl(omap);
 		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
 		dwc3_omap_write_utmi_ctrl(omap, val);
+		break;
 
 	case OMAP_DWC3_VBUS_OFF:
 		val = dwc3_omap_read_utmi_ctrl(omap);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 645cfff..990f423 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -28,23 +28,23 @@
 #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
 
 /* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n)		(((n) & 0x1f) << 0)
 #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
 #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN	(1 << 9)
 #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n)	(((n) & 0xff) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n)	(((n) & 0x1f) << 25)
 #define DWC3_DEPCFG_BULK_BASED		(1 << 30)
 #define DWC3_DEPCFG_FIFO_BASED		(1 << 31)
 
 /* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n)		((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n)	((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n)	((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n)		(((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	(((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n)	(((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n)	(((n) & 0xf) << 22)
 #define DWC3_DEPCFG_DATA_SEQ_NUM(n)	((n) << 26)
 /* This applies for core versions earlier than 1.94a */
 #define DWC3_DEPCFG_IGN_SEQ_NUM		(1 << 31)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 27ed51b..29b41b5 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,13 +258,6 @@
 	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
 	v4l2_event_queue(&uvc->vdev, &v4l2_event);
 
-	/* Pass additional setup data to userspace */
-	if (uvc->event_setup_out && uvc->event_length) {
-		uvc->control_req->length = uvc->event_length;
-		return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
-			uvc->control_req, GFP_ATOMIC);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a81d9ab..4fa5de2 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1031,6 +1031,8 @@
 	int		rc;
 
 	dum = *((void **)dev_get_platdata(&pdev->dev));
+	/* Clear usb_gadget region for new registration to udc-core */
+	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
 	dum->gadget.name = gadget_name;
 	dum->gadget.ops = &dummy_ops;
 	dum->gadget.max_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b38a228..af0566d 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -361,7 +361,7 @@
 
 		case USB_PORT_FEAT_SUSPEND:
 			dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
-			if (valid_port(wIndex)) {
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
 						       1);
 				return 0;
@@ -404,7 +404,7 @@
 
 		case USB_PORT_FEAT_SUSPEND:
 			dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
-			if (valid_port(wIndex)) {
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
 						       0);
 				return 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 74c42f7..3425154 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -111,7 +111,7 @@
 	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
 
 	/* xhci 1.1 controllers have the HCCPARAMS2 register */
-	if (hci_version > 100) {
+	if (hci_version > 0x100) {
 		temp = readl(&xhci->cap_regs->hcc_params2);
 		xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
 		xhci_dbg(xhci, "  HC %s Force save context capability",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 7064892..be0a89e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1824,25 +1824,151 @@
 	kfree(command);
 }
 
-void xhci_mem_cleanup(struct xhci_hcd *xhci)
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
 {
-	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	union xhci_trb *erdp_trb, *current_trb;
+	struct xhci_segment	*seg;
+	u64 erdp_reg;
+	u32 iman_reg;
+	dma_addr_t deq;
+	unsigned long segment_offset;
+
+	/* disable irq, ack pending interrupt and ack all pending events */
+
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg &= ~IMAN_IE;
+	writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	if (iman_reg & IMAN_IP)
+		writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+
+	/* last acked event trb is in erdp reg  */
+	erdp_reg =
+		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+	if (!deq) {
+		pr_debug("%s: event ring handling not required\n", __func__);
+		return;
+	}
+
+	seg = xhci->sec_event_ring[intr_num]->first_seg;
+	segment_offset = deq - seg->dma;
+
+	/* find out virtual address of the last acked event trb */
+	erdp_trb = current_trb = &seg->trbs[0] +
+				(segment_offset/sizeof(*current_trb));
+
+	/* read cycle state of the last acked trb to find out CCS */
+	xhci->sec_event_ring[intr_num]->cycle_state =
+				(current_trb->event_cmd.flags & TRB_CYCLE);
+
+	while (1) {
+		/* last trb of the event ring: toggle cycle state */
+		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+			current_trb = &seg->trbs[0];
+		} else {
+			current_trb++;
+		}
+
+		/* cycle state transition */
+		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+		    xhci->sec_event_ring[intr_num]->cycle_state)
+			break;
+	}
+
+	if (erdp_trb != current_trb) {
+		deq =
+		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+					current_trb);
+		if (deq == 0)
+			xhci_warn(xhci,
+				"WARN invalid SW event ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		erdp_reg &= ERST_PTR_MASK;
+		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	erdp_reg |= ERST_EHB;
+	xhci_write_64(xhci, erdp_reg,
+			&xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
+{
 	int size;
-	int i, j, num_ports;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 
-	cancel_delayed_work_sync(&xhci->cmd_timer);
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "invalid secondary interrupter num %d\n",
+			intr_num);
+		return -EINVAL;
+	}
 
-	/* Free the Event Ring Segment Table and the actual Event Ring */
+	size =
+	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
+	if (xhci->sec_erst[intr_num].entries) {
+		xhci_handle_sec_intr_events(xhci, intr_num);
+		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
+				xhci->sec_erst[intr_num].erst_dma_addr);
+		xhci->sec_erst[intr_num].entries = NULL;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
+		intr_num);
+	if (xhci->sec_event_ring[intr_num])
+		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
+	xhci->sec_event_ring[intr_num] = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Freed sec event ring");
+
+	return 0;
+}
+
+void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+{
+	int size;
+	unsigned int i;
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+
+	/* sec event ring clean up */
+	for (i = 1; i < xhci->max_interrupters; i++)
+		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
+
+	kfree(xhci->sec_ir_set);
+	xhci->sec_ir_set = NULL;
+	kfree(xhci->sec_erst);
+	xhci->sec_erst = NULL;
+	kfree(xhci->sec_event_ring);
+	xhci->sec_event_ring = NULL;
+
+	/* primary event ring clean up */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	int i, j, num_ports;
+
+	cancel_delayed_work_sync(&xhci->cmd_timer);
+
+	xhci_event_ring_cleanup(xhci);
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -2083,30 +2209,6 @@
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
-	u64 temp;
-	dma_addr_t deq;
-
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci, "WARN something wrong with SW event ring "
-				"dequeue ptr.\n");
-	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	temp &= ~ERST_EHB;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write event ring dequeue pointer, "
-			"preserving EHB bit");
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue);
-}
-
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		__le32 __iomem *addr, int max_caps)
 {
@@ -2365,13 +2467,183 @@
 	return 0;
 }
 
+int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
+	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
+	unsigned int intr_num, gfp_t flags)
+{
+	dma_addr_t dma, deq;
+	u64 val_64;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
+	if (!*er)
+		return -ENOMEM;
+
+	erst->entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			flags);
+	if (!erst->entries) {
+		xhci_ring_free(xhci, *er);
+		return -ENOMEM;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: Allocated event ring segment table at 0x%llx",
+		intr_num, (unsigned long long)dma);
+
+	memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	erst->num_entries = ERST_NUM_SEGS;
+	erst->erst_dma_addr = dma;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: num segs = %i, virt addr = %p, dma addr = 0x%llx",
+			intr_num,
+			erst->num_entries,
+			erst->entries,
+			(unsigned long long)erst->erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &erst->entries[val];
+
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl_relaxed(&ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
+		intr_num);
+	writel_relaxed(val, &ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"intr# %d: Set ERST entries to point to event ring.",
+			intr_num);
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST base address for ir_set %d = 0x%llx",
+			intr_num,
+			(unsigned long long)erst->erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci,
+		"intr# %d: WARN something wrong with SW event ring deq ptr.\n",
+		intr_num);
+	/* Update HC event ring dequeue pointer */
+	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
+	val_64 &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	val_64 &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: Write event ring dequeue pointer, preserving EHB bit",
+		intr_num);
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
+			&ir_set->erst_dequeue);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set %d.", intr_num);
+	xhci_print_ir_set(xhci, intr_num);
+
+	return 0;
+}
+
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
+{
+	int ret;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
+		|| !xhci->sec_event_ring || !xhci->sec_erst ||
+		intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci,
+		"%s:state %x ir_set %p evt_ring %p erst %p intr# %d\n",
+		__func__, xhci->xhc_state, xhci->sec_ir_set,
+		xhci->sec_event_ring, xhci->sec_erst, intr_num);
+		return -EINVAL;
+	}
+
+	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		goto done;
+
+	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
+	ret = xhci_event_ring_setup(xhci,
+				&xhci->sec_event_ring[intr_num],
+				xhci->sec_ir_set[intr_num],
+				&xhci->sec_erst[intr_num],
+				intr_num, GFP_KERNEL);
+	if (ret) {
+		xhci_err(xhci, "sec event ring setup failed intr#%d\n",
+			intr_num);
+		return ret;
+	}
+done:
+	return 0;
+}
+
+int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int ret = 0;
+
+	/* primary + secondary */
+	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating primary event ring");
+
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
+	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
+		&xhci->erst, 0, flags);
+	if (ret) {
+		xhci_err(xhci, "failed to setup primary event ring\n");
+		goto fail;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating sec event ring related pointers");
+
+	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_ir_set), flags);
+	if (!xhci->sec_ir_set) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_event_ring), flags);
+	if (!xhci->sec_event_ring) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_erst = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_erst), flags);
+	if (!xhci->sec_erst)
+		ret = -ENOMEM;
+fail:
+	return ret;
+}
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
 	u64		val_64;
-	struct xhci_segment	*seg;
 	u32 page_size, temp;
 	int i;
 
@@ -2497,74 +2769,17 @@
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-					0, flags);
-	if (!xhci->event_ring)
+	if (xhci_event_ring_init(xhci, GFP_KERNEL))
 		goto fail;
+
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
-		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
-
-	/* set ERST count with the number of entries in the segment table */
-	val = readl(&xhci->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
-			val);
-	writel(val, &xhci->ir_set->erst_size);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST entries to point to event ring.");
-	/* set the segment table base address */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST base address for ir_set 0 = 0x%llx",
-			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Wrote ERST address to ir_set 0.");
-	xhci_print_ir_set(xhci, 0);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index aa3c706..129bb3f 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -287,6 +287,7 @@
 	struct clk *clk = xhci->clk;
 
 	pm_runtime_disable(&dev->dev);
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34e23c7..64daa09 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4942,6 +4942,61 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+	unsigned int intr_num)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+			xhci->max_interrupters);
+		return 0;
+	}
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+	return 0;
+}
+
+dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+		return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+	return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+	int ret;
+	unsigned int ep_index;
+	struct xhci_virt_device *virt_dev;
+
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0) {
+		xhci_err(xhci, "%s: invalid args\n", __func__);
+		return 0;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+
+	if (virt_dev->eps[ep_index].ring &&
+		virt_dev->eps[ep_index].ring->first_seg)
+		return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+	return 0;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5001,6 +5056,11 @@
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
 	.find_raw_port_number =	xhci_find_raw_port_number,
+	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
+	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
+	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
+	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
+	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 5250c72..0fe91df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1535,6 +1535,9 @@
 	/* Our HCD's current interrupter register set */
 	struct	xhci_intr_reg __iomem *ir_set;
 
+	/* secondary interrupter */
+	struct	xhci_intr_reg __iomem **sec_ir_set;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1576,6 +1579,11 @@
 	struct xhci_command	*current_cmd;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+
+	/* secondary event ring and erst */
+	struct xhci_ring	**sec_event_ring;
+	struct xhci_erst	*sec_erst;
+
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 	/* Store LPM test failed devices' information */
@@ -1842,6 +1850,8 @@
 void xhci_urb_free_priv(struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num);
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 095778f..37c63cb 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -781,12 +781,6 @@
 	iface_desc = interface->cur_altsetting;
 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
-	if (iface_desc->desc.bNumEndpoints < 1) {
-		dev_err(&interface->dev, "Invalid number of endpoints\n");
-		retval = -EINVAL;
-		goto error;
-	}
-
 	/* set up the endpoint information */
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@
 			/* this one will match for the IOWarrior56 only */
 			dev->int_out_endpoint = endpoint;
 	}
+
+	if (!dev->int_in_endpoint) {
+		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+		if (!dev->int_out_endpoint) {
+			dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+			retval = -ENODEV;
+			goto error;
+		}
+	}
+
 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 2e731af..da08047 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -243,6 +243,10 @@
 #define MAX_VDM_RESPONSE_TIME	60 /* 2 * tVDMSenderResponse_max(30ms) */
 #define MAX_VDM_BUSY_TIME	100 /* 2 * tVDMBusy (50ms) */
 
+#define PD_SNK_PDO_FIXED(prs, hc, uc, usb_comm, drs, volt, curr) \
+	(((prs) << 29) | ((hc) << 28) | ((uc) << 27) | ((usb_comm) << 26) | \
+	 ((drs) << 25) | ((volt) << 10) | (curr))
+
 /* VDM header is the first 32-bit object following the 16-bit PD header */
 #define VDM_HDR_SVID(hdr)	((hdr) >> 16)
 #define VDM_IS_SVDM(hdr)	((hdr) & 0x8000)
@@ -273,7 +277,7 @@
 module_param(min_sink_current, int, 0600);
 
 static const u32 default_src_caps[] = { 0x36019096 };	/* VSafe5V @ 1.5A */
-static const u32 default_snk_caps[] = { 0x2601905A };	/* 5V @ 900mA */
+static const u32 default_snk_caps[] = { 0x2601912C };	/* VSafe5V @ 3A */
 
 struct vdm_tx {
 	u32			data[7];
@@ -305,7 +309,7 @@
 	spinlock_t		rx_lock;
 
 	u32			received_pdos[7];
-	int			src_cap_id;
+	u16			src_cap_id;
 	u8			selected_pdo;
 	u8			requested_pdo;
 	u32			rdo;	/* can be either source or sink */
@@ -318,6 +322,9 @@
 	bool			peer_pr_swap;
 	bool			peer_dr_swap;
 
+	u32			sink_caps[7];
+	int			num_sink_caps;
+
 	struct power_supply	*usb_psy;
 	struct notifier_block	psy_nb;
 
@@ -331,8 +338,10 @@
 	enum power_role		current_pr;
 	bool			in_pr_swap;
 	bool			pd_phy_opened;
-	struct completion	swap_complete;
+	bool			send_request;
+	struct completion	is_ready;
 
+	struct mutex		swap_lock;
 	struct dual_role_phy_instance	*dual_role;
 	struct dual_role_phy_desc	dr_desc;
 	bool			send_pr_swap;
@@ -456,6 +465,9 @@
 	 */
 	pd->rx_msgid = -1;
 	pd->tx_msgid = 0;
+	pd->send_request = false;
+	pd->send_pr_swap = false;
+	pd->send_dr_swap = false;
 }
 
 static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
@@ -835,7 +847,7 @@
 		}
 
 		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
-		complete(&pd->swap_complete);
+		complete(&pd->is_ready);
 		dual_role_instance_changed(pd->dual_role);
 		break;
 
@@ -970,7 +982,7 @@
 	case PE_SNK_READY:
 		pd->in_explicit_contract = true;
 		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
-		complete(&pd->swap_complete);
+		complete(&pd->is_ready);
 		dual_role_instance_changed(pd->dual_role);
 		break;
 
@@ -1539,9 +1551,9 @@
 		pd->hard_reset_recvd = false;
 		pd->caps_count = 0;
 		pd->hard_reset_count = 0;
-		pd->src_cap_id = 0;
 		pd->requested_voltage = 0;
 		pd->requested_current = 0;
+		pd->selected_pdo = pd->requested_pdo = 0;
 		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
 		rx_msg_cleanup(pd);
 
@@ -1609,8 +1621,12 @@
 				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
 
 		pd->in_pr_swap = false;
+		pd->in_explicit_contract = false;
+		pd->selected_pdo = pd->requested_pdo = 0;
+		pd->rdo = 0;
 		rx_msg_cleanup(pd);
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 
 		if (pd->current_pr == PR_SINK) {
 			usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
@@ -1718,8 +1734,8 @@
 			}
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
-					default_snk_caps,
-					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+					pd->sink_caps, pd->num_sink_caps,
+					SOP_MSG);
 			if (ret) {
 				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
 				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
@@ -1833,8 +1849,10 @@
 
 		pd_send_hard_reset(pd);
 		pd->in_explicit_contract = false;
+		pd->rdo = 0;
 		rx_msg_cleanup(pd);
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 
 		pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
 		kick_sm(pd, PS_HARD_RESET_TIME);
@@ -1913,6 +1931,11 @@
 
 	case PE_SNK_SELECT_CAPABILITY:
 		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+			bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+				&& (PD_SRC_PDO_TYPE(pdo) ==
+						PD_SRC_PDO_TYPE_AUGMENTED);
+
 			usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
 
 			/* prepare for voltage increase/decrease */
@@ -1924,11 +1947,12 @@
 					&val);
 
 			/*
-			 * if we are changing voltages, we must lower input
-			 * current to pSnkStdby (2.5W). Calculate it and set
-			 * PD_CURRENT_MAX accordingly.
+			 * if changing voltages (not within the same PPS PDO),
+			 * we must lower input current to pSnkStdby (2.5W).
+			 * Calculate it and set PD_CURRENT_MAX accordingly.
 			 */
-			if (pd->requested_voltage != pd->current_voltage) {
+			if (!same_pps &&
+				pd->requested_voltage != pd->current_voltage) {
 				int mv = max(pd->requested_voltage,
 						pd->current_voltage) / 1000;
 				val.intval = (2500000 / mv) * 1000;
@@ -1996,8 +2020,8 @@
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
-					default_snk_caps,
-					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+					pd->sink_caps, pd->num_sink_caps,
+					SOP_MSG);
 			if (ret) {
 				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
 				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
@@ -2068,6 +2092,9 @@
 			vconn_swap(pd);
 		} else if (IS_DATA(rx_msg, MSG_VDM)) {
 			handle_vdm_rx(pd, rx_msg);
+		} else if (pd->send_request) {
+			pd->send_request = false;
+			usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
 		} else if (pd->send_pr_swap && is_sink_tx_ok(pd)) {
 			pd->send_pr_swap = false;
 			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
@@ -2151,7 +2178,10 @@
 
 		pd_send_hard_reset(pd);
 		pd->in_explicit_contract = false;
+		pd->selected_pdo = pd->requested_pdo = 0;
+		pd->rdo = 0;
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 		usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
 		break;
 
@@ -2533,17 +2563,21 @@
 				return -EAGAIN;
 			}
 
-			reinit_completion(&pd->swap_complete);
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
 			pd->send_dr_swap = true;
 			kick_sm(pd, 0);
 
 			/* wait for operation to complete */
-			if (!wait_for_completion_timeout(&pd->swap_complete,
+			if (!wait_for_completion_timeout(&pd->is_ready,
 					msecs_to_jiffies(100))) {
 				usbpd_err(&pd->dev, "data_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
 				return -ETIMEDOUT;
 			}
 
+			mutex_unlock(&pd->swap_lock);
+
 			if ((*val == DUAL_ROLE_PROP_DR_HOST &&
 					pd->current_dr != DR_DFP) ||
 				(*val == DUAL_ROLE_PROP_DR_DEVICE &&
@@ -2584,17 +2618,21 @@
 				return -EAGAIN;
 			}
 
-			reinit_completion(&pd->swap_complete);
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
 			pd->send_pr_swap = true;
 			kick_sm(pd, 0);
 
 			/* wait for operation to complete */
-			if (!wait_for_completion_timeout(&pd->swap_complete,
+			if (!wait_for_completion_timeout(&pd->is_ready,
 					msecs_to_jiffies(2000))) {
 				usbpd_err(&pd->dev, "power_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
 				return -ETIMEDOUT;
 			}
 
+			mutex_unlock(&pd->swap_lock);
+
 			if ((*val == DUAL_ROLE_PROP_PR_SRC &&
 					pd->current_pr != PR_SRC) ||
 				(*val == DUAL_ROLE_PROP_PR_SNK &&
@@ -2857,36 +2895,62 @@
 	int pdo, uv = 0, ua = 0;
 	int ret;
 
+	mutex_lock(&pd->swap_lock);
+
 	/* Only allowed if we are already in explicit sink contract */
 	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
 		usbpd_err(&pd->dev, "select_pdo: Cannot select new PDO yet\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out;
 	}
 
 	ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
 	if (ret != 2 && ret != 4) {
 		usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO> [<uV> <uA>]\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (src_cap_id != pd->src_cap_id) {
 		usbpd_err(&pd->dev, "select_pdo: src_cap_id mismatch.  Requested:%d, current:%d\n",
 				src_cap_id, pd->src_cap_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (pdo < 1 || pdo > 7) {
 		usbpd_err(&pd->dev, "select_pdo: invalid PDO:%d\n", pdo);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	ret = pd_select_pdo(pd, pdo, uv, ua);
 	if (ret)
-		return ret;
+		goto out;
 
-	usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
+	reinit_completion(&pd->is_ready);
+	pd->send_request = true;
+	kick_sm(pd, 0);
 
-	return size;
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "select_pdo: request timed out\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* determine if request was accepted/rejected */
+	if (pd->selected_pdo != pd->requested_pdo ||
+			pd->current_voltage != pd->requested_voltage) {
+		usbpd_err(&pd->dev, "select_pdo: request rejected\n");
+		ret = -EINVAL;
+	}
+
+out:
+	pd->send_request = false;
+	mutex_unlock(&pd->swap_lock);
+	return ret ? ret : size;
 }
 
 static ssize_t select_pdo_show(struct device *dev,
@@ -3116,6 +3180,7 @@
 	INIT_WORK(&pd->sm_work, usbpd_sm);
 	hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pd->timer.function = pd_timeout;
+	mutex_init(&pd->swap_lock);
 
 	pd->usb_psy = power_supply_get_by_name("usb");
 	if (!pd->usb_psy) {
@@ -3157,6 +3222,44 @@
 	pd->vconn_is_external = device_property_present(parent,
 					"qcom,vconn-uses-external-source");
 
+	pd->num_sink_caps = device_property_read_u32_array(parent,
+			"qcom,default-sink-caps", NULL, 0);
+	if (pd->num_sink_caps > 0) {
+		int i;
+		u32 sink_caps[14];
+
+		if (pd->num_sink_caps % 2 || pd->num_sink_caps > 14) {
+			ret = -EINVAL;
+			usbpd_err(&pd->dev, "default-sink-caps must be specified as voltage/current, max 7 pairs\n");
+			goto put_psy;
+		}
+
+		ret = device_property_read_u32_array(parent,
+				"qcom,default-sink-caps", sink_caps,
+				pd->num_sink_caps);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error reading default-sink-caps\n");
+			goto put_psy;
+		}
+
+		pd->num_sink_caps /= 2;
+
+		for (i = 0; i < pd->num_sink_caps; i++) {
+			int v = sink_caps[i * 2] / 50;
+			int c = sink_caps[i * 2 + 1] / 10;
+
+			pd->sink_caps[i] =
+				PD_SNK_PDO_FIXED(0, 0, 0, 0, 0, v, c);
+		}
+
+		/* First PDO includes additional capabilities */
+		pd->sink_caps[0] |= PD_SNK_PDO_FIXED(1, 0, 0, 1, 1, 0, 0);
+	} else {
+		memcpy(pd->sink_caps, default_snk_caps,
+				sizeof(default_snk_caps));
+		pd->num_sink_caps = ARRAY_SIZE(default_snk_caps);
+	}
+
 	/*
 	 * Register the Android dual-role class (/sys/class/dual_role_usb/).
 	 * The first instance should be named "otg_default" as that's what
@@ -3188,7 +3291,7 @@
 	spin_lock_init(&pd->rx_lock);
 	INIT_LIST_HEAD(&pd->rx_q);
 	INIT_LIST_HEAD(&pd->svid_handlers);
-	init_completion(&pd->swap_complete);
+	init_completion(&pd->is_ready);
 
 	pd->psy_nb.notifier_call = psy_changed;
 	ret = power_supply_reg_notifier(&pd->psy_nb);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 58eb287..bf155ae9 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -118,15 +118,23 @@
 
 	if (!qphy->clocks_enabled && on) {
 		clk_prepare_enable(qphy->ref_clk_src);
-		clk_prepare_enable(qphy->ref_clk);
-		clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (qphy->ref_clk)
+			clk_prepare_enable(qphy->ref_clk);
+
+		if (qphy->cfg_ahb_clk)
+			clk_prepare_enable(qphy->cfg_ahb_clk);
+
 		qphy->clocks_enabled = true;
 	}
 
 	if (qphy->clocks_enabled && !on) {
-		clk_disable_unprepare(qphy->ref_clk);
+		if (qphy->cfg_ahb_clk)
+			clk_disable_unprepare(qphy->cfg_ahb_clk);
+
+		if (qphy->ref_clk)
+			clk_disable_unprepare(qphy->ref_clk);
+
 		clk_disable_unprepare(qphy->ref_clk_src);
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = false;
 	}
 
@@ -744,15 +752,28 @@
 		}
 	}
 
+	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(qphy->ref_clk_src))
+	if (IS_ERR(qphy->ref_clk_src)) {
 		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+		ret = PTR_ERR(qphy->ref_clk_src);
+		return ret;
+	}
 
-	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(qphy->ref_clk))
-		dev_dbg(dev, "clk get failed for ref_clk\n");
-	else
+	/* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "ref_clk") >= 0) {
+		qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+		if (IS_ERR(qphy->ref_clk)) {
+			ret = PTR_ERR(qphy->ref_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_dbg(dev,
+					"clk get failed for ref_clk\n");
+			return ret;
+		}
+
 		clk_set_rate(qphy->ref_clk, 19200000);
+	}
 
 	if (of_property_match_string(pdev->dev.of_node,
 				"clock-names", "cfg_ahb_clk") >= 0) {
@@ -933,14 +954,7 @@
 	struct qusb_phy *qphy = platform_get_drvdata(pdev);
 
 	usb_remove_phy(&qphy->phy);
-
-	if (qphy->clocks_enabled) {
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
-		clk_disable_unprepare(qphy->ref_clk);
-		clk_disable_unprepare(qphy->ref_clk_src);
-		qphy->clocks_enabled = false;
-	}
-
+	qusb_phy_enable_clocks(qphy, false);
 	qusb_phy_enable_power(qphy, false, true);
 
 	return 0;
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 43f06f3..ee521a0 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -84,6 +84,7 @@
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
+	struct clk		*com_aux_clk;
 	struct clk		*cfg_ahb_clk;
 	struct clk		*pipe_clk;
 	struct reset_control	*phy_reset;
@@ -114,6 +115,8 @@
 };
 MODULE_DEVICE_TABLE(of, msm_usb_id_table);
 
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on);
+
 static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
 {
 	return phy->cable_connected ? "connected" : "disconnected";
@@ -292,21 +295,7 @@
 		return ret;
 	}
 
-	if (!phy->clk_enabled) {
-		if (phy->ref_clk_src)
-			clk_prepare_enable(phy->ref_clk_src);
-		if (phy->ref_clk)
-			clk_prepare_enable(phy->ref_clk);
-		clk_prepare_enable(phy->aux_clk);
-		clk_prepare_enable(phy->cfg_ahb_clk);
-		clk_set_rate(phy->pipe_clk, 125000000);
-		clk_prepare_enable(phy->pipe_clk);
-		phy->clk_enabled = true;
-	}
-
-	/* select usb3 phy mode */
-	if (phy->tcsr_usb3_dp_phymode)
-		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+	msm_ssphy_qmp_enable_clks(phy, true);
 
 	writel_relaxed(0x01,
 		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -383,6 +372,10 @@
 		goto deassert_phy_phy_reset;
 	}
 
+	/* select usb3 phy mode */
+	if (phy->tcsr_usb3_dp_phymode)
+		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
 	/* Deassert USB3 PHY CSR reset */
 	ret = reset_control_deassert(phy->phy_reset);
 	if (ret) {
@@ -469,29 +462,13 @@
 		/* Make sure above write completed with PHY */
 		wmb();
 
-		clk_disable_unprepare(phy->cfg_ahb_clk);
-		clk_disable_unprepare(phy->aux_clk);
-		clk_disable_unprepare(phy->pipe_clk);
-		if (phy->ref_clk)
-			clk_disable_unprepare(phy->ref_clk);
-		if (phy->ref_clk_src)
-			clk_disable_unprepare(phy->ref_clk_src);
-		phy->clk_enabled = false;
+		msm_ssphy_qmp_enable_clks(phy, false);
 		phy->in_suspend = true;
 		msm_ssphy_power_enable(phy, 0);
 		dev_dbg(uphy->dev, "QMP PHY is suspend\n");
 	} else {
 		msm_ssphy_power_enable(phy, 1);
-		clk_prepare_enable(phy->pipe_clk);
-		if (!phy->clk_enabled) {
-			if (phy->ref_clk_src)
-				clk_prepare_enable(phy->ref_clk_src);
-			if (phy->ref_clk)
-				clk_prepare_enable(phy->ref_clk);
-			clk_prepare_enable(phy->aux_clk);
-			clk_prepare_enable(phy->cfg_ahb_clk);
-			phy->clk_enabled = true;
-		}
+		msm_ssphy_qmp_enable_clks(phy, true);
 		if (!phy->cable_connected) {
 			writel_relaxed(0x01,
 			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -533,16 +510,9 @@
 	return 0;
 }
 
-static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev)
 {
-	struct msm_ssphy_qmp *phy;
-	struct device *dev = &pdev->dev;
-	struct resource *res;
-	int ret = 0, size = 0, len;
-
-	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy)
-		return -ENOMEM;
+	int ret = 0;
 
 	phy->aux_clk = devm_clk_get(dev, "aux_clk");
 	if (IS_ERR(phy->aux_clk)) {
@@ -552,11 +522,10 @@
 			dev_err(dev, "failed to get aux_clk\n");
 		goto err;
 	}
-
 	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
 
-	if (of_property_match_string(pdev->dev.of_node,
-				"clock-names", "cfg_ahb_clk") >= 0) {
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "cfg_ahb_clk") >= 0) {
 		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
 		if (IS_ERR(phy->cfg_ahb_clk)) {
 			ret = PTR_ERR(phy->cfg_ahb_clk);
@@ -576,6 +545,88 @@
 		goto err;
 	}
 
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "com_aux_clk") >= 0) {
+		phy->com_aux_clk = devm_clk_get(dev, "com_aux_clk");
+		if (IS_ERR(phy->com_aux_clk)) {
+			ret = PTR_ERR(phy->com_aux_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get com_aux_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+err:
+	return ret;
+}
+
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on)
+{
+	dev_dbg(phy->phy.dev, "%s(): clk_enabled:%d on:%d\n", __func__,
+					phy->clk_enabled, on);
+
+	if (!phy->clk_enabled && on) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+
+		if (phy->com_aux_clk)
+			clk_prepare_enable(phy->com_aux_clk);
+
+		clk_prepare_enable(phy->aux_clk);
+		if (phy->cfg_ahb_clk)
+			clk_prepare_enable(phy->cfg_ahb_clk);
+
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	if (phy->clk_enabled && !on) {
+		clk_disable_unprepare(phy->pipe_clk);
+
+		if (phy->cfg_ahb_clk)
+			clk_disable_unprepare(phy->cfg_ahb_clk);
+
+		clk_disable_unprepare(phy->aux_clk);
+		if (phy->com_aux_clk)
+			clk_disable_unprepare(phy->com_aux_clk);
+
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+
+		phy->clk_enabled = false;
+	}
+}
+
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	ret = msm_ssphy_qmp_get_clks(phy, dev);
+	if (ret)
+		goto err;
+
 	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
 	if (IS_ERR(phy->phy_reset)) {
 		ret = PTR_ERR(phy->phy_reset);
@@ -726,13 +777,6 @@
 		goto err;
 	}
 
-	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(phy->ref_clk_src))
-		phy->ref_clk_src = NULL;
-	phy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(phy->ref_clk))
-		phy->ref_clk = NULL;
-
 	platform_set_drvdata(pdev, phy);
 
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
@@ -760,14 +804,8 @@
 		return 0;
 
 	usb_remove_phy(&phy->phy);
-	if (phy->ref_clk)
-		clk_disable_unprepare(phy->ref_clk);
-	if (phy->ref_clk_src)
-		clk_disable_unprepare(phy->ref_clk_src);
+	msm_ssphy_qmp_enable_clks(phy, false);
 	msm_ssusb_qmp_ldo_enable(phy, 0);
-	clk_disable_unprepare(phy->aux_clk);
-	clk_disable_unprepare(phy->cfg_ahb_clk);
-	clk_disable_unprepare(phy->pipe_clk);
 	kfree(phy);
 	return 0;
 }
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 6a1df9e..30bf0f5 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1482,16 +1482,20 @@
 	struct usb_serial *serial = port->serial;
 	struct tty_struct *tty;
 	struct digi_port *priv = usb_get_serial_port_data(port);
+	unsigned char *buf = urb->transfer_buffer;
 	int opcode, line, status, val;
 	int i;
 	unsigned int rts;
 
+	if (urb->actual_length < 4)
+		return -1;
+
 	/* handle each oob command */
-	for (i = 0; i < urb->actual_length - 3;) {
-		opcode = ((unsigned char *)urb->transfer_buffer)[i++];
-		line = ((unsigned char *)urb->transfer_buffer)[i++];
-		status = ((unsigned char *)urb->transfer_buffer)[i++];
-		val = ((unsigned char *)urb->transfer_buffer)[i++];
+	for (i = 0; i < urb->actual_length - 3; i += 4) {
+		opcode = buf[i];
+		line = buf[i + 1];
+		status = buf[i + 2];
+		val = buf[i + 3];
 
 		dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
 			opcode, line, status, val);
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c02808a..f1a8fdc 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1674,6 +1674,12 @@
 	function    = TIUMP_GET_FUNC_FROM_CODE(data[0]);
 	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
 		port_number, function, data[1]);
+
+	if (port_number >= edge_serial->serial->num_ports) {
+		dev_err(dev, "bad port number %d\n", port_number);
+		goto exit;
+	}
+
 	port = edge_serial->serial->port[port_number];
 	edge_port = usb_get_serial_port_data(port);
 	if (!edge_port) {
@@ -1755,7 +1761,7 @@
 
 	port_number = edge_port->port->port_number;
 
-	if (edge_port->lsr_event) {
+	if (urb->actual_length > 0 && edge_port->lsr_event) {
 		edge_port->lsr_event = 0;
 		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
 			__func__, port_number, edge_port->lsr_mask, *data);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index a180b17..76564b3 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -142,12 +142,6 @@
 
 static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct usb_serial	*serial = port->serial;
-	struct usb_serial_port	*wport;
-
-	wport = serial->port[1];
-	tty_port_tty_set(&wport->port, tty);
-
 	return usb_serial_generic_open(tty, port);
 }
 
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 93c6c9b..8a069aa 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -200,6 +200,11 @@
 	if (!safe)
 		goto out;
 
+	if (length < 2) {
+		dev_err(&port->dev, "malformed packet\n");
+		return;
+	}
+
 	fcs = fcs_compute10(data, length, CRC10_INITFCS);
 	if (fcs) {
 		dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d1b7ac7..a826864 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3879,8 +3879,7 @@
 	unsigned blocksize;
 	struct inode *inode = mapping->host;
 
-	/* If we are processing an encrypted inode during orphan list
-	 * handling */
+	/* If we are processing an encrypted inode during orphan list handling */
 	if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
 		return 0;
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c01eeaa..5cc0a36 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1436,6 +1436,204 @@
 
 #endif
 
+/*
+ * Print out various scheduling related per-task fields:
+ */
+
+#ifdef CONFIG_SMP
+
+static int sched_wake_up_idle_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_wake_up_idle(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_wake_up_idle_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int wake_up_idle, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_wake_up_idle(p, wake_up_idle);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_wake_up_idle_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
+	.open		= sched_wake_up_idle_open,
+	.read		= seq_read,
+	.write		= sched_wake_up_idle_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_HMP
+
+static int sched_init_task_load_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_init_task_load(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_init_task_load_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int init_task_load, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &init_task_load);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_init_task_load(p, init_task_load);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_init_task_load_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_init_task_load_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_init_task_load_operations = {
+	.open		= sched_init_task_load_open,
+	.read		= seq_read,
+	.write		= sched_init_task_load_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int sched_group_id_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_group_id(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_group_id_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int group_id, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &group_id);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_group_id(p, group_id);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_group_id_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_group_id_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_group_id_operations = {
+	.open		= sched_group_id_open,
+	.read		= seq_read,
+	.write		= sched_group_id_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -2861,6 +3059,13 @@
 	ONE("status",     S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	  S_IRUGO, proc_pid_limits),
+#ifdef CONFIG_SMP
+	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
+#endif
+#ifdef CONFIG_SCHED_HMP
+	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
+	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
+#endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index dbee8901..e169172 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,34 +88,17 @@
 #define CAM_CC_MCLK3_CLK_SRC					71
 #define CAM_CC_PLL0						72
 #define CAM_CC_PLL0_OUT_EVEN					73
-#define CAM_CC_PLL0_OUT_MAIN					74
-#define CAM_CC_PLL0_OUT_ODD					75
-#define CAM_CC_PLL0_OUT_TEST					76
-#define CAM_CC_PLL1						77
-#define CAM_CC_PLL1_OUT_EVEN					78
-#define CAM_CC_PLL1_OUT_MAIN					79
-#define CAM_CC_PLL1_OUT_ODD					80
-#define CAM_CC_PLL1_OUT_TEST					81
-#define CAM_CC_PLL2						82
-#define CAM_CC_PLL2_OUT_EVEN					83
-#define CAM_CC_PLL2_OUT_MAIN					84
-#define CAM_CC_PLL2_OUT_ODD					85
-#define CAM_CC_PLL2_OUT_TEST					86
-#define CAM_CC_PLL3						87
-#define CAM_CC_PLL3_OUT_EVEN					88
-#define CAM_CC_PLL3_OUT_MAIN					89
-#define CAM_CC_PLL3_OUT_ODD					90
-#define CAM_CC_PLL3_OUT_TEST					91
-#define CAM_CC_PLL_TEST_CLK					92
-#define CAM_CC_SLOW_AHB_CLK_SRC					93
-#define CAM_CC_SOC_AHB_CLK					94
-#define CAM_CC_SPDM_BPS_CLK					95
-#define CAM_CC_SPDM_IFE_0_CLK					96
-#define CAM_CC_SPDM_IFE_0_CSID_CLK				97
-#define CAM_CC_SPDM_IPE_0_CLK					98
-#define CAM_CC_SPDM_IPE_1_CLK					99
-#define CAM_CC_SPDM_JPEG_CLK					100
-#define CAM_CC_SYS_TMR_CLK					101
+#define CAM_CC_PLL1						74
+#define CAM_CC_PLL1_OUT_EVEN					75
+#define CAM_CC_PLL2						76
+#define CAM_CC_PLL2_OUT_EVEN					77
+#define CAM_CC_PLL2_OUT_ODD					78
+#define CAM_CC_PLL3						79
+#define CAM_CC_PLL3_OUT_EVEN					80
+#define CAM_CC_PLL_TEST_CLK					81
+#define CAM_CC_SLOW_AHB_CLK_SRC					82
+#define CAM_CC_SOC_AHB_CLK					83
+#define CAM_CC_SYS_TMR_CLK					84
 
 #define TITAN_CAM_CC_BPS_BCR					0
 #define TITAN_CAM_CC_CAMNOC_BCR					1
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 18594b9..5d3a4cd 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -140,6 +140,7 @@
 	DOMAIN_ATTR_USE_UPSTREAM_HINT,
 	DOMAIN_ATTR_EARLY_MAP,
 	DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+	DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
 	DOMAIN_ATTR_MAX,
 };
 
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index f3ce6b4..e1ad51e 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -49,6 +49,7 @@
 #define PINCTRL_DEFAULT	"default"
 #define PINCTRL_SLEEP	"sleep"
 
+/* Common SE registers */
 #define GENI_INIT_CFG_REVISION		(0x0)
 #define GENI_S_INIT_CFG_REVISION	(0x4)
 #define GENI_FORCE_DEFAULT_REG		(0x20)
@@ -126,6 +127,9 @@
 #define FW_REV_PROTOCOL_MSK	(GENMASK(15, 8))
 #define FW_REV_PROTOCOL_SHFT	(8)
 
+/* GENI_CLK_SEL fields */
+#define CLK_SEL_MSK		(GENMASK(2, 0))
+
 /* SE_GENI_DMA_MODE_EN */
 #define GENI_DMA_MODE_EN	(BIT(0))
 
@@ -280,9 +284,10 @@
 	switch (mode) {
 	case FIFO_MODE:
 	{
-		if (proto == I2C) {
+		if (proto != UART) {
 			common_geni_m_irq_en |=
-				(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+				(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+				M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
 			common_geni_s_irq_en |= S_CMD_DONE_EN;
 		}
 		break;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10..7e56a00 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -734,6 +734,16 @@
 
 /* for drivers using iso endpoints */
 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+extern int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num);
+extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num);
+
+extern dma_addr_t usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num);
+extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
+extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc137..f58f62c 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -398,6 +398,15 @@
 	/* Call for power on/off the port if necessary */
 	int	(*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
 
+	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num);
+	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep);
+	dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -436,6 +445,15 @@
 		struct usb_host_interface *old_alt,
 		struct usb_host_interface *new_alt);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
+extern int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num);
+extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+		unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
+extern dma_addr_t usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+	struct usb_host_endpoint *ep);
 
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name);
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index eb209d4..dc79773 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -65,7 +65,7 @@
 	struct hlist_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	atomic_t count;
+	int count;
 	atomic_t ucount[UCOUNT_COUNTS];
 };
 
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ac5898a..c558387 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -17,7 +17,7 @@
 #include <linux/poll.h>
 #include <linux/dma-buf.h>
 
-#define VB2_MAX_FRAME	(32)
+#define VB2_MAX_FRAME	(64)
 #define VB2_MAX_PLANES	(8)
 
 /**
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index cd334c9..225bae1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -125,6 +125,8 @@
 	return skb->protocol;
 }
 
+extern int tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle,
+				  int flow_enable);
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
diff --git a/include/soc/qcom/msm_qmi_interface.h b/include/soc/qcom/msm_qmi_interface.h
index 349ca2f..c421209 100644
--- a/include/soc/qcom/msm_qmi_interface.h
+++ b/include/soc/qcom/msm_qmi_interface.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,7 @@
 	QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
 	QMI_RESULT_SUCCESS_V01 = 0,
 	QMI_RESULT_FAILURE_V01 = 1,
+	QMI_ERR_DISABLED_V01 = 0x45,
 	QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 14e49c7..b35533b 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE syscalls
 
 #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 94d7fcb..c104244 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -244,6 +244,30 @@
 	__u32 retained;       /* out, whether backing store still exists */
 };
 
+/* HDR WRGB x and y index */
+#define DISPLAY_PRIMARIES_WX 0
+#define DISPLAY_PRIMARIES_WY 1
+#define DISPLAY_PRIMARIES_RX 2
+#define DISPLAY_PRIMARIES_RY 3
+#define DISPLAY_PRIMARIES_GX 4
+#define DISPLAY_PRIMARIES_GY 5
+#define DISPLAY_PRIMARIES_BX 6
+#define DISPLAY_PRIMARIES_BY 7
+#define DISPLAY_PRIMARIES_MAX 8
+
+struct drm_panel_hdr_properties {
+	__u32 hdr_enabled;
+
+	/* WRGB x and y values, arranged in the order */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	__u32 display_primaries[DISPLAY_PRIMARIES_MAX];
+
+	/* peak brightness supported by panel */
+	__u32 peak_brightness;
+	/* Blackness level supported by panel */
+	__u32 blackness_level;
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
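
For orientation, a minimal user-space sketch of consuming the new panel HDR properties follows. It assumes the structure has already been read back from the display driver (the query step is not shown) and that the UAPI header is installed as <drm/msm_drm.h>; both are assumptions of the example, not part of this change.

#include <stdio.h>
#include <drm/msm_drm.h>	/* assumed install path for this UAPI header */

/* Illustrative only: p is assumed to point at properties already read back
 * from the panel/connector; the query itself is not shown.
 */
static void print_hdr_props(const struct drm_panel_hdr_properties *p)
{
	if (!p->hdr_enabled)
		return;

	/* display_primaries[] is ordered [WX, WY, RX, RY, GX, GY, BX, BY] */
	printf("white point: (%u, %u)\n",
	       p->display_primaries[DISPLAY_PRIMARIES_WX],
	       p->display_primaries[DISPLAY_PRIMARIES_WY]);
	printf("peak brightness: %u, blackness level: %u\n",
	       p->peak_brightness, p->blackness_level);
}
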
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 943940e..e809c03 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -134,4 +134,151 @@
 	__u32 c1[PGC_TBL_LEN];
 	__u32 c2[PGC_TBL_LEN];
 };
+
+#define AD4_LUT_GRP0_SIZE 33
+#define AD4_LUT_GRP1_SIZE 32
+/*
+ * struct drm_msm_ad4_init - ad4 init structure set by user-space client.
+ *                           Init param values can change based on tuning
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_init {
+	__u32 init_param_001[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_002[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_003[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_004[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_005[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_006[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_007[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_008[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_009;
+	__u32 init_param_010;
+	__u32 init_param_011;
+	__u32 init_param_012;
+	__u32 init_param_013;
+	__u32 init_param_014;
+	__u32 init_param_015;
+	__u32 init_param_016;
+	__u32 init_param_017;
+	__u32 init_param_018;
+	__u32 init_param_019;
+	__u32 init_param_020;
+	__u32 init_param_021;
+	__u32 init_param_022;
+	__u32 init_param_023;
+	__u32 init_param_024;
+	__u32 init_param_025;
+	__u32 init_param_026;
+	__u32 init_param_027;
+	__u32 init_param_028;
+	__u32 init_param_029;
+	__u32 init_param_030;
+	__u32 init_param_031;
+	__u32 init_param_032;
+	__u32 init_param_033;
+	__u32 init_param_034;
+	__u32 init_param_035;
+	__u32 init_param_036;
+	__u32 init_param_037;
+	__u32 init_param_038;
+	__u32 init_param_039;
+	__u32 init_param_040;
+	__u32 init_param_041;
+	__u32 init_param_042;
+	__u32 init_param_043;
+	__u32 init_param_044;
+	__u32 init_param_045;
+	__u32 init_param_046;
+	__u32 init_param_047;
+	__u32 init_param_048;
+	__u32 init_param_049;
+	__u32 init_param_050;
+	__u32 init_param_051;
+	__u32 init_param_052;
+	__u32 init_param_053;
+	__u32 init_param_054;
+	__u32 init_param_055;
+	__u32 init_param_056;
+	__u32 init_param_057;
+	__u32 init_param_058;
+	__u32 init_param_059;
+	__u32 init_param_060;
+	__u32 init_param_061;
+	__u32 init_param_062;
+	__u32 init_param_063;
+	__u32 init_param_064;
+	__u32 init_param_065;
+	__u32 init_param_066;
+	__u32 init_param_067;
+	__u32 init_param_068;
+	__u32 init_param_069;
+	__u32 init_param_070;
+	__u32 init_param_071;
+	__u32 init_param_072;
+	__u32 init_param_073;
+	__u32 init_param_074;
+	__u32 init_param_075;
+};
+
+/*
+ * struct drm_msm_ad4_cfg - ad4 config structure set by user-space client.
+ *                           Config param values can vary based on tuning,
+ *                           hence they are passed by user-space clients.
+ */
+struct drm_msm_ad4_cfg {
+	__u32 cfg_param_001;
+	__u32 cfg_param_002;
+	__u32 cfg_param_003;
+	__u32 cfg_param_004;
+	__u32 cfg_param_005;
+	__u32 cfg_param_006;
+	__u32 cfg_param_007;
+	__u32 cfg_param_008;
+	__u32 cfg_param_009;
+	__u32 cfg_param_010;
+	__u32 cfg_param_011;
+	__u32 cfg_param_012;
+	__u32 cfg_param_013;
+	__u32 cfg_param_014;
+	__u32 cfg_param_015;
+	__u32 cfg_param_016;
+	__u32 cfg_param_017;
+	__u32 cfg_param_018;
+	__u32 cfg_param_019;
+	__u32 cfg_param_020;
+	__u32 cfg_param_021;
+	__u32 cfg_param_022;
+	__u32 cfg_param_023;
+	__u32 cfg_param_024;
+	__u32 cfg_param_025;
+	__u32 cfg_param_026;
+	__u32 cfg_param_027;
+	__u32 cfg_param_028;
+	__u32 cfg_param_029;
+	__u32 cfg_param_030;
+	__u32 cfg_param_031;
+	__u32 cfg_param_032;
+	__u32 cfg_param_033;
+	__u32 cfg_param_034;
+	__u32 cfg_param_035;
+	__u32 cfg_param_036;
+	__u32 cfg_param_037;
+	__u32 cfg_param_038;
+	__u32 cfg_param_039;
+	__u32 cfg_param_040;
+	__u32 cfg_param_041;
+	__u32 cfg_param_042;
+	__u32 cfg_param_043;
+	__u32 cfg_param_044;
+	__u32 cfg_param_045;
+	__u32 cfg_param_046;
+	__u32 cfg_param_047;
+	__u32 cfg_param_048;
+	__u32 cfg_param_049;
+	__u32 cfg_param_050;
+	__u32 cfg_param_051;
+	__u32 cfg_param_052;
+	__u32 cfg_param_053;
+};
+
 #endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 2dfbc95..4823794 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -265,6 +265,7 @@
 header-y += map_to_7segment.h
 header-y += matroxfb.h
 header-y += mdio.h
+header-y += mdss_rotator.h
 header-y += media.h
 header-y += media-bus-format.h
 header-y += mei.h
@@ -306,6 +307,8 @@
 header-y += msm_ion.h
 header-y += msm_ipc.h
 header-y += msm_kgsl.h
+header-y += msm_mdp.h
+header-y += msm_mdp_ext.h
 header-y += msm_rmnet.h
 header-y += mtio.h
 header-y += nbd.h
@@ -515,4 +518,6 @@
 header-y += ipa_qmi_service_v01.h
 header-y += msm_ipa.h
 header-y += rmnet_ipa_fd_ioctl.h
+header-y += msm_dsps.h
 header-y += msm-core-interface.h
+header-y += msm_rotator.h
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 1c31549..81c464a 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -59,6 +59,7 @@
 #define EPOLL_PACKED
 #endif
 
+#ifdef __KERNEL__
 struct epoll_event {
 	__u32 events;
 	__u64 data;
@@ -76,4 +77,5 @@
 	epev->events &= ~EPOLLWAKEUP;
 }
 #endif
+#endif /* __KERNEL__ */
 #endif /* _UAPI_LINUX_EVENTPOLL_H */
diff --git a/include/uapi/linux/mdss_rotator.h b/include/uapi/linux/mdss_rotator.h
new file mode 100644
index 0000000..167e1426
--- /dev/null
+++ b/include/uapi/linux/mdss_rotator.h
@@ -0,0 +1,144 @@
+#ifndef _UAPI_MDSS_ROTATOR_H_
+#define _UAPI_MDSS_ROTATOR_H_
+
+#include <linux/msm_mdp_ext.h>
+
+#define MDSS_ROTATOR_IOCTL_MAGIC 'w'
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, struct mdp_rotation_config *)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, struct mdp_rotation_config *)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, struct mdp_rotation_request *)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE	_IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+/*
+ * Rotation request flag
+ */
+/* no rotation flag, i.e. color space conversion */
+#define MDP_ROTATION_NOP	0x01
+
+/* left/right flip */
+#define MDP_ROTATION_FLIP_LR	0x02
+
+/* up/down flip */
+#define MDP_ROTATION_FLIP_UD	0x04
+
+/* rotate 90 degree */
+#define MDP_ROTATION_90		0x08
+
+/* rotate 180 degree */
+#define MDP_ROTATION_180	(MDP_ROTATION_FLIP_LR | MDP_ROTATION_FLIP_UD)
+
+/* rotate 270 degree */
+#define MDP_ROTATION_270	(MDP_ROTATION_90 | MDP_ROTATION_180)
+
+/* format is interlaced */
+#define MDP_ROTATION_DEINTERLACE 0x10
+
+/* enable bwc */
+#define MDP_ROTATION_BWC_EN	0x40
+
+/* secure data */
+#define MDP_ROTATION_SECURE	0x80
+
+/*
+ * Rotation commit flag
+ */
+/* Flag indicates to validate the rotation request */
+#define MDSS_ROTATION_REQUEST_VALIDATE	0x01
+
+#define MDP_ROTATION_REQUEST_VERSION_1_0	0x00010000
+
+/*
+ * Client can let the driver allocate the hardware resources by using
+ * this particular hw resource id.
+ */
+#define MDSS_ROTATION_HW_ANY	0xFFFFFFFF
+
+/*
+ * Configuration Structures
+ */
+struct mdp_rotation_buf_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	struct mult_factor comp_ratio;
+};
+
+struct mdp_rotation_config {
+	uint32_t	version;
+	uint32_t	session_id;
+	struct mdp_rotation_buf_info	input;
+	struct mdp_rotation_buf_info	output;
+	uint32_t	frame_rate;
+	uint32_t	flags;
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_item {
+	/* rotation request flag */
+	uint32_t	flags;
+
+	/* Source crop rectangle */
+	struct mdp_rect	src_rect;
+
+	/* Destination rectangle */
+	struct mdp_rect	dst_rect;
+
+	/* Input buffer for the request */
+	struct mdp_layer_buffer	input;
+
+	/* The output buffer for the request */
+	struct mdp_layer_buffer	output;
+
+	/*
+	 * DMA pipe selection for this request by client:
+	 * 0: DMA pipe 0
+	 * 1: DMA pipe 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	pipe_idx;
+
+	/*
+	 * Write-back block selection for this request by client:
+	 * 0: Write-back block 0
+	 * 1: Write-back block 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	wb_idx;
+
+	/* Which session ID is this request scheduled on */
+	uint32_t	session_id;
+
+	/* 32bits reserved value for future usage */
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_request {
+	/* 32bit version indicates the request structure */
+	uint32_t	version;
+
+	uint32_t	flags;
+
+	/* Number of rotation request items in the list */
+	uint32_t	count;
+
+	/* Pointer to a list of rotation request items */
+	struct mdp_rotation_item __user	*list;
+
+	/* 32bits reserved value for future usage*/
+	uint32_t	reserved[6];
+};
+
+#endif /*_UAPI_MDSS_ROTATOR_H_*/
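
To make the session flow implied by these ioctls concrete (open, queue one request, close), here is a minimal user-space sketch. The device node path is hypothetical, and the assumption that MDSS_ROTATION_OPEN returns the allocated session id in cfg->session_id follows the structure comments above but is not guaranteed by the header alone.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mdss_rotator.h>

#define ROT_DEV "/dev/mdss_rotator"	/* hypothetical device node */

/* Caller fills cfg (version, input/output buf info) and item (rects, buffers). */
static int rotate_one_frame(struct mdp_rotation_config *cfg,
			    struct mdp_rotation_item *item)
{
	struct mdp_rotation_request req;
	int fd, ret;

	fd = open(ROT_DEV, O_RDWR);
	if (fd < 0)
		return -1;

	/* Open a session; the driver is expected to fill cfg->session_id. */
	ret = ioctl(fd, MDSS_ROTATION_OPEN, cfg);
	if (ret)
		goto out;

	/* Queue a single rotation item on that session. */
	memset(&req, 0, sizeof(req));
	req.version = MDP_ROTATION_REQUEST_VERSION_1_0;
	req.count = 1;
	req.list = item;
	item->session_id = cfg->session_id;
	item->pipe_idx = MDSS_ROTATION_HW_ANY;	/* let the driver pick a DMA pipe */
	item->wb_idx = MDSS_ROTATION_HW_ANY;	/* let the driver pick a WB block */
	ret = ioctl(fd, MDSS_ROTATION_REQUEST, &req);

	ioctl(fd, MDSS_ROTATION_CLOSE, cfg->session_id);
out:
	close(fd);
	return ret;
}
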
diff --git a/include/uapi/linux/msm_dsps.h b/include/uapi/linux/msm_dsps.h
new file mode 100644
index 0000000..a21927d
--- /dev/null
+++ b/include/uapi/linux/msm_dsps.h
@@ -0,0 +1,16 @@
+#ifndef _UAPI_DSPS_H_
+#define _UAPI_DSPS_H_
+
+#include <linux/ioctl.h>
+
+#define DSPS_IOCTL_MAGIC 'd'
+
+#define DSPS_IOCTL_ON	_IO(DSPS_IOCTL_MAGIC, 1)
+#define DSPS_IOCTL_OFF	_IO(DSPS_IOCTL_MAGIC, 2)
+
+#define DSPS_IOCTL_READ_SLOW_TIMER _IOR(DSPS_IOCTL_MAGIC, 3, unsigned int*)
+#define DSPS_IOCTL_READ_FAST_TIMER _IOR(DSPS_IOCTL_MAGIC, 4, unsigned int*)
+
+#define DSPS_IOCTL_RESET _IO(DSPS_IOCTL_MAGIC, 5)
+
+#endif	/* _UAPI_DSPS_H_ */
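
A small usage sketch for the ioctl set above: power the sensors processor on, read the slow timer, and power it off again. The device node name is an assumption made for the example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_dsps.h>

#define DSPS_DEV "/dev/msm_dsps"	/* hypothetical device node */

int main(void)
{
	unsigned int slow_timer = 0;
	int fd = open(DSPS_DEV, O_RDWR);

	if (fd < 0)
		return 1;

	ioctl(fd, DSPS_IOCTL_ON);
	if (!ioctl(fd, DSPS_IOCTL_READ_SLOW_TIMER, &slow_timer))
		printf("slow timer: %u\n", slow_timer);
	ioctl(fd, DSPS_IOCTL_OFF);

	close(fd);
	return 0;
}
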
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
new file mode 100644
index 0000000..73f4938
--- /dev/null
+++ b/include/uapi/linux/msm_mdp.h
@@ -0,0 +1,1461 @@
+#ifndef _UAPI_MSM_MDP_H_
+#define _UAPI_MSM_MDP_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#endif
+#include <linux/fb.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
+#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
+#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
+#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
+#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data)
+/* new ioctls for set/get ccs matrix */
+#define MSMFB_GET_CCS_MATRIX  _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
+#define MSMFB_SET_CCS_MATRIX  _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
+#define MSMFB_OVERLAY_SET       _IOWR(MSMFB_IOCTL_MAGIC, 135, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_UNSET     _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
+#define MSMFB_OVERLAY_PLAY      _IOW(MSMFB_IOCTL_MAGIC, 137, \
+						struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE	MSMFB_OVERLAY_PLAY
+
+#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
+					struct mdp_page_protection)
+#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
+					struct mdp_page_protection)
+#define MSMFB_OVERLAY_GET      _IOR(MSMFB_IOCTL_MAGIC, 140, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_PLAY_ENABLE     _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
+#define MSMFB_OVERLAY_BLT       _IOWR(MSMFB_IOCTL_MAGIC, 142, \
+						struct msmfb_overlay_blt)
+#define MSMFB_OVERLAY_BLT_OFFSET     _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int)
+#define MSMFB_HISTOGRAM_START	_IOR(MSMFB_IOCTL_MAGIC, 144, \
+						struct mdp_histogram_start_req)
+#define MSMFB_HISTOGRAM_STOP	_IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int)
+#define MSMFB_NOTIFY_UPDATE	_IOWR(MSMFB_IOCTL_MAGIC, 146, unsigned int)
+
+#define MSMFB_OVERLAY_3D       _IOWR(MSMFB_IOCTL_MAGIC, 147, \
+						struct msmfb_overlay_3d)
+
+#define MSMFB_MIXER_INFO       _IOWR(MSMFB_IOCTL_MAGIC, 148, \
+						struct msmfb_mixer_info_req)
+#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
+						struct msmfb_overlay_data)
+#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150)
+#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151)
+#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152)
+#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
+#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+#define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
+#define MSMFB_BUFFER_SYNC  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
+#define MSMFB_DISPLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 164, \
+						struct mdp_display_commit)
+#define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
+#define MSMFB_METADATA_GET  _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+						unsigned int)
+#define MSMFB_ASYNC_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 168, unsigned int)
+#define MSMFB_OVERLAY_PREPARE		_IOWR(MSMFB_IOCTL_MAGIC, 169, \
+						struct mdp_overlay_list)
+#define MSMFB_LPM_ENABLE	_IOWR(MSMFB_IOCTL_MAGIC, 170, unsigned int)
+#define MSMFB_MDP_PP_GET_FEATURE_VERSION _IOWR(MSMFB_IOCTL_MAGIC, 171, \
+					      struct mdp_pp_feature_version)
+
+#define FB_TYPE_3D_PANEL 0x10101010
+#define MDP_IMGTYPE2_START 0x10000
+#define MSMFB_DRIVER_VERSION	0xF9E8D701
+/* Maximum number of formats supported by MDP*/
+#define MDP_IMGTYPE_END 0x100
+
+/* HW Revisions for different MDSS targets */
+#define MDSS_GET_MAJOR(rev)		((rev) >> 28)
+#define MDSS_GET_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define MDSS_GET_STEP(rev)		((rev) & 0xFFFF)
+#define MDSS_GET_MAJOR_MINOR(rev)	((rev) >> 16)
+
+#define IS_MDSS_MAJOR_MINOR_SAME(rev1, rev2)	\
+	(MDSS_GET_MAJOR_MINOR((rev1)) == MDSS_GET_MAJOR_MINOR((rev2)))
+
+#define MDSS_MDP_REV(major, minor, step)	\
+	((((major) & 0x000F) << 28) |		\
+	 (((minor) & 0x0FFF) << 16) |		\
+	 ((step)   & 0xFFFF))
+
+#define MDSS_MDP_HW_REV_100	MDSS_MDP_REV(1, 0, 0) /* 8974 v1.0 */
+#define MDSS_MDP_HW_REV_101	MDSS_MDP_REV(1, 1, 0) /* 8x26 v1.0 */
+#define MDSS_MDP_HW_REV_101_1	MDSS_MDP_REV(1, 1, 1) /* 8x26 v2.0, 8926 v1.0 */
+#define MDSS_MDP_HW_REV_101_2	MDSS_MDP_REV(1, 1, 2) /* 8926 v2.0 */
+#define MDSS_MDP_HW_REV_102	MDSS_MDP_REV(1, 2, 0) /* 8974 v2.0 */
+#define MDSS_MDP_HW_REV_102_1	MDSS_MDP_REV(1, 2, 1) /* 8974 v3.0 (Pro) */
+#define MDSS_MDP_HW_REV_103	MDSS_MDP_REV(1, 3, 0) /* 8084 v1.0 */
+#define MDSS_MDP_HW_REV_103_1	MDSS_MDP_REV(1, 3, 1) /* 8084 v1.1 */
+#define MDSS_MDP_HW_REV_105	MDSS_MDP_REV(1, 5, 0) /* 8994 v1.0 */
+#define MDSS_MDP_HW_REV_106	MDSS_MDP_REV(1, 6, 0) /* 8916 v1.0 */
+#define MDSS_MDP_HW_REV_107	MDSS_MDP_REV(1, 7, 0) /* 8996 v1 */
+#define MDSS_MDP_HW_REV_107_1	MDSS_MDP_REV(1, 7, 1) /* 8996 v2 */
+#define MDSS_MDP_HW_REV_107_2	MDSS_MDP_REV(1, 7, 2) /* 8996 v3 */
+#define MDSS_MDP_HW_REV_108	MDSS_MDP_REV(1, 8, 0) /* 8939 v1.0 */
+#define MDSS_MDP_HW_REV_109	MDSS_MDP_REV(1, 9, 0) /* 8994 v2.0 */
+#define MDSS_MDP_HW_REV_110	MDSS_MDP_REV(1, 10, 0) /* 8992 v1.0 */
+#define MDSS_MDP_HW_REV_200	MDSS_MDP_REV(2, 0, 0) /* 8092 v1.0 */
+#define MDSS_MDP_HW_REV_112	MDSS_MDP_REV(1, 12, 0) /* 8952 v1.0 */
+#define MDSS_MDP_HW_REV_114	MDSS_MDP_REV(1, 14, 0) /* 8937 v1.0 */
+#define MDSS_MDP_HW_REV_115	MDSS_MDP_REV(1, 15, 0) /* msmgold */
+#define MDSS_MDP_HW_REV_116	MDSS_MDP_REV(1, 16, 0) /* msmtitanium */
+#define MDSS_MDP_HW_REV_300	MDSS_MDP_REV(3, 0, 0)  /* msmcobalt */
+#define MDSS_MDP_HW_REV_301	MDSS_MDP_REV(3, 0, 1)  /* msmcobalt v1.0 */
+
+enum {
+	NOTIFY_UPDATE_INIT,
+	NOTIFY_UPDATE_DEINIT,
+	NOTIFY_UPDATE_START,
+	NOTIFY_UPDATE_STOP,
+	NOTIFY_UPDATE_POWER_OFF,
+};
+
+enum {
+	NOTIFY_TYPE_NO_UPDATE,
+	NOTIFY_TYPE_SUSPEND,
+	NOTIFY_TYPE_UPDATE,
+	NOTIFY_TYPE_BL_UPDATE,
+	NOTIFY_TYPE_BL_AD_ATTEN_UPDATE,
+};
+
+enum {
+	MDP_RGB_565,      /* RGB 565 planar */
+	MDP_XRGB_8888,    /* RGB 888 padded */
+	MDP_Y_CBCR_H2V2,  /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+	MDP_Y_CBCR_H2V2_ADRENO,
+	MDP_ARGB_8888,    /* ARGB 888 */
+	MDP_RGB_888,      /* RGB 888 planar */
+	MDP_Y_CRCB_H2V2,  /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_YCRYCB_H2V1,  /* YCrYCb interleave */
+	MDP_CBYCRY_H2V1,  /* CbYCrY interleave */
+	MDP_Y_CRCB_H2V1,  /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_Y_CBCR_H2V1,   /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+	MDP_Y_CRCB_H1V2,
+	MDP_Y_CBCR_H1V2,
+	MDP_RGBA_8888,    /* ARGB 888 */
+	MDP_BGRA_8888,	  /* ABGR 888 */
+	MDP_RGBX_8888,	  /* RGBX 888 */
+	MDP_Y_CRCB_H2V2_TILE,  /* Y and CrCb, pseudo planar tile */
+	MDP_Y_CBCR_H2V2_TILE,  /* Y and CbCr, pseudo planar tile */
+	MDP_Y_CR_CB_H2V2,  /* Y, Cr and Cb, planar */
+	MDP_Y_CR_CB_GH2V2,  /* Y, Cr and Cb, planar aligned to Android YV12 */
+	MDP_Y_CB_CR_H2V2,  /* Y, Cb and Cr, planar */
+	MDP_Y_CRCB_H1V1,  /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_Y_CBCR_H1V1,  /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+	MDP_YCRCB_H1V1,   /* YCrCb interleave */
+	MDP_YCBCR_H1V1,   /* YCbCr interleave */
+	MDP_BGR_565,      /* BGR 565 planar */
+	MDP_BGR_888,      /* BGR 888 */
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_BGRX_8888,   /* BGRX 8888 */
+	MDP_RGBA_8888_TILE,	  /* RGBA 8888 in tile format */
+	MDP_ARGB_8888_TILE,	  /* ARGB 8888 in tile format */
+	MDP_ABGR_8888_TILE,	  /* ABGR 8888 in tile format */
+	MDP_BGRA_8888_TILE,	  /* BGRA 8888 in tile format */
+	MDP_RGBX_8888_TILE,	  /* RGBX 8888 in tile format */
+	MDP_XRGB_8888_TILE,	  /* XRGB 8888 in tile format */
+	MDP_XBGR_8888_TILE,	  /* XBGR 8888 in tile format */
+	MDP_BGRX_8888_TILE,	  /* BGRX 8888 in tile format */
+	MDP_YCBYCR_H2V1,  /* YCbYCr interleave */
+	MDP_RGB_565_TILE,	  /* RGB 565 in tile format */
+	MDP_BGR_565_TILE,	  /* BGR 565 in tile format */
+	MDP_ARGB_1555,	/*ARGB 1555*/
+	MDP_RGBA_5551,	/*RGBA 5551*/
+	MDP_ARGB_4444,	/*ARGB 4444*/
+	MDP_RGBA_4444,	/*RGBA 4444*/
+	MDP_RGB_565_UBWC,
+	MDP_RGBA_8888_UBWC,
+	MDP_Y_CBCR_H2V2_UBWC,
+	MDP_RGBX_8888_UBWC,
+	MDP_Y_CRCB_H2V2_VENUS,
+	MDP_IMGTYPE_LIMIT,
+	MDP_RGB_BORDERFILL,	/* border fill pipe */
+	MDP_XRGB_1555,
+	MDP_RGBX_5551,
+	MDP_XRGB_4444,
+	MDP_RGBX_4444,
+	MDP_ABGR_1555,
+	MDP_BGRA_5551,
+	MDP_XBGR_1555,
+	MDP_BGRX_5551,
+	MDP_ABGR_4444,
+	MDP_BGRA_4444,
+	MDP_XBGR_4444,
+	MDP_BGRX_4444,
+	MDP_ABGR_8888,
+	MDP_XBGR_8888,
+	MDP_RGBA_1010102,
+	MDP_ARGB_2101010,
+	MDP_RGBX_1010102,
+	MDP_XRGB_2101010,
+	MDP_BGRA_1010102,
+	MDP_ABGR_2101010,
+	MDP_BGRX_1010102,
+	MDP_XBGR_2101010,
+	MDP_RGBA_1010102_UBWC,
+	MDP_RGBX_1010102_UBWC,
+	MDP_Y_CBCR_H2V2_P010,
+	MDP_Y_CBCR_H2V2_TP10_UBWC,
+	MDP_CRYCBY_H2V1,  /* CrYCbY interleave */
+	MDP_IMGTYPE_LIMIT1 = MDP_IMGTYPE_END,
+	MDP_FB_FORMAT = MDP_IMGTYPE2_START,    /* framebuffer format */
+	MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */
+};
+
+#define MDP_CRYCBY_H2V1 MDP_CRYCBY_H2V1
+
+enum {
+	PMEM_IMG,
+	FB_IMG,
+};
+
+enum {
+	HSIC_HUE = 0,
+	HSIC_SAT,
+	HSIC_INT,
+	HSIC_CON,
+	NUM_HSIC_PARAM,
+};
+
+enum mdss_mdp_max_bw_mode {
+	MDSS_MAX_BW_LIMIT_DEFAULT = 0x1,
+	MDSS_MAX_BW_LIMIT_CAMERA = 0x2,
+	MDSS_MAX_BW_LIMIT_HFLIP = 0x4,
+	MDSS_MAX_BW_LIMIT_VFLIP = 0x8,
+};
+
+#define MDSS_MDP_ROT_ONLY		0x80
+#define MDSS_MDP_RIGHT_MIXER		0x100
+#define MDSS_MDP_DUAL_PIPE		0x200
+
+/* mdp_blit_req flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+#define MDP_BLEND_FG_PREMULT 0x20000
+#define MDP_IS_FG 0x40000
+#define MDP_SOLID_FILL 0x00000020
+#define MDP_VPU_PIPE 0x00000040
+#define MDP_DEINTERLACE 0x80000000
+#define MDP_SHARPENING  0x40000000
+#define MDP_NO_DMA_BARRIER_START	0x20000000
+#define MDP_NO_DMA_BARRIER_END		0x10000000
+#define MDP_NO_BLIT			0x08000000
+#define MDP_BLIT_WITH_DMA_BARRIERS	0x000
+#define MDP_BLIT_WITH_NO_DMA_BARRIERS    \
+	(MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
+#define MDP_BLIT_SRC_GEM                0x04000000
+#define MDP_BLIT_DST_GEM                0x02000000
+#define MDP_BLIT_NON_CACHED		0x01000000
+#define MDP_OV_PIPE_SHARE		0x00800000
+#define MDP_DEINTERLACE_ODD		0x00400000
+#define MDP_OV_PLAY_NOWAIT		0x00200000
+#define MDP_SOURCE_ROTATED_90		0x00100000
+#define MDP_OVERLAY_PP_CFG_EN		0x00080000
+#define MDP_BACKEND_COMPOSITION		0x00040000
+#define MDP_BORDERFILL_SUPPORTED	0x00010000
+#define MDP_SECURE_OVERLAY_SESSION      0x00008000
+#define MDP_SECURE_DISPLAY_OVERLAY_SESSION	0x00002000
+#define MDP_OV_PIPE_FORCE_DMA		0x00004000
+#define MDP_MEMORY_ID_TYPE_FB		0x00001000
+#define MDP_BWC_EN			0x00000400
+#define MDP_DECIMATION_EN		0x00000800
+#define MDP_SMP_FORCE_ALLOC		0x00200000
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+#define MDP_FB_PAGE_PROTECTION_NONCACHED         (0)
+#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE      (1)
+#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE    (3)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE  (4)
+/* Sentinel: Don't use! */
+#define MDP_FB_PAGE_PROTECTION_INVALID           (5)
+/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
+#define MDP_NUM_FB_PAGE_PROTECTION_VALUES        (5)
+
+#define MDP_DEEP_COLOR_YUV444    0x1
+#define MDP_DEEP_COLOR_RGB30B    0x2
+#define MDP_DEEP_COLOR_RGB36B    0x4
+#define MDP_DEEP_COLOR_RGB48B    0x8
+
+struct mdp_rect {
+	uint32_t x;
+	uint32_t y;
+	uint32_t w;
+	uint32_t h;
+};
+
+struct mdp_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t offset;
+	int memory_id;		/* the file descriptor */
+	uint32_t priv;
+};
+
+struct mult_factor {
+	uint32_t numer;
+	uint32_t denom;
+};
+
+/*
+ * {3x3} + {3} ccs matrix
+ */
+
+#define MDP_CCS_RGB2YUV	0
+#define MDP_CCS_YUV2RGB	1
+
+#define MDP_CCS_SIZE	9
+#define MDP_BV_SIZE	3
+
+struct mdp_ccs {
+	int direction;			/* MDP_CCS_RGB2YUV or YUV2RGB */
+	uint16_t ccs[MDP_CCS_SIZE];	/* 3x3 color coefficients */
+	uint16_t bv[MDP_BV_SIZE];	/* 1x3 bias vector */
+};
+
+struct mdp_csc {
+	int id;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+/* The version of the mdp_blit_req structure so that
+ * user applications can selectively decide which functionality
+ * to include
+ */
+
+#define MDP_BLIT_REQ_VERSION 3
+
+struct color {
+	uint32_t r;
+	uint32_t g;
+	uint32_t b;
+	uint32_t alpha;
+};
+
+struct mdp_blit_req {
+	struct mdp_img src;
+	struct mdp_img dst;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	struct color const_color;
+	uint32_t alpha;
+	uint32_t transp_mask;
+	uint32_t flags;
+	int sharpening_strength;  /* -127 <--> 127, default 64 */
+	uint8_t color_space;
+	uint32_t fps;
+};
+
+struct mdp_blit_req_list {
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MSMFB_DATA_VERSION 2
+
+struct msmfb_data {
+	uint32_t offset;
+	int memory_id;
+	int id;
+	uint32_t flags;
+	uint32_t priv;
+	uint32_t iova;
+};
+
+#define MSMFB_NEW_REQUEST -1
+
+struct msmfb_overlay_data {
+	uint32_t id;
+	struct msmfb_data data;
+	uint32_t version_key;
+	struct msmfb_data plane1_data;
+	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
+};
+
+struct msmfb_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+};
+
+#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1
+struct msmfb_writeback_data {
+	struct msmfb_data buf_info;
+	struct msmfb_img img;
+};
+
+#define MDP_PP_OPS_ENABLE 0x1
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+#define MDP_PP_OPS_DISABLE 0x8
+#define MDP_PP_IGC_FLAG_ROM0	0x10
+#define MDP_PP_IGC_FLAG_ROM1	0x20
+
+
+#define MDSS_PP_DSPP_CFG	0x000
+#define MDSS_PP_SSPP_CFG	0x100
+#define MDSS_PP_LM_CFG	0x200
+#define MDSS_PP_WB_CFG	0x300
+
+#define MDSS_PP_ARG_MASK	0x3C00
+#define MDSS_PP_ARG_NUM		4
+#define MDSS_PP_ARG_SHIFT	10
+#define MDSS_PP_LOCATION_MASK	0x0300
+#define MDSS_PP_LOGICAL_MASK	0x00FF
+
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
+#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
+#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
+
+
+struct mdp_qseed_cfg {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_sharp_cfg {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
+struct mdp_qseed_cfg_data {
+	uint32_t block;
+	struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG         0x1
+#define MDP_OVERLAY_PP_QSEED_CFG       0x2
+#define MDP_OVERLAY_PP_PA_CFG          0x4
+#define MDP_OVERLAY_PP_IGC_CFG         0x8
+#define MDP_OVERLAY_PP_SHARP_CFG       0x10
+#define MDP_OVERLAY_PP_HIST_CFG        0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG    0x40
+#define MDP_OVERLAY_PP_PA_V2_CFG       0x80
+#define MDP_OVERLAY_PP_PCC_CFG	       0x100
+
+#define MDP_CSC_FLAG_ENABLE	0x1
+#define MDP_CSC_FLAG_YUV_IN	0x2
+#define MDP_CSC_FLAG_YUV_OUT	0x4
+
+#define MDP_CSC_MATRIX_COEFF_SIZE	9
+#define MDP_CSC_CLAMP_SIZE		6
+#define MDP_CSC_BIAS_SIZE		3
+
+struct mdp_csc_cfg {
+	/* flags for enabling CSC and toggling RGB/YUV input/output */
+	uint32_t flags;
+	uint32_t csc_mv[MDP_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[MDP_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[MDP_CSC_CLAMP_SIZE];
+};
+
+struct mdp_csc_cfg_data {
+	uint32_t block;
+	struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_pa_cfg {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_pa_mem_col_cfg {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+#define MDP_SIX_ZONE_LUT_SIZE		384
+
+/* PA Write/Read extension flags */
+#define MDP_PP_PA_HUE_ENABLE		0x10
+#define MDP_PP_PA_SAT_ENABLE		0x20
+#define MDP_PP_PA_VAL_ENABLE		0x40
+#define MDP_PP_PA_CONT_ENABLE		0x80
+#define MDP_PP_PA_SIX_ZONE_ENABLE	0x100
+#define MDP_PP_PA_SKIN_ENABLE		0x200
+#define MDP_PP_PA_SKY_ENABLE		0x400
+#define MDP_PP_PA_FOL_ENABLE		0x800
+
+/* PA masks */
+/* Masks used in PA v1_7 only */
+#define MDP_PP_PA_MEM_PROT_HUE_EN	0x1
+#define MDP_PP_PA_MEM_PROT_SAT_EN	0x2
+#define MDP_PP_PA_MEM_PROT_VAL_EN	0x4
+#define MDP_PP_PA_MEM_PROT_CONT_EN	0x8
+#define MDP_PP_PA_MEM_PROT_SIX_EN	0x10
+#define MDP_PP_PA_MEM_PROT_BLEND_EN	0x20
+/* Masks used in all PAv2 versions */
+#define MDP_PP_PA_HUE_MASK		0x1000
+#define MDP_PP_PA_SAT_MASK		0x2000
+#define MDP_PP_PA_VAL_MASK		0x4000
+#define MDP_PP_PA_CONT_MASK		0x8000
+#define MDP_PP_PA_SIX_ZONE_HUE_MASK	0x10000
+#define MDP_PP_PA_SIX_ZONE_SAT_MASK	0x20000
+#define MDP_PP_PA_SIX_ZONE_VAL_MASK	0x40000
+#define MDP_PP_PA_MEM_COL_SKIN_MASK	0x80000
+#define MDP_PP_PA_MEM_COL_SKY_MASK	0x100000
+#define MDP_PP_PA_MEM_COL_FOL_MASK	0x200000
+#define MDP_PP_PA_MEM_PROTECT_EN	0x400000
+#define MDP_PP_PA_SAT_ZERO_EXP_EN	0x800000
+
+/* Flags for setting PA saturation and value hold */
+#define MDP_PP_PA_LEFT_HOLD		0x1
+#define MDP_PP_PA_RIGHT_HOLD		0x2
+
+struct mdp_pa_v2_data {
+	/* Mask bits for PA features */
+	uint32_t flags;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_cfg skin_cfg;
+	struct mdp_pa_mem_col_cfg sky_cfg;
+	struct mdp_pa_mem_col_cfg fol_cfg;
+	uint32_t six_zone_len;
+	uint32_t six_zone_thresh;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+struct mdp_pa_mem_col_data_v1_7 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t color_adjust_p2;
+	uint32_t blend_gain;
+	uint8_t sat_hold;
+	uint8_t val_hold;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+struct mdp_pa_data_v1_7 {
+	uint32_t mode;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_data_v1_7 skin_cfg;
+	struct mdp_pa_mem_col_data_v1_7 sky_cfg;
+	struct mdp_pa_mem_col_data_v1_7 fol_cfg;
+	uint32_t six_zone_thresh;
+	uint32_t six_zone_adj_p0;
+	uint32_t six_zone_adj_p1;
+	uint8_t six_zone_sat_hold;
+	uint8_t six_zone_val_hold;
+	uint32_t six_zone_len;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+
+struct mdp_pa_v2_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	struct mdp_pa_v2_data pa_v2_data;
+	void *cfg_payload;
+};
+
+
+enum {
+	mdp_igc_rec601 = 1,
+	mdp_igc_rec709,
+	mdp_igc_srgb,
+	mdp_igc_custom,
+	mdp_igc_rec_max,
+};
+
+struct mdp_igc_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t len, ops;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+	void *cfg_payload;
+};
+
+struct mdp_igc_lut_data_v1_7 {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+};
+
+struct mdp_igc_lut_data_payload {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint64_t __user c0_c1_data;
+	uint64_t __user c2_data;
+	uint32_t strength;
+};
+
+struct mdp_histogram_cfg {
+	uint32_t ops;
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data_v1_7 {
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_hist_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t hist_lut_first;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+	void *cfg_payload;
+};
+
+struct mdp_pcc_coeff {
+	uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+struct mdp_pcc_coeff_v1_7 {
+	uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+struct mdp_pcc_data_v1_7 {
+	struct mdp_pcc_coeff_v1_7 r, g, b;
+};
+
+struct mdp_pcc_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t ops;
+	struct mdp_pcc_coeff r, g, b;
+	void *cfg_payload;
+};
+
+enum {
+	mdp_lut_igc,
+	mdp_lut_pgc,
+	mdp_lut_hist,
+	mdp_lut_rgb,
+	mdp_lut_max,
+};
+struct mdp_overlay_pp_params {
+	uint32_t config_ops;
+	struct mdp_csc_cfg csc_cfg;
+	struct mdp_qseed_cfg qseed_cfg[2];
+	struct mdp_pa_cfg pa_cfg;
+	struct mdp_pa_v2_data pa_v2_cfg;
+	struct mdp_igc_lut_data igc_cfg;
+	struct mdp_sharp_cfg sharp_cfg;
+	struct mdp_histogram_cfg hist_cfg;
+	struct mdp_hist_lut_data hist_lut_cfg;
+	/* PAv2 cfg data for PA 2.x versions */
+	struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+	struct mdp_pcc_cfg_data pcc_cfg_data;
+};
+
+/**
+ * enum mdss_mdp_blend_op - Different blend operations set by userspace
+ *
+ * @BLEND_OP_NOT_DEFINED:    No blend operation defined for the layer.
+ * @BLEND_OP_OPAQUE:         Apply a constant blend operation. The layer
+ *                           would appear opaque in case fg plane alpha is
+ *                           0xff.
+ * @BLEND_OP_PREMULTIPLIED:  Apply source over blend rule. Layer already has
+ *                           alpha pre-multiplication done. If fg plane alpha
+ *                           is less than 0xff, apply modulation as well. This
+ *                           operation is intended for layers having an
+ *                           alpha channel.
+ * @BLEND_OP_COVERAGE:       Apply source over blend rule. Layer is not alpha
+ *                           pre-multiplied. Apply pre-multiplication. If fg
+ *                           plane alpha is less than 0xff, apply modulation as
+ *                           well.
+ * @BLEND_OP_MAX:            Used to track maximum blend operation possible by
+ *                           mdp.
+ */
+enum mdss_mdp_blend_op {
+	BLEND_OP_NOT_DEFINED = 0,
+	BLEND_OP_OPAQUE,
+	BLEND_OP_PREMULTIPLIED,
+	BLEND_OP_COVERAGE,
+	BLEND_OP_MAX,
+};
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define MAX_PLANES	4
+struct mdp_scale_data {
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[MAX_PLANES];
+	int phase_step_x[MAX_PLANES];
+	int init_phase_y[MAX_PLANES];
+	int phase_step_y[MAX_PLANES];
+
+	int num_ext_pxls_left[MAX_PLANES];
+	int num_ext_pxls_right[MAX_PLANES];
+	int num_ext_pxls_top[MAX_PLANES];
+	int num_ext_pxls_btm[MAX_PLANES];
+
+	int left_ftch[MAX_PLANES];
+	int left_rpt[MAX_PLANES];
+	int right_ftch[MAX_PLANES];
+	int right_rpt[MAX_PLANES];
+
+	int top_rpt[MAX_PLANES];
+	int btm_rpt[MAX_PLANES];
+	int top_ftch[MAX_PLANES];
+	int btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+};
+
+/**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO:    Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG:     VIG pipe.
+ * @PIPE_TYPE_RGB:     RGB pipe.
+ * @PIPE_TYPE_DMA:     DMA pipe.
+ * @PIPE_TYPE_CURSOR:  CURSOR pipe.
+ * @PIPE_TYPE_MAX:     Used to track maximum number of pipe type.
+ */
+enum mdp_overlay_pipe_type {
+	PIPE_TYPE_AUTO = 0,
+	PIPE_TYPE_VIG,
+	PIPE_TYPE_RGB,
+	PIPE_TYPE_DMA,
+	PIPE_TYPE_CURSOR,
+	PIPE_TYPE_MAX,
+};
+
+/**
+ * struct mdp_overlay - overlay surface structure
+ * @src:	Source image information (width, height, format).
+ * @src_rect:	Source crop rectangle, portion of image that will be fetched.
+ *		This should always be within boundaries of source image.
+ * @dst_rect:	Destination rectangle, the position and size of image on screen.
+ *		This should always be within panel boundaries.
+ * @z_order:	Blending stage to occupy in display, if multiple layers are
+ *		present, highest z_order usually means the top most visible
+ *		layer. The range acceptable is from 0-3 to support blending
+ *		up to 4 layers.
+ * @is_fg:	This flag is used to disable blending of any layers with z_order
+ *		less than this overlay. It means that any layers with z_order
+ *		less than this layer will not be blended and will be replaced
+ *		by the background border color.
+ * @alpha:	Used to set plane opacity. The range can be from 0-255, where
+ *		0 means completely transparent and 255 means fully opaque.
+ * @transp_mask: Color used as color key for transparency. Any pixel in fetched
+ *		image matching this color will be transparent when blending.
+ *		The color should be in same format as the source image format.
+ * @flags:	This is used to customize operation of overlay. See MDP flags
+ *		for more information.
+ * @pipe_type:  Used to specify the type of overlay pipe.
+ * @user_data:	DEPRECATED* Used to store user application specific information.
+ * @bg_color:	Solid color used to fill the overlay surface when no source
+ *		buffer is provided.
+ * @horz_deci:	Horizontal decimation value, this indicates the amount of pixels
+ *		dropped for each pixel that is fetched from a line. The
+ *		decimation amount is two to the power of the value given.
+ *		0: no decimation
+ *		1: decimate by 2 (drop 1 pixel for each pixel fetched)
+ *		2: decimate by 4 (drop 3 pixels for each pixel fetched)
+ *		3: decimate by 8 (drop 7 pixels for each pixel fetched)
+ *		4: decimate by 16 (drop 15 pixels for each pixel fetched)
+ * @vert_deci:	Vertical decimation value, this indicates the amount of lines
+ *		dropped for each line that is fetched from the overlay. The
+ *		decimation amount is two to the power of the value given.
+ *		0: no decimation
+ *		1: decimation by 2 (drop 1 line for each line fetched)
+ *		2: decimation by 4 (drop 3 lines for each line fetched)
+ *		3: decimation by 8 (drop 7 lines for each line fetched)
+ *		4: decimation by 16 (drop 15 lines for each line fetched)
+ * @overlay_pp_cfg: Overlay post processing configuration, for more information
+ *		see struct mdp_overlay_pp_params.
+ * @priority:	Priority is returned by the driver when overlay is set for the
+ *		first time. It indicates the priority of the underlying pipe
+ *		serving the overlay. This priority can be used by user-space
+ *		in source split when pipes are re-used and shuffled around to
+ *		reduce fallbacks.
+ */
+struct mdp_overlay {
+	struct msmfb_img src;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	uint32_t z_order;	/* stage number */
+	uint32_t is_fg;		/* control alpha & transp */
+	uint32_t alpha;
+	uint32_t blend_op;
+	uint32_t transp_mask;
+	uint32_t flags;
+	uint32_t pipe_type;
+	uint32_t id;
+	uint8_t priority;
+	uint32_t user_data[6];
+	uint32_t bg_color;
+	uint8_t horz_deci;
+	uint8_t vert_deci;
+	struct mdp_overlay_pp_params overlay_pp_cfg;
+	struct mdp_scale_data scale;
+	uint8_t color_space;
+	uint32_t frame_rate;
+};
+
+struct msmfb_overlay_3d {
+	uint32_t is_3d;
+	uint32_t width;
+	uint32_t height;
+};
+
+
+struct msmfb_overlay_blt {
+	uint32_t enable;
+	uint32_t offset;
+	uint32_t width;
+	uint32_t height;
+	uint32_t bpp;
+};
+
+struct mdp_histogram {
+	uint32_t frame_cnt;
+	uint32_t bin_cnt;
+	uint32_t *r;
+	uint32_t *g;
+	uint32_t *b;
+};
+
+#define MISR_CRC_BATCH_SIZE 32
+enum {
+	DISPLAY_MISR_EDP,
+	DISPLAY_MISR_DSI0,
+	DISPLAY_MISR_DSI1,
+	DISPLAY_MISR_HDMI,
+	DISPLAY_MISR_LCDC,
+	DISPLAY_MISR_MDP,
+	DISPLAY_MISR_ATV,
+	DISPLAY_MISR_DSI_CMD,
+	DISPLAY_MISR_MAX
+};
+
+enum {
+	MISR_OP_NONE,
+	MISR_OP_SFM,
+	MISR_OP_MFM,
+	MISR_OP_BM,
+	MISR_OP_MAX
+};
+
+struct mdp_misr {
+	uint32_t block_id;
+	uint32_t frame_count;
+	uint32_t crc_op_mode;
+	uint32_t crc_value[MISR_CRC_BATCH_SIZE];
+};
+
+/*
+ * mdp_block_type defines the identifiers for pipes in MDP 4.3 and up
+ *
+ * MDP_BLOCK_RESERVED is provided for backward compatibility and is
+ * deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used
+ * instead.
+ *
+ * MDP_LOGICAL_BLOCK_DISP_0 identifies the display pipe which fb0 uses,
+ * same for others.
+ */
+
+enum {
+	MDP_BLOCK_RESERVED = 0,
+	MDP_BLOCK_OVERLAY_0,
+	MDP_BLOCK_OVERLAY_1,
+	MDP_BLOCK_VG_1,
+	MDP_BLOCK_VG_2,
+	MDP_BLOCK_RGB_1,
+	MDP_BLOCK_RGB_2,
+	MDP_BLOCK_DMA_P,
+	MDP_BLOCK_DMA_S,
+	MDP_BLOCK_DMA_E,
+	MDP_BLOCK_OVERLAY_2,
+	MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
+	MDP_LOGICAL_BLOCK_DISP_1,
+	MDP_LOGICAL_BLOCK_DISP_2,
+	MDP_BLOCK_MAX,
+};
+
+/*
+ * mdp_histogram_start_req is used to provide the parameters for
+ * histogram start request
+ */
+
+struct mdp_histogram_start_req {
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+/*
+ * mdp_histogram_data is used to return the histogram data, once
+ * the histogram is done/stopped/cancelled
+ */
+
+struct mdp_histogram_data {
+	uint32_t block;
+	uint32_t bin_cnt;
+	uint32_t *c0;
+	uint32_t *c1;
+	uint32_t *c2;
+	uint32_t *extra_info;
+};
+
+
+#define GC_LUT_ENTRIES_V1_7	512
+
+struct mdp_ar_gc_lut_data {
+	uint32_t x_start;
+	uint32_t slope;
+	uint32_t offset;
+};
+
+#define MDP_PP_PGC_ROUNDING_ENABLE 0x10
+struct mdp_pgc_lut_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint8_t num_r_stages;
+	uint8_t num_g_stages;
+	uint8_t num_b_stages;
+	struct mdp_ar_gc_lut_data *r_data;
+	struct mdp_ar_gc_lut_data *g_data;
+	struct mdp_ar_gc_lut_data *b_data;
+	void *cfg_payload;
+};
+
+#define PGC_LUT_ENTRIES 1024
+struct mdp_pgc_lut_data_v1_7 {
+	uint32_t  len;
+	uint32_t  *c0_data;
+	uint32_t  *c1_data;
+	uint32_t  *c2_data;
+};
+
+/*
+ * mdp_rgb_lut_data is used to provide parameters for configuring the
+ * generic RGB lut for gamma correction or other LUT update use cases
+ */
+struct mdp_rgb_lut_data {
+	uint32_t flags;
+	uint32_t lut_type;
+	struct fb_cmap cmap;
+};
+
+enum {
+	mdp_rgb_lut_gc,
+	mdp_rgb_lut_hist,
+};
+
+struct mdp_lut_cfg_data {
+	uint32_t lut_type;
+	union {
+		struct mdp_igc_lut_data igc_lut_data;
+		struct mdp_pgc_lut_data pgc_lut_data;
+		struct mdp_hist_lut_data hist_lut_data;
+		struct mdp_rgb_lut_data rgb_lut_data;
+	} data;
+};
+
+struct mdp_bl_scale_data {
+	uint32_t min_lvl;
+	uint32_t scale;
+};
+
+struct mdp_pa_cfg_data {
+	uint32_t block;
+	struct mdp_pa_cfg pa_data;
+};
+
+#define MDP_DITHER_DATA_V1_7_SZ 16
+
+struct mdp_dither_data_v1_7 {
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	uint32_t len;
+	uint32_t data[MDP_DITHER_DATA_V1_7_SZ];
+	uint32_t temporal_en;
+};
+
+struct mdp_pa_dither_data {
+	uint64_t data_flags;
+	uint32_t matrix_sz;
+	uint64_t __user matrix_data;
+	uint32_t strength;
+	uint32_t offset_en;
+};
+
+struct mdp_dither_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint32_t mode;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	void *cfg_payload;
+};
+
+#define MDP_GAMUT_TABLE_NUM		8
+#define MDP_GAMUT_TABLE_NUM_V1_7	4
+#define MDP_GAMUT_SCALE_OFF_TABLE_NUM	3
+#define MDP_GAMUT_TABLE_V1_7_SZ 1229
+#define MDP_GAMUT_SCALE_OFF_SZ 16
+#define MDP_GAMUT_TABLE_V1_7_COARSE_SZ 32
+
+struct mdp_gamut_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t version;
+	/* v1 version specific params */
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+	/* params for newer versions of gamut */
+	void *cfg_payload;
+};
+
+enum {
+	mdp_gamut_fine_mode = 0x1,
+	mdp_gamut_coarse_mode,
+};
+
+struct mdp_gamut_data_v1_7 {
+	uint32_t mode;
+	uint32_t map_en;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t  tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+	uint32_t  *scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+struct mdp_calib_config_data {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
+struct mdp_calib_config_buffer {
+	uint32_t ops;
+	uint32_t size;
+	uint32_t *buffer;
+};
+
+struct mdp_calib_dcm_state {
+	uint32_t ops;
+	uint32_t dcm_state;
+};
+
+enum {
+	DCM_UNINIT,
+	DCM_UNBLANK,
+	DCM_ENTER,
+	DCM_EXIT,
+	DCM_BLANK,
+	DTM_ENTER,
+	DTM_EXIT,
+};
+
+#define MDSS_PP_SPLIT_LEFT_ONLY		0x10000000
+#define MDSS_PP_SPLIT_RIGHT_ONLY	0x20000000
+#define MDSS_PP_SPLIT_MASK		0x30000000
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+#define AD_BL_LIN_LEN 256
+#define AD_BL_ATT_LUT_LEN 33
+
+#define MDSS_AD_MODE_AUTO_BL	0x0
+#define MDSS_AD_MODE_AUTO_STR	0x1
+#define MDSS_AD_MODE_TARG_STR	0x3
+#define MDSS_AD_MODE_MAN_STR	0x7
+#define MDSS_AD_MODE_CALIB	0xF
+
+#define MDP_PP_AD_INIT	0x10
+#define MDP_PP_AD_CFG	0x20
+
+struct mdss_ad_init {
+	uint32_t asym_lut[33];
+	uint32_t color_corr_lut[33];
+	uint8_t i_control[2];
+	uint16_t black_lvl;
+	uint16_t white_lvl;
+	uint8_t var;
+	uint8_t limit_ampl;
+	uint8_t i_dither;
+	uint8_t slope_max;
+	uint8_t slope_min;
+	uint8_t dither_ctl;
+	uint8_t format;
+	uint8_t auto_size;
+	uint16_t frame_w;
+	uint16_t frame_h;
+	uint8_t logo_v;
+	uint8_t logo_h;
+	uint32_t alpha;
+	uint32_t alpha_base;
+	uint32_t al_thresh;
+	uint32_t bl_lin_len;
+	uint32_t bl_att_len;
+	uint32_t *bl_lin;
+	uint32_t *bl_lin_inv;
+	uint32_t *bl_att_lut;
+};
+
+#define MDSS_AD_BL_CTRL_MODE_EN 1
+#define MDSS_AD_BL_CTRL_MODE_DIS 0
+struct mdss_ad_cfg {
+	uint32_t mode;
+	uint32_t al_calib_lut[33];
+	uint16_t backlight_min;
+	uint16_t backlight_max;
+	uint16_t backlight_scale;
+	uint16_t amb_light_min;
+	uint16_t filter[2];
+	uint16_t calib[4];
+	uint8_t strength_limit;
+	uint8_t t_filter_recursion;
+	uint16_t stab_itr;
+	uint32_t bl_ctrl_mode;
+};
+
+struct mdss_ad_bl_cfg {
+	uint32_t bl_min_delta;
+	uint32_t bl_low_limit;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg {
+	uint32_t ops;
+	union {
+		struct mdss_ad_init init;
+		struct mdss_ad_cfg cfg;
+	} params;
+};
+
+/* mode uses MDSS_AD_MODE_* flags */
+struct mdss_ad_input {
+	uint32_t mode;
+	union {
+		uint32_t amb_light;
+		uint32_t strength;
+		uint32_t calib_bl;
+	} in;
+	uint32_t output;
+};
+
+#define MDSS_CALIB_MODE_BL	0x1
+struct mdss_calib_cfg {
+	uint32_t ops;
+	uint32_t calib_mask;
+};
+
+enum {
+	mdp_op_pcc_cfg,
+	mdp_op_csc_cfg,
+	mdp_op_lut_cfg,
+	mdp_op_qseed_cfg,
+	mdp_bl_scale_cfg,
+	mdp_op_pa_cfg,
+	mdp_op_pa_v2_cfg,
+	mdp_op_dither_cfg,
+	mdp_op_gamut_cfg,
+	mdp_op_calib_cfg,
+	mdp_op_ad_cfg,
+	mdp_op_ad_input,
+	mdp_op_calib_mode,
+	mdp_op_calib_buffer,
+	mdp_op_calib_dcm_state,
+	mdp_op_max,
+	mdp_op_pa_dither_cfg,
+	mdp_op_ad_bl_cfg,
+	mdp_op_pp_max = 255,
+};
+#define mdp_op_pa_dither_cfg mdp_op_pa_dither_cfg
+#define mdp_op_pp_max mdp_op_pp_max
+
+#define mdp_op_ad_bl_cfg mdp_op_ad_bl_cfg
+
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_BGRA_8888,
+	WB_FORMAT_BGRX_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* Need to support */
+};
+
+struct msmfb_mdp_pp {
+	uint32_t op;
+	union {
+		struct mdp_pcc_cfg_data pcc_cfg_data;
+		struct mdp_csc_cfg_data csc_cfg_data;
+		struct mdp_lut_cfg_data lut_cfg_data;
+		struct mdp_qseed_cfg_data qseed_cfg_data;
+		struct mdp_bl_scale_data bl_scale_data;
+		struct mdp_pa_cfg_data pa_cfg_data;
+		struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+		struct mdp_dither_cfg_data dither_cfg_data;
+		struct mdp_gamut_cfg_data gamut_cfg_data;
+		struct mdp_calib_config_data calib_cfg;
+		struct mdss_ad_init_cfg ad_init_cfg;
+		struct mdss_calib_cfg mdss_calib_cfg;
+		struct mdss_ad_input ad_input;
+		struct mdp_calib_config_buffer calib_buffer;
+		struct mdp_calib_dcm_state calib_dcm;
+		struct mdss_ad_bl_cfg ad_bl_cfg;
+	} data;
+};
+
+#define FB_METADATA_VIDEO_INFO_CODE_SUPPORT 1
+enum {
+	metadata_op_none,
+	metadata_op_base_blend,
+	metadata_op_frame_rate,
+	metadata_op_vic,
+	metadata_op_wb_format,
+	metadata_op_wb_secure,
+	metadata_op_get_caps,
+	metadata_op_crc,
+	metadata_op_get_ion_fd,
+	metadata_op_max
+};
+
+struct mdp_blend_cfg {
+	uint32_t is_premultiplied;
+};
+
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
+struct mdss_hw_caps {
+	uint32_t mdp_rev;
+	uint8_t rgb_pipes;
+	uint8_t vig_pipes;
+	uint8_t dma_pipes;
+	uint8_t max_smp_cnt;
+	uint8_t smp_per_pipe;
+	uint32_t features;
+};
+
+struct msmfb_metadata {
+	uint32_t op;
+	uint32_t flags;
+	union {
+		struct mdp_misr misr_request;
+		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
+		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
+		uint8_t secure_en;
+		int fbmem_ionfd;
+	} data;
+};
+
+#define MDP_MAX_FENCE_FD	32
+#define MDP_BUF_SYNC_FLAG_WAIT	1
+#define MDP_BUF_SYNC_FLAG_RETIRE_FENCE	0x10
+
+struct mdp_buf_sync {
+	uint32_t flags;
+	uint32_t acq_fen_fd_cnt;
+	uint32_t session_id;
+	int *acq_fen_fd;
+	int *rel_fen_fd;
+	int *retire_fen_fd;
+};
+
+struct mdp_async_blit_req_list {
+	struct mdp_buf_sync sync;
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MDP_DISPLAY_COMMIT_OVERLAY	1
+
+struct mdp_display_commit {
+	uint32_t flags;
+	uint32_t wait_for_finish;
+	struct fb_var_screeninfo var;
+	/*
+	 * user needs to follow the rules below:
+	 * 1. source split is enabled: l_roi = roi and r_roi = 0
+	 * 2. source split is disabled:
+	 *	2.1 split display: l_roi = l_roi and r_roi = r_roi
+	 *	2.2 non split display: l_roi = roi and r_roi = 0
+	 */
+	struct mdp_rect l_roi;
+	struct mdp_rect r_roi;
+};
+
+/**
+ * struct mdp_overlay_list - argument for ioctl MSMFB_OVERLAY_PREPARE
+ * @num_overlays:	Number of overlay layers as part of the frame.
+ * @overlay_list:	Pointer to a list of overlay structures identifying
+ *			the layers as part of the frame
+ * @flags:		Flags can be used to extend behavior.
+ * @processed_overlays:	Output parameter indicating how many pipes were
+ *			successful. If there are no errors this number should
+ *			match num_overlays. Otherwise it will indicate the index
+ *			of the last successfully processed overlay before the
+ *			one that could not be set.
+ */
+struct mdp_overlay_list {
+	uint32_t num_overlays;
+	struct mdp_overlay **overlay_list;
+	uint32_t flags;
+	uint32_t processed_overlays;
+};
+
+struct mdp_page_protection {
+	uint32_t page_protection;
+};
+
+
+struct mdp_mixer_info {
+	int pndx;
+	int pnum;
+	int ptype;
+	int mixer_num;
+	int z_order;
+};
+
+#define MAX_PIPE_PER_MIXER  7
+
+struct msmfb_mixer_info_req {
+	int mixer_num;
+	int cnt;
+	struct mdp_mixer_info info[MAX_PIPE_PER_MIXER];
+};
+
+enum {
+	DISPLAY_SUBSYSTEM_ID,
+	ROTATOR_SUBSYSTEM_ID,
+};
+
+enum {
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
+};
+
+enum {
+	MDP_WRITEBACK_MIRROR_OFF,
+	MDP_WRITEBACK_MIRROR_ON,
+	MDP_WRITEBACK_MIRROR_PAUSE,
+	MDP_WRITEBACK_MIRROR_RESUME,
+};
+
+enum mdp_color_space {
+	MDP_CSC_ITU_R_601,
+	MDP_CSC_ITU_R_601_FR,
+	MDP_CSC_ITU_R_709,
+};
+
+enum {
+	mdp_igc_v1_7 = 1,
+	mdp_igc_vmax,
+	mdp_hist_lut_v1_7,
+	mdp_hist_lut_vmax,
+	mdp_pgc_v1_7,
+	mdp_pgc_vmax,
+	mdp_dither_v1_7,
+	mdp_dither_vmax,
+	mdp_gamut_v1_7,
+	mdp_gamut_vmax,
+	mdp_pa_v1_7,
+	mdp_pa_vmax,
+	mdp_pcc_v1_7,
+	mdp_pcc_vmax,
+	mdp_pp_legacy,
+	mdp_dither_pa_v1_7,
+	mdp_igc_v3,
+	mdp_pp_unknown = 255
+};
+
+#define mdp_dither_pa_v1_7 mdp_dither_pa_v1_7
+#define mdp_pp_unknown mdp_pp_unknown
+#define mdp_igc_v3 mdp_igc_v3
+
+/* PP Features */
+enum {
+	IGC = 1,
+	PCC,
+	GC,
+	PA,
+	GAMUT,
+	DITHER,
+	QSEED,
+	HIST_LUT,
+	HIST,
+	PP_FEATURE_MAX,
+	PA_DITHER,
+	PP_MAX_FEATURES = 25,
+};
+
+#define PA_DITHER PA_DITHER
+#define PP_MAX_FEATURES PP_MAX_FEATURES
+
+struct mdp_pp_feature_version {
+	uint32_t pp_feature;
+	uint32_t version_info;
+};
+#endif /*_UAPI_MSM_MDP_H_*/
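
As a quick illustration of the packed hardware-revision macros defined above, a user-space sketch that decodes a revision value; it assumes the header is installed as <linux/msm_mdp.h> per the Kbuild change.

#include <stdio.h>
#include <linux/msm_mdp.h>

/* Decode a packed MDSS hardware revision into major.minor.step. */
static void print_mdss_rev(unsigned int rev)
{
	printf("MDSS rev %u.%u.%u\n",
	       MDSS_GET_MAJOR(rev), MDSS_GET_MINOR(rev), MDSS_GET_STEP(rev));

	if (IS_MDSS_MAJOR_MINOR_SAME(rev, MDSS_MDP_HW_REV_107))
		printf("same major.minor as the 8996 family\n");
}

/* e.g. print_mdss_rev(MDSS_MDP_REV(1, 7, 1)) prints "MDSS rev 1.7.1" */
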
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
new file mode 100644
index 0000000..05a105b
--- /dev/null
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -0,0 +1,688 @@
+#ifndef _MSM_MDP_EXT_H_
+#define _MSM_MDP_EXT_H_
+
+#include <linux/msm_mdp.h>
+
+#define MDP_IOCTL_MAGIC 'S'
+/* atomic commit ioctl used for validate and commit request */
+#define MSMFB_ATOMIC_COMMIT	_IOWR(MDP_IOCTL_MAGIC, 128, void *)
+
+/*
+ * Ioctl for updating the layer position asynchronously. Initially, pipes
+ * should be configured with MDP_LAYER_ASYNC flag set during the atomic commit,
+ * after which any number of position update calls can be made. This would
+ * enable multiple position updates within a single vsync. However, the screen
+ * update would happen only after vsync, which would pick the latest update.
+ *
+ * Limitations:
+ * - Currently supported only for video mode panels with single LM or dual LM
+ *   with source_split enabled.
+ * - Only position update is supported with no scaling/cropping.
+ * - Async layers should have unique z_order.
+ */
+#define MSMFB_ASYNC_POSITION_UPDATE _IOWR(MDP_IOCTL_MAGIC, 129, \
+					struct mdp_position_update)
+
+/*
+ * Ioctl for sending the config information.
+ * QSEED3 coefficient LUT tables are passed by user space using this IOCTL.
+ */
+#define MSMFB_MDP_SET_CFG _IOW(MDP_IOCTL_MAGIC, 130, \
+					      struct mdp_set_cfg)
+
+/*
+ * To allow proper structure padding for 64bit/32bit target
+ */
+#ifdef __LP64__
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 4
+#endif
+
+/*
+ * LAYER FLAG CONFIGURATION
+ */
+/* left-right layer flip flag */
+#define MDP_LAYER_FLIP_LR		0x1
+
+/* up-down layer flip flag */
+#define MDP_LAYER_FLIP_UD		0x2
+
+/*
+ * This flag enables pixel extension for the current layer. Validate/commit
+ * call uses scale parameters when this flag is enabled.
+ */
+#define MDP_LAYER_ENABLE_PIXEL_EXT	0x4
+
+/* Flag indicates that layer is foreground layer */
+#define MDP_LAYER_FORGROUND		0x8
+
+/* Flag indicates that layer is associated with secure session */
+#define MDP_LAYER_SECURE_SESSION	0x10
+
+/*
+ * Flag indicates that layer is drawing solid fill. Validate/commit call
+ * does not expect buffer when this flag is enabled.
+ */
+#define MDP_LAYER_SOLID_FILL		0x20
+
+/* Layer format is deinterlace */
+#define MDP_LAYER_DEINTERLACE		0x40
+
+/* layer contains bandwidth compressed format data */
+#define MDP_LAYER_BWC			0x80
+
+/* layer is async position updatable */
+#define MDP_LAYER_ASYNC			0x100
+
+/* layer contains postprocessing configuration data */
+#define MDP_LAYER_PP			0x200
+
+/* Flag indicates that layer is associated with secure display session */
+#define MDP_LAYER_SECURE_DISPLAY_SESSION 0x400
+
+/* Flag enables qseed3 scaling for the current layer */
+#define MDP_LAYER_ENABLE_QSEED3_SCALE   0x800
+
+/*
+ * layer will work in multirect mode, where a single hardware pipe
+ * fetches multiple rectangles
+ */
+#define MDP_LAYER_MULTIRECT_ENABLE		0x1000
+
+/*
+ * If this flag is present and multirect is enabled, multirect works in
+ * parallel fetch mode; otherwise it defaults to serial fetch mode.
+ */
+#define MDP_LAYER_MULTIRECT_PARALLEL_MODE	0x2000
+
+/*
+ * DESTINATION SCALER FLAG CONFIGURATION
+ */
+
+/* Enable/disable Destination scaler */
+#define MDP_DESTSCALER_ENABLE		0x1
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Scaling parameter update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_SCALE_UPDATE	0x2
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Detail enhancement setting update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_ENHANCER_UPDATE	0x4
+
+/*
+ * VALIDATE/COMMIT FLAG CONFIGURATION
+ */
+
+/*
+ * Client sets this flag to indicate that the call should only validate the
+ * layers before commit. If the flag is not set, the driver treats the
+ * MSMFB_ATOMIC_COMMIT call as a commit.
+ */
+#define MDP_VALIDATE_LAYER			0x01
+
+/*
+ * This flag is only valid for commit call. Commit behavior is synchronous
+ * when this flag is defined. It blocks current call till processing is
+ * complete. Behavior is asynchronous otherwise.
+ */
+#define MDP_COMMIT_WAIT_FOR_FINISH		0x02
+
+/*
+ * This flag is only valid for the commit call and is used for debugging. It
+ * forces the driver to wait for sync fences.
+ */
+#define MDP_COMMIT_SYNC_FENCE_WAIT		0x04
+
+/* Flag to enable AVR(Adaptive variable refresh) feature. */
+#define MDP_COMMIT_AVR_EN			0x08
+
+/*
+ * Flag to select one shot mode when AVR feature is enabled.
+ * Default mode is continuous mode.
+ */
+#define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
+
+/* Flag to enable concurrent writeback for the frame */
+#define MDP_COMMIT_CWB_EN 0x800
+
+/*
+ * Flag to select DSPP as the data point for CWB. If CWB
+ * is enabled without this flag, LM will be selected as data point.
+ */
+#define MDP_COMMIT_CWB_DSPP 0x1000
+
+#define MDP_COMMIT_VERSION_1_0		0x00010000
+
+/*
+ * Configuration structures
+ * All parameters are inputs to the driver unless explicitly marked as
+ * output parameters.
+ */
+struct mdp_layer_plane {
+	/* DMA buffer file descriptor information. */
+	int fd;
+
+	/* Pixel offset in the dma buffer. */
+	uint32_t offset;
+
+	/* Number of bytes in one scan line including padding bytes. */
+	uint32_t stride;
+};
+
+struct mdp_layer_buffer {
+	/* layer width in pixels. */
+	uint32_t width;
+
+	/* layer height in pixels. */
+	uint32_t height;
+
+	/*
+	 * layer format in DRM-style fourcc, refer drm_fourcc.h for
+	 * standard formats
+	 */
+	uint32_t format;
+
+	/* plane to hold the fd, offset, etc for all color components */
+	struct mdp_layer_plane planes[MAX_PLANES];
+
+	/* valid planes count in layer planes list */
+	uint32_t plane_count;
+
+	/* compression ratio factor, value depends on the pixel format */
+	struct mult_factor comp_ratio;
+
+	/*
+	 * SyncFence associated with this buffer. It is used in two ways.
+	 *
+	 * 1. Driver waits to consume the buffer till producer signals in case
+	 * of primary and external display.
+	 *
+	 * 2. Writeback device uses buffer structure for output buffer where
+	 * driver is producer. However, client sends the fence with buffer to
+	 * indicate that consumer is still using the buffer and it is not ready
+	 * for new content.
+	 */
+	int	 fence;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t reserved;
+};
+
+/*
+ * One layer holds the configuration for one pipe. If a client wants to stage
+ * a single layer on two pipes, it should send two different layers with
+ * relative (x,y) information. The client must send the same information
+ * during the validate and commit calls; the commit call may fail if different
+ * layer information is attached to the same pipe during validate and commit.
+ * The device invalidates the pipe once it receives the vsync for that commit.
+ */
+struct mdp_input_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag configuration section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the index
+	 * in validate and commit call. Device reserves the pipe once validate
+	 * is successful. Device only uses validated pipe during commit call.
+	 * If client sends different layer/pipe configuration in validate &
+	 * commit then commit may fail.
+	 */
+	uint32_t		pipe_ndx;
+
+	/*
+	 * Horizontal decimation value. This indicates the number of pixels
+	 * dropped for each pixel that is fetched from a line. It does not
+	 * result in bandwidth reduction because pixels are still fetched from
+	 * memory but dropped internally by hardware.
+	 * The decimation value is the log2 of the decimation factor:
+	 * 0: no decimation
+	 * 1: decimate by 2 (drop 1 pixel for each pixel fetched)
+	 * 2: decimate by 4 (drop 3 pixels for each pixel fetched)
+	 * 3: decimate by 8 (drop 7 pixels for each pixel fetched)
+	 * 4: decimate by 16 (drop 15 pixels for each pixel fetched)
+	 */
+	uint8_t			horz_deci;
+
+	/*
+	 * Vertical decimation value. This indicates the number of lines
+	 * dropped for each line that is fetched from overlay. It saves
+	 * bandwidth because decimated pixels are not fetched.
+	 * The decimation value is the log2 of the decimation factor:
+	 * 0: no decimation
+	 * 1: decimation by 2 (drop 1 line for each line fetched)
+	 * 2: decimation by 4 (drop 3 lines for each line fetched)
+	 * 3: decimation by 8 (drop 7 lines for each line fetched)
+	 * 4: decimation by 16 (drop 15 lines for each line fetched)
+	 */
+	uint8_t			vert_deci;
+
+	/*
+	 * Used to set plane opacity. The range can be from 0-255, where
+	 * 0 means completely transparent and 255 means fully opaque.
+	 */
+	uint8_t			alpha;
+
+	/*
+	 * Blending stage to occupy in the display. If multiple layers are
+	 * present, the highest z_order usually means the topmost visible
+	 * layer. The acceptable range is 0-7, supporting up to 8 blended
+	 * layers.
+	 */
+	uint16_t		z_order;
+
+	/*
+	 * Color used as color key for transparency. Any pixel in fetched
+	 * image matching this color will be transparent when blending.
+	 * The color should be in same format as the source image format.
+	 */
+	uint32_t		transp_mask;
+
+	/*
+	 * Solid color used to fill the overlay surface when no source
+	 * buffer is provided.
+	 */
+	uint32_t		bg_color;
+
+	/* blend operation defined in "mdss_mdp_blend_op" enum. */
+	enum mdss_mdp_blend_op		blend_op;
+
+	/* color space of the source */
+	enum mdp_color_space	color_space;
+
+	/*
+	 * Source crop rectangle, portion of image that will be fetched. This
+	 * should always be within boundaries of source image.
+	 */
+	struct mdp_rect		src_rect;
+
+	/*
+	 * Destination rectangle, the position and size of image on screen.
+	 * This should always be within panel boundaries.
+	 */
+	struct mdp_rect		dst_rect;
+
+	/* Scaling parameters. */
+	void __user	*scale;
+
+	/* Buffer attached with each layer. Device uses it for commit call. */
+	struct mdp_layer_buffer	buffer;
+
+	/*
+	 * Source side post processing configuration information for each
+	 * layer.
+	 */
+	void __user		*pp_info;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Only for validate call. Frame buffer device sets error code
+	 * based on validate call failure scenario.
+	 */
+	int			error_code;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t		reserved[6];
+};
+
+struct mdp_output_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Writeback destination selection for output. Client provides the index
+	 * in validate and commit call.
+	 */
+	uint32_t			writeback_ndx;
+
+	/* Buffer attached with output layer. Device uses it for commit call */
+	struct mdp_layer_buffer		buffer;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t			reserved[6];
+};
+
+/*
+ * Destination scaler info structure; it holds the setup parameters for the
+ * upscaling configuration in the destination scaling block.
+ */
+struct mdp_destination_scaler_data {
+	/*
+	 * Flag to switch between modes for the destination scaler. Refer to
+	 * the destination scaler flag configuration for all possible settings.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Destination scaler selection index. Client provides the index in
+	 * validate and commit call.
+	 */
+	uint32_t			dest_scaler_ndx;
+
+	/*
+	 * LM width configuration for this destination scaler update
+	 */
+	uint32_t			lm_width;
+
+	/*
+	 * LM height configuration for this destination scaler update
+	 */
+	uint32_t			lm_height;
+
+	/*
+	 * The scaling parameters for all modes except disable. When
+	 * disabling the scaler, there is no need to provide them.
+	 * This is a userspace pointer to struct mdp_scale_data_v2.
+	 */
+	uint64_t	__user scale;
+};
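+
+/*
+ * Illustrative usage (a sketch, not part of the original interface notes;
+ * ds_scale is a caller-provided struct mdp_scale_data_v2, defined later in
+ * this file):
+ *
+ *	struct mdp_destination_scaler_data ds = { 0 };
+ *
+ *	ds.flags = MDP_DESTSCALER_ENABLE | MDP_DESTSCALER_SCALE_UPDATE;
+ *	ds.dest_scaler_ndx = 0;
+ *	ds.lm_width = 1080;
+ *	ds.lm_height = 1920;
+ *	ds.scale = (uint64_t)(uintptr_t)&ds_scale;
+ *
+ * The structure is then attached to mdp_layer_commit_v1.dest_scaler with
+ * dest_scaler_cnt set to 1.
+ */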
+
+/*
+ * The commit structure holds the layer stack sent by the client for the
+ * validate and commit calls. If the layers differ between validate and
+ * commit, the commit call also performs validation and may fail.
+ */
+struct mdp_layer_commit_v1 {
+	/*
+	 * Flag to enable/disable properties for commit/validate call. Refer
+	 * validate/commit flag config section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides release fence handle to client. It
+	 * triggers release fence when display hardware has consumed all the
+	 * buffers attached to this commit call and buffer is ready for reuse
+	 * for primary and external. For writeback case, it triggers it when
+	 * output buffer is ready for consumer.
+	 */
+	int			release_fence;
+
+	/*
+	 * Left_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * left part of panel when it is split display. For non-split display,
+	 * it defines the "region of interest" on the panel.
+	 */
+	struct mdp_rect		left_roi;
+
+	/*
+	 * Right_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * right part of panel for split display configuration. It is not
+	 * required for non-split display.
+	 */
+	struct mdp_rect		right_roi;
+
+	 /* Pointer to a list of input layers for composition. */
+	struct mdp_input_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t		input_layer_cnt;
+
+	/*
+	 * Output layer for writeback display. It supports only one
+	 * layer as output. This is not required for primary
+	 * and external displays.
+	 */
+	struct mdp_output_layer __user *output_layer;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides retire fence handle if
+	 * COMMIT_RETIRE_FENCE flag is set in commit call. It triggers
+	 * retire fence when current layers are swapped with new layers
+	 * on display hardware. For video mode panel and writeback,
+	 * retire fence and release fences are triggered at the same
+	 * time while command mode panel triggers release fence first
+	 * (on pingpong done) and retire fence (on rdptr done)
+	 * after that.
+	 */
+	int			retire_fence;
+
+	/*
+	 * Scaler data and control for setting up destination scaler.
+	 * A userspace pointer that points to a list of
+	 * struct mdp_destination_scaler_data.
+	 */
+	void __user		*dest_scaler;
+
+	/*
+	 * Number of destination scaler data entries provided by userspace.
+	 */
+	uint32_t		dest_scaler_cnt;
+
+	/* 32-bits reserved value for future usage. */
+	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+/*
+ * mdp_layer_commit - argument for ioctl MSMFB_ATOMIC_COMMIT
+ */
+struct mdp_layer_commit {
+	/*
+	 * 32-bit version that selects the commit structure from the union.
+	 * The lower 16 bits hold the minor version and the upper 16 bits
+	 * hold the major version; the commit structure is chosen by major
+	 * version. A non-zero minor version indicates that reserved fields
+	 * are in use.
+	 *
+	 * Current supported version is 1.0 (Major:1 Minor:0)
+	 */
+	uint32_t version;
+	union {
+		/* Layer commit/validate definition for V1 */
+		struct mdp_layer_commit_v1 commit_v1;
+	};
+};
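+
+/*
+ * Illustrative usage (a sketch; fb_fd, layers and layer_cnt are placeholders
+ * supplied by the caller). A typical validate-then-commit sequence could look
+ * like:
+ *
+ *	struct mdp_layer_commit commit = { 0 };
+ *
+ *	commit.version = MDP_COMMIT_VERSION_1_0;
+ *	commit.commit_v1.input_layers = layers;
+ *	commit.commit_v1.input_layer_cnt = layer_cnt;
+ *	commit.commit_v1.flags = MDP_VALIDATE_LAYER;
+ *	ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit);	// validate only
+ *	commit.commit_v1.flags = 0;
+ *	ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit);	// commit the frame
+ *	// commit.commit_v1.release_fence now holds the release fence fd
+ */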
+
+struct mdp_point {
+	uint32_t x;
+	uint32_t y;
+};
+
+/*
+ * Async updatable layers. One layer holds configuration for one pipe.
+ */
+struct mdp_async_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the
+	 * pipe index that the device reserved during ATOMIC_COMMIT.
+	 */
+	uint32_t		pipe_ndx;
+
+	/* Source start x,y. */
+	struct mdp_point	src;
+
+	/* Destination start x,y. */
+	struct mdp_point	dst;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device sets error code based on the failure.
+	 */
+	int			error_code;
+
+	uint32_t		reserved[3];
+};
+
+/*
+ * mdp_position_update - argument for ioctl MSMFB_ASYNC_POSITION_UPDATE
+ */
+struct mdp_position_update {
+	 /* Pointer to a list of async updatable input layers */
+	struct mdp_async_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t input_layer_cnt;
+};
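+
+/*
+ * Illustrative usage (a sketch; fb_fd, pipe_ndx, new_x and new_y are
+ * placeholders supplied by the caller). A layer staged with MDP_LAYER_ASYNC
+ * could be repositioned roughly as follows:
+ *
+ *	struct mdp_async_layer layer = { 0 };
+ *	struct mdp_position_update update = { 0 };
+ *
+ *	layer.pipe_ndx = pipe_ndx;	// pipe reserved at commit time
+ *	layer.dst = (struct mdp_point){ new_x, new_y };
+ *	update.input_layers = &layer;
+ *	update.input_layer_cnt = 1;
+ *	ioctl(fb_fd, MSMFB_ASYNC_POSITION_UPDATE, &update);
+ */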
+
+#define MAX_DET_CURVES		3
+struct mdp_det_enhance_data {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[MAX_DET_CURVES];
+	int16_t adjust_b[MAX_DET_CURVES];
+	int16_t adjust_c[MAX_DET_CURVES];
+};
+
+/* Flags to enable Scaler and its sub components */
+#define ENABLE_SCALE			0x1
+#define ENABLE_DETAIL_ENHANCE		0x2
+#define ENABLE_DIRECTION_DETECTION	0x4
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+
+/**
+ * struct mdp_scale_data_v2
+ * Driver uses this data structure to store all scaling parameters.
+ * It contains all pixel extension data, the QSEED3 filter configuration
+ * and the coefficient table indices.
+ */
+struct mdp_scale_data_v2 {
+	uint32_t enable;
+
+	/* Init phase values */
+	int32_t init_phase_x[MAX_PLANES];
+	int32_t phase_step_x[MAX_PLANES];
+	int32_t init_phase_y[MAX_PLANES];
+	int32_t phase_step_y[MAX_PLANES];
+
+	/*
+	 * This should be set to the total horizontal pixels:
+	 * left + right + width
+	 */
+	uint32_t num_ext_pxls_left[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_right[MAX_PLANES];
+
+	/*
+	 * This should be set to the total vertical pixels:
+	 * top + bottom + height
+	 */
+	uint32_t num_ext_pxls_top[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_btm[MAX_PLANES];
+
+	/* over fetch pixels */
+	int32_t left_ftch[MAX_PLANES];
+	int32_t left_rpt[MAX_PLANES];
+	int32_t right_ftch[MAX_PLANES];
+	int32_t right_rpt[MAX_PLANES];
+
+	/* Repeat pixels */
+	uint32_t top_rpt[MAX_PLANES];
+	uint32_t btm_rpt[MAX_PLANES];
+	uint32_t top_ftch[MAX_PLANES];
+	uint32_t btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+
+	/*
+	 * The alpha plane can only be scaled using bilinear or pixel
+	 * repeat/drop; specify these for the Y and UV planes only.
+	 */
+	uint32_t preload_x[MAX_PLANES];
+	uint32_t preload_y[MAX_PLANES];
+	uint32_t src_width[MAX_PLANES];
+	uint32_t src_height[MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes */
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	struct mdp_det_enhance_data detail_enhance;
+
+	/* reserved value for future usage. */
+	uint64_t reserved[8];
+};
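+
+/*
+ * Illustrative example (values are arbitrary): for a plane with a 100-pixel
+ * ROI width and 2 extension pixels on each side, num_ext_pxls_left[plane]
+ * would be set to 2 + 2 + 100 = 104. A layer using this structure for QSEED3
+ * scaling is assumed to also set MDP_LAYER_ENABLE_QSEED3_SCALE in its flags.
+ */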
+
+/**
+ * struct mdp_scale_luts_info
+ * This structure is received as the payload of MSMFB_MDP_SET_CFG when flags
+ * is set to MDP_QSEED3_LUT_CFG
+ * @dir_lut:      Direction detection coefficients table
+ * @cir_lut:      Circular coefficients table
+ * @sep_lut:      Separable coefficients table
+ * @dir_lut_size: Size of direction coefficients table
+ * @cir_lut_size: Size of circular coefficients table
+ * @sep_lut_size: Size of separable coefficients table
+ */
+struct mdp_scale_luts_info {
+	uint64_t __user dir_lut;
+	uint64_t __user cir_lut;
+	uint64_t __user sep_lut;
+	uint32_t dir_lut_size;
+	uint32_t cir_lut_size;
+	uint32_t sep_lut_size;
+};
+
+#define MDP_QSEED3_LUT_CFG 0x1
+
+struct mdp_set_cfg {
+	uint64_t flags;
+	uint32_t len;
+	uint64_t __user payload;
+};
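+
+/*
+ * Illustrative usage (a sketch; fb_fd, the LUT arrays and their sizes are
+ * placeholders supplied by the caller):
+ *
+ *	struct mdp_scale_luts_info luts = { 0 };
+ *	struct mdp_set_cfg cfg = { 0 };
+ *
+ *	luts.dir_lut = (uint64_t)(uintptr_t)dir_lut;
+ *	luts.cir_lut = (uint64_t)(uintptr_t)cir_lut;
+ *	luts.sep_lut = (uint64_t)(uintptr_t)sep_lut;
+ *	luts.dir_lut_size = dir_lut_size;
+ *	luts.cir_lut_size = cir_lut_size;
+ *	luts.sep_lut_size = sep_lut_size;
+ *	cfg.flags = MDP_QSEED3_LUT_CFG;
+ *	cfg.len = sizeof(luts);
+ *	cfg.payload = (uint64_t)(uintptr_t)&luts;
+ *	ioctl(fb_fd, MSMFB_MDP_SET_CFG, &cfg);
+ */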
+#endif
diff --git a/include/uapi/linux/msm_rotator.h b/include/uapi/linux/msm_rotator.h
new file mode 100644
index 0000000..e1a2ecb
--- /dev/null
+++ b/include/uapi/linux/msm_rotator.h
@@ -0,0 +1,60 @@
+#ifndef _UAPI__MSM_ROTATOR_H__
+#define _UAPI__MSM_ROTATOR_H__
+
+#include <linux/types.h>
+#include <linux/msm_mdp.h>
+
+#define MSM_ROTATOR_IOCTL_MAGIC 'R'
+
+#define MSM_ROTATOR_IOCTL_START   \
+		_IOWR(MSM_ROTATOR_IOCTL_MAGIC, 1, struct msm_rotator_img_info)
+#define MSM_ROTATOR_IOCTL_ROTATE   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info)
+#define MSM_ROTATOR_IOCTL_FINISH   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int)
+
+#define ROTATOR_VERSION_01	0xA5B4C301
+
+enum rotator_clk_type {
+	ROTATOR_CORE_CLK,
+	ROTATOR_PCLK,
+	ROTATOR_IMEM_CLK
+};
+
+struct msm_rotator_img_info {
+	unsigned int session_id;
+	struct msmfb_img  src;
+	struct msmfb_img  dst;
+	struct mdp_rect src_rect;
+	unsigned int    dst_x;
+	unsigned int    dst_y;
+	unsigned char   rotations;
+	int enable;
+	unsigned int	downscale_ratio;
+	unsigned int secure;
+};
+
+struct msm_rotator_data_info {
+	int session_id;
+	struct msmfb_data src;
+	struct msmfb_data dst;
+	unsigned int version_key;
+	struct msmfb_data src_chroma;
+	struct msmfb_data dst_chroma;
+};
+
+struct msm_rot_clocks {
+	const char *clk_name;
+	enum rotator_clk_type clk_type;
+	unsigned int clk_rate;
+};
+
+struct msm_rotator_platform_data {
+	unsigned int number_of_clocks;
+	unsigned int hardware_version_number;
+	struct msm_rot_clocks *rotator_clks;
+	struct msm_bus_scale_pdata *bus_scale_table;
+	char rot_iommu_split_domain;
+};
+#endif
+
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index df7451d..4937c09 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -130,8 +130,11 @@
 struct tc_prio_qopt {
 	int	bands;			/* Number of bands */
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
+	__u8	enable_flow;		/* Enable dequeue */
 };
 
+#define TCQ_PRIO_FLOW_CONTROL 1
+
 /* MULTIQ section */
 
 struct tc_multiq_qopt {
diff --git a/include/uapi/linux/rmnet_data.h b/include/uapi/linux/rmnet_data.h
index 7044df4..48c173e 100644
--- a/include/uapi/linux/rmnet_data.h
+++ b/include/uapi/linux/rmnet_data.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -206,8 +206,19 @@
 	 *       uint32_t MAP Flow Handle
 	 * Returns: status code
 	 */
-	RMNET_NETLINK_DEL_VND_TC_FLOW
+	RMNET_NETLINK_DEL_VND_TC_FLOW,
+
+	/*
+	 * RMNET_NETLINK_NEW_VND_WITH_NAME - Creates a new virtual network
+	 *                                   device node with the specified
+	 *                                   device name
+	 * Args: int32_t node number
+	 *       char[] vnd_name - Use as name
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND_WITH_NAME
 };
+#define RMNET_NETLINK_NEW_VND_WITH_NAME RMNET_NETLINK_NEW_VND_WITH_NAME
 
 enum rmnet_config_endpoint_modes_e {
 	/* Pass the frame up the stack with no modifications to skb->dev      */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index a242e72..fd379ec 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -70,7 +70,7 @@
  * Common stuff for both V4L1 and V4L2
  * Moved from videodev.h
  */
-#define VIDEO_MAX_FRAME               32
+#define VIDEO_MAX_FRAME               64
 #define VIDEO_MAX_PLANES               8
 
 /*
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index bf859ff7..d138beb 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -4,6 +4,7 @@
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
 header-y += cam_sensor.h
+header-y += cam_sync.h
 header-y += msm_media_info.h
 header-y += msm_vidc.h
 header-y += msm_sde_rotator.h
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
new file mode 100644
index 0000000..003c9ad
--- /dev/null
+++ b/include/uapi/media/cam_sync.h
@@ -0,0 +1,134 @@
+#ifndef __UAPI_CAM_SYNC_H__
+#define __UAPI_CAM_SYNC_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+
+#define CAM_SYNC_DEVICE_NAME                     "cam_sync_device"
+
+/* V4L event which user space will subscribe to */
+#define CAM_SYNC_V4L_EVENT                       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define CAM_SYNC_V4L_EVENT_ID_CB_TRIG            0
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define CAM_SYNC_USER_PAYLOAD_SIZE               2
+
+/* Device type for sync device needed for device discovery */
+#define CAM_SYNC_DEVICE_TYPE                     (MEDIA_ENT_F_OLD_BASE)
+
+#define CAM_SYNC_GET_PAYLOAD_PTR(ev, type)       \
+	(type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header))
+
+#define CAM_SYNC_GET_HEADER_PTR(ev)              \
+	((struct cam_sync_ev_header *)ev.u.data)
+
+#define CAM_SYNC_STATE_INVALID                   0
+#define CAM_SYNC_STATE_ACTIVE                    1
+#define CAM_SYNC_STATE_SIGNALED_SUCCESS          2
+#define CAM_SYNC_STATE_SIGNALED_ERROR            3
+
+/**
+ * struct cam_sync_ev_header - Event header for sync event notification
+ *
+ * @sync_obj: Sync object
+ * @status:   Status of the object
+ */
+struct cam_sync_ev_header {
+	int32_t sync_obj;
+	int32_t status;
+};
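+
+/*
+ * Illustrative usage (a sketch; "ev" is a struct v4l2_event dequeued with
+ * VIDIOC_DQEVENT after subscribing to CAM_SYNC_V4L_EVENT, and handle_signal()
+ * is a hypothetical caller-side helper):
+ *
+ *	struct cam_sync_ev_header *hdr = CAM_SYNC_GET_HEADER_PTR(ev);
+ *
+ *	if (hdr->status == CAM_SYNC_STATE_SIGNALED_SUCCESS)
+ *		handle_signal(hdr->sync_obj);
+ */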
+
+/**
+ * struct cam_sync_info - Sync object creation information
+ *
+ * @name:       Optional string representation of the sync object
+ * @sync_obj:   Sync object returned after creation in kernel
+ */
+struct cam_sync_info {
+	char name[64];
+	int32_t sync_obj;
+};
+
+/**
+ * struct cam_sync_signal - Sync object signaling struct
+ *
+ * @sync_obj:   Sync object to be signaled
+ * @sync_state: State to which the sync object should be signaled
+ */
+struct cam_sync_signal {
+	int32_t sync_obj;
+	uint32_t sync_state;
+};
+
+/**
+ * struct cam_sync_merge - Merge information for sync objects
+ *
+ * @sync_objs:  Pointer to sync objects
+ * @num_objs:   Number of objects in the array
+ * @merged:     Merged sync object
+ */
+struct cam_sync_merge {
+	__u64 sync_objs;
+	uint32_t num_objs;
+	int32_t merged;
+};
+
+/**
+ * struct cam_sync_userpayload_info - Payload info from user space
+ *
+ * @sync_obj:   Sync object for which the payload has to be registered
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct cam_sync_userpayload_info {
+	int32_t sync_obj;
+	uint32_t reserved;
+	__u64 payload[CAM_SYNC_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct cam_sync_wait - Sync object wait information
+ *
+ * @sync_obj:   Sync object to wait on
+ * @reserved:   Reserved
+ * @timeout_ms: Timeout in milliseconds
+ */
+struct cam_sync_wait {
+	int32_t sync_obj;
+	uint32_t reserved;
+	uint64_t timeout_ms;
+};
+
+/**
+ * struct cam_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id:         IOCTL command id
+ * @size:       Size of command payload
+ * @result:     Result of command execution
+ * @reserved:   Reserved
+ * @ioctl_ptr:  Pointer to user data
+ */
+struct cam_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__user __u64 ioctl_ptr;
+};
+
+#define CAM_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_private_ioctl_arg)
+
+#define CAM_SYNC_CREATE                          0
+#define CAM_SYNC_DESTROY                         1
+#define CAM_SYNC_SIGNAL                          2
+#define CAM_SYNC_MERGE                           3
+#define CAM_SYNC_REGISTER_PAYLOAD                4
+#define CAM_SYNC_DEREGISTER_PAYLOAD              5
+#define CAM_SYNC_WAIT                            6
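+
+/*
+ * Illustrative usage (a sketch; sync_fd is assumed to be an open handle to
+ * the cam_sync device, and issuing the command through a plain ioctl() is an
+ * assumption about the user-space wrapper):
+ *
+ *	struct cam_sync_info info = { .name = "frame0" };
+ *	struct cam_private_ioctl_arg arg = { 0 };
+ *
+ *	arg.id = CAM_SYNC_CREATE;
+ *	arg.size = sizeof(info);
+ *	arg.ioctl_ptr = (__u64)(uintptr_t)&info;
+ *	ioctl(sync_fd, CAM_PRIVATE_IOCTL_CMD, &arg);
+ *	// info.sync_obj now holds the newly created sync object
+ */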
+
+#endif /* __UAPI_CAM_SYNC_H__ */
diff --git a/include/uapi/video/Kbuild b/include/uapi/video/Kbuild
index ac7203b..b98fa51 100644
--- a/include/uapi/video/Kbuild
+++ b/include/uapi/video/Kbuild
@@ -1,4 +1,6 @@
 # UAPI Header export list
 header-y += edid.h
+header-y += msm_hdmi_hdcp_mgr.h
+header-y += msm_hdmi_modes.h
 header-y += sisfb.h
 header-y += uvesafb.h
diff --git a/include/uapi/video/msm_hdmi_hdcp_mgr.h b/include/uapi/video/msm_hdmi_hdcp_mgr.h
new file mode 100644
index 0000000..85fa918
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_hdcp_mgr.h
@@ -0,0 +1,54 @@
+#ifndef _UAPI__MSM_HDMI_HDCP_MGR_H
+#define _UAPI__MSM_HDMI_HDCP_MGR_H
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum {
+	MSG_ID_IDX,
+	RET_CODE_IDX,
+	HEADER_LEN,
+};
+
+enum RET_CODE {
+	HDCP_NOT_AUTHED,
+	HDCP_AUTHED,
+	HDCP_DISABLE,
+};
+
+enum MSG_ID { /* List of functions expected to be called after it */
+	DOWN_CHECK_TOPOLOGY,
+	UP_REQUEST_TOPOLOGY,
+	UP_SEND_TOPOLOGY,
+	DOWN_REQUEST_TOPOLOGY,
+	MSG_NUM,
+};
+
+enum SOURCE_ID {
+	HDCP_V1_TX,
+	HDCP_V1_RX,
+	HDCP_V2_RX,
+	HDCP_V2_TX,
+	SRC_NUM,
+};
+
+/*
+ * Layout used to parse the sysfs params buffer
+ * exposed by the hdcp_tx driver.
+ */
+
+struct HDCP_V2V1_MSG_TOPOLOGY {
+	/* indicates downstream's type */
+	uint32_t ds_type;
+	uint8_t bksv[5];
+	uint8_t dev_count;
+	uint8_t depth;
+	uint8_t ksv_list[5 * 127];
+	uint32_t max_cascade_exceeded;
+	uint32_t max_dev_exceeded;
+};
+
+#endif /* _UAPI__MSM_HDMI_HDCP_MGR_H */
diff --git a/include/uapi/video/msm_hdmi_modes.h b/include/uapi/video/msm_hdmi_modes.h
new file mode 100644
index 0000000..8a02997
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_modes.h
@@ -0,0 +1,559 @@
+#ifndef _UAPI_MSM_HDMI_MODES_H__
+#define _UAPI_MSM_HDMI_MODES_H__
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define MSM_HDMI_RGB_888_24BPP_FORMAT       (1 << 0)
+#define MSM_HDMI_YUV_420_12BPP_FORMAT       (1 << 1)
+
+enum aspect_ratio {
+	HDMI_RES_AR_INVALID,
+	HDMI_RES_AR_4_3,
+	HDMI_RES_AR_5_4,
+	HDMI_RES_AR_16_9,
+	HDMI_RES_AR_16_10,
+	HDMI_RES_AR_64_27,
+	HDMI_RES_AR_256_135,
+	HDMI_RES_AR_MAX,
+};
+
+enum msm_hdmi_s3d_mode {
+	HDMI_S3D_NONE,
+	HDMI_S3D_SIDE_BY_SIDE,
+	HDMI_S3D_TOP_AND_BOTTOM,
+	HDMI_S3D_FRAME_PACKING,
+	HDMI_S3D_MAX,
+};
+
+struct msm_hdmi_mode_timing_info {
+	uint32_t	video_format;
+	uint32_t	active_h;
+	uint32_t	front_porch_h;
+	uint32_t	pulse_width_h;
+	uint32_t	back_porch_h;
+	uint32_t	active_low_h;
+	uint32_t	active_v;
+	uint32_t	front_porch_v;
+	uint32_t	pulse_width_v;
+	uint32_t	back_porch_v;
+	uint32_t	active_low_v;
+	/* Divide by 1000 to get the actual frequency in MHz */
+	uint32_t	pixel_freq;
+	/* Divide by 1000 to get the actual frequency in Hz */
+	uint32_t	refresh_rate;
+	uint32_t	interlaced;
+	uint32_t	supported;
+	enum aspect_ratio ar;
+	/* Flags indicating support for specific pixel formats */
+	uint32_t        pixel_formats;
+};
+
+#define MSM_HDMI_INIT_RES_PAGE          1
+
+#define MSM_HDMI_MODES_CEA		(1 << 0)
+#define MSM_HDMI_MODES_XTND		(1 << 1)
+#define MSM_HDMI_MODES_DVI		(1 << 2)
+#define MSM_HDMI_MODES_ALL		(MSM_HDMI_MODES_CEA |\
+					 MSM_HDMI_MODES_XTND |\
+					 MSM_HDMI_MODES_DVI)
+
+/* all video formats defined by CEA 861D */
+#define HDMI_VFRMT_UNKNOWN		0
+#define HDMI_VFRMT_640x480p60_4_3	1
+#define HDMI_VFRMT_720x480p60_4_3	2
+#define HDMI_VFRMT_720x480p60_16_9	3
+#define HDMI_VFRMT_1280x720p60_16_9	4
+#define HDMI_VFRMT_1920x1080i60_16_9	5
+#define HDMI_VFRMT_720x480i60_4_3	6
+#define HDMI_VFRMT_1440x480i60_4_3	HDMI_VFRMT_720x480i60_4_3
+#define HDMI_VFRMT_720x480i60_16_9	7
+#define HDMI_VFRMT_1440x480i60_16_9	HDMI_VFRMT_720x480i60_16_9
+#define HDMI_VFRMT_720x240p60_4_3	8
+#define HDMI_VFRMT_1440x240p60_4_3	HDMI_VFRMT_720x240p60_4_3
+#define HDMI_VFRMT_720x240p60_16_9	9
+#define HDMI_VFRMT_1440x240p60_16_9	HDMI_VFRMT_720x240p60_16_9
+#define HDMI_VFRMT_2880x480i60_4_3	10
+#define HDMI_VFRMT_2880x480i60_16_9	11
+#define HDMI_VFRMT_2880x240p60_4_3	12
+#define HDMI_VFRMT_2880x240p60_16_9	13
+#define HDMI_VFRMT_1440x480p60_4_3	14
+#define HDMI_VFRMT_1440x480p60_16_9	15
+#define HDMI_VFRMT_1920x1080p60_16_9	16
+#define HDMI_VFRMT_720x576p50_4_3	17
+#define HDMI_VFRMT_720x576p50_16_9	18
+#define HDMI_VFRMT_1280x720p50_16_9	19
+#define HDMI_VFRMT_1920x1080i50_16_9	20
+#define HDMI_VFRMT_720x576i50_4_3	21
+#define HDMI_VFRMT_1440x576i50_4_3	HDMI_VFRMT_720x576i50_4_3
+#define HDMI_VFRMT_720x576i50_16_9	22
+#define HDMI_VFRMT_1440x576i50_16_9	HDMI_VFRMT_720x576i50_16_9
+#define HDMI_VFRMT_720x288p50_4_3	23
+#define HDMI_VFRMT_1440x288p50_4_3	HDMI_VFRMT_720x288p50_4_3
+#define HDMI_VFRMT_720x288p50_16_9	24
+#define HDMI_VFRMT_1440x288p50_16_9	HDMI_VFRMT_720x288p50_16_9
+#define HDMI_VFRMT_2880x576i50_4_3	25
+#define HDMI_VFRMT_2880x576i50_16_9	26
+#define HDMI_VFRMT_2880x288p50_4_3	27
+#define HDMI_VFRMT_2880x288p50_16_9	28
+#define HDMI_VFRMT_1440x576p50_4_3	29
+#define HDMI_VFRMT_1440x576p50_16_9	30
+#define HDMI_VFRMT_1920x1080p50_16_9	31
+#define HDMI_VFRMT_1920x1080p24_16_9	32
+#define HDMI_VFRMT_1920x1080p25_16_9	33
+#define HDMI_VFRMT_1920x1080p30_16_9	34
+#define HDMI_VFRMT_2880x480p60_4_3	35
+#define HDMI_VFRMT_2880x480p60_16_9	36
+#define HDMI_VFRMT_2880x576p50_4_3	37
+#define HDMI_VFRMT_2880x576p50_16_9	38
+#define HDMI_VFRMT_1920x1250i50_16_9	39
+#define HDMI_VFRMT_1920x1080i100_16_9	40
+#define HDMI_VFRMT_1280x720p100_16_9	41
+#define HDMI_VFRMT_720x576p100_4_3	42
+#define HDMI_VFRMT_720x576p100_16_9	43
+#define HDMI_VFRMT_720x576i100_4_3	44
+#define HDMI_VFRMT_1440x576i100_4_3	HDMI_VFRMT_720x576i100_4_3
+#define HDMI_VFRMT_720x576i100_16_9	45
+#define HDMI_VFRMT_1440x576i100_16_9	HDMI_VFRMT_720x576i100_16_9
+#define HDMI_VFRMT_1920x1080i120_16_9	46
+#define HDMI_VFRMT_1280x720p120_16_9	47
+#define HDMI_VFRMT_720x480p120_4_3	48
+#define HDMI_VFRMT_720x480p120_16_9	49
+#define HDMI_VFRMT_720x480i120_4_3	50
+#define HDMI_VFRMT_1440x480i120_4_3	HDMI_VFRMT_720x480i120_4_3
+#define HDMI_VFRMT_720x480i120_16_9	51
+#define HDMI_VFRMT_1440x480i120_16_9	HDMI_VFRMT_720x480i120_16_9
+#define HDMI_VFRMT_720x576p200_4_3	52
+#define HDMI_VFRMT_720x576p200_16_9	53
+#define HDMI_VFRMT_720x576i200_4_3	54
+#define HDMI_VFRMT_1440x576i200_4_3	HDMI_VFRMT_720x576i200_4_3
+#define HDMI_VFRMT_720x576i200_16_9	55
+#define HDMI_VFRMT_1440x576i200_16_9	HDMI_VFRMT_720x576i200_16_9
+#define HDMI_VFRMT_720x480p240_4_3	56
+#define HDMI_VFRMT_720x480p240_16_9	57
+#define HDMI_VFRMT_720x480i240_4_3	58
+#define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
+#define HDMI_VFRMT_720x480i240_16_9	59
+#define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
+#define HDMI_VFRMT_1280x720p24_16_9	60
+#define HDMI_VFRMT_1280x720p25_16_9	61
+#define HDMI_VFRMT_1280x720p30_16_9	62
+#define HDMI_VFRMT_1920x1080p120_16_9	63
+#define HDMI_VFRMT_1920x1080p100_16_9	64
+#define HDMI_VFRMT_1280x720p24_64_27    65
+#define HDMI_VFRMT_1280x720p25_64_27    66
+#define HDMI_VFRMT_1280x720p30_64_27    67
+#define HDMI_VFRMT_1280x720p50_64_27    68
+#define HDMI_VFRMT_1280x720p60_64_27    69
+#define HDMI_VFRMT_1280x720p100_64_27   70
+#define HDMI_VFRMT_1280x720p120_64_27   71
+#define HDMI_VFRMT_1920x1080p24_64_27   72
+#define HDMI_VFRMT_1920x1080p25_64_27   73
+#define HDMI_VFRMT_1920x1080p30_64_27   74
+#define HDMI_VFRMT_1920x1080p50_64_27   75
+#define HDMI_VFRMT_1920x1080p60_64_27   76
+#define HDMI_VFRMT_1920x1080p100_64_27  77
+#define HDMI_VFRMT_1920x1080p120_64_27  78
+#define HDMI_VFRMT_1680x720p24_64_27    79
+#define HDMI_VFRMT_1680x720p25_64_27    80
+#define HDMI_VFRMT_1680x720p30_64_27    81
+#define HDMI_VFRMT_1680x720p50_64_27    82
+#define HDMI_VFRMT_1680x720p60_64_27    83
+#define HDMI_VFRMT_1680x720p100_64_27   84
+#define HDMI_VFRMT_1680x720p120_64_27   85
+#define HDMI_VFRMT_2560x1080p24_64_27   86
+#define HDMI_VFRMT_2560x1080p25_64_27   87
+#define HDMI_VFRMT_2560x1080p30_64_27   88
+#define HDMI_VFRMT_2560x1080p50_64_27   89
+#define HDMI_VFRMT_2560x1080p60_64_27   90
+#define HDMI_VFRMT_2560x1080p100_64_27  91
+#define HDMI_VFRMT_2560x1080p120_64_27  92
+#define HDMI_VFRMT_3840x2160p24_16_9    93
+#define HDMI_VFRMT_3840x2160p25_16_9    94
+#define HDMI_VFRMT_3840x2160p30_16_9    95
+#define HDMI_VFRMT_3840x2160p50_16_9    96
+#define HDMI_VFRMT_3840x2160p60_16_9    97
+#define HDMI_VFRMT_4096x2160p24_256_135 98
+#define HDMI_VFRMT_4096x2160p25_256_135 99
+#define HDMI_VFRMT_4096x2160p30_256_135 100
+#define HDMI_VFRMT_4096x2160p50_256_135 101
+#define HDMI_VFRMT_4096x2160p60_256_135 102
+#define HDMI_VFRMT_3840x2160p24_64_27   103
+#define HDMI_VFRMT_3840x2160p25_64_27   104
+#define HDMI_VFRMT_3840x2160p30_64_27   105
+#define HDMI_VFRMT_3840x2160p50_64_27   106
+#define HDMI_VFRMT_3840x2160p60_64_27   107
+
+/* Video Identification Codes 108-127 are reserved for future use */
+#define HDMI_VFRMT_END			127
+
+#define EVFRMT_OFF(x)			(HDMI_VFRMT_END + x)
+
+/* extended video formats */
+#define HDMI_EVFRMT_3840x2160p30_16_9	EVFRMT_OFF(1)
+#define HDMI_EVFRMT_3840x2160p25_16_9	EVFRMT_OFF(2)
+#define HDMI_EVFRMT_3840x2160p24_16_9	EVFRMT_OFF(3)
+#define HDMI_EVFRMT_4096x2160p24_16_9	EVFRMT_OFF(4)
+#define HDMI_EVFRMT_END			HDMI_EVFRMT_4096x2160p24_16_9
+
+#define WQXGA_OFF(x)			(HDMI_EVFRMT_END + x)
+
+/* WQXGA */
+#define HDMI_VFRMT_2560x1600p60_16_9	WQXGA_OFF(1)
+#define HDMI_WQXGAFRMT_END		HDMI_VFRMT_2560x1600p60_16_9
+
+#define WXGA_OFF(x)			(HDMI_WQXGAFRMT_END + x)
+
+/* WXGA */
+#define HDMI_VFRMT_1280x800p60_16_10	WXGA_OFF(1)
+#define HDMI_VFRMT_1366x768p60_16_10	WXGA_OFF(2)
+#define HDMI_WXGAFRMT_END		HDMI_VFRMT_1366x768p60_16_10
+
+#define ETI_OFF(x)			(HDMI_WXGAFRMT_END + x)
+
+/* ESTABLISHED TIMINGS I */
+#define HDMI_VFRMT_800x600p60_4_3	ETI_OFF(1)
+#define ETI_VFRMT_END			HDMI_VFRMT_800x600p60_4_3
+
+#define ETII_OFF(x)			(ETI_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS II */
+#define HDMI_VFRMT_1024x768p60_4_3	ETII_OFF(1)
+#define HDMI_VFRMT_1280x1024p60_5_4	ETII_OFF(2)
+#define ETII_VFRMT_END			HDMI_VFRMT_1280x1024p60_5_4
+
+#define ETIII_OFF(x)			(ETII_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS III */
+#define HDMI_VFRMT_848x480p60_16_9	ETIII_OFF(1)
+#define HDMI_VFRMT_1280x960p60_4_3	ETIII_OFF(2)
+#define HDMI_VFRMT_1360x768p60_16_9	ETIII_OFF(3)
+#define HDMI_VFRMT_1440x900p60_16_10	ETIII_OFF(4)
+#define HDMI_VFRMT_1400x1050p60_4_3	ETIII_OFF(5)
+#define HDMI_VFRMT_1680x1050p60_16_10	ETIII_OFF(6)
+#define HDMI_VFRMT_1600x1200p60_4_3	ETIII_OFF(7)
+#define HDMI_VFRMT_1920x1200p60_16_10	ETIII_OFF(8)
+#define ETIII_VFRMT_END			HDMI_VFRMT_1920x1200p60_16_10
+
+#define RESERVE_OFF(x)			(ETIII_VFRMT_END + x)
+
+#define HDMI_VFRMT_RESERVE1		RESERVE_OFF(1)
+#define HDMI_VFRMT_RESERVE2		RESERVE_OFF(2)
+#define HDMI_VFRMT_RESERVE3		RESERVE_OFF(3)
+#define HDMI_VFRMT_RESERVE4		RESERVE_OFF(4)
+#define HDMI_VFRMT_RESERVE5		RESERVE_OFF(5)
+#define HDMI_VFRMT_RESERVE6		RESERVE_OFF(6)
+#define HDMI_VFRMT_RESERVE7		RESERVE_OFF(7)
+#define HDMI_VFRMT_RESERVE8		RESERVE_OFF(8)
+#define RESERVE_VFRMT_END		HDMI_VFRMT_RESERVE8
+
+#define HDMI_VFRMT_MAX			(RESERVE_VFRMT_END + 1)
+
+/* Timing information for supported modes */
+#define VFRMT_NOT_SUPPORTED(VFRMT) \
+	{VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false,		\
+		HDMI_RES_AR_INVALID}
+
+#define HDMI_VFRMT_640x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_640x480p60_4_3, 640, 16, 96, 48, true,		\
+	 480, 10, 2, 33, true, 25200, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_720x480p60_4_3, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_720x480p60_16_9, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p60_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p60_16_9, 1280, 110, 40, 220, false,	\
+	 720, 5, 5, 20, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080i60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 88, 44, 148, false,	\
+	 540, 2, 5, 5, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x480i60_4_3_TIMING				\
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x480i60_16_9_TIMING				\
+	{HDMI_VFRMT_1440x480i60_16_9, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_720x576p50_4_3_TIMING				\
+	{HDMI_VFRMT_720x576p50_4_3, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x576p50_16_9_TIMING				\
+	{HDMI_VFRMT_720x576p50_16_9, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p50_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p50_16_9, 1280, 440, 40, 220, false,	\
+	 720,  5, 5, 20, false, 74250, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x576i50_4_3_TIMING				\
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x576i50_16_9_TIMING				\
+	{HDMI_VFRMT_1440x576i50_16_9, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p50_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p24_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 638, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 24000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p25_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 25000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p30_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 30000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1024x768p60_4_3_TIMING                               \
+	{HDMI_VFRMT_1024x768p60_4_3, 1024, 24, 136, 160, false,         \
+	768, 2, 6, 29, false, 65000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1280x1024p60_5_4_TIMING				\
+	{HDMI_VFRMT_1280x1024p60_5_4, 1280, 48, 112, 248, false,	\
+	1024, 1, 3, 38, false, 108000, 60000, false, true, HDMI_RES_AR_5_4, 0}
+#define HDMI_VFRMT_2560x1600p60_16_9_TIMING				\
+	{HDMI_VFRMT_2560x1600p60_16_9, 2560, 48, 32, 80, false,		\
+	 1600, 3, 6, 37, false, 268500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p30_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p25_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_4096x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_4096x2160p24_16_9, 4096, 1020, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_800x600p60_4_3_TIMING				\
+	{HDMI_VFRMT_800x600p60_4_3, 800, 40, 128, 88, false,	\
+	 600, 1, 4, 23, false, 40000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_848x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_848x480p60_16_9, 848, 16, 112, 112, false,	\
+	 480, 6, 8, 23, false, 33750, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x960p60_4_3_TIMING\
+	{HDMI_VFRMT_1280x960p60_4_3, 1280, 96, 112, 312, false,	\
+	 960, 1, 3, 36, false, 108000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1360x768p60_16_9_TIMING\
+	{HDMI_VFRMT_1360x768p60_16_9, 1360, 64, 112, 256, false,	\
+	 768, 3, 6, 18, false, 85500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x900p60_16_10_TIMING\
+	{HDMI_VFRMT_1440x900p60_16_10, 1440, 48, 32, 80, false,	\
+	 900, 3, 6, 17, true, 88750, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1400x1050p60_4_3_TIMING\
+	{HDMI_VFRMT_1400x1050p60_4_3, 1400, 48, 32, 80, false,	\
+	 1050, 3, 4, 23, true, 101000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1680x1050p60_16_10_TIMING\
+	{HDMI_VFRMT_1680x1050p60_16_10, 1680, 48, 32, 80, false,	\
+	 1050, 3, 6, 21, true, 119000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1600x1200p60_4_3_TIMING\
+	{HDMI_VFRMT_1600x1200p60_4_3, 1600, 64, 192, 304, false,	\
+	 1200, 1, 3, 46, false, 162000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1920x1200p60_16_10_TIMING\
+	{HDMI_VFRMT_1920x1200p60_16_10, 1920, 48, 32, 80, false,\
+	 1200, 3, 6, 26, true, 154000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1366x768p60_16_10_TIMING\
+	{HDMI_VFRMT_1366x768p60_16_10, 1366, 70, 143, 213, false,\
+	 768, 3, 3, 24, false, 85500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1280x800p60_16_10_TIMING\
+	{HDMI_VFRMT_1280x800p60_16_10, 1280, 72, 128, 200, true,\
+	 800, 3, 6, 22, false, 83500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_3840x2160p24_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p25_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p30_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p50_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p60_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_4096x2160p24_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p24_256_135, 4096, 1020, 88, 296, false,   \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p25_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p25_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p30_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p30_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p50_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p50_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p60_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p60_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+
+#define HDMI_VFRMT_3840x2160p24_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_64_27, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p25_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p30_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p50_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p60_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+
+#define MSM_HDMI_MODES_SET_TIMING(LUT, MODE) do {		\
+	struct msm_hdmi_mode_timing_info mode = MODE##_TIMING;	\
+	LUT[MODE] = mode;\
+	} while (0)
+
+#define MSM_HDMI_MODES_INIT_TIMINGS(__lut)	\
+do {	\
+	unsigned int i;	\
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {	\
+		struct msm_hdmi_mode_timing_info mode =	\
+			VFRMT_NOT_SUPPORTED(i);	\
+		(__lut)[i] = mode;	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_SET_SUPP_TIMINGS(__lut, __type)	\
+do {	\
+	if (__type & MSM_HDMI_MODES_CEA) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_640x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p24_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p25_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p30_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p50_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p60_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_64_27); \
+	}	\
+	if (__type & MSM_HDMI_MODES_XTND) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_4096x2160p24_16_9);	\
+	}	\
+	if (__type & MSM_HDMI_MODES_DVI) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1024x768p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x1024p60_5_4);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_2560x1600p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_800x600p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_848x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x960p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1360x768p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x900p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1400x1050p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1680x1050p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1600x1200p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1200p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1366x768p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x800p60_16_10);	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_GET_DETAILS(mode, MODE) do {		\
+	struct msm_hdmi_mode_timing_info info = MODE##_TIMING;	\
+	*mode = info;						\
+	} while (0)
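+
+/*
+ * Illustrative usage (a sketch of how a caller might build its timing LUT;
+ * "lut" is a caller-provided array):
+ *
+ *	struct msm_hdmi_mode_timing_info lut[HDMI_VFRMT_MAX];
+ *
+ *	MSM_HDMI_MODES_INIT_TIMINGS(lut);
+ *	MSM_HDMI_MODES_SET_SUPP_TIMINGS(lut, MSM_HDMI_MODES_ALL);
+ */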
+
+#endif /* _UAPI_MSM_HDMI_MODES_H__ */
diff --git a/kernel/exit.c b/kernel/exit.c
index 46a7c2b..83e8afa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -778,6 +778,7 @@
 
 	exit_signals(tsk);  /* sets PF_EXITING */
 
+	sched_exit(tsk);
 	schedtune_exit_task(tsk);
 
 	/*
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index aac12bf..a904c18 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt)	"core_ctl: " fmt
+
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -36,7 +38,7 @@
 	cpumask_t cpu_mask;
 	unsigned int need_cpus;
 	unsigned int task_thres;
-	s64 last_isolate_ts;
+	s64 need_ts;
 	struct list_head lru;
 	bool pending;
 	spinlock_t pending_lock;
@@ -50,7 +52,6 @@
 };
 
 struct cpu_data {
-	bool online;
 	bool is_busy;
 	unsigned int busy;
 	unsigned int cpu;
@@ -242,22 +243,6 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
 }
 
-static ssize_t show_cpus(const struct cluster_data *state, char *buf)
-{
-	struct cpu_data *c;
-	ssize_t count = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib) {
-		count += snprintf(buf + count, PAGE_SIZE - count,
-				  "CPU%u (%s)\n", c->cpu,
-				  c->online ? "Online" : "Offline");
-	}
-	spin_unlock_irqrestore(&state_lock, flags);
-	return count;
-}
-
 static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -277,9 +262,6 @@
 
 	for_each_possible_cpu(cpu) {
 		c = &per_cpu(cpu_state, cpu);
-		if (!c->cluster)
-			continue;
-
 		cluster = c->cluster;
 		if (!cluster || !cluster->inited)
 			continue;
@@ -289,10 +271,11 @@
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tCPU: %u\n", c->cpu);
 		count += snprintf(buf + count, PAGE_SIZE - count,
-					"\tOnline: %u\n", c->online);
+					"\tOnline: %u\n",
+					cpu_online(c->cpu));
 		count += snprintf(buf + count, PAGE_SIZE - count,
-					"\tActive: %u\n",
-					!cpu_isolated(c->cpu));
+					"\tIsolated: %u\n",
+					cpu_isolated(c->cpu));
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tFirst CPU: %u\n",
 						cluster->first_cpu);
@@ -301,6 +284,9 @@
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tIs busy: %u\n", c->is_busy);
 		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tNot preferred: %u\n",
+						c->not_preferred);
+		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tNr running: %u\n", cluster->nrrun);
 		count += snprintf(buf + count, PAGE_SIZE - count,
 			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
@@ -323,13 +309,14 @@
 	int ret;
 
 	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
-	if (ret != 1 && ret != state->num_cpus)
+	if (ret != state->num_cpus)
 		return -EINVAL;
 
-	i = 0;
 	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib)
-		c->not_preferred = val[i++];
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		c->not_preferred = val[i];
+	}
 	spin_unlock_irqrestore(&state_lock, flags);
 
 	return count;
@@ -340,11 +327,14 @@
 	struct cpu_data *c;
 	ssize_t count = 0;
 	unsigned long flags;
+	int i;
 
 	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib)
-		count += snprintf(buf + count, PAGE_SIZE - count,
-				"\tCPU:%d %u\n", c->cpu, c->not_preferred);
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		count += scnprintf(buf + count, PAGE_SIZE - count,
+				"CPU#%d: %u\n", c->cpu, c->not_preferred);
+	}
 	spin_unlock_irqrestore(&state_lock, flags);
 
 	return count;
@@ -372,7 +362,6 @@
 core_ctl_attr_rw(busy_down_thres);
 core_ctl_attr_rw(task_thres);
 core_ctl_attr_rw(is_big_cluster);
-core_ctl_attr_ro(cpus);
 core_ctl_attr_ro(need_cpus);
 core_ctl_attr_ro(active_cpus);
 core_ctl_attr_ro(global_state);
@@ -386,7 +375,6 @@
 	&busy_down_thres.attr,
 	&task_thres.attr,
 	&is_big_cluster.attr,
-	&cpus.attr,
 	&need_cpus.attr,
 	&active_cpus.attr,
 	&global_state.attr,
@@ -530,7 +518,7 @@
 
 static bool is_active(const struct cpu_data *state)
 {
-	return state->online && !cpu_isolated(state->cpu);
+	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
 }
 
 static bool adjustment_possible(const struct cluster_data *cluster,
@@ -549,6 +537,7 @@
 	bool need_flag = false;
 	unsigned int active_cpus;
 	unsigned int new_need;
+	s64 now;
 
 	if (unlikely(!cluster->inited))
 		return 0;
@@ -573,9 +562,10 @@
 	need_flag = adjustment_possible(cluster, new_need);
 
 	last_need = cluster->need_cpus;
-	cluster->need_cpus = new_need;
+	now = ktime_to_ms(ktime_get());
 
-	if (!need_flag) {
+	if (new_need == last_need) {
+		cluster->need_ts = now;
 		spin_unlock_irqrestore(&state_lock, flags);
 		return 0;
 	}
@@ -583,12 +573,15 @@
 	if (need_cpus > cluster->active_cpus) {
 		ret = 1;
 	} else if (need_cpus < cluster->active_cpus) {
-		s64 now = ktime_to_ms(ktime_get());
-		s64 elapsed = now - cluster->last_isolate_ts;
+		s64 elapsed = now - cluster->need_ts;
 
 		ret = elapsed >= cluster->offline_delay_ms;
 	}
 
+	if (ret) {
+		cluster->need_ts = now;
+		cluster->need_cpus = new_need;
+	}
 	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
 				 ret && need_flag);
 	spin_unlock_irqrestore(&state_lock, flags);
@@ -746,7 +739,6 @@
 		if (!sched_isolate_cpu(c->cpu)) {
 			c->isolated_by_us = true;
 			move_cpu_lru(c);
-			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
 		} else {
 			pr_debug("Unable to isolate CPU%u\n", c->cpu);
 		}
@@ -779,7 +771,6 @@
 		if (!sched_isolate_cpu(c->cpu)) {
 			c->isolated_by_us = true;
 			move_cpu_lru(c);
-			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
 		} else {
 			pr_debug("Unable to isolate CPU%u\n", c->cpu);
 		}
@@ -808,7 +799,7 @@
 
 		if (!c->isolated_by_us)
 			continue;
-		if ((c->online && !cpu_isolated(c->cpu)) ||
+		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
 			(!force && c->not_preferred))
 			continue;
 		if (cluster->active_cpus == need)
@@ -897,19 +888,7 @@
 		return NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-
-		/* If online state of CPU somehow got out of sync, fix it. */
-		if (state->online) {
-			state->online = false;
-			cluster->active_cpus = get_active_cpu_count(cluster);
-			pr_warn("CPU%d offline when state is online\n", cpu);
-		}
-		break;
-
 	case CPU_ONLINE:
-
-		state->online = true;
 		cluster->active_cpus = get_active_cpu_count(cluster);
 
 		/*
@@ -934,15 +913,6 @@
 		/* Move a CPU to the end of the LRU when it goes offline. */
 		move_cpu_lru(state);
 
-		/* Fall through */
-
-	case CPU_UP_CANCELED:
-
-		/* If online state of CPU somehow got out of sync, fix it. */
-		if (!state->online)
-			pr_warn("CPU%d online when state is offline\n", cpu);
-
-		state->online = false;
 		state->busy = 0;
 		cluster->active_cpus = get_active_cpu_count(cluster);
 		break;
@@ -961,6 +931,42 @@
 
 /* ============================ init code ============================== */
 
+static cpumask_var_t core_ctl_disable_cpumask;
+static bool core_ctl_disable_cpumask_present;
+
+static int __init core_ctl_disable_setup(char *str)
+{
+	if (!*str)
+		return -EINVAL;
+
+	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);
+
+	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
+		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
+		return -EINVAL;
+	}
+
+	core_ctl_disable_cpumask_present = true;
+	pr_info("disable_cpumask=%*pbl\n",
+			cpumask_pr_args(core_ctl_disable_cpumask));
+
+	return 0;
+}
+early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
+
+static bool should_skip(const struct cpumask *mask)
+{
+	if (!core_ctl_disable_cpumask_present)
+		return false;
+
+	/*
+	 * We operate on a cluster basis. Disable core_ctl for a
+	 * cluster only if all of its CPUs are specified in
+	 * core_ctl_disable_cpumask.
+	 */
+	return cpumask_subset(mask, core_ctl_disable_cpumask);
+}
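(The new boot parameter takes a standard cpulist. As a hedged illustration with
hypothetical CPU numbering: booting with core_ctl_disable_cpumask=4-7 makes
should_skip() return true for a cluster whose CPUs are exactly 4-7, so
cluster_init() and core_ctl_init() below bail out before creating the core_ctl
thread or sysfs nodes for that cluster, while a cluster spanning CPUs 0-3 is
still managed because its mask is not a subset of the disable mask.)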
+
 static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
 {
 	unsigned int i;
@@ -982,6 +988,9 @@
 	unsigned int cpu;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
+	if (should_skip(mask))
+		return 0;
+
 	if (find_cluster_by_first_cpu(first_cpu))
 		return 0;
 
@@ -1021,8 +1030,6 @@
 		state = &per_cpu(cpu_state, cpu);
 		state->cluster = cluster;
 		state->cpu = cpu;
-		if (cpu_online(cpu))
-			state->online = true;
 		list_add_tail(&state->sib, &cluster->lru);
 	}
 	cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1084,6 +1091,9 @@
 {
 	unsigned int cpu;
 
+	if (should_skip(cpu_possible_mask))
+		return 0;
+
 	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
 					* NSEC_PER_MSEC;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4eaf13e..2a8643c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5440,7 +5440,7 @@
 }
 
 static int find_new_capacity(struct energy_env *eenv,
-	const struct sched_group_energy const *sge)
+	const struct sched_group_energy * const sge)
 {
 	int idx;
 	unsigned long util = group_max_util(eenv);
@@ -10554,7 +10554,6 @@
 	u8 need_idle:1;
 	u8 need_waker_cluster:1;
 	u8 sync:1;
-	u8 ignore_prev_cpu:1;
 	enum sched_boost_policy boost_policy;
 	u8 pack_task:1;
 	int prev_cpu;
@@ -10564,6 +10563,7 @@
 	u64 cpu_load;
 	u32 sbc_best_flag;
 	u32 sbc_best_cluster_flag;
+	struct cpumask search_cpus;
 };
 
 struct cluster_cpu_stats {
@@ -10768,11 +10768,14 @@
 {
 	struct sched_cluster *next = NULL;
 	int i;
+	struct cpumask search_cpus;
 
 	while (!bitmap_empty(env->backup_list, num_clusters)) {
 		next = next_candidate(env->backup_list, 0, num_clusters);
 		__clear_bit(next->id, env->backup_list);
-		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+
+		cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
+		for_each_cpu(i, &search_cpus) {
 			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
 			sched_irqload(i), power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
@@ -10944,11 +10947,7 @@
 	int i;
 	struct cpumask search_cpus;
 
-	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
-	cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
-
-	if (env->ignore_prev_cpu)
-		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+	cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
 
 	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
 
@@ -10960,7 +10959,7 @@
 			power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
 
-		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+		if (skip_cpu(i, env))
 			continue;
 
 		update_spare_capacity(stats, env, i, c->capacity,
@@ -11015,9 +11014,7 @@
 		return false;
 
 	prev_cpu = env->prev_cpu;
-	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
-					unlikely(!cpu_active(prev_cpu)) ||
-					cpu_isolated(prev_cpu))
+	if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
 		return false;
 
 	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
@@ -11050,7 +11047,7 @@
 			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
 		update_spare_capacity(stats, env, prev_cpu,
 				cluster->capacity, env->cpu_load);
-		env->ignore_prev_cpu = 1;
+		cpumask_clear_cpu(prev_cpu, &env->search_cpus);
 		return false;
 	}
 
@@ -11066,23 +11063,17 @@
 }
 
 static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
+bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
 {
 	return sysctl_sched_prefer_sync_wakee_to_waker &&
 	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
-	       cpu_active(cpu) && !cpu_isolated(cpu);
+	       cpumask_test_cpu(cpu, &env->search_cpus);
 }
 
 static inline int
-cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
 {
-	cpumask_t tmp_mask;
-
-	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
-	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
-
-	return !cpumask_empty(&tmp_mask);
+	return cpumask_intersects(&env->search_cpus, &cluster->cpus);
 }
 
 /* return cheapest cpu that can fit this task */
@@ -11103,7 +11094,6 @@
 		.need_waker_cluster	= 0,
 		.sync			= sync,
 		.prev_cpu		= target,
-		.ignore_prev_cpu	= 0,
 		.rtg			= NULL,
 		.sbc_best_flag		= 0,
 		.sbc_best_cluster_flag	= 0,
@@ -11116,6 +11106,9 @@
 	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
 	bitmap_zero(env.backup_list, NR_CPUS);
 
+	cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
+	cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
+
 	init_cluster_cpu_stats(&stats);
 	special = env_has_special_flags(&env);
 
@@ -11125,19 +11118,19 @@
 
 	if (grp && grp->preferred_cluster) {
 		pref_cluster = grp->preferred_cluster;
-		if (!cluster_allowed(p, pref_cluster))
+		if (!cluster_allowed(&env, pref_cluster))
 			clear_bit(pref_cluster->id, env.candidate_list);
 		else
 			env.rtg = grp;
 	} else if (!special) {
 		cluster = cpu_rq(cpu)->cluster;
 		if (wake_to_waker_cluster(&env)) {
-			if (bias_to_waker_cpu(p, cpu)) {
+			if (bias_to_waker_cpu(&env, cpu)) {
 				target = cpu;
 				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
 					   SBC_FLAG_WAKER_CPU;
 				goto out;
-			} else if (cluster_allowed(p, cluster)) {
+			} else if (cluster_allowed(&env, cluster)) {
 				env.need_waker_cluster = 1;
 				bitmap_zero(env.candidate_list, NR_CPUS);
 				__set_bit(cluster->id, env.candidate_list);
@@ -11387,8 +11380,15 @@
 	nice = task_nice(p);
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
+	/*
+	 * Don't assume higher capacity means higher power. If the task
+	 * is running on the power efficient CPU, avoid migrating it
+	 * to a lower capacity cluster.
+	 */
 	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
-	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+			upmigrate_discouraged(p)) &&
+			cpu_capacity(cpu) > min_capacity &&
+			cpu_max_power_cost(cpu) == max_power_cost) {
 		rcu_read_unlock();
 		return DOWN_MIGRATION;
 	}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 1de1fb1..c0adf4e 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -454,6 +454,12 @@
 	cluster1 = container_of(a, struct sched_cluster, list);
 	cluster2 = container_of(b, struct sched_cluster, list);
 
+	/*
+	 * Don't assume higher capacity means higher power. If the
+	 * power cost is the same, sort the higher-capacity cluster
+	 * before the lower-capacity cluster so that tasks are placed
+	 * on the higher-capacity cluster first.
+	 */
 	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
 		(cluster1->max_power_cost == cluster2->max_power_cost &&
 		cluster1->max_possible_capacity <
@@ -711,7 +717,7 @@
 unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
 
 
-__read_mostly unsigned int sysctl_sched_new_task_windows = 5;
+#define SCHED_NEW_TASK_WINDOWS 5
 
 #define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
 
@@ -952,8 +958,8 @@
 unsigned int __read_mostly sysctl_sched_short_burst;
 unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
 
-static void
-_update_up_down_migrate(unsigned int *up_migrate, unsigned int *down_migrate)
+static void _update_up_down_migrate(unsigned int *up_migrate,
+			unsigned int *down_migrate, bool is_group)
 {
 	unsigned int delta;
 
@@ -967,7 +973,8 @@
 	*up_migrate >>= 10;
 	*up_migrate *= NSEC_PER_USEC;
 
-	*up_migrate = min(*up_migrate, sched_ravg_window);
+	if (!is_group)
+		*up_migrate = min(*up_migrate, sched_ravg_window);
 
 	*down_migrate /= NSEC_PER_USEC;
 	*down_migrate *= up_down_migrate_scale_factor;
@@ -982,14 +989,14 @@
 	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
 	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, false);
 	sched_upmigrate = up_migrate;
 	sched_downmigrate = down_migrate;
 
 	up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
 	down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, true);
 	sched_group_upmigrate = up_migrate;
 	sched_group_downmigrate = down_migrate;
 }
@@ -1842,7 +1849,7 @@
 
 static inline bool is_new_task(struct task_struct *p)
 {
-	return p->ravg.active_windows < sysctl_sched_new_task_windows;
+	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
 }
 
 #define INC_STEP 8
@@ -2571,7 +2578,8 @@
 	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
 }
 
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
 {
 	/*
 	 * No need to bother updating task demand for exiting tasks
@@ -2590,6 +2598,17 @@
 			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
+	/*
+	 * TASK_UPDATE can be called on a sleeping task when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+	}
+
 	return 1;
 }
 
@@ -2730,7 +2749,7 @@
 	u64 runtime;
 
 	new_window = mark_start < window_start;
-	if (!account_busy_for_task_demand(p, event)) {
+	if (!account_busy_for_task_demand(rq, p, event)) {
 		if (new_window)
 			/*
 			 * If the time accounted isn't being accounted as
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 41622ca..e7f6794 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1031,7 +1031,7 @@
 
 	unsigned int group_weight;
 	struct sched_group_capacity *sgc;
-	const struct sched_group_energy const *sge;
+	const struct sched_group_energy *sge;
 
 	/*
 	 * The CPUs this group covers.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d2a397f..f55a02b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -451,13 +451,6 @@
 		.proc_handler   = proc_dointvec,
 	},
 	{
-		.procname       = "sched_new_task_windows",
-		.data           = &sysctl_sched_new_task_windows,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = sched_window_update_handler,
-	},
-	{
 		.procname	= "sched_pred_alert_freq",
 		.data		= &sysctl_sched_pred_alert_freq,
 		.maxlen		= sizeof(unsigned int),
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 39008d7..ad538fe 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1220,7 +1220,7 @@
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
-	unsigned long long now;
+	unsigned long long now = 0;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4274797..ed7ba6d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -547,6 +547,19 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_SWITCH_PROFILER
+	bool "CPU frequency switch time profiler"
+	select GENERIC_TRACER
+	help
+	  This option enables the CPU frequency switch profiler. A file is
+	  created in debugfs called "cpu_freq_switch_profile_enabled", which
+	  defaults to zero. When a 1 is echoed into this file, profiling begins.
+	  When a zero is echoed, profiling stops. A "cpu_freq_switch" file is
+	  also created in the trace_stat directory; this file shows the
+	  switches that have occurred and duration statistics.
+
+	  If in doubt, say N.
+
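(With tracefs mounted in the usual place, profiling would typically be toggled
by writing 1 or 0 to
/sys/kernel/debug/tracing/cpu_freq_switch_profile_enabled, and the collected
per-transition statistics read back from
/sys/kernel/debug/tracing/trace_stat/cpu_freq_switch; the exact paths depend on
where debugfs/tracefs is mounted.)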
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 08e5e47..8ee9cc1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 62110a3..fa7fd14 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -588,8 +588,12 @@
 static void tsv_read_data(struct encode_context *ectxt,
 			  void *data, uint32_t size)
 {
-	if (WARN_ON((ectxt->offset + size) > MAX_MSG_SIZE))
+	if (WARN_ON((ectxt->offset + size) > MAX_MSG_SIZE)) {
+		memcpy(data, (ectxt->buff + ectxt->offset),
+			MAX_MSG_SIZE - ectxt->offset - 1);
+		ectxt->offset += MAX_MSG_SIZE - ectxt->offset - 1;
 		return;
+	}
 	memcpy(data, (ectxt->buff + ectxt->offset), size);
 	ectxt->offset += size;
 }
@@ -604,8 +608,12 @@
 static void tsv_read_header(struct encode_context *ectxt,
 			    struct tsv_header *hdr)
 {
-	if (WARN_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE))
+	if (WARN_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE)) {
+		memcpy(hdr, (ectxt->buff + ectxt->offset),
+			MAX_MSG_SIZE - ectxt->offset - 1);
+		ectxt->offset += MAX_MSG_SIZE - ectxt->offset - 1;
 		return;
+	}
 	memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
 	ectxt->offset += sizeof(*hdr);
 }
diff --git a/kernel/trace/trace_cpu_freq_switch.c b/kernel/trace/trace_cpu_freq_switch.c
new file mode 100644
index 0000000..0fcfde3
--- /dev/null
+++ b/kernel/trace/trace_cpu_freq_switch.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hrtimer.h>
+#include <linux/tracefs.h>
+#include <linux/ktime.h>
+#include <trace/events/power.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+struct trans {
+	struct rb_node node;
+	unsigned int cpu;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	unsigned int min_us;
+	unsigned int max_us;
+	ktime_t total_t;
+	unsigned int count;
+};
+static struct rb_root freq_trans_tree = RB_ROOT;
+
+static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
+			       unsigned int start_freq, unsigned int end_freq)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct trans *tr = container_of(node, struct trans, node);
+
+		if (cpu < tr->cpu)
+			node = node->rb_left;
+		else if (cpu > tr->cpu)
+			node = node->rb_right;
+		else if (start_freq < tr->start_freq)
+			node = node->rb_left;
+		else if (start_freq > tr->start_freq)
+			node = node->rb_right;
+		else if (end_freq < tr->end_freq)
+			node = node->rb_left;
+		else if (end_freq > tr->end_freq)
+			node = node->rb_right;
+		else
+			return tr;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct trans *tr)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct trans *this = container_of(*new, struct trans, node);
+
+		parent = *new;
+		if (tr->cpu < this->cpu)
+			new = &((*new)->rb_left);
+		else if (tr->cpu > this->cpu)
+			new = &((*new)->rb_right);
+		else if (tr->start_freq < this->start_freq)
+			new = &((*new)->rb_left);
+		else if (tr->start_freq > this->start_freq)
+			new = &((*new)->rb_right);
+		else if (tr->end_freq < this->end_freq)
+			new = &((*new)->rb_left);
+		else if (tr->end_freq > this->end_freq)
+			new = &((*new)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&tr->node, parent, new);
+	rb_insert_color(&tr->node, root);
+
+	return 0;
+}
+
+struct trans_state {
+	spinlock_t lock;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	ktime_t start_t;
+	bool started;
+};
+static DEFINE_PER_CPU(struct trans_state, freq_trans_state);
+
+static DEFINE_SPINLOCK(state_lock);
+
+static void probe_start(void *ignore, unsigned int start_freq,
+			unsigned int end_freq, unsigned int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	per_cpu(freq_trans_state, cpu).start_freq = start_freq;
+	per_cpu(freq_trans_state, cpu).end_freq = end_freq;
+	per_cpu(freq_trans_state, cpu).start_t = ktime_get();
+	per_cpu(freq_trans_state, cpu).started = true;
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void probe_end(void *ignore, unsigned int cpu)
+{
+	unsigned long flags;
+	struct trans *tr;
+	s64 dur_us;
+	ktime_t dur_t, end_t = ktime_get();
+
+	spin_lock_irqsave(&state_lock, flags);
+
+	if (!per_cpu(freq_trans_state, cpu).started)
+		goto out;
+
+	dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
+	dur_us = ktime_to_us(dur_t);
+
+	tr = tr_search(&freq_trans_tree, cpu,
+		       per_cpu(freq_trans_state, cpu).start_freq,
+		       per_cpu(freq_trans_state, cpu).end_freq);
+	if (!tr) {
+		tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
+		if (!tr) {
+			WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
+			goto out;
+		}
+
+		tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
+		tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
+		tr->cpu = cpu;
+		tr->min_us = UINT_MAX;
+		tr_insert(&freq_trans_tree, tr);
+	}
+	tr->total_t = ktime_add(tr->total_t, dur_t);
+	tr->count++;
+
+	if (dur_us > tr->max_us)
+		tr->max_us = dur_us;
+	if (dur_us < tr->min_us)
+		tr->min_us = dur_us;
+
+	per_cpu(freq_trans_state, cpu).started = false;
+out:
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void *freq_switch_stat_start(struct tracer_stat *trace)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_first(&freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static void *freq_switch_stat_next(void *prev, int idx)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_next(prev);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static int freq_switch_stat_show(struct seq_file *s, void *p)
+{
+	unsigned long flags;
+	struct trans *tr = p;
+
+	spin_lock_irqsave(&state_lock, flags);
+	seq_printf(s, "%3d %9d %8d %5d %6lld %6d %6d\n", tr->cpu,
+		   tr->start_freq, tr->end_freq, tr->count,
+		   div_s64(ktime_to_us(tr->total_t), tr->count),
+		   tr->min_us, tr->max_us);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return 0;
+}
+
+static void freq_switch_stat_release(void *stat)
+{
+	struct trans *tr = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	rb_erase(&tr->node, &freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+	kfree(tr);
+}
+
+static int freq_switch_stat_headers(struct seq_file *s)
+{
+	seq_puts(s, "CPU START_KHZ  END_KHZ COUNT AVG_US MIN_US MAX_US\n");
+	seq_puts(s, "  |         |        |     |      |      |      |\n");
+	return 0;
+}
+
+struct tracer_stat freq_switch_stats __read_mostly = {
+	.name = "cpu_freq_switch",
+	.stat_start = freq_switch_stat_start,
+	.stat_next = freq_switch_stat_next,
+	.stat_show = freq_switch_stat_show,
+	.stat_release = freq_switch_stat_release,
+	.stat_headers = freq_switch_stat_headers
+};
+
+static void trace_freq_switch_disable(void)
+{
+	unregister_stat_tracer(&freq_switch_stats);
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+	pr_info("disabled cpu frequency switch time profiling\n");
+}
+
+static int trace_freq_switch_enable(void)
+{
+	int ret;
+
+	ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
+	if (ret)
+		goto out;
+
+	ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
+	if (ret)
+		goto err_register_switch_end;
+
+	ret = register_stat_tracer(&freq_switch_stats);
+	if (ret)
+		goto err_register_stat_tracer;
+
+	pr_info("enabled cpu frequency switch time profiling\n");
+	return 0;
+
+err_register_stat_tracer:
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+err_register_switch_end:
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+out:
+	pr_err("failed to enable cpu frequency switch time profiling\n");
+
+	return ret;
+}
+
+static DEFINE_MUTEX(debugfs_lock);
+static bool trace_freq_switch_enabled;
+
+static int debug_toggle_tracing(void *data, u64 val)
+{
+	int ret = 0;
+
+	mutex_lock(&debugfs_lock);
+
+	if (val == 1 && !trace_freq_switch_enabled)
+		ret = trace_freq_switch_enable();
+	else if (val == 0 && trace_freq_switch_enabled)
+		trace_freq_switch_disable();
+	else if (val > 1)
+		ret = -EINVAL;
+
+	if (!ret)
+		trace_freq_switch_enabled = val;
+
+	mutex_unlock(&debugfs_lock);
+
+	return ret;
+}
+
+static int debug_tracing_state_get(void *data, u64 *val)
+{
+	mutex_lock(&debugfs_lock);
+	*val = trace_freq_switch_enabled;
+	mutex_unlock(&debugfs_lock);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
+			debug_toggle_tracing, "%llu\n");
+
+static int __init trace_freq_switch_init(void)
+{
+	struct dentry *d_tracer = tracing_init_dentry();
+
+	if (IS_ERR(d_tracer))
+		return 0;
+
+	tracefs_create_file("cpu_freq_switch_profile_enabled",
+		0644, d_tracer, NULL, &debug_tracing_state_fops);
+
+	return 0;
+}
+late_initcall(trace_freq_switch_init);
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 4bbd38e..f4ac185 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -139,7 +139,7 @@
 
 		new->ns = ns;
 		new->uid = uid;
-		atomic_set(&new->count, 0);
+		new->count = 0;
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
@@ -150,8 +150,10 @@
 			ucounts = new;
 		}
 	}
-	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+	if (ucounts->count == INT_MAX)
 		ucounts = NULL;
+	else
+		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
@@ -160,13 +162,15 @@
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
 		hlist_del_init(&ucounts->node);
-		spin_unlock_irqrestore(&ucounts_lock, flags);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
 
-		kfree(ucounts);
-	}
+	kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index f82676d..2a30d55 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -655,6 +655,13 @@
 						rmnet_header->vnd.vnd_name);
 		break;
 
+	case RMNET_NETLINK_NEW_VND_WITH_NAME:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		resp_rmnet->return_code = rmnet_create_vnd_name(
+						rmnet_header->vnd.id,
+						rmnet_header->vnd.vnd_name);
+		break;
+
 	case RMNET_NETLINK_FREE_VND:
 		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
 		/* Please check rmnet_vnd_free_dev documentation regarding
@@ -1096,10 +1103,10 @@
 
 	ASSERT_RTNL();
 	LOGL("(%d);", id);
-	return rmnet_vnd_create_dev(id, &dev, NULL);
+	return rmnet_vnd_create_dev(id, &dev, NULL, 0);
 }
 
-/* rmnet_create_vnd() - Create virtual network device node
+/* rmnet_create_vnd_prefix() - Create virtual network device node
  * @id:       RmNet virtual device node id
  * @prefix:   String prefix for device name
  *
@@ -1112,7 +1119,24 @@
 
 	ASSERT_RTNL();
 	LOGL("(%d, \"%s\");", id, prefix);
-	return rmnet_vnd_create_dev(id, &dev, prefix);
+	return rmnet_vnd_create_dev(id, &dev, prefix, 0);
+}
+
+/**
+ * rmnet_create_vnd_name() - Create virtual network device node
+ * @id:       RmNet virtual device node id
+ * @name:     Name string for the device
+ *
+ * Return:
+ *      - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd_name(int id, const char *name)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+	LOGL("(%d, \"%s\");", id, name);
+	return rmnet_vnd_create_dev(id, &dev, name, 1);
 }
 
 /* rmnet_free_vnd() - Free virtual network device node
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index b929158..5ce4600 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -121,6 +121,7 @@
 			   unsigned long event, void *data);
 int rmnet_create_vnd(int id);
 int rmnet_create_vnd_prefix(int id, const char *name);
+int rmnet_create_vnd_name(int id, const char *name);
 int rmnet_free_vnd(int id);
 
 struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index 64217bd..72f3c3b 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -556,7 +556,7 @@
  *      - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
  */
 int rmnet_vnd_create_dev(int id, struct net_device **new_device,
-			 const char *prefix)
+			 const char *prefix, int use_name)
 {
 	struct net_device *dev;
 	char dev_prefix[IFNAMSIZ];
@@ -572,11 +572,16 @@
 		return RMNET_CONFIG_DEVICE_IN_USE;
 	}
 
-	if (!prefix)
+	if (!prefix && !use_name)
 		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
 			      RMNET_DATA_DEV_NAME_STR);
-	else
+	else if (prefix && use_name)
+		p = scnprintf(dev_prefix, IFNAMSIZ, "%s", prefix);
+	else if (prefix && !use_name)
 		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
+	else
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
 	if (p >= (IFNAMSIZ - 1)) {
 		LOGE("Specified prefix longer than IFNAMSIZ");
 		return RMNET_CONFIG_BAD_ARGUMENTS;
@@ -584,7 +589,7 @@
 
 	dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
 			   dev_prefix,
-			   NET_NAME_ENUM,
+			   use_name ? NET_NAME_UNKNOWN : NET_NAME_ENUM,
 			   rmnet_vnd_setup);
 	if (!dev) {
 		LOGE("Failed to to allocate netdev for id %d", id);
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
index e0afeff..9d8eb54 100644
--- a/net/rmnet_data/rmnet_data_vnd.h
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
 struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
 int rmnet_vnd_get_name(int id, char *name, int name_len);
 int rmnet_vnd_create_dev(int id, struct net_device **new_device,
-			 const char *prefix);
+			 const char *prefix, int use_name);
 int rmnet_vnd_free_dev(int id);
 int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
 int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 206dc24..744cfe6c5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1179,6 +1179,45 @@
 }
 
 /*
+ * enable/disable flow on qdisc.
+ */
+int
+tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle, int enable_flow)
+{
+	struct Qdisc *q;
+	int qdisc_len = 0;
+	struct __qdisc_change_req {
+		struct nlattr attr;
+		struct tc_prio_qopt data;
+	} req =	{
+		.attr = {sizeof(struct __qdisc_change_req), TCA_OPTIONS},
+		.data = {3, {1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, 1}
+		};
+
+	/* override flow bit */
+	req.data.enable_flow = enable_flow;
+
+	/* look up using tcm handle */
+	q = qdisc_lookup(dev, tcm_handle);
+
+	/* call registered change function */
+	if (likely(q && q->ops)) {
+		if (likely(q->ops->change)) {
+			qdisc_len = q->q.qlen;
+			if (q->ops->change(q, &req.attr))
+				pr_err("%s(): qdisc change failed", __func__);
+		} else {
+			WARN_ONCE(1, "%s(): called on queue which does %s",
+				  __func__, "not support change() operation");
+		}
+	} else {
+		WARN_ONCE(1, "%s(): called on bad queue", __func__);
+	}
+	return qdisc_len;
+}
+EXPORT_SYMBOL(tc_qdisc_flow_control);
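A minimal caller sketch for the new export, assuming the caller holds RTNL and
already knows the qdisc handle (the function and device names below are
illustrative only):

	static void example_toggle_flow(struct net_device *dev, u32 tcm_handle)
	{
		int qlen;

		/* Stop dequeue/peek on the prio qdisc and note the backlog. */
		qlen = tc_qdisc_flow_control(dev, tcm_handle, 0);
		pr_debug("paused qdisc %#x, %d packets queued\n",
			 tcm_handle, qlen);

		/* Resume the flow later. */
		tc_qdisc_flow_control(dev, tcm_handle, 1);
	}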
+
+/*
  * Create/change qdisc.
  */
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8f57589..353c6a1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -27,6 +27,7 @@
 	struct tcf_proto __rcu *filter_list;
 	u8  prio2band[TC_PRIO_MAX+1];
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
+	u8 enable_flow;
 };
 
 
@@ -99,6 +100,9 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
+	if (!q->enable_flow)
+		return NULL;
+
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc->ops->peek(qdisc);
@@ -113,6 +117,9 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
+	if (!q->enable_flow)
+		return NULL;
+
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
@@ -137,6 +144,7 @@
 		qdisc_reset(q->queues[prio]);
 	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
+	q->enable_flow = 1;
 }
 
 static void
@@ -181,6 +189,7 @@
 	}
 
 	sch_tree_lock(sch);
+	q->enable_flow = qopt->enable_flow;
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
@@ -214,6 +223,7 @@
 	struct tc_prio_qopt opt;
 
 	opt.bands = q->bands;
+	opt.enable_flow = q->enable_flow;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2d03d5b..459577e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -992,7 +992,7 @@
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
 	int err;
-	unsigned int hash;
+	unsigned int hash = 0;
 	struct unix_address *addr;
 	struct hlist_head *list;
 	struct path path = { NULL, NULL };
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index a452ad7..f32cfa4 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -162,5 +162,13 @@
 
 source "sound/usb/line6/Kconfig"
 
+config SND_USB_AUDIO_QMI
+	tristate "USB Audio QMI Service driver"
+	depends on MSM_QMI_INTERFACE
+	help
+	  Starts a USB Audio QMI server that communicates with a remote
+	  entity to perform operations such as enabling or disabling a
+	  particular audio stream on a connected USB device.
+
 endif	# SND_USB
 
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 2d2d122..d2ac038 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -26,3 +26,4 @@
 
 obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
 obj-$(CONFIG_SND_USB_LINE6)	+= line6/
+obj-$(CONFIG_SND_USB_AUDIO_QMI) += usb_audio_qmi_v01.o usb_audio_qmi_svc.o
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 90a4e68..ccf06de 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -113,6 +113,71 @@
 static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
 static struct usb_driver usb_audio_driver;
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip))
+{
+	int idx;
+	struct snd_usb_stream *as;
+	struct snd_usb_substream *subs = NULL;
+	struct snd_usb_audio *chip = NULL;
+
+	mutex_lock(&register_mutex);
+	/*
+	 * legacy audio snd card number assignment is dynamic. Hence
+	 * search using chip->card->number
+	 */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!usb_chip[idx])
+			continue;
+		if (usb_chip[idx]->card->number == card_num) {
+			chip = usb_chip[idx];
+			break;
+		}
+	}
+
+	if (!chip || atomic_read(&chip->shutdown)) {
+		pr_debug("%s: instance of usb card # %d does not exist\n",
+			__func__, card_num);
+		goto err;
+	}
+
+	if (pcm_idx >= chip->pcm_devs) {
+		pr_err("%s: invalid pcm dev number %u > %d\n", __func__,
+			pcm_idx, chip->pcm_devs);
+		goto err;
+	}
+
+	if (direction > SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: invalid direction %u\n", __func__, direction);
+		goto err;
+	}
+
+	list_for_each_entry(as, &chip->pcm_list, list) {
+		if (as->pcm_index == pcm_idx) {
+			subs = &as->substream[direction];
+			if (subs->interface < 0 && !subs->data_endpoint &&
+				!subs->sync_endpoint) {
+				pr_debug("%s: stream disconnected, bail out\n",
+					__func__);
+				subs = NULL;
+				goto err;
+			}
+			goto done;
+		}
+	}
+
+done:
+	chip->card_num = card_num;
+	chip->disconnect_cb = disconnect_cb;
+err:
+	*uchip = chip;
+	if (!subs)
+		pr_debug("%s: substream instance not found\n", __func__);
+	mutex_unlock(&register_mutex);
+	return subs;
+}
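A hedged usage sketch of the new lookup helper; the card number, PCM device
number and callback below are illustrative only:

	static void example_disconnect_cb(struct snd_usb_audio *chip)
	{
		pr_debug("USB audio card %d is going away\n", chip->card_num);
	}

	static struct snd_usb_substream *example_lookup(void)
	{
		struct snd_usb_audio *chip;

		/* card 0, PCM device 0, playback direction */
		return find_snd_usb_substream(0, 0, SNDRV_PCM_STREAM_PLAYBACK,
					      &chip, example_disconnect_cb);
	}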
+
 /*
  * disconnect streams
  * called from usb_audio_disconnect()
@@ -325,6 +390,7 @@
 	list_for_each_entry_safe(ep, n, &chip->ep_list, list)
 		snd_usb_endpoint_free(ep);
 
+	mutex_destroy(&chip->dev_lock);
 	mutex_destroy(&chip->mutex);
 	if (!atomic_read(&chip->shutdown))
 		dev_set_drvdata(&chip->dev->dev, NULL);
@@ -383,6 +449,7 @@
 	}
 
 	mutex_init(&chip->mutex);
+	mutex_init(&chip->dev_lock);
 	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
@@ -630,6 +697,8 @@
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
+	intf->needs_remote_wakeup = 1;
+	usb_enable_autosuspend(chip->dev);
 	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
@@ -659,6 +728,9 @@
 
 	card = chip->card;
 
+	if (chip->disconnect_cb)
+		chip->disconnect_cb(chip);
+
 	mutex_lock(&register_mutex);
 	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 111b0f0..25cddcc 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -167,4 +167,8 @@
 	struct list_head list;
 };
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip));
+
 #endif /* __USBAUDIO_CARD_H */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c5251aa..70e1477 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -357,7 +357,7 @@
 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
 		if (err < 0)
 			usb_audio_err(ep->chip,
-				"Unable to submit urb #%d: %d (urb %p)\n",
+				"Unable to submit urb #%d: %d (urb %pK)\n",
 				ctx->index, err, ctx->urb);
 		else
 			set_bit(ctx->index, &ep->active_mask);
@@ -459,7 +459,7 @@
 		    ep->iface == alts->desc.bInterfaceNumber &&
 		    ep->altsetting == alts->desc.bAlternateSetting) {
 			usb_audio_dbg(ep->chip,
-				      "Re-using EP %x in iface %d,%d @%p\n",
+				      "Re-using EP %x in iface %d,%d @%pK\n",
 					ep_num, ep->iface, ep->altsetting, ep);
 			goto __exit_unlock;
 		}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 48afae0..db85d92 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -228,7 +228,7 @@
 	if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) {
 		struct snd_usb_endpoint *ep = subs->data_endpoint;
 
-		dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting data EP @%pK\n", ep);
 
 		ep->data_subs = subs;
 		err = snd_usb_endpoint_start(ep);
@@ -257,7 +257,7 @@
 			}
 		}
 
-		dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting sync EP @%pK\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
 		err = snd_usb_endpoint_start(ep);
@@ -554,6 +554,70 @@
 	return 0;
 }
 
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable)
+{
+	struct audioformat *fmt;
+	struct usb_host_interface *alts;
+	struct usb_interface *iface;
+	int ret;
+
+	if (!enable) {
+		if (subs->interface >= 0) {
+			usb_set_interface(subs->dev, subs->interface, 0);
+			subs->altset_idx = 0;
+			subs->interface = -1;
+			subs->cur_audiofmt = NULL;
+		}
+
+		snd_usb_autosuspend(subs->stream->chip);
+		return 0;
+	}
+
+	snd_usb_autoresume(subs->stream->chip);
+	fmt = find_format(subs);
+	if (!fmt) {
+		dev_err(&subs->dev->dev,
+		"cannot set format: format = %#x, rate = %d, channels = %d\n",
+			   subs->pcm_format, subs->cur_rate, subs->channels);
+		return -EINVAL;
+	}
+
+	subs->altset_idx = 0;
+	subs->interface = -1;
+	if (atomic_read(&subs->stream->chip->shutdown)) {
+		ret = -ENODEV;
+	} else {
+		ret = set_format(subs, fmt);
+		if (ret < 0)
+			return ret;
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		if (!iface) {
+			dev_err(&subs->dev->dev, "Could not get iface %d\n",
+				subs->cur_audiofmt->iface);
+			return -ENODEV;
+		}
+
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0) {
+			dev_err(&subs->dev->dev, "failed to set rate %d\n",
+				subs->cur_rate);
+			return ret;
+		}
+	}
+
+	subs->interface = fmt->iface;
+	subs->altset_idx = fmt->altset_idx;
+
+	return 0;
+}
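A hedged sketch of how the new helper is meant to be driven, assuming the
caller has already filled in subs->pcm_format, subs->cur_rate and
subs->channels before enabling the stream:

	static int example_stream_cycle(struct snd_usb_substream *subs)
	{
		int ret;

		ret = snd_usb_enable_audio_stream(subs, true);
		if (ret) {
			pr_err("failed to enable stream: %d\n", ret);
			return ret;
		}

		/* ... the remote entity runs the stream here ... */

		return snd_usb_enable_audio_stream(subs, false);
	}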
+
 /*
  * Return the score of matching two audioformats.
  * Veto the audioformat if:
@@ -571,13 +635,13 @@
 
 	if (fp->channels < 1) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no channels\n", __func__, fp);
+			"%s: (fmt @%pK) no channels\n", __func__, fp);
 		return 0;
 	}
 
 	if (!(fp->formats & pcm_format_to_bits(pcm_format))) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for format %d\n", __func__,
+			"%s: (fmt @%pK) no match for format %d\n", __func__,
 			fp, pcm_format);
 		return 0;
 	}
@@ -590,7 +654,7 @@
 	}
 	if (!score) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for rate %d\n", __func__,
+			"%s: (fmt @%pK) no match for rate %d\n", __func__,
 			fp, rate);
 		return 0;
 	}
@@ -599,7 +663,7 @@
 		score++;
 
 	dev_dbg(&subs->dev->dev,
-		"%s: (fmt @%p) score %d\n", __func__, fp, score);
+		"%s: (fmt @%pK) score %d\n", __func__, fp, score);
 
 	return score;
 }
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index df7a003..d581f94 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -9,6 +9,7 @@
 int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 		       struct usb_host_interface *alts,
 		       struct audioformat *fmt);
-
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable);
 
 #endif /* __USBAUDIO_PCM_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 8e9548bc..7437cd5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -69,9 +69,14 @@
 static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
 {
 	struct snd_usb_stream *stream = pcm->private_data;
+	struct snd_usb_audio *chip;
+
 	if (stream) {
+		mutex_lock(&stream->chip->dev_lock);
+		chip = stream->chip;
 		stream->pcm = NULL;
 		snd_usb_audio_stream_free(stream);
+		mutex_unlock(&chip->dev_lock);
 	}
 }
 
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
new file mode 100644
index 0000000..5a1974e
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -0,0 +1,1325 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/uaccess.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include <sound/asound.h>
+#include <linux/usb.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "helper.h"
+#include "pcm.h"
+#include "usb_audio_qmi_v01.h"
+
+#define SND_PCM_CARD_NUM_MASK 0xffff0000
+#define SND_PCM_DEV_NUM_MASK 0xff00
+#define SND_PCM_STREAM_DIRECTION 0xff
+
+#define PREPEND_SID_TO_IOVA(iova, sid) (u64)(((u64)(iova)) | \
+					(((u64)sid) << 32))
+
+/*  event ring iova base address */
+#define IOVA_BASE 0x1000
+
+#define IOVA_DCBA_BASE 0x2000
+#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
+#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
+#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
+
+#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
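As a worked illustration of the layout above, assuming 4 KiB pages and
SNDRV_CARDS == 8 (both depend on the kernel configuration): the event ring is
mapped at 0x1000, the DCBA region starts at 0x2000 and spans nine pages,
transfer rings start at IOVA_XFER_RING_BASE = 0x2000 + 9 * 0x1000 = 0xb000,
transfer buffers start at IOVA_XFER_BUF_BASE = 0xb000 + 8 * 32 * 0x1000 =
0x10b000, and transfer-buffer mappings are capped one page below 0xfffff000.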
+
+struct iova_info {
+	struct list_head list;
+	unsigned long start_iova;
+	size_t size;
+	bool in_use;
+};
+
+struct intf_info {
+	unsigned long data_xfer_ring_va;
+	size_t data_xfer_ring_size;
+	unsigned long sync_xfer_ring_va;
+	size_t sync_xfer_ring_size;
+	unsigned long xfer_buf_va;
+	size_t xfer_buf_size;
+	phys_addr_t xfer_buf_pa;
+	u8 *xfer_buf;
+	u8 intf_num;
+	u8 pcm_card_num;
+	u8 pcm_dev_num;
+	u8 direction;
+	bool in_use;
+};
+
+struct uaudio_dev {
+	struct usb_device *udev;
+	/* audio control interface */
+	struct usb_host_interface *ctrl_intf;
+	unsigned int card_num;
+	atomic_t in_use;
+	struct kref kref;
+	unsigned long dcba_iova;
+	size_t dcba_size;
+	wait_queue_head_t disconnect_wq;
+
+	/* interface specific */
+	int num_intf;
+	struct intf_info *info;
+};
+
+static struct uaudio_dev uadev[SNDRV_CARDS];
+
+struct uaudio_qmi_dev {
+	struct device *dev;
+	u32 sid;
+	u32 intr_num;
+	struct iommu_domain *domain;
+
+	/* list to keep track of available iova */
+	struct list_head dcba_list;
+	size_t dcba_iova_size;
+	unsigned long curr_dcba_iova;
+	struct list_head xfer_ring_list;
+	size_t xfer_ring_iova_size;
+	unsigned long curr_xfer_ring_iova;
+	struct list_head xfer_buf_list;
+	size_t xfer_buf_iova_size;
+	unsigned long curr_xfer_buf_iova;
+	/* bit fields representing pcm card enabled */
+	unsigned long card_slot;
+	/* cache event ring phys addr */
+	u64 er_phys_addr;
+};
+
+static struct uaudio_qmi_dev *uaudio_qdev;
+
+struct uaudio_qmi_svc {
+	struct qmi_handle *uaudio_svc_hdl;
+	void *curr_conn;
+	struct work_struct recv_msg_work;
+	struct work_struct qmi_disconnect_work;
+	struct workqueue_struct *uaudio_wq;
+	ktime_t t_request_recvd;
+	ktime_t t_resp_sent;
+};
+
+static struct uaudio_qmi_svc *uaudio_svc;
+
+static struct msg_desc uaudio_stream_req_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_REQ_V01,
+	.ei_array = qmi_uaudio_stream_req_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_resp_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_RESP_V01,
+	.ei_array = qmi_uaudio_stream_resp_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_ind_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UADUIO_STREAM_IND_V01,
+	.ei_array = qmi_uaudio_stream_ind_msg_v01_ei,
+};
+
+enum mem_type {
+	MEM_EVENT_RING,
+	MEM_DCBA,
+	MEM_XFER_RING,
+	MEM_XFER_BUF,
+};
+
+enum usb_qmi_audio_format {
+	USB_QMI_PCM_FORMAT_S8 = 0,
+	USB_QMI_PCM_FORMAT_U8,
+	USB_QMI_PCM_FORMAT_S16_LE,
+	USB_QMI_PCM_FORMAT_S16_BE,
+	USB_QMI_PCM_FORMAT_U16_LE,
+	USB_QMI_PCM_FORMAT_U16_BE,
+	USB_QMI_PCM_FORMAT_S24_LE,
+	USB_QMI_PCM_FORMAT_S24_BE,
+	USB_QMI_PCM_FORMAT_U24_LE,
+	USB_QMI_PCM_FORMAT_U24_BE,
+	USB_QMI_PCM_FORMAT_S24_3LE,
+	USB_QMI_PCM_FORMAT_S24_3BE,
+	USB_QMI_PCM_FORMAT_U24_3LE,
+	USB_QMI_PCM_FORMAT_U24_3BE,
+	USB_QMI_PCM_FORMAT_S32_LE,
+	USB_QMI_PCM_FORMAT_S32_BE,
+	USB_QMI_PCM_FORMAT_U32_LE,
+	USB_QMI_PCM_FORMAT_U32_BE,
+};
+
+static unsigned long uaudio_get_iova(unsigned long *curr_iova,
+	size_t *curr_iova_size, struct list_head *head, size_t size)
+{
+	struct iova_info *info, *new_info = NULL;
+	struct list_head *curr_head;
+	unsigned long va = 0;
+	size_t tmp_size = size;
+	bool found = false;
+
+	if (size % PAGE_SIZE) {
+		pr_err("%s: size %zu is not page size multiple\n", __func__,
+			size);
+		goto done;
+	}
+
+	if (size > *curr_iova_size) {
+		pr_err("%s: size %zu > curr size %zu\n", __func__, size,
+			*curr_iova_size);
+		goto done;
+	}
+	if (*curr_iova_size == 0) {
+		pr_err("%s: iova mapping is full\n", __func__);
+		goto done;
+	}
+
+	list_for_each_entry(info, head, list) {
+		/* exact size iova_info */
+		if (!info->in_use && info->size == size) {
+			info->in_use = true;
+			va = info->start_iova;
+			*curr_iova_size -= size;
+			found = true;
+			pr_debug("%s: exact size :%zu found\n", __func__, size);
+			goto done;
+		} else if (!info->in_use && tmp_size >= info->size) {
+			if (!new_info)
+				new_info = info;
+			pr_debug("%s: partial size: %zu found\n", __func__,
+				info->size);
+			tmp_size -= info->size;
+			if (tmp_size)
+				continue;
+
+			va = new_info->start_iova;
+			for (curr_head = &new_info->list; curr_head !=
+			&info->list; curr_head = curr_head->next) {
+				new_info = list_entry(curr_head, struct
+						iova_info, list);
+				new_info->in_use = true;
+			}
+			info->in_use = true;
+			*curr_iova_size -= size;
+			found = true;
+			goto done;
+		} else {
+			/* iova region in use */
+			new_info = NULL;
+			tmp_size = size;
+		}
+	}
+
+	info = kzalloc(sizeof(struct iova_info), GFP_KERNEL);
+	if (!info) {
+		va = 0;
+		goto done;
+	}
+
+	va = info->start_iova = *curr_iova;
+	info->size = size;
+	info->in_use = true;
+	*curr_iova += size;
+	*curr_iova_size -= size;
+	found = true;
+	list_add_tail(&info->list, head);
+
+done:
+	if (!found)
+		pr_err("%s: unable to find %zu size iova\n", __func__, size);
+	else
+		pr_debug("%s: va:%lu curr_iova:%lu curr_iova_size:%zu\n",
+		__func__, va, *curr_iova, *curr_iova_size);
+
+	return va;
+}
+
+static unsigned long uaudio_iommu_map(enum mem_type mtype, phys_addr_t pa,
+		size_t size)
+{
+	unsigned long va = 0;
+	bool map = true;
+	int ret;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		va = IOVA_BASE;
+		/* er already mapped */
+		if (uaudio_qdev->er_phys_addr == pa)
+			map = false;
+		break;
+	case MEM_DCBA:
+		va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
+		&uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
+		break;
+	case MEM_XFER_RING:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
+		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
+		size);
+		break;
+	case MEM_XFER_BUF:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova,
+		&uaudio_qdev->xfer_buf_iova_size, &uaudio_qdev->xfer_buf_list,
+		size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+	}
+
+	if (!va)
+		map = false;
+
+	if (!map)
+		goto done;
+
+	pr_debug("%s: map pa %pa to iova %lu for memtype %d\n", __func__, &pa,
+		va, mtype);
+	ret = iommu_map(uaudio_qdev->domain, va, pa, size,
+		IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+	if (ret)
+		pr_err("%s:failed to map pa:%pa iova:%lu memtype:%d ret:%d\n",
+			__func__, &pa, va, mtype, ret);
+done:
+	return va;
+}
+
+static void uaudio_put_iova(unsigned long va, size_t size, struct list_head
+	*head, size_t *curr_iova_size)
+{
+	struct iova_info *info;
+	size_t tmp_size = size;
+	bool found = false;
+
+	list_for_each_entry(info, head, list) {
+		if (info->start_iova == va) {
+			if (!info->in_use) {
+				pr_err("%s: va %lu is not in use\n", __func__,
+					va);
+				return;
+			}
+			found = true;
+			info->in_use = false;
+			if (info->size == size)
+				goto done;
+		}
+
+		if (found && tmp_size >= info->size) {
+			info->in_use = false;
+			tmp_size -= info->size;
+			if (!tmp_size)
+				goto done;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s: unable to find the va %lu\n", __func__, va);
+		return;
+	}
+done:
+	*curr_iova_size += size;
+	pr_debug("%s: curr_iova_size %zu\n", __func__, *curr_iova_size);
+}
+
+static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
+	size_t size)
+{
+	size_t umap_size;
+	bool unmap = true;
+
+	if (!va || !size)
+		return;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		if (uaudio_qdev->er_phys_addr)
+			uaudio_qdev->er_phys_addr = 0;
+		else
+			unmap = false;
+		break;
+	case MEM_DCBA:
+		uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
+		&uaudio_qdev->dcba_iova_size);
+		break;
+	case MEM_XFER_RING:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
+		&uaudio_qdev->xfer_ring_iova_size);
+		break;
+	case MEM_XFER_BUF:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_buf_list,
+		&uaudio_qdev->xfer_buf_iova_size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+		unmap = false;
+	}
+
+	if (!unmap)
+		return;
+
+	pr_debug("%s: unmap iova %lu for memtype %d\n", __func__, va, mtype);
+
+	umap_size = iommu_unmap(uaudio_qdev->domain, va, size);
+	if (umap_size != size)
+		pr_err("%s: unmapped size %zu for iova %lu\n", __func__,
+		umap_size, va);
+}
+
+static int prepare_qmi_response(struct snd_usb_substream *subs,
+		struct qmi_uaudio_stream_req_msg_v01 *req_msg,
+		struct qmi_uaudio_stream_resp_msg_v01 *resp, int info_idx)
+{
+	struct usb_interface *iface;
+	struct usb_host_interface *alts;
+	struct usb_interface_descriptor *altsd;
+	struct usb_host_endpoint *ep;
+	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_discrete_descriptor *fmt_v1;
+	struct uac_format_type_i_ext_descriptor *fmt_v2;
+	struct uac1_as_header_descriptor *as;
+	int ret = -ENODEV;
+	int protocol, card_num, pcm_dev_num;
+	void *hdr_ptr;
+	u8 *xfer_buf;
+	u32 len, mult, remainder, xfer_buf_len;
+	unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
+	xfer_buf_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa;
+
+	iface = usb_ifnum_to_if(subs->dev, subs->interface);
+	if (!iface) {
+		pr_err("%s: interface # %d does not exist\n", __func__,
+			subs->interface);
+		goto err;
+	}
+
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+	xfer_buf_len = req_msg->xfer_buff_size;
+
+	alts = &iface->altsetting[subs->altset_idx];
+	altsd = get_iface_desc(alts);
+	protocol = altsd->bInterfaceProtocol;
+
+	/* get format type */
+	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_FORMAT_TYPE);
+	if (!fmt) {
+		pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
+			subs->interface, subs->altset_idx);
+		goto err;
+	}
+
+	if (!uadev[card_num].ctrl_intf) {
+		pr_err("%s: audio ctrl intf info not cached\n", __func__);
+		goto err;
+	}
+
+	hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
+					uadev[card_num].ctrl_intf->extralen,
+					NULL, UAC_HEADER);
+	if (!hdr_ptr) {
+		pr_err("%s: no UAC_HEADER desc\n", __func__);
+		goto err;
+	}
+
+	if (protocol == UAC_VERSION_1) {
+		as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_AS_GENERAL);
+		if (!as) {
+			pr_err("%s: %u:%d : no UAC_AS_GENERAL desc\n", __func__,
+				subs->interface, subs->altset_idx);
+			goto err;
+		}
+		resp->data_path_delay = as->bDelay;
+		resp->data_path_delay_valid = 1;
+		fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac1_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_2) {
+		fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else {
+		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
+		goto err;
+	}
+
+	resp->slot_id = subs->dev->slot_id;
+	resp->slot_id_valid = 1;
+
+	memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc));
+	resp->std_as_opr_intf_desc_valid = 1;
+
+	ep = usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe);
+	if (!ep) {
+		pr_err("%s: data ep # %d context is null\n", __func__,
+			subs->data_endpoint->ep_num);
+		goto err;
+	}
+	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
+	resp->std_as_data_ep_desc_valid = 1;
+
+	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get data ep ring dma address\n", __func__);
+		goto err;
+	}
+
+	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+
+	if (subs->sync_endpoint) {
+		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
+		if (!ep) {
+			pr_debug("%s: implicit fb on data ep\n", __func__);
+			goto skip_sync_ep;
+		}
+		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
+		resp->std_as_sync_ep_desc_valid = 1;
+
+		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+		if (!xhci_pa) {
+			pr_err("%s:failed to get sync ep ring dma address\n",
+				__func__);
+			goto err;
+		}
+		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+	}
+
+skip_sync_ep:
+	resp->interrupter_num = uaudio_qdev->intr_num;
+	resp->interrupter_num_valid = 1;
+
+	/*  map xhci data structures PA memory to iova */
+
+	/* event ring */
+	ret = usb_sec_event_ring_setup(subs->dev, resp->interrupter_num);
+	if (ret) {
+		pr_err("%s: failed to setup sec event ring ret %d\n", __func__,
+			ret);
+		goto err;
+	}
+	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
+			resp->interrupter_num);
+	if (!xhci_pa) {
+		pr_err("%s: failed to get sec event ring dma address\n",
+		__func__);
+		goto err;
+	}
+
+	va = uaudio_iommu_map(MEM_EVENT_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto err;
+
+	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
+	uaudio_qdev->er_phys_addr = xhci_pa;
+
+	/* dcba */
+	xhci_pa = usb_get_dcba_dma_addr(subs->dev);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get dcba dma address\n", __func__);
+		goto unmap_er;
+	}
+
+	if (!uadev[card_num].dcba_iova) { /* mapped per usb device */
+		va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
+		if (!va)
+			goto unmap_er;
+
+		uadev[card_num].dcba_iova = va;
+		uadev[card_num].dcba_size = PAGE_SIZE;
+	}
+
+	dcba_va = uadev[card_num].dcba_iova;
+	resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.dcba.pa = xhci_pa;
+	resp->xhci_mem_info.dcba.size = PAGE_SIZE;
+
+	/* data transfer ring */
+	xhci_pa = resp->xhci_mem_info.tr_data.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_dcba;
+
+	tr_data_va = va;
+	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_data.size = PAGE_SIZE;
+
+	/* sync transfer ring */
+	if (!resp->xhci_mem_info.tr_sync.pa)
+		goto skip_sync;
+
+	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_data;
+
+	tr_sync_va = va;
+	resp->xhci_mem_info.tr_sync.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_sync.size = PAGE_SIZE;
+
+skip_sync:
+	/* xfer buffer: round the requested length up to a whole number of pages */
+	if (!xfer_buf_len)
+		xfer_buf_len = PAGE_SIZE;
+
+	mult = xfer_buf_len / PAGE_SIZE;
+	remainder = xfer_buf_len % PAGE_SIZE;
+	len = mult * PAGE_SIZE;
+	len += remainder ? PAGE_SIZE : 0;
+
+	if (len > MAX_XFER_BUFF_LEN) {
+		pr_err("%s: req buf len %d > max buf len %lu, setting %lu\n",
+		__func__, len, MAX_XFER_BUFF_LEN, MAX_XFER_BUFF_LEN);
+		len = MAX_XFER_BUFF_LEN;
+	}
+
+	xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_pa);
+	if (!xfer_buf)
+		goto unmap_sync;
+
+	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
+	resp->xhci_mem_info.xfer_buff.size = len;
+
+	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
+	if (!va)
+		goto unmap_sync;
+
+	xfer_buf_va = va;
+	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+
+	resp->xhci_mem_info_valid = 1;
+
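+	/*
+	 * First enabled stream on this card: allocate the per-interface
+	 * bookkeeping and take the initial reference; subsequent streams
+	 * on the same card only take an additional reference.
+	 */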
+	if (!atomic_read(&uadev[card_num].in_use)) {
+		kref_init(&uadev[card_num].kref);
+		init_waitqueue_head(&uadev[card_num].disconnect_wq);
+		uadev[card_num].num_intf =
+			subs->dev->config->desc.bNumInterfaces;
+		uadev[card_num].info =
+			kzalloc(sizeof(struct intf_info) *
+			uadev[card_num].num_intf, GFP_KERNEL);
+		if (!uadev[card_num].info) {
+			ret = -ENOMEM;
+			goto unmap_xfer_buf;
+		}
+		uadev[card_num].udev = subs->dev;
+		atomic_set(&uadev[card_num].in_use, 1);
+	} else {
+		kref_get(&uadev[card_num].kref);
+	}
+
+	uadev[card_num].card_num = card_num;
+
+	/* cache intf specific info to use it for unmap and free xfer buf */
+	uadev[card_num].info[info_idx].data_xfer_ring_va = tr_data_va;
+	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
+	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
+	uadev[card_num].info[info_idx].xfer_buf_size = len;
+	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
+	uadev[card_num].info[info_idx].pcm_card_num = card_num;
+	uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num;
+	uadev[card_num].info[info_idx].direction = subs->direction;
+	uadev[card_num].info[info_idx].intf_num = subs->interface;
+	uadev[card_num].info[info_idx].in_use = true;
+
+	set_bit(card_num, &uaudio_qdev->card_slot);
+
+	return 0;
+
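+	/* error unwind: undo mappings/allocations in reverse order */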
+unmap_xfer_buf:
+	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+unmap_sync:
+	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
+unmap_data:
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
+unmap_dcba:
+	uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
+unmap_er:
+	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+err:
+	return ret;
+}
+
+static void uaudio_dev_intf_cleanup(struct usb_device *udev,
+	struct intf_info *info)
+{
+	uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
+		info->data_xfer_ring_size);
+	info->data_xfer_ring_va = 0;
+	info->data_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va,
+		info->sync_xfer_ring_size);
+	info->sync_xfer_ring_va = 0;
+	info->sync_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_va,
+		info->xfer_buf_size);
+	info->xfer_buf_va = 0;
+
+	usb_free_coherent(udev, info->xfer_buf_size,
+		info->xfer_buf, info->xfer_buf_pa);
+	info->xfer_buf_size = 0;
+	info->xfer_buf = NULL;
+	info->xfer_buf_pa = 0;
+
+	info->in_use = false;
+}
+
+static void uaudio_dev_cleanup(struct uaudio_dev *dev)
+{
+	int if_idx;
+
+	/* free xfer buffer and unmap xfer ring and buf per interface */
+	for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
+		if (!dev->info[if_idx].in_use)
+			continue;
+		uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]);
+		pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
+			dev->info[if_idx].intf_num, dev->card_num);
+	}
+
+	/* iommu_unmap dcba iova for a usb device */
+	uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
+
+	dev->dcba_iova = 0;
+	dev->dcba_size = 0;
+	dev->num_intf = 0;
+
+	/* free interface info */
+	kfree(dev->info);
+	dev->info = NULL;
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	dev->udev = NULL;
+}
+
+static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+{
+	int ret;
+	struct uaudio_dev *dev;
+	int card_num = chip->card_num;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct qmi_uaudio_stream_ind_msg_v01 disconnect_ind = {0};
+
+	pr_debug("%s: for card# %d\n", __func__, card_num);
+
+	if (card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card number\n", __func__);
+		return;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	dev = &uadev[card_num];
+
+	/* clean up */
+	if (!dev->udev) {
+		pr_debug("%s: no clean up required\n", __func__);
+		goto done;
+	}
+
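+	/*
+	 * If streams are still in use, notify the QMI client of the
+	 * disconnect and wait for it to release them before cleaning up.
+	 */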
+	if (atomic_read(&dev->in_use)) {
+		mutex_unlock(&chip->dev_lock);
+
+		pr_debug("%s: sending qmi indication disconnect\n", __func__);
+		disconnect_ind.dev_event = USB_AUDIO_DEV_DISCONNECT_V01;
+		disconnect_ind.slot_id = dev->udev->slot_id;
+		ret = qmi_send_ind(svc->uaudio_svc_hdl, svc->curr_conn,
+				&uaudio_stream_ind_desc, &disconnect_ind,
+				sizeof(disconnect_ind));
+		if (ret < 0) {
+			pr_err("%s: qmi send failed wiht err: %d\n",
+					__func__, ret);
+			return;
+		}
+
+		ret = wait_event_interruptible(dev->disconnect_wq,
+				!atomic_read(&dev->in_use));
+		if (ret < 0) {
+			pr_debug("%s: failed with ret %d\n", __func__, ret);
+			return;
+		}
+		mutex_lock(&chip->dev_lock);
+	}
+
+	uaudio_dev_cleanup(dev);
+done:
+	mutex_unlock(&chip->dev_lock);
+}
+
+static void uaudio_dev_release(struct kref *kref)
+{
+	struct uaudio_dev *dev = container_of(kref, struct uaudio_dev, kref);
+
+	pr_debug("%s for dev %pK\n", __func__, dev);
+
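+	/*
+	 * Last stream reference dropped: free the card slot and let a
+	 * pending disconnect proceed.
+	 */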
+	atomic_set(&dev->in_use, 0);
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	wake_up(&dev->disconnect_wq);
+}
+
+/* maps audio format received over QMI to asound.h based pcm format */
+static int map_pcm_format(unsigned int fmt_received)
+{
+	switch (fmt_received) {
+	case USB_QMI_PCM_FORMAT_S8:
+		return SNDRV_PCM_FORMAT_S8;
+	case USB_QMI_PCM_FORMAT_U8:
+		return SNDRV_PCM_FORMAT_U8;
+	case USB_QMI_PCM_FORMAT_S16_LE:
+		return SNDRV_PCM_FORMAT_S16_LE;
+	case USB_QMI_PCM_FORMAT_S16_BE:
+		return SNDRV_PCM_FORMAT_S16_BE;
+	case USB_QMI_PCM_FORMAT_U16_LE:
+		return SNDRV_PCM_FORMAT_U16_LE;
+	case USB_QMI_PCM_FORMAT_U16_BE:
+		return SNDRV_PCM_FORMAT_U16_BE;
+	case USB_QMI_PCM_FORMAT_S24_LE:
+		return SNDRV_PCM_FORMAT_S24_LE;
+	case USB_QMI_PCM_FORMAT_S24_BE:
+		return SNDRV_PCM_FORMAT_S24_BE;
+	case USB_QMI_PCM_FORMAT_U24_LE:
+		return SNDRV_PCM_FORMAT_U24_LE;
+	case USB_QMI_PCM_FORMAT_U24_BE:
+		return SNDRV_PCM_FORMAT_U24_BE;
+	case USB_QMI_PCM_FORMAT_S24_3LE:
+		return SNDRV_PCM_FORMAT_S24_3LE;
+	case USB_QMI_PCM_FORMAT_S24_3BE:
+		return SNDRV_PCM_FORMAT_S24_3BE;
+	case USB_QMI_PCM_FORMAT_U24_3LE:
+		return SNDRV_PCM_FORMAT_U24_3LE;
+	case USB_QMI_PCM_FORMAT_U24_3BE:
+		return SNDRV_PCM_FORMAT_U24_3BE;
+	case USB_QMI_PCM_FORMAT_S32_LE:
+		return SNDRV_PCM_FORMAT_S32_LE;
+	case USB_QMI_PCM_FORMAT_S32_BE:
+		return SNDRV_PCM_FORMAT_S32_BE;
+	case USB_QMI_PCM_FORMAT_U32_LE:
+		return SNDRV_PCM_FORMAT_U32_LE;
+	case USB_QMI_PCM_FORMAT_U32_BE:
+		return SNDRV_PCM_FORMAT_U32_BE;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int info_idx_from_ifnum(int card_num, int intf_num, bool enable)
+{
+	int i;
+
+	/*
+	 * default index 0 is used when info is allocated upon
+	 * first enable audio stream req for a pcm device
+	 */
+	if (enable && !uadev[card_num].info)
+		return 0;
+
+	for (i = 0; i < uadev[card_num].num_intf; i++) {
+		if (enable && !uadev[card_num].info[i].in_use)
+			return i;
+		else if (!enable &&
+				uadev[card_num].info[i].intf_num == intf_num)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static int handle_uaudio_stream_req(void *req_h, void *req)
+{
+	struct qmi_uaudio_stream_req_msg_v01 *req_msg;
+	struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0};
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct intf_info *info;
+	int pcm_format;
+	u8 pcm_card_num, pcm_dev_num, direction;
+	int info_idx = -EINVAL, ret = 0;
+
+	req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)req;
+
+	if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid ||
+	!req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) {
+		pr_err("%s: invalid request msg\n", __func__);
+		ret = -EINVAL;
+		goto response;
+	}
+
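+	/* usb_token packs the stream direction, pcm device and card number */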
+	direction = req_msg->usb_token & SND_PCM_STREAM_DIRECTION;
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	pcm_card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+
+	pr_debug("%s:card#:%d dev#:%d dir:%d en:%d fmt:%d rate:%d #ch:%d\n",
+		__func__, pcm_card_num, pcm_dev_num, direction, req_msg->enable,
+		req_msg->audio_format, req_msg->bit_rate,
+		req_msg->number_of_ch);
+
+	if (pcm_card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card # %u", __func__, pcm_card_num);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	pcm_format = map_pcm_format(req_msg->audio_format);
+	if (pcm_format == -EINVAL) {
+		pr_err("%s: unsupported pcm format received %d\n",
+		__func__, req_msg->audio_format);
+		ret = -EINVAL;
+		goto response;
+	}
+
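+	/* look up the ALSA substream; also passes the disconnect callback */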
+	subs = find_snd_usb_substream(pcm_card_num, pcm_dev_num, direction,
+					&chip, uaudio_disconnect_cb);
+	if (!subs || !chip || atomic_read(&chip->shutdown)) {
+		pr_err("%s: can't find substream for card# %u, dev# %u dir%u\n",
+			__func__, pcm_card_num, pcm_dev_num, direction);
+		ret = -ENODEV;
+		goto response;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	info_idx = info_idx_from_ifnum(pcm_card_num, subs->interface,
+		req_msg->enable);
+	if (atomic_read(&chip->shutdown) || !subs->stream || !subs->stream->pcm
+			|| !subs->stream->chip) {
+		ret = -ENODEV;
+		mutex_unlock(&chip->dev_lock);
+		goto response;
+	}
+
+	if (req_msg->enable) {
+		if (info_idx < 0) {
+			pr_err("%s interface# %d already in use card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			ret = -EBUSY;
+			mutex_unlock(&chip->dev_lock);
+			goto response;
+		}
+	}
+
+	subs->pcm_format = pcm_format;
+	subs->channels = req_msg->number_of_ch;
+	subs->cur_rate = req_msg->bit_rate;
+	uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf;
+
+	ret = snd_usb_enable_audio_stream(subs, req_msg->enable);
+
+	if (!ret && req_msg->enable)
+		ret = prepare_qmi_response(subs, req_msg, &resp, info_idx);
+
+	mutex_unlock(&chip->dev_lock);
+
+response:
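+	/*
+	 * For a valid disable request, release the per-interface resources
+	 * and drop the reference taken when the stream was enabled.
+	 */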
+	if (!req_msg->enable && ret != -EINVAL) {
+		if (info_idx >= 0) {
+			mutex_lock(&chip->dev_lock);
+			info = &uadev[pcm_card_num].info[info_idx];
+			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
+			pr_debug("%s:release resources: intf# %d card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			mutex_unlock(&chip->dev_lock);
+		}
+		if (atomic_read(&uadev[pcm_card_num].in_use))
+			kref_put(&uadev[pcm_card_num].kref,
+					uaudio_dev_release);
+	}
+
+	resp.usb_token = req_msg->usb_token;
+	resp.usb_token_valid = 1;
+	resp.internal_status = ret;
+	resp.internal_status_valid = 1;
+	resp.status = ret ? USB_AUDIO_STREAM_REQ_FAILURE_V01 :
+			USB_AUDIO_STREAM_REQ_SUCCESS_V01;
+	resp.status_valid = 1;
+	ret = qmi_send_resp_from_cb(svc->uaudio_svc_hdl, svc->curr_conn, req_h,
+			&uaudio_stream_resp_desc, &resp, sizeof(resp));
+
+	svc->t_resp_sent = ktime_get();
+
+	pr_debug("%s: t_resp sent - t_req recvd (in ms) %lld\n", __func__,
+		ktime_to_ms(ktime_sub(svc->t_resp_sent, svc->t_request_recvd)));
+
+	return ret;
+}
+
+static int uaudio_qmi_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || !conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+	if (svc->curr_conn) {
+		pr_err("%s: Service is busy\n", __func__);
+		return -ECONNREFUSED;
+	}
+	svc->curr_conn = conn_h;
+	return 0;
+}
+
+static void uaudio_qmi_disconnect_work(struct work_struct *w)
+{
+	struct intf_info *info;
+	int idx, if_idx;
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+
+	/* find all active intf for set alt 0 and cleanup usb audio dev */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!atomic_read(&uadev[idx].in_use))
+			continue;
+
+		for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) {
+			if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use)
+				continue;
+			info = &uadev[idx].info[if_idx];
+			subs = find_snd_usb_substream(info->pcm_card_num,
+							info->pcm_dev_num,
+							info->direction,
+							&chip,
+							uaudio_disconnect_cb);
+			if (!subs || !chip || atomic_read(&chip->shutdown)) {
+				pr_debug("%s:no subs for c#%u, dev#%u dir%u\n",
+					__func__, info->pcm_card_num,
+					info->pcm_dev_num,
+					info->direction);
+				continue;
+			}
+			snd_usb_enable_audio_stream(subs, 0);
+		}
+		atomic_set(&uadev[idx].in_use, 0);
+		mutex_lock(&chip->dev_lock);
+		uaudio_dev_cleanup(&uadev[idx]);
+		mutex_unlock(&chip->dev_lock);
+	}
+}
+
+static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	svc->curr_conn = NULL;
+	queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
+
+	return 0;
+}
+
+static int uaudio_qmi_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		ret = handle_uaudio_stream_req(req_h, req);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static int uaudio_qmi_svc_req_desc_cb(unsigned int msg_id,
+	struct msg_desc **req_desc)
+{
+	int ret;
+
+	pr_debug("%s: msg_id %d\n", __func__, msg_id);
+
+	switch (msg_id) {
+	case QMI_UAUDIO_STREAM_REQ_V01:
+		*req_desc = &uaudio_stream_req_desc;
+		ret = sizeof(struct qmi_uaudio_stream_req_msg_v01);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
+static void uaudio_qmi_svc_recv_msg(struct work_struct *w)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = container_of(w, struct uaudio_qmi_svc,
+		recv_msg_work);
+
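+	/* drain queued messages; qmi_recv_msg() returns -ENOMSG when empty */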
+	do {
+		pr_debug("%s: Notified about a Receive Event", __func__);
+	} while ((ret = qmi_recv_msg(svc->uaudio_svc_hdl)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", __func__);
+}
+
+static void uaudio_qmi_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	pr_debug("%s: event %d", __func__, event);
+
+	svc->t_request_recvd = ktime_get();
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_work(svc->uaudio_wq, &svc->recv_msg_work);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options uaudio_svc_ops_options = {
+	.version = 1,
+	.service_id = UAUDIO_STREAM_SERVICE_ID_V01,
+	.service_vers = UAUDIO_STREAM_SERVICE_VERS_V01,
+	.connect_cb = uaudio_qmi_svc_connect_cb,
+	.disconnect_cb = uaudio_qmi_svc_disconnect_cb,
+	.req_desc_cb = uaudio_qmi_svc_req_desc_cb,
+	.req_cb = uaudio_qmi_svc_req_cb,
+};
+
+static int uaudio_qmi_plat_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	uaudio_qdev = devm_kzalloc(&pdev->dev, sizeof(struct uaudio_qmi_dev),
+		GFP_KERNEL);
+	if (!uaudio_qdev)
+		return -ENOMEM;
+
+	uaudio_qdev->dev = &pdev->dev;
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-stream-id",
+				&uaudio_qdev->sid);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read sid.\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-intr-num",
+				&uaudio_qdev->intr_num);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read intr num.\n");
+		return -ENODEV;
+	}
+
+	uaudio_qdev->domain = iommu_domain_alloc(pdev->dev.bus);
+	if (!uaudio_qdev->domain) {
+		dev_err(&pdev->dev, "failed to allocate iommu domain\n");
+		return -ENODEV;
+	}
+
+	/* attach to external processor iommu */
+	ret = iommu_attach_device(uaudio_qdev->domain, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to attach device ret = %d\n", ret);
+		goto free_domain;
+	}
+
+	/* initialize dcba, xfer ring and xfer buf iova list */
+	INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
+	uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
+	uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
+	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
+	uaudio_qdev->xfer_ring_iova_size =
+			IOVA_XFER_RING_MAX - IOVA_XFER_RING_BASE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_buf_list);
+	uaudio_qdev->curr_xfer_buf_iova = IOVA_XFER_BUF_BASE;
+	uaudio_qdev->xfer_buf_iova_size =
+		IOVA_XFER_BUF_MAX - IOVA_XFER_BUF_BASE;
+
+	return 0;
+
+free_domain:
+	iommu_domain_free(uaudio_qdev->domain);
+	return ret;
+}
+
+static int uaudio_qmi_plat_remove(struct platform_device *pdev)
+{
+	iommu_detach_device(uaudio_qdev->domain, &pdev->dev);
+	iommu_domain_free(uaudio_qdev->domain);
+	uaudio_qdev->domain = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id of_uaudio_match[] = {
+	{
+		.compatible = "qcom,usb-audio-qmi-dev",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_uaudio_match);
+
+static struct platform_driver uaudio_qmi_driver = {
+	.probe		= uaudio_qmi_plat_probe,
+	.remove		= uaudio_qmi_plat_remove,
+	.driver		= {
+		.name	= "uaudio-qmi",
+		.of_match_table	= of_uaudio_match,
+	},
+};
+
+static int uaudio_qmi_svc_init(void)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc;
+
+	svc = kzalloc(sizeof(struct uaudio_qmi_svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->uaudio_wq = create_singlethread_workqueue("uaudio_svc");
+	if (!svc->uaudio_wq) {
+		ret = -ENOMEM;
+		goto free_svc;
+	}
+
+	svc->uaudio_svc_hdl = qmi_handle_create(uaudio_qmi_svc_ntfy, NULL);
+	if (!svc->uaudio_svc_hdl) {
+		pr_err("%s: Error creating svc_hdl\n", __func__);
+		ret = -EFAULT;
+		goto destroy_uaudio_wq;
+	}
+
+	ret = qmi_svc_register(svc->uaudio_svc_hdl, &uaudio_svc_ops_options);
+	if (ret < 0) {
+		pr_err("%s:Error registering uaudio svc %d\n", __func__, ret);
+		goto destroy_svc_handle;
+	}
+
+	INIT_WORK(&svc->recv_msg_work, uaudio_qmi_svc_recv_msg);
+	INIT_WORK(&svc->qmi_disconnect_work, uaudio_qmi_disconnect_work);
+
+	uaudio_svc = svc;
+
+	return 0;
+
+destroy_svc_handle:
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+destroy_uaudio_wq:
+	destroy_workqueue(svc->uaudio_wq);
+free_svc:
+	kfree(svc);
+	return ret;
+}
+
+static void uaudio_qmi_svc_exit(void)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	qmi_svc_unregister(svc->uaudio_svc_hdl);
+	flush_workqueue(svc->uaudio_wq);
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+	destroy_workqueue(svc->uaudio_wq);
+	kfree(svc);
+	uaudio_svc = NULL;
+}
+
+static int __init uaudio_qmi_plat_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&uaudio_qmi_driver);
+	if (ret)
+		return ret;
+
+	ret = uaudio_qmi_svc_init();
+	if (ret)
+		platform_driver_unregister(&uaudio_qmi_driver);
+
+	return ret;
+}
+
+static void __exit uaudio_qmi_plat_exit(void)
+{
+	uaudio_qmi_svc_exit();
+	platform_driver_unregister(&uaudio_qmi_driver);
+}
+
+module_init(uaudio_qmi_plat_init);
+module_exit(uaudio_qmi_plat_exit);
+
+MODULE_DESCRIPTION("USB AUDIO QMI Service Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
new file mode 100644
index 0000000..fef7505
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -0,0 +1,833 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "usb_audio_qmi_v01.h"
+
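+/*
+ * TLV element info tables consumed by the QMI encode/decode framework;
+ * each entry describes one field of the corresponding message structure.
+ */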
+static struct elem_info mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   va),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   pa),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info apps_mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   evt_ring),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_data),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_sync),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   xfer_buff),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   dcba),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info usb_endpoint_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bEndpointAddress),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bmAttributes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   wMaxPacketSize),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bInterval),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bRefresh),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bSynchAddress),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info usb_interface_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceNumber),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bAlternateSetting),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bNumEndpoints),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceSubClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceProtocol),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   iInterface),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_stream_status_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+				enum usb_audio_device_indication_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   dev_event),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
new file mode 100644
index 0000000..83a966c
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef USB_QMI_V01_H
+#define USB_QMI_V01_H
+
+#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D
+#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01
+
+#define QMI_UAUDIO_STREAM_RESP_V01 0x0001
+#define QMI_UAUDIO_STREAM_REQ_V01 0x0001
+#define QMI_UADUIO_STREAM_IND_V01 0x0001
+
+
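+/*
+ * One xHCI memory region (iova, physical address, size) shared with the
+ * external processor.
+ */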
+struct mem_info_v01 {
+	uint64_t va;
+	uint64_t pa;
+	uint32_t size;
+};
+
+struct apps_mem_info_v01 {
+	struct mem_info_v01 evt_ring;
+	struct mem_info_v01 tr_data;
+	struct mem_info_v01 tr_sync;
+	struct mem_info_v01 xfer_buff;
+	struct mem_info_v01 dcba;
+};
+
+struct usb_endpoint_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bEndpointAddress;
+	uint8_t bmAttributes;
+	uint16_t wMaxPacketSize;
+	uint8_t bInterval;
+	uint8_t bRefresh;
+	uint8_t bSynchAddress;
+};
+
+struct usb_interface_descriptor_v01 {
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bInterfaceNumber;
+	uint8_t bAlternateSetting;
+	uint8_t bNumEndpoints;
+	uint8_t bInterfaceClass;
+	uint8_t bInterfaceSubClass;
+	uint8_t bInterfaceProtocol;
+	uint8_t iInterface;
+};
+
+enum usb_audio_stream_status_enum_v01 {
+	USB_AUDIO_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_STREAM_REQ_SUCCESS_V01 = 0,
+	USB_AUDIO_STREAM_REQ_FAILURE_V01 = 1,
+	USB_AUDIO_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2,
+	USB_AUDIO_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3,
+	USB_AUDIO_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4,
+	USB_AUDIO_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum usb_audio_device_indication_enum_v01 {
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN,
+	USB_AUDIO_DEV_CONNECT_V01 = 0,
+	USB_AUDIO_DEV_DISCONNECT_V01 = 1,
+	USB_AUDIO_DEV_SUSPEND_V01 = 2,
+	USB_AUDIO_DEV_RESUME_V01 = 3,
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_uaudio_stream_req_msg_v01 {
+	uint8_t enable;
+	uint32_t usb_token;
+	uint8_t audio_format_valid;
+	uint32_t audio_format;
+	uint8_t number_of_ch_valid;
+	uint32_t number_of_ch;
+	uint8_t bit_rate_valid;
+	uint32_t bit_rate;
+	uint8_t xfer_buff_size_valid;
+	uint32_t xfer_buff_size;
+};
+#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 39
+extern struct elem_info qmi_uaudio_stream_req_msg_v01_ei[];
+
+struct qmi_uaudio_stream_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	uint8_t status_valid;
+	enum usb_audio_stream_status_enum_v01 status;
+	uint8_t internal_status_valid;
+	uint32_t internal_status;
+	uint8_t slot_id_valid;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
+extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
+
+struct qmi_uaudio_stream_ind_msg_v01 {
+	enum usb_audio_device_indication_enum_v01 dev_event;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 177
+extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
+
+#endif
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 4d5c89a..93c4bed 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -61,6 +61,10 @@
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
+
+	struct mutex dev_lock;	/* to protect any race with disconnect */
+	int card_num;	/* cache pcm card number to use upon disconnect */
+	void (*disconnect_cb)(struct snd_usb_audio *chip);
 };
 
 #define usb_audio_err(chip, fmt, args...) \
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ebe1b9f..85814d1 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -187,21 +187,37 @@
 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool new_active_state)
 {
+	struct kvm_vcpu *requester_vcpu;
 	spin_lock(&irq->irq_lock);
+
+	/*
+	 * The vcpu parameter here can mean multiple things depending on how
+	 * this function is called; when handling a trap from the kernel it
+	 * depends on the GIC version, and these functions are also called as
+	 * part of save/restore from userspace.
+	 *
+	 * Therefore, we have to figure out the requester in a reliable way.
+	 *
+	 * When accessing VGIC state from user space, the requester_vcpu is
+	 * NULL, which is fine, because we guarantee that no VCPUs are running
+	 * when accessing VGIC state from user space so irq->vcpu->cpu is
+	 * always -1.
+	 */
+	requester_vcpu = kvm_arm_get_running_vcpu();
+
 	/*
 	 * If this virtual IRQ was written into a list register, we
 	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back LR state to the struct vgic_irq.  We can only
-	 * know this for sure, when either this irq is not assigned to
-	 * anyone's AP list anymore, or the VCPU thread is not
-	 * running on any CPUs.
+	 * synced back the LR state to the struct vgic_irq.
 	 *
-	 * In the opposite case, we know the VCPU thread may be on its
-	 * way back from the guest and still has to sync back this
-	 * IRQ, so we release and re-acquire the spin_lock to let the
-	 * other thread sync back the IRQ.
+	 * As long as the conditions below are true, we know the VCPU thread
+	 * may be on its way back from the guest (we kicked the VCPU thread in
+	 * vgic_change_active_prepare) and still has to sync back this IRQ,
+	 * so we release and re-acquire the spin_lock to let the other thread
+	 * sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);