Merge "ASoC: qdsp6v2: prevent null pointer dereference for _vol_cmds" into msm-4.9
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 9fa070c..d534246 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -176,3 +176,22 @@
 DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an smmu will override any bus
 attributes (i.e cacheablilty) provided by the client device. Some hardware
 may be designed to use the original attributes instead.
+
+DMA_ATTR_FORCE_COHERENT
+-----------------------
+
+When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
+attribute can be used to force a buffer to be mapped as IO coherent.
+This DMA attribute is only currently supported for arm64 stage 1 IOMMU
+mappings.
+
+DMA_ATTR_FORCE_NON_COHERENT
+---------------------------
+
+When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA
+attribute can be used to force a buffer to not be mapped as IO
+coherent.
+The DMA_ATTR_FORCE_NON_COHERENT DMA attribute overrides the buffer IO
+coherency configuration set by making the device IO coherent.
+This DMA attribute is only currently supported for arm64 stage 1 IOMMU
+mappings.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 405da11..d11af52 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -42,24 +42,26 @@
 will be updated when new workarounds are committed and backported to
 stable kernels.
 
-| Implementor    | Component       | Erratum ID      | Kconfig                 |
-+----------------+-----------------+-----------------+-------------------------+
-| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319    |
-| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319    |
-| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069    |
-| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472    |
-| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719    |
-| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419    |
-| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
-| ARM            | Cortex-A57      | #852523         | N/A                     |
-| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
-| ARM            | Cortex-A72      | #853709         | N/A                     |
-| ARM            | MMU-500         | #841119,#826419 | N/A                     |
-|                |                 |                 |                         |
-| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
-| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
-| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
-| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
-| Cavium         | ThunderX SMMUv2 | #27704          | N/A		       |
-|                |                 |                 |                         |
-| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585     |
+| Implementor    | Component       | Erratum ID      | Kconfig                     |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
+| ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
+| ARM            | Cortex-A53      | #824069         | ARM64_ERRATUM_824069        |
+| ARM            | Cortex-A53      | #819472         | ARM64_ERRATUM_819472        |
+| ARM            | Cortex-A53      | #845719         | ARM64_ERRATUM_845719        |
+| ARM            | Cortex-A53      | #843419         | ARM64_ERRATUM_843419        |
+| ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075        |
+| ARM            | Cortex-A57      | #852523         | N/A                         |
+| ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+| ARM            | Cortex-A72      | #853709         | N/A                         |
+| ARM            | MMU-500         | #841119,#826419 | N/A                         |
+|                |                 |                 |                             |
+| Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
+| Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
+| Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                         |
+|                |                 |                 |                             |
+| Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
+|                |                 |                 |                             |
+| Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 0cf9a6b..472122f 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -227,49 +227,7 @@
 usage, similar to "ondemand" and "conservative" governors, but with a
 different set of configurable behaviors.
 
-The tunable values for this governor are:
-
-above_hispeed_delay: When speed is at or above hispeed_freq, wait for
-this long before raising speed in response to continued high load.
-The format is a single delay value, optionally followed by pairs of
-CPU speeds and the delay to use at or above those speeds.  Colons can
-be used between the speeds and associated delays for readability.  For
-example:
-
-   80000 1300000:200000 1500000:40000
-
-uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
-200000 uS is used until speed 1.5 GHz, at which speed (and above)
-delay 40000 uS is used.  If speeds are specified these must appear in
-ascending order.  Default is 20000 uS.
-
-boost: If non-zero, immediately boost speed of all CPUs to at least
-hispeed_freq until zero is written to this attribute.  If zero, allow
-CPU speeds to drop below hispeed_freq according to load as usual.
-Default is zero.
-
-boostpulse: On each write, immediately boost speed of all CPUs to
-hispeed_freq for at least the period of time specified by
-boostpulse_duration, after which speeds are allowed to drop below
-hispeed_freq according to load as usual. Its a write-only file.
-
-boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
-on a write to boostpulse, before allowing speed to drop according to
-load as usual.  Default is 80000 uS.
-
-go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
-Default is 99%.
-
-hispeed_freq: An intermediate "high speed" at which to initially ramp
-when CPU load hits the value specified in go_hispeed_load.  If load
-stays high for the amount of time specified in above_hispeed_delay,
-then speed may be bumped higher.  Default is the maximum speed allowed
-by the policy at governor initialization time.
-
-io_is_busy: If set, the governor accounts IO time as CPU busy time.
-
-min_sample_time: The minimum amount of time to spend at the current
-frequency before ramping down. Default is 80000 uS.
+The tunable values for this governor are:
 
 target_loads: CPU load values used to adjust speed to influence the
 current CPU load toward that value.  In general, the lower the target
@@ -288,6 +246,32 @@
 values also usually appear in an ascending order. The default is
 target load 90% for all speeds.
 
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified these must appear in
+ascending order.  Default is 20000 uS.
+
 timer_rate: Sample rate for reevaluating CPU load when the CPU is not
 idle.  A deferrable timer is used, such that the CPU will not be woken
 from idle to service this timer until something else needs to run.
@@ -304,6 +288,65 @@
 when not at lowest speed.  A value of -1 means defer timers
 indefinitely at all speeds.  Default is 80000 uS.
 
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+align_windows: If non-zero, align governor timer window to fire at
+multiples of number of jiffies timer_rate converts to.
+
+use_sched_load: If non-zero, query scheduler for CPU busy time,
+instead of collecting it directly in governor. This would allow
+scheduler to adjust the busy time of each CPU to account for known
+information such as migration. If non-zero, this also implies governor
+sampling windows are aligned across CPUs, with same timer_rate,
+regardless what align_windows is set to. Default is zero.
+
+use_migration_notif: If non-zero, schedule hrtimer to fire in 1ms
+to reevaluate frequency of notified CPU, unless the hrtimer is already
+pending. If zero, ignore scheduler notification. Default is zero.
+
+max_freq_hysteresis: Each time freq evaluation chooses policy->max,
+next max_freq_hysteresis is considered as hysteresis period. During
+this period, frequency target will not drop below hispeed_freq, no
+matter how light actual workload is. If CPU load of any sampling
+window exceeds go_hispeed_load during this period, governor will
+directly increase frequency back to policy->max. Default is 0 uS.
+
+ignore_hispeed_on_notif: If non-zero, do not apply hispeed related
+logic if frequency evaluation is triggered by scheduler notification.
+This includes ignoring go_hispeed_load, hispeed_freq in frequency
+selection, and ignoring above_hispeed_delay that prevents frequency
+ramp up. For evaluation triggered by timer, hispeed logic is still
+always applied. ignore_hispeed_on_notif has no effect if
+use_migration_notif is set to zero. Default is zero.
+
+fast_ramp_down: If non-zero, do not apply min_sample_time if
+frequency evaluation is triggered by scheduler notification. For
+evaluation triggered by timer, min_sample_time is still always
+enforced. fast_ramp_down has no effect if use_migration_notif is
+set to zero. Default is zero.
+
+enable_prediction: If non-zero, two frequencies will be calculated
+during each sampling period: one based on busy time in previous sampling
+period (f_prev), and the other based on prediction provided by scheduler
+(f_pred). Max of both will be selected as final frequency. Hispeed
+related logic, including both frequency selection and delay is ignored
+if enable_prediction is set. If only f_pred but not f_prev picked
+policy->max, max_freq_hysteresis period is not started/extended.
+use_sched_load must be turned on before enabling this feature.
+Default is zero.
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
index 0450145..baae281 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -263,6 +263,7 @@
 compatible = "qcom,sdm845-cdp"
 compatible = "qcom,sdm845-mtp"
 compatible = "qcom,sdm845-mtp"
+compatible = "qcom,sdm845-qrd"
 compatible = "qcom,sdm830-sim"
 compatible = "qcom,sdm830-rumi"
 compatible = "qcom,sdm830-cdp"
diff --git a/Documentation/devicetree/bindings/clock/qcom,camcc.txt b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
new file mode 100644
index 0000000..dc93b35
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,camcc.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies Camera Clock & Reset Controller Binding
+-------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,cam_cc-sdm845"
+- reg : shall contain base register location and length
+- reg-names: names of registers listed in the same order as in
+	     the reg property.
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- vdd_<rail>-supply: The logic rail supply.
+
+Example:
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
diff --git a/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
new file mode 100644
index 0000000..6bed785
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
@@ -0,0 +1,25 @@
+MSM BIMC bandwidth monitor device
+
+bimc-bwmon is a device that represents the MSM BIMC bandwidth monitors that
+can be used to measure the bandwidth of read/write traffic from the BIMC
+master ports. For example, the CPU subsystem sits on one BIMC master port.
+
+Required properties:
+- compatible:		Must be "qcom,bimc-bwmon" or "qcom,bimc-bwmon2"
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- reg-names:		Names of the bases for the above registers. Expected
+			bases are: "base", "global_base"
+- interrupts:		Lists the threshold IRQ.
+- qcom,mport:		The hardware master port that this device can monitor
+- qcom,target-dev:	The DT device that corresponds to this master port
+
+Example:
+	qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon";
+		reg = <0xfc388000 0x300>, <0xfc381000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 1>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devbw.txt b/Documentation/devicetree/bindings/devfreq/devbw.txt
new file mode 100644
index 0000000..ece0fa7
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devbw.txt
@@ -0,0 +1,39 @@
+MSM device bandwidth device
+
+devbw is a device that represents a MSM device's BW requirements from its
+master port(s) to a different device's slave port(s) in a MSM SoC. This
+device is typically used to vote for BW requirements from a device's (Eg:
+CPU, GPU) master port(s) to the slave (Eg: DDR) port(s).
+
+Required properties:
+- compatible:		Must be "qcom,devbw"
+- qcom,src-dst-ports:	A list of tuples where each tuple consists of a bus
+			master port number and a bus slave port number.
+- qcom,bw-tbl:		A list of meaningful instantaneous bandwidth values
+			(in MB/s) that can be requested from the device
+			master port to the slave port. The list of values
+			depend on the supported bus/slave frequencies and the
+			bus width.
+
+Optional properties:
+- qcom,active-only:	Indicates that the bandwidth votes need to be
+			enforced only when the CPU subsystem is active.
+- governor:		Initial governor to use for the device.
+			Default: "performance"
+
+Example:
+
+	qcom,cpubw {
+		compatible = "qcom,devbw";
+		qcom,src-dst-ports = <1 512>, <2 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  572 /*  75 MHz */ >,
+			< 1144 /* 150 MHz */ >,
+			< 1525 /* 200 MHz */ >,
+			< 2342 /* 307 MHz */ >,
+			< 3509 /* 460 MHz */ >,
+			< 4684 /* 614 MHz */ >,
+			< 6103 /* 800 MHz */ >,
+			< 7102 /* 931 MHz */ >;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
new file mode 100644
index 0000000..6537538
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
@@ -0,0 +1,53 @@
+Devfreq CPUfreq governor
+
+devfreq-cpufreq is a parent device that contains one or more child devices.
+Each child device provides CPU frequency to device frequency mapping for a
+specific device. Examples of devices that could use this are: DDR, cache and
+CCI.
+
+Parent device name shall be "devfreq-cpufreq".
+
+Required child device properties:
+- cpu-to-dev-map, or cpu-to-dev-map-<X>:
+			A list of tuples where each tuple consists of a
+			CPU frequency (KHz) and the corresponding device
+			frequency. CPU frequencies not listed in the table
+			will use the device frequency that corresponds to the
+			next rounded up CPU frequency.
+			Use "cpu-to-dev-map" if all CPUs in the system should
+			share same mapping.
+			Use cpu-to-dev-map-<cpuid> to describe different
+			mappings for different CPUs. The property should be
+			listed only for the first CPU if multiple CPUs are
+			synchronous.
+- target-dev:		Phandle to device that this mapping applies to.
+
+Example:
+	devfreq-cpufreq {
+		cpubw-cpufreq {
+			target-dev = <&cpubw>;
+			cpu-to-dev-map =
+				<  300000  1144 >,
+				<  422400  2288 >,
+				<  652800  3051 >,
+				<  883200  5996 >,
+				< 1190400  8056 >,
+				< 1497600 10101 >,
+				< 1728000 12145 >,
+				< 2649600 16250 >;
+		};
+
+		cache-cpufreq {
+			target-dev = <&cache>;
+			cpu-to-dev-map =
+				<  300000  300000 >,
+				<  422400  422400 >,
+				<  652800  499200 >,
+				<  883200  576000 >,
+				<  960000  960000 >,
+				< 1497600 1036800 >,
+				< 1574400 1574400 >,
+				< 1728000 1651200 >,
+				< 2649600 1728000 >;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
new file mode 100644
index 0000000..4072053
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
@@ -0,0 +1,47 @@
+Devfreq simple device
+
+devfreq-simple-dev is a device that represents a simple device that cannot do
+any status reporting and uses a clock that can be scaled by one or more
+devfreq governors.  It provides a list of usable frequencies for the device
+and some additional optional parameters.
+
+Required properties:
+- compatible:		Must be "devfreq-simple-dev"
+- clock-names:		Must be "devfreq_clk"
+- clocks:		Must refer to the clock that's fed to the device.
+- freq-tbl-khz:		A list of usable frequencies (in KHz) for the device
+			clock.
+Optional properties:
+- polling-ms:	Polling interval for the device in milliseconds. Default: 50
+- governor:	Initial governor to use for the device. Default: "performance"
+
+Example:
+
+	qcom,cache {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_krait clk_l2_clk>;
+		polling-ms = 50;
+		governor = "cpufreq";
+		freq-tbl-khz =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >;
+	};
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index 5eee0c9..c38b45c 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -117,6 +117,11 @@
 				type.
 - qcom,sde-highest-bank-bit:	A u32 property to indicate GPU/Camera/Video highest memory
 				bank bit used for tile format buffers.
+- qcom,sde-ubwc-version:	Property to specify the UBWC feature version.
+- qcom,sde-ubwc-static:	Property to specify the default UBWC static
+				configuration value.
+- qcom,sde-ubwc-swizzle:	Property to specify the default UBWC swizzle
+				configuration value.
 - qcom,sde-panic-per-pipe:	Boolean property to indicate if panic signal
 				control feature is available on each source pipe.
 - qcom,sde-has-src-split:	Boolean property to indicate if source split
@@ -388,6 +393,9 @@
     qcom,sde-sspp-linewidth = <2560>;
     qcom,sde-mixer-blendstages = <0x7>;
     qcom,sde-highest-bank-bit = <0x2>;
+    qcom,sde-ubwc-version = <0x100>;
+    qcom,sde-ubwc-static = <0x100>;
+    qcom,sde-ubwc-swizzle = <0>;
     qcom,sde-panic-per-pipe;
     qcom,sde-has-cdp;
     qcom,sde-has-src-split;
diff --git a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
index 62efecc..3e7fcb7 100644
--- a/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/drm/msm/mdss-dsi-panel.txt
@@ -341,6 +341,28 @@
 					2A/2B command.
 - qcom,dcs-cmd-by-left:			Boolean to indicate that dcs command are sent
 					through the left DSI controller only in a dual-dsi configuration
+- qcom,mdss-dsi-panel-hdr-enabled:      Boolean to indicate HDR support in panel.
+- qcom,mdss-dsi-panel-hdr-color-primaries:
+                                        Array of 8 unsigned integers denoting chromaticity of panel. These
+                                        values are specified in nits units. The value range is 0 through 50000.
+                                        To obtain real chromaticity, these values should be divided by factor of
+                                        50000. The structure of array is defined in below order
+                                        value 1: x value of white chromaticity of display panel
+                                        value 2: y value of white chromaticity of display panel
+                                        value 3: x value of red chromaticity of display panel
+                                        value 4: y value of red chromaticity of display panel
+                                        value 5: x value of green chromaticity of display panel
+                                        value 6: y value of green chromaticity of display panel
+                                        value 7: x value of blue chromaticity of display panel
+                                        value 8: y value of blue chromaticity of display panel
+- qcom,mdss-dsi-panel-peak-brightness:  Maximum brightness supported by panel. In absence of maximum value
+                                        typical value becomes peak brightness. Value is specified in nits units.
+                                        To obtain real peak brightness, this value should be divided by factor of
+                                        10000.
+- qcom,mdss-dsi-panel-blackness-level:  Blackness level supported by panel. Blackness level is defined as
+                                        ratio of peak brightness to contrast. Value is specified in nits units.
+                                        To obtain real blackness level, this value should be divided by factor of
+                                        10000.
 - qcom,mdss-dsi-lp11-init:		Boolean used to enable the DSI clocks and data lanes (low power 11)
 					before issuing hardware reset line.
 - qcom,mdss-dsi-init-delay-us:		Delay in microseconds(us) before performing any DSI activity in lp11
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index f6b7552..a244d6c 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -6,6 +6,10 @@
    * "qcom,i2c-geni.
  - reg: Should contain QUP register address and length.
  - interrupts: Should contain I2C interrupt.
+ - clocks: Serial engine core clock, and AHB clocks needed by the device.
+ - pinctrl-names/pinctrl-0/1: The GPIOs assigned to this core. The names
+   should be "active" and "sleep" for the pin configuration when core is active
+   or when entering sleep state.
  - #address-cells: Should be <1> Address cells for i2c device address
  - #size-cells: Should be <0> as i2c addresses have no size component
 
@@ -17,6 +21,13 @@
 	compatible = "qcom,i2c-geni";
 	reg = <0xa94000 0x4000>;
 	interrupts = <GIC_SPI 358 0>;
+	clock-names = "se-clk", "m-ahb", "s-ahb";
+	clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+		<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&qup_1_i2c_5_active>;
+	pinctrl-1 = <&qup_1_i2c_5_sleep>;
 	#address-cells = <1>;
 	#size-cells = <0>;
 };
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index bd35d80..b6bc475 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -15,6 +15,8 @@
 - clocks:		List of Phandles for clock device nodes
 			needed by the device.
 - clock-names:		List of clock names needed by the device.
+- #list-cells:		Number of rotator cells, must be 1
+
 Bus Scaling Data:
 - qcom,msm-bus,name:		String property describing rotator client.
 - qcom,msm-bus,num-cases:	This is the the number of Bus Scaling use cases
@@ -81,6 +83,12 @@
 				  priority for rotator clients.
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
+- qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
+- cache-slice-names:		A set of names that identify the usecase names of a client that uses
+				cache slice. These strings are used to look up the cache slice
+				entries by name.
+- cache-slices:			The tuple has phandle to llcc device as the first argument and the
+				second argument is the usecase id of the client.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -102,6 +110,9 @@
 		reg = <0xfd900000 0x22100>,
 			<0xfd925000 0x1000>;
 		reg-names = "mdp_phys", "rot_vbif_phys";
+
+		#list-cells = <1>;
+
 		interrupt-parent = <&mdss_mdp>;
 		interrupts = <2 0>;
 
@@ -131,6 +142,10 @@
 		qcom,mdss-default-ot-rd-limit = <8>;
 		qcom,mdss-default-ot-wr-limit = <16>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 3>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&mdp_smmu 0xe00>;
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index b894c31..6d72e8b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -16,25 +16,6 @@
 - interrupts : should contain the vidc interrupt.
 - qcom,platform-version : mask and shift of the platform version bits
     in efuse register.
-- qcom,load-freq-tbl : load (in macroblocks/sec) and corresponding vcodec
-  clock required along with codec's config, which is a bitmap that describes
-  what the clock is used for. The bitmaps are as follows:
-    supports mvc encoder = 0x00000001
-    supports mvc decoder = 0x00000003
-    supports h264 encoder = 0x00000004
-    supports h264 decoder = 0x0000000c
-    supports mpeg1 encoder = 0x00000040
-    supports mpeg1 decoder = 0x000000c0
-    supports mpeg2 encoder = 0x00000100
-    supports mpeg2 decoder = 0x00000300
-    supports vp6 encoder = 0x00100000
-    supports vp6 decoder = 0x00300000
-    supports vp7 encoder = 0x00400000
-    supports vp7 decoder = 0x00c00000
-    supports vp8 encoder = 0x01000000
-    supports vp8 decoder = 0x03000000
-    supports hevc encoder = 0x04000000
-    supports hevc decoder = 0x0c000000
 - qcom,reg-presets : list of offset-value pairs for registers to be written.
   The offsets are from the base offset specified in 'reg'. This is mainly
   used for QoS, VBIF, etc. presets for video.
@@ -57,9 +38,26 @@
 - qcom,clock-freq-tbl = node containing individual domain nodes, each with:
      - qcom,codec-mask: a bitmap of supported codec types, every two bits
        represents a codec type.
+         supports mvc encoder = 0x00000001
+         supports mvc decoder = 0x00000003
+         supports h264 encoder = 0x00000004
+         supports h264 decoder = 0x0000000c
+         supports mpeg1 encoder = 0x00000040
+         supports mpeg1 decoder = 0x000000c0
+         supports mpeg2 encoder = 0x00000100
+         supports mpeg2 decoder = 0x00000300
+         supports vp6 encoder = 0x00100000
+         supports vp6 decoder = 0x00300000
+         supports vp7 encoder = 0x00400000
+         supports vp7 decoder = 0x00c00000
+         supports vp8 encoder = 0x01000000
+         supports vp8 decoder = 0x03000000
+         supports hevc encoder = 0x04000000
+         supports hevc decoder = 0x0c000000
      - qcom,cycles-per-mb: number of cycles required to process each macro
        block.
-     - qcom,low-power-mode-factor: the factor which needs to be multiple with
+     - qcom,low-power-cycles-per-mb: number of cycles required to process each
+       macro block in low power mode.
        the required frequency to get the final frequency, the factor is
        represented in Q16 format.
 - qcom,sw-power-collapse = A bool indicating if video hardware core can be
@@ -167,13 +165,6 @@
 		venus-supply = <&gdsc>;
 		venus-core0-supply = <&gdsc1>;
 		venus-core1-supply = <&gdsc2>;
-		qcom,load-freq-tbl =
-			<489600 266670000 0x030fcfff>, /* Legacy decoder 1080p 60fps  */
-			<108000 133330000 0x030fcfff>, /* Legacy decoder 720p 30fps   */
-			<108000 200000000 0x01000414>, /* Legacy encoder 720p 30fps   */
-			<72000 133330000 0x0c000000>, /* HEVC decoder VGA 60fps   */
-			<36000 133330000 0x0c000000>, /* HEVC VGA 30 fps  */
-			<36000 133330000 0x01000414>; /* Legacy encoder VGA 30 fps   */
 		qcom,hfi-version = "3xx";
 		qcom,reg-presets = <0x80004 0x1>,
 			<0x80178 0x00001FFF>;
@@ -190,6 +181,7 @@
 		qcom,use-non-secure-pil;
 		qcom,use_dynamic_bw_update;
 		qcom,fw-bias = <0xe000000>;
+		qcom,allowed-clock-rates = <200000000 300000000 400000000>;
 		msm_vidc_cb1: msm_vidc_cb1 {
 			compatible = "qcom,msm-vidc,context-bank";
 			label = "venus_ns";
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index f842ed6..ea828da 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -87,6 +87,13 @@
 - qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
 - qcom,override-acc-1: Override the default ACC settings with this value if present.
 
+One child node to represent the MBA image may be specified, when the MBA image
+needs to be loaded in a specifically carved out memory region.
+
+Required properties:
+- compatible: Must be "qcom,pil-mba-mem"
+- memory-region: A phandle that points to a reserved memory where the MBA image will be loaded.
+
 Example:
 	qcom,mss@fc880000 {
 		compatible = "qcom,pil-q6v5-mss";
@@ -126,4 +133,9 @@
 		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
 		qcom,ssctl-instance-id = <12>;
 		qcom,sysmon-id = <0>;
+
+		qcom,mba-mem@0 {
+			compatible = "qcom,pil-mba-mem";
+			memory-region = <&peripheral_mem>;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index f3e6ca9..3a03add 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -56,6 +56,9 @@
                                 a pipe reset via the IPA uC is required
 - qcom,ipa-wdi2:		Boolean context flag to indicate whether
 				using wdi-2.0 or not
+- qcom,bandwidth-vote-for-ipa:	Boolean context flag to indicate whether
+				ipa clock voting is done by bandwidth
+				voting via msm-bus-scale driver or not
 - qcom,use-dma-zone:            Boolean context flag to indicate whether memory
                                 allocations controlled by IPA driver that do not
 				specify a struct device * should use GFP_DMA to
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 6122f6e..8efa85d 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -185,6 +185,40 @@
 	Definition: The initial temp band considering 0-based index at which
 		    the baseline target quotients are derived and fused.
 
+- qcom,cpr-acd-adj-down-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go down within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 31.
+
+- qcom,cpr-acd-adj-up-step-limit
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: The maximum number of PMIC steps to go up within a given
+		    corner due to all ACD adjustment recommendations. Valid
+		    values are 0 through 7.
+
+- qcom,cpr-acd-adj-down-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD down recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-adj-up-step-size
+	Usage:      required if qcom,cpr-acd-avg-enable is specified.
+	Value type: <u32>
+	Definition: Defines the step size in units of PMIC steps used for
+		    target quotient adjustment due to an ACD up recommendation.
+		    Valid values are 0 through 3.
+
+- qcom,cpr-acd-avg-enable
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the CPRh controller
+		    should enable the ACD AVG feature.
+
 =================================================
 Second Level Nodes - CPR Threads for a Controller
 =================================================
diff --git a/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
new file mode 100644
index 0000000..9d3fb78
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/qcom-usb-audio-qmi-dev.txt
@@ -0,0 +1,26 @@
+QTI USB Audio QMI Device
+
+USB Audio QMI device is used to attach to remote processor IOMMU and
+map USB Audio driver specific memory to iova to share with remote
+processor.
+
+Required Properties:
+
+- compatible : "qcom,usb-audio-qmi-dev"
+
+- iommus : A list of phandle and IOMMU specifier pairs that describe the
+  IOMMU master interfaces of the device.
+
+- qcom,usb-audio-stream-id : Stream id is prepended to iova before passing
+  iova to remote processor. This allows remote processor to access iova.
+
+- qcom,usb-audio-intr-num : Interrupter number for external sub system
+  destination.
+
+Example:
+	usb_audio_qmi_dev {
+		compatible = "qcom,usb-audio-qmi-dev";
+		iommus = <&lpass_q6_smmu 12>;
+		qcom,usb-audio-stream-id = <12>;
+		qcom,usb-audio-intr-num = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
new file mode 100644
index 0000000..868a5f0
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -0,0 +1,53 @@
+GENI based Qualcomm Universal Peripheral (QUP) Serial Peripheral Interface (SPI)
+
+The QUP v3 core is a GENI based AHB slave that provides a common data path
+(an output FIFO and an input FIFO) for serial peripheral interface (SPI)
+mini-core.
+
+SPI in master mode supports up to 50MHz, up to four chip selects, programmable
+data path from 4 bits to 32 bits and numerous protocol variants.
+
+Required properties:
+- compatible:	  Should contain "qcom,spi-geni"
+- reg:		  Should contain base register location and length
+- interrupts:	  Interrupt number used by this controller
+- clocks:	  Should contain the core clock and the AHB clock.
+- clock-names:	  Should be "core" for the core clock and "iface" for the
+		  AHB clock.
+- pinctrl-names:  Property should contain "default" and "sleep" for the
+		  pin configurations during the usecase and during idle.
+- pinctrl-x:	  phandle to the default/sleep pin configurations.
+- #address-cells: Number of cells required to define a chip select
+		  address on the SPI bus. Should be set to 1.
+- #size-cells:	  Should be zero.
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+		     Units - Hz. Definition as per
+		     Documentation/devicetree/bindings/spi/spi-bus.txt
+
+SPI slave nodes must be children of the SPI master node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+	qupv3_spi10: spi@a84000 {
+		compatible = "qcom,spi-geni";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0xa84000 0x4000>;
+		reg-names = "se_phys";
+		clock-names = "se-clk", "m-ahb", "s-ahb";
+		clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+			<&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+		pinctrl-names = "default", "sleep";
+		pinctrl-0 = <&qup_1_spi_2_active>;
+		pinctrl-1 = <&qup_1_spi_2_sleep>;
+		interrupts = <GIC_SPI 354 0>;
+		spi-max-frequency = <19200000>;
+
+		dev@0 {
+			compatible = "dummy,slave";
+			reg = <0>;
+			spi-max-frequency = <9600000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index ad4adf0..d20a7cb 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -43,7 +43,7 @@
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk" is an optional clock.
+   property. "cfg_ahb_clk" and "com_aux_clk" are optional clocks.
  - qcom,vbus-valid-override: If present, indicates VBUS pin is not connected to
    the USB PHY and the controller must rely on external VBUS notification in
    order to manually relay the notification to the SSPHY.
@@ -91,6 +91,10 @@
 	"vdd" : vdd supply for digital circuit operation
 	"vdda18" : 1.8v high-voltage analog supply
 	"vdda33" : 3.3v high-voltage analog supply
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. "ref_clk_src" is a mandatory clock.
  - qcom,vdd-voltage-level: This property must be a list of three integer
    values (no, min, max) where each value represents either a voltage in
    microvolts or a value corresponding to voltage corner
@@ -109,7 +113,7 @@
  - clocks: a list of phandles to the PHY clocks. Use as per
    Documentation/devicetree/bindings/clock/clock-bindings.txt
  - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
-   property. "cfg_ahb_clk", "ref_clk_src" and "ref_clk" are optional clocks.
+   property. "cfg_ahb_clk" and "ref_clk" are optional clocks.
  - qcom,qusb-phy-init-seq: QUSB PHY initialization sequence with value,reg pair.
  - qcom,qusb-phy-host-init-seq: QUSB PHY initialization sequence for host mode
    with value,reg pair.
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
index 54d342c..ab2bbe4 100644
--- a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -43,6 +43,9 @@
 - qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
 			from an external regulator. If omitted, then it is
 			assumed it is connected to VBUS.
+- qcom,default-sink-caps: List of 32-bit values representing the nominal sink
+			capabilities in voltage (millivolts) and current
+			(milliamps) pairs.
 
 Example:
 	qcom,qpnp-pdphy@1700 {
@@ -64,4 +67,8 @@
 				  "msg-tx-failed",
 				  "msg-tx-discarded",
 				  "msg-rx-discarded";
+
+		qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+					 <9000 3000>, /* 9V @ 3A */
+					 <12000 2250>; /* 12V @ 2.25A */
 	};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4d6cdcf..9877ebf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -848,6 +848,12 @@
 			seconds. Defaults to 10*60 = 10mins. A value of 0
 			disables the blank timer.
 
+	core_ctl_disable_cpumask= [SMP]
+			Exempt the CPUs from being managed by core_ctl.
+			core_ctl operates on a cluster basis. So all the
+			CPUs in a given cluster must be specified to disable
+			core_ctl for that cluster.
+
 	coredump_filter=
 			[KNL] Change the default value for
 			/proc/<pid>/coredump_filter.
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
index 09b7dc1..f485dc8 100644
--- a/Documentation/scheduler/sched-hmp.txt
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -910,7 +910,7 @@
 CPU. The same applies to nt_curr_runnable_sum and  nt_prev_runnable_sum.
 
 A 'new' task is defined as a task whose number of active windows since fork is
-less than sysctl_sched_new_task_windows. An active window is defined as a window
+less than SCHED_NEW_TASK_WINDOWS. An active window is defined as a window
 where a task was observed to be runnable.
 
 *** 6.2 Per-task window-based stats
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index f34a8ee6..f40b965 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -87,6 +87,13 @@
 pages_unshared   - how many pages unique but repeatedly checked for merging
 pages_volatile   - how many pages changing too fast to be placed in a tree
 full_scans       - how many times all mergeable areas have been scanned
+deferred_timer   - whether to use deferred timers or not
+                 e.g. "echo 1 > /sys/kernel/mm/ksm/deferred_timer"
+                 Default: 0 (means, we are not using deferred timers. Users
+		 might want to set the deferred_timer option if they do not want
+		 the ksm thread to wake up the CPU to carry out ksm activities, thus
+		 gaining on battery while compromising slightly on memory
+		 that could have been saved.)
 
 A high ratio of pages_sharing to pages_shared indicates good sharing, but
 a high ratio of pages_unshared to pages_sharing indicates wasted effort.
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 8465241..7ca432e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -252,6 +252,10 @@
 tells us that SLUB has restored the Redzone to its proper value and then
 system operations continue.
 
+If it is required to only report the details of the issue and then panic
+immediately afterwards (in order to possibly catch any scribblers), one can
+set the CONFIG_DEBUG_SLUB_PANIC_ON option.
+
 Emergency operations:
 ---------------------
 
diff --git a/Makefile b/Makefile
index 5bb6e42..71a7187 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 15
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6293973..d0d096e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -478,6 +478,16 @@
 
 	  If unsure, say Y.
 
+config QCOM_QDF2400_ERRATUM_0065
+	bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size"
+	default y
+	help
+	  On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports
+	  ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have
+	  been indicated as 16Bytes (0xf), not 8Bytes (0x7).
+
+	  If unsure, say Y.
+
 endmenu
 
 
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 54acae6..3eea0af 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -5,7 +5,11 @@
 dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \
 	sdm845-rumi.dtb \
 	sdm845-mtp.dtb \
-	sdm845-cdp.dtb
+	sdm845-cdp.dtb \
+	sdm845-v2-rumi.dtb \
+	sdm845-v2-mtp.dtb \
+	sdm845-v2-cdp.dtb \
+	sdm845-qrd.dtb
 
 dtb-$(CONFIG_ARCH_SDM830) += sdm830-sim.dtb \
 	sdm830-rumi.dtb \
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
new file mode 100644
index 0000000..c6dfc8d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -0,0 +1,261 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_cmd: qcom,mdss_dsi_nt35597_dsc_cmd_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 cmd mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1440>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <8>;
+		qcom,mdss-dsi-v-front-porch = <10>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1e
+			15 01 00 00 00 00 02 0b 73
+			15 01 00 00 00 00 02 0c 73
+			15 01 00 00 00 00 02 0e b0
+			15 01 00 00 00 00 02 0f ae
+			15 01 00 00 00 00 02 11 b8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5a 00
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 80
+			15 01 00 00 00 00 02 5d 81
+			15 01 00 00 00 00 02 5e 00
+			15 01 00 00 00 00 02 5f 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1c
+			15 01 00 00 00 00 02 01 0b
+			15 01 00 00 00 00 02 02 0c
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0f
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8a
+			15 01 00 00 00 00 02 0a 13
+			15 01 00 00 00 00 02 0b 13
+			15 01 00 00 00 00 02 0c 15
+			15 01 00 00 00 00 02 0d 15
+			15 01 00 00 00 00 02 0e 17
+			15 01 00 00 00 00 02 0f 17
+			15 01 00 00 00 00 02 10 1c
+			15 01 00 00 00 00 02 11 0b
+			15 01 00 00 00 00 02 12 0c
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0f
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8a
+			15 01 00 00 00 00 02 1a 13
+			15 01 00 00 00 00 02 1b 13
+			15 01 00 00 00 00 02 1c 15
+			15 01 00 00 00 00 02 1d 15
+			15 01 00 00 00 00 02 1e 17
+			15 01 00 00 00 00 02 1f 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6d
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 e0 00
+			15 01 00 00 00 00 02 dc 21
+			15 01 00 00 00 00 02 dd 22
+			15 01 00 00 00 00 02 de 07
+			15 01 00 00 00 00 02 df 07
+			15 01 00 00 00 00 02 e3 6D
+			15 01 00 00 00 00 02 e1 07
+			15 01 00 00 00 00 02 e2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 d8
+			15 01 00 00 00 00 02 2a 2a
+			/* CLK */
+			15 01 00 00 00 00 02 4b 03
+			15 01 00 00 00 00 02 4c 11
+			15 01 00 00 00 00 02 4d 10
+			15 01 00 00 00 00 02 4e 01
+			15 01 00 00 00 00 02 4f 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5b 43
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5f 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7a 80
+			15 01 00 00 00 00 02 7b 91
+			15 01 00 00 00 00 02 7c D8
+			15 01 00 00 00 00 02 7d 60
+			15 01 00 00 00 00 02 7f 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 b3 C0
+			15 01 00 00 00 00 02 b4 00
+			15 01 00 00 00 00 02 b5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0a
+			15 01 00 00 00 00 02 94 0a
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8a 00
+			15 01 00 00 00 00 02 9b ff
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9d b0
+			15 01 00 00 00 00 02 9f 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 ec 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* VESA DSC PPS settings(1440x2560 slide 16H) */
+			39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68
+					01 bb 00 0a 06 67 04 c5
+			39 01 00 00 00 00 03 c2 10 f0
+			/* C0h = 0x0(2 Port SDC)0x01(1 PortA FBC)
+			 * 0x02(MTK) 0x03(1 PortA VESA)
+			 */
+			15 01 00 00 00 00 02 c0 03
+			/* VBP+VSA=,VFP = 10H */
+			15 01 00 00 00 00 04 3b 03 0a 0a
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 e5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 bb 10
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 fb 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+			05 01 00 00 78 00 02 10 00];
+
+		qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_nt35597_truly_dsc_cmd_config0>;
+
+		dsi_nt35597_truly_dsc_cmd_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_cmd_config1: config1 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_cmd_config2: config2 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
new file mode 100644
index 0000000..334120a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -0,0 +1,248 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_video: qcom,mdss_dsi_nt35597_dsc_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 video mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1440>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <8>;
+		qcom,mdss-dsi-v-front-porch = <10>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1e
+			15 01 00 00 00 00 02 0b 73
+			15 01 00 00 00 00 02 0c 73
+			15 01 00 00 00 00 02 0e b0
+			15 01 00 00 00 00 02 0f aE
+			15 01 00 00 00 00 02 11 b8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5a 00
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 80
+			15 01 00 00 00 00 02 5d 81
+			15 01 00 00 00 00 02 5e 00
+			15 01 00 00 00 00 02 5f 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1c
+			15 01 00 00 00 00 02 01 0b
+			15 01 00 00 00 00 02 02 0c
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0f
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8a
+			15 01 00 00 00 00 02 0a 13
+			15 01 00 00 00 00 02 0b 13
+			15 01 00 00 00 00 02 0c 15
+			15 01 00 00 00 00 02 0d 15
+			15 01 00 00 00 00 02 0e 17
+			15 01 00 00 00 00 02 0f 17
+			15 01 00 00 00 00 02 10 1c
+			15 01 00 00 00 00 02 11 0b
+			15 01 00 00 00 00 02 12 0c
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0f
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8a
+			15 01 00 00 00 00 02 1a 13
+			15 01 00 00 00 00 02 1b 13
+			15 01 00 00 00 00 02 1c 15
+			15 01 00 00 00 00 02 1d 15
+			15 01 00 00 00 00 02 1e 17
+			15 01 00 00 00 00 02 1f 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6d
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 e0 00
+			15 01 00 00 00 00 02 dc 21
+			15 01 00 00 00 00 02 dd 22
+			15 01 00 00 00 00 02 de 07
+			15 01 00 00 00 00 02 df 07
+			15 01 00 00 00 00 02 e3 6d
+			15 01 00 00 00 00 02 e1 07
+			15 01 00 00 00 00 02 e2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 d8
+			15 01 00 00 00 00 02 2a 2a
+			/* CLK */
+			15 01 00 00 00 00 02 4b 03
+			15 01 00 00 00 00 02 4c 11
+			15 01 00 00 00 00 02 4d 10
+			15 01 00 00 00 00 02 4e 01
+			15 01 00 00 00 00 02 4f 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5b 43
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5f 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution:1440x2560*/
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7a 80
+			15 01 00 00 00 00 02 7b 91
+			15 01 00 00 00 00 02 7c d8
+			15 01 00 00 00 00 02 7d 60
+			15 01 00 00 00 00 02 7f 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 b3 c0
+			15 01 00 00 00 00 02 b4 00
+			15 01 00 00 00 00 02 b5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0a
+			15 01 00 00 00 00 02 94 0a
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8a 00
+			15 01 00 00 00 00 02 9b ff
+			/* IMGSWAP =1 @PortSwap=1 */
+			15 01 00 00 00 00 02 9d b0
+			15 01 00 00 00 00 02 9f 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 ec 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* VESA DSC PPS settings(1440x2560 slide 16H) */
+			39 01 00 00 00 00 11 c1 09 20 00 10 02 00 02 68 01
+				bb 00 0a 06 67 04 c5
+			39 01 00 00 00 00 03 c2 10 f0
+			/* C0h = 0x00(2 Port SDC); 0x01(1 PortA FBC);
+			 * 0x02(MTK); 0x03(1 PortA VESA)
+			 */
+			15 01 00 00 00 00 02 c0 03
+			/* VBP+VSA=,VFP = 10H */
+			39 01 00 00 00 00 04 3b 03 0a 0a
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 e5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 bb 03
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 fb 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_nt35597_truly_dsc_video_config0>;
+
+		dsi_nt35597_truly_dsc_video_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_video_config1: config1 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <1>; /* 3D Mux */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+
+		dsi_nt35597_truly_dsc_video_config2: config2 {
+			qcom,lm-split = <720 720>;
+			qcom,mdss-dsc-encoders = <2>; /* DSC Merge */
+			qcom,mdss-dsc-slice-height = <16>;
+			qcom,mdss-dsc-slice-width = <720>;
+			qcom,mdss-dsc-slice-per-pkt = <2>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
new file mode 100644
index 0000000..e4a0370
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -0,0 +1,220 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_cmd: qcom,mdss_dsi_nt35597_truly_wqxga_cmd{
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 cmd mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 FF 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1E
+			15 01 00 00 00 00 02 0B 73
+			15 01 00 00 00 00 02 0C 73
+			15 01 00 00 00 00 02 0E B0
+			15 01 00 00 00 00 02 0F AE
+			15 01 00 00 00 00 02 11 B8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5A 00
+			15 01 00 00 00 00 02 5B 01
+			15 01 00 00 00 00 02 5C 80
+			15 01 00 00 00 00 02 5D 81
+			15 01 00 00 00 00 02 5E 00
+			15 01 00 00 00 00 02 5F 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 1C
+			15 01 00 00 00 00 02 01 0B
+			15 01 00 00 00 00 02 02 0C
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0F
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8A
+			15 01 00 00 00 00 02 0A 13
+			15 01 00 00 00 00 02 0B 13
+			15 01 00 00 00 00 02 0C 15
+			15 01 00 00 00 00 02 0D 15
+			15 01 00 00 00 00 02 0E 17
+			15 01 00 00 00 00 02 0F 17
+			15 01 00 00 00 00 02 10 1C
+			15 01 00 00 00 00 02 11 0B
+			15 01 00 00 00 00 02 12 0C
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0F
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8A
+			15 01 00 00 00 00 02 1A 13
+			15 01 00 00 00 00 02 1B 13
+			15 01 00 00 00 00 02 1C 15
+			15 01 00 00 00 00 02 1D 15
+			15 01 00 00 00 00 02 1E 17
+			15 01 00 00 00 00 02 1F 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6D
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 E0 00
+			15 01 00 00 00 00 02 DC 21
+			15 01 00 00 00 00 02 DD 22
+			15 01 00 00 00 00 02 DE 07
+			15 01 00 00 00 00 02 DF 07
+			15 01 00 00 00 00 02 E3 6D
+			15 01 00 00 00 00 02 E1 07
+			15 01 00 00 00 00 02 E2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 D8
+			15 01 00 00 00 00 02 2A 2A
+			/* CLK */
+			15 01 00 00 00 00 02 4B 03
+			15 01 00 00 00 00 02 4C 11
+			15 01 00 00 00 00 02 4D 10
+			15 01 00 00 00 00 02 4E 01
+			15 01 00 00 00 00 02 4F 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5B 43
+			15 01 00 00 00 00 02 5C 00
+			15 01 00 00 00 00 02 5F 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution: 1440x2560 */
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7A 80
+			15 01 00 00 00 00 02 7B 91
+			15 01 00 00 00 00 02 7C D8
+			15 01 00 00 00 00 02 7D 60
+			15 01 00 00 00 00 02 7F 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 B3 C0
+			15 01 00 00 00 00 02 B4 00
+			15 01 00 00 00 00 02 B5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0A
+			15 01 00 00 00 00 02 94 0A
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8A 00
+			15 01 00 00 00 00 02 9B FF
+			/* IMGSWAP = 1 when PortSwap = 1 */
+			15 01 00 00 00 00 02 9D B0
+			15 01 00 00 00 00 02 9F 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 EC 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 ff 10
+			/* 3Bh porch setting: VBP+VSA = 03h, VFP = 0Ah (10H) */
+			15 01 00 00 00 00 04 3B 03 0A 0A
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 E5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 BB 10
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 FB 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+			05 01 00 00 78 00 02 10 00];
+
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+		qcom,config-select = <&dsi_dual_nt35597_truly_cmd_config0>;
+
+		dsi_dual_nt35597_truly_cmd_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
new file mode 100644
index 0000000..d6ef3d8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -0,0 +1,210 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_video: qcom,mdss_dsi_nt35597_wqxga_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 video mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <32>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0x3ff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			/* CMD2_P0 */
+			15 01 00 00 00 00 02 FF 20
+			15 01 00 00 00 00 02 FB 01
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 02 45
+			15 01 00 00 00 00 02 05 40
+			15 01 00 00 00 00 02 06 19
+			15 01 00 00 00 00 02 07 1E
+			15 01 00 00 00 00 02 0B 73
+			15 01 00 00 00 00 02 0C 73
+			15 01 00 00 00 00 02 0E B0
+			15 01 00 00 00 00 02 0F AE
+			15 01 00 00 00 00 02 11 B8
+			15 01 00 00 00 00 02 13 00
+			15 01 00 00 00 00 02 58 80
+			15 01 00 00 00 00 02 59 01
+			15 01 00 00 00 00 02 5A 00
+			15 01 00 00 00 00 02 5B 01
+			15 01 00 00 00 00 02 5C 80
+			15 01 00 00 00 00 02 5D 81
+			15 01 00 00 00 00 02 5E 00
+			15 01 00 00 00 00 02 5F 01
+			15 01 00 00 00 00 02 72 31
+			15 01 00 00 00 00 02 68 03
+			/* CMD2_P4 */
+			15 01 00 00 00 00 02 FF 24
+			15 01 00 00 00 00 02 FB 01
+			15 01 00 00 00 00 02 00 1C
+			15 01 00 00 00 00 02 01 0B
+			15 01 00 00 00 00 02 02 0C
+			15 01 00 00 00 00 02 03 01
+			15 01 00 00 00 00 02 04 0F
+			15 01 00 00 00 00 02 05 10
+			15 01 00 00 00 00 02 06 10
+			15 01 00 00 00 00 02 07 10
+			15 01 00 00 00 00 02 08 89
+			15 01 00 00 00 00 02 09 8A
+			15 01 00 00 00 00 02 0A 13
+			15 01 00 00 00 00 02 0B 13
+			15 01 00 00 00 00 02 0C 15
+			15 01 00 00 00 00 02 0D 15
+			15 01 00 00 00 00 02 0E 17
+			15 01 00 00 00 00 02 0F 17
+			15 01 00 00 00 00 02 10 1C
+			15 01 00 00 00 00 02 11 0B
+			15 01 00 00 00 00 02 12 0C
+			15 01 00 00 00 00 02 13 01
+			15 01 00 00 00 00 02 14 0F
+			15 01 00 00 00 00 02 15 10
+			15 01 00 00 00 00 02 16 10
+			15 01 00 00 00 00 02 17 10
+			15 01 00 00 00 00 02 18 89
+			15 01 00 00 00 00 02 19 8A
+			15 01 00 00 00 00 02 1A 13
+			15 01 00 00 00 00 02 1B 13
+			15 01 00 00 00 00 02 1C 15
+			15 01 00 00 00 00 02 1D 15
+			15 01 00 00 00 00 02 1E 17
+			15 01 00 00 00 00 02 1F 17
+			/* STV */
+			15 01 00 00 00 00 02 20 40
+			15 01 00 00 00 00 02 21 01
+			15 01 00 00 00 00 02 22 00
+			15 01 00 00 00 00 02 23 40
+			15 01 00 00 00 00 02 24 40
+			15 01 00 00 00 00 02 25 6D
+			15 01 00 00 00 00 02 26 40
+			15 01 00 00 00 00 02 27 40
+			/* Vend */
+			15 01 00 00 00 00 02 E0 00
+			15 01 00 00 00 00 02 DC 21
+			15 01 00 00 00 00 02 DD 22
+			15 01 00 00 00 00 02 DE 07
+			15 01 00 00 00 00 02 DF 07
+			15 01 00 00 00 00 02 E3 6D
+			15 01 00 00 00 00 02 E1 07
+			15 01 00 00 00 00 02 E2 07
+			/* UD */
+			15 01 00 00 00 00 02 29 D8
+			15 01 00 00 00 00 02 2A 2A
+			/* CLK */
+			15 01 00 00 00 00 02 4B 03
+			15 01 00 00 00 00 02 4C 11
+			15 01 00 00 00 00 02 4D 10
+			15 01 00 00 00 00 02 4E 01
+			15 01 00 00 00 00 02 4F 01
+			15 01 00 00 00 00 02 50 10
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 52 80
+			15 01 00 00 00 00 02 53 00
+			15 01 00 00 00 00 02 56 00
+			15 01 00 00 00 00 02 54 07
+			15 01 00 00 00 00 02 58 07
+			15 01 00 00 00 00 02 55 25
+			/* Reset XDONB */
+			15 01 00 00 00 00 02 5B 43
+			15 01 00 00 00 00 02 5C 00
+			15 01 00 00 00 00 02 5F 73
+			15 01 00 00 00 00 02 60 73
+			15 01 00 00 00 00 02 63 22
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 67 08
+			15 01 00 00 00 00 02 68 04
+			/* Resolution: 1440x2560 */
+			15 01 00 00 00 00 02 72 02
+			/* mux */
+			15 01 00 00 00 00 02 7A 80
+			15 01 00 00 00 00 02 7B 91
+			15 01 00 00 00 00 02 7C D8
+			15 01 00 00 00 00 02 7D 60
+			15 01 00 00 00 00 02 7F 15
+			15 01 00 00 00 00 02 75 15
+			/* ABOFF */
+			15 01 00 00 00 00 02 B3 C0
+			15 01 00 00 00 00 02 B4 00
+			15 01 00 00 00 00 02 B5 00
+			/* Source EQ */
+			15 01 00 00 00 00 02 78 00
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 80 00
+			15 01 00 00 00 00 02 83 00
+			/* FP BP */
+			15 01 00 00 00 00 02 93 0A
+			15 01 00 00 00 00 02 94 0A
+			/* Inversion Type */
+			15 01 00 00 00 00 02 8A 00
+			15 01 00 00 00 00 02 9B FF
+			/* IMGSWAP = 1 when PortSwap = 1 */
+			15 01 00 00 00 00 02 9D B0
+			15 01 00 00 00 00 02 9F 63
+			15 01 00 00 00 00 02 98 10
+			/* FRM */
+			15 01 00 00 00 00 02 EC 00
+			/* CMD1 */
+			15 01 00 00 00 00 02 FF 10
+			/* 3Bh porch setting: VBP+VSA = 03h, VFP = 0Ah (10H) */
+			15 01 00 00 00 00 04 3B 03 0A 0A
+			/* FTE on */
+			15 01 00 00 00 00 02 35 00
+			/* EN_BK =1(auto black) */
+			15 01 00 00 00 00 02 E5 01
+			/* CMD mode(10) VDO mode(03) */
+			15 01 00 00 00 00 02 BB 03
+			/* Non Reload MTP */
+			15 01 00 00 00 00 02 FB 01
+			/* SlpOut + DispOn */
+			05 01 00 00 78 00 02 11 00
+			05 01 00 00 78 00 02 29 00
+			];
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
+
+		dsi_dual_nt35597_truly_video_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+
+
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..834a08fd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_s6e3ha3_amoled_cmd: qcom,mdss_dsi_s6e3ha3_amoled_wqhd_cmd{
+		qcom,mdss-dsi-panel-name =
+			"Dual s6e3ha3 amoled cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <40>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <31>;
+		qcom,mdss-dsi-v-front-porch = <30>;
+		qcom,mdss-dsi-v-pulse-width = <8>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-on-command = [05 01 00 00 05 00 02 11 00
+					39 01 00 00 00 00 05 2a 00 00 05 9f
+					39 01 00 00 00 00 05 2b 00 00 09 ff
+					39 01 00 00 00 00 03 f0 5a 5a
+					39 01 00 00 00 00 02 b0 10
+					39 01 00 00 00 00 02 b5 a0
+					39 01 00 00 00 00 02 c4 03
+					39 01 00 00 00 00 0a
+						f6 42 57 37 00 aa cc d0 00 00
+					39 01 00 00 00 00 02 f9 03
+					39 01 00 00 00 00 14
+						c2 00 00 d8 d8 00 80 2b 05 08
+						0e 07 0b 05 0d 0a 15 13 20 1e
+					39 01 00 00 78 00 03 f0 a5 a5
+					39 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 02 53 20
+					39 01 00 00 00 00 02 51 60
+					05 01 00 00 05 00 02 29 00];
+		qcom,mdss-dsi-off-command = [05 01 00 00 3c 00 02 28 00
+					05 01 00 00 b4 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-lp-mode-on = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 10
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb cd
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 02
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 09
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 c9
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 c0
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 aa
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 30
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-lp-mode-off = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb 4d
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 04
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 06
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 05
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 b8
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 80
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 8a
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 80
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <255>;
+		qcom,mdss-pan-physical-width-dimension = <68>;
+		qcom,mdss-pan-physical-height-dimension = <122>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
new file mode 100644
index 0000000..aa52083
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -0,0 +1,77 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_1080_cmd: qcom,mdss_dsi_sharp_1080p_cmd {
+		qcom,mdss-dsi-panel-name = "sharp 1080p cmd mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-destination = "display_1";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-panel-clockrate = <850000000>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <0>;
+		qcom,mdss-dsi-h-back-porch = <0>;
+		qcom,mdss-dsi-h-pulse-width = <0>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <0>;
+		qcom,mdss-dsi-v-front-porch = <0>;
+		qcom,mdss-dsi-v-pulse-width = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-on-command = [
+			15 01 00 00 00 00 02 bb 10
+			15 01 00 00 00 00 02 b0 03
+			05 01 00 00 78 00 01 11
+			15 01 00 00 00 00 02 51 ff
+			15 01 00 00 00 00 02 53 24
+			15 01 00 00 00 00 02 ff 23
+			15 01 00 00 00 00 02 08 05
+			15 01 00 00 00 00 02 46 90
+			15 01 00 00 00 00 02 ff 10
+			15 01 00 00 00 00 02 ff f0
+			15 01 00 00 00 00 02 92 01
+			15 01 00 00 00 00 02 ff 10
+			05 01 00 00 28 00 01 29];
+		qcom,mdss-dsi-off-command = [
+			05 01 00 00 10 00 01 28
+			05 01 00 00 40 00 01 10];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
new file mode 100644
index 0000000..25c949c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -0,0 +1,93 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_cmd: qcom,mdss_dsi_sharp_4k_dsc_cmd {
+		qcom,mdss-dsi-panel-name = "Sharp 4k cmd mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <3840>;
+		qcom,mdss-dsi-h-front-porch = <30>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
+			0f 03 19 01 97
+			39 01 00 00 00 00 03 92 10 f0
+			15 01 00 00 00 00 02 90 03
+			15 01 00 00 00 00 02 03 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 04
+			15 01 00 00 00 00 02 c0 03
+			39 01 00 00 00 00 06 f0 55 aa 52 08 07
+			15 01 00 00 00 00 02 ef 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 00
+			15 01 00 00 00 00 02 b4 01
+			15 01 00 00 00 00 02 35 00
+			39 01 00 00 00 00 06 f0 55 aa 52 08 01
+			39 01 00 00 00 00 05 ff aa 55 a5 80
+			15 01 00 00 00 00 02 6f 01
+			15 01 00 00 00 00 02 f3 10
+			39 01 00 00 00 00 05 ff aa 55 a5 00
+			05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
+			05 01 00 00 78 00 01 29 /* display on + delay 120ms */
+			];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_sharp_dsc_cmd_config0>;
+
+		dsi_sharp_dsc_cmd_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <32>;
+			qcom,mdss-dsc-slice-width = <1080>;
+			qcom,mdss-dsc-slice-per-pkt = <1>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
new file mode 100644
index 0000000..cc093d6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -0,0 +1,86 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_video: qcom,mdss_dsi_sharp_4k_dsc_video {
+		qcom,mdss-dsi-panel-name = "Sharp 4k video mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1080>;
+		qcom,mdss-dsi-panel-height = <3840>;
+		qcom,mdss-dsi-h-front-porch = <30>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <7>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <1>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 00 00 11 91 09 20 00 20 02 00 03 1c 04 21 00
+			0f 03 19 01 97
+			39 01 00 00 00 00 03 92 10 f0
+			15 01 00 00 00 00 02 90 03
+			15 01 00 00 00 00 02 03 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 04
+			15 01 00 00 00 00 02 c0 03
+			39 01 00 00 00 00 06 f0 55 aa 52 08 07
+			15 01 00 00 00 00 02 ef 01
+			39 01 00 00 00 00 06 f0 55 aa 52 08 00
+			15 01 00 00 00 00 02 b4 10
+			15 01 00 00 00 00 02 35 00
+			39 01 00 00 00 00 06 f0 55 aa 52 08 01
+			39 01 00 00 00 00 05 ff aa 55 a5 80
+			15 01 00 00 00 00 02 6f 01
+			15 01 00 00 00 00 02 f3 10
+			39 01 00 00 00 00 05 ff aa 55 a5 00
+			05 01 00 00 78 00 01 11 /* sleep out + delay 120ms */
+			05 01 00 00 78 00 01 29 /* display on + delay 120ms */
+			];
+
+		qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
+				 05 01 00 00 78 00 02 10 00];
+
+		qcom,compression-mode = "dsc";
+		qcom,config-select = <&dsi_sharp_dsc_video_config0>;
+
+		dsi_sharp_dsc_video_config0: config0 {
+			qcom,mdss-dsc-encoders = <1>;
+			qcom,mdss-dsc-slice-height = <32>;
+			qcom,mdss-dsc-slice-width = <1080>;
+			qcom,mdss-dsc-slice-per-pkt = <1>;
+
+			qcom,mdss-dsc-bit-per-component = <8>;
+			qcom,mdss-dsc-bit-per-pixel = <8>;
+			qcom,mdss-dsc-block-prediction-enable;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
new file mode 100644
index 0000000..2071649
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
@@ -0,0 +1,632 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sharp_1080_120hz_cmd: qcom,mdss_dual_sharp_1080p_120hz_cmd {
+		qcom,mdss-dsi-panel-name =
+			"sharp 1080p 120hz dual dsi cmd mode panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <120>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <540>;
+		qcom,mdss-dsi-panel-height = <1920>;
+		qcom,mdss-dsi-h-front-porch = <28>;
+		qcom,mdss-dsi-h-back-porch = <4>;
+		qcom,mdss-dsi-h-pulse-width = <4>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <12>;
+		qcom,mdss-dsi-v-front-porch = <12>;
+		qcom,mdss-dsi-v-pulse-width = <2>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 10>;
+		qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 ba 07
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 bb 10
+			15 01 00 00 00 00 02 d9 00
+			15 01 00 00 00 00 02 ef 70
+			15 01 00 00 00 00 02 f7 80
+			39 01 00 00 00 00 06 3b 03 0e 0c 08 1c
+			15 01 00 00 00 00 02 e9 0e
+			15 01 00 00 00 00 02 ea 0c
+			15 01 00 00 00 00 02 35 00
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 59 6a
+			15 01 00 00 00 00 02 0b 1b
+			15 01 00 00 00 00 02 61 f7
+			15 01 00 00 00 00 02 62 6c
+			15 01 00 00 00 00 02 00 01
+			15 01 00 00 00 00 02 01 55
+			15 01 00 00 00 00 02 04 c8
+			15 01 00 00 00 00 02 05 1a
+			15 01 00 00 00 00 02 0d 93
+			15 01 00 00 00 00 02 0e 93
+			15 01 00 00 00 00 02 0f 7e
+			15 01 00 00 00 00 02 06 69
+			15 01 00 00 00 00 02 07 bc
+			15 01 00 00 00 00 02 10 03
+			15 01 00 00 00 00 02 11 64
+			15 01 00 00 00 00 02 12 5a
+			15 01 00 00 00 00 02 13 40
+			15 01 00 00 00 00 02 14 40
+			15 01 00 00 00 00 02 15 00
+			15 01 00 00 00 00 02 33 13
+			15 01 00 00 00 00 02 5a 40
+			15 01 00 00 00 00 02 5b 40
+			15 01 00 00 00 00 02 5e 80
+			15 01 00 00 00 00 02 ff 24
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 80
+			15 01 00 00 00 00 02 14 80
+			15 01 00 00 00 00 02 01 80
+			15 01 00 00 00 00 02 15 80
+			15 01 00 00 00 00 02 02 80
+			15 01 00 00 00 00 02 16 80
+			15 01 00 00 00 00 02 03 0a
+			15 01 00 00 00 00 02 17 0c
+			15 01 00 00 00 00 02 04 06
+			15 01 00 00 00 00 02 18 08
+			15 01 00 00 00 00 02 05 80
+			15 01 00 00 00 00 02 19 80
+			15 01 00 00 00 00 02 06 80
+			15 01 00 00 00 00 02 1a 80
+			15 01 00 00 00 00 02 07 80
+			15 01 00 00 00 00 02 1b 80
+			15 01 00 00 00 00 02 08 80
+			15 01 00 00 00 00 02 1c 80
+			15 01 00 00 00 00 02 09 80
+			15 01 00 00 00 00 02 1d 80
+			15 01 00 00 00 00 02 0a 80
+			15 01 00 00 00 00 02 1e 80
+			15 01 00 00 00 00 02 0b 1a
+			15 01 00 00 00 00 02 1f 1b
+			15 01 00 00 00 00 02 0c 16
+			15 01 00 00 00 00 02 20 17
+			15 01 00 00 00 00 02 0d 1c
+			15 01 00 00 00 00 02 21 1d
+			15 01 00 00 00 00 02 0e 18
+			15 01 00 00 00 00 02 22 19
+			15 01 00 00 00 00 02 0f 0e
+			15 01 00 00 00 00 02 23 10
+			15 01 00 00 00 00 02 10 80
+			15 01 00 00 00 00 02 24 80
+			15 01 00 00 00 00 02 11 80
+			15 01 00 00 00 00 02 25 80
+			15 01 00 00 00 00 02 12 80
+			15 01 00 00 00 00 02 26 80
+			15 01 00 00 00 00 02 13 80
+			15 01 00 00 00 00 02 27 80
+			15 01 00 00 00 00 02 74 ff
+			15 01 00 00 00 00 02 75 ff
+			15 01 00 00 00 00 02 8d 00
+			15 01 00 00 00 00 02 8e 00
+			15 01 00 00 00 00 02 8f 9c
+			15 01 00 00 00 00 02 90 0c
+			15 01 00 00 00 00 02 91 0e
+			15 01 00 00 00 00 02 d6 00
+			15 01 00 00 00 00 02 d7 20
+			15 01 00 00 00 00 02 d8 00
+			15 01 00 00 00 00 02 d9 88
+			15 01 00 00 00 00 02 e5 05
+			15 01 00 00 00 00 02 e6 10
+			15 01 00 00 00 00 02 54 06
+			15 01 00 00 00 00 02 55 05
+			15 01 00 00 00 00 02 56 04
+			15 01 00 00 00 00 02 58 03
+			15 01 00 00 00 00 02 59 33
+			15 01 00 00 00 00 02 5a 33
+			15 01 00 00 00 00 02 5b 01
+			15 01 00 00 00 00 02 5c 00
+			15 01 00 00 00 00 02 5d 01
+			15 01 00 00 00 00 02 5e 0a
+			15 01 00 00 00 00 02 5f 0a
+			15 01 00 00 00 00 02 60 0a
+			15 01 00 00 00 00 02 61 0a
+			15 01 00 00 00 00 02 62 10
+			15 01 00 00 00 00 02 63 01
+			15 01 00 00 00 00 02 64 00
+			15 01 00 00 00 00 02 65 00
+			15 01 00 00 00 00 02 ef 00
+			15 01 00 00 00 00 02 f0 00
+			15 01 00 00 00 00 02 6d 20
+			15 01 00 00 00 00 02 66 44
+			15 01 00 00 00 00 02 68 01
+			15 01 00 00 00 00 02 69 00
+			15 01 00 00 00 00 02 67 11
+			15 01 00 00 00 00 02 6a 06
+			15 01 00 00 00 00 02 6b 31
+			15 01 00 00 00 00 02 6c 90
+			15 01 00 00 00 00 02 ab c3
+			15 01 00 00 00 00 02 b1 49
+			15 01 00 00 00 00 02 aa 80
+			15 01 00 00 00 00 02 b0 90
+			15 01 00 00 00 00 02 b2 a4
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 23
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 00
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 00
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba 00
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc 00
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be 00
+			15 01 00 00 00 00 02 bf 00
+			15 01 00 00 00 00 02 c0 00
+			15 01 00 00 00 00 02 c7 40
+			15 01 00 00 00 00 02 c9 00
+			15 01 00 00 00 00 02 c1 2a
+			15 01 00 00 00 00 02 c2 2a
+			15 01 00 00 00 00 02 c3 00
+			15 01 00 00 00 00 02 c4 00
+			15 01 00 00 00 00 02 c5 00
+			15 01 00 00 00 00 02 c6 00
+			15 01 00 00 00 00 02 c8 ab
+			15 01 00 00 00 00 02 ca 00
+			15 01 00 00 00 00 02 cb 00
+			15 01 00 00 00 00 02 cc 20
+			15 01 00 00 00 00 02 cd 40
+			15 01 00 00 00 00 02 ce a8
+			15 01 00 00 00 00 02 cf a8
+			15 01 00 00 00 00 02 d0 00
+			15 01 00 00 00 00 02 d1 00
+			15 01 00 00 00 00 02 d2 00
+			15 01 00 00 00 00 02 d3 00
+			15 01 00 00 00 00 02 af 01
+			15 01 00 00 00 00 02 a4 1e
+			15 01 00 00 00 00 02 95 41
+			15 01 00 00 00 00 02 96 03
+			15 01 00 00 00 00 02 98 00
+			15 01 00 00 00 00 02 9a 9a
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9d 80
+			15 01 00 00 00 00 02 ff 26
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 fa d0
+			15 01 00 00 00 00 02 6b 80
+			15 01 00 00 00 00 02 6c 5c
+			15 01 00 00 00 00 02 6d 0c
+			15 01 00 00 00 00 02 6e 0e
+			15 01 00 00 00 00 02 58 01
+			15 01 00 00 00 00 02 59 15
+			15 01 00 00 00 00 02 5a 01
+			15 01 00 00 00 00 02 5b 00
+			15 01 00 00 00 00 02 5c 01
+			15 01 00 00 00 00 02 5d 2b
+			15 01 00 00 00 00 02 74 00
+			15 01 00 00 00 00 02 75 ba
+			15 01 00 00 00 00 02 81 0a
+			15 01 00 00 00 00 02 4e 81
+			15 01 00 00 00 00 02 4f 83
+			15 01 00 00 00 00 02 51 00
+			15 01 00 00 00 00 02 53 4d
+			15 01 00 00 00 00 02 54 03
+			15 01 00 00 00 00 02 ff e0
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 b2 81
+			15 01 00 00 00 00 02 62 28
+			15 01 00 00 00 00 02 a2 09
+			15 01 00 00 00 00 02 b3 01
+			15 01 00 00 00 00 02 ed 00
+			15 01 00 00 00 00 02 ff 10
+			05 01 00 00 78 00 01 11
+			15 01 00 00 00 00 02 ff 20
+			15 01 00 00 00 00 02 75 00
+			15 01 00 00 00 00 02 76 71
+			15 01 00 00 00 00 02 77 00
+			15 01 00 00 00 00 02 78 84
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 7a a5
+			15 01 00 00 00 00 02 7b 00
+			15 01 00 00 00 00 02 7c bb
+			15 01 00 00 00 00 02 7d 00
+			15 01 00 00 00 00 02 7e ce
+			15 01 00 00 00 00 02 7f 00
+			15 01 00 00 00 00 02 80 e0
+			15 01 00 00 00 00 02 81 00
+			15 01 00 00 00 00 02 82 ef
+			15 01 00 00 00 00 02 83 00
+			15 01 00 00 00 00 02 84 ff
+			15 01 00 00 00 00 02 85 01
+			15 01 00 00 00 00 02 86 0b
+			15 01 00 00 00 00 02 87 01
+			15 01 00 00 00 00 02 88 38
+			15 01 00 00 00 00 02 89 01
+			15 01 00 00 00 00 02 8a 5b
+			15 01 00 00 00 00 02 8b 01
+			15 01 00 00 00 00 02 8c 95
+			15 01 00 00 00 00 02 8d 01
+			15 01 00 00 00 00 02 8e c4
+			15 01 00 00 00 00 02 8f 02
+			15 01 00 00 00 00 02 90 0d
+			15 01 00 00 00 00 02 91 02
+			15 01 00 00 00 00 02 92 4a
+			15 01 00 00 00 00 02 93 02
+			15 01 00 00 00 00 02 94 4c
+			15 01 00 00 00 00 02 95 02
+			15 01 00 00 00 00 02 96 85
+			15 01 00 00 00 00 02 97 02
+			15 01 00 00 00 00 02 98 c3
+			15 01 00 00 00 00 02 99 02
+			15 01 00 00 00 00 02 9a e9
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9c 16
+			15 01 00 00 00 00 02 9d 03
+			15 01 00 00 00 00 02 9e 34
+			15 01 00 00 00 00 02 9f 03
+			15 01 00 00 00 00 02 a0 56
+			15 01 00 00 00 00 02 a2 03
+			15 01 00 00 00 00 02 a3 62
+			15 01 00 00 00 00 02 a4 03
+			15 01 00 00 00 00 02 a5 6c
+			15 01 00 00 00 00 02 a6 03
+			15 01 00 00 00 00 02 a7 74
+			15 01 00 00 00 00 02 a9 03
+			15 01 00 00 00 00 02 aa 80
+			15 01 00 00 00 00 02 ab 03
+			15 01 00 00 00 00 02 ac 89
+			15 01 00 00 00 00 02 ad 03
+			15 01 00 00 00 00 02 ae 8b
+			15 01 00 00 00 00 02 af 03
+			15 01 00 00 00 00 02 b0 8d
+			15 01 00 00 00 00 02 b1 03
+			15 01 00 00 00 00 02 b2 8e
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 71
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 84
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 a5
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba bb
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc ce
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be e0
+			15 01 00 00 00 00 02 bf 00
+			15 01 00 00 00 00 02 c0 ef
+			15 01 00 00 00 00 02 c1 00
+			15 01 00 00 00 00 02 c2 ff
+			15 01 00 00 00 00 02 c3 01
+			15 01 00 00 00 00 02 c4 0b
+			15 01 00 00 00 00 02 c5 01
+			15 01 00 00 00 00 02 c6 38
+			15 01 00 00 00 00 02 c7 01
+			15 01 00 00 00 00 02 c8 5b
+			15 01 00 00 00 00 02 c9 01
+			15 01 00 00 00 00 02 ca 95
+			15 01 00 00 00 00 02 cb 01
+			15 01 00 00 00 00 02 cc c4
+			15 01 00 00 00 00 02 cd 02
+			15 01 00 00 00 00 02 ce 0d
+			15 01 00 00 00 00 02 cf 02
+			15 01 00 00 00 00 02 d0 4a
+			15 01 00 00 00 00 02 d1 02
+			15 01 00 00 00 00 02 d2 4c
+			15 01 00 00 00 00 02 d3 02
+			15 01 00 00 00 00 02 d4 85
+			15 01 00 00 00 00 02 d5 02
+			15 01 00 00 00 00 02 d6 c3
+			15 01 00 00 00 00 02 d7 02
+			15 01 00 00 00 00 02 d8 e9
+			15 01 00 00 00 00 02 d9 03
+			15 01 00 00 00 00 02 da 16
+			15 01 00 00 00 00 02 db 03
+			15 01 00 00 00 00 02 dc 34
+			15 01 00 00 00 00 02 dd 03
+			15 01 00 00 00 00 02 de 56
+			15 01 00 00 00 00 02 df 03
+			15 01 00 00 00 00 02 e0 62
+			15 01 00 00 00 00 02 e1 03
+			15 01 00 00 00 00 02 e2 6c
+			15 01 00 00 00 00 02 e3 03
+			15 01 00 00 00 00 02 e4 74
+			15 01 00 00 00 00 02 e5 03
+			15 01 00 00 00 00 02 e6 80
+			15 01 00 00 00 00 02 e7 03
+			15 01 00 00 00 00 02 e8 89
+			15 01 00 00 00 00 02 e9 03
+			15 01 00 00 00 00 02 ea 8b
+			15 01 00 00 00 00 02 eb 03
+			15 01 00 00 00 00 02 ec 8d
+			15 01 00 00 00 00 02 ed 03
+			15 01 00 00 00 00 02 ee 8e
+			15 01 00 00 00 00 02 ef 00
+			15 01 00 00 00 00 02 f0 71
+			15 01 00 00 00 00 02 f1 00
+			15 01 00 00 00 00 02 f2 84
+			15 01 00 00 00 00 02 f3 00
+			15 01 00 00 00 00 02 f4 a5
+			15 01 00 00 00 00 02 f5 00
+			15 01 00 00 00 00 02 f6 bb
+			15 01 00 00 00 00 02 f7 00
+			15 01 00 00 00 00 02 f8 ce
+			15 01 00 00 00 00 02 f9 00
+			15 01 00 00 00 00 02 fa e0
+			15 01 00 00 00 00 02 ff 21
+			15 01 00 00 00 00 02 fb 01
+			15 01 00 00 00 00 02 00 00
+			15 01 00 00 00 00 02 01 ef
+			15 01 00 00 00 00 02 02 00
+			15 01 00 00 00 00 02 03 ff
+			15 01 00 00 00 00 02 04 01
+			15 01 00 00 00 00 02 05 0b
+			15 01 00 00 00 00 02 06 01
+			15 01 00 00 00 00 02 07 38
+			15 01 00 00 00 00 02 08 01
+			15 01 00 00 00 00 02 09 5b
+			15 01 00 00 00 00 02 0a 01
+			15 01 00 00 00 00 02 0b 95
+			15 01 00 00 00 00 02 0c 01
+			15 01 00 00 00 00 02 0d c4
+			15 01 00 00 00 00 02 0e 02
+			15 01 00 00 00 00 02 0f 0d
+			15 01 00 00 00 00 02 10 02
+			15 01 00 00 00 00 02 11 4a
+			15 01 00 00 00 00 02 12 02
+			15 01 00 00 00 00 02 13 4c
+			15 01 00 00 00 00 02 14 02
+			15 01 00 00 00 00 02 15 85
+			15 01 00 00 00 00 02 16 02
+			15 01 00 00 00 00 02 17 c3
+			15 01 00 00 00 00 02 18 02
+			15 01 00 00 00 00 02 19 e9
+			15 01 00 00 00 00 02 1a 03
+			15 01 00 00 00 00 02 1b 16
+			15 01 00 00 00 00 02 1c 03
+			15 01 00 00 00 00 02 1d 34
+			15 01 00 00 00 00 02 1e 03
+			15 01 00 00 00 00 02 1f 56
+			15 01 00 00 00 00 02 20 03
+			15 01 00 00 00 00 02 21 62
+			15 01 00 00 00 00 02 22 03
+			15 01 00 00 00 00 02 23 6c
+			15 01 00 00 00 00 02 24 03
+			15 01 00 00 00 00 02 25 74
+			15 01 00 00 00 00 02 26 03
+			15 01 00 00 00 00 02 27 80
+			15 01 00 00 00 00 02 28 03
+			15 01 00 00 00 00 02 29 89
+			15 01 00 00 00 00 02 2a 03
+			15 01 00 00 00 00 02 2b 8b
+			15 01 00 00 00 00 02 2d 03
+			15 01 00 00 00 00 02 2f 8d
+			15 01 00 00 00 00 02 30 03
+			15 01 00 00 00 00 02 31 8e
+			15 01 00 00 00 00 02 32 00
+			15 01 00 00 00 00 02 33 71
+			15 01 00 00 00 00 02 34 00
+			15 01 00 00 00 00 02 35 84
+			15 01 00 00 00 00 02 36 00
+			15 01 00 00 00 00 02 37 a5
+			15 01 00 00 00 00 02 38 00
+			15 01 00 00 00 00 02 39 bb
+			15 01 00 00 00 00 02 3a 00
+			15 01 00 00 00 00 02 3b ce
+			15 01 00 00 00 00 02 3d 00
+			15 01 00 00 00 00 02 3f e0
+			15 01 00 00 00 00 02 40 00
+			15 01 00 00 00 00 02 41 ef
+			15 01 00 00 00 00 02 42 00
+			15 01 00 00 00 00 02 43 ff
+			15 01 00 00 00 00 02 44 01
+			15 01 00 00 00 00 02 45 0b
+			15 01 00 00 00 00 02 46 01
+			15 01 00 00 00 00 02 47 38
+			15 01 00 00 00 00 02 48 01
+			15 01 00 00 00 00 02 49 5b
+			15 01 00 00 00 00 02 4a 01
+			15 01 00 00 00 00 02 4b 95
+			15 01 00 00 00 00 02 4c 01
+			15 01 00 00 00 00 02 4d c4
+			15 01 00 00 00 00 02 4e 02
+			15 01 00 00 00 00 02 4f 0d
+			15 01 00 00 00 00 02 50 02
+			15 01 00 00 00 00 02 51 4a
+			15 01 00 00 00 00 02 52 02
+			15 01 00 00 00 00 02 53 4c
+			15 01 00 00 00 00 02 54 02
+			15 01 00 00 00 00 02 55 85
+			15 01 00 00 00 00 02 56 02
+			15 01 00 00 00 00 02 58 c3
+			15 01 00 00 00 00 02 59 02
+			15 01 00 00 00 00 02 5a e9
+			15 01 00 00 00 00 02 5b 03
+			15 01 00 00 00 00 02 5c 16
+			15 01 00 00 00 00 02 5d 03
+			15 01 00 00 00 00 02 5e 34
+			15 01 00 00 00 00 02 5f 03
+			15 01 00 00 00 00 02 60 56
+			15 01 00 00 00 00 02 61 03
+			15 01 00 00 00 00 02 62 62
+			15 01 00 00 00 00 02 63 03
+			15 01 00 00 00 00 02 64 6c
+			15 01 00 00 00 00 02 65 03
+			15 01 00 00 00 00 02 66 74
+			15 01 00 00 00 00 02 67 03
+			15 01 00 00 00 00 02 68 80
+			15 01 00 00 00 00 02 69 03
+			15 01 00 00 00 00 02 6a 89
+			15 01 00 00 00 00 02 6b 03
+			15 01 00 00 00 00 02 6c 8b
+			15 01 00 00 00 00 02 6d 03
+			15 01 00 00 00 00 02 6e 8d
+			15 01 00 00 00 00 02 6f 03
+			15 01 00 00 00 00 02 70 8e
+			15 01 00 00 00 00 02 71 00
+			15 01 00 00 00 00 02 72 71
+			15 01 00 00 00 00 02 73 00
+			15 01 00 00 00 00 02 74 84
+			15 01 00 00 00 00 02 75 00
+			15 01 00 00 00 00 02 76 a5
+			15 01 00 00 00 00 02 77 00
+			15 01 00 00 00 00 02 78 bb
+			15 01 00 00 00 00 02 79 00
+			15 01 00 00 00 00 02 7a ce
+			15 01 00 00 00 00 02 7b 00
+			15 01 00 00 00 00 02 7c e0
+			15 01 00 00 00 00 02 7d 00
+			15 01 00 00 00 00 02 7e ef
+			15 01 00 00 00 00 02 7f 00
+			15 01 00 00 00 00 02 80 ff
+			15 01 00 00 00 00 02 81 01
+			15 01 00 00 00 00 02 82 0b
+			15 01 00 00 00 00 02 83 01
+			15 01 00 00 00 00 02 84 38
+			15 01 00 00 00 00 02 85 01
+			15 01 00 00 00 00 02 86 5b
+			15 01 00 00 00 00 02 87 01
+			15 01 00 00 00 00 02 88 95
+			15 01 00 00 00 00 02 89 01
+			15 01 00 00 00 00 02 8a c4
+			15 01 00 00 00 00 02 8b 02
+			15 01 00 00 00 00 02 8c 0d
+			15 01 00 00 00 00 02 8d 02
+			15 01 00 00 00 00 02 8e 4a
+			15 01 00 00 00 00 02 8f 02
+			15 01 00 00 00 00 02 90 4c
+			15 01 00 00 00 00 02 91 02
+			15 01 00 00 00 00 02 92 85
+			15 01 00 00 00 00 02 93 02
+			15 01 00 00 00 00 02 94 c3
+			15 01 00 00 00 00 02 95 02
+			15 01 00 00 00 00 02 96 e9
+			15 01 00 00 00 00 02 97 03
+			15 01 00 00 00 00 02 98 16
+			15 01 00 00 00 00 02 99 03
+			15 01 00 00 00 00 02 9a 34
+			15 01 00 00 00 00 02 9b 03
+			15 01 00 00 00 00 02 9c 56
+			15 01 00 00 00 00 02 9d 03
+			15 01 00 00 00 00 02 9e 62
+			15 01 00 00 00 00 02 9f 03
+			15 01 00 00 00 00 02 a0 6c
+			15 01 00 00 00 00 02 a2 03
+			15 01 00 00 00 00 02 a3 74
+			15 01 00 00 00 00 02 a4 03
+			15 01 00 00 00 00 02 a5 80
+			15 01 00 00 00 00 02 a6 03
+			15 01 00 00 00 00 02 a7 89
+			15 01 00 00 00 00 02 a9 03
+			15 01 00 00 00 00 02 aa 8b
+			15 01 00 00 00 00 02 ab 03
+			15 01 00 00 00 00 02 ac 8d
+			15 01 00 00 00 00 02 ad 03
+			15 01 00 00 00 00 02 ae 8e
+			15 01 00 00 00 00 02 af 00
+			15 01 00 00 00 00 02 b0 71
+			15 01 00 00 00 00 02 b1 00
+			15 01 00 00 00 00 02 b2 84
+			15 01 00 00 00 00 02 b3 00
+			15 01 00 00 00 00 02 b4 a5
+			15 01 00 00 00 00 02 b5 00
+			15 01 00 00 00 00 02 b6 bb
+			15 01 00 00 00 00 02 b7 00
+			15 01 00 00 00 00 02 b8 ce
+			15 01 00 00 00 00 02 b9 00
+			15 01 00 00 00 00 02 ba e0
+			15 01 00 00 00 00 02 bb 00
+			15 01 00 00 00 00 02 bc ef
+			15 01 00 00 00 00 02 bd 00
+			15 01 00 00 00 00 02 be ff
+			15 01 00 00 00 00 02 bf 01
+			15 01 00 00 00 00 02 c0 0b
+			15 01 00 00 00 00 02 c1 01
+			15 01 00 00 00 00 02 c2 38
+			15 01 00 00 00 00 02 c3 01
+			15 01 00 00 00 00 02 c4 5b
+			15 01 00 00 00 00 02 c5 01
+			15 01 00 00 00 00 02 c6 95
+			15 01 00 00 00 00 02 c7 01
+			15 01 00 00 00 00 02 c8 c4
+			15 01 00 00 00 00 02 c9 02
+			15 01 00 00 00 00 02 ca 0d
+			15 01 00 00 00 00 02 cb 02
+			15 01 00 00 00 00 02 cc 4a
+			15 01 00 00 00 00 02 cd 02
+			15 01 00 00 00 00 02 ce 4c
+			15 01 00 00 00 00 02 cf 02
+			15 01 00 00 00 00 02 d0 85
+			15 01 00 00 00 00 02 d1 02
+			15 01 00 00 00 00 02 d2 c3
+			15 01 00 00 00 00 02 d3 02
+			15 01 00 00 00 00 02 d4 e9
+			15 01 00 00 00 00 02 d5 03
+			15 01 00 00 00 00 02 d6 16
+			15 01 00 00 00 00 02 d7 03
+			15 01 00 00 00 00 02 d8 34
+			15 01 00 00 00 00 02 d9 03
+			15 01 00 00 00 00 02 da 56
+			15 01 00 00 00 00 02 db 03
+			15 01 00 00 00 00 02 dc 62
+			15 01 00 00 00 00 02 dd 03
+			15 01 00 00 00 00 02 de 6c
+			15 01 00 00 00 00 02 df 03
+			15 01 00 00 00 00 02 e0 74
+			15 01 00 00 00 00 02 e1 03
+			15 01 00 00 00 00 02 e2 80
+			15 01 00 00 00 00 02 e3 03
+			15 01 00 00 00 00 02 e4 89
+			15 01 00 00 00 00 02 e5 03
+			15 01 00 00 00 00 02 e6 8b
+			15 01 00 00 00 00 02 e7 03
+			15 01 00 00 00 00 02 e8 8d
+			15 01 00 00 00 00 02 e9 03
+			15 01 00 00 00 00 02 ea 8e
+			15 01 00 00 00 00 02 ff 10
+			05 01 00 00 00 00 01 29];
+		qcom,mdss-dsi-off-command = [15 01 00 00 00 00 02 ff 10
+			05 01 00 00 10 00 01 28
+			15 01 00 00 00 00 02 b0 00
+			05 01 00 00 40 00 01 10
+			15 01 00 00 00 00 02 4f 01];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,cmd-sync-wait-trigger;
+		qcom,mdss-tear-check-frame-rate = <12000>;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,config-select = <&dsi_dual_sharp_cmd_config0>;
+
+		dsi_dual_sharp_cmd_config0: config0 {
+			qcom,split-mode = "dualctl-split";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
index 0b768e0..241aa71 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -59,6 +59,11 @@
 		qcom,mdss-dsi-te-dcs-command = <1>;
 		qcom,mdss-dsi-te-check-enable;
 		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
 			05 01 00 00 0a 00 01 00
 			/* Soft reset, wait 10ms */
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
new file mode 100644
index 0000000..509547f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -0,0 +1,88 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_cmd: qcom,mdss_dsi_dual_sim_cmd {
+		qcom,mdss-dsi-panel-name = "Sim dual cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1280>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <120>;
+		qcom,mdss-dsi-h-back-porch = <44>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <4>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-hor-line-idle = <0 40 256>,
+						<40 120 128>,
+						<120 240 64>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-on-command = [29 01 00 00 00 00 02 b0 03
+			05 01 00 00 0a 00 01 00
+			/* Soft reset, wait 10ms */
+			15 01 00 00 0a 00 02 3a 77
+			/* Set Pixel format (24 bpp) */
+			39 01 00 00 0a 00 05 2a 00 00 04 ff
+			/* Set Column address */
+			39 01 00 00 0a 00 05 2b 00 00 05 9f
+			/* Set page address */
+			15 01 00 00 0a 00 02 35 00
+			/* Set tear on */
+			39 01 00 00 0a 00 03 44 00 00
+			/* Set tear scan line */
+			15 01 00 00 0a 00 02 51 ff
+			/* write display brightness */
+			15 01 00 00 0a 00 02 53 24
+			/* write control display (CTRL brightness) */
+			15 01 00 00 0a 00 02 55 00
+			/* CABC brightness */
+			05 01 00 00 78 00 01 11
+			/* exit sleep mode, wait 120ms */
+			05 01 00 00 10 00 01 29];
+			/* Set display on, wait 16ms */
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,panel-ack-disabled;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
new file mode 100644
index 0000000..cca28c7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
@@ -0,0 +1,55 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_vid: qcom,mdss_dsi_dual_sim_video {
+		qcom,mdss-dsi-panel-name = "Sim dual video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <1280>;
+		qcom,mdss-dsi-panel-height = <1440>;
+		qcom,mdss-dsi-h-front-porch = <120>;
+		qcom,mdss-dsi-h-back-porch = <44>;
+		qcom,mdss-dsi-h-pulse-width = <16>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <4>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-panel-broadcast-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
+		qcom,panel-ack-disabled;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
index a73a796..98a1f61 100644
--- a/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
@@ -33,6 +33,11 @@
 		qcom,mdss-dsi-bpp = <24>;
 		qcom,mdss-dsi-underflow-color = <0xff>;
 		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
 		qcom,mdss-dsi-on-command = [32 01 00 00 00 00 02 00 00];
 		qcom,mdss-dsi-off-command = [22 01 00 00 00 00 02 00 00];
 		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
index b589fe5..3497e50 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-sdm845.dtsi
@@ -32,6 +32,19 @@
 				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
 				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+		attach-impl-defs =
+				<0x6000 0x2378>,
+				<0x6060 0x1055>,
+				<0x678c 0x8>,
+				<0x6794 0x28>,
+				<0x6800 0x6>,
+				<0x6900 0x3ff>,
+				<0x6924 0x204>,
+				<0x6928 0x11000>,
+				<0x6930 0x800>,
+				<0x6960 0xffffffff>,
+				<0x6b64 0x1a5551>,
+				<0x6b68 0x9a82a382>;
 	};
 
 	apps_smmu: apps-smmu@0x15000000 {
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
index 64cb626..84a6a84 100644
--- a/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-sdm845.dtsi
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -179,6 +179,7 @@
 		regulator-name = "mdss_core_gdsc";
 		reg = <0xaf03000 0x4>;
 		qcom,poll-cfg-gdscr;
+		qcom,support-hw-trigger;
 		status = "disabled";
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
index f8d2304..115c7b8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  */
 
+#include "msm-audio-lpass.dtsi"
 #include "sdm845-wcd.dtsi"
 #include "msm-wsa881x.dtsi"
 #include <dt-bindings/clock/qcom,audio-ext-clk.h>
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 00bd301..d47dd36 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,5 +10,8 @@
  * GNU General Public License for more details.
  */
 
-#include "sdm845-pinctrl.dtsi"
-
+&soc {
+	sound-tavil {
+		qcom,us-euro-gpios = <&tavil_us_euro_sw>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 00bd301..cfba6f4 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,6 +9,3 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-
-#include "sdm845-pinctrl.dtsi"
-
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index 6d6f775..c5b53b8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -238,6 +238,61 @@
 			};
 		};
 
+		pmx_sde: pmx_sde {
+			sde_dsi_active: sde_dsi_active {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <8>;   /* 8 mA */
+					bias-disable;         /* no pull */
+				};
+			};
+			sde_dsi_suspend: sde_dsi_suspend {
+				mux {
+					pins = "gpio6", "gpio52";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio6", "gpio52";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
+		pmx_sde_te {
+			sde_te_active: sde_te_active {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+
+			sde_te_suspend: sde_te_suspend {
+				mux {
+					pins = "gpio10";
+					function = "mdp_vsync";
+				};
+
+				config {
+					pins = "gpio10";
+					drive-strength = <2>;   /* 2 mA */
+					bias-pull-down;         /* PULL DOWN */
+				};
+			};
+		};
+
 		sec_aux_pcm {
 			sec_aux_pcm_sleep: sec_aux_pcm_sleep {
 				mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
index 02e5bfa..21b5659 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi
@@ -17,7 +17,7 @@
 		qcom,use-psci;
 		#address-cells = <1>;
 		#size-cells = <0>;
-		qcom,pm-cluster@0{
+		qcom,pm-cluster@0 {
 			reg = <0>;
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -26,9 +26,9 @@
 			qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5 &CPU6
 				&CPU7>;
 			qcom,psci-mode-shift = <4>;
-			qcom,psci-mode-mask = <0xf>;
+			qcom,psci-mode-mask = <0xfff>;
 
-			qcom,pm-cluster-level@0{ /* D1 */
+			qcom,pm-cluster-level@0 { /* D1 */
 				reg = <0>;
 				label = "l3-wfi";
 				qcom,psci-mode = <0x1>;
@@ -37,7 +37,8 @@
 				qcom,energy-overhead = <69355>;
 				qcom,time-overhead = <99>;
 			};
-			qcom,pm-cluster-level@1{ /* D2 */
+
+			qcom,pm-cluster-level@1 { /* D2 */
 				reg = <1>;
 				label = "l3-dyn-ret";
 				qcom,psci-mode = <0x2>;
@@ -48,9 +49,9 @@
 				qcom,min-child-idx = <1>;
 			};
 
-			qcom,pm-cluster-level@2{ /* D4, D3 not supported */
+			qcom,pm-cluster-level@2 { /* D4, D3 is not supported */
 				reg = <2>;
-				label = "L3 PC";
+				label = "l3-pc";
 				qcom,psci-mode = <0x4>;
 				qcom,latency-us = <4562>;
 				qcom,ss-power = <408>;
@@ -60,6 +61,32 @@
 				qcom,is-reset;
 			};
 
+			qcom,pm-cluster-level@3 { /* Cx off */
+				reg = <3>;
+				label = "cx-off";
+				qcom,psci-mode = <0x224>;
+				qcom,latency-us = <5562>;
+				qcom,ss-power = <308>;
+				qcom,energy-overhead = <2521840>;
+				qcom,time-overhead = <6376>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
+			qcom,pm-cluster-level@4 { /* LLCC off, AOSS sleep */
+				reg = <4>;
+				label = "llcc-off";
+				qcom,psci-mode = <0xC24>;
+				qcom,latency-us = <6562>;
+				qcom,ss-power = <108>;
+				qcom,energy-overhead = <2621840>;
+				qcom,time-overhead = <7376>;
+				qcom,min-child-idx = <3>;
+				qcom,is-reset;
+				qcom,notify-rpm;
+			};
+
 			qcom,pm-cpu {
 				#address-cells = <1>;
 				#size-cells = <0>;
@@ -95,7 +122,9 @@
 					qcom,energy-overhead = <418225>;
 					qcom,time-overhead = <885>;
 					qcom,is-reset;
+					qcom,use-broadcast-timer;
 				};
+
 				qcom,pm-cpu-level@3 {  /* C4 */
 					reg = <3>;
 					qcom,spm-cpu-mode = "rail-pc";
@@ -105,10 +134,9 @@
 					qcom,energy-overhead = <428225>;
 					qcom,time-overhead = <1000>;
 					qcom,is-reset;
+					qcom,use-broadcast-timer;
 				};
 			};
 		};
-
 	};
-
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
new file mode 100644
index 0000000..228b924
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845.dtsi"
+#include "sdm845-qrd.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 QRD";
+	compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd";
+	qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
new file mode 100644
index 0000000..6ea92ee
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -0,0 +1,11 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 228bbb3..67dd934 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -26,30 +26,253 @@
 		regulator-min-microvolt = <1800000>;
 		regulator-max-microvolt = <1800000>;
 	};
-
-	apc0_pwrcl_vreg: regulator-pwrcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_pwrcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <23>;
-	};
-
-	apc0_l3_vreg: regulator-l3 {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc0_l3_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <19>;
-	};
-
-	apc1_perfcl_vreg: regulator-perfcl {
-		compatible = "qcom,stub-regulator";
-		regulator-name = "apc1_perfcl_corner";
-		regulator-min-microvolt = <1>;
-		regulator-max-microvolt = <26>;
-	};
 };
 
 &soc {
+	/* CPR controller regulators */
+	apc0_cpr: cprh-ctrl@17dc0000 {
+		compatible = "qcom,cprh-sdm845-v1-kbss-regulator";
+		reg =	<0x17dc0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17840000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc0";
+		qcom,cpr-controller-id = <0>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <11>;
+		qcom,cpr-step-quot-init-max = <12>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x3A00000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17dc3a84 0x17dc3a88 0x17840c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_SILVER_CPRH_STATUS_0",
+			"APSS_SILVER_CPRH_STATUS_1",
+			"SILVER_SAW4_PMIC_STS";
+
+		thread@1 {
+			qcom,cpr-thread-id = <1>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_pwrcl_vreg: regulator {
+				regulator-name = "apc0_pwrcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <17>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <17>;
+				qcom,cpr-corners = <17>;
+
+				qcom,cpr-corner-fmax-map = <6 12 17>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  688000
+					 688000  756000  756000  756000  812000
+					 812000  812000  872000  872000  872000
+					 872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  584000
+					 584000  584000  632000  632000  632000
+					 632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  748800000
+					 825600000  902400000  979200000
+					1056000000 1132800000 1209600000
+					1286400000 1363200000 1440000000
+					1516800000 1593600000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc0_l3_vreg: regulator {
+				regulator-name = "apc0_l3_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <9>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <9>;
+				qcom,cpr-corners = <9>;
+
+				qcom,cpr-corner-fmax-map = <4 7 9>;
+
+				qcom,cpr-voltage-ceiling =
+					<688000  688000  688000  688000  756000
+					 812000  812000  872000  928000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 584000  584000  632000  672000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
+	apc1_cpr: cprh-ctrl@17db0000 {
+		compatible = "qcom,cprh-sdm845-kbss-regulator";
+		reg =	<0x17db0000 0x4000>,
+			<0x00784000 0x1000>,
+			<0x17830000 0x1000>;
+		reg-names = "cpr_ctrl", "fuse_base", "saw";
+		clocks = <&clock_gcc GCC_CPUSS_RBCPR_CLK>;
+		clock-names = "core_clk";
+		qcom,cpr-ctrl-name = "apc1";
+		qcom,cpr-controller-id = <1>;
+
+		qcom,cpr-sensor-time = <1000>;
+		qcom,cpr-loop-time = <5000000>;
+		qcom,cpr-idle-cycles = <15>;
+		qcom,cpr-up-down-delay-time = <3000>;
+		qcom,cpr-step-quot-init-min = <9>;
+		qcom,cpr-step-quot-init-max = <14>;
+		qcom,cpr-count-mode = <0>;		/* All at once */
+		qcom,cpr-count-repeat = <1>;
+		qcom,cpr-down-error-step-limit = <1>;
+		qcom,cpr-up-error-step-limit = <1>;
+		qcom,cpr-corner-switch-delay-time = <1042>;
+		qcom,cpr-voltage-settling-time = <1760>;
+
+		qcom,apm-threshold-voltage = <800000>;
+		qcom,apm-crossover-voltage = <880000>;
+		qcom,mem-acc-threshold-voltage = <852000>;
+		qcom,mem-acc-crossover-voltage = <852000>;
+
+		qcom,voltage-step = <4000>;
+		qcom,voltage-base = <352000>;
+		qcom,cpr-saw-use-unit-mV;
+
+		qcom,saw-avs-ctrl = <0x101C031>;
+		qcom,saw-avs-limit = <0x4200000>;
+
+		qcom,cpr-panic-reg-addr-list =
+			<0x17db3a84 0x17830c18>;
+		qcom,cpr-panic-reg-name-list =
+			"APSS_GOLD_CPRH_STATUS_0", "GOLD_SAW4_PMIC_STS";
+
+		thread@0 {
+			qcom,cpr-thread-id = <0>;
+			qcom,cpr-consecutive-up = <0>;
+			qcom,cpr-consecutive-down = <2>;
+			qcom,cpr-up-threshold = <2>;
+			qcom,cpr-down-threshold = <2>;
+
+			apc1_perfcl_vreg: regulator {
+				regulator-name = "apc1_perfcl_corner";
+				regulator-min-microvolt = <1>;
+				regulator-max-microvolt = <24>;
+
+				qcom,cpr-fuse-corners = <3>;
+				qcom,cpr-fuse-combos = <8>;
+				qcom,cpr-speed-bins = <1>;
+				qcom,cpr-speed-bin-corners = <22>;
+				qcom,cpr-corners = <22>;
+
+				qcom,cpr-corner-fmax-map =
+					<10 17 22>;
+
+				qcom,cpr-voltage-ceiling =
+					<756000  756000  756000  756000  756000
+					 756000  756000  756000  756000  756000
+					 812000  812000  828000  828000  828000
+					 828000  828000  884000  952000  952000
+					1056000 1056000>;
+
+				qcom,cpr-voltage-floor =
+					<568000  568000  568000  568000  568000
+					 568000  568000  568000  568000  568000
+					 584000  584000  632000  632000  632000
+					 632000  632000  672000  712000  712000
+					 772000  772000>;
+
+				qcom,cpr-floor-to-ceiling-max-range =
+					<32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  32000  32000  32000
+					 32000  32000  40000  40000  40000
+					 40000  40000>;
+
+				qcom,corner-frequencies =
+					<300000000  422400000  499200000
+					 576000000  652800000  729600000
+					 806400000  883200000  960000000
+					1036800000 1113600000 1190400000
+					1267200000 1344000000 1420800000
+					1497600000 1574400000 1651200000
+					1728000000 1804800000 1881600000
+					1958400000>;
+
+				qcom,cpr-open-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
+				qcom,allow-voltage-interpolation;
+				qcom,allow-quotient-interpolation;
+				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+			};
+		};
+	};
+
 	/* RPMh regulators: */
 
 	/* PM8998 S1 = VDD_EBI supply */
@@ -165,12 +388,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa1";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l1: regulator-l1 {
 			regulator-name = "pm8998_l1";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <880000>;
 			regulator-max-microvolt = <880000>;
 			qcom,init-voltage = <880000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -178,12 +406,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa2";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l2: regulator-l2 {
 			regulator-name = "pm8998_l2";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -191,12 +424,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa3";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l3: regulator-l3 {
 			regulator-name = "pm8998_l3";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -217,12 +455,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa5";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l5: regulator-l5 {
 			regulator-name = "pm8998_l5";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <800000>;
 			regulator-max-microvolt = <800000>;
 			qcom,init-voltage = <800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -230,12 +473,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa6";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l6: regulator-l6 {
 			regulator-name = "pm8998_l6";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1856000>;
 			regulator-max-microvolt = <1856000>;
 			qcom,init-voltage = <1856000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -243,12 +491,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa7";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l7: regulator-l7 {
 			regulator-name = "pm8998_l7";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -256,12 +509,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa8";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l8: regulator-l8 {
 			regulator-name = "pm8998_l8";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -269,12 +527,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa9";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l9: regulator-l9 {
 			regulator-name = "pm8998_l9";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -282,12 +545,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa10";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l10: regulator-l10 {
 			regulator-name = "pm8998_l10";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -295,12 +563,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa11";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l11: regulator-l11 {
 			regulator-name = "pm8998_l11";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
 			qcom,init-voltage = <1000000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -308,12 +581,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa12";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l12: regulator-l12 {
 			regulator-name = "pm8998_l12";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -321,12 +599,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa13";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l13: regulator-l13 {
 			regulator-name = "pm8998_l13";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1808000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <1808000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -334,12 +617,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa14";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l14: regulator-l14 {
 			regulator-name = "pm8998_l14";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -347,12 +635,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa15";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l15: regulator-l15 {
 			regulator-name = "pm8998_l15";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
 			qcom,init-voltage = <1800000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -360,12 +653,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa16";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l16: regulator-l16 {
 			regulator-name = "pm8998_l16";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -373,12 +671,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa17";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l17: regulator-l17 {
 			regulator-name = "pm8998_l17";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1304000>;
 			regulator-max-microvolt = <1304000>;
 			qcom,init-voltage = <1304000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -386,12 +689,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa18";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l18: regulator-l18 {
 			regulator-name = "pm8998_l18";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2704000>;
 			qcom,init-voltage = <2704000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -399,12 +707,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa19";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l19: regulator-l19 {
 			regulator-name = "pm8998_l19";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <3008000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -412,12 +725,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa20";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l20: regulator-l20 {
 			regulator-name = "pm8998_l20";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -425,12 +743,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa21";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l21: regulator-l21 {
 			regulator-name = "pm8998_l21";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2960000>;
 			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2960000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -438,12 +761,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa22";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l22: regulator-l22 {
 			regulator-name = "pm8998_l22";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2864000>;
 			regulator-max-microvolt = <2864000>;
 			qcom,init-voltage = <2864000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -451,12 +779,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa23";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3312000>;
 			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <3312000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -464,12 +797,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa24";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l24: regulator-l24 {
 			regulator-name = "pm8998_l24";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3088000>;
 			regulator-max-microvolt = <3088000>;
 			qcom,init-voltage = <3088000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -477,12 +815,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa25";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3104000>;
 			regulator-max-microvolt = <3104000>;
 			qcom,init-voltage = <3104000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -490,12 +833,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa26";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l26: regulator-l26 {
 			regulator-name = "pm8998_l26";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
@@ -516,14 +864,17 @@
 		compatible = "qcom,rpmh-vrm-regulator";
 		mboxes = <&apps_rsc 0>;
 		qcom,resource-name = "ldoa28";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LDO_LPM
+			 RPMH_REGULATOR_MODE_LDO_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
 		pm8998_l28: regulator-l28 {
 			regulator-name = "pm8998_l28";
-		mboxes = <&apps_rsc 0>;
-		qcom,resource-name = "";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <3008000>;
 			regulator-max-microvolt = <3008000>;
 			qcom,init-voltage = <3008000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 221eb38..0f31c0a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -29,7 +29,7 @@
 	};
 };
 
-&usb3 {
+&usb0 {
 	/delete-property/ qcom,usb-dbm;
 	qcom,charging-disabled;
 	dwc3@a600000 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 663ff7e..124ed99 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -109,6 +109,14 @@
 	};
 };
 
+&apc0_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
+&apc1_cpr {
+	qcom,cpr-ignore-invalid-fuses;
+};
+
 &ufsphy_card {
 	compatible = "qcom,ufs-phy-qrbtc-sdm845";
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
index 7144acd..5d81487 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi
@@ -10,7 +10,280 @@
  * GNU General Public License for more details.
  */
 
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-sharp-dsc-4k-video.dtsi"
+#include "dsi-panel-sharp-dsc-4k-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi"
+#include "dsi-panel-sharp-1080p-cmd.dtsi"
+#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
+#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
+#include "sdm845-pinctrl.dtsi"
+
 &soc {
+	dsi_panel_pwr_supply: dsi_panel_pwr_supply {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "lab";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+		};
+
+		qcom,panel-supply-entry@2 {
+			reg = <2>;
+			qcom,supply-name = "ibb";
+			qcom,supply-min-voltage = <4600000>;
+			qcom,supply-max-voltage = <6000000>;
+			qcom,supply-enable-load = <100000>;
+			qcom,supply-disable-load = <100>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+	};
+
+	dsi_panel_pwr_supply_vdd_no_labibb: dsi_panel_pwr_supply_vdd_no_labibb {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		qcom,panel-supply-entry@0 {
+			reg = <0>;
+			qcom,supply-name = "vddio";
+			qcom,supply-min-voltage = <1800000>;
+			qcom,supply-max-voltage = <1800000>;
+			qcom,supply-enable-load = <62000>;
+			qcom,supply-disable-load = <80>;
+			qcom,supply-post-on-sleep = <20>;
+		};
+
+		qcom,panel-supply-entry@1 {
+			reg = <1>;
+			qcom,supply-name = "vdd";
+			qcom,supply-min-voltage = <3000000>;
+			qcom,supply-max-voltage = <3000000>;
+			qcom,supply-enable-load = <857000>;
+			qcom,supply-disable-load = <0>;
+			qcom,supply-post-on-sleep = <0>;
+		};
+	};
+
+	dsi_sharp_4k_dsc_video_display: qcom,dsi-display@0 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_4k_dsc_video";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_4k_dsc_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_sharp_4k_dsc_cmd_display: qcom,dsi-display@1 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_4k_dsc_cmd";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_4k_dsc_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_sharp_1080_cmd_display: qcom,dsi-display@2 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_sharp_1080_cmd";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_sharp_1080_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_sharp_1080_120hz_cmd_display: qcom,dsi-display@3 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_sharp_1080_120hz_cmd";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_sharp_1080_120hz_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_nt35597_truly_video_display: qcom,dsi-display@4 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_video";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_dual_nt35597_truly_cmd_display: qcom,dsi-display@5 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_truly_cmd";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_truly_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_nt35597_truly_dsc_cmd_display: qcom,dsi-display@6 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_cmd";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_cmd>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+	dsi_nt35597_truly_dsc_video_display: qcom,dsi-display@7 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_nt35597_truly_dsc_video";
+		qcom,display-type = "primary";
+
+		qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+		qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		       <&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&sde_dsi_active &sde_te_active>;
+		pinctrl-1 = <&sde_dsi_suspend &sde_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+		qcom,platform-reset-gpio = <&tlmm 6 0>;
+
+		qcom,dsi-panel = <&dsi_nt35597_truly_dsc_video>;
+		vddio-supply = <&pm8998_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
 	sde_wb: qcom,wb-display@0 {
 		compatible = "qcom,wb-display";
 		cell-index = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ca3c4fa..b157e04 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -77,6 +77,10 @@
 		qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
 		qcom,sde-dsc-size = <0x140>;
 
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>;
+		qcom,sde-dither-version = <0x00010000>;
+		qcom,sde-dither-size = <0x20>;
+
 		qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
 
 		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
@@ -247,4 +251,159 @@
 			gdsc-mdss-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
 		};
 	};
+
+	mdss_dsi0: qcom,mdss_dsi_ctrl0@ae94000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.0";
+		label = "dsi-ctrl-0";
+		status = "disabled";
+		cell-index = <0>;
+		reg =   <0xae94000 0x400>;
+		reg-names = "dsi_ctrl";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <4 0>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+					"pixel_clk", "pixel_clk_rcg";
+
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <925000>;
+				qcom,supply-max-voltage = <925000>;
+				qcom,supply-enable-load = <17000>;
+				qcom,supply-disable-load = <32>;
+			};
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <18160>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi1: qcom,mdss_dsi_ctrl1@ae96000 {
+		compatible = "qcom,dsi-ctrl-hw-v2.0";
+		label = "dsi-ctrl-1";
+		status = "disabled";
+		cell-index = <1>;
+		reg =   <0xae96000 0x400>;
+		reg-names = "dsi_ctrl";
+		interrupt-parent = <&mdss_mdp>;
+		interrupts = <5 0>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		vdda-0p9-supply = <&pm8998_l1>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_BYTE0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>,
+		<&clock_dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK>,
+		<&clock_dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+		clock-names = "byte_clk", "byte_clk_rcg", "byte_intf_clk",
+				"pixel_clk", "pixel_clk_rcg";
+		qcom,ctrl-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,ctrl-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-0p9";
+				qcom,supply-min-voltage = <925000>;
+				qcom,supply-max-voltage = <925000>;
+				qcom,supply-enable-load = <17000>;
+				qcom,supply-disable-load = <32>;
+			};
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <18160>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi_phy0: qcom,mdss_dsi_phy0@ae94400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		status = "disabled";
+		label = "dsi-phy-0";
+		cell-index = <0>;
+		reg = <0xae94400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		qcom,platform-strength-ctrl = [ff 06
+						ff 06
+						ff 06
+						ff 00];
+		qcom,platform-regulator-settings = [1d
+							1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 10 0f
+						00 00 10 0f
+						00 00 10 0f
+						00 00 10 0f
+						00 00 10 8f];
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <2500>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
+	mdss_dsi_phy1: qcom,mdss_dsi_phy0@ae96400 {
+		compatible = "qcom,dsi-phy-v3.0";
+		status = "disabled";
+		label = "dsi-phy-1";
+		cell-index = <1>;
+		reg = <0xae96400 0x7c0>;
+		reg-names = "dsi_phy";
+		gdsc-supply = <&mdss_core_gdsc>;
+		vdda-1p2-supply = <&pm8998_l26>;
+		qcom,platform-strength-ctrl = [ff 06
+						ff 06
+						ff 06
+						ff 00];
+		qcom,platform-regulator-settings = [1d
+							1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 10 0f
+						00 00 10 0f
+						00 00 10 0f
+						00 00 10 0f
+						00 00 10 8f];
+
+		qcom,phy-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,phy-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "vdda-1p2";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <2500>;
+				qcom,supply-disable-load = <1>;
+			};
+		};
+	};
+
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 06879c2..5399e99 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -13,7 +13,8 @@
 
 #include <dt-bindings/clock/qcom,gcc-sdm845.h>
 &soc {
-	usb3: ssusb@a600000 {
+	/* Primary USB port related DWC3 controller */
+	usb0: ssusb@a600000 {
 		compatible = "qcom,dwc-usb3-msm";
 		reg = <0x0a600000 0xf8c00>,
 		      <0x088ee000 0x400>;
@@ -22,8 +23,8 @@
 		#size-cells = <1>;
 		ranges;
 
-		interrupts = <0 346 0>, <0 130 0>;
-		interrupt-names = "hs_phy_irq", "pwr_event_irq";
+		interrupts = <0 489 0>, <0 130 0>, <0 486 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
 
 		USB3_GDSC-supply = <&usb30_prim_gdsc>;
 		qcom,usb-dbm = <&dbm_1p5>;
@@ -58,9 +59,11 @@
 			snps,disable-clk-gating;
 			snps,has-lpm-erratum;
 			snps,hird-threshold = /bits/ 8 <0x10>;
+			maximum-speed = "high-speed";
 		};
 	};
 
+	/* Primary USB port related QUSB2 PHY */
 	qusb_phy0: qusb@88e2000 {
 		compatible = "qcom,qusb2phy-v2";
 		reg = <0x088e2000 0x400>;
@@ -71,21 +74,23 @@
 		vdda33-supply = <&pm8998_l24>;
 		qcom,vdd-voltage-level = <0 880000 880000>;
 		qcom,qusb-phy-init-seq =
-				/* <value reg_offset> */
-					<0x13 0x04
-					0x7c 0x18c
-					0x80 0x2c
-					0x0a 0x184
-					0x00 0x240>;
+				     /* <value reg_offset> */
+					<0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+					0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+					0x80 0x2c  /* PLL_CMODE */
+					0x0a 0x184 /* PLL_LOCK_DELAY */
+					0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+					0xa5 0x240 /* TUNE1 */
+					0x09 0x244 /* TUNE2 */
+					0x00 0x220 /* IMP_CTRL1 */
+					0x58 0x224>; /* IMP_CTRL2 */
 		phy_type= "utmi";
-		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
-			 <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
 			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
-		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
 
-		resets = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_BCR>;
+		resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
 		reset-names = "phy_reset";
-
 	};
 
 	dbm_1p5: dbm@a8f8000 {
@@ -97,4 +102,217 @@
 	usb_nop_phy: usb_nop_phy {
 		compatible = "usb-nop-xceiv";
 	};
+
+	/* Secondary USB port related DWC3 controller */
+	usb1: ssusb@a800000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a800000 0xf8c00>,
+		      <0x088ee000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 491 0>, <0 135 0>, <0 487 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq";
+
+		USB3_GDSC-supply = <&usb30_sec_gdsc>;
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+
+		clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
+			 <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+			 <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
+
+		qcom,core-clk-rate = <133333333>;
+		qcom,core-clk-rate-hs = <66666667>;
+
+		resets = <&clock_gcc GCC_USB30_SEC_BCR>;
+		reset-names = "core_reset";
+		status = "disabled";
+
+		dwc3@a600000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a800000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 138 0>;
+			usb-phy = <&qusb_phy1>, <&usb_qmp_phy>;
+			tx-fifo-resize;
+			snps,disable-clk-gating;
+			snps,has-lpm-erratum;
+			snps,hird-threshold = /bits/ 8 <0x10>;
+		};
+	};
+
+	/* Secondary USB port related QUSB2 PHY */
+	qusb_phy1: qusb@88e3000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x088e3000 0x400>;
+		reg-names = "qusb_phy_base";
+
+		vdd-supply = <&pm8998_l1>;
+		vdda18-supply = <&pm8998_l12>;
+		vdda33-supply = <&pm8998_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,qusb-phy-init-seq =
+				     /* <value reg_offset> */
+					<0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+					0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+					0x80 0x2c  /* PLL_CMODE */
+					0x0a 0x184 /* PLL_LOCK_DELAY */
+					0x19 0xb4  /* PLL_DIGITAL_TIMERS_TWO */
+					0xa5 0x240 /* TUNE1 */
+					0x09 0x244 /* TUNE2 */
+					0x00 0x220 /* IMP_CTRL1 */
+					0x58 0x224>; /* IMP_CTRL2 */
+		phy_type= "utmi";
+		clocks = <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "ref_clk_src", "cfg_ahb_clk";
+
+		resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>;
+		reset-names = "phy_reset";
+		status = "disabled";
+	};
+
+	/* Secondary USB port related QMP PHY */
+	usb_qmp_phy: ssphy@88eb000 {
+		compatible = "qcom,usb-ssphy-qmp-v2";
+		reg = <0x88eb000 0x1000>,
+			<0x01fcbff0 0x4>;
+		reg-names = "qmp_phy_base",
+			    "vls_clamp_reg";
+
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l26>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+		/* <reg_offset, value, delay> */
+			<0x048 0x07 0x00 /* QSERDES_COM_PLL_IVCO */
+			 0x080 0x14 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */
+			 0x034 0x04 0x00 /* QSERDES_COM_BIAS_EN_CLKBUFLR_EN */
+			 0x138 0x30 0x00 /* QSERDES_COM_CLK_SELECT */
+			 0x03c 0x02 0x00 /* QSERDES_COM_SYS_CLK_CTRL */
+			 0x08c 0x08 0x00 /* QSERDES_COM_RESETSM_CNTRL2 */
+			 0x15c 0x06 0x00 /* QSERDES_COM_CMN_CONFIG */
+			 0x164 0x01 0x00 /* QSERDES_COM_SVS_MODE_CLK_SEL */
+			 0x13c 0x80 0x00 /* QSERDES_COM_HSCLK_SEL */
+			 0x0b0 0x82 0x00 /* QSERDES_COM_DEC_START_MODE0 */
+			 0x0b8 0xab 0x00 /* QSERDES_COM_DIV_FRAC_START1_MODE0 */
+			 0x0bc 0xea 0x00 /* QSERDES_COM_DIV_FRAC_START2_MODE0 */
+			 0x0c0 0x02 0x00 /* QSERDES_COM_DIV_FRAC_START3_MODE0 */
+			 0x060 0x06 0x00 /* QSERDES_COM_CP_CTRL_MODE0 */
+			 0x068 0x16 0x00 /* QSERDES_COM_PLL_RCTRL_MODE0 */
+			 0x070 0x36 0x00 /* QSERDES_COM_PLL_CCTRL_MODE0 */
+			 0x0dc 0x00 0x00 /* QSERDES_COM_INTEGLOOP_GAIN1_MODE0 */
+			 0x0d8 0x3f 0x00 /* QSERDES_COM_INTEGLOOP_GAIN0_MODE0 */
+			 0x0f8 0x01 0x00 /* QSERDES_COM_VCO_TUNE2_MODE0 */
+			 0x0f4 0xc9 0x00 /* QSERDES_COM_VCO_TUNE1_MODE0 */
+			 0x148 0x0a 0x00 /* QSERDES_COM_CORECLK_DIV_MODE0 */
+			 0x0a0 0x00 0x00 /* QSERDES_COM_LOCK_CMP3_MODE0 */
+			 0x09c 0x34 0x00 /* QSERDES_COM_LOCK_CMP2_MODE0 */
+			 0x098 0x15 0x00 /* QSERDES_COM_LOCK_CMP1_MODE0 */
+			 0x090 0x04 0x00 /* QSERDES_COM_LOCK_CMP_EN */
+			 0x154 0x00 0x00 /* QSERDES_COM_CORE_CLK_EN */
+			 0x094 0x00 0x00 /* QSERDES_COM_LOCK_CMP_CFG */
+			 0x0f0 0x00 0x00 /* QSERDES_COM_VCO_TUNE_MAP */
+			 0x040 0x0a 0x00 /* QSERDES_COM_SYSCLK_BUF_ENABLE */
+			 0x0d0 0x80 0x00 /* QSERDES_COM_INTEGLOOP_INITVAL */
+			 0x010 0x01 0x00 /* QSERDES_COM_SSC_EN_CENTER */
+			 0x01c 0x31 0x00 /* QSERDES_COM_SSC_PER1 */
+			 0x020 0x01 0x00 /* QSERDES_COM_SSC_PER2 */
+			 0x014 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER1 */
+			 0x018 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER2 */
+			 0x024 0x85 0x00 /* QSERDES_COM_SSC_STEP_SIZE1 */
+			 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2 */
+			 0x4c0 0x0c 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */
+			 0x564 0x50 0x00 /* QSERDES_RX_RX_MODE_00 */
+			 0x430 0x0b 0x00 /* QSERDES_RX_UCDR_FASTLOCK_FO_GAIN */
+			 0x4d4 0x0e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x4d8 0x4e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x4dc 0x18 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x4f8 0x77 0x00 /* RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x4fc 0x80 0x00 /* RX_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x504 0x03 0x00 /* QSERDES_RX_SIGDET_CNTRL */
+			 0x50c 0x1c 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */
+			 0x434 0x75 0x00 /* RX_UCDR_SO_SATURATION_AND_ENABLE */
+			 0x444 0x80 0x00 /* QSERDES_RX_UCDR_PI_CONTROLS */
+			 0x408 0x0a 0x00 /* QSERDES_RX_UCDR_FO_GAIN */
+			 0x40c 0x06 0x00 /* QSERDES_RX_UCDR_SO_GAIN */
+			 0x500 0x00 0x00 /* QSERDES_RX_SIGDET_ENABLES */
+			 0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */
+			 0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */
+			 0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */
+			 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */
+			 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */
+			 0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */
+			 0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */
+			 0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */
+			 0x8d4 0x40 0x00 /* USB3_UNI_PCS_FLL_MAN_CODE */
+			 0x8c4 0x02 0x00 /* USB3_UNI_PCS_FLL_CNTRL1 */
+			 0x864 0x1b 0x00 /* USB3_UNI_PCS_POWER_STATE_CONFIG2 */
+			 0x80c 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V0 */
+			 0x810 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V1 */
+			 0x814 0xb5 0x00 /* USB3_UNI_PCS_TXMGN_V2 */
+			 0x818 0x4c 0x00 /* USB3_UNI_PCS_TXMGN_V3 */
+			 0x81c 0x64 0x00 /* USB3_UNI_PCS_TXMGN_V4 */
+			 0x820 0x6a 0x00 /* USB3_UNI_PCS_TXMGN_LS */
+			 0x824 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V0 */
+			 0x828 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V0 */
+			 0x82c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V1 */
+			 0x830 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V1 */
+			 0x834 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V2 */
+			 0x838 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V2 */
+			 0x83c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V3 */
+			 0x840 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V3 */
+			 0x844 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V4 */
+			 0x848 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V4 */
+			 0x84c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_LS */
+			 0x850 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_LS */
+			 0x85c 0x02 0x00 /* USB3_UNI_PCS_RATE_SLEW_CNTRL */
+			 0x8a0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
+			 0x88c 0x44 0x00 /* USB3_UNI_PCS_TSYNC_RSYNC_TIME */
+			 0x880 0xd1 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */
+			 0x884 0x1f 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */
+			 0x888 0x47 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */
+			 0x870 0xe7 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */
+			 0x874 0x03 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */
+			 0x878 0x40 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_L */
+			 0x87c 0x00 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_H */
+			 0x9d8 0xba 0x00 /* USB3_UNI_PCS_RX_SIGDET_LVL */
+			 0x8b8 0x75 0x00 /* RXEQTRAINING_WAIT_TIME */
+			 0x8b0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
+			 0x8bc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+			 0xa0c 0x21 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */
+			 0xa10 0x60 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG2 */
+			 0xffffffff 0xffffffff 0x00>;
+
+		qcom,qmp-phy-reg-offset =
+				<0x974 /* USB3_UNI_PCS_PCS_STATUS */
+				 0x8d8 /* USB3_UNI_PCS_AUTONOMOUS_MODE_CTRL */
+				 0x8dc /* USB3_UNI_PCS_LFPS_RXTERM_IRQ_CLEAR */
+				 0x804 /* USB3_UNI_PCS_POWER_DOWN_CONTROL */
+				 0x800 /* USB3_UNI_PCS_SW_RESET */
+				 0x808>; /* USB3_UNI_PCS_START_CONTROL */
+
+		clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_SEC_CLKREF_CLK>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
+		reset-names = "phy_reset", "phy_phy_reset";
+		status = "disabled";
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
new file mode 100644
index 0000000..8ab0593
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-cdp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845-v2.dtsi"
+#include "sdm845-cdp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM sdm845 V2 CDP";
+	compatible = "qcom,sdm845-cdp", "qcom,sdm845", "qcom,cdp";
+	qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
new file mode 100644
index 0000000..57c3e71
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-mtp.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm845-v2.dtsi"
+#include "sdm845-mtp.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM sdm845 V2 MTP";
+	compatible = "qcom,sdm845-mtp", "qcom,sdm845", "qcom,mtp";
+	qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-v2-rumi.dts
new file mode 100644
index 0000000..4b2213d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2-rumi.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/memreserve/ 0x90000000 0x00000100;
+
+#include "sdm845-v2.dtsi"
+#include "sdm845-rumi.dtsi"
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 V2 RUMI";
+	compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
+	qcom,board-id = <15 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
new file mode 100644
index 0000000..4fdf383
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi
@@ -0,0 +1,18 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdm845.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. SDM845 V2";
+	qcom,msm-id = <321 0x20000>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
index 9545581..ed4956f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,50 +15,128 @@
 #include <dt-bindings/clock/qcom,videocc-sdm845.h>
 
 &soc {
-	msm_vidc: qcom,vidc@cc00000 {
-		  compatible = "qcom,msm-vidc";
-		  status = "disabled";
-		  reg = <0xcc00000 0x100000>;
-		  interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
-		  qcom,debug-timeout;
-		  qcom,reg-presets =
-			 <0x80124 0x00000003>,
-			 <0x80550 0x01111111>,
-			 <0x80560 0x01111111>,
-			 <0x80568 0x01111111>,
-			 <0x80570 0x01111111>,
-			 <0x80580 0x01111111>,
-			 <0x80588 0x01111111>,
-			 <0xe2010 0x00000000>;
-		  vdd-supply = <&venus_gdsc>;
-		  venus-core0-supply = <&vcodec0_gdsc>;
-		  venus-core1-supply = <&vcodec1_gdsc>;
-		  clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
-			 <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
-			 <&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
-		  clock-names = "core_clk", "iface_clk", "bus_clk",
+	msm_vidc: qcom,vidc@aa00000 {
+		compatible = "qcom,msm-vidc";
+		status = "disabled";
+		reg = <0xaa00000 0x200000>;
+		interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+		qcom,hfi = "venus";
+		qcom,firmware-name = "venus";
+		qcom,max-secure-instances = <5>;
+		qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
+
+		/* Supply */
+		venus-supply = <&venus_gdsc>;
+		venus-core0-supply = <&vcodec0_gdsc>;
+		venus-core1-supply = <&vcodec1_gdsc>;
+
+		/* Clocks */
+		clock-names = "core_clk", "iface_clk", "bus_clk",
 			"core0_clk", "core1_clk";
-		  qcom,proxy-clock-names = "core_clk", "iface_clk",
+		clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+			<&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+			<&clock_videocc VIDEO_CC_VCODEC1_CORE_CLK>;
+		qcom,proxy-clock-names = "core_clk", "iface_clk",
 			"bus_clk", "core0_clk", "core1_clk";
-		  qcom,clock-configs = <0x1 0x1 0x1 0x1 0x1>;
-		  qcom,proxy-reg-names = "vdd";
-		  bus_cnoc {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "cnoc";
-			  qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
-			  qcom,bus-governor = "performance";
-			  qcom,bus-range-kbps = <1 1>;
-		  };
-		  venus_bus_ddr {
-			  compatible = "qcom,msm-vidc,bus";
-			  label = "venus-ddr";
-			  qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
-			  qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
-			  qcom,bus-governor = "msm-vidc-ddr";
-			  qcom,bus-range-kbps = <1000 3388000>;
-		  };
-	  };
+		qcom,clock-configs = <0x0 0x0 0x0 0x0 0x0>;
+		qcom,allowed-clock-rates = <200000000 320000000 380000000
+			444000000 533000000>;
+		qcom,clock-freq-tbl {
+			qcom,profile-enc {
+				qcom,codec-mask = <0x55555555>;
+				qcom,vpp-cycles-per-mb = <675>;
+				qcom,vsp-cycles-per-mb = <125>;
+				qcom,low-power-cycles-per-mb = <320>;
+			};
+			qcom,profile-dec {
+				qcom,codec-mask = <0xffffffff>;
+				qcom,vpp-cycles-per-mb = <200>;
+				qcom,vsp-cycles-per-mb = <50>;
+			};
+		};
+
+		/* Buses */
+		bus_cnoc {
+			compatible = "qcom,msm-vidc,bus";
+			label = "cnoc";
+			qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
+
+		venus_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 3388000>;
+		};
+		arm9_bus_ddr {
+			compatible = "qcom,msm-vidc,bus";
+			label = "venus-arm9-ddr";
+			qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+			qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+			qcom,bus-governor = "performance";
+			qcom,bus-range-kbps = <1000 1000>;
+		};
+
+		/* MMUs */
+		non_secure_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_ns";
+			iommus =
+				<&apps_smmu 0x10a0>,
+				<&apps_smmu 0x10a8>,
+				<&apps_smmu 0x10b0>;
+			buffer-types = <0xfff>;
+			virtual-addr-pool = <0x70800000 0x6f800000>;
+		};
+
+		firmware_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			qcom,fw-context-bank;
+			iommus =
+				<&apps_smmu 0x10b2>;
+		};
+
+		secure_bitstream_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_bitstream";
+			iommus =
+				<&apps_smmu 0x10a1>,
+				<&apps_smmu 0x10a9>,
+				<&apps_smmu 0x10a5>,
+				<&apps_smmu 0x10ad>;
+			buffer-types = <0x241>;
+			virtual-addr-pool = <0x4b000000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_pixel";
+			iommus =
+				<&apps_smmu 0x10a3>,
+				<&apps_smmu 0x10ab>;
+			buffer-types = <0x106>;
+			virtual-addr-pool = <0x25800000 0x25800000>;
+			qcom,secure-context-bank;
+		};
+
+		secure_non_pixel_cb {
+			compatible = "qcom,msm-vidc,context-bank";
+			label = "venus_sec_non_pixel";
+			iommus =
+				<&apps_smmu 0x10a4>,
+				<&apps_smmu 0x10ac>,
+				<&apps_smmu 0x10b4>;
+			buffer-types = <0x480>;
+			virtual-addr-pool = <0x1000000 0x24800000>;
+			qcom,secure-context-bank;
+		};
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 9a988fc..2edd958 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -42,6 +42,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_0>;
@@ -72,6 +73,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x100>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_1>;
@@ -96,6 +98,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x200>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_2>;
@@ -120,6 +123,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x300>;
 			enable-method = "psci";
+			efficiency = <1024>;
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_3>;
@@ -144,6 +148,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x400>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_4>;
@@ -168,6 +173,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x500>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_5>;
@@ -192,6 +198,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x600>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_6>;
@@ -216,6 +223,7 @@
 			compatible = "arm,armv8";
 			reg = <0x0 0x700>;
 			enable-method = "psci";
+			efficiency = <1740>;
 			cache-size = <0x20000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_7>;
@@ -511,9 +519,12 @@
 		#reset-cells = <1>;
 	};
 
-	clock_camcc: qcom,camcc {
-		compatible = "qcom,dummycc";
-		clock-output-names = "camcc_clocks";
+	clock_camcc: qcom,camcc@ad00000 {
+		compatible = "qcom,cam_cc-sdm845";
+		reg = <0xad00000 0x10000>;
+		reg-names = "cc_base";
+		vdd_cx-supply = <&pm8998_s9_level>;
+		vdd_mx-supply = <&pm8998_s6_level>;
 		#clock-cells = <1>;
 		#reset-cells = <1>;
 	};
@@ -1096,10 +1107,22 @@
 			qcom,dump-node = <&L1_D_103>;
 			qcom,dump-id = <0x87>;
 		};
-		qcom,llcc_d_cache {
-			qcom,dump-node = <&llcc>;
+		qcom,llcc1_d_cache {
+			qcom,dump-node = <&LLCC_1>;
 			qcom,dump-id = <0x121>;
 		};
+		qcom,llcc2_d_cache {
+			qcom,dump-node = <&LLCC_2>;
+			qcom,dump-id = <0x122>;
+		};
+		qcom,llcc3_d_cache {
+			qcom,dump-node = <&LLCC_3>;
+			qcom,dump-id = <0x123>;
+		};
+		qcom,llcc4_d_cache {
+			qcom,dump-node = <&LLCC_4>;
+			qcom,dump-id = <0x124>;
+		};
 	};
 
 	kryo3xx-erp {
@@ -1136,6 +1159,22 @@
 		qcom,llcc-amon {
 			compatible = "qcom,llcc-amon";
 		};
+
+		LLCC_1: llcc_1_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
+
+		LLCC_2: llcc_2_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
+
+		LLCC_3: llcc_3_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
+
+		LLCC_4: llcc_4_dcache {
+			qcom,dump-size = <0xd8000>;
+		};
 	};
 
 	qcom,ipc-spinlock@1f40000 {
@@ -1428,28 +1467,31 @@
 		qcom,modem-cfg-emb-pipe-flt;
 		qcom,ipa-wdi2;
 		qcom,use-64-bit-dma-mask;
-		clock-names = "core_clk";
-		clocks = <&clock_gcc 0xfa685cda>;
+		qcom,bandwidth-vote-for-ipa;
 		qcom,msm-bus,name = "ipa";
 		qcom,msm-bus,num-cases = <4>;
-		qcom,msm-bus,num-paths = <3>;
+		qcom,msm-bus,num-paths = <4>;
 		qcom,msm-bus,vectors-KBps =
 		/* No vote */
 			<90 512 0 0>,
 			<90 585 0 0>,
 			<1 676 0 0>,
+			<143 777 0 0>,
 		/* SVS */
 			<90 512 80000 640000>,
 			<90 585 80000 640000>,
 			<1 676 80000 80000>,
+			<143 777 0 150000000>,
 		/* NOMINAL */
 			<90 512 206000 960000>,
 			<90 585 206000 960000>,
 			<1 676 206000 160000>,
+			<143 777 0 300000000>,
 		/* TURBO */
 			<90 512 206000 3600000>,
 			<90 585 206000 3600000>,
-			<1 676 206000 300000>;
+			<1 676 206000 300000>,
+			<143 777 0 355333333>;
 		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
 
 		/* IPA RAM mmap */
@@ -1690,3 +1732,5 @@
 #include "sdm845-bus.dtsi"
 #include "sdm845-vidc.dtsi"
 #include "sdm845-pm.dtsi"
+#include "sdm845-pinctrl.dtsi"
+#include "sdm845-audio.dtsi"
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 50156d2..2a23d05 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -25,7 +25,6 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
@@ -308,6 +307,7 @@
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -317,6 +317,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
@@ -336,6 +337,7 @@
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -386,6 +388,7 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7113aa2..844b286 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -30,7 +30,6 @@
 # CONFIG_RD_XZ is not set
 # CONFIG_RD_LZO is not set
 # CONFIG_RD_LZ4 is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_AIO is not set
 # CONFIG_MEMBARRIER is not set
@@ -276,6 +275,7 @@
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HVC_DCC=y
+CONFIG_HVC_DCC_SERIALIZE_SMP=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
 CONFIG_I2C_CHARDEV=y
@@ -319,6 +319,7 @@
 CONFIG_MSM_SDE_ROTATOR=y
 CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
 CONFIG_DRM=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
 CONFIG_FB_VIRTUAL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -328,6 +329,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
@@ -346,6 +348,7 @@
 CONFIG_USB_ISP1760_HOST_ROLE=y
 CONFIG_USB_PD_POLICY=y
 CONFIG_QPNP_USB_PDPHY=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_DUAL_ROLE_USB_INTF=y
 CONFIG_USB_MSM_SSPHY_QMP=y
@@ -404,6 +407,7 @@
 CONFIG_USB_BAM=y
 CONFIG_MSM_GCC_SDM845=y
 CONFIG_MSM_VIDEOCC_SDM845=y
+CONFIG_MSM_CAMCC_SDM845=y
 CONFIG_CLOCK_QPNP_DIV=y
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
@@ -412,8 +416,11 @@
 CONFIG_QCOM_LAZY_MAPPING=y
 CONFIG_IOMMU_DEBUG=y
 CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_CPUSS_DUMP=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_SDM845_LLCC=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
 CONFIG_QCOM_EUD=y
 CONFIG_QCOM_WATCHDOG_V2=y
 CONFIG_QCOM_MEMORY_DUMP_V2=y
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..f7e2c32
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 88e2f2b..55889d0 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -17,14 +17,62 @@
 
 #include <asm/kvm_hyp.h>
 
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+{
+	u64 val;
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 */
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+{
+	write_sysreg(kvm->arch.vttbr, vttbr_el2);
+	isb();
+}
+
+static hyp_alternate_select(__tlb_switch_to_guest,
+			    __tlb_switch_to_guest_nvhe,
+			    __tlb_switch_to_guest_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+}
+
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+{
+	write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__tlb_switch_to_host,
+			    __tlb_switch_to_host_nvhe,
+			    __tlb_switch_to_host_vhe,
+			    ARM64_HAS_VIRT_HOST_EXTN);
+
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	dsb(ishst);
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	/*
 	 * We could do so much better if we had the VA as well.
@@ -45,7 +93,7 @@
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -54,14 +102,13 @@
 
 	/* Switch to requested VMID */
 	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	asm volatile("tlbi vmalls12e1is" : : );
 	dsb(ish);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -69,14 +116,13 @@
 	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 
 	/* Switch to requested VMID */
-	write_sysreg(kvm->arch.vttbr, vttbr_el2);
-	isb();
+	__tlb_switch_to_guest()(kvm);
 
 	asm volatile("tlbi vmalle1" : : );
 	dsb(nsh);
 	isb();
 
-	write_sysreg(0, vttbr_el2);
+	__tlb_switch_to_host()(kvm);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index bdd409c..d8e6635 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -52,6 +52,22 @@
 	return prot;
 }
 
+static bool is_dma_coherent(struct device *dev, unsigned long attrs)
+{
+	bool is_coherent;
+
+	if (attrs & DMA_ATTR_FORCE_COHERENT)
+		is_coherent = true;
+	else if (attrs & DMA_ATTR_FORCE_NON_COHERENT)
+		is_coherent = false;
+	else if (is_device_dma_coherent(dev))
+		is_coherent = true;
+	else
+		is_coherent = false;
+
+	return is_coherent;
+}
+
 static struct gen_pool *atomic_pool;
 #define NO_KERNEL_MAPPING_DUMMY 0x2222
 #define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
@@ -207,7 +223,7 @@
 {
 	struct page *page;
 	void *ptr, *coherent_ptr;
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	size = PAGE_ALIGN(size);
 
@@ -264,7 +280,7 @@
 
 	size = PAGE_ALIGN(size);
 
-	if (!is_device_dma_coherent(dev)) {
+	if (!is_dma_coherent(dev, attrs)) {
 		if (__free_from_pool(vaddr, size))
 			return;
 		if (!(attrs & DMA_ATTR_NO_KERNEL_MAPPING))
@@ -281,7 +297,7 @@
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
@@ -292,7 +308,7 @@
 				 size_t size, enum dma_data_direction dir,
 				 unsigned long attrs)
 {
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
@@ -305,7 +321,7 @@
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		for_each_sg(sgl, sg, ret, i)
 			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 				       sg->length, dir);
@@ -321,7 +337,7 @@
 	struct scatterlist *sg;
 	int i;
 
-	if (!is_device_dma_coherent(dev))
+	if (!is_dma_coherent(dev, attrs))
 		for_each_sg(sgl, sg, nelems, i)
 			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
 					 sg->length, dir);
@@ -387,7 +403,7 @@
 	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
+					     is_dma_coherent(dev, attrs));
 
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
@@ -458,6 +474,7 @@
 {
 	struct vm_struct *area;
 
+	size = PAGE_ALIGN(size);
 	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
 
 	area = find_vm_area(remapped_addr);
@@ -686,7 +703,7 @@
 				 dma_addr_t *handle, gfp_t gfp,
 				 unsigned long attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
 	size_t iosize = size;
 	void *addr;
@@ -783,7 +800,7 @@
 	int ret;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
+					     is_dma_coherent(dev, attrs));
 
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
@@ -840,7 +857,7 @@
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 	int prot = dma_direction_to_prot(dir, coherent);
 	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 
@@ -893,7 +910,7 @@
 				int nelems, enum dma_data_direction dir,
 				unsigned long attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
@@ -1208,7 +1225,7 @@
 	size_t count = size >> PAGE_SHIFT;
 	size_t array_size = count * sizeof(struct page *);
 	int i = 0;
-	bool is_coherent = is_device_dma_coherent(dev);
+	bool is_coherent = is_dma_coherent(dev, attrs);
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, gfp);
@@ -1322,7 +1339,7 @@
 		return dma_addr;
 
 	prot = __get_iommu_pgprot(attrs, prot,
-				  is_device_dma_coherent(dev));
+				  is_dma_coherent(dev, attrs));
 
 	iova = dma_addr;
 	for (i = 0; i < count; ) {
@@ -1402,7 +1419,7 @@
 	size_t array_size = count * sizeof(struct page *);
 	int i;
 	void *addr;
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	if (array_size <= PAGE_SIZE)
 		pages = kzalloc(array_size, gfp);
@@ -1452,7 +1469,7 @@
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 	struct page **pages;
 	void *addr = NULL;
@@ -1504,7 +1521,7 @@
 	unsigned long uaddr = vma->vm_start;
 	unsigned long usize = vma->vm_end - vma->vm_start;
 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
-	bool coherent = is_device_dma_coherent(dev);
+	bool coherent = is_dma_coherent(dev, attrs);
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     coherent);
@@ -1621,7 +1638,7 @@
 		return 0;
 	}
 	prot = __get_iommu_pgprot(attrs, prot,
-				  is_device_dma_coherent(dev));
+				  is_dma_coherent(dev, attrs));
 
 	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
 	if (ret != total_length) {
@@ -1674,8 +1691,11 @@
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
 
-	if (is_device_dma_coherent(dev))
+	if (iova_coherent)
 		return;
 
 	for_each_sg(sg, s, nents, i)
@@ -1695,8 +1715,11 @@
 {
 	struct scatterlist *s;
 	int i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = sg_dma_address(sg);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
 
-	if (is_device_dma_coherent(dev))
+	if (iova_coherent)
 		return;
 
 	for_each_sg(sg, s, nents, i)
@@ -1728,7 +1751,7 @@
 
 	prot = __dma_direction_to_prot(dir);
 	prot = __get_iommu_pgprot(attrs, prot,
-				  is_device_dma_coherent(dev));
+				  is_dma_coherent(dev, attrs));
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
 			prot);
@@ -1755,7 +1778,7 @@
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
-	if (!is_device_dma_coherent(dev) &&
+	if (!is_dma_coherent(dev, attrs) &&
 	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
@@ -1781,11 +1804,10 @@
 						mapping->domain, iova));
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain,
+							handle);
 
-	if (!iova)
-		return;
-
-	if (!(is_device_dma_coherent(dev) ||
+	if (!(iova_coherent ||
 	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
@@ -1801,11 +1823,9 @@
 	struct page *page = phys_to_page(iommu_iova_to_phys(
 						mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	if (!iova)
-		return;
-
-	if (!is_device_dma_coherent(dev))
+	if (!iova_coherent)
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1817,11 +1837,9 @@
 	struct page *page = phys_to_page(iommu_iova_to_phys(
 						mapping->domain, iova));
 	unsigned int offset = handle & ~PAGE_MASK;
+	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
 
-	if (!iova)
-		return;
-
-	if (!is_device_dma_coherent(dev))
+	if (!iova_coherent)
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 05615a3..f70b433 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -30,6 +30,8 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -59,6 +61,40 @@
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
 
+struct dma_contig_early_reserve {
+	phys_addr_t base;
+	unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS];
+static int dma_mmu_remap_num;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+	if (dma_mmu_remap_num >= ARRAY_SIZE(dma_mmu_remap)) {
+		pr_err("ARM64: Not enough slots for DMA fixup reserved regions!\n");
+		return;
+	}
+	dma_mmu_remap[dma_mmu_remap_num].base = base;
+	dma_mmu_remap[dma_mmu_remap_num].size = size;
+	dma_mmu_remap_num++;
+}
+
+static bool dma_overlap(phys_addr_t start, phys_addr_t end)
+{
+	int i;
+
+	for (i = 0; i < dma_mmu_remap_num; i++) {
+		phys_addr_t dma_base = dma_mmu_remap[i].base;
+		phys_addr_t dma_end = dma_mmu_remap[i].base +
+			dma_mmu_remap[i].size;
+
+		if ((dma_base < end) && (dma_end > start))
+			return true;
+	}
+	return false;
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -149,7 +185,8 @@
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
-		      allow_block_mappings) {
+		      allow_block_mappings &&
+		      !dma_overlap(phys, phys + next - addr)) {
 			pmd_t old_pmd =*pmd;
 			pmd_set_huge(pmd, phys, prot);
 			/*
@@ -209,7 +246,8 @@
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (use_1G_block(addr, next, phys) && allow_block_mappings) {
+		if (use_1G_block(addr, next, phys) && allow_block_mappings &&
+		    !dma_overlap(phys, phys + next - addr)) {
 			pud_t old_pud = *pud;
 			pud_set_huge(pud, phys, prot);
 
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 5d83ff7..ec8e968 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -67,8 +67,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 2b74aee..e582069 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -133,7 +133,7 @@
 CONFIG_SCSI_QLOGIC_1280=y
 CONFIG_SCSI_PMCRAID=m
 CONFIG_SCSI_BFA_FC=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
@@ -205,7 +205,6 @@
 # CONFIG_MLX4_DEBUG is not set
 CONFIG_TEHUTI=m
 CONFIG_BNX2X=m
-CONFIG_QLGE=m
 CONFIG_SFC=m
 CONFIG_BE2NET=m
 CONFIG_LIBERTAS_THINFIRM=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 5da76e0..0cdb431 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -39,7 +39,7 @@
 CONFIG_PM_STD_PARTITION="/dev/hda3"
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEBUG=y
-CONFIG_CPU_FREQ_STAT=m
+CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 58d43f3..078ecac 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -59,8 +59,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index c8f7e28..e233f87 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -60,8 +60,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index d2f54e5..fbe085c 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -59,8 +59,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 3d0d9cb..2942610 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -61,8 +61,8 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index b496c25..07d0182 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -110,7 +110,7 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 8e99ad8..f59969a 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -90,7 +90,7 @@
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
index 4775a11..24d5e31 100644
--- a/arch/mips/include/asm/mach-ip27/spaces.h
+++ b/arch/mips/include/asm/mach-ip27/spaces.h
@@ -12,14 +12,16 @@
 
 /*
  * IP27 uses the R10000's uncached attribute feature.  Attribute 3 selects
- * uncached memory addressing.
+ * uncached memory addressing. Hide the definitions on 32-bit compilation
+ * of the compat-vdso code.
  */
-
+#ifdef CONFIG_64BIT
 #define HSPEC_BASE		0x9000000000000000
 #define IO_BASE			0x9200000000000000
 #define MSPEC_BASE		0x9400000000000000
 #define UNCAC_BASE		0x9600000000000000
 #define CAC_BASE		0xa800000000000000
+#endif
 
 #define TO_MSPEC(x)		(MSPEC_BASE | ((x) & TO_PHYS_MASK))
 #define TO_HSPEC(x)		(HSPEC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 5a73c5e..23198c9 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -30,8 +30,10 @@
 	return soc_info.sys_type;
 }
 
-static __init void prom_init_cmdline(int argc, char **argv)
+static __init void prom_init_cmdline(void)
 {
+	int argc;
+	char **argv;
 	int i;
 
 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
@@ -60,14 +62,11 @@
 
 void __init prom_init(void)
 {
-	int argc;
-	char **argv;
-
 	prom_soc_init(&soc_info);
 
 	pr_info("SoC Type: %s\n", get_system_type());
 
-	prom_init_cmdline(argc, argv);
+	prom_init_cmdline();
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 285796e..2b76e36 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -40,16 +40,6 @@
 	{ 0 }
 };
 
-static void rt288x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_CLKCFG);
-	t |= CLKCFG_SRAM_CS_N_WDT;
-	rt_sysc_w32(t, SYSC_REG_CLKCFG);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, wmac_rate = 40000000;
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index c8a28c4b..e778e0b 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -89,17 +89,6 @@
 	{ 0 }
 };
 
-static void rt305x_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on pin SRAM_CS_N */
-	t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
-	t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
-		RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
-	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
-}
-
 static unsigned long rt5350_get_mem_size(void)
 {
 	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 4cef916..3e0aa09 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -63,16 +63,6 @@
 	{ 0 }
 };
 
-static void rt3883_wdt_reset(void)
-{
-	u32 t;
-
-	/* enable WDT reset output on GPIO 2 */
-	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
-	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
-	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
-}
-
 void __init ralink_clk_init(void)
 {
 	unsigned long cpu_rate, sys_rate;
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index 8077ff3..d4469b2 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -71,11 +71,6 @@
 	return err;
 }
 
-static void rt_timer_free(struct rt_timer *rt)
-{
-	free_irq(rt->irq, rt);
-}
-
 static int rt_timer_config(struct rt_timer *rt, unsigned long divisor)
 {
 	if (rt->timer_freq < divisor)
@@ -101,15 +96,6 @@
 	return 0;
 }
 
-static void rt_timer_disable(struct rt_timer *rt)
-{
-	u32 t;
-
-	t = rt_timer_r32(rt, TIMER_REG_TMR0CTL);
-	t &= ~TMR0CTL_ENABLE;
-	rt_timer_w32(rt, TIMER_REG_TMR0CTL, t);
-}
-
 static int rt_timer_probe(struct platform_device *pdev)
 {
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
index b7a4b7e..e8f6b3a 100644
--- a/arch/mips/sgi-ip22/Platform
+++ b/arch/mips/sgi-ip22/Platform
@@ -25,7 +25,7 @@
 # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
 #
 ifdef CONFIG_SGI_IP28
-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
+  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
   endif
 endif
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index 9fa046d..4119945 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -52,7 +52,7 @@
 {
 	u32 *key = crypto_tfm_ctx(tfm);
 
-	*key = 0;
+	*key = ~0;
 
 	return 0;
 }
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 5c45114..b9e3f0a 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -19,16 +19,18 @@
 struct mm_iommu_table_group_mem_t;
 
 extern int isolate_lru_page(struct page *page);	/* from internal.h */
-extern bool mm_iommu_preregistered(void);
-extern long mm_iommu_get(unsigned long ua, unsigned long entries,
+extern bool mm_iommu_preregistered(struct mm_struct *mm);
+extern long mm_iommu_get(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem);
-extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
-extern void mm_iommu_init(mm_context_t *ctx);
-extern void mm_iommu_cleanup(mm_context_t *ctx);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries);
+extern long mm_iommu_put(struct mm_struct *mm,
+		struct mm_iommu_table_group_mem_t *mem);
+extern void mm_iommu_init(struct mm_struct *mm);
+extern void mm_iommu_cleanup(struct mm_struct *mm);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size);
+extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries);
 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
 		unsigned long ua, unsigned long *hpa);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 270ee30..f516ac5 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -915,7 +915,7 @@
 	init_mm.context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&init_mm.context);
+	mm_iommu_init(&init_mm);
 #endif
 	irqstack_early_init();
 	exc_lvl_early_init();
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 3362299..6ca3b90 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1807,8 +1807,6 @@
 		goto instr_done;
 
 	case LARX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1832,8 +1830,6 @@
 		goto ldst_done;
 
 	case STCX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (op.ea & (size - 1))
 			break;		/* can't handle misaligned */
 		err = -EFAULT;
@@ -1859,8 +1855,6 @@
 		goto ldst_done;
 
 	case LOAD:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
 		if (!err) {
 			if (op.type & SIGNEXT)
@@ -1872,8 +1866,6 @@
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
 		else
@@ -1882,15 +1874,11 @@
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
@@ -1913,8 +1901,6 @@
 		goto instr_done;
 
 	case STORE:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if ((op.type & UPDATE) && size == sizeof(long) &&
 		    op.reg == 1 && op.update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
@@ -1927,8 +1913,6 @@
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
-		if (regs->msr & MSR_LE)
-			return 0;
 		if (size == 4)
 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
 		else
@@ -1937,15 +1921,11 @@
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX:
-		if (regs->msr & MSR_LE)
-			return 0;
 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index b114f8b..73bf6e1 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -115,7 +115,7 @@
 	mm->context.pte_frag = NULL;
 #endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_init(&mm->context);
+	mm_iommu_init(mm);
 #endif
 	return 0;
 }
@@ -156,13 +156,11 @@
 }
 #endif
 
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
-	mm_iommu_cleanup(&mm->context);
+	WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
 #endif
-
 #ifdef CONFIG_PPC_ICSWX
 	drop_cop(mm->context.acop, mm);
 	kfree(mm->context.cop_lockp);
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index e0f1c33..7de7124 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -56,7 +56,7 @@
 	}
 
 	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
-			current->pid,
+			current ? current->pid : 0,
 			incr ? '+' : '-',
 			npages << PAGE_SHIFT,
 			mm->locked_vm << PAGE_SHIFT,
@@ -66,12 +66,9 @@
 	return ret;
 }
 
-bool mm_iommu_preregistered(void)
+bool mm_iommu_preregistered(struct mm_struct *mm)
 {
-	if (!current || !current->mm)
-		return false;
-
-	return !list_empty(&current->mm->context.iommu_group_mem_list);
+	return !list_empty(&mm->context.iommu_group_mem_list);
 }
 EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
 
@@ -124,19 +121,16 @@
 	return 0;
 }
 
-long mm_iommu_get(unsigned long ua, unsigned long entries,
+long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
 		struct mm_iommu_table_group_mem_t **pmem)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 	long i, j, ret = 0, locked_entries = 0;
 	struct page *page = NULL;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
-	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
 			next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			++mem->used;
@@ -154,7 +148,7 @@
 
 	}
 
-	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
+	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
 	if (ret)
 		goto unlock_exit;
 
@@ -190,7 +184,7 @@
 		 * of the CMA zone if possible. NOTE: faulting in + migration
 		 * can be expensive. Batching can be considered later
 		 */
-		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
+		if (is_migrate_cma_page(page)) {
 			if (mm_iommu_move_page_from_cma(page))
 				goto populate;
 			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
@@ -215,11 +209,11 @@
 	mem->entries = entries;
 	*pmem = mem;
 
-	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);
+	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
 
 unlock_exit:
 	if (locked_entries && ret)
-		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);
+		mm_iommu_adjust_locked_vm(mm, locked_entries, false);
 
 	mutex_unlock(&mem_list_mutex);
 
@@ -264,17 +258,13 @@
 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
 {
 	list_del_rcu(&mem->next);
-	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
 	call_rcu(&mem->rcu, mm_iommu_free);
 }
 
-long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
+long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 {
 	long ret = 0;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	mutex_lock(&mem_list_mutex);
 
 	if (mem->used == 0) {
@@ -297,6 +287,8 @@
 	/* @mapped became 0 so now mappings are disabled, release the region */
 	mm_iommu_release(mem);
 
+	mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+
 unlock_exit:
 	mutex_unlock(&mem_list_mutex);
 
@@ -304,14 +296,12 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_put);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
-		unsigned long size)
+struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
+		unsigned long ua, unsigned long size)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua <= ua) &&
 				(ua + size <= mem->ua +
 				 (mem->entries << PAGE_SHIFT))) {
@@ -324,14 +314,12 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_lookup);
 
-struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
-		unsigned long entries)
+struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+		unsigned long ua, unsigned long entries)
 {
 	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
 
-	list_for_each_entry_rcu(mem,
-			&current->mm->context.iommu_group_mem_list,
-			next) {
+	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
 		if ((mem->ua == ua) && (mem->entries == entries)) {
 			ret = mem;
 			break;
@@ -373,17 +361,7 @@
 }
 EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
 
-void mm_iommu_init(mm_context_t *ctx)
+void mm_iommu_init(struct mm_struct *mm)
 {
-	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
-}
-
-void mm_iommu_cleanup(mm_context_t *ctx)
-{
-	struct mm_iommu_table_group_mem_t *mem, *tmp;
-
-	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
-		list_del_rcu(&mem->next);
-		mm_iommu_do_free(mem);
-	}
+	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
 }
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index c96c0cb..32c46b4 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@
 
 static void icp_opal_set_cpu_priority(unsigned char cppr)
 {
+	/*
+	 * Here be dragons. The caller has asked to allow only IPI's and not
+	 * external interrupts. But OPAL XIVE doesn't support that. So instead
+	 * of allowing no interrupts allow all. That's still not right, but
+	 * currently the only caller who does this is xics_migrate_irqs_away()
+	 * and it works in that case.
+	 */
+	if (cppr >= DEFAULT_PRIORITY)
+		cppr = LOWEST_PRIORITY;
+
 	xics_set_base_cppr(cppr);
 	opal_int_set_cppr(cppr);
 	iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e..23efe4e 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/delay.h>
 
 #include <asm/prom.h>
 #include <asm/io.h>
@@ -198,9 +199,6 @@
 	/* Remove ourselves from the global interrupt queue */
 	xics_set_cpu_giq(xics_default_distrib_server, 0);
 
-	/* Allow IPIs again... */
-	icp_ops->set_priority(DEFAULT_PRIORITY);
-
 	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
@@ -255,6 +253,19 @@
 unlock:
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
+
+	/* Allow "sufficient" time to drop any inflight IRQ's */
+	mdelay(5);
+
+	/*
+	 * Allow IPIs again. This is done at the very end, after migrating all
+	 * interrupts, the expectation is that we'll only get woken up by an IPI
+	 * interrupt beyond this point, but leave externals masked just to be
+	 * safe. If we're using icp-opal this may actually allow all
+	 * interrupts anyway, but that should be OK.
+	 */
+	icp_ops->set_priority(DEFAULT_PRIORITY);
+
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index d56ef26..7678f79 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -606,12 +606,29 @@
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 {
 	spinlock_t *ptl;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
 	pgste_t pgste;
 	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 
-	ptep = get_locked_pte(mm, addr, &ptl);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return false;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return false;
+	/* We can't run guests backed by huge pages, but userspace can
+	 * still set them up and then try to migrate them without any
+	 * migration support.
+	 */
+	if (pmd_large(*pmd))
+		return true;
+
+	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (unlikely(!ptep))
 		return false;
 
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7fe88bb..38623e2 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2096,8 +2096,8 @@
 
 static void refresh_pce(void *ignored)
 {
-	if (current->mm)
-		load_mm_cr4(current->mm);
+	if (current->active_mm)
+		load_mm_cr4(current->active_mm);
 }
 
 static void x86_pmu_event_mapped(struct perf_event *event)
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f44c5a..f228f74 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -31,6 +31,7 @@
 #include <asm/apic.h>
 #include <asm/timer.h>
 #include <asm/reboot.h>
+#include <asm/nmi.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -158,6 +159,26 @@
 	return 0;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+	static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+	if (!unknown_nmi_panic)
+		return NMI_DONE;
+
+	if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+		return NMI_HANDLED;
+
+	return NMI_DONE;
+}
+#endif
+
 static void __init ms_hyperv_init_platform(void)
 {
 	/*
@@ -183,6 +204,9 @@
 		pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
 			lapic_timer_frequency);
 	}
+
+	register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+			     "hv_nmi_unknown");
 #endif
 
 	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 54a2372..b5785c1 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -4,6 +4,7 @@
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  */
 
+#define DISABLE_BRANCH_PROFILING
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 46b2f41..eea88fe 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1287,6 +1287,8 @@
 	 * exporting a reliable TSC.
 	 */
 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		if (boot_cpu_has(X86_FEATURE_ART))
+			art_related_clocksource = &clocksource_tsc;
 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
 		return 0;
 	}
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0493c17..333362f 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index bedfab9..a00a6c0 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -234,23 +234,14 @@
 		return 1;
 
 	for_each_pci_msi_entry(msidesc, dev) {
-		__pci_read_msi_msg(msidesc, &msg);
-		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
-			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
-		if (msg.data != XEN_PIRQ_MSI_DATA ||
-		    xen_irq_from_pirq(pirq) < 0) {
-			pirq = xen_allocate_pirq_msi(dev, msidesc);
-			if (pirq < 0) {
-				irq = -ENODEV;
-				goto error;
-			}
-			xen_msi_compose_msg(dev, pirq, &msg);
-			__pci_write_msi_msg(msidesc, &msg);
-			dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
-		} else {
-			dev_dbg(&dev->dev,
-				"xen: msi already bound to pirq=%d\n", pirq);
+		pirq = xen_allocate_pirq_msi(dev, msidesc);
+		if (pirq < 0) {
+			irq = -ENODEV;
+			goto error;
 		}
+		xen_msi_compose_msg(dev, pirq, &msg);
+		__pci_write_msi_msg(msidesc, &msg);
+		dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
 		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
 					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
 					       (type == PCI_CAP_ID_MSIX) ?
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799..c6fee74 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -182,6 +182,9 @@
 	__set_bit(WRITE_16, filter->write_ok);
 	__set_bit(WRITE_LONG, filter->write_ok);
 	__set_bit(WRITE_LONG_2, filter->write_ok);
+	__set_bit(WRITE_SAME, filter->write_ok);
+	__set_bit(WRITE_SAME_16, filter->write_ok);
+	__set_bit(WRITE_SAME_32, filter->write_ok);
 	__set_bit(ERASE, filter->write_ok);
 	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
 	__set_bit(MODE_SELECT, filter->write_ok);
diff --git a/crypto/Makefile b/crypto/Makefile
index bd6a029..9e52b3c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -71,6 +71,7 @@
 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
 obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
+CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
 obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
 obj-$(CONFIG_CRYPTO_ECB) += ecb.o
@@ -94,6 +95,7 @@
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
 obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
+CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
 obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
 obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
 obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bdc67ba..4421f7c 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -160,6 +160,34 @@
 		      DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
 		},
 	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 5520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+		},
+	},
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Precision 3520",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+		},
+	},
+	/*
+	 * Resolves a quirk with the Dell Latitude 3350 that
+	 * causes the ethernet adapter to not function.
+	 */
+	{
+	 .callback = dmi_enable_rev_override,
+	 .ident = "DELL Latitude 3350",
+	 .matches = {
+		      DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		      DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+		},
+	},
 #endif
 	{}
 };
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 1bd8401..79902e7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -406,6 +406,7 @@
 	struct files_struct *files = proc->files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
 	if (files == NULL)
 		return -ESRCH;
@@ -416,7 +417,11 @@
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_enable_no_resched();
+	ret = __alloc_fd(files, 0, rlim_cur, flags);
+	preempt_disable();
+
+	return ret;
 }
 
 /*
@@ -425,8 +430,11 @@
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
-	if (proc->files)
+	if (proc->files) {
+		preempt_enable_no_resched();
 		__fd_install(proc->files, fd, file);
+		preempt_disable();
+	}
 }
 
 /*
@@ -454,6 +462,7 @@
 {
 	trace_binder_lock(tag);
 	mutex_lock(&binder_main_lock);
+	preempt_disable();
 	trace_binder_locked(tag);
 }
 
@@ -461,8 +470,62 @@
 {
 	trace_binder_unlock(tag);
 	mutex_unlock(&binder_main_lock);
+	preempt_enable();
 }
 
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+	void *ptr;
+
+	ptr = kzalloc(size, GFP_NOWAIT);
+	if (ptr)
+		return ptr;
+
+	preempt_enable_no_resched();
+	ptr = kzalloc(size, GFP_KERNEL);
+	preempt_disable();
+
+	return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_to_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+	long ret;
+
+	preempt_enable_no_resched();
+	ret = copy_from_user(to, from, n);
+	preempt_disable();
+	return ret;
+}
+
+#define get_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = get_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
+#define put_user_preempt_disabled(x, ptr)	\
+({						\
+	int __ret;				\
+	preempt_enable_no_resched();		\
+	__ret = put_user(x, ptr);		\
+	preempt_disable();			\
+	__ret;					\
+})
+
 static void binder_set_nice(long nice)
 {
 	long min_nice;
@@ -595,6 +658,8 @@
 	else
 		mm = get_task_mm(proc->tsk);
 
+	preempt_enable_no_resched();
+
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = proc->vma;
@@ -649,6 +714,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+
 	return 0;
 
 free_range:
@@ -671,6 +739,9 @@
 		up_write(&mm->mmap_sem);
 		mmput(mm);
 	}
+
+	preempt_disable();
+	
 	return -ENOMEM;
 }
 
@@ -939,7 +1010,7 @@
 			return NULL;
 	}
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kzalloc_preempt_disabled(sizeof(*node));
 	if (node == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_NODE);
@@ -1083,7 +1154,7 @@
 		else
 			return ref;
 	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
 	if (new_ref == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_REF);
@@ -1955,14 +2026,14 @@
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	t = kzalloc_preempt_disabled(sizeof(*t));
 	if (t == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 
-	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
 	if (tcomplete == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_tcomplete_failed;
@@ -2023,14 +2094,14 @@
 				      ALIGN(tr->data_size, sizeof(void *)));
 	offp = off_start;
 
-	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
 			   tr->data.ptr.buffer, tr->data_size)) {
 		binder_user_error("%d:%d got transaction with invalid data ptr\n",
 				proc->pid, thread->pid);
 		return_error = BR_FAILED_REPLY;
 		goto err_copy_data_failed;
 	}
-	if (copy_from_user(offp, (const void __user *)(uintptr_t)
+	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
 			   tr->data.ptr.offsets, tr->offsets_size)) {
 		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 				proc->pid, thread->pid);
@@ -2148,9 +2219,10 @@
 				return_error = BR_FAILED_REPLY;
 				goto err_bad_offset;
 			}
-			if (copy_from_user(sg_bufp,
-					   (const void __user *)(uintptr_t)
-					   bp->buffer, bp->length)) {
+			if (copy_from_user_preempt_disabled(
+					sg_bufp,
+					(const void __user *)(uintptr_t)
+					bp->buffer, bp->length)) {
 				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
 						  proc->pid, thread->pid);
 				return_error = BR_FAILED_REPLY;
@@ -2257,7 +2329,7 @@
 	void __user *end = buffer + size;
 
 	while (ptr < end && thread->return_error == BR_OK) {
-		if (get_user(cmd, (uint32_t __user *)ptr))
+		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		trace_binder_command(cmd);
@@ -2275,7 +2347,7 @@
 			struct binder_ref *ref;
 			const char *debug_string;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			if (target == 0 && context->binder_context_mgr_node &&
@@ -2327,10 +2399,10 @@
 			binder_uintptr_t cookie;
 			struct binder_node *node;
 
-			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			node = binder_get_node(proc, node_ptr);
@@ -2388,7 +2460,7 @@
 			binder_uintptr_t data_ptr;
 			struct binder_buffer *buffer;
 
-			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 
@@ -2430,7 +2502,8 @@
 		case BC_REPLY_SG: {
 			struct binder_transaction_data_sg tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr,
+							    sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr.transaction_data,
@@ -2441,7 +2514,7 @@
 		case BC_REPLY: {
 			struct binder_transaction_data tr;
 
-			if (copy_from_user(&tr, ptr, sizeof(tr)))
+			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
 				return -EFAULT;
 			ptr += sizeof(tr);
 			binder_transaction(proc, thread, &tr,
@@ -2492,10 +2565,10 @@
 			struct binder_ref *ref;
 			struct binder_ref_death *death;
 
-			if (get_user(target, (uint32_t __user *)ptr))
+			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			ref = binder_get_ref(proc, target, false);
@@ -2524,7 +2597,7 @@
 						proc->pid, thread->pid);
 					break;
 				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				death = kzalloc_preempt_disabled(sizeof(*death));
 				if (death == NULL) {
 					thread->return_error = BR_ERROR;
 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2578,8 +2651,7 @@
 			struct binder_work *w;
 			binder_uintptr_t cookie;
 			struct binder_ref_death *death = NULL;
-
-			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
 				return -EFAULT;
 
 			ptr += sizeof(cookie);
@@ -2611,7 +2683,8 @@
 					wake_up_interruptible(&proc->wait);
 				}
 			}
-		} break;
+		}
+		break;
 
 		default:
 			pr_err("%d:%d unknown command %d\n",
@@ -2660,7 +2733,7 @@
 	int wait_for_proc_work;
 
 	if (*consumed == 0) {
-		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 	}
@@ -2671,7 +2744,7 @@
 
 	if (thread->return_error != BR_OK && ptr < end) {
 		if (thread->return_error2 != BR_OK) {
-			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 			binder_stat_br(proc, thread, thread->return_error2);
@@ -2679,7 +2752,7 @@
 				goto done;
 			thread->return_error2 = BR_OK;
 		}
-		if (put_user(thread->return_error, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
 		binder_stat_br(proc, thread, thread->return_error);
@@ -2757,7 +2830,7 @@
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			cmd = BR_TRANSACTION_COMPLETE;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+				if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
 
@@ -2799,14 +2872,14 @@
 				node->has_weak_ref = 0;
 			}
 			if (cmd != BR_NOOP) {
-				if (put_user(cmd, (uint32_t __user *)ptr))
+					if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 					return -EFAULT;
 				ptr += sizeof(uint32_t);
-				if (put_user(node->ptr,
+					if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
-				if (put_user(node->cookie,
+					if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
 					     (binder_uintptr_t __user *)ptr))
 					return -EFAULT;
 				ptr += sizeof(binder_uintptr_t);
@@ -2850,11 +2923,10 @@
 				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
 			else
 				cmd = BR_DEAD_BINDER;
-			if (put_user(cmd, (uint32_t __user *)ptr))
+			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 				return -EFAULT;
 			ptr += sizeof(uint32_t);
-			if (put_user(death->cookie,
-				     (binder_uintptr_t __user *)ptr))
+			if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 			binder_stat_br(proc, thread, cmd);
@@ -2921,10 +2993,10 @@
 					ALIGN(t->buffer->data_size,
 					    sizeof(void *));
 
-		if (put_user(cmd, (uint32_t __user *)ptr))
+		if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
 			return -EFAULT;
 		ptr += sizeof(uint32_t);
-		if (copy_to_user(ptr, &tr, sizeof(tr)))
+		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
 			return -EFAULT;
 		ptr += sizeof(tr);
 
@@ -2966,7 +3038,7 @@
 		binder_debug(BINDER_DEBUG_THREADS,
 			     "%d:%d BR_SPAWN_LOOPER\n",
 			     proc->pid, thread->pid);
-		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
 			return -EFAULT;
 		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
 	}
@@ -3041,7 +3113,7 @@
 			break;
 	}
 	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		thread = kzalloc_preempt_disabled(sizeof(*thread));
 		if (thread == NULL)
 			return NULL;
 		binder_stats_created(BINDER_STAT_THREAD);
@@ -3145,7 +3217,7 @@
 		ret = -EINVAL;
 		goto out;
 	}
-	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3163,7 +3235,7 @@
 		trace_binder_write_done(ret);
 		if (ret < 0) {
 			bwr.read_consumed = 0;
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3177,7 +3249,7 @@
 		if (!list_empty(&proc->todo))
 			wake_up_interruptible(&proc->wait);
 		if (ret < 0) {
-			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
 				ret = -EFAULT;
 			goto out;
 		}
@@ -3187,7 +3259,7 @@
 		     proc->pid, thread->pid,
 		     (u64)bwr.write_consumed, (u64)bwr.write_size,
 		     (u64)bwr.read_consumed, (u64)bwr.read_size);
-	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+	if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
 		ret = -EFAULT;
 		goto out;
 	}
@@ -3271,7 +3343,7 @@
 			goto err;
 		break;
 	case BINDER_SET_MAX_THREADS:
-		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
 			ret = -EINVAL;
 			goto err;
 		}
@@ -3294,9 +3366,8 @@
 			ret = -EINVAL;
 			goto err;
 		}
-		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
-			     &ver->protocol_version)) {
-			ret = -EINVAL;
+		if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
+			ret = -EINVAL;
 			goto err;
 		}
 		break;
@@ -3357,12 +3428,13 @@
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	int ret;
+
 	struct vm_struct *area;
 	struct binder_proc *proc = filp->private_data;
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
+	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
@@ -3417,7 +3489,11 @@
 	vma->vm_ops = &binder_vm_ops;
 	vma->vm_private_data = proc;
 
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	/* binder_update_page_range assumes preemption is disabled */
+	preempt_disable();
+	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+	preempt_enable_no_resched();
+	if (ret) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
@@ -3464,9 +3540,9 @@
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
-	get_task_struct(current);
-	proc->tsk = current;
-	proc->vma_vm_mm = current->mm;
+	get_task_struct(current->group_leader);
+	proc->tsk = current->group_leader;
+	proc->vma_vm_mm = current->group_leader->mm;
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
@@ -3703,8 +3779,12 @@
 	int defer;
 
 	do {
-		binder_lock(__func__);
+		trace_binder_lock(__func__);
+		mutex_lock(&binder_main_lock);
+		trace_binder_locked(__func__);
+
 		mutex_lock(&binder_deferred_lock);
+		preempt_disable();
 		if (!hlist_empty(&binder_deferred_list)) {
 			proc = hlist_entry(binder_deferred_list.first,
 					struct binder_proc, deferred_work_node);
@@ -3730,7 +3810,9 @@
 		if (defer & BINDER_DEFERRED_RELEASE)
 			binder_deferred_release(proc); /* frees proc */
 
-		binder_unlock(__func__);
+		trace_binder_unlock(__func__);
+		mutex_unlock(&binder_main_lock);
+		preempt_enable_no_resched();
 		if (files)
 			put_files_struct(files);
 	} while (proc);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index fb9796d..5ba619a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1116,7 +1116,13 @@
 	error = dpm_sysfs_add(dev);
 	if (error)
 		goto DPMError;
-	device_pm_add(dev);
+	if ((dev->pm_domain) || (dev->type && dev->type->pm)
+		|| (dev->class && (dev->class->pm || dev->class->resume))
+		|| (dev->bus && (dev->bus->pm || dev->bus->resume)) ||
+		(dev->driver && dev->driver->pm)) {
+		device_pm_add(dev);
+	}
+
 
 	if (MAJOR(dev->devt)) {
 		error = device_create_file(dev, &dev_attr_dev);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97..f95593a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -201,22 +201,43 @@
 	driver_deferred_probe_trigger();
 }
 
+static void enable_trigger_defer_cycle(void)
+{
+	driver_deferred_probe_enable = true;
+	driver_deferred_probe_trigger();
+	/*
+	 * Sort as many dependencies as possible before the next initcall
+	 * level
+	 */
+	flush_work(&deferred_probe_work);
+}
+
 /**
  * deferred_probe_initcall() - Enable probing of deferred devices
  *
  * We don't want to get in the way when the bulk of drivers are getting probed.
  * Instead, this initcall makes sure that deferred probing is delayed until
- * late_initcall time.
+ * all the registered initcall functions at a particular level are completed.
+ * This function is invoked at every *_initcall_sync level.
  */
 static int deferred_probe_initcall(void)
 {
-	driver_deferred_probe_enable = true;
-	driver_deferred_probe_trigger();
-	/* Sort as many dependencies as possible before exiting initcalls */
-	flush_work(&deferred_probe_work);
+	enable_trigger_defer_cycle();
+	driver_deferred_probe_enable = false;
 	return 0;
 }
-late_initcall(deferred_probe_initcall);
+arch_initcall_sync(deferred_probe_initcall);
+subsys_initcall_sync(deferred_probe_initcall);
+fs_initcall_sync(deferred_probe_initcall);
+device_initcall_sync(deferred_probe_initcall);
+
+static int deferred_probe_enable_fn(void)
+{
+	/* Enable deferred probing for all time */
+	enable_trigger_defer_cycle();
+	return 0;
+}
+late_initcall(deferred_probe_enable_fn);
 
 /**
  * device_is_bound() - Check if device is bound to a driver
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 26cf6b9..a95e1e5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -294,8 +294,7 @@
 	"/lib/firmware/updates/" UTS_RELEASE,
 	"/lib/firmware/updates",
 	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware",
-	"/firmware/image"
+	"/lib/firmware"
 };
 
 /*
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 4f99101..c1e56c3 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -162,6 +162,12 @@
 	pr_debug("PM: Moving %s:%s before %s:%s\n",
 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+	if (!((devb->pm_domain) || (devb->type && devb->type->pm)
+		|| (devb->class && (devb->class->pm || devb->class->resume))
+		|| (devb->bus && (devb->bus->pm || devb->bus->resume)) ||
+		(devb->driver && devb->driver->pm))) {
+		device_pm_add(devb);
+	}
 	/* Delete deva from dpm_list and reinsert before devb. */
 	list_move_tail(&deva->power.entry, &devb->power.entry);
 }
@@ -176,6 +182,12 @@
 	pr_debug("PM: Moving %s:%s after %s:%s\n",
 		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
 		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+	if (!((devb->pm_domain) || (devb->type && devb->type->pm)
+		|| (devb->class && (devb->class->pm || devb->class->resume))
+		|| (devb->bus && (devb->bus->pm || devb->bus->resume)) ||
+		(devb->driver && devb->driver->pm))) {
+		device_pm_add(devb);
+	}
 	/* Delete deva from dpm_list and reinsert after devb. */
 	list_move(&deva->power.entry, &devb->power.entry);
 }
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 16d307b..270cdd4 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1040,7 +1040,7 @@
 		active_time = ktime_set(0, 0);
 	}
 
-	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+	seq_printf(m, "%-32s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
 		   ws->name, active_count, ws->event_count,
 		   ws->wakeup_count, ws->expire_count,
 		   ktime_to_ms(active_time), ktime_to_ms(total_time),
@@ -1060,7 +1060,7 @@
 {
 	struct wakeup_source *ws;
 
-	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+	seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t"
 		"expire_count\tactive_since\ttotal_time\tmax_time\t"
 		"last_change\tprevent_suspend_time\n");
 
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index ea684fc..9c26f87 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -1433,7 +1433,7 @@
 
 void diag_dci_notify_client(int peripheral_mask, int data, int proc)
 {
-	int stat;
+	int stat = 0;
 	struct siginfo info;
 	struct list_head *start, *temp;
 	struct diag_dci_client_tbl *entry = NULL;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 3bbd2a5..2acaa77 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1598,7 +1598,7 @@
 		.a2w_reg = A2W_PLLH_AUX,
 		.load_mask = CM_PLLH_LOADAUX,
 		.hold_mask = 0,
-		.fixed_divider = 10),
+		.fixed_divider = 1),
 	[BCM2835_PLLH_PIX]	= REGISTER_PLL_DIV(
 		.name = "pllh_pix",
 		.source_pll = "pllh",
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 13f747a..34a7d97 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -169,6 +169,15 @@
 	  Say Y if you want to support video devices and functionality such as
 	  video encode/decode.
 
+config MSM_CAMCC_SDM845
+	tristate "SDM845 Camera Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc
+	  sdm845 devices.
+	  Say Y if you want to support camera devices and functionality such as
+	  capturing pictures.
+
 config CLOCK_QPNP_DIV
 	tristate "QPNP PMIC clkdiv driver"
 	depends on COMMON_CLK_QCOM && SPMI
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 321587b..b97efe4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,7 @@
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
+obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
new file mode 100644
index 0000000..c49eddf
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -0,0 +1,1935 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,camcc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "clk-alpha-pll.h"
+#include "vdd-level-sdm845.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_CX_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_CX_NUM, 1, vdd_corner);
+
+enum {
+	P_BI_TCXO,
+	P_CAM_CC_PLL0_OUT_EVEN,
+	P_CAM_CC_PLL1_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_EVEN,
+	P_CAM_CC_PLL2_OUT_ODD,
+	P_CAM_CC_PLL3_OUT_EVEN,
+	P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_0[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+	{ P_BI_TCXO, 0 },
+	{ P_CAM_CC_PLL2_OUT_EVEN, 1 },
+	{ P_CAM_CC_PLL1_OUT_EVEN, 2 },
+	{ P_CAM_CC_PLL2_OUT_ODD, 4 },
+	{ P_CAM_CC_PLL3_OUT_EVEN, 5 },
+	{ P_CAM_CC_PLL0_OUT_EVEN, 6 },
+	{ P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const cam_cc_parent_names_1[] = {
+	"bi_tcxo",
+	"cam_cc_pll2_out_even",
+	"cam_cc_pll1_out_even",
+	"cam_cc_pll2_out_odd",
+	"cam_cc_pll3_out_even",
+	"cam_cc_pll0_out_even",
+	"core_bi_pll_test_se",
+};
+
+static struct pll_vco fabia_vco[] = {
+	{ 250000000, 2000000000, 0 },
+	{ 125000000, 1000000000, 1 },
+};
+
+static const struct pll_config cam_cc_pll0_config = {
+	.l = 0x1f,
+	.frac = 0x4000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+	.offset = 0x0,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll0",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 600000000),
+		},
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_even[] = {
+	{ 0x0, 1 },
+	{ 0x1, 2 },
+	{ 0x3, 4 },
+	{ 0x7, 8 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+	.offset = 0x0,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll0_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll0" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll1_config = {
+	.l = 0x2a,
+	.frac = 0x1556,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+	.offset = 0x1000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll1",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOW, 808000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+	.offset = 0x1000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll1_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll1" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll2_config = {
+	.l = 0x32,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+	.offset = 0x2000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll2",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_MX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 960000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_even = {
+	.offset = 0x2000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct clk_div_table post_div_table_fabia_odd[] = {
+	{ 0x0, 1 },
+	{ 0x3, 3 },
+	{ 0x5, 5 },
+	{ 0x7, 7 },
+	{ }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_odd = {
+	.offset = 0x2000,
+	.post_div_shift = 12,
+	.post_div_table = post_div_table_fabia_odd,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_odd),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll2_out_odd",
+		.parent_names = (const char *[]){ "cam_cc_pll2" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct pll_config cam_cc_pll3_config = {
+	.l = 0x14,
+	.frac = 0x0,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+	.offset = 0x3000,
+	.vco_table = fabia_vco,
+	.num_vco = ARRAY_SIZE(fabia_vco),
+	.type = FABIA_PLL,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll3",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_fabia_pll_ops,
+			VDD_CX_FMAX_MAP2(
+				MIN, 19200000,
+				LOWER, 384000000),
+		},
+	},
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+	.offset = 0x3000,
+	.post_div_shift = 8,
+	.post_div_table = post_div_table_fabia_even,
+	.num_post_div = ARRAY_SIZE(post_div_table_fabia_even),
+	.width = 4,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_pll3_out_even",
+		.parent_names = (const char *[]){ "cam_cc_pll3" },
+		.num_parents = 1,
+		.ops = &clk_generic_pll_postdiv_ops,
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+	.cmd_rcgr = 0x600c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_bps_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+
+static struct clk_rcg2 cam_cc_cci_clk_src = {
+	.cmd_rcgr = 0xb0d8,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_cci_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cci_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 37500000,
+			LOW, 50000000,
+			NOMINAL, 100000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_ODD, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+	.cmd_rcgr = 0x9060,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_cphy_rx_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP4(
+			MIN, 19200000,
+			LOWER, 300000000,
+			LOW, 320000000,
+			HIGH, 384000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(240000000, P_CAM_CC_PLL2_OUT_EVEN, 2, 0, 0),
+	F(269333333, P_CAM_CC_PLL1_OUT_EVEN, 3, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+	.cmd_rcgr = 0x5004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi0phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+	.cmd_rcgr = 0x5028,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi1phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+	.cmd_rcgr = 0x504c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_csi2phytimer_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 269333333),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+	F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+	.cmd_rcgr = 0x6038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fast_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 100000000,
+			LOW, 200000000,
+			LOW_L1, 300000000,
+			NOMINAL, 400000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_fd_core_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_fd_core_clk_src = {
+	.cmd_rcgr = 0xb0b0,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_fd_core_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+	.cmd_rcgr = 0xb088,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_fd_core_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_icp_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 400000000,
+			LOW_L1, 538666667,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(320000000, P_CAM_CC_PLL2_OUT_EVEN, 1.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+	.cmd_rcgr = 0x900c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+	F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+	F(384000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+	.cmd_rcgr = 0x9038,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_0_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+	.cmd_rcgr = 0xa00c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+	.cmd_rcgr = 0xa030,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_1_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+	.cmd_rcgr = 0xb004,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 320000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+	.cmd_rcgr = 0xb024,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_1,
+	.freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ife_lite_csid_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP3(
+			MIN, 19200000,
+			LOWER, 384000000,
+			NOMINAL, 538666667),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+	F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(538666667, P_CAM_CC_PLL1_OUT_EVEN, 1.5, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+	.cmd_rcgr = 0x700c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_ipe_1_clk_src = {
+	.cmd_rcgr = 0x800c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_ipe_1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP6(
+			MIN, 19200000,
+			LOWER, 240000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 538666667,
+			HIGH, 600000000),
+	},
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+	.cmd_rcgr = 0xb04c,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.enable_safe_config = true,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_bps_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_jpeg_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 404000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+	F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+	F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+	F(384000000, P_CAM_CC_PLL2_OUT_ODD, 2.5, 0, 0),
+	F(480000000, P_CAM_CC_PLL2_OUT_EVEN, 1, 0, 0),
+	F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+	{ }
+};
+
+/* LRME (Low Resolution Motion Estimation) core clock RCG. Note: this is
+ * the only camera RCG on parent_map_1 (7 parents) in this file.
+ */
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+	.cmd_rcgr = 0xb0f8,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_1,
+	.enable_safe_config = true,
+	.freq_tbl = ftbl_cam_cc_lrme_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_lrme_clk_src",
+		.parent_names = cam_cc_parent_names_1,
+		.num_parents = 7,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 200000000,
+			LOW, 384000000,
+			LOW_L1, 480000000,
+			NOMINAL, 600000000),
+	},
+};
+
+/* Sensor MCLK frequency table, shared by all four MCLK RCGs below. */
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+	F(33333333, P_CAM_CC_PLL0_OUT_EVEN, 2, 1, 9),
+	F(34285714, P_CAM_CC_PLL2_OUT_EVEN, 14, 0, 0),
+	{ }
+};
+
+/* Camera sensor MCLK0 RCG (MND divider present: mnd_width = 8). */
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+	.cmd_rcgr = 0x4004,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk0_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+/* Camera sensor MCLK1 RCG; identical to MCLK0 apart from the register base. */
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+	.cmd_rcgr = 0x4024,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk1_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+/* Camera sensor MCLK2 RCG; identical to MCLK0 apart from the register base. */
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+	.cmd_rcgr = 0x4044,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk2_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+/* Camera sensor MCLK3 RCG; identical to MCLK0 apart from the register base. */
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+	.cmd_rcgr = 0x4064,
+	.mnd_width = 8,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_mclk3_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP2(
+			MIN, 19200000,
+			LOWER, 34285714),
+	},
+};
+
+/* Slow AHB bus frequency table (19.2 MHz XO up to 80 MHz from PLL2). */
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+	F(19200000, P_BI_TCXO, 1, 0, 0),
+	F(60000000, P_CAM_CC_PLL0_OUT_EVEN, 10, 0, 0),
+	F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+	F(73846154, P_CAM_CC_PLL2_OUT_EVEN, 6.5, 0, 0),
+	F(80000000, P_CAM_CC_PLL2_OUT_EVEN, 6, 0, 0),
+	{ }
+};
+
+/* Slow AHB RCG feeding the register/housekeeping bus of several camera
+ * blocks (BPS, CPAS, IPE AHB branches below).
+ */
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+	.cmd_rcgr = 0x6054,
+	.mnd_width = 0,
+	.hid_width = 5,
+	.parent_map = cam_cc_parent_map_0,
+	.freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+	.clkr.hw.init = &(struct clk_init_data){
+		.name = "cam_cc_slow_ahb_clk_src",
+		.parent_names = cam_cc_parent_names_0,
+		.num_parents = 6,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_rcg2_ops,
+		VDD_CX_FMAX_MAP5(
+			MIN, 19200000,
+			LOWER, 60000000,
+			LOW, 66666667,
+			LOW_L1, 73846154,
+			NOMINAL, 80000000),
+	},
+};
+
+/*
+ * Branch (gate) clocks for the BPS, CAMNOC and CCI blocks.
+ * .aggr_sibling_rates on the AHB/AREG branches lets siblings sharing an
+ * RCG aggregate their rate requests instead of racing for the parent.
+ */
+static struct clk_branch cam_cc_bps_ahb_clk = {
+	.halt_reg = 0x606c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x606c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+	.halt_reg = 0x6050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x6050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Parent-less branch: gated directly off the bus, no rate control. */
+static struct clk_branch cam_cc_bps_axi_clk = {
+	.halt_reg = 0x6034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+	.halt_reg = 0x6024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x6024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_bps_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_bps_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_atb_clk = {
+	.halt_reg = 0xb12c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb12c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+	.halt_reg = 0xb124,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb124,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_camnoc_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_cci_clk = {
+	.halt_reg = 0xb0f0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0f0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cci_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cci_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for CPAS and the three CSI PHYs (phytimer + rx lanes). */
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+	.halt_reg = 0xb11c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb11c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_cpas_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+	.halt_reg = 0x501c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x501c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi0phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi0phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+	.halt_reg = 0x5040,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5040,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi1phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi1phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+	.halt_reg = 0x5064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x5064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csi2phytimer_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_csi2phytimer_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* The csiphyN branches all share cam_cc_cphy_rx_clk_src, hence the
+ * aggr_sibling_rates flag so concurrent PHY users aggregate rate votes.
+ */
+static struct clk_branch cam_cc_csiphy0_clk = {
+	.halt_reg = 0x5020,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5020,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+	.halt_reg = 0x5044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+	.halt_reg = 0x5068,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x5068,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_csiphy2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for the debug mux output, FD (face detect) and ICP. */
+static struct clk_branch cam_cc_debug_clk = {
+	.halt_reg = 0xc008,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc008,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_debug_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_fd_core_clk = {
+	.halt_reg = 0xb0c8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0c8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* No CLK_SET_RATE_PARENT: follows the FD RCG's rate without voting on it. */
+static struct clk_branch cam_cc_fd_core_uar_clk = {
+	.halt_reg = 0xb0d0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0d0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_fd_core_uar_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fd_core_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_apb_clk = {
+	.halt_reg = 0xb084,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb084,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_apb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_atb_clk = {
+	.halt_reg = 0xb078,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_atb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+	.halt_reg = 0xb0a0,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a0,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_icp_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_cti_clk = {
+	.halt_reg = 0xb07c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb07c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_cti_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_icp_ts_clk = {
+	.halt_reg = 0xb080,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb080,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_icp_ts_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for IFE0 (Image Front End instance 0). */
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+	.halt_reg = 0x907c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x907c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+	.halt_reg = 0x9024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+	.halt_reg = 0x9078,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x9078,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+	.halt_reg = 0x9050,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* DSP branch tracks the IFE0 RCG but does not vote on its rate. */
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+	.halt_reg = 0x9034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x9034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_0_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_0_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for IFE1; mirrors the IFE0 set at a different register base. */
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+	.halt_reg = 0xa054,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa054,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+	.halt_reg = 0xa024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+	.halt_reg = 0xa050,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xa050,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+	.halt_reg = 0xa048,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa048,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* DSP branch tracks the IFE1 RCG but does not vote on its rate. */
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+	.halt_reg = 0xa02c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xa02c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_1_dsp_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_1_clk_src",
+			},
+			.num_parents = 1,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for the lightweight IFE-lite pipeline. */
+static struct clk_branch cam_cc_ife_lite_clk = {
+	.halt_reg = 0xb01c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb01c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+	.halt_reg = 0xb044,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0xb044,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_cphy_rx_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_cphy_rx_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+	.halt_reg = 0xb03c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb03c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ife_lite_csid_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ife_lite_csid_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for the two IPE (Image Processing Engine) instances. */
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+	.halt_reg = 0x703c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x703c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+	.halt_reg = 0x7038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x7038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+	.halt_reg = 0x7034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+	.halt_reg = 0x7024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x7024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_ahb_clk = {
+	.halt_reg = 0x803c,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x803c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_ahb_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_slow_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_areg_clk = {
+	.halt_reg = 0x8038,
+	.halt_check = BRANCH_HALT,
+	.aggr_sibling_rates = true,
+	.clkr = {
+		.enable_reg = 0x8038,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_areg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_fast_ahb_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_axi_clk = {
+	.halt_reg = 0x8034,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8034,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_axi_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_ipe_1_clk = {
+	.halt_reg = 0x8024,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x8024,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_ipe_1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_ipe_1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Branch clocks for the JPEG and LRME blocks. */
+static struct clk_branch cam_cc_jpeg_clk = {
+	.halt_reg = 0xb064,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb064,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_jpeg_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_jpeg_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+	.halt_reg = 0xb110,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb110,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_lrme_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_lrme_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Output gates for the four sensor MCLKs; each follows its own RCG. */
+static struct clk_branch cam_cc_mclk0_clk = {
+	.halt_reg = 0x401c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x401c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk0_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk0_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+	.halt_reg = 0x403c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x403c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk1_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk1_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+	.halt_reg = 0x405c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x405c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk2_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk2_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+	.halt_reg = 0x407c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0x407c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_mclk3_clk",
+			.parent_names = (const char *[]){
+				"cam_cc_mclk3_clk_src",
+			},
+			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/* Miscellaneous gates: PLL test output, SoC AHB and the system timer. */
+static struct clk_branch cam_cc_pll_test_clk = {
+	.halt_reg = 0xc014,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xc014,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_pll_test_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+	.halt_reg = 0xb13c,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb13c,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_soc_ahb_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+	.halt_reg = 0xb0a8,
+	.halt_check = BRANCH_HALT,
+	.clkr = {
+		.enable_reg = 0xb0a8,
+		.enable_mask = BIT(0),
+		.hw.init = &(struct clk_init_data){
+			.name = "cam_cc_sys_tmr_clk",
+			.ops = &clk_branch2_ops,
+		},
+	},
+};
+
+/*
+ * Lookup table from the DT binding clock IDs (CAM_CC_*) to the clk_regmap
+ * descriptors above; consumed by qcom_cc_really_probe().
+ */
+static struct clk_regmap *cam_cc_sdm845_clocks[] = {
+	[CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+	[CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+	[CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+	[CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+	[CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+	[CAM_CC_CAMNOC_ATB_CLK] = &cam_cc_camnoc_atb_clk.clkr,
+	[CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+	[CAM_CC_CCI_CLK] = &cam_cc_cci_clk.clkr,
+	[CAM_CC_CCI_CLK_SRC] = &cam_cc_cci_clk_src.clkr,
+	[CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+	[CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+	[CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+	[CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+	[CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+	[CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+	[CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+	[CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+	[CAM_CC_DEBUG_CLK] = &cam_cc_debug_clk.clkr,
+	[CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+	[CAM_CC_FD_CORE_CLK] = &cam_cc_fd_core_clk.clkr,
+	[CAM_CC_FD_CORE_CLK_SRC] = &cam_cc_fd_core_clk_src.clkr,
+	[CAM_CC_FD_CORE_UAR_CLK] = &cam_cc_fd_core_uar_clk.clkr,
+	[CAM_CC_ICP_APB_CLK] = &cam_cc_icp_apb_clk.clkr,
+	[CAM_CC_ICP_ATB_CLK] = &cam_cc_icp_atb_clk.clkr,
+	[CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+	[CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+	[CAM_CC_ICP_CTI_CLK] = &cam_cc_icp_cti_clk.clkr,
+	[CAM_CC_ICP_TS_CLK] = &cam_cc_icp_ts_clk.clkr,
+	[CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+	[CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+	[CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+	[CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+	[CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+	[CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+	[CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+	[CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+	[CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+	[CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+	[CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+	[CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+	[CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+	[CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+	[CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+	[CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+	[CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+	[CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+	[CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+	[CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+	[CAM_CC_IPE_1_AHB_CLK] = &cam_cc_ipe_1_ahb_clk.clkr,
+	[CAM_CC_IPE_1_AREG_CLK] = &cam_cc_ipe_1_areg_clk.clkr,
+	[CAM_CC_IPE_1_AXI_CLK] = &cam_cc_ipe_1_axi_clk.clkr,
+	[CAM_CC_IPE_1_CLK] = &cam_cc_ipe_1_clk.clkr,
+	[CAM_CC_IPE_1_CLK_SRC] = &cam_cc_ipe_1_clk_src.clkr,
+	[CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+	[CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+	[CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+	[CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+	[CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+	[CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+	[CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+	[CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+	[CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+	[CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+	[CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+	[CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+	[CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+	[CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+	[CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+	[CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+	[CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+	[CAM_CC_PLL2_OUT_EVEN] = &cam_cc_pll2_out_even.clkr,
+	[CAM_CC_PLL2_OUT_ODD] = &cam_cc_pll2_out_odd.clkr,
+	[CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+	[CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+	[CAM_CC_PLL_TEST_CLK] = &cam_cc_pll_test_clk.clkr,
+	[CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+	[CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+	[CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+/* Block reset (BCR) register offsets, indexed by the DT reset binding IDs. */
+static const struct qcom_reset_map cam_cc_sdm845_resets[] = {
+	[TITAN_CAM_CC_BPS_BCR] = { 0x6000 },
+	[TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 },
+	[TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 },
+	[TITAN_CAM_CC_CPAS_BCR] = { 0xb118 },
+	[TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 },
+	[TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 },
+	[TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 },
+	[TITAN_CAM_CC_FD_BCR] = { 0xb0ac },
+	[TITAN_CAM_CC_ICP_BCR] = { 0xb074 },
+	[TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 },
+	[TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 },
+	[TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 },
+	[TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 },
+	[TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 },
+	[TITAN_CAM_CC_JPEG_BCR] = { 0xb048 },
+	[TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 },
+	[TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 },
+	[TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 },
+	[TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 },
+	[TITAN_CAM_CC_MCLK3_BCR] = { 0x4060 },
+	[TITAN_CAM_CC_TITAN_TOP_BCR] = { 0xb130 },
+};
+
+/* 32-bit MMIO regmap covering the camera CC register space up to 0xd004. */
+static const struct regmap_config cam_cc_sdm845_regmap_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register	= 0xd004,
+	.fast_io	= true,
+};
+
+/* Top-level descriptor tying clocks, resets and regmap for qcom_cc probe. */
+static const struct qcom_cc_desc cam_cc_sdm845_desc = {
+	.config = &cam_cc_sdm845_regmap_config,
+	.clks = cam_cc_sdm845_clocks,
+	.num_clks = ARRAY_SIZE(cam_cc_sdm845_clocks),
+	.resets = cam_cc_sdm845_resets,
+	.num_resets = ARRAY_SIZE(cam_cc_sdm845_resets),
+};
+
+/* Device-tree match table. */
+static const struct of_device_id cam_cc_sdm845_match_table[] = {
+	{ .compatible = "qcom,cam_cc-sdm845" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sdm845_match_table);
+
+/*
+ * Probe: map the CC register space, acquire the CX/MX rail regulators,
+ * program the four Fabia PLLs to their boot configuration, then register
+ * all clocks and resets with the common clock framework.
+ *
+ * Returns 0 on success or a negative errno (-EPROBE_DEFER is propagated
+ * silently so deferred regulator probing does not spam the log).
+ */
+static int cam_cc_sdm845_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	int ret = 0;
+
+	regmap = qcom_cc_map(pdev, &cam_cc_sdm845_desc);
+	if (IS_ERR(regmap)) {
+		/* Use dev_err (not pr_err) for device context, matching the
+		 * other error paths in this function.
+		 */
+		dev_err(&pdev->dev, "Failed to map the Camera CC registers\n");
+		return PTR_ERR(regmap);
+	}
+
+	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(vdd_cx.regulator[0])) {
+		if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_cx regulator\n");
+		return PTR_ERR(vdd_cx.regulator[0]);
+	}
+
+	vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(vdd_mx.regulator[0])) {
+		if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_mx regulator\n");
+		return PTR_ERR(vdd_mx.regulator[0]);
+	}
+
+	/* PLLs must carry a valid configuration before clk registration. */
+	clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+	clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+	clk_fabia_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+	clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+
+	ret = qcom_cc_really_probe(pdev, &cam_cc_sdm845_desc, regmap);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register Camera CC clocks\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Camera CC clocks\n");
+	return ret;
+}
+
+/* Platform driver; no remove callback — clocks stay registered for life. */
+static struct platform_driver cam_cc_sdm845_driver = {
+	.probe		= cam_cc_sdm845_probe,
+	.driver		= {
+		.name	= "cam_cc-sdm845",
+		.of_match_table = cam_cc_sdm845_match_table,
+	},
+};
+
+/* Registered at core_initcall so camera clocks exist before consumers probe. */
+static int __init cam_cc_sdm845_init(void)
+{
+	return platform_driver_register(&cam_cc_sdm845_driver);
+}
+core_initcall(cam_cc_sdm845_init);
+
+static void __exit cam_cc_sdm845_exit(void)
+{
+	platform_driver_unregister(&cam_cc_sdm845_driver);
+}
+module_exit(cam_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cam_cc-sdm845");
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index adf2f7f..53f736c 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -218,11 +218,20 @@
 		unsigned long *parent_rate)
 {
 	struct clk_hw *parent = clk_hw_get_parent(hw);
+	unsigned long rrate = 0;
 
 	if (!parent)
 		return -EPERM;
 
-	return clk_hw_round_rate(parent, rate);
+	rrate = clk_hw_round_rate(parent, rate);
+	/*
+	 * If the rounded rate that's returned is valid, update the parent_rate
+	 * field so that the set_rate() call can be propagated to the parent.
+	 */
+	if (rrate > 0)
+		*parent_rate = rrate;
+
+	return rrate;
 }
 
 static unsigned long clk_branch2_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f4cc1bd..0f6039e 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -328,32 +328,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gcc_mmss_qm_core_clk_src[] = {
-	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
-	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
-	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gcc_mmss_qm_core_clk_src = {
-	.cmd_rcgr = 0xb040,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gcc_parent_map_0,
-	.freq_tbl = ftbl_gcc_mmss_qm_core_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gcc_mmss_qm_core_clk_src",
-		.parent_names = gcc_parent_names_0,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP3(
-			MIN, 75000000,
-			LOWER, 150000000,
-			LOW, 300000000),
-	},
-};
-
 static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
 	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -1669,37 +1643,6 @@
 	},
 };
 
-static struct clk_branch gcc_mmss_qm_ahb_clk = {
-	.halt_reg = 0xb05c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb05c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_mmss_qm_core_clk = {
-	.halt_reg = 0xb038,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb038,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_core_clk",
-			.parent_names = (const char *[]){
-				"gcc_mmss_qm_core_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_mss_axis2_clk = {
 	.halt_reg = 0x8a008,
 	.halt_check = BRANCH_HALT,
@@ -3233,9 +3176,6 @@
 	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
 	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
-	[GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK_SRC] = &gcc_mmss_qm_core_clk_src.clkr,
 	[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
 	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
 	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
diff --git a/drivers/clk/qcom/vdd-level-sdm845.h b/drivers/clk/qcom/vdd-level-sdm845.h
index 5be7a28..1771c15 100644
--- a/drivers/clk/qcom/vdd-level-sdm845.h
+++ b/drivers/clk/qcom/vdd-level-sdm845.h
@@ -90,6 +90,13 @@
 	},					\
 	.num_rate_max = VDD_CX_NUM
 
+#define VDD_MX_FMAX_MAP2(l1, f1, l2, f2) \
+	.vdd_class = &vdd_mx,			\
+	.rate_max = (unsigned long[VDD_CX_NUM]) {	\
+		[VDD_CX_##l1] = (f1),		\
+		[VDD_CX_##l2] = (f2),		\
+	},					\
+	.num_rate_max = VDD_CX_NUM
 
 enum vdd_cx_levels {
 	VDD_CX_NONE,
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 884e557..f18dccf 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -113,7 +113,6 @@
 config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
 	bool "interactive"
 	select CPU_FREQ_GOV_INTERACTIVE
-	select CPU_FREQ_GOV_PERFORMANCE
 	help
 	  Use the CPUFreq governor 'interactive' as default. This allows
 	  you to get a full dynamic cpu frequency capable system by simply
@@ -187,6 +186,23 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_interactive.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
@@ -211,6 +227,17 @@
 
 	  If in doubt, say N.
 
+config CPU_BOOST
+	tristate "Event base short term CPU freq boost"
+	depends on CPU_FREQ
+	help
+	  This driver boosts the frequency of one or more CPUs based on
+	  various events that might occur in the system. As of now, the
+	  events it reacts to are:
+	  - Migration of important threads from one CPU to another.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_SCHED
 	bool "'sched' cpufreq governor"
 	depends on CPU_FREQ
@@ -224,26 +251,6 @@
 
 	  If in doubt, say N.
 
-config CPU_FREQ_GOV_INTERACTIVE
-	tristate "'interactive' cpufreq policy governor"
-	depends on CPU_FREQ
-	select CPU_FREQ_GOV_ATTR_SET
-	select IRQ_WORK
-	help
-	  'interactive' - This driver adds a dynamic cpufreq policy governor
-	  designed for latency-sensitive workloads.
-
-	  This governor attempts to reduce the latency of clock
-	  increases so that the system is more responsive to
-	  interactive workloads.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_interactive.
-
-	  For details, take a look at linux/Documentation/cpu-freq.
-
-	  If in doubt, say N.
-
 config CPU_FREQ_GOV_SCHEDUTIL
 	bool "'schedutil' cpufreq policy governor"
 	depends on CPU_FREQ && SMP
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8af..96e18162 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -258,3 +258,9 @@
 	  support for its operation.
 
 	  If in doubt, say N.
+
+config CPU_FREQ_MSM
+	bool "MSM CPUFreq support"
+	depends on CPU_FREQ
+	help
+	  This enables the CPUFreq driver for Qualcomm CPUs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f0c9905..bf98b28 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o
+obj-$(CONFIG_CPU_BOOST)			+= cpu-boost.o
 
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
@@ -60,6 +61,7 @@
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_MSM)              += qcom-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
@@ -82,7 +84,6 @@
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
 obj-$(CONFIG_MACH_MVEBU_V7)		+= mvebu-cpufreq.o
 
-
 ##################################################################################
 # PowerPC platform drivers
 obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
new file mode 100644
index 0000000..07603fe
--- /dev/null
+++ b/drivers/cpufreq/cpu-boost.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpu-boost: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/time.h>
+
+struct cpu_sync {
+	int cpu;
+	unsigned int input_boost_min;
+	unsigned int input_boost_freq;
+};
+
+static DEFINE_PER_CPU(struct cpu_sync, sync_info);
+static struct workqueue_struct *cpu_boost_wq;
+
+static struct work_struct input_boost_work;
+
+static bool input_boost_enabled;
+
+static unsigned int input_boost_ms = 40;
+module_param(input_boost_ms, uint, 0644);
+
+static bool sched_boost_on_input;
+module_param(sched_boost_on_input, bool, 0644);
+
+static bool sched_boost_active;
+
+static struct delayed_work input_boost_rem;
+static u64 last_input_time;
+#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
+
+static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	bool enabled = false;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* single number: apply to all CPUs */
+	if (!ntokens) {
+		if (sscanf(buf, "%u\n", &val) != 1)
+			return -EINVAL;
+		for_each_possible_cpu(i)
+			per_cpu(sync_info, i).input_boost_freq = val;
+		goto check_enable;
+	}
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+			return -EINVAL;
+		if (cpu >= num_possible_cpus())
+			return -EINVAL;
+
+		per_cpu(sync_info, cpu).input_boost_freq = val;
+		cp = strchr(cp, ' ');
+		cp = cp ? cp + 1 : "";
+	}
+
+check_enable:
+	for_each_possible_cpu(i) {
+		if (per_cpu(sync_info, i).input_boost_freq) {
+			enabled = true;
+			break;
+		}
+	}
+	input_boost_enabled = enabled;
+
+	return 0;
+}
+
+static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
+{
+	int cnt = 0, cpu;
+	struct cpu_sync *s;
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:%u ", cpu, s->input_boost_freq);
+	}
+	cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_input_boost_freq = {
+	.set = set_input_boost_freq,
+	.get = get_input_boost_freq,
+};
+module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);
+
+/*
+ * The CPUFREQ_ADJUST notifier is used to override the current policy min to
+ * make sure policy min >= boost_min. The cpufreq framework then does the job
+ * of enforcing the new policy.
+ */
+static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_sync *s = &per_cpu(sync_info, cpu);
+	unsigned int ib_min = s->input_boost_min;
+
+	switch (val) {
+	case CPUFREQ_ADJUST:
+		if (!ib_min)
+			break;
+
+		pr_debug("CPU%u policy min before boost: %u kHz\n",
+			 cpu, policy->min);
+		pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min);
+
+		cpufreq_verify_within_limits(policy, ib_min, UINT_MAX);
+
+		pr_debug("CPU%u policy min after boost: %u kHz\n",
+			 cpu, policy->min);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block boost_adjust_nb = {
+	.notifier_call = boost_adjust_notify,
+};
+
+static void update_policy_online(void)
+{
+	unsigned int i;
+
+	/* Re-evaluate policy to trigger adjust notifier for online CPUs */
+	get_online_cpus();
+	for_each_online_cpu(i) {
+		pr_debug("Updating policy for CPU%d\n", i);
+		cpufreq_update_policy(i);
+	}
+	put_online_cpus();
+}
+
+static void do_input_boost_rem(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	/* Reset the input_boost_min for all CPUs in the system */
+	pr_debug("Resetting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = 0;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	if (sched_boost_active) {
+		ret = sched_set_boost(0);
+		if (ret)
+			pr_err("cpu-boost: HMP boost disable failed\n");
+		sched_boost_active = false;
+	}
+}
+
+static void do_input_boost(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	cancel_delayed_work_sync(&input_boost_rem);
+	if (sched_boost_active) {
+		sched_set_boost(0);
+		sched_boost_active = false;
+	}
+
+	/* Set the input_boost_min for all CPUs in the system */
+	pr_debug("Setting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	/* Enable scheduler boost to migrate tasks to big cluster */
+	if (sched_boost_on_input) {
+		ret = sched_set_boost(1);
+		if (ret)
+			pr_err("cpu-boost: HMP boost enable failed\n");
+		else
+			sched_boost_active = true;
+	}
+
+	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
+					msecs_to_jiffies(input_boost_ms));
+}
+
+static void cpuboost_input_event(struct input_handle *handle,
+		unsigned int type, unsigned int code, int value)
+{
+	u64 now;
+
+	if (!input_boost_enabled)
+		return;
+
+	now = ktime_to_us(ktime_get());
+	if (now - last_input_time < MIN_INPUT_INTERVAL)
+		return;
+
+	if (work_pending(&input_boost_work))
+		return;
+
+	queue_work(cpu_boost_wq, &input_boost_work);
+	last_input_time = ktime_to_us(ktime_get());
+}
+
+static int cpuboost_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "cpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void cpuboost_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id cpuboost_ids[] = {
+	/* multi-touch touchscreen */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	/* touchpad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	/* Keypad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+
+static struct input_handler cpuboost_input_handler = {
+	.event          = cpuboost_input_event,
+	.connect        = cpuboost_input_connect,
+	.disconnect     = cpuboost_input_disconnect,
+	.name           = "cpu-boost",
+	.id_table       = cpuboost_ids,
+};
+
+static int cpu_boost_init(void)
+{
+	int cpu, ret;
+	struct cpu_sync *s;
+
+	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
+	if (!cpu_boost_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&input_boost_work, do_input_boost);
+	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		s->cpu = cpu;
+	}
+	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
+
+	ret = input_register_handler(&cpuboost_input_handler);
+	return ret;
+}
+late_initcall(cpu_boost_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 019e817..f7e1c1b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -93,6 +93,7 @@
  */
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 static struct srcu_notifier_head cpufreq_transition_notifier_list;
+struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 static bool init_cpufreq_transition_notifier_list_called;
 static int __init init_cpufreq_transition_notifier_list(void)
@@ -103,6 +104,15 @@
 }
 pure_initcall(init_cpufreq_transition_notifier_list);
 
+static bool init_cpufreq_govinfo_notifier_list_called;
+static int __init init_cpufreq_govinfo_notifier_list(void)
+{
+	ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
+	init_cpufreq_govinfo_notifier_list_called = true;
+	return 0;
+}
+pure_initcall(init_cpufreq_govinfo_notifier_list);
+
 static int off __read_mostly;
 static int cpufreq_disabled(void)
 {
@@ -1078,7 +1088,8 @@
 	if (has_target()) {
 		ret = cpufreq_start_governor(policy);
 		if (ret)
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 	}
 	up_write(&policy->rwsem);
 	return ret;
@@ -1250,6 +1261,9 @@
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}
 
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1776,7 +1790,8 @@
 	if (cpufreq_disabled())
 		return -EINVAL;
 
-	WARN_ON(!init_cpufreq_transition_notifier_list_called);
+	WARN_ON(!init_cpufreq_transition_notifier_list_called ||
+		!init_cpufreq_govinfo_notifier_list_called);
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
@@ -1797,6 +1812,10 @@
 		ret = blocking_notifier_chain_register(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_register(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1837,6 +1856,10 @@
 		ret = blocking_notifier_chain_unregister(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_unregister(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1980,15 +2003,6 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	/* Save last value to restore later on errors */
 	policy->restore_freq = policy->cur;
 
@@ -2533,7 +2547,7 @@
 	hp_online = ret;
 	ret = 0;
 
-	pr_debug("driver %s up and running\n", driver_data->name);
+	pr_info("driver %s up and running\n", driver_data->name);
 	goto out;
 
 err_if_unreg:
@@ -2565,7 +2579,7 @@
 	if (!cpufreq_driver || (driver != cpufreq_driver))
 		return -EINVAL;
 
-	pr_debug("unregistering driver %s\n", driver->name);
+	pr_info("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
 	get_online_cpus();
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index d6cac0e..1b8c739 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1,7 +1,7 @@
 /*
  * drivers/cpufreq/cpufreq_interactive.c
  *
- * Copyright (C) 2010-2016 Google, Inc.
+ * Copyright (C) 2010 Google, Inc.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -13,14 +13,12 @@
  * GNU General Public License for more details.
  *
  * Author: Mike Chan (mike@android.com)
+ *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/cpufreq.h>
-#include <linux/irq_work.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/rwsem.h>
@@ -29,50 +27,92 @@
 #include <linux/tick.h>
 #include <linux/time.h>
 #include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
 
-#define gov_attr_ro(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0444, show_##_name, NULL)
+struct cpufreq_interactive_policyinfo {
+	struct timer_list policy_timer;
+	struct timer_list policy_slack_timer;
+	struct hrtimer notif_timer;
+	spinlock_t load_lock; /* protects load tracking stat */
+	u64 last_evaluated_jiffy;
+	struct cpufreq_policy *policy;
+	struct cpufreq_policy p_nolim; /* policy copy with no limits */
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /*protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	unsigned int min_freq;
+	u64 floor_validate_time;
+	u64 hispeed_validate_time;
+	u64 max_freq_hyst_start_time;
+	struct rw_semaphore enable_sem;
+	bool reject_notification;
+	bool notif_pending;
+	unsigned long notif_cpu;
+	int governor_enabled;
+	struct cpufreq_interactive_tunables *cached_tunables;
+	struct sched_load *sl;
+};
 
-#define gov_attr_wo(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0200, NULL, store_##_name)
+/* Protected by per-policy load_lock */
+struct cpufreq_interactive_cpuinfo {
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	unsigned int loadadjfreq;
+};
 
-#define gov_attr_rw(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0644, show_##_name, store_##_name)
+static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
 
-/* Separate instance required for each 'interactive' directory in sysfs */
-struct interactive_tunables {
-	struct gov_attr_set attr_set;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
 
+static int set_window_count;
+static int migration_register_count;
+static struct mutex sched_lock;
+static cpumask_t controlled_cpus;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
 	/* Hi speed to bump to from lo speed when load burst (default max) */
 	unsigned int hispeed_freq;
-
 	/* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 99
 	unsigned long go_hispeed_load;
-
 	/* Target load. Lower values result in higher CPU speeds. */
 	spinlock_t target_loads_lock;
 	unsigned int *target_loads;
 	int ntarget_loads;
-
 	/*
 	 * The minimum amount of time to spend at a frequency before we can ramp
 	 * down.
 	 */
 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
 	unsigned long min_sample_time;
-
-	/* The sample rate of the timer used to increase frequency */
-	unsigned long sampling_rate;
-
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
 	/*
 	 * Wait this long before raising speed above hispeed, by default a
 	 * single timer interval.
@@ -80,175 +120,181 @@
 	spinlock_t above_hispeed_delay_lock;
 	unsigned int *above_hispeed_delay;
 	int nabove_hispeed_delay;
-
 	/* Non-zero means indefinite speed boost active */
-	int boost;
+	int boost_val;
 	/* Duration of a boot pulse in usecs */
-	int boostpulse_duration;
+	int boostpulse_duration_val;
 	/* End time of boost pulse in ktime converted to usecs */
 	u64 boostpulse_endtime;
 	bool boosted;
-
 	/*
-	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
 	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
 	 */
-#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
-	unsigned long timer_slack_delay;
-	unsigned long timer_slack;
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
 	bool io_is_busy;
+
+	/* scheduler input related flags */
+	bool use_sched_load;
+	bool use_migration_notif;
+
+	/*
+	 * Whether to align timer windows across all CPUs. When
+	 * use_sched_load is true, this flag is ignored and windows
+	 * will always be aligned.
+	 */
+	bool align_windows;
+
+	/*
+	 * Stay at max freq for at least max_freq_hysteresis before dropping
+	 * frequency.
+	 */
+	unsigned int max_freq_hysteresis;
+
+	/* Ignore hispeed_freq and above_hispeed_delay for notification */
+	bool ignore_hispeed_on_notif;
+
+	/* Ignore min_sample_time for notification */
+	bool fast_ramp_down;
+
+	/* Whether to enable prediction or not */
+	bool enable_prediction;
 };
 
-/* Separate instance required for each 'struct cpufreq_policy' */
-struct interactive_policy {
-	struct cpufreq_policy *policy;
-	struct interactive_tunables *tunables;
-	struct list_head tunables_hook;
-};
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *cached_common_tunables;
 
-/* Separate instance required for each CPU */
-struct interactive_cpu {
-	struct update_util_data update_util;
-	struct interactive_policy *ipolicy;
+static struct attribute_group *get_sysfs_attr(void);
 
-	struct irq_work irq_work;
-	u64 last_sample_time;
-	unsigned long next_sample_jiffies;
-	bool work_in_progress;
-
-	struct rw_semaphore enable_sem;
-	struct timer_list slack_timer;
-
-	spinlock_t load_lock; /* protects the next 4 fields */
-	u64 time_in_idle;
-	u64 time_in_idle_timestamp;
-	u64 cputime_speedadj;
-	u64 cputime_speedadj_timestamp;
-
-	spinlock_t target_freq_lock; /*protects target freq */
-	unsigned int target_freq;
-
-	unsigned int floor_freq;
-	u64 pol_floor_val_time; /* policy floor_validate_time */
-	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
-	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
-	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
-};
-
-static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
-
-/* Realtime thread handles frequency scaling */
-static struct task_struct *speedchange_task;
-static cpumask_t speedchange_cpumask;
-static spinlock_t speedchange_cpumask_lock;
-
-/* Target load. Lower values result in higher CPU speeds. */
-#define DEFAULT_TARGET_LOAD 90
-static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
-
-#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
-#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
-static unsigned int default_above_hispeed_delay[] = {
-	DEFAULT_ABOVE_HISPEED_DELAY
-};
-
-/* Iterate over interactive policies for tunables */
-#define for_each_ipolicy(__ip)	\
-	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)
-
-static struct interactive_tunables *global_tunables;
-static DEFINE_MUTEX(global_tunables_lock);
-
-static inline void update_slack_delay(struct interactive_tunables *tunables)
+/* Round to starting jiffy of next evaluation window */
+static u64 round_to_nw_start(u64 jif,
+			     struct cpufreq_interactive_tunables *tunables)
 {
-	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
-						       tunables->sampling_rate);
-}
+	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
+	u64 ret;
 
-static bool timer_slack_required(struct interactive_cpu *icpu)
-{
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-
-	if (tunables->timer_slack < 0)
-		return false;
-
-	if (icpu->target_freq > ipolicy->policy->min)
-		return true;
-
-	return false;
-}
-
-static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
-	add_timer_on(&icpu->slack_timer, cpu);
-}
-
-static void gov_slack_timer_modify(struct interactive_cpu *icpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
-}
-
-static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
-				bool modify)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned long flags;
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-
-	icpu->time_in_idle = get_cpu_idle_time(cpu,
-					       &icpu->time_in_idle_timestamp,
-					       tunables->io_is_busy);
-	icpu->cputime_speedadj = 0;
-	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;
-
-	if (timer_slack_required(icpu)) {
-		if (modify)
-			gov_slack_timer_modify(icpu);
-		else
-			gov_slack_timer_start(icpu, cpu);
+	if (tunables->use_sched_load || tunables->align_windows) {
+		do_div(jif, step);
+		ret = (jif + 1) * step;
+	} else {
+		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
 	}
 
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-}
-
-static unsigned int
-freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
-			    unsigned int freq)
-{
-	unsigned long flags;
-	unsigned int ret;
-	int i;
-
-	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
-
-	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
-	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
-		;
-
-	ret = tunables->above_hispeed_delay[i];
-	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
-				       unsigned int freq)
+static inline int set_window_helper(
+			struct cpufreq_interactive_tunables *tunables)
 {
+	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
+			 usecs_to_jiffies(tunables->timer_rate));
+}
+
+static void cpufreq_interactive_timer_resched(unsigned long cpu,
+					      bool slack_only)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 expires;
 	unsigned long flags;
-	unsigned int ret;
 	int i;
 
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	if (!slack_only) {
+		for_each_cpu(i, ppol->policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, i);
+			pcpu->time_in_idle = get_cpu_idle_time(i,
+						&pcpu->time_in_idle_timestamp,
+						tunables->io_is_busy);
+			pcpu->cputime_speedadj = 0;
+			pcpu->cputime_speedadj_timestamp =
+						pcpu->time_in_idle_timestamp;
+		}
+		del_timer(&ppol->policy_timer);
+		ppol->policy_timer.expires = expires;
+		add_timer(&ppol->policy_timer);
+	}
+
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		del_timer(&ppol->policy_slack_timer);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+/* The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The policy_timer and policy_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	ppol->policy_timer.expires = expires;
+	add_timer(&ppol->policy_timer);
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		pcpu->time_in_idle =
+			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
+					  tunables->io_is_busy);
+		pcpu->cputime_speedadj = 0;
+		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	}
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
 	for (i = 0; i < tunables->ntarget_loads - 1 &&
-	     freq >= tunables->target_loads[i + 1]; i += 2)
+		    freq >= tunables->target_loads[i+1]; i += 2)
 		;
 
 	ret = tunables->target_loads[i];
@@ -256,76 +302,102 @@
 	return ret;
 }
 
+#define DEFAULT_MAX_LOAD 100
+u32 get_freq_max_load(int cpu, unsigned int freq)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+
+	if (!cpumask_test_cpu(cpu, &controlled_cpus))
+		return DEFAULT_MAX_LOAD;
+
+	if (have_governor_per_policy()) {
+		if (!ppol || !ppol->cached_tunables)
+			return DEFAULT_MAX_LOAD;
+		return freq_to_targetload(ppol->cached_tunables, freq);
+	}
+
+	if (!cached_common_tunables)
+		return DEFAULT_MAX_LOAD;
+	return freq_to_targetload(cached_common_tunables, freq);
+}
+
 /*
  * If increasing frequencies never map to a lower target load then
  * choose_freq() will find the minimum frequency that does not exceed its
  * target load given the current load.
  */
-static unsigned int choose_freq(struct interactive_cpu *icpu,
-				unsigned int loadadjfreq)
+static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
+		unsigned int loadadjfreq)
 {
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
-	unsigned int freq = policy->cur;
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
 	int index;
 
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
 	do {
 		prevfreq = freq;
-		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
 
 		/*
 		 * Find the lowest frequency where the computed load is less
 		 * than or equal to the target load.
 		 */
 
-		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
+		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
+						       loadadjfreq / tl,
 						       CPUFREQ_RELATION_L);
-
-		freq = freq_table[index].frequency;
+		freq = pcpu->freq_table[index].frequency;
 
 		if (freq > prevfreq) {
-			/* The previous frequency is too low */
+			/* The previous frequency is too low. */
 			freqmin = prevfreq;
 
-			if (freq < freqmax)
-				continue;
-
-			/* Find highest frequency that is less than freqmax */
-			index = cpufreq_frequency_table_target(policy,
-					freqmax - 1, CPUFREQ_RELATION_H);
-
-			freq = freq_table[index].frequency;
-
-			if (freq == freqmin) {
+			if (freq >= freqmax) {
 				/*
-				 * The first frequency below freqmax has already
-				 * been found to be too low. freqmax is the
-				 * lowest speed we found that is fast enough.
+				 * Find the highest frequency that is less
+				 * than freqmax.
 				 */
-				freq = freqmax;
-				break;
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmax - 1, CPUFREQ_RELATION_H);
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
 			}
 		} else if (freq < prevfreq) {
 			/* The previous frequency is high enough. */
 			freqmax = prevfreq;
 
-			if (freq > freqmin)
-				continue;
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmin + 1, CPUFREQ_RELATION_L);
+				freq = pcpu->freq_table[index].frequency;
 
-			/* Find lowest frequency that is higher than freqmin */
-			index = cpufreq_frequency_table_target(policy,
-					freqmin + 1, CPUFREQ_RELATION_L);
-
-			freq = freq_table[index].frequency;
-
-			/*
-			 * If freqmax is the first frequency above
-			 * freqmin then we have already found that
-			 * this speed is fast enough.
-			 */
-			if (freq == freqmax)
-				break;
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
 		}
 
 		/* If same frequency chosen as previous then done. */
@@ -334,97 +406,216 @@
 	return freq;
 }
 
-static u64 update_load(struct interactive_cpu *icpu, int cpu)
+static u64 update_load(int cpu)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned int delta_idle, delta_time;
-	u64 now_idle, now, active_time;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
 
 	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
-	delta_idle = (unsigned int)(now_idle - icpu->time_in_idle);
-	delta_time = (unsigned int)(now - icpu->time_in_idle_timestamp);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
 
 	if (delta_time <= delta_idle)
 		active_time = 0;
 	else
 		active_time = delta_time - delta_idle;
 
-	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;
+	pcpu->cputime_speedadj += active_time * ppol->policy->cur;
 
-	icpu->time_in_idle = now_idle;
-	icpu->time_in_idle_timestamp = now;
-
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
 	return now;
 }
 
-/* Re-evaluate load to see if a frequency change is required or not */
-static void eval_target_freq(struct interactive_cpu *icpu)
+static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
+				   unsigned long busy)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	u64 cputime_speedadj, now, max_fvtime;
-	unsigned int new_freq, loadadjfreq, index, delta_time;
-	unsigned long flags;
+	int prev_load;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+
+	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
+				busy, tunables->timer_rate);
+	return prev_load;
+}
+
+#define NEW_TASK_RATIO 75
+#define PRED_TOLERANCE_PCT 10
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	s64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
 	int cpu_load;
-	int cpu = smp_processor_id();
+	int pol_load = 0;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	struct sched_load *sl = ppol->sl;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned int new_freq;
+	unsigned int prev_laf = 0, t_prevlaf;
+	unsigned int pred_laf = 0, t_predlaf = 0;
+	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
+	unsigned int index;
+	unsigned long flags;
+	unsigned long max_cpu;
+	int cpu, i;
+	int new_load_pct = 0;
+	int prev_l, pred_l = 0;
+	struct cpufreq_govinfo govinfo;
+	bool skip_hispeed_logic, skip_min_sample_time;
+	bool jump_to_max_no_ts = false;
+	bool jump_to_max = false;
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	now = update_load(icpu, smp_processor_id());
-	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
-	cputime_speedadj = icpu->cputime_speedadj;
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	if (WARN_ON_ONCE(!delta_time))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return;
-
-	spin_lock_irqsave(&icpu->target_freq_lock, flags);
-	do_div(cputime_speedadj, delta_time);
-	loadadjfreq = (unsigned int)cputime_speedadj * 100;
-	cpu_load = loadadjfreq / policy->cur;
-	tunables->boosted = tunables->boost ||
-			    now < tunables->boostpulse_endtime;
-
-	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
-		if (policy->cur < tunables->hispeed_freq) {
-			new_freq = tunables->hispeed_freq;
-		} else {
-			new_freq = choose_freq(icpu, loadadjfreq);
-
-			if (new_freq < tunables->hispeed_freq)
-				new_freq = tunables->hispeed_freq;
-		}
-	} else {
-		new_freq = choose_freq(icpu, loadadjfreq);
-		if (new_freq > tunables->hispeed_freq &&
-		    policy->cur < tunables->hispeed_freq)
-			new_freq = tunables->hispeed_freq;
-	}
-
-	if (policy->cur >= tunables->hispeed_freq &&
-	    new_freq > policy->cur &&
-	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
-		trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
+	if (!ppol->governor_enabled)
 		goto exit;
+
+	now = ktime_to_us(ktime_get());
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	spin_lock(&ppol->load_lock);
+
+	skip_hispeed_logic =
+		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
+	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
+	ppol->notif_pending = false;
+	now = ktime_to_us(ktime_get());
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+
+	if (tunables->use_sched_load)
+		sched_get_cpus_busy(sl, ppol->policy->cpus);
+	max_cpu = cpumask_first(ppol->policy->cpus);
+	i = 0;
+	for_each_cpu(cpu, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+		if (tunables->use_sched_load) {
+			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
+			prev_l = t_prevlaf / ppol->target_freq;
+			if (tunables->enable_prediction) {
+				t_predlaf = sl_busy_to_laf(ppol,
+						sl[i].predicted_load);
+				pred_l = t_predlaf / ppol->target_freq;
+			}
+			if (sl[i].prev_load)
+				new_load_pct = sl[i].new_task_load * 100 /
+							sl[i].prev_load;
+			else
+				new_load_pct = 0;
+		} else {
+			now = update_load(cpu);
+			delta_time = (unsigned int)
+				(now - pcpu->cputime_speedadj_timestamp);
+			if (WARN_ON_ONCE(!delta_time))
+				continue;
+			cputime_speedadj = pcpu->cputime_speedadj;
+			do_div(cputime_speedadj, delta_time);
+			t_prevlaf = (unsigned int)cputime_speedadj * 100;
+			prev_l = t_prevlaf / ppol->target_freq;
+		}
+
+		/* find max of loadadjfreq inside policy */
+		if (t_prevlaf > prev_laf) {
+			prev_laf = t_prevlaf;
+			max_cpu = cpu;
+		}
+		pred_laf = max(t_predlaf, pred_laf);
+
+		cpu_load = max(prev_l, pred_l);
+		pol_load = max(pol_load, cpu_load);
+		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
+						  prev_l, pred_l);
+
+		/* save loadadjfreq for notification */
+		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);
+
+		/* detect heavy new task and jump to policy->max */
+		if (prev_l >= tunables->go_hispeed_load &&
+		    new_load_pct >= NEW_TASK_RATIO) {
+			skip_hispeed_logic = true;
+			jump_to_max = true;
+		}
+		i++;
+	}
+	spin_unlock(&ppol->load_lock);
+
+	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+	prev_chfreq = choose_freq(ppol, prev_laf);
+	pred_chfreq = choose_freq(ppol, pred_laf);
+	chosen_freq = max(prev_chfreq, pred_chfreq);
+
+	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
+
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis &&
+	    pol_load >= tunables->go_hispeed_load &&
+	    ppol->target_freq < ppol->policy->max) {
+		skip_hispeed_logic = true;
+		skip_min_sample_time = true;
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
 	}
 
-	icpu->loc_hispeed_val_time = now;
+	new_freq = chosen_freq;
+	if (jump_to_max_no_ts || jump_to_max) {
+		new_freq = ppol->policy->cpuinfo.max_freq;
+	} else if (!skip_hispeed_logic) {
+		if (pol_load >= tunables->go_hispeed_load ||
+		    tunables->boosted) {
+			if (ppol->target_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+			else
+				new_freq = max(new_freq,
+					       tunables->hispeed_freq);
+		}
+	}
 
-	index = cpufreq_frequency_table_target(policy, new_freq,
-					       CPUFREQ_RELATION_L);
-	new_freq = freq_table[index].frequency;
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis)
+		new_freq = max(tunables->hispeed_freq, new_freq);
+
+	if (!skip_hispeed_logic &&
+	    ppol->target_freq >= tunables->hispeed_freq &&
+	    new_freq > ppol->target_freq &&
+	    now - ppol->hispeed_validate_time <
+	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
+		trace_cpufreq_interactive_notyet(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	ppol->hispeed_validate_time = now;
+
+	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
+					   CPUFREQ_RELATION_L);
+	new_freq = ppol->freq_table[index].frequency;
 
 	/*
 	 * Do not scale below floor_freq unless we have been at or above the
 	 * floor frequency for the minimum sample time since last validated.
 	 */
-	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
-	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
-		if (now - max_fvtime < tunables->min_sample_time) {
-			trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
-			goto exit;
+	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
+		if (now - ppol->floor_validate_time <
+				tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				max_cpu, pol_load, ppol->target_freq,
+				ppol->policy->cur, new_freq);
+			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+			goto rearm;
 		}
 	}
 
@@ -433,114 +624,63 @@
 	 * or above the selected frequency for a minimum of min_sample_time,
 	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
 	 * allow the speed to drop as soon as the boostpulse duration expires
-	 * (or the indefinite boost is turned off).
+	 * (or the indefinite boost is turned off). If policy->max is restored
+	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
+	 * could incorrectly extend the duration of max_freq_hysteresis by
+	 * min_sample_time.
 	 */
 
-	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
-		icpu->floor_freq = new_freq;
-		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
-			icpu->loc_floor_val_time = now;
+	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
+	    && !jump_to_max_no_ts) {
+		ppol->floor_freq = new_freq;
+		ppol->floor_validate_time = now;
 	}
 
-	if (icpu->target_freq == new_freq &&
-	    icpu->target_freq <= policy->cur) {
-		trace_cpufreq_interactive_already(cpu, cpu_load,
-			icpu->target_freq, policy->cur, new_freq);
-		goto exit;
+	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+		ppol->max_freq_hyst_start_time = now;
+
+	if (ppol->target_freq == new_freq &&
+			ppol->target_freq <= ppol->policy->cur) {
+		trace_cpufreq_interactive_already(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
 	}
 
-	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
-					 policy->cur, new_freq);
+	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
+					 ppol->policy->cur, new_freq);
 
-	icpu->target_freq = new_freq;
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-
+	ppol->target_freq = new_freq;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-	cpumask_set_cpu(cpu, &speedchange_cpumask);
+	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process_no_notif(speedchange_task);
 
-	wake_up_process(speedchange_task);
-	return;
+rearm:
+	if (!timer_pending(&ppol->policy_timer))
+		cpufreq_interactive_timer_resched(data, false);
+
+	/*
+	 * Send govinfo notification.
+	 * Govinfo notification could potentially wake up another thread
+	 * managed by its clients. Thread wakeups might trigger a load
+	 * change callback that executes this function again. Therefore
+	 * no spinlock could be held when sending the notification.
+	 */
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		govinfo.cpu = i;
+		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
+		govinfo.sampling_rate_us = tunables->timer_rate;
+		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
+					   CPUFREQ_LOAD_CHANGE, &govinfo);
+	}
 
 exit:
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-}
-
-static void cpufreq_interactive_update(struct interactive_cpu *icpu)
-{
-	eval_target_freq(icpu);
-	slack_timer_resched(icpu, smp_processor_id(), true);
-}
-
-static void cpufreq_interactive_idle_end(void)
-{
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
-						smp_processor_id());
-
-	if (!down_read_trylock(&icpu->enable_sem))
-		return;
-
-	if (icpu->ipolicy) {
-		/*
-		 * We haven't sampled load for more than sampling_rate time, do
-		 * it right now.
-		 */
-		if (time_after_eq(jiffies, icpu->next_sample_jiffies))
-			cpufreq_interactive_update(icpu);
-	}
-
-	up_read(&icpu->enable_sem);
-}
-
-static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
-						unsigned int *pmax_freq,
-						u64 *phvt, u64 *pfvt)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt = ~0ULL, fvt = 0;
-	unsigned int max_freq = 0, i;
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-
-		fvt = max(fvt, icpu->loc_floor_val_time);
-		if (icpu->target_freq > max_freq) {
-			max_freq = icpu->target_freq;
-			hvt = icpu->loc_hispeed_val_time;
-		} else if (icpu->target_freq == max_freq) {
-			hvt = min(hvt, icpu->loc_hispeed_val_time);
-		}
-	}
-
-	*pmax_freq = max_freq;
-	*phvt = hvt;
-	*pfvt = fvt;
-}
-
-static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
-					   struct cpufreq_policy *policy)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt, fvt;
-	unsigned int max_freq;
-	int i;
-
-	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-		icpu->pol_floor_val_time = fvt;
-	}
-
-	if (max_freq != policy->cur) {
-		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-			icpu->pol_hispeed_val_time = hvt;
-		}
-	}
-
-	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+	up_read(&ppol->enable_sem);
+	return;
 }
 
 static int cpufreq_interactive_speedchange_task(void *data)
@@ -548,112 +688,182 @@
 	unsigned int cpu;
 	cpumask_t tmp_mask;
 	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
 
-again:
-	set_current_state(TASK_INTERRUPTIBLE);
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-
-	if (cpumask_empty(&speedchange_cpumask)) {
-		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-		schedule();
-
-		if (kthread_should_stop())
-			return 0;
-
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			ppol = per_cpu(polinfo, cpu);
+			if (!down_read_trylock(&ppol->enable_sem))
+				continue;
+			if (!ppol->governor_enabled) {
+				up_read(&ppol->enable_sem);
+				continue;
+			}
+
+			if (ppol->target_freq != ppol->policy->cur)
+				__cpufreq_driver_target(ppol->policy,
+							ppol->target_freq,
+							CPUFREQ_RELATION_H);
+			trace_cpufreq_interactive_setspeed(cpu,
+						     ppol->target_freq,
+						     ppol->policy->cur);
+			up_read(&ppol->enable_sem);
+		}
 	}
 
-	set_current_state(TASK_RUNNING);
-	tmp_mask = speedchange_cpumask;
-	cpumask_clear(&speedchange_cpumask);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-
-	for_each_cpu(cpu, &tmp_mask) {
-		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
-		struct cpufreq_policy *policy = icpu->ipolicy->policy;
-
-		if (unlikely(!down_read_trylock(&icpu->enable_sem)))
-			continue;
-
-		if (likely(icpu->ipolicy))
-			cpufreq_interactive_adjust_cpu(cpu, policy);
-
-		up_read(&icpu->enable_sem);
-	}
-
-	goto again;
+	return 0;
 }
 
-static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
 {
-	struct interactive_policy *ipolicy;
-	struct cpufreq_policy *policy;
-	struct interactive_cpu *icpu;
-	unsigned long flags[2];
-	bool wakeup = false;
 	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_policyinfo *ppol;
 
 	tunables->boosted = true;
 
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
-	for_each_ipolicy(ipolicy) {
-		policy = ipolicy->policy;
+	for_each_online_cpu(i) {
+		ppol = per_cpu(polinfo, i);
+		if (!ppol || tunables != ppol->policy->governor_data)
+			continue;
 
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-
-			if (!down_read_trylock(&icpu->enable_sem))
-				continue;
-
-			if (!icpu->ipolicy) {
-				up_read(&icpu->enable_sem);
-				continue;
-			}
-
-			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
-			if (icpu->target_freq < tunables->hispeed_freq) {
-				icpu->target_freq = tunables->hispeed_freq;
-				cpumask_set_cpu(i, &speedchange_cpumask);
-				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
-				wakeup = true;
-			}
-			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);
-
-			up_read(&icpu->enable_sem);
+		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
+		if (ppol->target_freq < tunables->hispeed_freq) {
+			ppol->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			ppol->hispeed_validate_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
 		}
+
+		/*
+		 * Set floor freq and (re)start timer for when last
+		 * validated.
+		 */
+
+		ppol->floor_freq = tunables->hispeed_freq;
+		ppol->floor_validate_time = ktime_to_us(ktime_get());
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
+		break;
 	}
 
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
-	if (wakeup)
-		wake_up_process(speedchange_task);
+	if (anyboost)
+		wake_up_process_no_notif(speedchange_task);
 }
 
-static int cpufreq_interactive_notifier(struct notifier_block *nb,
-					unsigned long val, void *data)
+static int load_change_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
 {
-	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	unsigned long cpu = (unsigned long) data;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables;
 	unsigned long flags;
 
-	if (val != CPUFREQ_POSTCHANGE)
+	if (!ppol || ppol->reject_notification)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return 0;
+	if (!ppol->governor_enabled)
+		goto exit;
 
-	if (!icpu->ipolicy) {
-		up_read(&icpu->enable_sem);
+	tunables = ppol->policy->governor_data;
+	if (!tunables->use_sched_load || !tunables->use_migration_notif)
+		goto exit;
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	ppol->notif_pending = true;
+	ppol->notif_cpu = cpu;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+
+	if (!hrtimer_is_queued(&ppol->notif_timer))
+		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
+			      HRTIMER_MODE_REL);
+exit:
+	up_read(&ppol->enable_sem);
+	return 0;
+}
+
+static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
+{
+	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
+			struct cpufreq_interactive_policyinfo, notif_timer);
+	int cpu;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return 0;
+	if (!ppol->governor_enabled) {
+		up_read(&ppol->enable_sem);
 		return 0;
 	}
+	cpu = ppol->notif_cpu;
+	trace_cpufreq_interactive_load_change(cpu);
+	del_timer(&ppol->policy_timer);
+	del_timer(&ppol->policy_slack_timer);
+	cpufreq_interactive_timer(cpu);
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
+	up_read(&ppol->enable_sem);
+	return HRTIMER_NORESTART;
+}
 
-	up_read(&icpu->enable_sem);
+static struct notifier_block load_notifier_block = {
+	.notifier_call = load_change_callback,
+};
 
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_policyinfo *ppol;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		ppol = per_cpu(polinfo, freq->cpu);
+		if (!ppol)
+			return 0;
+		if (!down_read_trylock(&ppol->enable_sem))
+			return 0;
+		if (!ppol->governor_enabled) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+
+		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+		spin_lock_irqsave(&ppol->load_lock, flags);
+		for_each_cpu(cpu, ppol->policy->cpus)
+			update_load(cpu);
+		spin_unlock_irqrestore(&ppol->load_lock, flags);
+
+		up_read(&ppol->enable_sem);
+	}
 	return 0;
 }
 
@@ -663,26 +873,29 @@
 
 static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
 {
-	const char *cp = buf;
-	int ntokens = 1, i = 0;
+	const char *cp;
+	int i;
+	int ntokens = 1;
 	unsigned int *tokenized_data;
 	int err = -EINVAL;
 
+	cp = buf;
 	while ((cp = strpbrk(cp + 1, " :")))
 		ntokens++;
 
 	if (!(ntokens & 0x1))
 		goto err;
 
-	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
 	if (!tokenized_data) {
 		err = -ENOMEM;
 		goto err;
 	}
 
 	cp = buf;
+	i = 0;
 	while (i < ntokens) {
-		if (kstrtouint(cp, 0, &tokenized_data[i++]) < 0)
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
 			goto err_kfree;
 
 		cp = strpbrk(cp, " :");
@@ -703,25 +916,13 @@
 	return ERR_PTR(err);
 }
 
-/* Interactive governor sysfs interface */
-static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
 {
-	return container_of(attr_set, struct interactive_tunables, attr_set);
-}
-
-#define show_one(file_name, type)					\
-static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
-{									\
-	struct interactive_tunables *tunables = to_tunables(attr_set);	\
-	return sprintf(buf, type "\n", tunables->file_name);		\
-}
-
-static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
@@ -731,21 +932,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_target_loads(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned int *new_target_loads;
-	unsigned long flags;
 	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
 
 	new_target_loads = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_target_loads))
-		return PTR_ERR(new_target_loads);
+		return PTR_RET(new_target_loads);
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 	if (tunables->target_loads != default_target_loads)
@@ -754,16 +954,17 @@
 	tunables->ntarget_loads = ntokens;
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
 
+	sched_update_freq_max_load(&controlled_cpus);
+
 	return count;
 }
 
-static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
-					char *buf)
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 
@@ -774,21 +975,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
+	int ntokens;
 	unsigned int *new_above_hispeed_delay = NULL;
 	unsigned long flags;
-	int ntokens;
 
 	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_above_hispeed_delay))
-		return PTR_ERR(new_above_hispeed_delay);
+		return PTR_RET(new_above_hispeed_delay);
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
@@ -796,71 +996,105 @@
 	tunables->above_hispeed_delay = new_above_hispeed_delay;
 	tunables->nabove_hispeed_delay = ntokens;
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return count;
+
 }
 
-static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long int val;
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	long unsigned int val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->hispeed_freq = val;
-
 	return count;
 }
 
-static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
+#define show_store_one(file_name)					\
+static ssize_t show_##file_name(					\
+	struct cpufreq_interactive_tunables *tunables, char *buf)	\
+{									\
+	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
+}									\
+static ssize_t store_##file_name(					\
+		struct cpufreq_interactive_tunables *tunables,		\
+		const char *buf, size_t count)				\
+{									\
+	int ret;							\
+	unsigned long int val;						\
+									\
+	ret = kstrtoul(buf, 0, &val);				\
+	if (ret < 0)							\
+		return ret;						\
+	tunables->file_name = val;					\
+	return count;							\
+}
+show_store_one(max_freq_hysteresis);
+show_store_one(align_windows);
+show_store_one(ignore_hispeed_on_notif);
+show_store_one(fast_ramp_down);
+show_store_one(enable_prediction);
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->go_hispeed_load = val;
-
 	return count;
 }
 
-static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->min_sample_time = val;
-
 	return count;
 }
 
-static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-
-	return sprintf(buf, "%lu\n", tunables->sampling_rate);
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
 }
 
-static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val, val_round;
 	int ret;
+	unsigned long val, val_round;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
@@ -870,43 +1104,62 @@
 	if (val != val_round)
 		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
 			val_round);
+	tunables->timer_rate = val_round;
 
-	tunables->sampling_rate = val_round;
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->timer_rate = val_round;
+	}
+	set_window_helper(tunables);
 
 	return count;
 }
 
-static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
-				 size_t count)
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	long val;	/* signed: kstrtol() needs long *, and negative slack is valid */
 
 	ret = kstrtol(buf, 10, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->timer_slack = val;
-	update_slack_delay(tunables);
-
+	tunables->timer_slack_val = val;
 	return count;
 }
 
-static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
-			   size_t count)
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boost = val;
+	tunables->boost_val = val;
 
-	if (tunables->boost) {
+	if (tunables->boost_val) {
 		trace_cpufreq_interactive_boost("on");
 		if (!tunables->boosted)
 			cpufreq_interactive_boost(tunables);
@@ -918,111 +1171,469 @@
 	return count;
 }
 
-static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
 	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
-					tunables->boostpulse_duration;
+		tunables->boostpulse_duration_val;
 	trace_cpufreq_interactive_boost("pulse");
 	if (!tunables->boosted)
 		cpufreq_interactive_boost(tunables);
-
 	return count;
 }
 
-static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boostpulse_duration = val;
-
+	tunables->boostpulse_duration_val = val;
 	return count;
 }
 
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->io_is_busy = val;
 
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->io_is_busy = val;
+	}
+	sched_set_io_is_busy(val);
+
 	return count;
 }
 
-show_one(hispeed_freq, "%u");
-show_one(go_hispeed_load, "%lu");
-show_one(min_sample_time, "%lu");
-show_one(timer_slack, "%lu");
-show_one(boost, "%u");
-show_one(boostpulse_duration, "%u");
-show_one(io_is_busy, "%u");
-
-gov_attr_rw(target_loads);
-gov_attr_rw(above_hispeed_delay);
-gov_attr_rw(hispeed_freq);
-gov_attr_rw(go_hispeed_load);
-gov_attr_rw(min_sample_time);
-gov_attr_rw(timer_rate);
-gov_attr_rw(timer_slack);
-gov_attr_rw(boost);
-gov_attr_wo(boostpulse);
-gov_attr_rw(boostpulse_duration);
-gov_attr_rw(io_is_busy);
-
-static struct attribute *interactive_attributes[] = {
-	&target_loads.attr,
-	&above_hispeed_delay.attr,
-	&hispeed_freq.attr,
-	&go_hispeed_load.attr,
-	&min_sample_time.attr,
-	&timer_rate.attr,
-	&timer_slack.attr,
-	&boost.attr,
-	&boostpulse.attr,
-	&boostpulse_duration.attr,
-	&io_is_busy.attr,
-	NULL
-};
-
-static struct kobj_type interactive_tunables_ktype = {
-	.default_attrs = interactive_attributes,
-	.sysfs_ops = &governor_sysfs_ops,
-};
-
-static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
-					     unsigned long val, void *data)
+static int cpufreq_interactive_enable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
 {
-	if (val == IDLE_END)
-		cpufreq_interactive_idle_end();
+	int rc = 0, j;
+	struct cpufreq_interactive_tunables *t;
 
+	mutex_lock(&sched_lock);
+
+	set_window_count++;
+	if (set_window_count > 1) {
+		for_each_possible_cpu(j) {
+			if (!per_cpu(polinfo, j))
+				continue;
+			t = per_cpu(polinfo, j)->cached_tunables;
+			if (t && t->use_sched_load) {
+				tunables->timer_rate = t->timer_rate;
+				tunables->io_is_busy = t->io_is_busy;
+				break;
+			}
+		}
+	} else {
+		rc = set_window_helper(tunables);
+		if (rc) {
+			pr_err("%s: Failed to set sched window\n", __func__);
+			set_window_count--;
+			goto out;
+		}
+		sched_set_io_is_busy(tunables->io_is_busy);
+	}
+
+	if (!tunables->use_migration_notif)
+		goto out;
+
+	migration_register_count++;
+	if (migration_register_count > 1)
+		goto out;
+	else
+		atomic_notifier_chain_register(&load_alert_notifier_head,
+						&load_notifier_block);
+out:
+	mutex_unlock(&sched_lock);
+	return rc;
+}
+
+static int cpufreq_interactive_disable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	mutex_lock(&sched_lock);
+
+	if (tunables->use_migration_notif) {
+		migration_register_count--;
+		if (migration_register_count < 1)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	set_window_count--;
+
+	mutex_unlock(&sched_lock);
 	return 0;
 }
 
-static struct notifier_block cpufreq_interactive_idle_nb = {
-	.notifier_call = cpufreq_interactive_idle_notifier,
+static ssize_t show_use_sched_load(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
+}
+
+static ssize_t store_use_sched_load(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_sched_load == (bool) val)
+		return count;
+
+	tunables->use_sched_load = val;
+
+	if (val)
+		ret = cpufreq_interactive_enable_sched_input(tunables);
+	else
+		ret = cpufreq_interactive_disable_sched_input(tunables);
+
+	if (ret) {
+		tunables->use_sched_load = !val;
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t show_use_migration_notif(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			tunables->use_migration_notif);
+}
+
+static ssize_t store_use_migration_notif(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_migration_notif == (bool) val)
+		return count;
+	tunables->use_migration_notif = val;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	mutex_lock(&sched_lock);
+	if (val) {
+		migration_register_count++;
+		if (migration_register_count == 1)
+			atomic_notifier_chain_register(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	} else {
+		migration_register_count--;
+		if (!migration_register_count)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	mutex_unlock(&sched_lock);
+
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+show_store_gov_pol_sys(use_sched_load);
+show_store_gov_pol_sys(use_migration_notif);
+show_store_gov_pol_sys(max_freq_hysteresis);
+show_store_gov_pol_sys(align_windows);
+show_store_gov_pol_sys(ignore_hispeed_on_notif);
+show_store_gov_pol_sys(fast_ramp_down);
+show_store_gov_pol_sys(enable_prediction);
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(use_sched_load);
+gov_sys_pol_attr_rw(use_migration_notif);
+gov_sys_pol_attr_rw(max_freq_hysteresis);
+gov_sys_pol_attr_rw(align_windows);
+gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
+gov_sys_pol_attr_rw(fast_ramp_down);
+gov_sys_pol_attr_rw(enable_prediction);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	&use_sched_load_gov_sys.attr,
+	&use_migration_notif_gov_sys.attr,
+	&max_freq_hysteresis_gov_sys.attr,
+	&align_windows_gov_sys.attr,
+	&ignore_hispeed_on_notif_gov_sys.attr,
+	&fast_ramp_down_gov_sys.attr,
+	&enable_prediction_gov_sys.attr,
+	NULL,
 };
 
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	&use_sched_load_gov_pol.attr,
+	&use_migration_notif_gov_pol.attr,
+	&max_freq_hysteresis_gov_pol.attr,
+	&align_windows_gov_pol.attr,
+	&ignore_hispeed_on_notif_gov_pol.attr,
+	&fast_ramp_down_gov_pol.attr,
+	&enable_prediction_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static struct cpufreq_interactive_tunables *alloc_tunable(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_tunables *tunables;
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (!tunables)
+		return ERR_PTR(-ENOMEM);
+
+	tunables->above_hispeed_delay = default_above_hispeed_delay;
+	tunables->nabove_hispeed_delay =
+		ARRAY_SIZE(default_above_hispeed_delay);
+	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+	tunables->target_loads = default_target_loads;
+	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_rate = DEFAULT_TIMER_RATE;
+	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+	spin_lock_init(&tunables->target_loads_lock);
+	spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+	return tunables;
+}
+
+static struct cpufreq_interactive_policyinfo *get_policyinfo(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_policyinfo *ppol =
+				per_cpu(polinfo, policy->cpu);
+	int i;
+	struct sched_load *sl;
+
+	/* polinfo already allocated for policy, return */
+	if (ppol)
+		return ppol;
+
+	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
+	if (!ppol)
+		return ERR_PTR(-ENOMEM);
+
+	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
+		     GFP_KERNEL);
+	if (!sl) {
+		kfree(ppol);
+		return ERR_PTR(-ENOMEM);
+	}
+	ppol->sl = sl;
+
+	init_timer_deferrable(&ppol->policy_timer);
+	ppol->policy_timer.function = cpufreq_interactive_timer;
+	init_timer(&ppol->policy_slack_timer);
+	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
+	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	spin_lock_init(&ppol->load_lock);
+	spin_lock_init(&ppol->target_freq_lock);
+	init_rwsem(&ppol->enable_sem);
+
+	for_each_cpu(i, policy->related_cpus)
+		per_cpu(polinfo, i) = ppol;
+	return ppol;
+}
+
+/* This function is not multithread-safe. */
+static void free_policyinfo(int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	int j;
+
+	if (!ppol)
+		return;
+
+	for_each_possible_cpu(j)
+		if (per_cpu(polinfo, j) == ppol)
+			per_cpu(polinfo, j) = NULL; /* clear every CPU aliasing ppol, not just 'cpu' */
+	kfree(ppol->cached_tunables);
+	kfree(ppol->sl);
+	kfree(ppol);
+}
+
+static struct cpufreq_interactive_tunables *get_tunables(
+				struct cpufreq_interactive_policyinfo *ppol)
+{
+	if (have_governor_per_policy())
+		return ppol->cached_tunables;
+	else
+		return cached_common_tunables;
+}
+
 /* Interactive Governor callbacks */
 struct interactive_governor {
 	struct cpufreq_governor gov;
@@ -1033,305 +1644,207 @@
 
 #define CPU_FREQ_GOV_INTERACTIVE	(&interactive_gov.gov)
 
-static void irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *icpu = container_of(irq_work, struct
-						    interactive_cpu, irq_work);
-
-	cpufreq_interactive_update(icpu);
-	icpu->work_in_progress = false;
-}
-
-static void update_util_handler(struct update_util_data *data, u64 time,
-				unsigned int flags)
-{
-	struct interactive_cpu *icpu = container_of(data,
-					struct interactive_cpu, update_util);
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	u64 delta_ns;
-
-	/*
-	 * The irq-work may not be allowed to be queued up right now.
-	 * Possible reasons:
-	 * - Work has already been queued up or is in progress.
-	 * - It is too early (too little time from the previous sample).
-	 */
-	if (icpu->work_in_progress)
-		return;
-
-	delta_ns = time - icpu->last_sample_time;
-	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
-		return;
-
-	icpu->last_sample_time = time;
-	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
-				    jiffies;
-
-	icpu->work_in_progress = true;
-	irq_work_queue(&icpu->irq_work);
-}
-
-static void gov_set_update_util(struct interactive_policy *ipolicy)
-{
-	struct cpufreq_policy *policy = ipolicy->policy;
-	struct interactive_cpu *icpu;
-	int cpu;
-
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		icpu->last_sample_time = 0;
-		icpu->next_sample_jiffies = 0;
-		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
-					     update_util_handler);
-	}
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-	int i;
-
-	for_each_cpu(i, policy->cpus)
-		cpufreq_remove_update_util_hook(i);
-
-	synchronize_sched();
-}
-
-static void icpu_cancel_work(struct interactive_cpu *icpu)
-{
-	irq_work_sync(&icpu->irq_work);
-	icpu->work_in_progress = false;
-	del_timer_sync(&icpu->slack_timer);
-}
-
-static struct interactive_policy *
-interactive_policy_alloc(struct cpufreq_policy *policy)
-{
-	struct interactive_policy *ipolicy;
-
-	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
-	if (!ipolicy)
-		return NULL;
-
-	ipolicy->policy = policy;
-
-	return ipolicy;
-}
-
-static void interactive_policy_free(struct interactive_policy *ipolicy)
-{
-	kfree(ipolicy);
-}
-
-static struct interactive_tunables *
-interactive_tunables_alloc(struct interactive_policy *ipolicy)
-{
-	struct interactive_tunables *tunables;
-
-	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
-	if (!tunables)
-		return NULL;
-
-	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
-	if (!have_governor_per_policy())
-		global_tunables = tunables;
-
-	ipolicy->tunables = tunables;
-
-	return tunables;
-}
-
-static void interactive_tunables_free(struct interactive_tunables *tunables)
-{
-	if (!have_governor_per_policy())
-		global_tunables = NULL;
-
-	kfree(tunables);
-}
-
 int cpufreq_interactive_init(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy;
-	struct interactive_tunables *tunables;
-	int ret;
+	int rc;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	/* State should be equivalent to EXIT */
-	if (policy->governor_data)
-		return -EBUSY;
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	ipolicy = interactive_policy_alloc(policy);
-	if (!ipolicy)
-		return -ENOMEM;
+	ppol = get_policyinfo(policy);
+	if (IS_ERR(ppol))
+		return PTR_ERR(ppol);
 
-	mutex_lock(&global_tunables_lock);
-
-	if (global_tunables) {
-		if (WARN_ON(have_governor_per_policy())) {
-			ret = -EINVAL;
-			goto free_int_policy;
-		}
-
-		policy->governor_data = ipolicy;
-		ipolicy->tunables = global_tunables;
-
-		gov_attr_set_get(&global_tunables->attr_set,
-				 &ipolicy->tunables_hook);
-		goto out;
+	if (have_governor_per_policy()) {
+		WARN_ON(tunables);
+	} else if (tunables) {
+		tunables->usage_count++;
+		cpumask_or(&controlled_cpus, &controlled_cpus,
+			   policy->related_cpus);
+		sched_update_freq_max_load(policy->related_cpus);
+		policy->governor_data = tunables;
+		return 0;
 	}
 
-	tunables = interactive_tunables_alloc(ipolicy);
+	tunables = get_tunables(ppol);
 	if (!tunables) {
-		ret = -ENOMEM;
-		goto free_int_policy;
+		tunables = alloc_tunable(policy);
+		if (IS_ERR(tunables))
+			return PTR_ERR(tunables);
 	}
 
-	tunables->hispeed_freq = policy->max;
-	tunables->above_hispeed_delay = default_above_hispeed_delay;
-	tunables->nabove_hispeed_delay =
-		ARRAY_SIZE(default_above_hispeed_delay);
-	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
-	tunables->target_loads = default_target_loads;
-	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
-	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
-	tunables->timer_slack = DEFAULT_TIMER_SLACK;
-	update_slack_delay(tunables);
+	tunables->usage_count = 1;
+	policy->governor_data = tunables;
+	if (!have_governor_per_policy())
+		common_tunables = tunables;
 
-	spin_lock_init(&tunables->target_loads_lock);
-	spin_lock_init(&tunables->above_hispeed_delay_lock);
+	rc = sysfs_create_group(get_governor_parent_kobj(policy),
+			get_sysfs_attr());
+	if (rc) {
+		kfree(tunables);
+		policy->governor_data = NULL;
+		if (!have_governor_per_policy())
+			common_tunables = NULL;
+		return rc;
+	}
 
-	policy->governor_data = ipolicy;
-
-	ret = kobject_init_and_add(&tunables->attr_set.kobj,
-				   &interactive_tunables_ktype,
-				   get_governor_parent_kobj(policy), "%s",
-				   interactive_gov.gov.name);
-	if (ret)
-		goto fail;
-
-	/* One time initialization for governor */
-	if (!interactive_gov.usage_count++) {
-		idle_notifier_register(&cpufreq_interactive_idle_nb);
+	if (!interactive_gov.usage_count++)
 		cpufreq_register_notifier(&cpufreq_notifier_block,
-					  CPUFREQ_TRANSITION_NOTIFIER);
-	}
+				CPUFREQ_TRANSITION_NOTIFIER);
 
- out:
-	mutex_unlock(&global_tunables_lock);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_enable_sched_input(tunables);
+
+	cpumask_or(&controlled_cpus, &controlled_cpus,
+		   policy->related_cpus);
+	sched_update_freq_max_load(policy->related_cpus);
+
+	if (have_governor_per_policy())
+		ppol->cached_tunables = tunables;
+	else
+		cached_common_tunables = tunables;
+
 	return 0;
-
- fail:
-	policy->governor_data = NULL;
-	interactive_tunables_free(tunables);
-
- free_int_policy:
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
-	pr_err("governor initialization failed (%d)\n", ret);
-
-	return ret;
 }
 
 void cpufreq_interactive_exit(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	unsigned int count;
+	struct cpufreq_interactive_tunables *tunables;
 
-	mutex_lock(&global_tunables_lock);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	/* Last policy using the governor ? */
-	if (!--interactive_gov.usage_count) {
-		cpufreq_unregister_notifier(&cpufreq_notifier_block,
-					    CPUFREQ_TRANSITION_NOTIFIER);
-		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+	BUG_ON(!tunables);
+
+	cpumask_andnot(&controlled_cpus, &controlled_cpus,
+		       policy->related_cpus);
+	sched_update_freq_max_load(cpu_possible_mask);
+	if (!--tunables->usage_count) {
+		/* Last policy using the governor ? */
+		if (!--interactive_gov.usage_count)
+			cpufreq_unregister_notifier(&cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+		sysfs_remove_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+
+		common_tunables = NULL;
 	}
 
-	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
 	policy->governor_data = NULL;
-	if (!count)
-		interactive_tunables_free(tunables);
 
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_disable_sched_input(tunables);
 }
 
 int cpufreq_interactive_start(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-		icpu->target_freq = policy->cur;
-		icpu->floor_freq = icpu->target_freq;
-		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
-		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
-		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
-		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;
+	BUG_ON(!tunables);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = ipolicy;
-		up_write(&icpu->enable_sem);
+	freq_table = policy->freq_table;
+	if (!tunables->hispeed_freq)
+		tunables->hispeed_freq = policy->max;
 
-		slack_timer_resched(icpu, cpu, false);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->policy = policy;
+	ppol->target_freq = policy->cur;
+	ppol->freq_table = freq_table;
+	ppol->p_nolim = *policy;
+	ppol->p_nolim.min = policy->cpuinfo.min_freq;
+	ppol->p_nolim.max = policy->cpuinfo.max_freq;
+	ppol->floor_freq = ppol->target_freq;
+	ppol->floor_validate_time = ktime_to_us(ktime_get());
+	ppol->hispeed_validate_time = ppol->floor_validate_time;
+	ppol->min_freq = policy->min;
+	ppol->reject_notification = true;
+	ppol->notif_pending = false;
+	down_write(&ppol->enable_sem);
+	del_timer_sync(&ppol->policy_timer);
+	del_timer_sync(&ppol->policy_slack_timer);
+	ppol->policy_timer.data = policy->cpu;
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+	cpufreq_interactive_timer_start(tunables, policy->cpu);
+	ppol->governor_enabled = 1;
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
 
-	gov_set_update_util(ipolicy);
+	mutex_unlock(&gov_lock);
 	return 0;
 }
 
 void cpufreq_interactive_stop(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	gov_clear_update_util(ipolicy->policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
 
-		icpu_cancel_work(icpu);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = NULL;
-		up_write(&icpu->enable_sem);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->reject_notification = true;
+	down_write(&ppol->enable_sem);
+	ppol->governor_enabled = 0;
+	ppol->target_freq = 0;
+	del_timer_sync(&ppol->policy_timer);
+	del_timer_sync(&ppol->policy_slack_timer);
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
+
+	mutex_unlock(&gov_lock);
 }
 
 void cpufreq_interactive_limits(struct cpufreq_policy *policy)
 {
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	cpufreq_policy_apply_limits(policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
+	ppol = per_cpu(polinfo, policy->cpu);
 
-		spin_lock_irqsave(&icpu->target_freq_lock, flags);
+	__cpufreq_driver_target(policy,
+			ppol->target_freq, CPUFREQ_RELATION_L);
 
-		if (policy->max < icpu->target_freq)
-			icpu->target_freq = policy->max;
-		else if (policy->min > icpu->target_freq)
-			icpu->target_freq = policy->min;
-
-		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
+	down_read(&ppol->enable_sem);
+	if (ppol->governor_enabled) {
+		if (policy->min < ppol->min_freq)
+			cpufreq_interactive_timer_resched(policy->cpu,
+							  true);
+		ppol->min_freq = policy->min;
 	}
+	up_read(&ppol->enable_sem);
 }
 
 static struct interactive_governor interactive_gov = {
 	.gov = {
 		.name			= "interactive",
-		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+		.max_transition_latency	= 10000000,
 		.owner			= THIS_MODULE,
 		.init			= cpufreq_interactive_init,
 		.exit			= cpufreq_interactive_exit,
@@ -1341,47 +1854,24 @@
 	}
 };
 
-static void cpufreq_interactive_nop_timer(unsigned long data)
-{
-	/*
-	 * The purpose of slack-timer is to wake up the CPU from IDLE, in order
-	 * to decrease its frequency if it is not set to minimum already.
-	 *
-	 * This is important for platforms where CPU with higher frequencies
-	 * consume higher power even at IDLE.
-	 */
-}
-
 static int __init cpufreq_interactive_gov_init(void)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		init_irq_work(&icpu->irq_work, irq_work);
-		spin_lock_init(&icpu->load_lock);
-		spin_lock_init(&icpu->target_freq_lock);
-		init_rwsem(&icpu->enable_sem);
-
-		/* Initialize per-cpu slack-timer */
-		init_timer_pinned(&icpu->slack_timer);
-		icpu->slack_timer.function = cpufreq_interactive_nop_timer;
-	}
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	spin_lock_init(&speedchange_cpumask_lock);
-	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
-					  NULL, "cfinteractive");
+	mutex_init(&gov_lock);
+	mutex_init(&sched_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
 	if (IS_ERR(speedchange_task))
 		return PTR_ERR(speedchange_task);
 
 	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
 	get_task_struct(speedchange_task);
 
-	/* wake up so the thread does not look hung to the freezer */
-	wake_up_process(speedchange_task);
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process_no_notif(speedchange_task);
 
 	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
 }
@@ -1399,12 +1889,19 @@
 
 static void __exit cpufreq_interactive_gov_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
 	kthread_stop(speedchange_task);
 	put_task_struct(speedchange_task);
+
+	for_each_possible_cpu(cpu)
+		free_policyinfo(cpu);
 }
+
 module_exit(cpufreq_interactive_gov_exit);
 
 MODULE_AUTHOR("Mike Chan <mike@android.com>");
-MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for Latency sensitive workloads");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+	"Latency sensitive workloads");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 0000000..0caa8d1
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,496 @@
+/* drivers/cpufreq/qcom-cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+
+static DEFINE_MUTEX(l2bw_lock);
+
+static struct clk *cpu_clk[NR_CPUS];
+static struct clk *l2_clk;
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
+static bool hotplug_ready;
+
+struct cpufreq_suspend_t {
+	struct mutex suspend_mutex;
+	int device_suspended;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+
+static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
+			unsigned int index)
+{
+	int ret = 0;
+	struct cpufreq_freqs freqs;
+	unsigned long rate;
+
+	freqs.old = policy->cur;
+	freqs.new = new_freq;
+	freqs.cpu = policy->cpu;
+
+	trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
+	cpufreq_freq_transition_begin(policy, &freqs);
+
+	rate = new_freq * 1000;
+	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
+	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
+	if (!ret)
+		trace_cpu_frequency_switch_end(policy->cpu);
+
+	return ret;
+}
+
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	int ret = 0;
+	int index;
+	struct cpufreq_frequency_table *table;
+
+	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+
+	if (target_freq == policy->cur)
+		goto done;
+
+	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
+		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend.\n",
+			 policy->cpu);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	table = policy->freq_table;
+	if (!table) {
+		pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
+		       policy->cpu);
+		ret = -ENODEV;
+		goto done;
+	}
+	index = cpufreq_frequency_table_target(policy, target_freq, relation);
+
+	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
+		policy->cpu, target_freq, relation,
+		policy->min, policy->max, table[index].frequency);
+
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+done:
+	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+	return ret;
+}
+
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk[cpu]) / 1000;
+}
+
+static int msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int cur_freq;
+	int index;
+	int ret = 0;
+	struct cpufreq_frequency_table *table =
+			per_cpu(freq_table, policy->cpu);
+	int cpu;
+
+	/*
+	 * In some SoCs, some cores are clocked by the same source, and their
+	 * frequencies can not be changed independently. Find all other
+	 * CPUs that share same clock, and mark them as controlled by
+	 * same policy.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
+			cpumask_set_cpu(cpu, policy->cpus);
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("cpufreq: failed to get policy min/max\n");
+		return ret;
+	}
+
+	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
+
+	index =  cpufreq_frequency_table_target(policy, cur_freq,
+						CPUFREQ_RELATION_H);
+	/*
+	 * Call set_cpu_freq unconditionally so that when cpu is set to
+	 * online, frequency limit will always be updated.
+	 */
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+	if (ret)
+		return ret;
+	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
+			policy->cpu, cur_freq, table[index].frequency);
+	policy->cur = table[index].frequency;
+
+	return 0;
+}
+
+static int qcom_cpufreq_dead_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_unprepare(cpu_clk[cpu]);
+	clk_unprepare(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_up_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_prepare(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_prepare(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_unprepare(l2_clk);
+	return rc;
+}
+
+static int qcom_cpufreq_dying_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_disable(cpu_clk[cpu]);
+	clk_disable(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_starting_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_enable(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_enable(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_disable(l2_clk);
+	return rc;
+}
+
+static int msm_cpufreq_suspend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
+		per_cpu(suspend_data, cpu).device_suspended = 1;
+		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_resume(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy policy;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	/*
+	 * Freq request might be rejected during suspend, resulting
+	 * in policy->cur violating min/max constraint.
+	 * Correct the frequency as soon as possible.
+	 */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		ret = cpufreq_get_policy(&policy, cpu);
+		if (ret)
+			continue;
+		if (policy.cur <= policy.max && policy.cur >= policy.min)
+			continue;
+		ret = cpufreq_update_policy(cpu);
+		if (ret)
+			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
+			       cpu);
+		else
+			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
+				cpu);
+	}
+	put_online_cpus();
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_pm_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return msm_cpufreq_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return msm_cpufreq_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block msm_cpufreq_pm_notifier = {
+	.notifier_call = msm_cpufreq_pm_event,
+};
+
+static struct freq_attr *msm_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+	/* lps calculations are handled here. */
+	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.init		= msm_cpufreq_init,
+	.verify		= msm_cpufreq_verify,
+	.target		= msm_cpufreq_target,
+	.get		= msm_cpufreq_get_freq,
+	.name		= "msm",
+	.attr		= msm_freq_attr,
+};
+
+static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
+						char *tbl_name, int cpu)
+{
+	int ret, nf, i, j;
+	u32 *data;
+	struct cpufreq_frequency_table *ftbl;
+
+	/* Parse list of usable CPU frequencies. */
+	if (!of_find_property(dev->of_node, tbl_name, &nf))
+		return ERR_PTR(-EINVAL);
+	nf /= sizeof(*data);
+
+	if (nf == 0)
+		return ERR_PTR(-EINVAL);
+
+	data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
+	if (!ftbl)
+		return ERR_PTR(-ENOMEM);
+
+	j = 0;
+	for (i = 0; i < nf; i++) {
+		unsigned long f;
+
+		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			break;
+		f /= 1000;
+
+		/*
+		 * Don't repeat frequencies if they round up to the same clock
+		 * frequency.
+		 *
+		 */
+		if (j > 0 && f <= ftbl[j - 1].frequency)
+			continue;
+
+		ftbl[j].driver_data = j;
+		ftbl[j].frequency = f;
+		j++;
+	}
+
+	ftbl[j].driver_data = j;
+	ftbl[j].frequency = CPUFREQ_TABLE_END;
+
+	devm_kfree(dev, data);
+
+	return ftbl;
+}
+
+static int msm_cpufreq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	char clk_name[] = "cpu??_clk";
+	char tbl_name[] = "qcom,cpufreq-table-??";
+	struct clk *c;
+	int cpu;
+	struct cpufreq_frequency_table *ftbl;
+
+	l2_clk = devm_clk_get(dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		l2_clk = NULL;
+
+	for_each_possible_cpu(cpu) {
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		c = devm_clk_get(dev, clk_name);
+		if (cpu == 0 && IS_ERR(c))
+			return PTR_ERR(c);
+		else if (IS_ERR(c))
+			c = cpu_clk[cpu-1];
+		cpu_clk[cpu] = c;
+	}
+	hotplug_ready = true;
+
+	/* Use per-policy governor tunable for some targets */
+	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
+		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+	/* Parse common cpufreq table for all CPUs */
+	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
+	if (!IS_ERR(ftbl)) {
+		for_each_possible_cpu(cpu)
+			per_cpu(freq_table, cpu) = ftbl;
+		return 0;
+	}
+
+	/*
+	 * No common table. Parse individual tables for each unique
+	 * CPU clock.
+	 */
+	for_each_possible_cpu(cpu) {
+		snprintf(tbl_name, sizeof(tbl_name),
+			 "qcom,cpufreq-table-%d", cpu);
+		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
+
+		/* CPU0 must contain freq table */
+		if (cpu == 0 && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU0's freq table\n");
+			return PTR_ERR(ftbl);
+		}
+		if (cpu == 0) {
+			per_cpu(freq_table, cpu) = ftbl;
+			continue;
+		}
+
+		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU%d's freq table\n",
+				cpu);
+			return PTR_ERR(ftbl);
+		}
+
+		/* Use previous CPU's table if it shares same clock */
+		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
+			if (!IS_ERR(ftbl)) {
+				dev_warn(dev, "Conflicting tables for CPU%d\n",
+					 cpu);
+				devm_kfree(dev, ftbl);
+			}
+			ftbl = per_cpu(freq_table, cpu - 1);
+		}
+		per_cpu(freq_table, cpu) = ftbl;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id msm_cpufreq_match_table[] = {
+	{ .compatible = "qcom,msm-cpufreq" },
+	{}
+};
+
+static struct platform_driver msm_cpufreq_plat_driver = {
+	.probe = msm_cpufreq_probe,
+	.driver = {
+		.name = "msm-cpufreq",
+		.of_match_table = msm_cpufreq_match_table,
+	},
+};
+
+static int __init msm_cpufreq_register(void)
+{
+	int cpu, rc;
+
+	for_each_possible_cpu(cpu) {
+		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	rc = platform_driver_register(&msm_cpufreq_plat_driver);
+	if (rc < 0) {
+		/* Unblock hotplug if msm-cpufreq probe fails */
+		cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
+		cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+		for_each_possible_cpu(cpu)
+			mutex_destroy(&(per_cpu(suspend_data, cpu).
+					suspend_mutex));
+		return rc;
+	}
+
+	register_pm_notifier(&msm_cpufreq_pm_notifier);
+	return cpufreq_register_driver(&msm_cpufreq_driver);
+}
+
+subsys_initcall(msm_cpufreq_register);
+
+static int __init msm_cpufreq_early_register(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
+					"AP_QCOM_CPUFREQ_STARTING",
+					qcom_cpufreq_starting_cpu,
+					qcom_cpufreq_dying_cpu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
+					"QCOM_CPUFREQ_PREPARE",
+					qcom_cpufreq_up_cpu,
+					qcom_cpufreq_dead_cpu);
+	if (!ret)
+		return ret;
+	cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+	return ret;
+}
+core_initcall(msm_cpufreq_early_register);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c24e57..817f3b9 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -72,6 +72,42 @@
 	  through sysfs entries. The passive governor recommends that
 	  devfreq device uses the OPP table to get the frequency/voltage.
 
+config DEVFREQ_GOV_CPUFREQ
+	tristate "CPUfreq"
+	depends on CPU_FREQ
+	help
+	  Chooses frequency based on the online CPUs' current frequency and a
+	  CPU frequency to device frequency mapping table(s). This governor
+	  can be useful for controlling devices such as DDR, cache, CCI, etc.
+
+config QCOM_BIMC_BWMON
+	tristate "QCOM BIMC Bandwidth monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The BIMC Bandwidth monitor hardware allows for monitoring the
+	  traffic coming from each master port connected to the BIMC. It also
+	  has the capability to raise an IRQ when the count exceeds a
+	  programmable limit.
+
+config DEVFREQ_GOV_QCOM_BW_HWMON
+	tristate "HW monitor based governor for device BW"
+	depends on QCOM_BIMC_BWMON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote by using BIMC counters to monitor
+	  the CPU's use of DDR. Since this uses target specific counters it
+	  can conflict with existing profiling tools.  This governor is unlikely
+	  to be useful for non-QCOM devices.
+
+config DEVFREQ_GOV_QCOM_CACHE_HWMON
+	tristate "HW monitor based governor for cache frequency"
+	help
+	  HW monitor based governor for cache frequency scaling. This
+	  governor sets the cache frequency by using PM counters to monitor the
+	  CPU's use of cache. Since this governor uses some of the PM counters
+	  it can conflict with existing profiling tools. This governor is
+	  unlikely to be useful for other devices.
+
 comment "DEVFREQ Drivers"
 
 config DEVFREQ_GOV_QCOM_ADRENO_TZ
@@ -121,6 +157,30 @@
           It sets the frequency for the memory controller and reads the usage counts
           from hardware.
 
+config DEVFREQ_SIMPLE_DEV
+	tristate "Device driver for simple clock device with no status info"
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	help
+	  Device driver for simple devices that control their frequency using
+	  clock APIs and don't have any form of status reporting.
+
+config QCOM_DEVFREQ_DEVBW
+	bool "QCOM DEVFREQ device for device master <-> slave IB/AB BW voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	default n
+	help
+	  Different devfreq governors use this devfreq device to make CPU to
+	  DDR IB/AB bandwidth votes. This driver provides a SoC topology
+	  agnostic interface so that some of the devfreq governors can be
+	  shared across SoCs.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 0fedc4c..05f4a83 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -5,12 +5,19 @@
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
 obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
+obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ)	+= governor_cpufreq.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_QCOM_BIMC_BWMON)		+= bimc-bwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
+
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
 obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)	+= rk3399_dmc.o
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
+obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
new file mode 100644
index 0000000..df0f4e9
--- /dev/null
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bimc-bwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_bw_hwmon.h"
+
+#define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
+#define GLB_INT_CLR(m)		((m)->global_base + 0x108)
+#define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
+#define MON_INT_STATUS(m)	((m)->base + 0x100)
+#define MON_INT_CLR(m)		((m)->base + 0x108)
+#define	MON_INT_EN(m)		((m)->base + 0x10C)
+#define	MON_EN(m)		((m)->base + 0x280)
+#define MON_CLEAR(m)		((m)->base + 0x284)
+#define MON_CNT(m)		((m)->base + 0x288)
+#define MON_THRES(m)		((m)->base + 0x290)
+#define MON_MASK(m)		((m)->base + 0x298)
+#define MON_MATCH(m)		((m)->base + 0x29C)
+
+struct bwmon_spec {
+	bool wrap_on_thres;
+	bool overflow;
+};
+
+struct bwmon {
+	void __iomem *base;
+	void __iomem *global_base;
+	unsigned int mport;
+	unsigned int irq;
+	const struct bwmon_spec *spec;
+	struct device *dev;
+	struct bw_hwmon hw;
+};
+
+#define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)
+
+static DEFINE_SPINLOCK(glb_lock);
+static void mon_enable(struct bwmon *m)
+{
+	writel_relaxed(0x1, MON_EN(m));
+}
+
+static void mon_disable(struct bwmon *m)
+{
+	writel_relaxed(0x0, MON_EN(m));
+}
+
+static void mon_clear(struct bwmon *m)
+{
+	writel_relaxed(0x1, MON_CLEAR(m));
+	/*
+	 * The counter clear and IRQ clear bits are not in the same 4KB
+	 * region. So, we need to make sure the counter clear is completed
+	 * before we try to clear the IRQ or do any other counter operations.
+	 */
+	mb();
+}
+
+static void mon_irq_enable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val |= 1 << m->mport;
+	writel_relaxed(val, GLB_INT_EN(m));
+	spin_unlock(&glb_lock);
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val |= 0x1;
+	writel_relaxed(val, MON_INT_EN(m));
+}
+
+static void mon_irq_disable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val &= ~(1 << m->mport);
+	writel_relaxed(val, GLB_INT_EN(m));
+	spin_unlock(&glb_lock);
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val &= ~0x1;
+	writel_relaxed(val, MON_INT_EN(m));
+}
+
+static unsigned int mon_irq_status(struct bwmon *m)
+{
+	u32 mval;
+
+	mval = readl_relaxed(MON_INT_STATUS(m));
+
+	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+			readl_relaxed(GLB_INT_STATUS(m)));
+
+	return mval;
+}
+
+static void mon_irq_clear(struct bwmon *m)
+{
+	writel_relaxed(0x3, MON_INT_CLR(m));
+	mb();
+	writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+	mb();
+}
+
+static void mon_set_limit(struct bwmon *m, u32 count)
+{
+	writel_relaxed(count, MON_THRES(m));
+	dev_dbg(m->dev, "Thres: %08x\n", count);
+}
+
+static u32 mon_get_limit(struct bwmon *m)
+{
+	return readl_relaxed(MON_THRES(m));
+}
+
+#define THRES_HIT(status)	(status & BIT(0))
+#define OVERFLOW(status)	(status & BIT(1))
+static unsigned long mon_get_count(struct bwmon *m)
+{
+	unsigned long count, status;
+
+	count = readl_relaxed(MON_CNT(m));
+	status = mon_irq_status(m);
+
+	dev_dbg(m->dev, "Counter: %08lx\n", count);
+
+	if (OVERFLOW(status) && m->spec->overflow)
+		count += 0xFFFFFFFF;
+	if (THRES_HIT(status) && m->spec->wrap_on_thres)
+		count += mon_get_limit(m);
+
+	dev_dbg(m->dev, "Actual Count: %08lx\n", count);
+
+	return count;
+}
+
+/* ********** CPUBW specific code  ********** */
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
+{
+	bytes *= USEC_PER_SEC;
+	do_div(bytes, us);
+	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
+	return bytes;
+}
+
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
+				  unsigned int tolerance_percent)
+{
+	mbps *= (100 + tolerance_percent) * ms;
+	mbps /= 100;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
+
+static unsigned long meas_bw_and_set_irq(struct bw_hwmon *hw,
+					 unsigned int tol, unsigned int us)
+{
+	unsigned long mbps;
+	u32 limit;
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+
+	mbps = mon_get_count(m);
+	mbps = bytes_to_mbps(mbps, us);
+
+	/*
+	 * If the counter wraps on thres, don't set the thres too low.
+	 * Setting it too low runs the risk of the counter wrapping around
+	 * multiple times before the IRQ is processed.
+	 */
+	if (likely(!m->spec->wrap_on_thres))
+		limit = mbps_to_bytes(mbps, sample_ms, tol);
+	else
+		limit = mbps_to_bytes(max(mbps, 400UL), sample_ms, tol);
+
+	mon_set_limit(m, limit);
+
+	mon_clear(m);
+	mon_irq_clear(m);
+	mon_enable(m);
+
+	dev_dbg(m->dev, "MBps = %lu\n", mbps);
+	return mbps;
+}
+
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+	if (mon_irq_status(m)) {
+		update_bw_hwmon(&m->hw);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+	struct bwmon *m = to_bwmon(hw);
+	u32 limit;
+	int ret;
+
+	ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_disable(m);
+
+	limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+	mon_set_limit(m, limit);
+
+	mon_clear(m);
+	mon_irq_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_disable(m);
+	mon_clear(m);
+	mon_irq_clear(m);
+}
+
+static int suspend_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_disable(m);
+	mon_irq_clear(m);
+
+	return 0;
+}
+
+static int resume_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	int ret;
+
+	mon_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+	ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*************************************************************************/
+
+static const struct bwmon_spec spec[] = {
+	{ .wrap_on_thres = true, .overflow = false },
+	{ .wrap_on_thres = false, .overflow = true },
+};
+
+static const struct of_device_id bimc_bwmon_match_table[] = {
+	{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
+	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
+	{}
+};
+
+static int bimc_bwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct bwmon *m;
+	const struct of_device_id *id;
+	int ret;
+	u32 data;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = dev;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+	if (ret) {
+		dev_err(dev, "mport not found!\n");
+		return ret;
+	}
+	m->mport = data;
+
+	id = of_match_device(bimc_bwmon_match_table, dev);
+	if (!id) {
+		dev_err(dev, "Unknown device type!\n");
+		return -ENODEV;
+	}
+	m->spec = id->data;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+	if (!res) {
+		dev_err(dev, "base not found!\n");
+		return -EINVAL;
+	}
+	m->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->base) {
+		dev_err(dev, "Unable map base!\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
+	if (!res) {
+		dev_err(dev, "global_base not found!\n");
+		return -EINVAL;
+	}
+	m->global_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->global_base) {
+		dev_err(dev, "Unable map global_base!\n");
+		return -ENOMEM;
+	}
+
+	m->irq = platform_get_irq(pdev, 0);
+	if (m->irq < 0) {
+		dev_err(dev, "Unable to get IRQ number\n");
+		return m->irq;
+	}
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node)
+		return -EINVAL;
+	m->hw.start_hwmon = &start_bw_hwmon;
+	m->hw.stop_hwmon = &stop_bw_hwmon;
+	m->hw.suspend_hwmon = &suspend_bw_hwmon;
+	m->hw.resume_hwmon = &resume_bw_hwmon;
+	m->hw.meas_bw_and_set_irq = &meas_bw_and_set_irq;
+
+	ret = register_bw_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "Dev BW hwmon registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bimc_bwmon_driver = {
+	.probe = bimc_bwmon_driver_probe,
+	.driver = {
+		.name = "bimc-bwmon",
+		.of_match_table = bimc_bwmon_match_table,
+	},
+};
+
+module_platform_driver(bimc_bwmon_driver);
+MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 7309c08..fd9ada6f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1133,19 +1133,26 @@
 	struct devfreq *df = to_devfreq(d);
 	struct device *dev = df->dev.parent;
 	struct dev_pm_opp *opp;
+	unsigned int i = 0, max_state = df->profile->max_state;
+	bool use_opp;
 	ssize_t count = 0;
 	unsigned long freq = 0;
 
 	rcu_read_lock();
-	do {
-		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-		if (IS_ERR(opp))
-			break;
+	use_opp = dev_pm_opp_get_opp_count(dev) > 0;
+	while (use_opp || (!use_opp && i < max_state)) {
+		if (use_opp) {
+			opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+			if (IS_ERR(opp))
+				break;
+		} else {
+			freq = df->profile->freq_table[i++];
+		}
 
 		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 				   "%lu ", freq);
 		freq++;
-	} while (1);
+	}
 	rcu_read_unlock();
 
 	/* Truncate the trailing space */
diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c
new file mode 100644
index 0000000..5c7959c
--- /dev/null
+++ b/drivers/devfreq/devfreq_devbw.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devbw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+/* Has to be ULL to prevent overflow where this macro is used. */
+#define MBYTE (1ULL << 20)
+#define MAX_PATHS	2
+#define DBL_BUF		2
+
+struct dev_data {
+	struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+	struct msm_bus_paths bw_levels[DBL_BUF];
+	struct msm_bus_scale_pdata bw_data;
+	int num_paths;
+	u32 bus_client;
+	int cur_idx;
+	int cur_ab;
+	int cur_ib;
+	long gov_ab;
+	struct devfreq *df;
+	struct devfreq_dev_profile dp;
+};
+
+static int set_bw(struct device *dev, int new_ib, int new_ab)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	int i, ret;
+
+	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
+		return 0;
+
+	i = (d->cur_idx + 1) % DBL_BUF;
+
+	d->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[0].ab = new_ab / d->num_paths * MBYTE;
+	d->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[1].ab = new_ab / d->num_paths * MBYTE;
+
+	dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+	ret = msm_bus_scale_client_update_request(d->bus_client, i);
+	if (ret) {
+		dev_err(dev, "bandwidth request failed (%d)\n", ret);
+	} else {
+		d->cur_idx = i;
+		d->cur_ib = new_ib;
+		d->cur_ab = new_ab;
+	}
+
+	return ret;
+}
+
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state-1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int devbw_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	find_freq(&d->dp, freq, flags);
+	return set_bw(dev, *freq, d->gov_ab);
+}
+
+static int devbw_get_dev_status(struct device *dev,
+				struct devfreq_dev_status *stat)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	stat->private_data = &d->gov_ab;
+	return 0;
+}
+
+#define PROP_PORTS "qcom,src-dst-ports"
+#define PROP_TBL "qcom,bw-tbl"
+#define PROP_ACTIVE "qcom,active-only"
+
+int devfreq_add_devbw(struct device *dev)
+{
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, ports[MAX_PATHS * 2];
+	const char *gov_name;
+	int ret, len, i, num_paths;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	dev_set_drvdata(dev, d);
+
+	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
+		len /= sizeof(ports[0]);
+		if (len % 2 || len > ARRAY_SIZE(ports)) {
+			dev_err(dev, "Unexpected number of ports\n");
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
+						 ports, len);
+		if (ret)
+			return ret;
+
+		num_paths = len / 2;
+	} else {
+		return -EINVAL;
+	}
+
+	d->bw_levels[0].vectors = &d->vectors[0];
+	d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
+	d->bw_data.usecase = d->bw_levels;
+	d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
+	d->bw_data.name = dev_name(dev);
+	d->bw_data.active_only = of_property_read_bool(dev->of_node,
+							PROP_ACTIVE);
+
+	for (i = 0; i < num_paths; i++) {
+		d->bw_levels[0].vectors[i].src = ports[2 * i];
+		d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+		d->bw_levels[1].vectors[i].src = ports[2 * i];
+		d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+	}
+	d->bw_levels[0].num_paths = num_paths;
+	d->bw_levels[1].num_paths = num_paths;
+	d->num_paths = num_paths;
+
+	p = &d->dp;
+	p->polling_ms = 50;
+	p->target = devbw_target;
+	p->get_dev_status = devbw_get_dev_status;
+
+	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
+		len /= sizeof(*data);
+		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		p->freq_table = devm_kzalloc(dev,
+					     len * sizeof(*p->freq_table),
+					     GFP_KERNEL);
+		if (!p->freq_table)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
+						 data, len);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < len; i++)
+			p->freq_table[i] = data[i];
+		p->max_state = len;
+	}
+
+	d->bus_client = msm_bus_scale_register_client(&d->bw_data);
+	if (!d->bus_client) {
+		dev_err(dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		msm_bus_scale_unregister_client(d->bus_client);
+		return PTR_ERR(d->df);
+	}
+
+	return 0;
+}
+
+int devfreq_remove_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	msm_bus_scale_unregister_client(d->bus_client);
+	devfreq_remove_device(d->df);
+	return 0;
+}
+
+int devfreq_suspend_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_suspend_device(d->df);
+}
+
+int devfreq_resume_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_resume_device(d->df);
+}
+
+static int devfreq_devbw_probe(struct platform_device *pdev)
+{
+	return devfreq_add_devbw(&pdev->dev);
+}
+
+static int devfreq_devbw_remove(struct platform_device *pdev)
+{
+	return devfreq_remove_devbw(&pdev->dev);
+}
+
+static const struct of_device_id devbw_match_table[] = {
+	{ .compatible = "qcom,devbw" },
+	{}
+};
+
+static struct platform_driver devbw_driver = {
+	.probe = devfreq_devbw_probe,
+	.remove = devfreq_devbw_remove,
+	.driver = {
+		.name = "devbw",
+		.of_match_table = devbw_match_table,
+	},
+};
+
+module_platform_driver(devbw_driver);
+MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_simple_dev.c b/drivers/devfreq/devfreq_simple_dev.c
new file mode 100644
index 0000000..a21f3f3
--- /dev/null
+++ b/drivers/devfreq/devfreq_simple_dev.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devfreq-simple-dev: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <trace/events/power.h>
+
+/* Per-device state: the clock being scaled plus its devfreq handle/profile. */
+struct dev_data {
+	struct clk *clk;
+	struct devfreq *df;
+	struct devfreq_dev_profile profile;
+};
+
+/*
+ * Clamp *freq (kHz) to the closest entry in the profile's freq_table.
+ * The whole table is scanned, so it need not be sorted.  With
+ * DEVFREQ_FLAG_LEAST_UPPER_BOUND the largest entry not above *freq is
+ * chosen; otherwise the smallest entry not below it.  If *freq is out of
+ * range, the result falls back to freq_table[0] / the last entry.
+ */
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state-1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+/*
+ * devfreq target callback: pick the closest supported frequency (kHz,
+ * honoring @flags) and program the clock with it.
+ */
+static int dev_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	long rfreq;
+
+	find_freq(&d->profile, freq, flags);
+
+	/* clk_round_rate() returns a plain long: negative means errno. */
+	rfreq = clk_round_rate(d->clk, *freq * 1000);
+	if (rfreq < 0) {
+		dev_err(dev, "devfreq: Cannot find matching frequency for %lu\n",
+			*freq);
+		return rfreq;
+	}
+
+	return clk_set_rate(d->clk, rfreq);
+}
+
+/* devfreq get_cur_freq callback: report the current clock rate in kHz. */
+static int dev_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	unsigned long f;
+
+	f = clk_get_rate(d->clk);
+	/*
+	 * NOTE(review): clk_get_rate() returns a plain rate and does not
+	 * encode errnos, so this IS_ERR_VALUE() check looks dead — confirm.
+	 */
+	if (IS_ERR_VALUE(f))
+		return f;
+	*freq = f / 1000;
+	return 0;
+}
+
+/* DT property holding the supported frequencies, in kHz. */
+#define PROP_TBL "freq-tbl-khz"
+/*
+ * Probe: read the frequency table from DT, keep only the rates the clock
+ * can actually round to, and register a devfreq device for the clock.
+ * Governor defaults to "performance" unless a "governor" DT property is set.
+ */
+static int devfreq_clock_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, poll;
+	const char *gov_name;
+	int ret, len, i, j;
+	unsigned long f;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	d->clk = devm_clk_get(dev, "devfreq_clk");
+	if (IS_ERR(d->clk))
+		return PTR_ERR(d->clk);
+
+	if (!of_find_property(dev->of_node, PROP_TBL, &len))
+		return -EINVAL;
+
+	/* len is returned in bytes; convert to number of u32 entries. */
+	len /= sizeof(*data);
+	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	p = &d->profile;
+	p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
+				     GFP_KERNEL);
+	if (!p->freq_table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
+	if (ret)
+		return ret;
+
+	/* Keep only entries the clock framework can round to a real rate. */
+	j = 0;
+	for (i = 0; i < len; i++) {
+		f = clk_round_rate(d->clk, data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			dev_warn(dev, "Unable to find dev rate for %d KHz",
+				 data[i]);
+		else
+			p->freq_table[j++] = f / 1000;
+	}
+	p->max_state = j;
+	devm_kfree(dev, data);
+
+	if (p->max_state == 0) {
+		dev_err(dev, "Error parsing property %s!\n", PROP_TBL);
+		return -EINVAL;
+	}
+
+	p->target = dev_target;
+	p->get_cur_freq = dev_get_cur_freq;
+	ret = dev_get_cur_freq(dev, &p->initial_freq);
+	if (ret)
+		return ret;
+
+	/* Default polling interval; overridable from DT. */
+	p->polling_ms = 50;
+	if (!of_property_read_u32(dev->of_node, "polling-ms", &poll))
+		p->polling_ms = poll;
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df))
+		return PTR_ERR(d->df);
+
+	return 0;
+}
+
+/* Remove: unregister the devfreq node; devm frees the rest. */
+static int devfreq_clock_remove(struct platform_device *pdev)
+{
+	struct dev_data *d = platform_get_drvdata(pdev);
+
+	devfreq_remove_device(d->df);
+
+	return 0;
+}
+
+/* OF match table and platform-driver registration boilerplate. */
+static const struct of_device_id devfreq_simple_match_table[] = {
+	{ .compatible = "devfreq-simple-dev" },
+	{}
+};
+
+static struct platform_driver devfreq_clock_driver = {
+	.probe = devfreq_clock_probe,
+	.remove = devfreq_clock_remove,
+	.driver = {
+		.name = "devfreq-simple-dev",
+		.of_match_table = devfreq_simple_match_table,
+	},
+};
+module_platform_driver(devfreq_clock_driver);
+MODULE_DESCRIPTION("Devfreq driver for setting generic device clock frequency");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
new file mode 100644
index 0000000..400943a
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+/* Per-devfreq-device governor state for one registered BW HW monitor. */
+struct hwmon_node {
+	unsigned int tolerance_percent;	/* IRQ re-arm tolerance */
+	unsigned int guard_band_mbps;	/* headroom added to measured BW */
+	unsigned int decay_rate;	/* % weight of new sample when BW falls */
+	unsigned int io_percent;	/* freq = BW * 100 / io_percent */
+	unsigned int bw_step;		/* AB vote rounding granularity (MBps) */
+	unsigned long prev_ab;		/* last computed bandwidth (MBps) */
+	unsigned long *dev_ab;		/* device's AB vote slot; NULL if none */
+	unsigned long resume_freq;	/* freq/AB saved across gov_suspend() */
+	unsigned long resume_ab;
+	ktime_t prev_ts;		/* start of current measurement window */
+	bool mon_started;
+	struct list_head list;		/* entry in hwmon_list */
+	void *orig_data;		/* df->data saved while governor active */
+	struct bw_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+/* All registered BW hwmon nodes; protected by list_lock. */
+static LIST_HEAD(hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+/* Users of the shared "bw_hwmon" governor; protected by state_lock. */
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+/* Generate a sysfs show handler for a struct hwmon_node member. */
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+/*
+ * Generate a sysfs store handler that parses an unsigned int and clamps
+ * it to [_min, _max].  kstrtouint() is used rather than sscanf() so that
+ * trailing garbage is rejected, matching governor_cache_hwmon.c and
+ * checkpatch guidance.
+ */
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+/* Clamp range for the devfreq polling interval (ms). */
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+/*
+ * Sample the HW counters: return the bandwidth (MBps) measured over the
+ * window since prev_ts and re-arm the monitor IRQ with the configured
+ * tolerance.  Resets prev_ts to start the next window.
+ */
+static unsigned long measure_bw_and_set_irq(struct hwmon_node *node)
+{
+	ktime_t ts;
+	unsigned int us;
+	unsigned long mbps;
+	struct bw_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short work
+	 * to be interrupted by other tasks and cause the measurements to be
+	 * wrong. Not blocking interrupts to avoid affecting interrupt
+	 * latency and since they should be short anyway because they run in
+	 * atomic context.
+	 */
+	preempt_disable();
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (!us)
+		us = 1;
+
+	mbps = hw->meas_bw_and_set_irq(hw, node->tolerance_percent, us);
+	node->prev_ts = ts;
+
+	preempt_enable();
+
+	dev_dbg(hw->df->dev.parent, "BW MBps = %6lu, period = %u\n", mbps, us);
+	trace_bw_hwmon_meas(dev_name(hw->df->dev.parent),
+				mbps,
+				us,
+				0);
+
+	return mbps;
+}
+
+/*
+ * Convert a measured bandwidth (MBps) into a frequency request and an
+ * optional AB (average bandwidth) vote.  Rising bandwidth is adopted
+ * immediately; falling bandwidth is decayed by decay_rate percent so the
+ * vote isn't dropped too aggressively.
+ *
+ * mbps/new_bw are unsigned long: the caller passes an unsigned long and
+ * the previous int type both truncated it and risked overflow in the
+ * decay and *100 arithmetic.
+ */
+static void compute_bw(struct hwmon_node *node, unsigned long mbps,
+			unsigned long *freq, unsigned long *ab)
+{
+	unsigned long new_bw;
+
+	mbps += node->guard_band_mbps;
+
+	if (mbps > node->prev_ab) {
+		new_bw = mbps;
+	} else {
+		new_bw = mbps * node->decay_rate
+			+ node->prev_ab * (100 - node->decay_rate);
+		new_bw /= 100;
+	}
+
+	node->prev_ab = new_bw;
+	if (ab)
+		*ab = roundup(new_bw, node->bw_step);
+	*freq = (new_bw * 100) / node->io_percent;
+	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
+				new_bw,
+				*freq,
+				0,
+				0);
+}
+
+/*
+ * Match a devfreq node to its registered hwmon_node: by device, by OF
+ * node, or — for monitors registered with neither — by private governor.
+ */
+static struct hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node ||
+		    (!node->hw->dev && !node->hw->of_node &&
+		     node->gov == df->governor)) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+#define TOO_SOON_US	(1 * USEC_PER_MSEC)
+/*
+ * IRQ-driven update entry point for HW monitor drivers: re-evaluate the
+ * devfreq frequency outside the normal polling interval.  Returns -EBUSY
+ * if monitoring is currently stopped/suspended.
+ */
+int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	ktime_t ts;
+	unsigned int us;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = find_hwmon_node(df);
+	if (!node)
+		return -ENODEV;
+
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalc bandwidth if the interrupt comes right after a
+	 * previous bandwidth calculation.  This is done for two reasons:
+	 *
+	 * 1. Sampling the BW during a very short duration can result in a
+	 *    very inaccurate measurement due to very short bursts.
+	 * 2. This can only happen if the limit was hit very close to the end
+	 *    of the previous sample period. Which means the current BW
+	 *    estimate is not very off and doesn't need to be readjusted.
+	 */
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (us > TOO_SOON_US) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		if (ret)
+			dev_err(df->dev.parent,
+				"Unable to update freq on request!\n");
+		mutex_unlock(&df->lock);
+	}
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+/*
+ * Start (init=true) or resume (init=false) the HW monitor and the devfreq
+ * polling for this node.  On init the monitor is primed with the BW
+ * implied by the current frequency and io_percent.
+ */
+static int start_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	unsigned long mbps;
+	int ret;
+
+	node->prev_ts = ktime_get();
+
+	if (init) {
+		node->prev_ab = 0;
+		node->resume_freq = 0;
+		node->resume_ab = 0;
+		mbps = (df->previous_freq * node->io_percent) / 100;
+		ret = hw->start_hwmon(hw, mbps);
+	} else {
+		ret = hw->resume_hwmon(hw);
+	}
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	if (init)
+		devfreq_monitor_start(df);
+	else
+		devfreq_monitor_resume(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+/*
+ * Counterpart of start_monitor(): init=true stops the monitor for good,
+ * init=false only suspends it.  mon_started is cleared first so the IRQ
+ * path (update_bw_hwmon) bails out while we tear down.
+ */
+static void stop_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	if (init) {
+		devfreq_monitor_stop(df);
+		hw->stop_hwmon(hw);
+	} else {
+		devfreq_monitor_suspend(df);
+		hw->suspend_hwmon(hw);
+	}
+
+}
+
+/*
+ * Bind this devfreq node to its registered HW monitor and start
+ * monitoring.  While active, df->data points at the hwmon_node; the
+ * original governor data is stashed in orig_data and restored on stop.
+ */
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+	struct devfreq_dev_status stat;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	/*
+	 * Devices that take AB votes expose the vote slot through
+	 * dev_status.private_data; node->dev_ab stays NULL otherwise.
+	 */
+	stat.private_data = NULL;
+	if (df->profile->get_dev_status)
+		ret = df->profile->get_dev_status(df->dev.parent, &stat);
+	if (ret || !stat.private_data)
+		dev_warn(dev, "Device doesn't take AB votes!\n");
+	else
+		node->dev_ab = stat.private_data;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	/*
+	 * Capture the error so a failed start isn't reported as success
+	 * (the return value used to be discarded, leaving ret == 0).
+	 */
+	ret = start_monitor(df, true);
+	if (ret)
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df, true);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	node->dev_ab = NULL;
+	return ret;
+}
+
+/* Undo gov_start(): remove sysfs, stop the monitor, restore df->data. */
+static void gov_stop(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df, true);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	/*
+	 * Not all governors know about this additional extended device
+	 * configuration. To avoid leaving the extended configuration at a
+	 * stale state, set it to 0 and let the next governor take it from
+	 * there.
+	 */
+	if (node->dev_ab)
+		*node->dev_ab = 0;
+	node->dev_ab = NULL;
+}
+
+/*
+ * Suspend the governor: save the current freq/AB vote, suspend the HW
+ * monitor and let update_devfreq() drop the device to its idle vote (the
+ * get_freq suspend path reports resume_freq, still 0 at that point).
+ */
+static int gov_suspend(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	unsigned long resume_freq = df->previous_freq;
+	unsigned long resume_ab = 0;
+
+	if (!node->hw->suspend_hwmon)
+		return -ENOSYS;
+
+	if (node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already suspended!\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * dev_ab is NULL when the device doesn't take AB votes (see
+	 * gov_start()); dereferencing it unconditionally crashed here.
+	 */
+	if (node->dev_ab)
+		resume_ab = *node->dev_ab;
+
+	stop_monitor(df, false);
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = resume_freq;
+	node->resume_ab = resume_ab;
+
+	return 0;
+}
+
+/*
+ * Resume the governor: restore the saved freq/AB vote via update_devfreq()
+ * (get_freq still takes the suspend path since the monitor isn't started
+ * yet), then restart the HW monitor.
+ */
+static int gov_resume(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+
+	if (!node->hw->resume_hwmon)
+		return -ENOSYS;
+
+	if (!node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already resumed!\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = 0;
+	node->resume_ab = 0;
+
+	return start_monitor(df, false);
+}
+
+/*
+ * devfreq get_target_freq callback: while suspended, report the saved
+ * resume freq/AB; otherwise sample the HW monitor and compute a new vote.
+ */
+static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	unsigned long mbps;
+	struct hwmon_node *node = df->data;
+
+	/* Suspend/resume sequence */
+	if (!node->mon_started) {
+		*freq = node->resume_freq;
+		/* dev_ab is NULL when the device takes no AB votes. */
+		if (node->dev_ab)
+			*node->dev_ab = node->resume_ab;
+		return 0;
+	}
+
+	mbps = measure_bw_and_set_irq(node);
+	compute_bw(node, mbps, freq, node->dev_ab);
+
+	return 0;
+}
+
+/* Sysfs tunables; bounds are enforced by the store_attr() handlers. */
+gov_attr(tolerance_percent, 0U, 30U);
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_tolerance_percent.attr,
+	&dev_attr_guard_band_mbps.attr,
+	&dev_attr_decay_rate.attr,
+	&dev_attr_io_percent.attr,
+	&dev_attr_bw_step.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "bw_hwmon",
+	.attrs = dev_attr,
+};
+
+/*
+ * Governor event handler: wires START/STOP/INTERVAL/SUSPEND/RESUME events
+ * to the gov_* helpers.  Polling intervals are clamped to [MIN_MS, MAX_MS].
+ */
+static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		/* Clamp the device's requested polling interval. */
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		ret = gov_suspend(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to suspend BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		ret = gov_resume(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+/* Shared governor used by monitors registered without a private one. */
+static struct devfreq_governor devfreq_gov_bw_hwmon = {
+	.name = "bw_hwmon",
+	.get_target_freq = devfreq_bw_hwmon_get_freq,
+	.event_handler = devfreq_bw_hwmon_ev_handler,
+};
+
+/*
+ * Register a BW HW monitor.  At least one of @hwmon->gov (private
+ * governor), @hwmon->dev or @hwmon->of_node must be set so the node can
+ * later be matched by find_hwmon_node().
+ */
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
+{
+	int ret = 0;
+	struct hwmon_node *node;
+	struct attribute_group *attr_grp;
+
+	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	if (hwmon->gov) {
+		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
+		if (!attr_grp)
+			return -ENOMEM;
+
+		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
+		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
+		attr_grp->name = hwmon->gov->name;
+		attr_grp->attrs = dev_attr;
+
+		node->gov = hwmon->gov;
+		node->attr_grp = attr_grp;
+	} else {
+		node->gov = &devfreq_gov_bw_hwmon;
+		node->attr_grp = &dev_attr_group;
+	}
+
+	/* Default tunables; adjustable at runtime through sysfs. */
+	node->tolerance_percent = 10;
+	node->guard_band_mbps = 100;
+	node->decay_rate = 90;
+	node->io_percent = 16;
+	node->bw_step = 190;
+	node->hw = hwmon;
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &hwmon_list);
+	mutex_unlock(&list_lock);
+
+	if (hwmon->gov) {
+		ret = devfreq_add_governor(hwmon->gov);
+	} else {
+		mutex_lock(&state_lock);
+		if (!use_cnt)
+			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
+		if (!ret)
+			use_cnt++;
+		mutex_unlock(&state_lock);
+	}
+
+	if (!ret) {
+		dev_info(dev, "BW HWmon governor registered.\n");
+	} else {
+		dev_err(dev, "BW HWmon governor registration failed!\n");
+		/*
+		 * Don't leave a stale node on the global list when governor
+		 * registration fails; find_hwmon_node() would otherwise
+		 * match an unusable entry later.
+		 */
+		mutex_lock(&list_lock);
+		list_del(&node->list);
+		mutex_unlock(&list_lock);
+	}
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.h b/drivers/devfreq/governor_bw_hwmon.h
new file mode 100644
index 0000000..8c368e5
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_BW_HWMON_H
+#define _GOVERNOR_BW_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct bw_hwmon - dev BW HW monitor info
+ * @start_hwmon:		Start the HW monitoring of the dev BW
+ * @stop_hwmon:			Stop the HW monitoring of dev BW
+ * @is_valid_irq:		Check whether the IRQ was triggered by the
+ *				counters used to monitor dev BW.
+ * @meas_bw_and_set_irq:	Return the measured bandwidth and set up the
+ *				IRQ to fire if the usage exceeds current
+ *				measurement by @tol percent.
+ * @irq:			IRQ number that corresponds to this HW
+ *				monitor.
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @gov:			devfreq_governor struct that should be used
+ *				when registering this HW monitor with devfreq.
+ *				Only the name field is expected to be
+ *				initialized.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ *
+ * One of dev, of_node or governor_name needs to be specified for a
+ * successful registration.
+ *
+ */
+struct bw_hwmon {
+	int (*start_hwmon)(struct bw_hwmon *hw, unsigned long mbps);
+	void (*stop_hwmon)(struct bw_hwmon *hw);
+	/*
+	 * Optional suspend/resume hooks; the governor checks for their
+	 * presence before allowing DEVFREQ_GOV_SUSPEND/RESUME.
+	 */
+	int (*suspend_hwmon)(struct bw_hwmon *hw);
+	int (*resume_hwmon)(struct bw_hwmon *hw);
+	unsigned long (*meas_bw_and_set_irq)(struct bw_hwmon *hw,
+					unsigned int tol, unsigned int us);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq_governor *gov;
+
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
+int update_bw_hwmon(struct bw_hwmon *hwmon);
+#else
+static inline int register_bw_hwmon(struct device *dev,
+					struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+/*
+ * Must be static inline like the stub above: a non-static definition in a
+ * header causes multiple-definition link errors for every includer.
+ */
+static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_BW_HWMON_H */
diff --git a/drivers/devfreq/governor_cache_hwmon.c b/drivers/devfreq/governor_cache_hwmon.c
new file mode 100644
index 0000000..89c012a
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cache-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+#include "governor_cache_hwmon.h"
+
+/* Per-devfreq-device governor state for one registered cache HW monitor. */
+struct cache_hwmon_node {
+	unsigned int cycles_per_low_req;
+	unsigned int cycles_per_med_req;
+	unsigned int cycles_per_high_req;
+	unsigned int min_busy;		/* clamp range for busy_percent */
+	unsigned int max_busy;
+	unsigned int tolerance_mrps;	/* IRQ re-arm tolerance */
+	unsigned int guard_band_mhz;	/* headroom added to computed freq */
+	unsigned int decay_rate;	/* % weight of new sample when falling */
+	unsigned long prev_mhz;		/* last computed freq, pre guard band */
+	ktime_t prev_ts;		/* start of current measurement window */
+	bool mon_started;
+	struct list_head list;		/* entry in cache_hwmon_list */
+	void *orig_data;		/* df->data saved while governor active */
+	struct cache_hwmon *hw;
+	struct attribute_group *attr_grp;
+};
+
+/* All registered cache hwmon nodes; protected by list_lock. */
+static LIST_HEAD(cache_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+/* Users of the "cache_hwmon" governor; protected by state_lock. */
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+/* Generate a sysfs show handler for a struct cache_hwmon_node member. */
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+/*
+ * Generate a sysfs store handler that parses an unsigned int and clamps
+ * it to [_min, _max].  Use kstrtouint(): val is unsigned int, and the
+ * previous kstrtoint() call passed an unsigned int * where an int * is
+ * expected (incompatible-pointer-types).
+ */
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	int ret;							\
+	unsigned int val;						\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+/* Clamp range for the devfreq polling interval (ms). */
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+/* Match a devfreq node to its registered monitor by device or OF node. */
+static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct cache_hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &cache_hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+/*
+ * Sample the HW counters into @stat for the window since prev_ts and
+ * re-arm the monitor IRQ with the configured tolerance.
+ * NOTE(review): despite the unsigned long return type, this always
+ * returns 0 and the caller ignores the value.
+ */
+static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
+			struct mrps_stats *stat)
+{
+	ktime_t ts;
+	unsigned int us;
+	struct cache_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short work
+	 * to be interrupted by other tasks and cause the measurements to be
+	 * wrong. Not blocking interrupts to avoid affecting interrupt
+	 * latency and since they should be short anyway because they run in
+	 * atomic context.
+	 */
+	preempt_disable();
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (!us)
+		us = 1;
+
+	hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
+	node->prev_ts = ts;
+
+	preempt_enable();
+
+	dev_dbg(hw->df->dev.parent,
+		"stat H=%3lu, M=%3lu, T=%3lu, b=%3u, f=%4lu, us=%d\n",
+		 stat->high, stat->med, stat->high + stat->med,
+		 stat->busy_percent, hw->df->previous_freq / 1000, us);
+
+	return 0;
+}
+
+/*
+ * Convert measured request rates into a cache frequency (kHz): weight the
+ * high/med/low request rates by their cycles-per-request costs, scale by
+ * the clamped busy percentage, decay downward movements by decay_rate,
+ * and finally add the guard band.
+ */
+static void compute_cache_freq(struct cache_hwmon_node *node,
+		struct mrps_stats *mrps, unsigned long *freq)
+{
+	unsigned long new_mhz;
+	unsigned int busy;
+
+	new_mhz = mrps->high * node->cycles_per_high_req
+		+ mrps->med * node->cycles_per_med_req
+		+ mrps->low * node->cycles_per_low_req;
+
+	busy = max(node->min_busy, mrps->busy_percent);
+	busy = min(node->max_busy, busy);
+
+	new_mhz *= 100;
+	new_mhz /= busy;
+
+	if (new_mhz < node->prev_mhz) {
+		new_mhz = new_mhz * node->decay_rate + node->prev_mhz
+				* (100 - node->decay_rate);
+		new_mhz /= 100;
+	}
+	node->prev_mhz = new_mhz;
+
+	new_mhz += node->guard_band_mhz;
+	*freq = new_mhz * 1000;
+}
+
+#define TOO_SOON_US	(1 * USEC_PER_MSEC)
+/*
+ * IRQ-driven update entry point for monitor drivers: re-evaluate the
+ * devfreq frequency outside the normal polling interval.  Returns -EBUSY
+ * if monitoring is currently stopped.
+ */
+int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	struct cache_hwmon_node *node;
+	struct devfreq *df;
+	ktime_t ts;
+	unsigned int us;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalc cache freq if the interrupt comes right after a
+	 * previous cache freq calculation.  This is done for two reasons:
+	 *
+	 * 1. Sampling the cache request during a very short duration can
+	 *    result in a very inaccurate measurement due to very short
+	 *    bursts.
+	 * 2. This can only happen if the limit was hit very close to the end
+	 *    of the previous sample period. Which means the current cache
+	 *    request estimate is not very off and doesn't need to be
+	 *    readjusted.
+	 */
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (us > TOO_SOON_US) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		if (ret)
+			dev_err(df->dev.parent,
+				"Unable to update freq on request!\n");
+		mutex_unlock(&df->lock);
+	}
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+/* devfreq get_target_freq callback: sample counters and compute a freq. */
+static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct mrps_stats stat;
+	struct cache_hwmon_node *node = df->data;
+
+	memset(&stat, 0, sizeof(stat));
+	measure_mrps_and_set_irq(node, &stat);
+	compute_cache_freq(node, &stat, freq);
+
+	return 0;
+}
+
+/* Sysfs tunables; bounds are enforced by the store_attr() handlers. */
+gov_attr(cycles_per_low_req, 1U, 100U);
+gov_attr(cycles_per_med_req, 1U, 100U);
+gov_attr(cycles_per_high_req, 1U, 100U);
+gov_attr(min_busy, 1U, 100U);
+gov_attr(max_busy, 1U, 100U);
+gov_attr(tolerance_mrps, 0U, 100U);
+gov_attr(guard_band_mhz, 0U, 500U);
+gov_attr(decay_rate, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_cycles_per_low_req.attr,
+	&dev_attr_cycles_per_med_req.attr,
+	&dev_attr_cycles_per_high_req.attr,
+	&dev_attr_min_busy.attr,
+	&dev_attr_max_busy.attr,
+	&dev_attr_tolerance_mrps.attr,
+	&dev_attr_guard_band_mhz.attr,
+	&dev_attr_decay_rate.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cache_hwmon",
+	.attrs = dev_attr,
+};
+
+/*
+ * Bind this devfreq node to its registered monitor, prime the monitor
+ * with the request rate implied by the current frequency, and start
+ * polling.  df->data points at the node while the governor is active.
+ */
+static int start_monitoring(struct devfreq *df)
+{
+	int ret;
+	struct mrps_stats mrps;
+	struct device *dev = df->dev.parent;
+	struct cache_hwmon_node *node;
+	struct cache_hwmon *hw;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	node->prev_ts = ktime_get();
+	node->prev_mhz = 0;
+	/* Invert compute_cache_freq() to seed the high-priority rate. */
+	mrps.high = (df->previous_freq / 1000) - node->guard_band_mhz;
+	mrps.high /= node->cycles_per_high_req;
+	mrps.med = mrps.low = 0;
+
+	ret = hw->start_hwmon(hw, &mrps);
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor!\n");
+		goto err_start;
+	}
+
+	devfreq_monitor_start(df);
+	node->mon_started = true;
+
+	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
+	if (ret) {
+		dev_err(dev, "Error creating sys entries!\n");
+		goto sysfs_fail;
+	}
+
+	return 0;
+
+sysfs_fail:
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	return ret;
+}
+
+/* Undo start_monitoring(): remove sysfs, stop the monitor, restore df->data. */
+static void stop_monitoring(struct devfreq *df)
+{
+	struct cache_hwmon_node *node = df->data;
+	struct cache_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+}
+
+/*
+ * Governor event handler.  Only START/STOP/INTERVAL are handled;
+ * NOTE(review): unlike governor_bw_hwmon.c there is no SUSPEND/RESUME
+ * support here — confirm whether that is intentional.
+ */
+static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		/* Clamp the device's requested polling interval. */
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = start_monitoring(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		stop_monitoring(df);
+		dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+	}
+
+	return 0;
+}
+
+/* The "cache_hwmon" devfreq governor, shared by all registered monitors. */
+static struct devfreq_governor devfreq_cache_hwmon = {
+	.name = "cache_hwmon",
+	.get_target_freq = devfreq_cache_hwmon_get_freq,
+	.event_handler = devfreq_cache_hwmon_ev_handler,
+};
+
+/*
+ * Register a cache HW monitor.  @hwmon->dev or @hwmon->of_node must be
+ * set so find_hwmon_node() can match it.  The governor is added once
+ * (refcounted via use_cnt) and the node is only put on the list after
+ * registration succeeds.
+ */
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
+{
+	int ret = 0;
+	struct cache_hwmon_node *node;
+
+	if (!hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	/*
+	 * Default tunables; sysfs-adjustable.  cycles_per_low_req stays 0
+	 * from kzalloc — presumably low-priority requests contribute no
+	 * cycles by default; confirm.
+	 */
+	node->cycles_per_med_req = 20;
+	node->cycles_per_high_req = 35;
+	node->min_busy = 100;
+	node->max_busy = 100;
+	node->tolerance_mrps = 5;
+	node->guard_band_mhz = 100;
+	node->decay_rate = 90;
+	node->hw = hwmon;
+	node->attr_grp = &dev_attr_group;
+
+	mutex_lock(&state_lock);
+	if (!use_cnt) {
+		ret = devfreq_add_governor(&devfreq_cache_hwmon);
+		if (!ret)
+			use_cnt++;
+	}
+	mutex_unlock(&state_lock);
+
+	if (!ret) {
+		dev_info(dev, "Cache HWmon governor registered.\n");
+	} else {
+		dev_err(dev, "Failed to add Cache HWmon governor\n");
+		return ret;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &cache_hwmon_list);
+	mutex_unlock(&list_lock);
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based cache freq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_cache_hwmon.h b/drivers/devfreq/governor_cache_hwmon.h
new file mode 100644
index 0000000..c6baf6e
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_CACHE_HWMON_H
+#define _GOVERNOR_CACHE_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+struct mrps_stats {
+	unsigned long high;
+	unsigned long med;
+	unsigned long low;
+	unsigned int busy_percent;
+};
+
+/**
+ * struct cache_hwmon - devfreq Cache HW monitor info
+ * @start_hwmon:	Start the HW monitoring
+ * @stop_hwmon:		Stop the HW monitoring
+ * @meas_mrps_and_set_irq:	Return the measured count and set up the
+ *				IRQ to fire if usage exceeds current
+ *				measurement by @tol percent.
+ * @dev:		device that this HW monitor can monitor.
+ * @of_node:		OF node of device that this HW monitor can monitor.
+ * @df:			Devfreq node that this HW montior is being used
+ *			for. NULL when not actively in use, and non-NULL
+ *			when in use.
+ */
+struct cache_hwmon {
+	int (*start_hwmon)(struct cache_hwmon *hw, struct mrps_stats *mrps);
+	void (*stop_hwmon)(struct cache_hwmon *hw);
+	unsigned long (*meas_mrps_and_set_irq)(struct cache_hwmon *hw,
+					unsigned int tol, unsigned int us,
+					struct mrps_stats *mrps);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon);
+int update_cache_hwmon(struct cache_hwmon *hwmon);
+#else
+static inline int register_cache_hwmon(struct device *dev,
+				       struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_CACHE_HWMON_H */
diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c
new file mode 100644
index 0000000..bae1d39
--- /dev/null
+++ b/drivers/devfreq/governor_cpufreq.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dev-cpufreq: " fmt
+
+#include <linux/devfreq.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct cpu_state {
+	unsigned int freq;
+	unsigned int min_freq;
+	unsigned int max_freq;
+	bool on;
+	unsigned int first_cpu;
+};
+static struct cpu_state *state[NR_CPUS];
+static int cpufreq_cnt;
+
+struct freq_map {
+	unsigned int cpu_khz;
+	unsigned int target_freq;
+};
+
+struct devfreq_node {
+	struct devfreq *df;
+	void *orig_data;
+	struct device *dev;
+	struct device_node *of_node;
+	struct list_head list;
+	struct freq_map **map;
+	struct freq_map *common_map;
+	unsigned int timeout;
+	struct delayed_work dwork;
+	bool drop;
+	unsigned long prev_tgt;
+};
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", n->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtoint(buf, 10, &val);					\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	n->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static int update_node(struct devfreq_node *node)
+{
+	int ret;
+	struct devfreq *df = node->df;
+
+	if (!df)
+		return 0;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&df->lock);
+	node->drop = false;
+	ret = update_devfreq(df);
+	if (ret) {
+		dev_err(df->dev.parent, "Unable to update frequency\n");
+		goto out;
+	}
+
+	if (!node->timeout)
+		goto out;
+
+	if (df->previous_freq <= df->min_freq)
+		goto out;
+
+	schedule_delayed_work(&node->dwork,
+			      msecs_to_jiffies(node->timeout));
+out:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+static void update_all_devfreqs(void)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list) {
+		update_node(node);
+	}
+}
+
+static void do_timeout(struct work_struct *work)
+{
+	struct devfreq_node *node = container_of(to_delayed_work(work),
+						struct devfreq_node, dwork);
+	struct devfreq *df = node->df;
+
+	mutex_lock(&df->lock);
+	node->drop = true;
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+}
+
+static struct devfreq_node *find_devfreq_node(struct device *dev)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list)
+		if (node->dev == dev || node->of_node == dev->of_node)
+			return node;
+
+	return NULL;
+}
+
+/* ==================== cpufreq part ==================== */
+static void add_policy(struct cpufreq_policy *policy)
+{
+	struct cpu_state *new_state;
+	unsigned int cpu, first_cpu;
+
+	if (state[policy->cpu]) {
+		state[policy->cpu]->freq = policy->cur;
+		state[policy->cpu]->on = true;
+	} else {
+		new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
+		if (!new_state)
+			return;
+
+		first_cpu = cpumask_first(policy->related_cpus);
+		new_state->first_cpu = first_cpu;
+		new_state->freq = policy->cur;
+		new_state->min_freq = policy->cpuinfo.min_freq;
+		new_state->max_freq = policy->cpuinfo.max_freq;
+		new_state->on = true;
+
+		for_each_cpu(cpu, policy->related_cpus)
+			state[cpu] = new_state;
+	}
+}
+
+static int cpufreq_policy_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	switch (event) {
+	case CPUFREQ_CREATE_POLICY:
+		mutex_lock(&state_lock);
+		add_policy(policy);
+		update_all_devfreqs();
+		mutex_unlock(&state_lock);
+		break;
+
+	case CPUFREQ_REMOVE_POLICY:
+		mutex_lock(&state_lock);
+		if (state[policy->cpu]) {
+			state[policy->cpu]->on = false;
+			update_all_devfreqs();
+		}
+		mutex_unlock(&state_lock);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_policy_nb = {
+	.notifier_call = cpufreq_policy_notifier
+};
+
+static int cpufreq_trans_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_state *s;
+
+	if (event != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	mutex_lock(&state_lock);
+
+	s = state[freq->cpu];
+	if (!s)
+		goto out;
+
+	if (s->freq != freq->new) {
+		s->freq = freq->new;
+		update_all_devfreqs();
+	}
+
+out:
+	mutex_unlock(&state_lock);
+	return 0;
+}
+
+static struct notifier_block cpufreq_trans_nb = {
+	.notifier_call = cpufreq_trans_notifier
+};
+
+static int register_cpufreq(void)
+{
+	int ret = 0;
+	unsigned int cpu;
+	struct cpufreq_policy *policy;
+
+	mutex_lock(&state_lock);
+
+	if (cpufreq_cnt)
+		goto cnt_not_zero;
+
+	get_online_cpus();
+	ret = cpufreq_register_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		goto out;
+
+	ret = cpufreq_register_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+		goto out;
+	}
+
+	for_each_online_cpu(cpu) {
+		policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			add_policy(policy);
+			cpufreq_cpu_put(policy);
+		}
+	}
+out:
+	put_online_cpus();
+cnt_not_zero:
+	if (!ret)
+		cpufreq_cnt++;
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+static int unregister_cpufreq(void)
+{
+	int ret = 0;
+	int cpu;
+
+	mutex_lock(&state_lock);
+
+	if (cpufreq_cnt > 1)
+		goto out;
+
+	cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_unregister_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+
+	for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
+		if (!state[cpu])
+			continue;
+		if (state[cpu]->first_cpu == cpu)
+			kfree(state[cpu]);
+		state[cpu] = NULL;
+	}
+
+out:
+	cpufreq_cnt--;
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+/* ==================== devfreq part ==================== */
+
+static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
+{
+	unsigned long *freq_table = df->profile->freq_table;
+	unsigned int cpu_min = state[cpu]->min_freq;
+	unsigned int cpu_max = state[cpu]->max_freq;
+	unsigned int cpu_freq = state[cpu]->freq;
+	unsigned int dev_min, dev_max, cpu_percent;
+
+	if (freq_table) {
+		dev_min = freq_table[0];
+		dev_max = freq_table[df->profile->max_state - 1];
+	} else {
+		if (df->max_freq <= df->min_freq)
+			return 0;
+		dev_min = df->min_freq;
+		dev_max = df->max_freq;
+	}
+
+	cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
+	return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+}
+
+static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
+{
+	struct freq_map *map = NULL;
+	unsigned int cpu_khz = 0, freq;
+	struct devfreq_node *n = df->data;
+
+	if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
+		freq = 0;
+		goto out;
+	}
+
+	if (n->common_map)
+		map = n->common_map;
+	else if (n->map)
+		map = n->map[cpu];
+
+	cpu_khz = state[cpu]->freq;
+
+	if (!map) {
+		freq = interpolate_freq(df, cpu);
+		goto out;
+	}
+
+	while (map->cpu_khz && map->cpu_khz < cpu_khz)
+		map++;
+	if (!map->cpu_khz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	dev_dbg(df->dev.parent, "CPU%u: %u -> dev: %u\n", cpu, cpu_khz, freq);
+	return freq;
+}
+
+static int devfreq_cpufreq_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	unsigned int cpu, tgt_freq = 0;
+	struct devfreq_node *node;
+
+	node = df->data;
+	if (!node) {
+		pr_err("Unable to find devfreq node!\n");
+		return -ENODEV;
+	}
+
+	if (node->drop) {
+		*freq = 0;
+		return 0;
+	}
+
+	for_each_possible_cpu(cpu)
+		tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));
+
+	if (node->timeout && tgt_freq < node->prev_tgt)
+		*freq = 0;
+	else
+		*freq = tgt_freq;
+
+	node->prev_tgt = tgt_freq;
+
+	return 0;
+}
+
+static unsigned int show_table(char *buf, unsigned int len,
+				struct freq_map *map)
+{
+	unsigned int cnt = 0;
+
+	cnt += snprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
+
+	while (map->cpu_khz && cnt < len) {
+		cnt += snprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
+				map->cpu_khz, map->target_freq);
+		map++;
+	}
+	if (cnt < len)
+		cnt += snprintf(buf + cnt, len - cnt, "\n");
+
+	return cnt;
+}
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct devfreq_node *n = df->data;
+	struct freq_map *map;
+	unsigned int cnt = 0, cpu;
+
+	mutex_lock(&state_lock);
+	if (n->common_map) {
+		map = n->common_map;
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Common table for all CPUs:\n");
+		cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+	} else if (n->map) {
+		for_each_possible_cpu(cpu) {
+			map = n->map[cpu];
+			if (!map)
+				continue;
+			cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+					"CPU %u:\n", cpu);
+			if (cnt >= PAGE_SIZE)
+				break;
+			cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+			if (cnt >= PAGE_SIZE)
+				break;
+		}
+	} else {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Device freq interpolated based on CPU freq\n");
+	}
+	mutex_unlock(&state_lock);
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+gov_attr(timeout, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	&dev_attr_timeout.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cpufreq",
+	.attrs = dev_attr,
+};
+
+static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
+{
+	int ret = 0;
+	struct devfreq_node *node;
+	bool alloc = false;
+
+	ret = register_cpufreq();
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+	if (ret) {
+		unregister_cpufreq();
+		return ret;
+	}
+
+	mutex_lock(&state_lock);
+
+	node = find_devfreq_node(devfreq->dev.parent);
+	if (node == NULL) {
+		node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+		if (!node) {
+			pr_err("Out of memory!\n");
+			ret = -ENOMEM;
+			goto alloc_fail;
+		}
+		alloc = true;
+		node->dev = devfreq->dev.parent;
+		list_add_tail(&node->list, &devfreq_list);
+	}
+
+	INIT_DELAYED_WORK(&node->dwork, do_timeout);
+
+	node->df = devfreq;
+	node->orig_data = devfreq->data;
+	devfreq->data = node;
+
+	ret = update_node(node);
+	if (ret)
+		goto update_fail;
+
+	mutex_unlock(&state_lock);
+	return 0;
+
+update_fail:
+	devfreq->data = node->orig_data;
+	if (alloc) {
+		list_del(&node->list);
+		kfree(node);
+	}
+alloc_fail:
+	mutex_unlock(&state_lock);
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+	return ret;
+}
+
+static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
+{
+	struct devfreq_node *node = devfreq->data;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&state_lock);
+	devfreq->data = node->orig_data;
+	if (node->map || node->common_map) {
+		node->df = NULL;
+	} else {
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+}
+
+static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	int ret;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+
+		ret = devfreq_cpufreq_gov_start(devfreq);
+		if (ret) {
+			pr_err("Governor start failed!\n");
+			return ret;
+		}
+		pr_debug("Enabled dev CPUfreq governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+
+		devfreq_cpufreq_gov_stop(devfreq);
+		pr_debug("Disabled dev CPUfreq governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cpufreq = {
+	.name = "cpufreq",
+	.get_target_freq = devfreq_cpufreq_get_freq,
+	.event_handler = devfreq_cpufreq_ev_handler,
+};
+
+#define NUM_COLS	2
+static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct freq_map *tbl;
+
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = kzalloc((nf + 1) * sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		of_property_read_u32_index(of_node, prop_name, j, &data);
+		tbl[i].cpu_khz = data;
+
+		of_property_read_u32_index(of_node, prop_name, j + 1, &data);
+		tbl[i].target_freq = data;
+	}
+	tbl[i].cpu_khz = 0;
+
+	return tbl;
+}
+
+#define PROP_TARGET "target-dev"
+#define PROP_TABLE "cpu-to-dev-map"
+static int add_table_from_of(struct device_node *of_node)
+{
+	struct device_node *target_of_node;
+	struct devfreq_node *node;
+	struct freq_map *common_tbl;
+	struct freq_map **tbl_list = NULL;
+	static char prop_name[] = PROP_TABLE "-999999";
+	int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);
+
+	target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
+	if (!target_of_node)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	common_tbl = read_tbl(of_node, PROP_TABLE);
+	if (!common_tbl) {
+		tbl_list = kzalloc(sizeof(*tbl_list) * NR_CPUS, GFP_KERNEL);
+		if (!tbl_list) {
+			ret = -ENOMEM;
+			goto err_list;
+		}
+
+		for_each_possible_cpu(cpu) {
+			ret = snprintf(prop_name, prop_sz, "%s-%d",
+					PROP_TABLE, cpu);
+			if (ret >= prop_sz) {
+				pr_warn("More CPUs than I can handle!\n");
+				pr_warn("Skipping rest of the tables!\n");
+				break;
+			}
+			tbl_list[cpu] = read_tbl(of_node, prop_name);
+			if (tbl_list[cpu])
+				cnt++;
+		}
+	}
+	if (!common_tbl && !cnt) {
+		ret = -EINVAL;
+		goto err_tbl;
+	}
+
+	mutex_lock(&state_lock);
+	node->of_node = target_of_node;
+	node->map = tbl_list;
+	node->common_map = common_tbl;
+	list_add_tail(&node->list, &devfreq_list);
+	mutex_unlock(&state_lock);
+
+	return 0;
+err_tbl:
+	kfree(tbl_list);
+err_list:
+	kfree(node);
+	return ret;
+}
+
+static int __init devfreq_cpufreq_init(void)
+{
+	int ret;
+	struct device_node *of_par, *of_child;
+
+	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
+	if (of_par) {
+		for_each_child_of_node(of_par, of_child) {
+			ret = add_table_from_of(of_child);
+			if (ret)
+				pr_err("Parsing %s failed!\n", of_child->name);
+			else
+				pr_debug("Parsed %s.\n", of_child->name);
+		}
+		of_node_put(of_par);
+	} else {
+		pr_info("No tables parsed from DT.\n");
+	}
+
+	ret = devfreq_add_governor(&devfreq_cpufreq);
+	if (ret) {
+		pr_err("Governor add failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(devfreq_cpufreq_init);
+
+static void __exit devfreq_cpufreq_exit(void)
+{
+	int ret, cpu;
+	struct devfreq_node *node, *tmp;
+
+	ret = devfreq_remove_governor(&devfreq_cpufreq);
+	if (ret)
+		pr_err("Governor remove failed!\n");
+
+	mutex_lock(&state_lock);
+	list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
+		kfree(node->common_map);
+		for_each_possible_cpu(cpu)
+			kfree(node->map ? node->map[cpu] : NULL);
+		kfree(node->map);
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+}
+module_exit(devfreq_cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index b29a9e8..8a9cf92 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -158,6 +158,12 @@
 			return -ENOMEM;
 
 		sync_file->fence = &array->base;
+
+		/*
+		 * Register for callbacks so that we know when each fence
+		 * in the array is signaled
+		 */
+		fence_enable_sw_signaling(sync_file->fence);
 	}
 
 	return 0;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 015f711..d235fbe 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -691,7 +691,7 @@
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
 
@@ -701,7 +701,7 @@
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
 	if (!ring)
 		return -ENOMEM;
 
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
index 18b2da7..d469869 100644
--- a/drivers/edac/qcom_llcc_edac.c
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -223,10 +223,9 @@
 	int sb_err_ways;
 	u32 synd_reg;
 	u32 synd_val;
-	u32 synd_reg_offset;
 
 	for (i = 0; i < DRP_SYN_REG_CNT; i++) {
-		synd_reg_offset = DRP_ECC_SB_ERR_SYN0 + (i * 4);
+		synd_reg = DRP_ECC_SB_ERR_SYN0 + (i * 4);
 		regmap_read(llcc_map, synd_reg, &synd_val);
 		edac_printk(KERN_CRIT, EDAC_LLCC, "DRP_ECC_SYN%d: 0x%8x\n",
 			i, synd_val);
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
index 0efca1e..a56c7e0 100644
--- a/drivers/esoc/Kconfig
+++ b/drivers/esoc/Kconfig
@@ -61,4 +61,12 @@
 	  by command engine to the external modem. Also allows masking
 	  of certain notifications being sent to the external modem.
 
+config MDM_DBG_REQ_ENG
+	tristate "manual request engine for 4x series external modems"
+	depends on ESOC_MDM_DBG_ENG
+	help
+	  Provides a user interface to handle incoming requests from
+	  the external modem. Allows for debugging of IPC mechanism
+	  between the external modem and the primary soc.
+
 endif
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index b1834e2..6c42f54 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -435,11 +435,12 @@
 {
 	int value;
 	struct esoc_clink *esoc;
+	struct device *dev;
 	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
-	struct device *dev = mdm->dev;
 
 	if (!mdm)
 		return IRQ_HANDLED;
+	dev = mdm->dev;
 	esoc = mdm->esoc;
 	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
 	if (value == 0 && mdm->ready) {
@@ -500,7 +501,7 @@
 	struct device_node *node = mdm->dev->of_node;
 
 	addr = of_iomap(node, 0);
-	if (IS_ERR(addr)) {
+	if (IS_ERR_OR_NULL(addr)) {
 		dev_err(mdm->dev, "failed to get debug base address\n");
 		return;
 	}
@@ -509,7 +510,7 @@
 	if (val == MDM_DBG_MODE) {
 		mdm->dbg_mode = true;
 		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
-		if (IS_ERR(mdm->cti)) {
+		if (IS_ERR_OR_NULL(mdm->cti)) {
 			dev_err(mdm->dev, "unable to get cti handle\n");
 			goto cti_get_err;
 		}
@@ -743,7 +744,7 @@
 	mdm->dev = &pdev->dev;
 	mdm->pon_ops = pon_ops;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -813,7 +814,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -901,7 +902,7 @@
 	mdm->pon_ops = pon_ops;
 	node = pdev->dev.of_node;
 	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
-	if (IS_ERR(esoc)) {
+	if (IS_ERR_OR_NULL(esoc)) {
 		dev_err(mdm->dev, "cannot allocate esoc device\n");
 		return PTR_ERR(esoc);
 	}
@@ -1001,11 +1002,11 @@
 	struct mdm_ctrl *mdm;
 
 	match = of_match_node(mdm_dt_match, node);
-	if (IS_ERR(match))
+	if (IS_ERR_OR_NULL(match))
 		return PTR_ERR(match);
 	mdm_ops = match->data;
 	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
-	if (IS_ERR(mdm))
+	if (IS_ERR_OR_NULL(mdm))
 		return PTR_ERR(mdm);
 	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
 }
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
index a186ea8..309c820 100644
--- a/drivers/esoc/esoc-mdm-dbg-eng.c
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,165 @@
 }
 static DRIVER_ATTR(notifier_mask, 00200, NULL, notifier_mask_store);
 
-int mdm_dbg_eng_init(struct esoc_drv *esoc_drv)
+#ifdef CONFIG_MDM_DBG_REQ_ENG
+static struct esoc_clink *dbg_clink;
+/* Last recorded request from esoc */
+static enum esoc_req last_req;
+static DEFINE_SPINLOCK(req_lock);
+/*
+ * esoc_to_user: Conversion of esoc ids to user visible strings
+ * id: esoc request, command, notifier, event id
+ * str: string equivalent of the above
+ */
+struct esoc_to_user {
+	unsigned int id;
+	char str[20];
+};
+
+static struct esoc_to_user in_to_resp[] = {
+	{
+		.id = ESOC_IMG_XFER_DONE,
+		.str = "XFER_DONE",
+	},
+	{
+		.id = ESOC_BOOT_DONE,
+		.str = "BOOT_DONE",
+	},
+	{
+		.id = ESOC_BOOT_FAIL,
+		.str = "BOOT_FAIL",
+	},
+	{
+		.id = ESOC_IMG_XFER_RETRY,
+		.str = "XFER_RETRY",
+	},
+	{	.id = ESOC_IMG_XFER_FAIL,
+		.str = "XFER_FAIL",
+	},
+	{
+		.id = ESOC_UPGRADE_AVAILABLE,
+		.str = "UPGRADE",
+	},
+	{	.id = ESOC_DEBUG_DONE,
+		.str = "DEBUG_DONE",
+	},
+	{
+		.id = ESOC_DEBUG_FAIL,
+		.str = "DEBUG_FAIL",
+	},
+};
+
+static struct esoc_to_user req_to_str[] = {
+	{
+		.id = ESOC_REQ_IMG,
+		.str = "REQ_IMG",
+	},
+	{
+		.id = ESOC_REQ_DEBUG,
+		.str = "REQ_DEBUG",
+	},
+	{
+		.id = ESOC_REQ_SHUTDOWN,
+		.str = "REQ_SHUTDOWN",
+	},
+};
+
+static ssize_t req_eng_resp_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int i;
+	const struct esoc_clink_ops *const clink_ops = dbg_clink->clink_ops;
+
+	dev_dbg(&dbg_clink->dev, "user input req eng response %s\n", buf);
+	for (i = 0; i < ARRAY_SIZE(in_to_resp); i++) {
+		size_t len1 = strlen(buf);
+		size_t len2 = strlen(in_to_resp[i].str);
+
+		if (len1 == len2 && !strcmp(buf, in_to_resp[i].str)) {
+			clink_ops->notify(in_to_resp[i].id, dbg_clink);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(in_to_resp))
+		dev_err(&dbg_clink->dev, "Invalid resp %s, specified\n", buf);
+	return count;
+}
+
+static DRIVER_ATTR(req_eng_resp, 0200, NULL, req_eng_resp_store);
+
+static ssize_t last_esoc_req_show(struct device_driver *drv, char *buf)
+{
+	unsigned int i;
+	unsigned long flags;
+	size_t count = 0;
+
+	spin_lock_irqsave(&req_lock, flags);
+	for (i = 0; i < ARRAY_SIZE(req_to_str); i++) {
+		if (last_req == req_to_str[i].id) {
+			count = snprintf(buf, PAGE_SIZE, "%s\n",
+					req_to_str[i].str);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&req_lock, flags);
+	return count;
+}
+static DRIVER_ATTR(last_esoc_req, 0400, last_esoc_req_show, NULL);
+
+static void esoc_handle_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&req_lock, flags);
+	last_req = req;
+	spin_unlock_irqrestore(&req_lock, flags);
+}
+
+static void esoc_handle_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+}
+
+static struct esoc_eng dbg_req_eng = {
+	.handle_clink_req = esoc_handle_req,
+	.handle_clink_evt = esoc_handle_evt,
+};
+
+int register_dbg_req_eng(struct esoc_clink *clink,
+					struct device_driver *drv)
+{
+	int ret;
+
+	dbg_clink = clink;
+	ret = driver_create_file(drv, &driver_attr_req_eng_resp);
+	if (ret)
+		return ret;
+	ret = driver_create_file(drv, &driver_attr_last_esoc_req);
+	if (ret) {
+		dev_err(&clink->dev, "Unable to create last esoc req\n");
+		goto last_req_err;
+	}
+	ret = esoc_clink_register_req_eng(clink, &dbg_req_eng);
+	if (ret) {
+		pr_err("Unable to register req eng\n");
+		goto req_eng_fail;
+	}
+	spin_lock_init(&req_lock);
+	return 0;
+req_eng_fail:
+	driver_remove_file(drv, &driver_attr_last_esoc_req);
+last_req_err:
+	driver_remove_file(drv, &driver_attr_req_eng_resp);
+	return ret;
+}
+#else
+int register_dbg_req_eng(struct esoc_clink *clink, struct device_driver *d)
+{
+	return 0;
+}
+#endif
+
+int mdm_dbg_eng_init(struct esoc_drv *esoc_drv,
+			struct esoc_clink *clink)
 {
 	int ret;
 	struct device_driver *drv = &esoc_drv->driver;
@@ -194,7 +352,14 @@
 		pr_err("Unable to create notify mask file\n");
 		goto notify_mask_err;
 	}
+	ret = register_dbg_req_eng(clink, drv);
+	if (ret) {
+		pr_err("Failed to register esoc dbg req eng\n");
+		goto dbg_req_fail;
+	}
 	return 0;
+dbg_req_fail:
+	driver_remove_file(drv, &driver_attr_notifier_mask);
 notify_mask_err:
 	driver_remove_file(drv, &driver_attr_command_mask);
 cmd_mask_err:
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 473a9c7..31cd8c4 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -233,7 +233,7 @@
 	struct esoc_eng *esoc_eng;
 
 	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
-	if (IS_ERR(mdm_drv))
+	if (IS_ERR_OR_NULL(mdm_drv))
 		return PTR_ERR(mdm_drv);
 	esoc_eng = &mdm_drv->cmd_eng;
 	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
@@ -261,7 +261,7 @@
 	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
 	if (ret)
 		dev_err(&esoc_clink->dev, "register for reboot failed\n");
-	ret = mdm_dbg_eng_init(drv);
+	ret = mdm_dbg_eng_init(drv, esoc_clink);
 	if (ret) {
 		debug_init_done = false;
 		dev_err(&esoc_clink->dev, "dbg engine failure\n");
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
index 4807e2b..dc94742 100644
--- a/drivers/esoc/esoc_bus.c
+++ b/drivers/esoc/esoc_bus.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -129,7 +129,7 @@
 	struct device *dev;
 
 	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -143,7 +143,7 @@
 
 	dev = bus_find_device(&esoc_bus_type, NULL, node,
 						esoc_clink_match_node);
-	if (IS_ERR(dev))
+	if (IS_ERR_OR_NULL(dev))
 		return NULL;
 	esoc_clink = to_esoc_clink(dev);
 	return esoc_clink;
@@ -175,14 +175,14 @@
 
 	len = strlen("esoc") + sizeof(esoc_clink->id);
 	subsys_name = kzalloc(len, GFP_KERNEL);
-	if (IS_ERR(subsys_name))
+	if (IS_ERR_OR_NULL(subsys_name))
 		return PTR_ERR(subsys_name);
 	snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
 	esoc_clink->subsys.name = subsys_name;
 	esoc_clink->dev.of_node = esoc_clink->np;
 	esoc_clink->subsys.dev = &esoc_clink->dev;
 	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
-	if (IS_ERR(esoc_clink->subsys_dev)) {
+	if (IS_ERR_OR_NULL(esoc_clink->subsys_dev)) {
 		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
 		ret = PTR_ERR(esoc_clink->subsys_dev);
 		goto subsys_err;
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
index 5b194e31..b9d6833 100644
--- a/drivers/esoc/esoc_client.c
+++ b/drivers/esoc/esoc_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -48,6 +48,8 @@
 
 	for (index = 0;; index++) {
 		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		if (IS_ERR_OR_NULL(esoc_prop))
+			return ERR_PTR(-ENOMEM);
 		parp = of_get_property(np, esoc_prop, NULL);
 		if (parp == NULL) {
 			dev_err(dev, "esoc device not present\n");
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 17a30b8..39090dc 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -259,7 +259,16 @@
 	unsigned int minor = iminor(inode);
 
 	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev) {
+		pr_err("failed to get udev\n");
+		return -ENOMEM;
+	}
+
 	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+	if (!esoc_clink) {
+		pr_err("failed to get clink\n");
+		return -ENOMEM;
+	}
 
 	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
 	if (!uhandle) {
@@ -304,12 +313,12 @@
 	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
 
 	esoc_udev = get_free_esoc_udev(esoc_clink);
-	if (IS_ERR(esoc_udev))
+	if (IS_ERR_OR_NULL(esoc_udev))
 		return PTR_ERR(esoc_udev);
 	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
 					MKDEV(esoc_major, esoc_clink->id),
 					esoc_clink, "esoc-%d", esoc_clink->id);
-	if (IS_ERR(esoc_udev->dev)) {
+	if (IS_ERR_OR_NULL(esoc_udev->dev)) {
 		pr_err("failed to create user device\n");
 		goto dev_err;
 	}
@@ -357,8 +366,7 @@
 	int ret = 0;
 
 	esoc_class = class_create(THIS_MODULE, "esoc-dev");
-
-	if (IS_ERR(esoc_class)) {
+	if (IS_ERR_OR_NULL(esoc_class)) {
 		pr_err("coudn't create class");
 		return PTR_ERR(esoc_class);
 	}
diff --git a/drivers/esoc/mdm-dbg.h b/drivers/esoc/mdm-dbg.h
index ae31339..ffba87c 100644
--- a/drivers/esoc/mdm-dbg.h
+++ b/drivers/esoc/mdm-dbg.h
@@ -24,7 +24,8 @@
 	return false;
 }
 
-static inline int mdm_dbg_eng_init(struct esoc_drv *drv)
+static inline int mdm_dbg_eng_init(struct esoc_drv *drv,
+						struct esoc_clink *clink)
 {
 	return 0;
 }
@@ -32,7 +33,8 @@
 #else
 extern bool dbg_check_cmd_mask(unsigned int cmd);
 extern bool dbg_check_notify_mask(unsigned int notify);
-extern int mdm_dbg_eng_init(struct esoc_drv *drv);
+extern int mdm_dbg_eng_init(struct esoc_drv *drv,
+				struct esoc_clink *clink);
 #endif
 
 static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 7c75a8d..6bdf39e 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -65,6 +65,7 @@
 	bool systab_found;
 
 	efi_mm.pgd = pgd_alloc(&efi_mm);
+	mm_init_cpumask(&efi_mm);
 	init_new_context(NULL, &efi_mm);
 
 	systab_found = false;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a9c5aa6..71000d5 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -104,3 +104,12 @@
 	help
 	  Choose this option for writeback connector support.
 
+config DRM_SDE_EVTLOG_DEBUG
+	bool "Enable event logging in MSM DRM"
+	depends on DRM_MSM
+	help
+	  The SDE DRM debugging provides support to enable display debugging
+	  features to: dump SDE registers during driver errors, panic
+	  driver during fatal errors and enable some display-driver logging
+	  into an internal buffer (this avoids logging overhead).
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index abd1503..a402fe8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -51,6 +51,7 @@
 	sde/sde_hw_color_proc_v4.o \
 	sde_rsc.o \
 	sde_rsc_hw.o \
+	sde/sde_hw_ad4.o \
 
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
@@ -136,7 +137,7 @@
 	sde/sde_hw_color_processing_v1_7.o \
 	sde/sde_reg_dma.o \
 	sde/sde_hw_reg_dma_v1.o \
-
+	sde/sde_hw_dsc.o
 
 msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
 	sde/sde_encoder_phys_wb.o
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index aa3152f..ddf791c 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -1914,7 +1914,7 @@
 	struct dsi_display_mode *panel_mode;
 	struct dsi_dfps_capabilities dfps_caps;
 	int rc = 0;
-	int i;
+	int i = 0;
 
 	if (!display || !dsi_mode) {
 		pr_err("Invalid params\n");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 3f82819..24a740b 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -395,7 +395,7 @@
 {
 	u32 count = 0;
 	u32 size = 0;
-	struct dsi_display_mode *modes;
+	struct dsi_display_mode *modes = NULL;
 	struct drm_display_mode drm_mode;
 	int rc, i;
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 4da92ee..bda9c2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1970,6 +1970,52 @@
 	return rc;
 }
 
+static int dsi_panel_parse_hdr_config(struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+
+	int rc = 0;
+	struct drm_panel_hdr_properties *hdr_prop;
+
+	hdr_prop = &panel->hdr_props;
+	hdr_prop->hdr_enabled = of_property_read_bool(of_node,
+		"qcom,mdss-dsi-panel-hdr-enabled");
+
+	if (hdr_prop->hdr_enabled) {
+		rc = of_property_read_u32_array(of_node,
+				"qcom,mdss-dsi-panel-hdr-color-primaries",
+				hdr_prop->display_primaries,
+				DISPLAY_PRIMARIES_MAX);
+		if (rc) {
+			pr_err("%s:%d, Unable to read color primaries,rc:%u",
+					__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-peak-brightness",
+			&(hdr_prop->peak_brightness));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr brightness, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"qcom,mdss-dsi-panel-blackness-level",
+			&(hdr_prop->blackness_level));
+		if (rc) {
+			pr_err("%s:%d, Unable to read hdr blackness level, rc:%u",
+				__func__, __LINE__, rc);
+			hdr_prop->hdr_enabled = false;
+			return rc;
+		}
+	}
+	return 0;
+}
+
 struct dsi_panel *dsi_panel_get(struct device *parent,
 				struct device_node *of_node)
 {
@@ -2071,6 +2117,10 @@
 	if (rc)
 		pr_err("failed to parse panel jitter config, rc=%d\n", rc);
 
+	rc = dsi_panel_parse_hdr_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse hdr config, rc=%d\n", rc);
+
 	panel->panel_of_node = of_node;
 	drm_panel_init(&panel->drm_panel);
 	mutex_init(&panel->panel_lock);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index ab30e16..57226ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -22,6 +22,7 @@
 #include <linux/leds.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_mipi_dsi.h>
+#include <drm/msm_drm.h>
 
 #include "dsi_defs.h"
 #include "dsi_ctrl_hw.h"
@@ -173,6 +174,7 @@
 	struct dsi_backlight_config bl_config;
 	struct dsi_panel_reset_config reset_config;
 	struct dsi_pinctrl_info pinctrl;
+	struct drm_panel_hdr_properties hdr_props;
 
 	bool lp11_init;
 	bool ulps_enabled;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 48bccd9..600b250 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -45,10 +45,20 @@
 		drm_fb_helper_hotplug_event(priv->fbdev);
 }
 
+int msm_atomic_check(struct drm_device *dev,
+			    struct drm_atomic_state *state)
+{
+	if (msm_is_suspend_blocked(dev)) {
+		DRM_DEBUG("rejecting commit during suspend\n");
+		return -EBUSY;
+	}
+	return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = msm_fb_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
+	.atomic_check = msm_atomic_check,
 	.atomic_commit = msm_atomic_commit,
 };
 
@@ -296,11 +306,8 @@
 
 	sde_dbg_destroy();
 
-	sde_power_client_destroy(&priv->phandle, priv->pclient);
-	sde_power_resource_deinit(pdev, &priv->phandle);
-
 	component_unbind_all(dev, ddev);
-
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
 	sde_power_resource_deinit(pdev, &priv->phandle);
 
 	msm_mdss_destroy(ddev);
@@ -483,20 +490,20 @@
 	ret = sde_power_resource_init(pdev, &priv->phandle);
 	if (ret) {
 		pr_err("sde power resource init failed\n");
-		goto fail;
+		goto power_init_fail;
 	}
 
 	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
 	if (IS_ERR_OR_NULL(priv->pclient)) {
 		pr_err("sde power client create failed\n");
 		ret = -EINVAL;
-		goto fail;
+		goto power_client_fail;
 	}
 
 	/* Bind all our sub-components: */
 	ret = msm_component_bind_all(dev, ddev);
 	if (ret)
-		return ret;
+		goto bind_fail;
 
 	ret = msm_init_vram(ddev);
 	if (ret)
@@ -626,6 +633,12 @@
 fail:
 	msm_drm_uninit(dev);
 	return ret;
+bind_fail:
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+power_client_fail:
+	sde_power_resource_deinit(pdev, &priv->phandle);
+power_init_fail:
+	msm_mdss_destroy(ddev);
 mdss_init_fail:
 	kfree(priv);
 priv_alloc_fail:
@@ -1123,7 +1136,7 @@
 	.debugfs_cleanup    = msm_debugfs_cleanup,
 #endif
 	.ioctls             = msm_ioctls,
-	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
+	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
 	.name               = "msm_drm",
 	.desc               = "MSM Snapdragon DRM",
@@ -1136,8 +1149,86 @@
 #ifdef CONFIG_PM_SLEEP
 static int msm_pm_suspend(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct drm_modeset_acquire_ctx ctx;
+	struct drm_connector *conn;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct msm_drm_private *priv;
+	int ret = 0;
 
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+	SDE_EVT32(0);
+
+	/* acquire modeset lock(s) */
+	drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+	if (ret)
+		goto unlock;
+
+	/* save current state for resume */
+	if (priv->suspend_state)
+		drm_atomic_state_free(priv->suspend_state);
+	priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+	if (IS_ERR_OR_NULL(priv->suspend_state)) {
+		DRM_ERROR("failed to back up suspend state\n");
+		priv->suspend_state = NULL;
+		goto unlock;
+	}
+
+	/* create atomic state to disable all CRTCs */
+	state = drm_atomic_state_alloc(ddev);
+	if (IS_ERR_OR_NULL(state)) {
+		DRM_ERROR("failed to allocate crtc disable state\n");
+		goto unlock;
+	}
+
+	state->acquire_ctx = &ctx;
+	drm_for_each_connector(conn, ddev) {
+
+		if (!conn->state || !conn->state->crtc ||
+				conn->dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		/* force CRTC to be inactive */
+		crtc_state = drm_atomic_get_crtc_state(state,
+				conn->state->crtc);
+		if (IS_ERR_OR_NULL(crtc_state)) {
+			DRM_ERROR("failed to get crtc %d state\n",
+					conn->state->crtc->base.id);
+			drm_atomic_state_free(state);
+			goto unlock;
+		}
+		crtc_state->active = false;
+	}
+
+	/* commit the "disable all" state */
+	ret = drm_atomic_commit(state);
+	if (ret < 0) {
+		DRM_ERROR("failed to disable crtcs, %d\n", ret);
+		drm_atomic_state_free(state);
+	} else {
+		priv->suspend_block = true;
+	}
+
+unlock:
+	if (ret == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
 
 	return 0;
@@ -1145,8 +1236,40 @@
 
 static int msm_pm_resume(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	int ret;
 
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+
+	SDE_EVT32(priv->suspend_state != NULL);
+
+	drm_mode_config_reset(ddev);
+
+	drm_modeset_lock_all(ddev);
+
+	priv->suspend_block = false;
+
+	if (priv->suspend_state) {
+		priv->suspend_state->acquire_ctx =
+			ddev->mode_config.acquire_ctx;
+		ret = drm_atomic_commit(priv->suspend_state);
+		if (ret < 0) {
+			DRM_ERROR("failed to restore state, %d\n", ret);
+			drm_atomic_state_free(priv->suspend_state);
+		}
+		priv->suspend_state = NULL;
+	}
+	drm_modeset_unlock_all(ddev);
+
+	/* enable hot-plug polling */
 	drm_kms_helper_poll_enable(ddev);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 817fcd2..6a63bfd 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -137,6 +137,7 @@
 enum msm_mdp_conn_property {
 	/* blob properties, always put these first */
 	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_HDR_INFO,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,
@@ -169,12 +170,10 @@
  * enum msm_display_compression_type - compression method used for pixel stream
  * @MSM_DISPLAY_COMPRESSION_NONE:     Pixel data is not compressed
  * @MSM_DISPLAY_COMPRESSION_DSC:      DSC compresison is used
- * @MSM_DISPLAY_COMPRESSION_FBC:      FBC compression is used
  */
 enum msm_display_compression_type {
 	MSM_DISPLAY_COMPRESSION_NONE,
 	MSM_DISPLAY_COMPRESSION_DSC,
-	MSM_DISPLAY_COMPRESSION_FBC,
 };
 
 /**
@@ -474,6 +473,10 @@
 	 */
 	struct task_struct *struct_mutex_task;
 
+	/* saved atomic state during system suspend */
+	struct drm_atomic_state *suspend_state;
+	bool suspend_block;
+
 	/* list of clients waiting for events */
 	struct list_head client_event_list;
 
@@ -501,6 +504,25 @@
 		(_cb)->func = _func;                         \
 	} while (0)
 
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+	if (!dev || !dev->dev_private)
+		return false;
+
+	return ((struct msm_drm_private *)dev->dev_private)->suspend_state != 0;
+}
+
+static inline bool msm_is_suspend_blocked(struct drm_device *dev)
+{
+	if (!dev || !dev->dev_private)
+		return false;
+
+	if (!msm_is_suspend_state(dev))
+		return false;
+
+	return ((struct msm_drm_private *)dev->dev_private)->suspend_block != 0;
+}
+
 int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock);
 
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index cdd0125..c279d01 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -315,10 +315,10 @@
 	}
 
 	if (!compat) {
-		DRM_ERROR("unable to find matching domain for %d\n", domain);
+		DRM_DEBUG("unable to find matching domain for %d\n", domain);
 		return ERR_PTR(-ENOENT);
 	}
-	DRM_INFO("found domain %d compat: %s\n", domain, compat);
+	DRM_DEBUG("found domain %d compat: %s\n", domain, compat);
 
 	if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
 		int rc;
@@ -343,7 +343,7 @@
 
 	child = of_find_compatible_node(dev->of_node, NULL, compat);
 	if (!child) {
-		DRM_ERROR("unable to find compatible node for %s\n", compat);
+		DRM_DEBUG("unable to find compatible node for %s\n", compat);
 		return ERR_PTR(-ENODEV);
 	}
 
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
new file mode 100644
index 0000000..5ed7ae2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SDE_AD4_H_
+#define _SDE_AD4_H_
+
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+#include "sde_hw_dspp.h"
+
+/**
+ * enum ad4_modes - ad4 modes supported by driver
+ */
+enum ad4_modes {
+	AD4_OFF,
+	AD4_AUTO_STRENGTH,
+	AD4_CALIBRATION,
+	AD4_MANUAL,
+};
+
+/**
+ * ad4_modes - drm enum property list enumerating the AD4 modes supported
+ *             by the driver, used to create the AD4 mode enum property
+ */
+static const struct drm_prop_enum_list ad4_modes[] = {
+	{AD4_OFF, "off"},
+	{AD4_AUTO_STRENGTH, "auto_strength_mode"},
+	{AD4_CALIBRATION, "calibration_mode"},
+	{AD4_MANUAL, "manual_mode"},
+};
+
+/**
+ * enum ad_property - properties that can be set for ad
+ */
+enum ad_property {
+	AD_MODE,
+	AD_INIT,
+	AD_CFG,
+	AD_INPUT,
+	AD_SUSPEND,
+	AD_ASSERTIVE,
+	AD_BACKLIGHT,
+	AD_PROPMAX,
+};
+
+/**
+ * struct sde_ad_hw_cfg - structure for setting the ad properties
+ * @prop: enum of ad property
+ * @hw_cfg: payload for the prop being set.
+ */
+struct sde_ad_hw_cfg {
+	enum ad_property prop;
+	struct sde_hw_cp_cfg *hw_cfg;
+};
+
+/**
+ * sde_validate_dspp_ad4() - api to validate if ad property is allowed for
+ *                           the display with allocated dspp/mixers.
+ * @dspp: pointer to dspp info structure.
+ * @prop: pointer to u32 pointing to ad property
+ */
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop);
+
+/**
+ * sde_setup_dspp_ad4 - api to apply the ad property; sde_validate_dspp_ad4
+ *                      should be called before calling this function
+ * @dspp: pointer to dspp info structure.
+ * @cfg: pointer to struct sde_ad_hw_cfg
+ */
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
+#endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index f7fcd01..cb6917a 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -21,6 +21,7 @@
 #include "sde_crtc.h"
 #include "sde_hw_dspp.h"
 #include "sde_hw_lm.h"
+#include "sde_ad4.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -60,6 +61,12 @@
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
 
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list);
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc);
+
 #define setup_dspp_prop_install_funcs(func) \
 do { \
 	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -96,6 +103,12 @@
 	SDE_CP_CRTC_DSPP_HIST,
 	SDE_CP_CRTC_DSPP_AD,
 	SDE_CP_CRTC_DSPP_VLUT,
+	SDE_CP_CRTC_DSPP_AD_MODE,
+	SDE_CP_CRTC_DSPP_AD_INIT,
+	SDE_CP_CRTC_DSPP_AD_CFG,
+	SDE_CP_CRTC_DSPP_AD_INPUT,
+	SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS,
+	SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
 	SDE_CP_CRTC_DSPP_MAX,
 	/* DSPP features end */
 
@@ -136,9 +149,10 @@
 	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
 		/* Check if local blob is Set */
 		if (!blob) {
-			hw_cfg->len = sizeof(prop_node->prop_val);
-			if (prop_node->prop_val)
+			if (prop_node->prop_val) {
+				hw_cfg->len = sizeof(prop_node->prop_val);
 				hw_cfg->payload = &prop_node->prop_val;
+			}
 		} else {
 			hw_cfg->len = (prop_node->prop_val) ? blob->length :
 					0;
@@ -147,6 +161,10 @@
 		}
 		if (prop_node->prop_val)
 			*feature_enabled = true;
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_ENUM) {
+		*feature_enabled = (prop_node->prop_val != 0);
+		hw_cfg->len = sizeof(prop_node->prop_val);
+		hw_cfg->payload = &prop_node->prop_val;
 	} else {
 		DRM_ERROR("property type is not supported\n");
 	}
@@ -178,7 +196,7 @@
 		}
 	}
 
-	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+	if (!found || !(prop_node->prop_flags & DRM_MODE_PROP_RANGE)) {
 		DRM_ERROR("local blob create failed prop found %d flags %d\n",
 		       found, prop_node->prop_flags);
 		return ret;
@@ -232,10 +250,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_disable_crtc_blob_property(prop_node);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, 0);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = 0;
+	}
 	return ret;
 }
 
@@ -275,10 +297,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, val);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = val;
+	}
 	return ret;
 }
 
@@ -331,6 +357,8 @@
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
+	INIT_LIST_HEAD(&sde_crtc->ad_dirty);
+	INIT_LIST_HEAD(&sde_crtc->ad_active);
 }
 
 static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
@@ -357,8 +385,8 @@
 	prop = priv->cp_property[feature];
 
 	if (!prop) {
-		prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
-					   name, 0);
+		prop = drm_property_create_range(crtc->dev,
+				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
 		if (!prop) {
 			DRM_ERROR("property create failed: %s\n", name);
 			kfree(prop_node);
@@ -412,7 +440,7 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
-static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+static void sde_cp_crtc_install_blob_property(struct drm_crtc *crtc, char *name,
 			u32 feature, u32 blob_sz)
 {
 	struct drm_property *prop;
@@ -452,6 +480,46 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
+static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc,
+	u32 feature, const struct drm_prop_enum_list *list, u32 enum_sz,
+	char *name)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_enum(crtc->dev, 0, name,
+			list, enum_sz);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc, u32 last_feature)
 {
@@ -462,13 +530,18 @@
 	int i = 0;
 	bool feature_enabled = false;
 	int ret = 0;
+	struct sde_ad_hw_cfg ad_cfg;
 
 	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
+	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
+	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
 		hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
+		hw_cfg.mixer_info = hw_lm;
 		if (i == num_mixers - 1)
 			hw_cfg.last_feature = last_feature;
 		else
@@ -558,6 +631,60 @@
 			}
 			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
 			break;
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_MODE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INIT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_CFG;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INPUT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_ASSERTIVE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_BACKLIGHT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
 		default:
 			ret = -EINVAL;
 			break;
@@ -574,7 +701,7 @@
 	if (feature_enabled) {
 		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
 				 prop_node->property_id);
-		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+		sde_cp_update_list(prop_node, sde_crtc, false);
 	} else {
 		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
 			 prop_node->property_id);
@@ -612,10 +739,17 @@
 		return;
 	}
 
-	/* Check if dirty list is empty for early return */
-	if (list_empty(&sde_crtc->dirty_list)) {
-		DRM_DEBUG_DRIVER("Dirty list is empty\n");
-		return;
+	/* Check if the dirty lists are empty and the ad features are
+	 * disabled for early return. If ad properties are active then
+	 * a dspp flush still needs to be issued.
+	 */
+	if (list_empty(&sde_crtc->dirty_list) &&
+		list_empty(&sde_crtc->ad_dirty)) {
+		if (list_empty(&sde_crtc->ad_active)) {
+			DRM_DEBUG_DRIVER("Dirty list is empty\n");
+			return;
+		}
+		set_dspp_flush = true;
 	}
 
 	num_of_features = 0;
@@ -623,7 +757,7 @@
 		num_of_features++;
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
-							dirty_list) {
+				dirty_list) {
 		num_of_features--;
 		sde_cp_crtc_setfeature(prop_node, sde_crtc,
 				(num_of_features == 0));
@@ -634,6 +768,18 @@
 			set_lm_flush = true;
 	}
 
+	num_of_features = 0;
+	list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list)
+		num_of_features++;
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
+				dirty_list) {
+		num_of_features--;
+		set_dspp_flush = true;
+		sde_cp_crtc_setfeature(prop_node, sde_crtc,
+				(num_of_features == 0));
+	}
+
 	for (i = 0; i < num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl)
@@ -791,6 +937,13 @@
 			sde_crtc->num_mixers);
 		return -EINVAL;
 	}
+
+	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
+	if (ret) {
+		DRM_ERROR("ad property validation failed ret %d\n", ret);
+		return ret;
+	}
+
 	/* remove the property from dirty list */
 	list_del_init(&prop_node->dirty_list);
 
@@ -804,7 +957,7 @@
 		/* remove the property from active list */
 		list_del_init(&prop_node->active_list);
 		/* Mark the feature as dirty */
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 	}
 	return ret;
 }
@@ -888,7 +1041,7 @@
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
 				 active_list) {
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 		list_del_init(&prop_node->active_list);
 	}
 }
@@ -913,7 +1066,7 @@
 		"SDE_DSPP_PCC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
 		break;
 	default:
@@ -988,6 +1141,33 @@
 		sde_cp_crtc_install_immutable_property(crtc,
 			feature_name, SDE_CP_CRTC_DSPP_AD);
 		break;
+	case 4:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+
+		sde_cp_crtc_install_enum_property(crtc,
+			SDE_CP_CRTC_DSPP_AD_MODE, ad4_modes,
+			ARRAY_SIZE(ad4_modes), "SDE_DSPP_AD_V4_MODE");
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INIT",
+			SDE_CP_CRTC_DSPP_AD_INIT, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_INIT,
+			sizeof(struct drm_msm_ad4_init));
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_CFG",
+			SDE_CP_CRTC_DSPP_AD_CFG, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_CFG,
+			sizeof(struct drm_msm_ad4_cfg));
+		sde_cp_crtc_install_range_property(crtc,
+			"SDE_DSPP_AD_V4_ASSERTIVNESS",
+			SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS, 0, (BIT(8) - 1), 0);
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INPUT",
+			SDE_CP_CRTC_DSPP_AD_INPUT, 0, U16_MAX, 0);
+		sde_cp_crtc_install_range_property(crtc,
+				"SDE_DSPP_AD_V4_BACKLIGHT",
+			SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1),
+			0);
+		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
 		break;
@@ -1008,7 +1188,7 @@
 		 "SDE_LM_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_LM_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1032,7 +1212,7 @@
 		"SDE_DSPP_GAMUT_V", version);
 	switch (version) {
 	case 4:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GAMUT,
 			sizeof(struct drm_msm_3d_gamut));
 		break;
@@ -1057,7 +1237,7 @@
 		"SDE_DSPP_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1065,3 +1245,74 @@
 		break;
 	}
 }
+
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list)
+{
+	switch (prop_node->feature) {
+	case SDE_CP_CRTC_DSPP_AD_MODE:
+	case SDE_CP_CRTC_DSPP_AD_INIT:
+	case SDE_CP_CRTC_DSPP_AD_CFG:
+	case SDE_CP_CRTC_DSPP_AD_INPUT:
+	case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+	case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list, &crtc->ad_dirty);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->ad_active);
+		break;
+	default:
+		/* color processing properties handle here */
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list,
+					&crtc->dirty_list);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->active_list);
+		break;
+	};
+}
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc)
+{
+	int i = 0, ret = 0;
+	u32 ad_prop;
+
+	for (i = 0; i < crtc->num_mixers && !ret; i++) {
+		if (!crtc->mixers[i].hw_dspp) {
+			ret = -EINVAL;
+			continue;
+		}
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			ad_prop = AD_MODE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			ad_prop = AD_INIT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			ad_prop = AD_CFG;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			ad_prop = AD_INPUT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			ad_prop = AD_ASSERTIVE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			ad_prop = AD_BACKLIGHT;
+			break;
+		default:
+			/* Not an AD property */
+			return 0;
+		}
+		if (!crtc->mixers[i].hw_dspp->ops.validate_ad)
+			ret = -EINVAL;
+		else
+			ret = crtc->mixers[i].hw_dspp->ops.validate_ad(
+				crtc->mixers[i].hw_dspp, &ad_prop);
+	}
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 0e5342f..9caadca 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -17,6 +17,7 @@
 #include "sde_connector.h"
 #include <linux/backlight.h>
 #include "dsi_drm.h"
+#include "dsi_display.h"
 
 #define BL_NODE_NAME_SIZE 32
 
@@ -115,6 +116,83 @@
 	return 0;
 }
 
+int sde_connector_trigger_event(void *drm_connector,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3)
+{
+	struct sde_connector *c_conn;
+	unsigned long irq_flags;
+	void (*cb_func)(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3);
+	void *usr;
+	int rc = 0;
+
+	/*
+	 * This function may potentially be called from an ISR context, so
+	 * avoid excessive logging/etc.
+	 */
+	if (!drm_connector)
+		return -EINVAL;
+	else if (event_idx >= SDE_CONN_EVENT_COUNT)
+		return -EINVAL;
+	c_conn = to_sde_connector(drm_connector);
+
+	spin_lock_irqsave(&c_conn->event_lock, irq_flags);
+	cb_func = c_conn->event_table[event_idx].cb_func;
+	usr = c_conn->event_table[event_idx].usr;
+	spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
+
+	if (cb_func)
+		cb_func(event_idx, instance_idx, usr,
+			data0, data1, data2, data3);
+	else
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+int sde_connector_register_event(struct drm_connector *connector,
+		uint32_t event_idx,
+		void (*cb_func)(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3),
+		void *usr)
+{
+	struct sde_connector *c_conn;
+	unsigned long irq_flags;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	} else if (event_idx >= SDE_CONN_EVENT_COUNT) {
+		SDE_ERROR("conn%d, invalid event %d\n",
+				connector->base.id, event_idx);
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	spin_lock_irqsave(&c_conn->event_lock, irq_flags);
+	c_conn->event_table[event_idx].cb_func = cb_func;
+	c_conn->event_table[event_idx].usr = usr;
+	spin_unlock_irqrestore(&c_conn->event_lock, irq_flags);
+
+	/* optionally notify display of event registration */
+	if (c_conn->ops.enable_event && c_conn->display)
+		c_conn->ops.enable_event(connector, event_idx,
+				cb_func != NULL, c_conn->display);
+	return 0;
+}
+
+void sde_connector_unregister_event(struct drm_connector *connector,
+		uint32_t event_idx)
+{
+	(void)sde_connector_register_event(connector, event_idx, 0, 0);
+}
+
 int sde_connector_get_info(struct drm_connector *connector,
 		struct msm_display_info *info)
 {
@@ -150,6 +228,8 @@
 
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 
 	drm_connector_unregister(connector);
@@ -589,6 +669,7 @@
 	struct sde_kms *sde_kms;
 	struct sde_kms_info *info;
 	struct sde_connector *c_conn = NULL;
+	struct dsi_display *dsi_display;
 	int rc;
 
 	if (!dev || !dev->dev_private || !encoder) {
@@ -616,6 +697,8 @@
 	if (rc)
 		goto error_free_conn;
 
+	spin_lock_init(&c_conn->event_lock);
+
 	c_conn->connector_type = connector_type;
 	c_conn->encoder = encoder;
 	c_conn->panel = panel;
@@ -702,6 +785,23 @@
 		kfree(info);
 	}
 
+	if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+		dsi_display = (struct dsi_display *)(display);
+		if (dsi_display && dsi_display->panel &&
+			dsi_display->panel->hdr_props.hdr_enabled == true) {
+			msm_property_install_blob(&c_conn->property_info,
+				"hdr_properties",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_HDR_INFO);
+
+			msm_property_set_blob(&c_conn->property_info,
+				&c_conn->blob_hdr,
+				&dsi_display->panel->hdr_props,
+				sizeof(dsi_display->panel->hdr_props),
+				CONNECTOR_PROP_HDR_INFO);
+		}
+	}
+
 	msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
 			0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
 
@@ -731,6 +831,8 @@
 error_destroy_property:
 	if (c_conn->blob_caps)
 		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
 	msm_property_destroy(&c_conn->property_info);
 error_cleanup_fence:
 	sde_fence_deinit(&c_conn->retire_fence);
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a88957c..0ece0d2 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -64,6 +64,14 @@
 			void *display);
 
 	/**
+	 * update_pps - update pps command for the display panel
+	 * @pps_cmd: Pointer to pps command
+	 * @display: Pointer to private display handle
+	 * Returns: Zero on success
+	 */
+	int (*update_pps)(char *pps_cmd, void *display);
+
+	/**
 	 * mode_valid - determine if specified mode is valid
 	 * @connector: Pointer to drm connector structure
 	 * @mode: Pointer to drm mode structure
@@ -112,6 +120,16 @@
 	 */
 	int (*get_info)(struct msm_display_info *info, void *display);
 
+	/**
+	 * enable_event - notify display of event registration/unregistration
+	 * @connector: Pointer to drm connector structure
+	 * @event_idx: SDE connector event index
+	 * @enable: Whether the event is being enabled/disabled
+	 * @display: Pointer to private display structure
+	 */
+	void (*enable_event)(struct drm_connector *connector,
+			uint32_t event_idx, bool enable, void *display);
+
 	int (*set_backlight)(void *display, u32 bl_lvl);
 
 	/**
@@ -123,6 +141,28 @@
 };
 
 /**
+ * enum sde_connector_events - list of recognized connector events
+ */
+enum sde_connector_events {
+	SDE_CONN_EVENT_VID_DONE, /* video mode frame done */
+	SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
+	SDE_CONN_EVENT_COUNT,
+};
+
+/**
+ * struct sde_connector_evt - local event registration entry structure
+ * @cb_func: Pointer to desired callback function
+ * @usr: User pointer to pass to callback on event trigger
+ */
+struct sde_connector_evt {
+	void (*cb_func)(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3);
+	void *usr;
+};
+
+/**
  * struct sde_connector - local sde connector structure
  * @base: Base drm connector structure
  * @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
@@ -137,7 +177,10 @@
  * @property_info: Private structure for generic property handling
  * @property_data: Array of private data for generic property handling
  * @blob_caps: Pointer to blob structure for 'capabilities' property
+ * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
  * @fb_kmap: true if kernel mapping of framebuffer is requested
+ * @event_table: Array of registered events
+ * @event_lock: Lock object for event_table
  */
 struct sde_connector {
 	struct drm_connector base;
@@ -158,8 +201,11 @@
 	struct msm_property_info property_info;
 	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
 	struct drm_property_blob *blob_caps;
+	struct drm_property_blob *blob_hdr;
 
 	bool fb_kmap;
+	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];
+	spinlock_t event_lock;
 };
 
 /**
@@ -304,5 +350,47 @@
 int sde_connector_get_info(struct drm_connector *connector,
 		struct msm_display_info *info);
 
+/**
+ * sde_connector_trigger_event - indicate that an event has occurred
+ *	Any callbacks that have been registered against this event will
+ *	be called from the same thread context.
+ * @drm_connector: Pointer to drm connector structure
+ * @event_idx: Index of event to trigger
+ * @instance_idx: Event-specific "instance index" to pass to callback
+ * @data0: Event-specific "data" to pass to callback
+ * @data1: Event-specific "data" to pass to callback
+ * @data2: Event-specific "data" to pass to callback
+ * @data3: Event-specific "data" to pass to callback
+ * Returns: Zero on success
+ */
+int sde_connector_trigger_event(void *drm_connector,
+		uint32_t event_idx, uint32_t instance_idx,
+		uint32_t data0, uint32_t data1,
+		uint32_t data2, uint32_t data3);
+
+/**
+ * sde_connector_register_event - register a callback function for an event
+ * @connector: Pointer to drm connector structure
+ * @event_idx: Index of event to register
+ * @cb_func: Pointer to desired callback function
+ * @usr: User pointer to pass to callback on event trigger
+ * Returns: Zero on success
+ */
+int sde_connector_register_event(struct drm_connector *connector,
+		uint32_t event_idx,
+		void (*cb_func)(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3),
+		void *usr);
+
+/**
+ * sde_connector_unregister_event - unregister all callbacks for an event
+ * @connector: Pointer to drm connector structure
+ * @event_idx: Index of event to unregister
+ */
+void sde_connector_unregister_event(struct drm_connector *connector,
+		uint32_t event_idx);
+
 #endif /* _SDE_CONNECTOR_H_ */
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 2666990..bd0e4fd 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -53,11 +53,33 @@
 
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
-	struct msm_drm_private *priv = crtc->dev->dev_private;
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
 
 	return to_sde_kms(priv->kms);
 }
 
+static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
+{
+	if (!sde_crtc)
+		return;
+
+	if (sde_crtc->event_thread) {
+		kthread_flush_worker(&sde_crtc->event_worker);
+		kthread_stop(sde_crtc->event_thread);
+		sde_crtc->event_thread = NULL;
+	}
+}
+
 static void sde_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -72,10 +94,11 @@
 	msm_property_destroy(&sde_crtc->property_info);
 	sde_cp_crtc_destroy_properties(crtc);
 
-	mutex_destroy(&sde_crtc->crtc_lock);
 	sde_fence_deinit(&sde_crtc->output_fence);
+	_sde_crtc_deinit_events(sde_crtc);
 
 	drm_crtc_cleanup(crtc);
+	mutex_destroy(&sde_crtc->crtc_lock);
 	kfree(sde_crtc);
 }
 
@@ -532,6 +555,9 @@
 					ktime_to_ns(fevent->ts),
 					atomic_read(&sde_crtc->frame_pending));
 			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+
+			/* don't propagate unexpected frame done events */
+			return;
 		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
 			/* release bandwidth and other resources */
 			SDE_DEBUG("crtc%d ts:%lld last pending\n",
@@ -1026,6 +1052,112 @@
 }
 
 /**
+ * _sde_crtc_vblank_enable_nolock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ */
+static void _sde_crtc_vblank_enable_nolock(
+		struct sde_crtc *sde_crtc, bool enable)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = &sde_crtc->base;
+	dev = crtc->dev;
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	if (enable) {
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true);
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc,
+					sde_crtc_vblank_cb, (void *)crtc);
+		}
+	} else {
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc, NULL, NULL);
+		}
+		sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+	}
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid crtc kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	/*
+	 * Update CP on suspend/resume transitions
+	 */
+	if (enable && !sde_crtc->suspend)
+		sde_cp_crtc_suspend(crtc);
+	else if (!enable && sde_crtc->suspend)
+		sde_cp_crtc_resume(crtc);
+
+	/*
+	 * If the vblank refcount != 0, release a power reference on suspend
+	 * and take it back during resume (if it is still != 0).
+	 */
+	if (sde_crtc->suspend == enable)
+		SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+				crtc->base.id, enable);
+	else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+		_sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
+
+	sde_crtc->suspend = enable;
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
  * sde_crtc_duplicate_state - state duplicate hook
  * @crtc: Pointer to drm crtc structure
  * @Returns: Pointer to new drm_crtc_state structure
@@ -1075,6 +1207,10 @@
 		return;
 	}
 
+	/* revert suspend actions, if necessary */
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, false);
+
 	/* remove previous state, if present */
 	if (crtc->state) {
 		sde_crtc_destroy_state(crtc, crtc->state);
@@ -1100,27 +1236,26 @@
 
 static void sde_crtc_disable(struct drm_crtc *crtc)
 {
-	struct msm_drm_private *priv;
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
 	struct drm_encoder *encoder;
-	struct sde_kms *sde_kms;
 
-	if (!crtc) {
+	if (!crtc || !crtc->dev || !crtc->state) {
 		SDE_ERROR("invalid crtc\n");
 		return;
 	}
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	sde_kms = _sde_crtc_get_kms(crtc);
-	priv = sde_kms->dev->dev_private;
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, true);
+
 	mutex_lock(&sde_crtc->crtc_lock);
 	SDE_EVT32(DRMID(crtc));
 
-	if (atomic_read(&sde_crtc->vblank_refcount)) {
+	if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
 		SDE_ERROR("crtc%d invalid vblank refcount\n",
 				crtc->base.id);
 		SDE_EVT32(DRMID(crtc));
@@ -1164,7 +1299,6 @@
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_mixer *lm;
 	struct drm_display_mode *mode;
-	struct sde_hw_mixer_cfg cfg;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -1195,11 +1329,11 @@
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		lm = mixer[i].hw_lm;
-		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
-		cfg.out_height = mode->vdisplay;
-		cfg.right_mixer = (i == 0) ? false : true;
-		cfg.flags = 0;
-		lm->ops.setup_mixer_out(lm, &cfg);
+		lm->cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+		lm->cfg.out_height = mode->vdisplay;
+		lm->cfg.right_mixer = (i == 0) ? false : true;
+		lm->cfg.flags = 0;
+		lm->ops.setup_mixer_out(lm, &lm->cfg);
 	}
 }
 
@@ -1471,40 +1605,36 @@
 
 int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
 {
-	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-	struct drm_encoder *encoder;
-	struct drm_device *dev = crtc->dev;
+	struct sde_crtc *sde_crtc;
+	int rc = 0;
 
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
 	if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
 		SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, true);
 	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
 		SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
-		return -EINVAL;
+		rc = -EINVAL;
 	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
 		SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
+		if (!sde_crtc->suspend)
+			_sde_crtc_vblank_enable_nolock(sde_crtc, false);
 	} else {
 		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
 				crtc->base.id,
 				en ? "enable" : "disable",
 				atomic_read(&sde_crtc->vblank_refcount));
-		return 0;
 	}
 
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
-
-		SDE_EVT32(DRMID(crtc), en);
-
-		if (en)
-			sde_encoder_register_vblank_callback(encoder,
-					sde_crtc_vblank_cb, (void *)crtc);
-		else
-			sde_encoder_register_vblank_callback(encoder, NULL,
-					NULL);
-	}
-
-	return 0;
+	mutex_unlock(&sde_crtc->crtc_lock);
+	return rc;
 }
 
 void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -1964,6 +2094,100 @@
 	.atomic_flush = sde_crtc_atomic_flush,
 };
 
+static void _sde_crtc_event_cb(struct kthread_work *work)
+{
+	struct sde_crtc_event *event;
+	struct sde_crtc *sde_crtc;
+	unsigned long irq_flags;
+
+	if (!work) {
+		SDE_ERROR("invalid work item\n");
+		return;
+	}
+
+	event = container_of(work, struct sde_crtc_event, kt_work);
+	if (event->cb_func)
+		event->cb_func(event->usr);
+
+	/* set sde_crtc to NULL for static work structures */
+	sde_crtc = event->sde_crtc;
+	if (!sde_crtc)
+		return;
+
+	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
+	list_add_tail(&event->list, &sde_crtc->event_free_list);
+	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
+}
+
+int sde_crtc_event_queue(struct drm_crtc *crtc,
+		void (*func)(void *usr), void *usr)
+{
+	unsigned long irq_flags;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_event *event = NULL;
+
+	if (!crtc || !func)
+		return -EINVAL;
+	sde_crtc = to_sde_crtc(crtc);
+
+	/*
+	 * Obtain an event struct from the private cache. This event
+	 * queue may be called from ISR contexts, so use a private
+	 * cache to avoid calling any memory allocation functions.
+	 */
+	spin_lock_irqsave(&sde_crtc->event_lock, irq_flags);
+	if (!list_empty(&sde_crtc->event_free_list)) {
+		event = list_first_entry(&sde_crtc->event_free_list,
+				struct sde_crtc_event, list);
+		list_del_init(&event->list);
+	}
+	spin_unlock_irqrestore(&sde_crtc->event_lock, irq_flags);
+
+	if (!event)
+		return -ENOMEM;
+
+	/* populate event node */
+	event->sde_crtc = sde_crtc;
+	event->cb_func = func;
+	event->usr = usr;
+
+	/* queue new event request */
+	kthread_init_work(&event->kt_work, _sde_crtc_event_cb);
+	kthread_queue_work(&sde_crtc->event_worker, &event->kt_work);
+
+	return 0;
+}
+
+static int _sde_crtc_init_events(struct sde_crtc *sde_crtc)
+{
+	int i, rc = 0;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	spin_lock_init(&sde_crtc->event_lock);
+
+	INIT_LIST_HEAD(&sde_crtc->event_free_list);
+	for (i = 0; i < SDE_CRTC_MAX_EVENT_COUNT; ++i)
+		list_add_tail(&sde_crtc->event_cache[i].list,
+				&sde_crtc->event_free_list);
+
+	kthread_init_worker(&sde_crtc->event_worker);
+	sde_crtc->event_thread = kthread_run(kthread_worker_fn,
+			&sde_crtc->event_worker, "crtc_event:%d",
+			sde_crtc->base.base.id);
+
+	if (IS_ERR_OR_NULL(sde_crtc->event_thread)) {
+		SDE_ERROR("failed to create event thread\n");
+		rc = PTR_ERR(sde_crtc->event_thread);
+		sde_crtc->event_thread = NULL;
+	}
+
+	return rc;
+}
+
 /* initialize crtc */
 struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 {
@@ -1971,7 +2195,7 @@
 	struct sde_crtc *sde_crtc = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct sde_kms *kms = NULL;
-	int i;
+	int i, rc;
 
 	priv = dev->dev_private;
 	kms = to_sde_kms(priv->kms);
@@ -1984,6 +2208,7 @@
 	crtc->dev = dev;
 	atomic_set(&sde_crtc->vblank_refcount, 0);
 
+	mutex_init(&sde_crtc->crtc_lock);
 	spin_lock_init(&sde_crtc->spin_lock);
 	atomic_set(&sde_crtc->frame_pending, 0);
 
@@ -2005,8 +2230,15 @@
 	/* save user friendly CRTC name for later */
 	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
 
+	/* initialize event handling */
+	rc = _sde_crtc_init_events(sde_crtc);
+	if (rc) {
+		drm_crtc_cleanup(crtc);
+		kfree(sde_crtc);
+		return ERR_PTR(rc);
+	}
+
 	/* initialize output fence support */
-	mutex_init(&sde_crtc->crtc_lock);
 	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
 
 	/* create CRTC properties */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 19dd005..3d95799 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -19,6 +19,7 @@
 #ifndef _SDE_CRTC_H_
 #define _SDE_CRTC_H_
 
+#include <linux/kthread.h>
 #include "drm_crtc.h"
 #include "msm_prop.h"
 #include "sde_fence.h"
@@ -79,6 +80,28 @@
 };
 
 /**
+ * struct sde_crtc_event - event callback tracking structure
+ * @list:     Linked list tracking node
+ * @kt_work:  Kthread worker structure
+ * @sde_crtc: Pointer to associated sde_crtc structure
+ * @cb_func:  Pointer to callback function
+ * @usr:      Pointer to user data to be provided to the callback
+ */
+struct sde_crtc_event {
+	struct list_head list;
+	struct kthread_work kt_work;
+	void *sde_crtc;
+
+	void (*cb_func)(void *usr);
+	void *usr;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define SDE_CRTC_MAX_EVENT_COUNT	16
+
+/**
  * struct sde_crtc - virtualized CRTC data structure
  * @base          : Base drm crtc structure
  * @name          : ASCII description of this crtc
@@ -97,14 +120,22 @@
  * @vblank_cb_count : count of vblank callback since last reset
  * @vblank_cb_time  : ktime at vblank count reset
  * @vblank_refcount : reference count for vblank enable request
+ * @suspend         : whether or not a suspend operation is in progress
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
  * @crtc_lock     : crtc lock around create, destroy and access.
  * @frame_pending : Whether or not an update is pending
  * @frame_events  : static allocation of in-flight frame events
  * @frame_event_list : available frame event list
  * @spin_lock     : spin lock for frame event, transaction status, etc...
+ * @event_thread  : Pointer to event handler thread
+ * @event_worker  : Event worker queue
+ * @event_cache   : Local cache of event worker structures
+ * @event_free_list : List of available event structures
+ * @event_lock    : Spinlock around event handling code
  */
 struct sde_crtc {
 	struct drm_crtc base;
@@ -131,10 +162,13 @@
 	u32 vblank_cb_count;
 	ktime_t vblank_cb_time;
 	atomic_t vblank_refcount;
+	bool suspend;
 
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
 
 	struct mutex crtc_lock;
 
@@ -142,6 +176,13 @@
 	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
 	struct list_head frame_event_list;
 	spinlock_t spin_lock;
+
+	/* for handling internal event thread */
+	struct task_struct *event_thread;
+	struct kthread_worker event_worker;
+	struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT];
+	struct list_head event_free_list;
+	spinlock_t event_lock;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -308,4 +349,14 @@
 	return crtc ? crtc->enabled : false;
 }
 
+/**
+ * sde_crtc_event_queue - request event callback
+ * @crtc: Pointer to drm crtc structure
+ * @func: Pointer to callback function
+ * @usr: Pointer to user data to be passed to callback
+ * Returns: Zero on success
+ */
+int sde_crtc_event_queue(struct drm_crtc *crtc,
+		void (*func)(void *usr), void *usr);
+
 #endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index a845f4d..7db44d3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -33,8 +33,8 @@
 #include "sde_encoder_phys.h"
 #include "sde_color_processing.h"
 #include "sde_rsc.h"
-
 #include "sde_power_handle.h"
+#include "sde_hw_dsc.h"
 
 #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
@@ -72,6 +72,7 @@
  *			Only valid after enable. Cleared as disable.
  * @hw_pp		Handle to the pingpong blocks used for the display. No.
  *                      pingpong blocks can be different than num_phys_encs.
+ * @hw_dsc:		Array of DSC block handles used for the display.
  * @crtc_vblank_cb:	Callback into the upper layer / CRTC for
  *			notification of the VBLANK
  * @crtc_vblank_cb_data:	Data from upper layer for VBLANK notification
@@ -101,6 +102,7 @@
 	struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
 	struct sde_encoder_phys *cur_master;
 	struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+	struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
 
 	void (*crtc_vblank_cb)(void *);
 	void *crtc_vblank_cb_data;
@@ -122,6 +124,13 @@
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
 
+inline bool _sde_is_dsc_enabled(struct sde_encoder_virt *sde_enc)
+{
+	struct msm_compression_info *comp_info = &sde_enc->disp_info.comp_info;
+
+	return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
+}
+
 void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
 		struct sde_encoder_hw_resources *hw_res,
 		struct drm_connector_state *conn_state)
@@ -142,6 +151,9 @@
 	memset(hw_res, 0, sizeof(*hw_res));
 	hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
 
+	if (_sde_is_dsc_enabled(sde_enc))
+		hw_res->needs_dsc = true;
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
@@ -298,6 +310,319 @@
 	return ret;
 }
 
+static int _sde_encoder_dsc_update_pic_dim(struct msm_display_dsc_info *dsc,
+		int pic_width, int pic_height)
+{
+	if (!dsc || !pic_width || !pic_height) {
+		SDE_ERROR("invalid input: pic_width=%d pic_height=%d\n",
+			pic_width, pic_height);
+		return -EINVAL;
+	}
+
+	if ((pic_width % dsc->slice_width) ||
+	    (pic_height % dsc->slice_height)) {
+		SDE_ERROR("pic_dim=%dx%d has to be multiple of slice=%dx%d\n",
+			pic_width, pic_height,
+			dsc->slice_width, dsc->slice_height);
+		return -EINVAL;
+	}
+
+	dsc->pic_width = pic_width;
+	dsc->pic_height = pic_height;
+
+	return 0;
+}
+
+static void _sde_encoder_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc,
+		int intf_width)
+{
+	int slice_per_pkt, slice_per_intf;
+	int bytes_in_slice, total_bytes_per_intf;
+
+	if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
+	    (intf_width < dsc->slice_width)) {
+		SDE_ERROR("invalid input: intf_width=%d slice_width=%d\n",
+			intf_width, dsc ? dsc->slice_width : -1);
+		return;
+	}
+
+	slice_per_pkt = dsc->slice_per_pkt;
+	slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
+
+	/*
+	 * If slice_per_pkt is greater than slice_per_intf then default to 1.
+	 * This can happen during partial update.
+	 */
+	if (slice_per_pkt > slice_per_intf)
+		slice_per_pkt = 1;
+
+	bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
+	total_bytes_per_intf = bytes_in_slice * slice_per_intf;
+
+	dsc->eol_byte_num = total_bytes_per_intf % 3;
+	dsc->pclk_per_line =  DIV_ROUND_UP(total_bytes_per_intf, 3);
+	dsc->bytes_in_slice = bytes_in_slice;
+	dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
+	dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
+}
+
+static int _sde_encoder_dsc_initial_line_calc(struct msm_display_dsc_info *dsc,
+		int enc_ip_width)
+{
+	int ssm_delay, total_pixels, soft_slice_per_enc;
+
+	soft_slice_per_enc = enc_ip_width / dsc->slice_width;
+
+	/*
+	 * minimum number of initial line pixels is a sum of:
+	 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
+	 *    91 for 10 bpc) * 3
+	 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
+	 * 3. the initial xmit delay
+	 * 4. total pipeline delay through the "lock step" of encoder (47)
+	 * 5. 6 additional pixels as the output of the rate buffer is
+	 *    48 bits wide
+	 */
+	ssm_delay = ((dsc->bpc < 10) ? 84 : 92);
+	total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
+	if (soft_slice_per_enc > 1)
+		total_pixels += (ssm_delay * 3);
+	dsc->initial_lines = DIV_ROUND_UP(total_pixels, dsc->slice_width);
+	return 0;
+}
+
+static bool _sde_encoder_dsc_ich_reset_override_needed(bool pu_en,
+		struct msm_display_dsc_info *dsc)
+{
+	/*
+	 * As per the DSC spec, ICH_RESET can be either end of the slice line
+	 * or at the end of the slice. HW internally generates ich_reset at
+	 * end of the slice line if DSC_MERGE is used or encoder has two
+	 * soft slices. However, if encoder has only 1 soft slice and DSC_MERGE
+	 * is not used then it will generate ich_reset at the end of slice.
+	 *
+	 * Now as per the spec, during one PPS session, position where
+	 * ich_reset is generated should not change. Now if full-screen frame
+	 * has more than 1 soft slice then HW will automatically generate
+	 * ich_reset at the end of slice_line. But for the same panel, if
+	 * partial frame is enabled and only 1 encoder is used with 1 slice,
+	 * then HW will generate ich_reset at end of the slice. This is a
+	 * mismatch. Prevent this by overriding HW's decision.
+	 */
+	return pu_en && dsc && (dsc->full_frame_slices > 1) &&
+		(dsc->slice_width == dsc->pic_width);
+}
+
+static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
+		struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
+		u32 common_mode, bool ich_reset)
+{
+	if (hw_dsc->ops.dsc_config)
+		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
+
+	if (hw_dsc->ops.dsc_config_thresh)
+		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
+
+	if (hw_pp->ops.setup_dsc)
+		hw_pp->ops.setup_dsc(hw_pp);
+
+	if (hw_pp->ops.enable_dsc)
+		hw_pp->ops.enable_dsc(hw_pp);
+}
+
+static int _sde_encoder_dsc_1_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
+{
+	int pic_width, pic_height;
+	int this_frame_slices;
+	int intf_ip_w, enc_ip_w;
+	int ich_res, dsc_common_mode = 0;
+
+	struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
+	struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
+	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
+	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	struct msm_display_dsc_info *dsc =
+		&sde_enc->disp_info.comp_info.dsc_info;
+
+	if (dsc == NULL || hw_dsc == NULL || hw_pp == NULL ||
+						hw_mdp_top == NULL) {
+		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+		return -EINVAL;
+	}
+
+	pic_width = dsc->pic_width;
+	pic_height = dsc->pic_height;
+
+	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	enc_ip_w = intf_ip_w;
+	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
+
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode = DSC_MODE_VIDEO;
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+		pic_width, pic_height, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
+			dsc_common_mode);
+
+	_sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
+			ich_res);
+
+	return 0;
+}
+/*
+ * _sde_encoder_dsc_2_lm_2_enc_2_intf - program DSC for the split-panel
+ *	2 layer-mixer / 2 DSC encoder / 2 interface topology.
+ * @sde_enc: Pointer to virtual encoder structure
+ * Return: 0 on success, -EINVAL when a required hw block is missing
+ */
+static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc)
+{
+	int pic_width, pic_height;
+	int this_frame_slices;
+	int intf_ip_w, enc_ip_w;
+	int ich_res, dsc_common_mode;
+
+	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
+	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
+	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
+	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
+	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
+	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	struct msm_display_dsc_info *dsc =
+		&sde_enc->disp_info.comp_info.dsc_info;
+
+	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
+		l_hw_pp == NULL || r_hw_pp == NULL) {
+		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+		return -EINVAL;
+	}
+
+	/* total picture width spans all horizontal tiles of the panel */
+	pic_width = dsc->pic_width * sde_enc->display_num_of_h_tiles;
+	pic_height = dsc->pic_height;
+
+	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+
+	/* each of the two interfaces carries half of the frame */
+	intf_ip_w /= 2;
+	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	enc_ip_w = intf_ip_w;
+	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
+
+	dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+		pic_width, pic_height, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
+			dsc_common_mode);
+
+	/* mirror the same DSC config onto the left and right pipes */
+	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
+			ich_res);
+	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
+			ich_res);
+
+	return 0;
+}
+
+/*
+ * _sde_encoder_dsc_2_lm_2_enc_1_intf - program DSC for the merged
+ *	2 layer-mixer / 2 DSC encoder / 1 interface topology.
+ * @sde_enc: Pointer to virtual encoder structure
+ * Return: 0 on success, -EINVAL when a required hw block is missing
+ */
+static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc)
+{
+	int pic_width, pic_height;
+	int this_frame_slices;
+	int intf_ip_w, enc_ip_w;
+	int ich_res, dsc_common_mode;
+
+	struct sde_encoder_phys *enc_master = sde_enc->cur_master;
+	struct sde_hw_dsc *l_hw_dsc = sde_enc->hw_dsc[0];
+	struct sde_hw_dsc *r_hw_dsc = sde_enc->hw_dsc[1];
+	struct sde_hw_pingpong *l_hw_pp = sde_enc->hw_pp[0];
+	struct sde_hw_pingpong *r_hw_pp = sde_enc->hw_pp[1];
+	struct sde_hw_mdp *hw_mdp_top  = enc_master->hw_mdptop;
+	struct msm_display_dsc_info *dsc =
+		&sde_enc->disp_info.comp_info.dsc_info;
+
+	if (l_hw_dsc == NULL || r_hw_dsc == NULL || hw_mdp_top == NULL ||
+					l_hw_pp == NULL || r_hw_pp == NULL) {
+		SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
+		return -EINVAL;
+	}
+
+	pic_width = dsc->pic_width;
+	pic_height = dsc->pic_height;
+	_sde_encoder_dsc_update_pic_dim(dsc, pic_width, pic_height);
+
+	this_frame_slices = pic_width / dsc->slice_width;
+	intf_ip_w = this_frame_slices * dsc->slice_width;
+	_sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
+
+	/*
+	 * when using 2 encoders for the same stream, no. of slices
+	 * need to be same on both the encoders.
+	 */
+	enc_ip_w = intf_ip_w / 2;
+	_sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
+
+	ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
+
+	/* multiplex two encoder outputs onto the single interface */
+	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+	if (enc_master->intf_mode == INTF_MODE_VIDEO)
+		dsc_common_mode |= DSC_MODE_VIDEO;
+
+	SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
+		pic_width, pic_height, dsc_common_mode);
+	SDE_EVT32(DRMID(&sde_enc->base), pic_width, pic_height,
+			dsc_common_mode);
+
+	_sde_encoder_dsc_pipe_cfg(l_hw_dsc, l_hw_pp, dsc, dsc_common_mode,
+			ich_res);
+	_sde_encoder_dsc_pipe_cfg(r_hw_dsc, r_hw_pp, dsc, dsc_common_mode,
+			ich_res);
+
+	return 0;
+}
+
+/*
+ * _sde_encoder_dsc_setup - dispatch DSC programming based on the
+ *	topology currently assigned to this encoder's connector.
+ * @sde_enc: Pointer to virtual encoder structure
+ * Return: 0 on success, -EINVAL for unknown/unsupported topologies
+ */
+static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc)
+{
+	enum sde_rm_topology_name topology;
+	struct drm_connector *drm_conn = sde_enc->phys_encs[0]->connector;
+	int ret = 0;
+
+	topology = sde_connector_get_topology_name(drm_conn);
+	if (topology == SDE_RM_TOPOLOGY_UNKNOWN) {
+		SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(&sde_enc->base));
+
+	switch (topology) {
+	case SDE_RM_TOPOLOGY_SINGLEPIPE:
+		ret = _sde_encoder_dsc_1_lm_1_enc_1_intf(sde_enc);
+		break;
+	case SDE_RM_TOPOLOGY_DUALPIPEMERGE:
+		ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc);
+		break;
+	case SDE_RM_TOPOLOGY_DUALPIPE:
+		ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc);
+		break;
+	case SDE_RM_TOPOLOGY_PPSPLIT:
+	default:
+		SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
+				topology);
+		return -EINVAL;
+	}; /* NOTE(review): stray ';' after switch - harmless null stmt */
+
+	return ret;
+}
+
 static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 				      struct drm_display_mode *mode,
 				      struct drm_display_mode *adj_mode)
@@ -307,7 +632,7 @@
 	struct sde_kms *sde_kms;
 	struct list_head *connector_list;
 	struct drm_connector *conn = NULL, *conn_iter;
-	struct sde_rm_hw_iter pp_iter;
+	struct sde_rm_hw_iter dsc_iter, pp_iter;
 	int i = 0, ret;
 
 	if (!drm_enc) {
@@ -353,6 +678,14 @@
 		sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
 	}
 
+	sde_rm_init_hw_iter(&dsc_iter, drm_enc->base.id, SDE_HW_BLK_DSC);
+	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+		sde_enc->hw_dsc[i] = NULL;
+		if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
+			break;
+		sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
+	}
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
 
@@ -376,6 +709,7 @@
 	struct msm_drm_private *priv;
 	struct sde_kms *sde_kms;
 	int i = 0;
+	int ret = 0;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -417,6 +751,13 @@
 		SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
 	else if (sde_enc->cur_master->ops.enable)
 		sde_enc->cur_master->ops.enable(sde_enc->cur_master);
+
+	if (_sde_is_dsc_enabled(sde_enc)) {
+		ret = _sde_encoder_dsc_setup(sde_enc);
+		if (ret)
+			SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n",
+					ret);
+	}
 }
 
 static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -562,6 +903,7 @@
 	enum sde_rsc_state rsc_state;
 	struct sde_rsc_cmd_config rsc_config;
 	int ret;
+	struct msm_display_info *disp_info;
 
 	if (!drm_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -569,18 +911,25 @@
 	}
 
 	sde_enc = to_sde_encoder_virt(drm_enc);
-	if (!sde_enc->disp_info.is_primary)
-		return NULL;
+	disp_info = &sde_enc->disp_info;
 
+	/**
+	 * only primary command mode panel can request CMD state.
+	 * all other panels/displays can request for VID state including
+	 * secondary command mode panel.
+	 */
 	rsc_state = enable ?
-		(sde_enc->disp_info.capabilities & MSM_DISPLAY_CAP_CMD_MODE ?
-		SDE_RSC_CMD_STATE : SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
+		(((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) &&
+		  disp_info->is_primary) ? SDE_RSC_CMD_STATE :
+		SDE_RSC_VID_STATE) : SDE_RSC_IDLE_STATE;
 
-	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update) {
-		rsc_config.fps = sde_enc->disp_info.frame_rate;
-		rsc_config.vtotal = sde_enc->disp_info.vtotal;
-		rsc_config.prefill_lines = sde_enc->disp_info.prefill_lines;
-		rsc_config.jitter = sde_enc->disp_info.jitter;
+	if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_update
+					&& disp_info->is_primary) {
+		rsc_config.fps = disp_info->frame_rate;
+		rsc_config.vtotal = disp_info->vtotal;
+		rsc_config.prefill_lines = disp_info->prefill_lines;
+		rsc_config.jitter = disp_info->jitter;
+		/* update it only once */
 		sde_enc->rsc_state_update = true;
 
 		ret = sde_rsc_client_state_update(sde_enc->rsc_client,
@@ -595,7 +944,7 @@
 	if (ret)
 		SDE_ERROR("sde rsc client update failed ret:%d\n", ret);
 
-	return sde_enc->rsc_client;
+	return sde_enc->disp_info.is_primary ? sde_enc->rsc_client : NULL;
 }
 
 void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
@@ -922,6 +1271,71 @@
 	}
 }
 
+/*
+ * sde_encoder_helper_hw_release - prepare CTL/LM hardware for release
+ *	during encoder disable: clear pending flushes and blend stages,
+ *	flush each layer mixer, and optionally retarget the mixer output
+ *	to the size of the given frame buffer.
+ * @phys_enc: Pointer to physical encoder structure
+ * @fb: Optional fb for specifying new mixer output resolution, may be NULL
+ * Return: 0 on success, -EINVAL on bad args, -EFAULT when no LM found
+ */
+int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb)
+{
+	struct drm_encoder *drm_enc;
+	struct sde_hw_mixer_cfg mixer;
+	struct sde_rm_hw_iter lm_iter;
+	bool lm_valid = false;
+
+	if (!phys_enc || !phys_enc->parent) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	drm_enc = phys_enc->parent;
+	memset(&mixer, 0, sizeof(mixer));
+
+	/*
+	 * reset associated CTL/LMs
+	 * NOTE(review): phys_enc->hw_ctl is dereferenced without a NULL
+	 * check here - presumed valid at this stage; confirm with callers.
+	 */
+	if (phys_enc->hw_ctl->ops.clear_pending_flush)
+		phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
+	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
+		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
+
+	sde_rm_init_hw_iter(&lm_iter, drm_enc->base.id, SDE_HW_BLK_LM);
+	while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &lm_iter)) {
+		struct sde_hw_mixer *hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
+
+		if (!hw_lm)
+			continue;
+
+		/* need to flush LM to remove it */
+		if (phys_enc->hw_ctl->ops.get_bitmask_mixer &&
+				phys_enc->hw_ctl->ops.update_pending_flush)
+			phys_enc->hw_ctl->ops.update_pending_flush(
+					phys_enc->hw_ctl,
+					phys_enc->hw_ctl->ops.get_bitmask_mixer(
+					phys_enc->hw_ctl, hw_lm->idx));
+
+		if (fb) {
+			/* assume a single LM if targeting a frame buffer */
+			if (lm_valid)
+				continue;
+
+			mixer.out_height = fb->height;
+			mixer.out_width = fb->width;
+
+			if (hw_lm->ops.setup_mixer_out)
+				hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
+		}
+
+		lm_valid = true;
+
+		/* only enable border color on LM */
+		if (phys_enc->hw_ctl->ops.setup_blendstage)
+			phys_enc->hw_ctl->ops.setup_blendstage(
+					phys_enc->hw_ctl,
+					hw_lm->idx, 0, 0);
+	}
+
+	if (!lm_valid) {
+		SDE_DEBUG_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
 static int _sde_encoder_status_show(struct seq_file *s, void *data)
 {
 	struct sde_encoder_virt *sde_enc;
@@ -1285,6 +1699,8 @@
 
 	SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
 
+	phys_params.comp_type = disp_info->comp_info.comp_type;
+
 	mutex_lock(&sde_enc->enc_lock);
 	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
 		/*
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index e0c28b5..bd7ef69 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -33,12 +33,14 @@
  * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
  * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
  * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @needs_dsc:	Request to allocate DSC block
  * @display_num_of_h_tiles:
  */
 struct sde_encoder_hw_resources {
 	enum sde_intf_mode intfs[INTF_MAX];
 	enum sde_intf_mode wbs[WB_MAX];
 	bool needs_cdm;
+	bool needs_dsc;
 	u32 display_num_of_h_tiles;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 22cec11..6d50c53 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -51,6 +51,8 @@
 
 /**
  * enum sde_enc_enable_state - current enabled state of the physical encoder
+ * @SDE_ENC_DISABLING:	Encoder transitioning to disable state
+ *			Events bounding transition are encoder type specific
  * @SDE_ENC_DISABLED:	Encoder is disabled
  * @SDE_ENC_ENABLING:	Encoder transitioning to enabled
  *			Events bounding transition are encoder type specific
@@ -59,6 +61,7 @@
  *				to recover from a previous error
  */
 enum sde_enc_enable_state {
+	SDE_ENC_DISABLING,
 	SDE_ENC_DISABLED,
 	SDE_ENC_ENABLING,
 	SDE_ENC_ENABLED,
@@ -185,6 +188,7 @@
  * @split_role:		Role to play in a split-panel configuration
  * @intf_mode:		Interface mode
  * @intf_idx:		Interface index on sde hardware
+ * @comp_type:      Type of compression supported
  * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  * @enable_state:	Enable state tracking
  * @vblank_refcount:	Reference count of vblank request
@@ -212,6 +216,7 @@
 	enum sde_enc_split_role split_role;
 	enum sde_intf_mode intf_mode;
 	enum sde_intf intf_idx;
+	enum msm_display_compression_type comp_type;
 	spinlock_t *enc_spinlock;
 	enum sde_enc_enable_state enable_state;
 	atomic_t vblank_refcount;
@@ -284,6 +289,8 @@
  * @wb_dev:		Pointer to writeback device
  * @start_time:		Start time of writeback latest request
  * @end_time:		End time of writeback latest request
+ * @bo_disable:		Buffer object(s) to use during the disabling state
+ * @fb_disable:		Frame buffer to use during the disabling state
  */
 struct sde_encoder_phys_wb {
 	struct sde_encoder_phys base;
@@ -303,6 +310,8 @@
 	struct sde_wb_device *wb_dev;
 	ktime_t start_time;
 	ktime_t end_time;
+	struct drm_gem_object *bo_disable[SDE_MAX_PLANES];
+	struct drm_framebuffer *fb_disable;
 };
 
 /**
@@ -313,6 +322,7 @@
  * @split_role:		Role to play in a split-panel configuration
  * @intf_idx:		Interface index this phys_enc will control
  * @wb_idx:		Writeback index this phys_enc will control
+ * @comp_type:      Type of compression supported
  * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
  */
 struct sde_enc_phys_init_params {
@@ -322,6 +332,7 @@
 	enum sde_enc_split_role split_role;
 	enum sde_intf intf_idx;
 	enum sde_wb wb_idx;
+	enum msm_display_compression_type comp_type;
 	spinlock_t *enc_spinlock;
 };
 
@@ -402,9 +413,13 @@
 {
 	enum sde_rm_topology_name topology;
 
+	if (!phys_enc || phys_enc->enable_state == SDE_ENC_DISABLING)
+		return BLEND_3D_NONE;
+
 	topology = sde_connector_get_topology_name(phys_enc->connector);
 	if (phys_enc->split_role == ENC_ROLE_SOLO &&
-			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE)
+			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE &&
+			phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_NONE)
 		return BLEND_3D_H_ROW_INT;
 
 	return BLEND_3D_NONE;
@@ -421,4 +436,13 @@
 		struct sde_encoder_phys *phys_enc,
 		enum sde_intf interface);
 
+/**
+ * sde_encoder_helper_hw_release - prepare for h/w reset during disable
+ * @phys_enc: Pointer to physical encoder structure
+ * @fb: Optional fb for specifying new mixer output resolution, may be NULL
+ * Return: Zero on success
+ */
+int sde_encoder_helper_hw_release(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb);
+
 #endif /* __sde_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 85fbe4f..afc21ed 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -182,7 +182,7 @@
 				atomic_read(&phys_enc->pending_kickoff_cnt));
 
 		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
-				"dsi1_phy", "vbif", "vbif_nrt", "dbg_bus",
+				"dsi1_phy", "vbif", "dbg_bus",
 				"vbif_dbg_bus", "panic");
 	}
 
@@ -756,6 +756,7 @@
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	cmd_enc->stream_sel = 0;
 	phys_enc->enable_state = SDE_ENC_DISABLED;
+	phys_enc->comp_type = p->comp_type;
 	for (i = 0; i < INTR_IDX_MAX; i++)
 		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index f61077a..01dd982 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -15,6 +15,7 @@
 #include "sde_hw_interrupts.h"
 #include "sde_core_irq.h"
 #include "sde_formats.h"
+#include "dsi_display.h"
 
 #define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
 		(e) && (e)->base.parent ? \
@@ -58,6 +59,9 @@
 	 * <---------------------------- [hv]total ------------->
 	 */
 	timing->width = mode->hdisplay;	/* active width */
+	if (vid_enc->base.comp_type == MSM_DISPLAY_COMPRESSION_DSC)
+		timing->width = DIV_ROUND_UP(timing->width, 3);
+
 	timing->height = mode->vdisplay;	/* active height */
 	timing->xres = timing->width;
 	timing->yres = timing->height;
@@ -878,6 +882,7 @@
 	phys_enc->split_role = p->split_role;
 	phys_enc->intf_mode = INTF_MODE_VIDEO;
 	phys_enc->enc_spinlock = p->enc_spinlock;
+	phys_enc->comp_type = p->comp_type;
 	for (i = 0; i < INTR_IDX_MAX; i++)
 		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
 	atomic_set(&phys_enc->vblank_refcount, 0);
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 75eddc0..5187627 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -9,7 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -380,7 +379,7 @@
 	}
 
 	if (SDE_FORMAT_IS_UBWC(fmt) &&
-			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
+			!(wb_cfg->features & BIT(SDE_WB_UBWC))) {
 		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
 		return -EINVAL;
 	}
@@ -434,10 +433,10 @@
 }
 
 /**
- * sde_encoder_phys_wb_flush - flush hardware update
+ * _sde_encoder_phys_wb_update_flush - flush hardware update
  * @phys_enc:	Pointer to physical encoder
  */
-static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc)
+static void _sde_encoder_phys_wb_update_flush(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
 	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
@@ -461,7 +460,10 @@
 	if (hw_ctl->ops.update_pending_flush)
 		hw_ctl->ops.update_pending_flush(hw_ctl, flush_mask);
 
-	SDE_DEBUG("Flushing CTL_ID %d, flush_mask %x, WB %d\n",
+	if (hw_ctl->ops.get_pending_flush)
+		flush_mask = hw_ctl->ops.get_pending_flush(hw_ctl);
+
+	SDE_DEBUG("Pending flush mask for CTL_%d is 0x%x, WB %d\n",
 			hw_ctl->idx - CTL_0, flush_mask, hw_wb->idx - WB_0);
 }
 
@@ -484,7 +486,15 @@
 
 	memset(wb_roi, 0, sizeof(struct sde_rect));
 
-	fb = sde_wb_get_output_fb(wb_enc->wb_dev);
+	if (phys_enc->enable_state == SDE_ENC_DISABLING) {
+		fb = wb_enc->fb_disable;
+		wb_roi->w = 0;
+		wb_roi->h = 0;
+	} else {
+		fb = sde_wb_get_output_fb(wb_enc->wb_dev);
+		sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
+	}
+
 	if (!fb) {
 		SDE_DEBUG("no output framebuffer\n");
 		return;
@@ -493,7 +503,6 @@
 	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
 			fb->width, fb->height);
 
-	sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
 	if (wb_roi->w == 0 || wb_roi->h == 0) {
 		wb_roi->x = 0;
 		wb_roi->y = 0;
@@ -564,6 +573,10 @@
 	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
 			wb_enc->frame_count);
 
+	/* don't notify upper layer for internal commit */
+	if (phys_enc->enable_state == SDE_ENC_DISABLING)
+		goto complete;
+
 	if (phys_enc->parent_ops.handle_frame_done)
 		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
 				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
@@ -571,6 +584,7 @@
 	phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 			phys_enc);
 
+complete:
 	complete_all(&wb_enc->wbdone_complete);
 }
 
@@ -700,8 +714,10 @@
 	u32 timeout = max_t(u32, wb_enc->wbdone_timeout, KICKOFF_TIMEOUT_MS);
 
 	/* Return EWOULDBLOCK since we know the wait isn't necessary */
-	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("encoder already disabled\n");
 		return -EWOULDBLOCK;
+	}
 
 	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count);
 
@@ -784,7 +800,7 @@
 	/* set OT limit & enable traffic shaper */
 	sde_encoder_phys_wb_setup(phys_enc);
 
-	sde_encoder_phys_wb_flush(phys_enc);
+	_sde_encoder_phys_wb_update_flush(phys_enc);
 
 	/* vote for iommu/clk/bus */
 	wb_enc->start_time = ktime_get();
@@ -807,6 +823,111 @@
 }
 
 /**
+ * _sde_encoder_phys_wb_init_internal_fb - create fb for internal commit
+ * @wb_enc:		Pointer to writeback encoder
+ * @pixel_format:	DRM pixel format
+ * @width:		Desired fb width
+ * @height:		Desired fb height
+ */
+static int _sde_encoder_phys_wb_init_internal_fb(
+		struct sde_encoder_phys_wb *wb_enc,
+		uint32_t pixel_format, uint32_t width, uint32_t height)
+{
+	struct drm_device *dev;
+	struct drm_framebuffer *fb;
+	struct drm_mode_fb_cmd2 mode_cmd;
+	uint32_t size;
+	int nplanes, i, ret;
+
+	if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	dev = wb_enc->base.sde_kms->dev;
+	if (!dev) {
+		SDE_ERROR("invalid dev\n");
+		return -EINVAL;
+	}
+
+	memset(&mode_cmd, 0, sizeof(mode_cmd));
+	mode_cmd.pixel_format = pixel_format;
+	mode_cmd.width = width;
+	mode_cmd.height = height;
+
+	/* linear layout: no modifiers passed */
+	size = sde_format_get_framebuffer_size(pixel_format,
+			mode_cmd.width, mode_cmd.height, 0, 0);
+	if (!size) {
+		SDE_DEBUG("not creating zero size buffer\n");
+		return -EINVAL;
+	}
+
+	/* allocate gem tracking object */
+	nplanes = drm_format_num_planes(pixel_format);
+	if (nplanes > SDE_MAX_PLANES) {
+		SDE_ERROR("requested format has too many planes\n");
+		return -EINVAL;
+	}
+	mutex_lock(&dev->struct_mutex);
+	wb_enc->bo_disable[0] = msm_gem_new(dev, size,
+			MSM_BO_SCANOUT | MSM_BO_WC);
+	mutex_unlock(&dev->struct_mutex);
+
+	/*
+	 * NOTE(review): if msm_gem_new() returns NULL (rather than an
+	 * ERR_PTR), PTR_ERR(NULL) is 0 and this path returns "success"
+	 * with bo_disable[0] == NULL - verify msm_gem_new's contract.
+	 */
+	if (IS_ERR_OR_NULL(wb_enc->bo_disable[0])) {
+		ret = PTR_ERR(wb_enc->bo_disable[0]);
+		wb_enc->bo_disable[0] = NULL;
+
+		SDE_ERROR("failed to create bo, %d\n", ret);
+		return ret;
+	}
+
+	/* all planes share the single bo; pitches derived per plane */
+	for (i = 0; i < nplanes; ++i) {
+		wb_enc->bo_disable[i] = wb_enc->bo_disable[0];
+		mode_cmd.pitches[i] = width *
+			drm_format_plane_cpp(pixel_format, i);
+	}
+
+	fb = msm_framebuffer_init(dev, &mode_cmd, wb_enc->bo_disable);
+	if (IS_ERR_OR_NULL(fb)) {
+		ret = PTR_ERR(fb);
+		drm_gem_object_unreference(wb_enc->bo_disable[0]);
+		wb_enc->bo_disable[0] = NULL;
+
+		SDE_ERROR("failed to init fb, %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * prepare the backing buffer now so that it's available later
+	 * NOTE(review): on prepare failure fb is not released here -
+	 * possible leak; confirm whether destroy path covers it.
+	 */
+	ret = msm_framebuffer_prepare(fb,
+			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]);
+	if (!ret)
+		wb_enc->fb_disable = fb;
+	return ret;
+}
+
+/**
+ * _sde_encoder_phys_wb_destroy_internal_fb - deconstruct internal fb
+ * @wb_enc:		Pointer to writeback encoder
+ */
+static void _sde_encoder_phys_wb_destroy_internal_fb(
+		struct sde_encoder_phys_wb *wb_enc)
+{
+	if (!wb_enc)
+		return;
+
+	/* tear down the internal fb first, then drop the gem reference */
+	if (wb_enc->fb_disable) {
+		drm_framebuffer_unregister_private(wb_enc->fb_disable);
+		drm_framebuffer_remove(wb_enc->fb_disable);
+		wb_enc->fb_disable = NULL;
+	}
+
+	/* plane slots alias bo_disable[0]; a single unreference suffices */
+	if (wb_enc->bo_disable[0]) {
+		drm_gem_object_unreference(wb_enc->bo_disable[0]);
+		wb_enc->bo_disable[0] = NULL;
+	}
+}
+
+/**
  * sde_encoder_phys_wb_enable - enable writeback encoder
  * @phys_enc:	Pointer to physical encoder
  */
@@ -826,12 +947,7 @@
 	dev = wb_enc->base.parent->dev;
 
 	/* find associated writeback connector */
-	mutex_lock(&dev->mode_config.mutex);
-	drm_for_each_connector(connector, phys_enc->parent->dev) {
-		if (connector->encoder == phys_enc->parent)
-			break;
-	}
-	mutex_unlock(&dev->mode_config.mutex);
+	connector = phys_enc->connector;
 
 	if (!connector || connector->encoder != phys_enc->parent) {
 		SDE_ERROR("failed to find writeback connector\n");
@@ -865,11 +981,23 @@
 		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
 	}
 
-	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
-		SDE_DEBUG_DRIVER("[cdm_disable]\n");
-		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	if (!phys_enc->hw_ctl || !phys_enc->parent ||
+			!phys_enc->sde_kms || !wb_enc->fb_disable) {
+		SDE_DEBUG("invalid enc, skipping extra commit\n");
+		goto exit;
 	}
 
+	/* reset h/w before final flush */
+	if (sde_encoder_helper_hw_release(phys_enc, wb_enc->fb_disable))
+		goto exit;
+
+	phys_enc->enable_state = SDE_ENC_DISABLING;
+	sde_encoder_phys_wb_prepare_for_kickoff(phys_enc);
+	if (phys_enc->hw_ctl->ops.trigger_flush)
+		phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
+	sde_encoder_helper_trigger_start(phys_enc);
+	sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
+exit:
 	phys_enc->enable_state = SDE_ENC_DISABLED;
 }
 
@@ -971,6 +1099,8 @@
 	if (!phys_enc)
 		return;
 
+	_sde_encoder_phys_wb_destroy_internal_fb(wb_enc);
+
 	kfree(wb_enc);
 }
 
@@ -1009,8 +1139,15 @@
 
 	SDE_DEBUG("\n");
 
+	if (!p || !p->parent) {
+		SDE_ERROR("invalid params\n");
+		ret = -EINVAL;
+		goto fail_alloc;
+	}
+
 	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
 	if (!wb_enc) {
+		SDE_ERROR("failed to allocate wb enc\n");
 		ret = -ENOMEM;
 		goto fail_alloc;
 	}
@@ -1078,6 +1215,13 @@
 	phys_enc->enc_spinlock = p->enc_spinlock;
 	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
 
+	/* create internal buffer for disable logic */
+	if (_sde_encoder_phys_wb_init_internal_fb(wb_enc,
+				DRM_FORMAT_RGB888, 2, 1)) {
+		SDE_ERROR("failed to init internal fb\n");
+		goto fail_wb_init;
+	}
+
 	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
 			wb_enc->hw_wb->idx - WB_0);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 0482a65..46823b6 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -76,6 +76,10 @@
 	int fd;
 };
 
+/*
+ * sde_fence_destroy - kref release callback for the fence context.
+ * Intentionally empty: the context's storage is managed elsewhere,
+ * this only balances kref_put() bookkeeping.
+ */
+static void sde_fence_destroy(struct kref *kref)
+{
+}
+
 static inline struct sde_fence *to_sde_fence(struct fence *fence)
 {
 	return container_of(fence, struct sde_fence, base);
@@ -114,7 +118,26 @@
+/*
+ * sde_fence_release - fence framework release callback. If the fence is
+ * being freed before it was ever signaled, unlink it from the context's
+ * pending list and drop the context reference it still holds.
+ */
 static void sde_fence_release(struct fence *fence)
 {
 	struct sde_fence *f = to_sde_fence(fence);
+	struct sde_fence *fc, *next;
+	struct sde_fence_context *ctx = f->ctx;
+	unsigned long flags;
+	bool release_kref = false;
 
+	spin_lock_irqsave(&ctx->lock, flags);
+	list_for_each_entry_safe(fc, next, &ctx->fence_list_head,
+				 fence_list) {
+		/* fence release called before signal */
+		if (f == fc) {
+			list_del_init(&fc->fence_list);
+			release_kref = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	/* keep kput outside spin_lock because it may release ctx */
+	if (release_kref)
+		kref_put(&ctx->kref, sde_fence_destroy);
 	kfree_rcu(f, base.rcu);
 }
 
@@ -223,10 +246,6 @@
 	return 0;
 }
 
-static void sde_fence_destroy(struct kref *kref)
-{
-}
-
 void sde_fence_deinit(struct sde_fence_context *ctx)
 {
 	if (!ctx) {
@@ -295,6 +314,7 @@
 {
 	unsigned long flags;
 	struct sde_fence *fc, *next;
+	uint32_t count = 0;
 
 	if (!ctx) {
 		SDE_ERROR("invalid ctx, %pK\n", ctx);
@@ -324,7 +344,7 @@
 				 fence_list) {
 		if (fence_is_signaled_locked(&fc->base)) {
 			list_del_init(&fc->fence_list);
-			kref_put(&ctx->kref, sde_fence_destroy);
+			count++;
 		}
 	}
 
@@ -332,4 +352,8 @@
 
 end:
 	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	/* keep this outside spin_lock because same ctx may be released */
+	for (; count > 0; count--)
+		kref_put(&ctx->kref, sde_fence_destroy);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 33be5a0..acfcb5e 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -689,6 +689,26 @@
 	return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
 }
 
+/*
+ * sde_format_get_framebuffer_size - compute total memory size required
+ * for a frame buffer of the given format/dimensions; returns 0 for an
+ * unrecognized format or when the plane-size computation fails.
+ */
+uint32_t sde_format_get_framebuffer_size(
+		const uint32_t format,
+		const uint32_t width,
+		const uint32_t height,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len)
+{
+	const struct sde_format *fmt;
+	struct sde_hw_fmt_layout layout;
+
+	fmt = sde_get_sde_format_ext(format, modifiers, modifiers_len);
+	if (!fmt)
+		return 0;
+
+	/* on layout failure report zero size rather than garbage */
+	if (_sde_format_get_plane_sizes(fmt, width, height, &layout))
+		layout.total_size = 0;
+
+	return layout.total_size;
+}
+
 static int _sde_format_populate_addrs_ubwc(
 		int mmu_id,
 		struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 5dcdfbb..894dee9 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -104,4 +104,21 @@
 		struct sde_rect *roi,
 		struct sde_hw_fmt_layout *fmtl);
 
+/**
+ * sde_format_get_framebuffer_size - get framebuffer memory size
+ * @format:            DRM pixel format
+ * @width:             pixel width
+ * @height:            pixel height
+ * @modifiers:         array to populate with drm modifiers, can be NULL
+ * @modifiers_len:     length of modifers array
+ *
+ * Return: memory size required for frame buffer
+ */
+uint32_t sde_format_get_framebuffer_size(
+		const uint32_t format,
+		const uint32_t width,
+		const uint32_t height,
+		const uint64_t *modifiers,
+		const uint32_t modifiers_len);
+
 #endif /*_SDE_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
new file mode 100644
index 0000000..78fa634
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -0,0 +1,883 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_lm.h"
+#include "sde_ad4.h"
+
+/* true once init, cfg, mode and input have all been applied in idle state */
+#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
+#define MERGE_WIDTH_RIGHT 4
+#define MERGE_WIDTH_LEFT 3
+
+/* bitmask of AD4 property ops that have completed since last idle */
+enum ad4_ops_bitmask {
+	ad4_init = BIT(AD_INIT),
+	ad4_cfg = BIT(AD_CFG),
+	ad4_mode = BIT(AD_MODE),
+	ad4_input = BIT(AD_INPUT),
+	ad4_ops_max = BIT(31),
+};
+
+enum ad4_state {
+	ad4_state_idle,
+	ad4_state_run,
+	ad4_state_max,
+};
+
+typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *ad);
+
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+
+/*
+ * Per-(state, property) dispatch table.  Entries left NULL are rejected by
+ * ad4_params_check() before dispatch in sde_setup_dspp_ad4().
+ */
+static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
+	[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
+	[ad4_state_idle][AD_CFG] = ad4_cfg_setup_idle,
+	[ad4_state_idle][AD_INPUT] = ad4_input_setup_idle,
+	[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
+	[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_run][AD_INIT] = ad4_init_setup,
+	[ad4_state_run][AD_CFG] = ad4_cfg_setup,
+	[ad4_state_run][AD_INPUT] = ad4_input_setup,
+	[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
+};
+
+/* per-dspp AD4 software state */
+struct ad4_info {
+	enum ad4_state state;
+	u32 completed_ops_mask;
+	bool ad4_support;
+	enum ad4_modes cached_mode;
+	u32 cached_als;
+};
+
+/*
+ * DSPP_2/DSPP_3 have no AD4 hardware (ad4_support = false, state pinned to
+ * ad4_state_max).  cached_als is implicitly zero-initialized.
+ */
+static struct ad4_info info[DSPP_MAX] = {
+	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF},
+	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF},
+};
+
+/*
+ * sde_setup_dspp_ad4 - top-level AD4 property setter for a dspp block.
+ * Validates arguments, then dispatches to the handler registered for the
+ * current AD4 state and requested property.  Errors are only logged.
+ */
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
+{
+	int ret = 0;
+	struct sde_ad_hw_cfg *cfg = ad_cfg;
+
+	/* guarantees non-NULL pointers, in-range state/prop indices, and a
+	 * non-NULL prop_set_func[][] entry before the dispatch below
+	 */
+	ret = ad4_params_check(dspp, ad_cfg);
+	if (ret)
+		return;
+
+	ret = prop_set_func[info[dspp->idx].state][cfg->prop](dspp, ad_cfg);
+	if (ret)
+		DRM_ERROR("op failed %d ret %d\n", cfg->prop, ret);
+}
+
+/*
+ * sde_validate_dspp_ad4 - check that @prop names a valid AD property and
+ * that this dspp instance has AD4 hardware support.
+ * Returns 0 on success or -EINVAL on invalid arguments.
+ */
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop)
+{
+
+	if (!dspp || !prop) {
+		DRM_ERROR("invalid params dspp %pK prop %pK\n", dspp, prop);
+		return -EINVAL;
+	}
+
+	if (*prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", *prop);
+		return -EINVAL;
+	}
+
+	/* info[] has exactly DSPP_MAX entries, so idx == DSPP_MAX is already
+	 * out of bounds: reject with >= (was >, an off-by-one OOB read)
+	 */
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * ad4_params_check - validate an AD4 property call before dispatch.
+ * Checks pointers, dspp AD4 support, property/state ranges, the dispatch
+ * table entry, and that mixer geometry matches the display dimensions.
+ * Returns 0 on success or -EINVAL.
+ */
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	struct sde_hw_mixer *hw_lm;
+
+	if (!dspp || !cfg || !cfg->hw_cfg) {
+		/* fix: %pk is not a valid printk extension, must be %pK */
+		DRM_ERROR("invalid dspp %pK cfg %pK hw_cfg %pK\n",
+			dspp, cfg, ((cfg) ? (cfg->hw_cfg) : NULL));
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->mixer_info) {
+		DRM_ERROR("invalid mixer info\n");
+		return -EINVAL;
+	}
+
+	/* info[] has DSPP_MAX entries; idx == DSPP_MAX would read past the
+	 * end of the array (was >, an off-by-one)
+	 */
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (cfg->prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", cfg->prop);
+		return -EINVAL;
+	}
+
+	if (info[dspp->idx].state >= ad4_state_max) {
+		DRM_ERROR("in max state for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (!prop_set_func[info[dspp->idx].state][cfg->prop]) {
+		DRM_ERROR("prop set not implemented for state %d prop %d\n",
+				info[dspp->idx].state, cfg->prop);
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->num_of_mixers ||
+	    cfg->hw_cfg->num_of_mixers > CRTC_DUAL_MIXERS) {
+		DRM_ERROR("invalid mixer cnt %d\n",
+				cfg->hw_cfg->num_of_mixers);
+		return -EINVAL;
+	}
+	hw_lm = cfg->hw_cfg->mixer_info;
+
+	/* error labels fixed: the values printed are displayh then displayv */
+	if (cfg->hw_cfg->num_of_mixers == 1 &&
+	    hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+	    hw_lm->cfg.out_width != cfg->hw_cfg->displayh) {
+		DRM_ERROR("single_lm lmh %d lmw %d displayh %d displayv %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+		    hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 2)) {
+		/* NOTE(review): dual-LM width is compared against
+		 * displayh >> 2; confirm this should not be displayh >> 1
+		 * (half of the panel per mixer).
+		 */
+		DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayv %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * ad4_mode_setup - switch the AD4 block on or off and update the software
+ * state machine accordingly.  Register offset 0x04 appears to be the
+ * bypass/enable control (0x101 = bypassed, 0 = active).
+ */
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
+{
+	u32 blk_offset;
+
+	blk_offset = 0x04;
+	if (mode == AD4_OFF) {
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0x101);
+		/* back to idle: all property ops must be re-applied */
+		info[dspp->idx].state = ad4_state_idle;
+		info[dspp->idx].completed_ops_mask = 0;
+	} else {
+		info[dspp->idx].state = ad4_state_run;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0);
+	}
+
+	return 0;
+}
+
+/*
+ * ad4_init_setup - program the AD4 init-parameter registers from a
+ * drm_msm_ad4_init payload.
+ * A NULL payload clears the "init done" op bit instead of touching hw.
+ * Also derives the frame/processing window and tile control for single
+ * versus dual layer-mixer (split display) topologies.
+ * Returns 0 on success or -EINVAL on a bad payload size.
+ */
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 frame_start, frame_end, proc_start, proc_end;
+	struct sde_hw_mixer *hw_lm;
+	u32 blk_offset, tile_ctl, val, i;
+	u32 off1, off2, off3, off4, off5, off6;
+	struct drm_msm_ad4_init *init;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	/*
+	 * Single LM: the frame and processing windows cover everything.
+	 * Dual LM: each side processes its half, with a small merge overlap
+	 * (MERGE_WIDTH_LEFT/RIGHT) on the frame window at the seam.
+	 */
+	hw_lm = cfg->hw_cfg->mixer_info;
+	if (cfg->hw_cfg->num_of_mixers == 1) {
+		frame_start = 0;
+		frame_end = 0xffff;
+		proc_start = 0;
+		proc_end = 0xffff;
+		tile_ctl = 0;
+	} else {
+		tile_ctl = 0x5;
+		if (hw_lm->cfg.right_mixer) {
+			frame_start = (cfg->hw_cfg->displayh >> 1) -
+				MERGE_WIDTH_RIGHT;
+			frame_end = cfg->hw_cfg->displayh - 1;
+			proc_start = (cfg->hw_cfg->displayh >> 1);
+			proc_end = frame_end;
+			tile_ctl |= 0x10;
+		} else {
+			frame_start = 0;
+			frame_end = (cfg->hw_cfg->displayh >> 1) +
+				MERGE_WIDTH_LEFT;
+			proc_start = 0;
+			proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
+		}
+	}
+
+	init = cfg->hw_cfg->payload;
+	blk_offset = 8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_009);
+
+	blk_offset = 0xc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_010);
+
+	/* NOTE: params 011/012 in the caller's payload are overwritten here
+	 * with the actual display dimensions (17-bit fields).
+	 */
+	init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
+	init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
+	blk_offset = 0x10;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    ((init->init_param_011 << 16) | init->init_param_012));
+
+	blk_offset = 0x14;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			tile_ctl);
+
+	/* scalar init parameters, each masked to its register field width */
+	blk_offset = 0x44;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		((((init->init_param_013) & (BIT(17) - 1)) << 16) |
+		 (init->init_param_014 & (BIT(17) - 1))));
+
+	blk_offset = 0x5c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_015 & (BIT(16) - 1)));
+	blk_offset = 0x60;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_016 & (BIT(8) - 1)));
+	blk_offset = 0x64;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_017 & (BIT(12) - 1)));
+	blk_offset = 0x68;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_018 & (BIT(12) - 1)));
+	blk_offset = 0x6c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_019 & (BIT(12) - 1)));
+	blk_offset = 0x70;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_020 & (BIT(16) - 1)));
+	blk_offset = 0x74;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_021 & (BIT(8) - 1)));
+	blk_offset = 0x78;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_022 & (BIT(8) - 1)));
+	blk_offset = 0x7c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_023 & (BIT(16) - 1)));
+	blk_offset = 0x80;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_024 & (BIT(16) - 1)) << 16) |
+		((init->init_param_025 & (BIT(16) - 1)))));
+	blk_offset = 0x84;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_026 & (BIT(16) - 1)) << 16) |
+		((init->init_param_027 & (BIT(16) - 1)))));
+
+	blk_offset = 0x90;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_028 & (BIT(16) - 1)));
+	blk_offset = 0x94;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_029 & (BIT(16) - 1)));
+
+	blk_offset = 0x98;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_035 & (BIT(16) - 1)) << 16) |
+		((init->init_param_030 & (BIT(16) - 1)))));
+
+	blk_offset = 0x9c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_032 & (BIT(16) - 1)) << 16) |
+		((init->init_param_031 & (BIT(16) - 1)))));
+	blk_offset = 0xa0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_034 & (BIT(16) - 1)) << 16) |
+		((init->init_param_033 & (BIT(16) - 1)))));
+
+	blk_offset = 0xb4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_036 & (BIT(8) - 1)));
+	blk_offset = 0xcc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_037 & (BIT(8) - 1)));
+	blk_offset = 0xc0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_038 & (BIT(8) - 1)));
+	blk_offset = 0xd8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_039 & (BIT(8) - 1)));
+
+	blk_offset = 0xe8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_040 & (BIT(16) - 1)));
+
+	blk_offset = 0xf4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_041 & (BIT(8) - 1)));
+
+	blk_offset = 0x100;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_042 & (BIT(16) - 1)));
+
+	blk_offset = 0x10c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_043 & (BIT(8) - 1)));
+
+	blk_offset = 0x120;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_044 & (BIT(16) - 1)));
+	blk_offset = 0x124;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_045 & (BIT(16) - 1)));
+
+	blk_offset = 0x128;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_046 & (BIT(1) - 1)));
+	blk_offset = 0x12c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_047 & (BIT(8) - 1)));
+
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_048 & (BIT(5) - 1)));
+	blk_offset = 0x140;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_049 & (BIT(8) - 1)));
+
+	blk_offset = 0x144;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_050 & (BIT(8) - 1)));
+	blk_offset = 0x148;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_051 & (BIT(8) - 1)) << 8) |
+		((init->init_param_052 & (BIT(8) - 1)))));
+
+	blk_offset = 0x14c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_053 & (BIT(10) - 1)));
+	blk_offset = 0x150;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_054 & (BIT(10) - 1)));
+	blk_offset = 0x154;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_055 & (BIT(8) - 1)));
+
+	blk_offset = 0x158;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_056 & (BIT(8) - 1)));
+	blk_offset = 0x164;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_057 & (BIT(8) - 1)));
+	blk_offset = 0x168;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_058 & (BIT(4) - 1)));
+
+	/* frame/processing window derived above for this mixer topology */
+	blk_offset = 0x17c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_start & (BIT(16) - 1)));
+	blk_offset = 0x180;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_end & (BIT(16) - 1)));
+	blk_offset = 0x184;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_start & (BIT(16) - 1)));
+	blk_offset = 0x188;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_end & (BIT(16) - 1)));
+
+	blk_offset = 0x18c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_059 & (BIT(4) - 1)));
+
+	blk_offset = 0x190;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_061 & (BIT(8) - 1)) << 8) |
+		((init->init_param_060 & (BIT(8) - 1)))));
+
+	blk_offset = 0x194;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_062 & (BIT(10) - 1)));
+
+	blk_offset = 0x1a0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_063 & (BIT(10) - 1)));
+	blk_offset = 0x1a4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_064 & (BIT(10) - 1)));
+	blk_offset = 0x1a8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_065 & (BIT(10) - 1)));
+	blk_offset = 0x1ac;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_066 & (BIT(8) - 1)));
+	blk_offset = 0x1b0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_067 & (BIT(8) - 1)));
+	blk_offset = 0x1b4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_068 & (BIT(6) - 1)));
+
+	blk_offset = 0x460;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_069 & (BIT(16) - 1)));
+	blk_offset = 0x464;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_070 & (BIT(10) - 1)));
+	blk_offset = 0x468;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_071 & (BIT(10) - 1)));
+	blk_offset = 0x46c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_072 & (BIT(10) - 1)));
+	blk_offset = 0x470;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_073 & (BIT(8) - 1)));
+	blk_offset = 0x474;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_074 & (BIT(10) - 1)));
+	blk_offset = 0x478;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_075 & (BIT(10) - 1)));
+
+	/* LUT group 0: six tables, two 16-bit entries packed per register */
+	off1 = 0x1c0;
+	off2 = 0x210;
+	off3 = 0x260;
+	off4 = 0x2b0;
+	off5 = 0x380;
+	off6 = 0x3d0;
+	for (i = 0; i < AD4_LUT_GRP0_SIZE - 1; i = i + 2) {
+		val = (init->init_param_001[i] & (BIT(16) - 1));
+		val |= ((init->init_param_001[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_002[i] & (BIT(16) - 1));
+		val |= ((init->init_param_002[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+
+		val = (init->init_param_003[i] & (BIT(16) - 1));
+		val |= ((init->init_param_003[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+		off3 += 4;
+
+		val = (init->init_param_004[i] & (BIT(16) - 1));
+		val |= ((init->init_param_004[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+		off4 += 4;
+
+		val = (init->init_param_007[i] & (BIT(16) - 1));
+		val |= ((init->init_param_007[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+		off5 += 4;
+
+		val = (init->init_param_008[i] & (BIT(12) - 1));
+		val |= ((init->init_param_008[i + 1] &
+				(BIT(12) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+		off6 += 4;
+	}
+	/* write last index data */
+	i = AD4_LUT_GRP0_SIZE - 1;
+	val = ((init->init_param_001[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+	val = ((init->init_param_002[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+	val = ((init->init_param_003[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+	val = ((init->init_param_004[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+	val = ((init->init_param_007[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+	val = ((init->init_param_008[i] & (BIT(12) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+
+	/* LUT group 1: two tables with an even entry count, no tail write */
+	off1 = 0x300;
+	off2 = 0x340;
+	for (i = 0; i < AD4_LUT_GRP1_SIZE; i = i + 2) {
+		val = (init->init_param_005[i] & (BIT(16) - 1));
+		val |= ((init->init_param_005[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_006[i] & (BIT(16) - 1));
+		val |= ((init->init_param_006[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+	}
+
+	return 0;
+}
+
+/*
+ * ad4_cfg_setup - program the AD4 config-parameter registers from a
+ * drm_msm_ad4_cfg payload while the block is running.
+ * A NULL payload clears the "cfg done" op bit instead of touching hw.
+ * Returns 0 on success or -EINVAL on a bad payload size.
+ */
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 blk_offset, val;
+	struct drm_msm_ad4_cfg *ad_cfg;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_cfg)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_cfg), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+	ad_cfg = cfg->hw_cfg->payload;
+
+	blk_offset = 0x18;
+	val = (ad_cfg->cfg_param_002 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_001 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_004 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x30;
+	val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = ((ad_cfg->cfg_param_011 & (BIT(16) - 1)) << 16);
+	val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x88;
+	val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_014 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xa4;
+	val = (ad_cfg->cfg_param_015 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_016 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_017 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_018 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xc4;
+	val = (ad_cfg->cfg_param_019 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_020 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xb8;
+	val = (ad_cfg->cfg_param_021 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_022 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xd0;
+	val = (ad_cfg->cfg_param_023 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_024 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xdc;
+	val = (ad_cfg->cfg_param_025 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_026 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_027 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_028 & (BIT(16) - 1)) << 16);
+	/* fix: this packed value was computed but never written, leaving the
+	 * cfg_param_027/028 register unprogrammed
+	 */
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_029 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xec;
+	val = (ad_cfg->cfg_param_030 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_031 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xf8;
+	val = (ad_cfg->cfg_param_032 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_033 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x104;
+	val = (ad_cfg->cfg_param_034 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_035 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x110;
+	val = (ad_cfg->cfg_param_036 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_037 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_038 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_039 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x134;
+	val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x15c;
+	val = (ad_cfg->cfg_param_042 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_043 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x16c;
+	val = (ad_cfg->cfg_param_044 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_045 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_046 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	return 0;
+}
+
+/*
+ * ad4_input_setup - program the light-sensor input register (register 0x28)
+ * while the block is running; the value is cached in info[].cached_als.
+ * A NULL payload is treated as an input value of 0.
+ * Returns 0 on success or -EINVAL on a bad payload size.
+ */
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, als;
+	u32 blk_offset;
+
+	/* size is only validated when a payload is actually supplied */
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x28;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		als = 0;
+		val = &als;
+	}
+	/* remember the last input value and mark the input op as done */
+	info[dspp->idx].cached_als = *val;
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+
+	return 0;
+}
+
+/*
+ * ad4_suspend_setup - drop the software state machine back to idle and
+ * clear all completed ops, forcing a full re-program on resume.  No
+ * hardware registers are touched here.
+ */
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	info[dspp->idx].state = ad4_state_idle;
+	info[dspp->idx].completed_ops_mask = 0;
+	return 0;
+}
+
+/*
+ * ad4_mode_setup_common - cache the requested AD4 mode (shared by idle and
+ * run states) and, once init/cfg/mode/input have all completed, apply the
+ * mode to hardware via ad4_mode_setup().
+ * Returns 0 on success or -EINVAL on a missing/bad payload.
+ */
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+
+	/* unlike input/assertive/backlight, a payload is mandatory here */
+	if (cfg->hw_cfg->len != sizeof(u64) || !cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	info[dspp->idx].cached_mode = *((enum ad4_modes *)
+					(cfg->hw_cfg->payload));
+	info[dspp->idx].completed_ops_mask |= ad4_mode;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+/*
+ * ad4_init_setup_idle - idle-state AD_INIT handler: program init registers,
+ * mark the init op as done, and transition to run once all required ops
+ * (init/cfg/mode/input) have completed.  A NULL payload un-marks init.
+ */
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	ret = ad4_init_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_init;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+/*
+ * ad4_cfg_setup_idle - idle-state AD_CFG handler: program config registers,
+ * mark the cfg op as done, and transition to run once all required ops
+ * have completed.  A NULL payload un-marks cfg.
+ */
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	ret = ad4_cfg_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_cfg;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	return 0;
+}
+
+/*
+ * ad4_input_setup_idle - idle-state AD_INPUT handler: program the input
+ * register, mark the input op as done, and transition to run once all
+ * required ops have completed.
+ */
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	ret = ad4_input_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+/*
+ * ad4_assertive_setup - program the 8-bit assertiveness register (0x30).
+ * A NULL payload writes 0.  Note this op does not participate in the
+ * completed_ops_mask / IDLE_2_RUN transition.
+ * Returns 0 on success or -EINVAL on a bad payload size.
+ */
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, assertive;
+	u32 blk_offset;
+
+	/* size is only validated when a payload is actually supplied */
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x30;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		assertive = 0;
+		val = &assertive;
+	}
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(8) - 1)));
+	return 0;
+}
+
+/*
+ * ad4_backlight_setup - program the 16-bit backlight register (0x2c).
+ * A NULL payload writes 0.  Like assertiveness, this op does not affect
+ * the completed_ops_mask / IDLE_2_RUN transition.
+ * Returns 0 on success or -EINVAL on a bad payload size.
+ */
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, bl;
+	u32 blk_offset;
+
+	/* size is only validated when a payload is actually supplied */
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x2c;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		bl = 0;
+		val = &bl;
+	}
+
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index aa679e7..c545361 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -43,6 +43,15 @@
 /* max bank bit for macro tile and ubwc format */
 #define DEFAULT_SDE_HIGHEST_BANK_BIT 15
 
+/* default ubwc version */
+#define DEFAULT_SDE_UBWC_VERSION SDE_HW_UBWC_VER_10
+
+/* default ubwc static config register value */
+#define DEFAULT_SDE_UBWC_STATIC 0x0
+
+/* default ubwc swizzle register value */
+#define DEFAULT_SDE_UBWC_SWIZZLE 0x0
+
 /* default hardware block size if dtsi entry is not present */
 #define DEFAULT_SDE_HW_BLOCK_LEN 0x100
 
@@ -97,6 +106,9 @@
 	MIXER_BLEND,
 	WB_LINEWIDTH,
 	BANK_BIT,
+	UBWC_VERSION,
+	UBWC_STATIC,
+	UBWC_SWIZZLE,
 	QSEED_TYPE,
 	CSC_TYPE,
 	PANIC_PER_PIPE,
@@ -162,13 +174,17 @@
 	TE_LEN,
 	TE2_OFF,
 	TE2_LEN,
-	DSC_OFF,
-	DSC_LEN,
 	PP_SLAVE,
 	PP_PROP_MAX,
 };
 
 enum {
+	DSC_OFF,
+	DSC_LEN,
+	DSC_PROP_MAX,
+};
+
+enum {
 	DSPP_OFF,
 	DSPP_SIZE,
 	DSPP_BLOCKS,
@@ -283,6 +299,9 @@
 	{MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
 	{WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
 	{BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+	{UBWC_VERSION, "qcom,sde-ubwc-version", false, PROP_TYPE_U32},
+	{UBWC_STATIC, "qcom,sde-ubwc-static", false, PROP_TYPE_U32},
+	{UBWC_SWIZZLE, "qcom,sde-ubwc-swizzle", false, PROP_TYPE_U32},
 	{QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
 	{CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
 	{PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
@@ -387,9 +406,12 @@
 	{TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
 	{TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
 	{TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
+	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type dsc_prop[] = {
 	{DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
 	{DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
-	{PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
 };
 
 static struct sde_prop_type cdm_prop[] = {
@@ -539,6 +561,7 @@
 				rc = -EINVAL;
 			}
 			*off_count = 0;
+			memset(prop_count, 0, sizeof(int *) * prop_size);
 			return rc;
 		}
 	}
@@ -607,7 +630,7 @@
 			rc = 0;
 			prop_count[i] = 0;
 		}
-		if (!off_count && prop_count[i] < 0) {
+		if (prop_count[i] < 0) {
 			prop_count[i] = 0;
 			if (sde_prop[i].is_mandatory) {
 				SDE_ERROR("prop:%s count:%d is negative\n",
@@ -802,6 +825,8 @@
 		sblk->pcc_blk.len = 0;
 		set_bit(SDE_SSPP_PCC, &sspp->features);
 	}
+
+	sblk->format_list = sde_cfg->vig_formats;
 }
 
 static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
@@ -849,15 +874,21 @@
 		sblk->pcc_blk.len = 0;
 		set_bit(SDE_SSPP_PCC, &sspp->features);
 	}
+
+	sblk->format_list = sde_cfg->dma_formats;
 }
 
 static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
 	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
 	struct sde_prop_value *prop_value, u32 *cursor_count)
 {
+	if (!IS_SDE_MAJOR_MINOR_SAME(sde_cfg->hwversion, SDE_HW_VER_300))
+		SDE_ERROR("invalid sspp type %d, xin id %d\n",
+				sspp->type, sspp->xin_id);
 	set_bit(SDE_SSPP_CURSOR, &sspp->features);
 	sblk->maxupscale = SSPP_UNITY_SCALE;
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = sde_cfg->cursor_formats;
 	sspp->id = SSPP_CURSOR0 + *cursor_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
 	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
@@ -871,6 +902,7 @@
 {
 	sblk->maxupscale = SSPP_UNITY_SCALE;
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = sde_cfg->dma_formats;
 	sspp->id = SSPP_DMA0 + *dma_count;
 	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
 	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
@@ -1407,6 +1439,9 @@
 		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
 		set_bit(SDE_WB_YUV_CONFIG, &wb->features);
 
+		if (sde_cfg->has_wb_ubwc)
+			set_bit(SDE_WB_UBWC, &wb->features);
+
 		for (j = 0; j < sde_cfg->mdp_count; j++) {
 			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
 				PROP_BITVALUE_ACCESS(prop_value,
@@ -1416,6 +1451,8 @@
 						WB_CLK_CTRL, i, 1);
 		}
 
+		wb->format_list = sde_cfg->wb_formats;
+
 		SDE_DEBUG(
 			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
 			wb->id - WB_0,
@@ -1632,6 +1669,7 @@
 					blocks_prop_exists, blocks_prop_value);
 
 		sblk->ad.id = SDE_DSPP_AD;
+		sde_cfg->ad_count = ad_off_count;
 		if (ad_prop_value && (i < ad_off_count) &&
 		    ad_prop_exists[AD_OFF]) {
 			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
@@ -1649,6 +1687,54 @@
 	return rc;
 }
 
+static int sde_dsc_parse_dt(struct device_node *np,
+			struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[MAX_BLOCKS], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[DSC_PROP_MAX];
+	u32 off_count;
+	struct sde_dsc_cfg *dsc;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(DSC_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, dsc_prop, ARRAY_SIZE(dsc_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->dsc_count = off_count;
+
+	rc = _read_dt_entry(np, dsc_prop, ARRAY_SIZE(dsc_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		dsc = sde_cfg->dsc + i;
+		dsc->base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
+		dsc->id = DSC_0 + i;
+		dsc->len = PROP_VALUE_ACCESS(prop_value, DSC_LEN, 0);
+		if (!prop_exists[DSC_LEN])
+			dsc->len = DEFAULT_SDE_HW_BLOCK_LEN;
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+};
+
 static int sde_cdm_parse_dt(struct device_node *np,
 				struct sde_mdss_cfg *sde_cfg)
 {
@@ -1982,6 +2068,19 @@
 	if (!prop_exists[BANK_BIT])
 		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
 
+	cfg->ubwc_version = PROP_VALUE_ACCESS(prop_value, UBWC_VERSION, 0);
+	if (!prop_exists[UBWC_VERSION])
+		cfg->ubwc_version = DEFAULT_SDE_UBWC_VERSION;
+
+	cfg->mdp[0].ubwc_static = PROP_VALUE_ACCESS(prop_value, UBWC_STATIC, 0);
+	if (!prop_exists[UBWC_STATIC])
+		cfg->mdp[0].ubwc_static = DEFAULT_SDE_UBWC_STATIC;
+
+	cfg->mdp[0].ubwc_swizzle = PROP_VALUE_ACCESS(prop_value,
+			UBWC_SWIZZLE, 0);
+	if (!prop_exists[UBWC_SWIZZLE])
+		cfg->mdp[0].ubwc_swizzle = DEFAULT_SDE_UBWC_SWIZZLE;
+
 	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
 	if (!rc && !strcmp(type, "qseedv3")) {
 		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
@@ -2099,10 +2198,9 @@
 static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
 	uint32_t hw_rev)
 {
-	int i, rc = 0;
+	int rc = 0;
 	uint32_t dma_list_size, vig_list_size, wb2_list_size;
 	uint32_t cursor_list_size = 0;
-	struct sde_sspp_sub_blks *sblk;
 	uint32_t index = 0;
 
 	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) {
@@ -2184,43 +2282,19 @@
 	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
 		index, tp10_ubwc_formats,
 		ARRAY_SIZE(tp10_ubwc_formats));
-
-	for (i = 0; i < sde_cfg->sspp_count; ++i) {
-		struct sde_sspp_cfg *sspp = &sde_cfg->sspp[i];
-
-		sblk = (struct sde_sspp_sub_blks *)sspp->sblk;
-		switch (sspp->type) {
-		case SSPP_TYPE_VIG:
-			sblk->format_list = sde_cfg->vig_formats;
-			break;
-		case SSPP_TYPE_CURSOR:
-			if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300))
-				sblk->format_list = sde_cfg->cursor_formats;
-			else
-				SDE_ERROR("invalid sspp type %d, xin id %d\n",
-					sspp->type, sspp->xin_id);
-			break;
-		case SSPP_TYPE_DMA:
-			sblk->format_list = sde_cfg->dma_formats;
-			break;
-		default:
-			SDE_ERROR("invalid sspp type %d\n", sspp->type);
-			rc = -EINVAL;
-			goto end;
-		}
-	}
-
-	for (i = 0; i < sde_cfg->wb_count; ++i)
-		sde_cfg->wb[i].format_list = sde_cfg->wb_formats;
-
 end:
 	return rc;
 }
 
-static int sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+static int _sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 {
 	int rc = 0;
 
+	if (!sde_cfg)
+		return -EINVAL;
+
+	rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+
 	switch (hw_rev) {
 	case SDE_HW_VER_170:
 	case SDE_HW_VER_171:
@@ -2231,7 +2305,9 @@
 	case SDE_HW_VER_301:
 	case SDE_HW_VER_400:
 		/* update msm8998 and sdm845 target here */
-		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+		sde_cfg->has_wb_ubwc = true;
+		break;
+	default:
 		break;
 	}
 
@@ -2288,6 +2364,10 @@
 
 	sde_cfg->hwversion = hw_rev;
 
+	rc = _sde_hardware_caps(sde_cfg, hw_rev);
+	if (rc)
+		goto end;
+
 	rc = sde_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -2304,6 +2384,10 @@
 	if (rc)
 		goto end;
 
+	rc = sde_dsc_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = sde_pp_parse_dt(np, sde_cfg);
 	if (rc)
 		goto end;
@@ -2338,10 +2422,6 @@
 	if (rc)
 		goto end;
 
-	rc = sde_hardware_caps(sde_cfg, hw_rev);
-	if (rc)
-		goto end;
-
 	return sde_cfg;
 
 end:
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 5e35d62..980ee66 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -61,6 +61,17 @@
 #define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
 
 /**
+ * Supported UBWC feature versions
+ */
+enum {
+	SDE_HW_UBWC_VER_10 = 0x100,
+	SDE_HW_UBWC_VER_20 = 0x200,
+	SDE_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev)       ((rev) >= SDE_HW_UBWC_VER_20)
+
+/**
  * MDP TOP BLOCK features
  * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
  * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
@@ -207,9 +218,7 @@
  * @SDE_WB_DOWNSCALE,       Writeback integer downscaler,
  * @SDE_WB_DITHER,          Dither block
  * @SDE_WB_TRAFFIC_SHAPER,  Writeback traffic shaper bloc
- * @SDE_WB_UBWC_1_0,        Writeback Universal bandwidth compression 1.0
- *                          support
- * @SDE_WB_UBWC_1_5         UBWC 1.5 support
+ * @SDE_WB_UBWC,            Writeback Universal bandwidth compression
  * @SDE_WB_YUV_CONFIG       Writeback supports output of YUV colorspace
  * @SDE_WB_PIPE_ALPHA       Writeback supports pipe alpha
  * @SDE_WB_XY_ROI_OFFSET    Writeback supports x/y-offset of out ROI in
@@ -225,7 +234,7 @@
 	SDE_WB_DOWNSCALE,
 	SDE_WB_DITHER,
 	SDE_WB_TRAFFIC_SHAPER,
-	SDE_WB_UBWC_1_0,
+	SDE_WB_UBWC,
 	SDE_WB_YUV_CONFIG,
 	SDE_WB_PIPE_ALPHA,
 	SDE_WB_XY_ROI_OFFSET,
@@ -447,11 +456,15 @@
  * @base:              register base offset to mdss
  * @features           bit mask identifying sub-blocks/features
  * @highest_bank_bit:  UBWC parameter
+ * @ubwc_static:       ubwc static configuration
+ * @ubwc_swizzle:      ubwc default swizzle setting
  * @clk_ctrls          clock control register definition
  */
 struct sde_mdp_cfg {
 	SDE_HW_BLK_INFO;
 	u32 highest_bank_bit;
+	u32 ubwc_static;
+	u32 ubwc_swizzle;
 	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
 };
 
@@ -526,6 +539,16 @@
 };
 
 /**
+ * struct sde_dsc_cfg - information of DSC blocks
+ * @id                 enum identifying this block
+ * @base               register offset of this block
+ * @features           bit mask identifying sub-blocks/features
+ */
+struct sde_dsc_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/**
  * struct sde_cdm_cfg - information of chroma down blocks
  * @id                 enum identifying this block
  * @base               register offset of this block
@@ -650,12 +673,13 @@
  * @max_mixer_blendstages max layer mixer blend stages or
  *                       supported z order
  * @max_wb_linewidth   max writeback line width support.
- * @highest_bank_bit   highest memory bit setting for tile buffers.
  * @qseed_type         qseed2 or qseed3 support.
  * @csc_type           csc or csc_10bit support.
  * @smart_dma_rev      Supported version of SmartDMA feature.
  * @has_src_split      source split feature status
  * @has_cdp            Client driver prefetch feature status
+ * @has_wb_ubwc        UBWC feature supported on WB
+ * @ubwc_version       UBWC feature version (0x0 for not supported)
  * @dma_formats        Supported formats for dma pipe
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
@@ -668,13 +692,14 @@
 	u32 max_mixer_width;
 	u32 max_mixer_blendstages;
 	u32 max_wb_linewidth;
-	u32 highest_bank_bit;
 	u32 qseed_type;
 	u32 csc_type;
 	u32 smart_dma_rev;
 	bool has_src_split;
 	bool has_cdp;
 	bool has_dim_layer;
+	bool has_wb_ubwc;
+	u32 ubwc_version;
 
 	u32 mdss_count;
 	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
@@ -697,6 +722,9 @@
 	u32 pingpong_count;
 	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
 
+	u32 dsc_count;
+	struct sde_dsc_cfg dsc[MAX_BLOCKS];
+
 	u32 cdm_count;
 	struct sde_cdm_cfg cdm[MAX_BLOCKS];
 
@@ -711,6 +739,9 @@
 
 	u32 reg_dma_count;
 	struct sde_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
 	/* Add additional block data structures here */
 
 	struct sde_perf_cfg perf;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 0e756b4..e6b2fd5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -29,6 +29,9 @@
 #define   CTL_SW_RESET                  0x030
 #define   CTL_LAYER_EXTN_OFFSET         0x40
 
+#define CTL_MIXER_BORDER_OUT            BIT(24)
+#define CTL_FLUSH_MASK_CTL              BIT(17)
+
 #define SDE_REG_RESET_TIMEOUT_COUNT    20
 
 static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
@@ -180,7 +183,7 @@
 		return -EINVAL;
 	}
 
-	flushbits |= BIT(17); /* CTL */
+	flushbits |= CTL_FLUSH_MASK_CTL;
 
 	return flushbits;
 }
@@ -313,6 +316,7 @@
 	for (i = 0; i < ctx->mixer_count; i++) {
 		SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
 		SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
 	}
 }
 
@@ -339,7 +343,10 @@
 	else
 		pipes_per_stage = 1;
 
-	mixercfg = BIT(24); /* always set BORDER_OUT */
+	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+	if (!stage_cfg)
+		goto exit;
 
 	for (i = 0; i <= stages; i++) {
 		/* overflow to ext register if 'i + 1 > 7' */
@@ -443,6 +450,7 @@
 		}
 	}
 
+exit:
 	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
 	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
new file mode 100644
index 0000000..f546710
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -0,0 +1,232 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_dsc.h"
+#include "sde_hw_pingpong.h"
+#include "sde_dbg.h"
+
+#define DSC_COMMON_MODE	                0x000
+#define DSC_ENC                         0X004
+#define DSC_PICTURE                     0x008
+#define DSC_SLICE                       0x00C
+#define DSC_CHUNK_SIZE                  0x010
+#define DSC_DELAY                       0x014
+#define DSC_SCALE_INITIAL               0x018
+#define DSC_SCALE_DEC_INTERVAL          0x01C
+#define DSC_SCALE_INC_INTERVAL          0x020
+#define DSC_FIRST_LINE_BPG_OFFSET       0x024
+#define DSC_BPG_OFFSET                  0x028
+#define DSC_DSC_OFFSET                  0x02C
+#define DSC_FLATNESS                    0x030
+#define DSC_RC_MODEL_SIZE               0x034
+#define DSC_RC                          0x038
+#define DSC_RC_BUF_THRESH               0x03C
+#define DSC_RANGE_MIN_QP                0x074
+#define DSC_RANGE_MAX_QP                0x0B0
+#define DSC_RANGE_BPG_OFFSET            0x0EC
+
+static void sde_hw_dsc_disable(struct sde_hw_dsc *dsc)
+{
+	struct sde_hw_blk_reg_map *dsc_c = &dsc->hw;
+
+	SDE_REG_WRITE(dsc_c, DSC_COMMON_MODE, 0);
+}
+
+static void sde_hw_dsc_config(struct sde_hw_dsc *hw_dsc,
+		struct msm_display_dsc_info *dsc, u32 mode,
+		bool ich_reset_override)
+{
+	u32 data;
+	int bpp, lsb;
+	u32 initial_lines = dsc->initial_lines;
+	bool is_cmd_mode = !(mode & BIT(2));
+	struct sde_hw_blk_reg_map *dsc_c = &hw_dsc->hw;
+
+	SDE_REG_WRITE(dsc_c, DSC_COMMON_MODE, mode);
+
+	data = 0;
+	if (ich_reset_override)
+		data = 3 << 28;
+
+	if (is_cmd_mode)
+		initial_lines += 1;
+
+	data |= (initial_lines << 20);
+	data |= ((dsc->slice_last_group_size - 1) << 18);
+	/* bpp is 6.4 format, 4 LSBs are for the fractional part */
+	lsb = dsc->bpp % 4;
+	bpp = dsc->bpp / 4;
+	bpp *= 4;	/* either 8 or 12 */
+	bpp <<= 4;
+	bpp |= lsb;
+	data |= (bpp << 8);
+	data |= (dsc->block_pred_enable << 7);
+	data |= (dsc->line_buf_depth << 3);
+	data |= (dsc->enable_422 << 2);
+	data |= (dsc->convert_rgb << 1);
+	data |= dsc->input_10_bits;
+
+	SDE_REG_WRITE(dsc_c, DSC_ENC, data);
+
+	data = dsc->pic_width << 16;
+	data |= dsc->pic_height;
+	SDE_REG_WRITE(dsc_c, DSC_PICTURE, data);
+
+	data = dsc->slice_width << 16;
+	data |= dsc->slice_height;
+	SDE_REG_WRITE(dsc_c, DSC_SLICE, data);
+
+	data = dsc->chunk_size << 16;
+	SDE_REG_WRITE(dsc_c, DSC_CHUNK_SIZE, data);
+
+	data = dsc->initial_dec_delay << 16;
+	data |= dsc->initial_xmit_delay;
+	SDE_REG_WRITE(dsc_c, DSC_DELAY, data);
+
+	data = dsc->initial_scale_value;
+	SDE_REG_WRITE(dsc_c, DSC_SCALE_INITIAL, data);
+
+	data = dsc->scale_decrement_interval;
+	SDE_REG_WRITE(dsc_c, DSC_SCALE_DEC_INTERVAL, data);
+
+	data = dsc->scale_increment_interval;
+	SDE_REG_WRITE(dsc_c, DSC_SCALE_INC_INTERVAL, data);
+
+	data = dsc->first_line_bpg_offset;
+	SDE_REG_WRITE(dsc_c, DSC_FIRST_LINE_BPG_OFFSET, data);
+
+	data = dsc->nfl_bpg_offset << 16;
+	data |= dsc->slice_bpg_offset;
+	SDE_REG_WRITE(dsc_c, DSC_BPG_OFFSET, data);
+
+	data = dsc->initial_offset << 16;
+	data |= dsc->final_offset;
+	SDE_REG_WRITE(dsc_c, DSC_DSC_OFFSET, data);
+
+	data = dsc->det_thresh_flatness << 10;
+	data |= dsc->max_qp_flatness << 5;
+	data |= dsc->min_qp_flatness;
+	SDE_REG_WRITE(dsc_c, DSC_FLATNESS, data);
+	SDE_REG_WRITE(dsc_c, DSC_FLATNESS, 0x983);
+
+	data = dsc->rc_model_size;
+	SDE_REG_WRITE(dsc_c, DSC_RC_MODEL_SIZE, data);
+
+	data = dsc->tgt_offset_lo << 18;
+	data |= dsc->tgt_offset_hi << 14;
+	data |= dsc->quant_incr_limit1 << 9;
+	data |= dsc->quant_incr_limit0 << 4;
+	data |= dsc->edge_factor;
+	SDE_REG_WRITE(dsc_c, DSC_RC, data);
+}
+
+static void sde_hw_dsc_config_thresh(struct sde_hw_dsc *hw_dsc,
+		struct msm_display_dsc_info *dsc)
+{
+	u32 *lp;
+	char *cp;
+	int i;
+
+	struct sde_hw_blk_reg_map *dsc_c = &hw_dsc->hw;
+	u32 off = 0x0;
+
+	lp = dsc->buf_thresh;
+	off = DSC_RC_BUF_THRESH;
+	for (i = 0; i < 14; i++) {
+		SDE_REG_WRITE(dsc_c, off, *lp++);
+		off += 4;
+	}
+
+	cp = dsc->range_min_qp;
+	off = DSC_RANGE_MIN_QP;
+	for (i = 0; i < 15; i++) {
+		SDE_REG_WRITE(dsc_c, off, *cp++);
+		off += 4;
+	}
+
+	cp = dsc->range_max_qp;
+	off = DSC_RANGE_MAX_QP;
+	for (i = 0; i < 15; i++) {
+		SDE_REG_WRITE(dsc_c, off, *cp++);
+		off += 4;
+	}
+
+	cp = dsc->range_bpg_offset;
+	off = DSC_RANGE_BPG_OFFSET;
+	for (i = 0; i < 15; i++) {
+		SDE_REG_WRITE(dsc_c, off, *cp++);
+		off += 4;
+	}
+}
+
+static struct sde_dsc_cfg *_dsc_offset(enum sde_dsc dsc,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->dsc_count; i++) {
+		if (dsc == m->dsc[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->dsc[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_DSC;
+			return &m->dsc[i];
+		}
+	}
+
+	return NULL;
+}
+
+static void _setup_dsc_ops(struct sde_hw_dsc_ops *ops,
+		unsigned long cap)
+{
+	ops->dsc_disable = sde_hw_dsc_disable;
+	ops->dsc_config = sde_hw_dsc_config;
+	ops->dsc_config_thresh = sde_hw_dsc_config_thresh;
+};
+
+struct sde_hw_dsc *sde_hw_dsc_init(enum sde_dsc idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_dsc *c;
+	struct sde_dsc_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _dsc_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->dsc_hw_cap = cfg;
+	_setup_dsc_ops(&c->ops, c->dsc_hw_cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+		c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+void sde_hw_dsc_destroy(struct sde_hw_dsc *dsc)
+{
+	kfree(dsc);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.h b/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
new file mode 100644
index 0000000..0703531
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DSC_H
+#define _SDE_HW_DSC_H
+
+struct sde_hw_dsc;
+struct msm_display_dsc_info;
+
+#define DSC_MODE_SPLIT_PANEL            BIT(0)
+#define DSC_MODE_MULTIPLEX              BIT(1)
+#define DSC_MODE_VIDEO                  BIT(2)
+
+/**
+ * struct sde_hw_dsc_ops - interface to the dsc hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_dsc_ops {
+	/**
+	 * dsc_disable - disable dsc
+	 * @hw_dsc: Pointer to dsc context
+	 */
+	void (*dsc_disable)(struct sde_hw_dsc *hw_dsc);
+
+	/**
+	 * dsc_config - configures dsc encoder
+	 * @hw_dsc: Pointer to dsc context
+	 * @dsc: panel dsc parameters
+	 * @mode: dsc topology mode to be set
+	 * @ich_reset_override: option to reset ich
+	 */
+	void (*dsc_config)(struct sde_hw_dsc *hw_dsc,
+			struct msm_display_dsc_info *dsc,
+			u32 mode, bool ich_reset_override);
+
+	/**
+	 * dsc_config_thresh - programs panel thresholds
+	 * @hw_dsc: Pointer to dsc context
+	 * @dsc: panel dsc parameters
+	 */
+	void (*dsc_config_thresh)(struct sde_hw_dsc *hw_dsc,
+			struct msm_display_dsc_info *dsc);
+};
+
+struct sde_hw_dsc {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* dsc */
+	enum sde_dsc idx;
+	const struct sde_dsc_cfg *dsc_hw_cap;
+
+	/* ops */
+	struct sde_hw_dsc_ops ops;
+};
+
+/**
+ * sde_hw_dsc_init - initializes the dsc block for the passed
+ *                   dsc idx.
+ * @idx:  DSC index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m:    Pointer to mdss catalog data
+ * Returns: Error code or allocated sde_hw_dsc context
+ */
+struct sde_hw_dsc *sde_hw_dsc_init(enum sde_dsc idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_dsc_destroy - destroys dsc driver context
+ *                      should be called to free the context
+ * @dsc:   Pointer to dsc driver context returned by sde_hw_dsc_init
+ */
+void sde_hw_dsc_destroy(struct sde_hw_dsc *dsc);
+
+#endif /*_SDE_HW_DSC_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 6110a07..51680d3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -16,6 +16,7 @@
 #include "sde_hw_dspp.h"
 #include "sde_hw_color_processing.h"
 #include "sde_dbg.h"
+#include "sde_ad4.h"
 
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
@@ -96,6 +97,13 @@
 						sde_setup_dspp_gc_v1_7;
 			}
 			break;
+		case SDE_DSPP_AD:
+			if (c->cap->sblk->ad.version ==
+			    SDE_COLOR_PROCESS_VER(4, 0)) {
+				c->ops.setup_ad = sde_setup_dspp_ad4;
+				c->ops.validate_ad = sde_validate_dspp_ad4;
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 25e1f3b..455daa4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -139,6 +139,20 @@
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * validate_ad - check if ad property can be set
+	 * @ctx: Pointer to dspp context
+	 * @prop: Pointer to ad property being validated
+	 */
+	int (*validate_ad)(struct sde_hw_dspp *ctx, u32 *prop);
+
+	/**
+	 * setup_ad - update the ad property
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to ad configuration
+	 */
+	void (*setup_ad)(struct sde_hw_dspp *ctx, void *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 5af260a..1ef36ac 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -93,6 +93,9 @@
 
 	/* ops */
 	struct sde_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct sde_hw_mixer_cfg cfg;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index c500966..9eae387 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -85,6 +85,7 @@
 	SDE_HW_BLK_PINGPONG,
 	SDE_HW_BLK_INTF,
 	SDE_HW_BLK_WB,
+	SDE_HW_BLK_DSC,
 	SDE_HW_BLK_MAX,
 };
 
@@ -179,6 +180,17 @@
 	PINGPONG_MAX
 };
 
+enum sde_dsc {
+	DSC_NONE = 0,
+	DSC_0,
+	DSC_1,
+	DSC_2,
+	DSC_3,
+	DSC_4,
+	DSC_5,
+	DSC_MAX
+};
+
 enum sde_intf {
 	INTF_0 = 1,
 	INTF_1,
@@ -442,6 +454,7 @@
 #define SDE_DBG_MASK_WB       (1 << 8)
 #define SDE_DBG_MASK_TOP      (1 << 9)
 #define SDE_DBG_MASK_VBIF     (1 << 10)
+#define SDE_DBG_MASK_DSC      (1 << 11)
 
 /**
  * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
@@ -449,12 +462,20 @@
  * @len: Length of the payload.
  * @ctl: control pointer associated with dspp/lm.
  * @last_feature: last feature that will be set.
+ * @num_of_mixers: number of layer mixers for the display.
+ * @mixer_info: mixer info pointer associated with lm.
+ * @displayv: height of the display.
+ * @displayh: width of the display.
  */
 struct sde_hw_cp_cfg {
 	void *payload;
 	u32 len;
 	void *ctl;
 	u32 last_feature;
+	u32 num_of_mixers;
+	void *mixer_info;
+	u32 displayv;
+	u32 displayh;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
index 8488d03..a77b8d3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -101,11 +101,31 @@
 	return 0;
 }
 
-int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp,
-		struct sde_hw_dsc_cfg *cfg)
+void sde_hw_pp_dsc_enable(struct sde_hw_pingpong *pp)
 {
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+
+	SDE_REG_WRITE(c, PP_DSC_MODE, 1);
+}
+
+void sde_hw_pp_dsc_disable(struct sde_hw_pingpong *pp)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+
+	SDE_REG_WRITE(c, PP_DSC_MODE, 0);
+}
+
+int sde_hw_pp_setup_dsc(struct sde_hw_pingpong *pp)
+{
+	struct sde_hw_blk_reg_map *pp_c = &pp->hw;
+	int data;
+
+	data = SDE_REG_READ(pp_c, PP_DCE_DATA_OUT_SWAP);
+	data |= BIT(18); /* endian flip */
+	SDE_REG_WRITE(pp_c, PP_DCE_DATA_OUT_SWAP, data);
 	return 0;
 }
+
 int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
 {
 	struct sde_hw_blk_reg_map *c = &pp->hw;
@@ -137,7 +157,9 @@
 	ops->enable_tearcheck = sde_hw_pp_enable_te;
 	ops->get_vsync_info = sde_hw_pp_get_vsync_info;
 	ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
-	ops->setup_dsc = sde_hw_pp_setup_dsc_compression;
+	ops->setup_dsc = sde_hw_pp_setup_dsc;
+	ops->enable_dsc = sde_hw_pp_dsc_enable;
+	ops->disable_dsc = sde_hw_pp_dsc_disable;
 };
 
 struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
index fc3bea5..90f6171 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,11 +49,13 @@
  *
  * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions
  *  Assumption is these functions will be called after clocks are enabled
- *  @setup_tearcheck :
- *  @enable_tearcheck :
- *  @get_vsync_info :
- *  @setup_autorefresh :
- *  #setup_dsc :
+ *  @setup_tearcheck : program tear check values
+ *  @enable_tearcheck : enables tear check
+ *  @get_vsync_info : retrieves timing info of the panel
+ *  @setup_autorefresh : program auto refresh
+ *  @setup_dsc : program DSC block with encoding details
+ *  @enable_dsc : enables DSC encoder
+ *  @disable_dsc : disables DSC encoder
  */
 struct sde_hw_pingpong_ops {
 	/**
@@ -85,8 +87,17 @@
 	/**
 	 * Program the dsc compression block
 	 */
-	int (*setup_dsc)(struct sde_hw_pingpong *pp,
-			struct sde_hw_dsc_cfg *cfg);
+	int (*setup_dsc)(struct sde_hw_pingpong *pp);
+
+	/**
+	 * Enables DSC encoder
+	 */
+	void (*enable_dsc)(struct sde_hw_pingpong *pp);
+
+	/**
+	 * Disables DSC encoder
+	 */
+	void (*disable_dsc)(struct sde_hw_pingpong *pp);
 };
 
 struct sde_hw_pingpong {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 14230c27..71c3855 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -62,6 +62,7 @@
 
 #define SSPP_SRC_CONSTANT_COLOR            0x3c
 #define SSPP_EXCL_REC_CTL                  0x40
+#define SSPP_UBWC_STATIC_CTRL              0x44
 #define SSPP_FETCH_CONFIG                  0x048
 #define SSPP_DANGER_LUT                    0x60
 #define SSPP_SAFE_LUT                      0x64
@@ -366,7 +367,11 @@
 		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
 		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
 			SDE_FETCH_CONFIG_RESET_VALUE |
-			ctx->highest_bank_bit << 18);
+			ctx->mdp->highest_bank_bit << 18);
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+			SDE_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+					BIT(31) | (ctx->mdp->ubwc_swizzle) |
+					(ctx->mdp->highest_bank_bit << 4));
 	}
 
 	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
@@ -1074,6 +1079,9 @@
 	struct sde_hw_pipe *hw_pipe;
 	struct sde_sspp_cfg *cfg;
 
+	if (!addr || !catalog)
+		return ERR_PTR(-EINVAL);
+
 	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
 	if (!hw_pipe)
 		return ERR_PTR(-ENOMEM);
@@ -1085,10 +1093,11 @@
 	}
 
 	/* Assign ops */
+	hw_pipe->catalog = catalog;
+	hw_pipe->mdp = &catalog->mdp[0];
 	hw_pipe->idx = idx;
 	hw_pipe->cap = cfg;
 	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
-	hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
 			hw_pipe->hw.blk_off,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
index a224234..2fa01e4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -496,21 +496,23 @@
  * @blk_off:      pipe offset relative to mdss offset
  * @length        length of register block offset
  * @hwversion     mdss hw version number
+ * @catalog:      back pointer to catalog
+ * @mdp:          pointer to associated mdp portion of the catalog
  * @idx:          pipe index
  * @type :        pipe type, VIG/DMA/RGB/CURSOR, certain operations are not
  *                supported for each pipe type
  * @pipe_hw_cap:  pointer to layer_cfg
- * @highest_bank_bit:
  * @ops:          pointer to operations possible for this pipe
  */
 struct sde_hw_pipe {
 	/* base */
-	 struct sde_hw_blk_reg_map hw;
+	struct sde_hw_blk_reg_map hw;
+	struct sde_mdss_cfg *catalog;
+	struct sde_mdp_cfg *mdp;
 
 	/* Pipe */
 	enum sde_sspp idx;
 	const struct sde_sspp_cfg *cap;
-	u32 highest_bank_bit;
 
 	/* Ops */
 	struct sde_hw_sspp_ops ops;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index c4b4592..a7bebc2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -16,6 +16,7 @@
 #include "sde_dbg.h"
 
 #define SSPP_SPARE                        0x28
+#define UBWC_STATIC                       0x144
 
 #define FLD_SPLIT_DISPLAY_CMD             BIT(1)
 #define FLD_SMART_PANEL_FREE_RUN          BIT(2)
@@ -33,6 +34,8 @@
 #define TRAFFIC_SHAPER_WR_CLIENT(num)     (0x060 + (num * 4))
 #define TRAFFIC_SHAPER_FIXPOINT_FACTOR    4
 
+#define DCE_SEL                           0x450
+
 static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
 		struct split_pipe_cfg *cfg)
 {
@@ -200,6 +203,13 @@
 	status->wb[WB_3] = 0;
 }
 
+static void sde_hw_setup_dce(struct sde_hw_mdp *mdp, u32 dce_sel)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+
+	SDE_REG_WRITE(c, DCE_SEL, dce_sel);
+}
+
 static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
 		unsigned long cap)
 {
@@ -209,6 +219,7 @@
 	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
 	ops->get_danger_status = sde_hw_get_danger_status;
 	ops->get_safe_status = sde_hw_get_safe_status;
+	ops->setup_dce = sde_hw_setup_dce;
 }
 
 static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
@@ -232,6 +243,25 @@
 	return ERR_PTR(-EINVAL);
 }
 
+static inline void _sde_hw_mdptop_init_ubwc(void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_blk_reg_map hw;
+
+	if (!addr || !m)
+		return;
+
+	if (!IS_UBWC_20_SUPPORTED(m->ubwc_version))
+		return;
+
+	memset(&hw, 0, sizeof(hw));
+	hw.base_off = addr;
+	hw.blk_off = 0x0;
+	hw.hwversion = m->hwversion;
+	hw.log_mask = SDE_DBG_MASK_TOP;
+	SDE_REG_WRITE(&hw, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
 struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
 		void __iomem *addr,
 		const struct sde_mdss_cfg *m)
@@ -239,6 +269,9 @@
 	struct sde_hw_mdp *mdp;
 	const struct sde_mdp_cfg *cfg;
 
+	if (!addr || !m)
+		return ERR_PTR(-EINVAL);
+
 	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
 	if (!mdp)
 		return ERR_PTR(-ENOMEM);
@@ -260,6 +293,8 @@
 			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
 			mdp->hw.xin_id);
 
+	_sde_hw_mdptop_init_ubwc(addr, m);
+
 	return mdp;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
index 780d051..9cb0c55 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,13 @@
 			enum sde_clk_ctrl_type clk_ctrl, bool enable);
 
 	/**
+	 * setup_dce - set DCE mux for DSC ctrl path
+	 * @mdp: mdp top context driver
+	 * @dce_sel: dce_mux value
+	 */
+	void (*setup_dce)(struct sde_hw_mdp *mdp, u32 dce_sel);
+
+	/**
 	 * get_danger_status - get danger status
 	 * @mdp: mdp top context driver
 	 * @status: Pointer to danger safe status
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
index 320b05f..98aff0f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -41,6 +41,7 @@
 #define WB_N16_INIT_PHASE_Y_C12		0x06C
 #define WB_OUT_SIZE			0x074
 #define WB_ALPHA_X_VALUE		0x078
+#define WB_UBWC_STATIC_CTRL		0x144
 #define WB_CSC_BASE			0x260
 #define WB_DST_ADDR_SW_STATUS		0x2B0
 #define WB_CDP_CTRL			0x2B4
@@ -135,10 +136,13 @@
 	if (SDE_FORMAT_IS_UBWC(fmt)) {
 		opmode |= BIT(0);
 		dst_format |= BIT(31);
-		if (ctx->highest_bank_bit)
-			write_config |= (ctx->highest_bank_bit << 8);
+		write_config |= (ctx->mdp->highest_bank_bit << 8);
 		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
 			write_config |= 0x8;
+		if (IS_UBWC_20_SUPPORTED(ctx->catalog->ubwc_version))
+			SDE_REG_WRITE(c, WB_UBWC_STATIC_CTRL,
+					(ctx->mdp->ubwc_swizzle << 0) |
+					(ctx->mdp->highest_bank_bit << 4));
 	}
 
 	if (data->is_secure)
@@ -199,6 +203,9 @@
 	struct sde_hw_wb *c;
 	struct sde_wb_cfg *cfg;
 
+	if (!addr || !m || !hw_mdp)
+		return ERR_PTR(-EINVAL);
+
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return ERR_PTR(-ENOMEM);
@@ -211,10 +218,11 @@
 	}
 
 	/* Assign ops */
+	c->catalog = m;
+	c->mdp = &m->mdp[0];
 	c->idx = idx;
 	c->caps = cfg;
 	_setup_wb_ops(&c->ops, c->caps->features);
-	c->highest_bank_bit = m->mdp[0].highest_bank_bit;
 	c->hw_mdp = hw_mdp;
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
index 52a5ee5..9d17fb3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_wb.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -62,15 +62,18 @@
 /**
  * struct sde_hw_wb : WB driver object
  * @struct sde_hw_blk_reg_map *hw;
+ * @catalog: back pointer to catalog
+ * @mdp:          pointer to associated mdp portion of the catalog
  * @idx
  * @wb_hw_caps
  * @ops
- * @highest_bank_bit: GPU highest memory bank bit used
  * @hw_mdp: MDP top level hardware block
  */
 struct sde_hw_wb {
 	/* base */
 	struct sde_hw_blk_reg_map hw;
+	struct sde_mdss_cfg *catalog;
+	struct sde_mdp_cfg *mdp;
 
 	/* wb path */
 	int idx;
@@ -79,8 +82,6 @@
 	/* ops */
 	struct sde_hw_wb_ops ops;
 
-	u32 highest_bank_bit;
-
 	struct sde_hw_mdp *hw_mdp;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 1a177d1..6ab555c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -339,24 +339,12 @@
 
 static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-
 	return sde_crtc_vblank(crtc, true);
 }
 
 static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
-	struct sde_kms *sde_kms = to_sde_kms(kms);
-	struct drm_device *dev = sde_kms->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-
 	sde_crtc_vblank(crtc, false);
-
-	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 }
 
 static void sde_kms_prepare_commit(struct msm_kms *kms,
@@ -873,7 +861,6 @@
 	/* safe to call these more than once during shutdown */
 	_sde_debugfs_destroy(sde_kms);
 	_sde_kms_mmu_destroy(sde_kms);
-	sde_core_perf_destroy(&sde_kms->perf);
 
 	if (sde_kms->catalog) {
 		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
@@ -998,8 +985,8 @@
 		mmu = msm_smmu_new(sde_kms->dev->dev, i);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
-			SDE_ERROR("failed to init iommu id %d: rc: %d\n", i,
-					ret);
+			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
+								i, ret);
 			continue;
 		}
 
@@ -1155,6 +1142,8 @@
 	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
 	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
 		rc = PTR_ERR(sde_kms->core_client);
+		if (!sde_kms->core_client)
+			rc = -EINVAL;
 		SDE_ERROR("sde power client create failed: %d\n", rc);
 		sde_kms->core_client = NULL;
 		goto error;
@@ -1174,6 +1163,8 @@
 	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
 	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
 		rc = PTR_ERR(sde_kms->catalog);
+		if (!sde_kms->catalog)
+			rc = -EINVAL;
 		SDE_ERROR("catalog init failed: %d\n", rc);
 		sde_kms->catalog = NULL;
 		goto power_error;
@@ -1201,6 +1192,8 @@
 	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
 	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
 		rc = PTR_ERR(sde_kms->hw_mdp);
+		if (!sde_kms->hw_mdp)
+			rc = -EINVAL;
 		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
 		sde_kms->hw_mdp = NULL;
 		goto power_error;
@@ -1213,6 +1206,8 @@
 				sde_kms->vbif[vbif_idx], sde_kms->catalog);
 		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
 			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			if (!sde_kms->hw_vbif[vbif_idx])
+				rc = -EINVAL;
 			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
 			sde_kms->hw_vbif[vbif_idx] = NULL;
 			goto power_error;
@@ -1264,6 +1259,8 @@
 	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
 	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
 		rc = PTR_ERR(sde_kms->hw_intr);
+		if (!sde_kms->hw_intr)
+			rc = -EINVAL;
 		SDE_ERROR("hw_intr init failed: %d\n", rc);
 		sde_kms->hw_intr = NULL;
 		goto hw_intr_init_err;
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 908926c..78c596d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -136,6 +136,7 @@
 	struct sde_debugfs_regset32 debugfs_src;
 	struct sde_debugfs_regset32 debugfs_scaler;
 	struct sde_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
 };
 
 #define to_sde_plane(x) container_of(x, struct sde_plane, base)
@@ -666,6 +667,7 @@
 	}
 
 	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&psde->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
 
 	decimated = DECIMATED_DIMENSION(src_w,
 			psde->pipe_cfg.horz_decimation);
@@ -1007,7 +1009,8 @@
 		int error;
 
 		error = _sde_plane_setup_scaler3_lut(psde, pstate);
-		if (error || !psde->pixel_ext_usr) {
+		if (error || !psde->pixel_ext_usr ||
+				psde->debugfs_default_scale) {
 			/* calculate default config for QSEED3 */
 			_sde_plane_setup_scaler3(psde,
 					psde->pipe_cfg.src_rect.w,
@@ -1017,7 +1020,8 @@
 					psde->scaler3_cfg, fmt,
 					chroma_subsmpl_h, chroma_subsmpl_v);
 		}
-	} else if (!psde->pixel_ext_usr || !pstate) {
+	} else if (!psde->pixel_ext_usr || !pstate ||
+			psde->debugfs_default_scale) {
 		uint32_t deci_dim, i;
 
 		/* calculate default configuration for QSEED2 */
@@ -2549,6 +2553,10 @@
 		sde_debugfs_create_regset32("scaler_blk", 0444,
 				psde->debugfs_root,
 				&psde->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0644,
+				psde->debugfs_root,
+				&psde->debugfs_default_scale);
 	}
 
 	if (cfg->features & BIT(SDE_SSPP_CSC) ||
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 204a9e5..baa214a 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -23,6 +23,7 @@
 #include "sde_hw_wb.h"
 #include "sde_encoder.h"
 #include "sde_connector.h"
+#include "sde_hw_dsc.h"
 
 #define RESERVED_BY_OTHER(h, r) \
 	((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -168,7 +169,7 @@
 	blk_list = &rm->hw_blks[i->type];
 
 	if (i->blk && (&i->blk->list == blk_list)) {
-		SDE_ERROR("attempt resume iteration past last\n");
+		SDE_DEBUG("attempt resume iteration past last\n");
 		return false;
 	}
 
@@ -221,6 +222,9 @@
 	case SDE_HW_BLK_WB:
 		sde_hw_wb_destroy(hw);
 		break;
+	case SDE_HW_BLK_DSC:
+		sde_hw_dsc_destroy(hw);
+		break;
 	case SDE_HW_BLK_SSPP:
 		/* SSPPs are not managed by the resource manager */
 	case SDE_HW_BLK_TOP:
@@ -309,6 +313,10 @@
 		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
 		name = "wb";
 		break;
+	case SDE_HW_BLK_DSC:
+		hw = sde_hw_dsc_init(id, mmio, cat);
+		name = "dsc";
+		break;
 	case SDE_HW_BLK_SSPP:
 		/* SSPPs are not managed by the resource manager */
 	case SDE_HW_BLK_TOP:
@@ -416,6 +424,15 @@
 		}
 	}
 
+	for (i = 0; i < cat->dsc_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSC,
+			cat->dsc[i].id, &cat->dsc[i]);
+		if (rc) {
+			SDE_ERROR("failed: dsc hw not available\n");
+			goto fail;
+		}
+	}
+
 	for (i = 0; i < cat->intf_count; i++) {
 		if (cat->intf[i].type == INTF_NONE) {
 			SDE_DEBUG("skip intf %d with type none\n", i);
@@ -723,6 +740,37 @@
 	return 0;
 }
 
+static int _sde_rm_reserve_dsc(
+		struct sde_rm *rm,
+		struct sde_rm_rsvp *rsvp,
+		struct sde_rm_requirements *reqs)
+{
+	struct sde_rm_hw_iter iter;
+	int alloc_count = 0;
+	int num_dsc_enc = reqs->num_lm;
+
+	if (!reqs->hw_res.needs_dsc)
+		return 0;
+
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSC);
+
+	while (sde_rm_get_hw(rm, &iter)) {
+		if (RESERVED_BY_OTHER(iter.blk, rsvp))
+			continue;
+
+		iter.blk->rsvp_nxt = rsvp;
+		SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+
+		if (++alloc_count == num_dsc_enc)
+			return 0;
+	}
+
+	SDE_ERROR("couldn't reserve %d dsc blocks for enc id %d\n",
+		num_dsc_enc, rsvp->enc_id);
+
+	return -ENAVAIL;
+}
+
 static int _sde_rm_reserve_cdm(
 		struct sde_rm *rm,
 		struct sde_rm_rsvp *rsvp,
@@ -889,6 +937,10 @@
 	if (ret)
 		return ret;
 
+	ret = _sde_rm_reserve_dsc(rm, rsvp, reqs);
+	if (ret)
+		return ret;
+
 	return ret;
 }
 
@@ -938,6 +990,10 @@
 			CONNECTOR_PROP_TOPOLOGY_CONTROL);
 	sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
 
+	/* DSC blocks are hardwired for control path 0 and 1 */
+	if (reqs->hw_res.needs_dsc)
+		reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+
 	/* Base assumption is LMs = h_tiles, conditions below may override */
 	reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
 
@@ -1074,14 +1130,6 @@
 	}
 
 	kfree(rsvp);
-
-	/* if no remaining reservation, then clear the topology name */
-	if (!_sde_rm_get_rsvp(rm, conn->encoder))
-		(void) msm_property_set_property(
-				sde_connector_get_propinfo(conn),
-				sde_connector_get_property_values(conn->state),
-				CONNECTOR_PROP_TOPOLOGY_NAME,
-				SDE_RM_TOPOLOGY_UNKNOWN);
 }
 
 void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1117,6 +1165,12 @@
 		SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
 				rsvp->enc_id);
 		_sde_rm_release_rsvp(rm, rsvp, conn);
+
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(conn),
+				sde_connector_get_property_values(conn->state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 }
 
@@ -1134,8 +1188,12 @@
 			sde_connector_get_property_values(conn_state),
 			CONNECTOR_PROP_TOPOLOGY_NAME,
 			rsvp->topology);
-	if (ret)
+	if (ret) {
+		SDE_ERROR("failed to set topology name property, ret %d\n",
+				ret);
 		_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+		return ret;
+	}
 
 	/* Swap next rsvp to be the active */
 	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1228,6 +1286,12 @@
 		_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
 		rsvp_cur = NULL;
 		_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+		(void) msm_property_set_property(
+				sde_connector_get_propinfo(
+						conn_state->connector),
+				sde_connector_get_property_values(conn_state),
+				CONNECTOR_PROP_TOPOLOGY_NAME,
+				SDE_RM_TOPOLOGY_UNKNOWN);
 	}
 
 	/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 4479e5e..2220925 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -341,7 +341,7 @@
 			wb_dev->wb_cfg->sblk->maxlinewidth);
 
 	sde_kms_info_start(info, "features");
-	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC_1_0))
+	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC))
 		sde_kms_info_append(info, "wb_ubwc");
 	sde_kms_info_stop(info);
 
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 4c1260b..697b7f7 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -1974,6 +1974,9 @@
 	char *end_addr;
 	int i;
 
+	if (!len_bytes)
+		return;
+
 	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
 	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
 
@@ -2446,8 +2449,12 @@
 			sizeof(sde_dbg_base.req_dump_blks));
 
 	va_start(args, name);
-	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
-		blk_name = va_arg(args, char*);
+	i = 0;
+	while ((blk_name = va_arg(args, char*))) {
+		if (i++ >= SDE_EVTLOG_MAX_DATA) {
+			pr_err("could not parse all dump arguments\n");
+			break;
+		}
 		if (IS_ERR_OR_NULL(blk_name))
 			break;
 
@@ -2471,9 +2478,6 @@
 		if (!strcmp(blk_name, "panic"))
 			do_panic = true;
 	}
-	blk_name = va_arg(args, char*);
-	if (!IS_ERR_OR_NULL(blk_name))
-		pr_err("could not parse all dump arguments\n");
 	va_end(args);
 
 	if (queue_work) {
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
index 70a4225..d5a438e 100644
--- a/drivers/gpu/drm/msm/sde_io_util.c
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -355,7 +355,11 @@
 	return rc;
 
 error:
-	msm_dss_put_clk(clk_arry, num_clk);
+	for (i--; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
 
 	return rc;
 } /* msm_dss_get_clk */
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 5157b9c..a44d4f2 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -470,6 +470,8 @@
 		if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
 			pr_err("reg bus handle parsing failed\n");
 			rc = PTR_ERR(pdbus->data_bus_scale_table);
+			if (!pdbus->data_bus_scale_table)
+				rc = -EINVAL;
 			goto end;
 		}
 		pdbus->data_bus_hdl = msm_bus_scale_register_client(
@@ -511,6 +513,8 @@
 		if (IS_ERR_OR_NULL(bus_scale_table)) {
 			pr_err("reg bus handle parsing failed\n");
 			rc = PTR_ERR(bus_scale_table);
+			if (!bus_scale_table)
+				rc = -EINVAL;
 			goto end;
 		}
 		phandle->reg_bus_hdl = msm_bus_scale_register_client(
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index b36e17c..2464551 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -48,6 +48,8 @@
 #define MAX_BUFFER_SIZE 256
 
 #define TRY_CMD_MODE_SWITCH		0xFFFF
+#define TRY_CLK_MODE_SWITCH		0xFFFE
+#define STATE_UPDATE_NOT_ALLOWED	0xFFFD
 
 static struct sde_rsc_priv *rsc_prv_list[MAX_RSC_COUNT];
 
@@ -140,6 +142,111 @@
 	return;
 }
 
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr)
+{
+	struct sde_rsc_event *evt;
+	struct sde_rsc_priv *rsc;
+
+	if (rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index:%d\n", rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!rsc_prv_list[rsc_index]) {
+		pr_err("rsc idx:%d not probed yet or not available\n",
+								rsc_index);
+		return ERR_PTR(-EINVAL);
+	} else if (!cb_func || !event_type) {
+		pr_err("no event or cb func\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rsc = rsc_prv_list[rsc_index];
+	evt = kzalloc(sizeof(struct sde_rsc_event), GFP_KERNEL);
+	if (!evt)
+		return ERR_PTR(-ENOMEM);
+
+	evt->event_type = event_type;
+	evt->rsc_index = rsc_index;
+	evt->usr = usr;
+	evt->cb_func = cb_func;
+	pr_debug("event register type:%d rsc index:%d\n",
+						event_type, rsc_index);
+
+	mutex_lock(&rsc->client_lock);
+	list_add(&evt->list, &rsc->event_list);
+	mutex_unlock(&rsc->client_lock);
+
+	return evt;
+}
+
+void sde_rsc_unregister_event(struct sde_rsc_event *event)
+{
+	struct sde_rsc_priv *rsc;
+
+	if (!event) {
+		pr_debug("invalid event client\n");
+		goto end;
+	} else if (event->rsc_index >= MAX_RSC_COUNT) {
+		pr_err("invalid rsc index\n");
+		goto end;
+	}
+
+	pr_debug("event client destroyed\n");
+	rsc = rsc_prv_list[event->rsc_index];
+	if (!rsc)
+		goto end;
+
+	mutex_lock(&rsc->client_lock);
+	list_del_init(&event->list);
+	mutex_unlock(&rsc->client_lock);
+
+	kfree(event);
+end:
+	return;
+}
+
+static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp;
+
+	if (!phandle || !pclient) {
+		pr_err("invalid input argument\n");
+		return -EINVAL;
+	}
+
+	mp = &phandle->mp;
+
+	if (enable)
+		pclient->refcount++;
+	else if (pclient->refcount)
+		pclient->refcount--;
+
+	if (pclient->refcount)
+		pclient->usecase_ndx = VOTE_INDEX_LOW;
+	else
+		pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+	if (phandle->current_usecase_ndx == pclient->usecase_ndx)
+		goto end;
+
+	if (enable) {
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto end;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+	}
+
+	phandle->current_usecase_ndx = pclient->usecase_ndx;
+
+end:
+	return rc;
+}
+
 static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *cmd_config)
 {
@@ -237,24 +344,50 @@
 static int sde_rsc_switch_to_idle(struct sde_rsc_priv *rsc)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+	bool idle_switch = true;
 
 	list_for_each_entry(client, &rsc->client_list, list)
-		if (client->current_state != SDE_RSC_IDLE_STATE)
-			return TRY_CMD_MODE_SWITCH;
+		if (client->current_state != SDE_RSC_IDLE_STATE) {
+			idle_switch = false;
+			break;
+		}
 
-	if (rsc->hw_ops.state_update)
+	if (!idle_switch) {
+		/**
+		 * following code needs to run the loop through each
+		 * client because they might be in different order
+		 * sorting is not possible; only preference is available
+		 */
+
+		/* first check if any vid client active */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_VID_STATE)
+				return rc;
+
+		/* now try cmd state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CMD_STATE)
+				return TRY_CMD_MODE_SWITCH;
+
+		/* now try clk state switch */
+		list_for_each_entry(client, &rsc->client_list, list)
+			if (client->current_state == SDE_RSC_CLK_STATE)
+				return TRY_CLK_MODE_SWITCH;
+
+	} else if (rsc->hw_ops.state_update) {
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_IDLE_STATE);
+	}
 
 	return rc;
 }
 
-static bool sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
+static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
 {
 	struct sde_rsc_client *client;
-	int rc = 0;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
 
 	if (!rsc->primary_client) {
 		pr_err("primary client not available for cmd state switch\n");
@@ -276,6 +409,12 @@
 		if (client->current_state == SDE_RSC_VID_STATE)
 			goto end;
 
+	/* no need to enable solver again */
+	if (rsc->current_state == SDE_RSC_CLK_STATE) {
+		rc = 0;
+		goto end;
+	}
+
 	if (rsc->hw_ops.state_update)
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
 
@@ -287,6 +426,28 @@
 	return rc;
 }
 
+static bool sde_rsc_switch_to_clk(struct sde_rsc_priv *rsc)
+{
+	struct sde_rsc_client *client;
+	int rc = STATE_UPDATE_NOT_ALLOWED;
+
+	list_for_each_entry(client, &rsc->client_list, list)
+		if ((client->current_state == SDE_RSC_VID_STATE) ||
+		    (client->current_state == SDE_RSC_CMD_STATE))
+			goto end;
+
+	/* no need to enable the solver again */
+	if (rsc->current_state == SDE_RSC_CMD_STATE) {
+		rc = 0;
+		goto end;
+	}
+
+	if (rsc->hw_ops.state_update)
+		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+end:
+	return rc;
+}
+
 static bool sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
 	struct sde_rsc_cmd_config *config,
 	struct sde_rsc_client *caller_client, bool wait_req)
@@ -310,7 +471,7 @@
 
 /**
  * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
  * set this property during panel config time. A switching client can set the
  * property to change the state
  *
@@ -350,8 +511,7 @@
 		pr_err("invalid master component binding\n");
 		rc = -EINVAL;
 		goto end;
-	} else if ((rsc->current_state == state) &&
-				(state != SDE_RSC_CMD_UPDATE_STATE)) {
+	} else if ((rsc->current_state == state) && !config) {
 		pr_debug("no state change: %d\n", state);
 		goto end;
 	}
@@ -360,22 +520,33 @@
 		__builtin_return_address(0), rsc->current_state,
 		caller_client->name, state);
 
-	wait_requested = (rsc->current_state != SDE_RSC_IDLE_STATE);
+	/* only switch state needs vsync wait */
+	wait_requested = (rsc->current_state == SDE_RSC_VID_STATE) ||
+			(rsc->current_state == SDE_RSC_CMD_STATE);
 
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	switch (state) {
 	case SDE_RSC_IDLE_STATE:
 		rc = sde_rsc_switch_to_idle(rsc);
+
 		/* video state client might be exiting; try cmd state switch */
-		if (rc == TRY_CMD_MODE_SWITCH)
+		if (rc == TRY_CMD_MODE_SWITCH) {
 			rc = sde_rsc_switch_to_cmd(rsc, NULL,
 					rsc->primary_client, wait_requested);
+			if (!rc)
+				state = SDE_RSC_CMD_STATE;
+
+		/* cmd state client might be exiting; try clk state switch */
+		} else if (rc == TRY_CLK_MODE_SWITCH) {
+			rc = sde_rsc_switch_to_clk(rsc);
+			if (!rc)
+				state = SDE_RSC_CLK_STATE;
+		}
 		break;
 
 	case SDE_RSC_CMD_STATE:
-	case SDE_RSC_CMD_UPDATE_STATE:
 		rc = sde_rsc_switch_to_cmd(rsc, config, caller_client,
 								wait_requested);
 		break;
@@ -385,21 +556,29 @@
 								wait_requested);
 		break;
 
+	case SDE_RSC_CLK_STATE:
+		rc = sde_rsc_switch_to_clk(rsc);
+		break;
+
 	default:
 		pr_err("invalid state handling %d\n", state);
 		break;
 	}
 
-	if (rc) {
+	if (rc == STATE_UPDATE_NOT_ALLOWED) {
+		rc = 0;
+		goto clk_disable;
+	} else if (rc) {
 		pr_err("state update failed rc:%d\n", rc);
-		goto end;
+		goto clk_disable;
 	}
 
 	pr_debug("state switch successfully complete: %d\n", state);
 	rsc->current_state = state;
 
+clk_disable:
 	if (rsc->power_collapse)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 end:
 	mutex_unlock(&rsc->client_lock);
 	return rc;
@@ -518,7 +697,7 @@
 		seq_printf(s, "\t client:%s state:%d\n",
 				client->name, client->current_state);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (rsc->hw_ops.debug_show) {
 		ret = rsc->hw_ops.debug_show(s, rsc);
@@ -526,7 +705,7 @@
 			pr_err("sde rsc: hw debug failed ret:%d\n", ret);
 	}
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	return 0;
@@ -555,12 +734,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
 							MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -594,7 +773,7 @@
 	input[count - 1] = '\0';
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	mode = strnstr(input, "mode0=", strlen("mode0="));
 	if (mode) {
@@ -620,7 +799,7 @@
 	}
 
 end:
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	pr_err("req: mode0:%d mode1:%d mode2:%d\n", mode0_state, mode1_state,
@@ -647,12 +826,12 @@
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
 						MAX_BUFFER_SIZE, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	if (blen < 0)
@@ -692,7 +871,7 @@
 	}
 
 	mutex_lock(&rsc->client_lock);
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, true);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
 
 	if (vsync_state)
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -700,7 +879,7 @@
 	else
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
 
-	sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	mutex_unlock(&rsc->client_lock);
 
 	kfree(input);
@@ -750,7 +929,7 @@
 		return;
 
 	if (rsc->pclient)
-		sde_power_resource_enable(&rsc->phandle, rsc->pclient, false);
+		sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 	if (rsc->fs)
 		devm_regulator_put(rsc->fs);
 	if (rsc->wrapper_io.base)
@@ -890,8 +1069,7 @@
 		goto sde_rsc_fail;
 	}
 
-	/* these clocks are always on */
-	if (sde_power_resource_enable(&rsc->phandle, rsc->pclient, true)) {
+	if (sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true)) {
 		pr_err("failed to enable sde rsc power resources\n");
 		goto sde_rsc_fail;
 	}
@@ -899,6 +1077,8 @@
 	if (sde_rsc_timer_calculate(rsc, NULL))
 		goto sde_rsc_fail;
 
+	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
+
 	INIT_LIST_HEAD(&rsc->client_list);
 	mutex_init(&rsc->client_lock);
 
diff --git a/drivers/gpu/drm/msm/sde_rsc.h b/drivers/gpu/drm/msm/sde_rsc.h
index e9a55b6..2775d21 100644
--- a/drivers/gpu/drm/msm/sde_rsc.h
+++ b/drivers/gpu/drm/msm/sde_rsc.h
@@ -33,6 +33,37 @@
 struct sde_rsc_priv;
 
 /**
+ * event will be triggered before sde core power collapse,
+ * mdss gdsc is still on
+ */
+#define SDE_RSC_EVENT_PRE_CORE_PC 0x1
+/**
+ * event will be triggered after sde core collapse complete,
+ * mdss gdsc is off now
+ */
+#define SDE_RSC_EVENT_POST_CORE_PC 0x2
+/**
+ * event will be triggered before restoring the sde core from power collapse,
+ * mdss gdsc is still off
+ */
+#define SDE_RSC_EVENT_PRE_CORE_RESTORE 0x4
+/**
+ * event will be triggered after restoring the sde core from power collapse,
+ * mdss gdsc is on now
+ */
+#define SDE_RSC_EVENT_POST_CORE_RESTORE 0x8
+/**
+ * event attached with solver state enabled
+ * all clients in clk_state or cmd_state
+ */
+#define SDE_RSC_EVENT_SOLVER_ENABLED 0x10
+/**
+ * event attached with solver state disabled
+ * one of the client requested for vid state
+ */
+#define SDE_RSC_EVENT_SOLVER_DISABLED 0x20
+
+/**
  * rsc_mode_req: sde rsc mode request information
  * MODE_READ: read vsync status
  * MODE0_UPDATE: mode0 status , this should be 0x0
@@ -60,23 +91,22 @@
 
 /**
  * sde_rsc_state: sde rsc state information
- * SDE_RSC_MODE_IDLE: A client requests for idle state when there is no
+ * SDE_RSC_IDLE_STATE: A client requests for idle state when there is no
  *                    pixel or cmd transfer expected. An idle vote from
  *                    all clients lead to power collapse state.
- * SDE_RSC_MODE_CMD:  A client requests for cmd state when it wants to
+ * SDE_RSC_CLK_STATE:  A client requests for clk state when it wants to
+ *                    only avoid mode-2 entry/exit. For ex: V4L2 driver,
+ *                    sde power handle, etc.
+ * SDE_RSC_CMD_STATE:  A client requests for cmd state when it wants to
  *                    enable the solver mode.
- * SDE_RSC_MODE_CMD_UPDATE: A clients requests for cmd_update state when
- *                    it wants to update the backoff time during solver
- *                    enable state. Inline-rotation is one good example
- *                    use case. It increases the prefill lines by 128 lines.
- * SDE_RSC_MODE_VID:  A client requests for vid state it wants to avoid
+ * SDE_RSC_VID_STATE:  A client requests for vid state when it wants to avoid
  *                    solver enable because client is fetching data from
  *                    continuously.
  */
 enum sde_rsc_state {
 	SDE_RSC_IDLE_STATE,
+	SDE_RSC_CLK_STATE,
 	SDE_RSC_CMD_STATE,
-	SDE_RSC_CMD_UPDATE_STATE,
 	SDE_RSC_VID_STATE,
 };
 
@@ -86,7 +116,7 @@
  * @current_state:   current client state
  * @crtc_id:		crtc_id associated with this rsc client.
  * @rsc_index:	rsc index of a client - only index "0" valid.
- * @list:	list to attach power handle master list
+ * @list:	list to attach client master list
  */
 struct sde_rsc_client {
 	char name[MAX_RSC_CLIENT_NAME_LEN];
@@ -97,6 +127,22 @@
 };
 
 /**
+ * struct sde_rsc_event: local event registration entry structure
+ * @cb_func:	Pointer to desired callback function
+ * @usr:	User pointer to pass to callback on event trigger
+ * @rsc_index:	rsc index of a client - only index "0" valid.
+ * @event_type:	refer comments in event_register
+ * @list:	list to attach event master list
+ */
+struct sde_rsc_event {
+	void (*cb_func)(uint32_t event_type, void *usr);
+	void *usr;
+	u32 rsc_index;
+	uint32_t event_type;
+	struct list_head list;
+};
+
+/**
  * struct sde_rsc_hw_ops - sde resource state coordinator hardware ops
  * @init:			Initialize the sequencer, solver, qtimer,
 				etc. hardware blocks on RSC.
@@ -183,6 +229,7 @@
  * @wrapper_io:		wrapper io data mapping
  *
  * @client_list:	current rsc client list handle
+ * @event_list:		current rsc event list handle
  * @client_lock:	current rsc client synchronization lock
  *
  * timer_config:	current rsc timer configuration
@@ -212,6 +259,7 @@
 	struct dss_io_data wrapper_io;
 
 	struct list_head client_list;
+	struct list_head event_list;
 	struct mutex client_lock;
 
 	struct sde_rsc_timer_config timer_config;
@@ -261,7 +309,7 @@
 
 /**
  * sde_rsc_client_state_update() - rsc client state update
- * Video mode and command mode are supported as modes. A client need to
+ * Video mode, cmd mode and clk state are supported as modes. A client needs to
  * set this property during panel time. A switching client can set the
  * property to change the state
  *
@@ -298,5 +346,23 @@
  */
 int sde_rsc_hw_register(struct sde_rsc_priv *rsc);
 
+/**
+ * sde_rsc_register_event - register a callback function for an event
+ * @rsc_index:   A client will be created on this RSC. As of now only
+ *               SDE_RSC_INDEX is valid rsc index.
+ * @event_type:  event type to register; client sets 0x3 if it wants
+ *               to register for CORE_PC and CORE_RESTORE - both events.
+ * @cb_func:     Pointer to desired callback function
+ * @usr:         User pointer to pass to callback on event trigger
+ * Returns: sde_rsc_event pointer on success
+ */
+struct sde_rsc_event *sde_rsc_register_event(int rsc_index, uint32_t event_type,
+		void (*cb_func)(uint32_t event_type, void *usr), void *usr);
+
+/**
+ * sde_rsc_unregister_event - unregister callback for an event
+ * @sde_rsc_event: event returned by sde_rsc_register_event
+ */
+void sde_rsc_unregister_event(struct sde_rsc_event *event);
 
 #endif /* _SDE_RSC_H_ */
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 8dd04bd..dd7f37a 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -93,15 +93,17 @@
 #define SDE_RSCC_F0_QTMR_V1_CNTP_CTL			0x202C
 #define SDE_RSCC_F1_QTMR_V1_CNTP_CTL			0x302C
 
-/* mdp and dsi clocks in clock gate state */
-#define DISP_MDP_DSI_CLK_GATE		0x7f0
-
-/* mdp and dsi clocks in clock ungate state */
-#define MDSS_CORE_GDSCR			0x0
-#define DISP_MDP_DSI_CLK_UNGATE		0x5000
-
 #define MAX_CHECK_LOOPS			500
 
+static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
+{
+	struct sde_rsc_event *event;
+
+	list_for_each_entry(event, &rsc->event_list, list)
+		if (event->event_type & event_type)
+			event->cb_func(event_type, event->usr);
+}
+
 static int rsc_hw_qtimer_init(struct sde_rsc_priv *rsc)
 {
 	pr_debug("rsc hardware qtimer init\n");
@@ -182,31 +184,33 @@
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x10,
 						0x888babec, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x14,
-						0xaaa8a020, rsc->debug_mode);
+						0xa806a020, rsc->debug_mode);
 
 	/* Mode - 2 sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x18,
-						0xe1a138eb, rsc->debug_mode);
+						0xa138ebaa, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x1c,
-						0xa2ede081, rsc->debug_mode);
+						0xe0a581e1, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x20,
-						0x8a3982e2, rsc->debug_mode);
+						0x82e2a2ed, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x24,
-						0xa92088ea, rsc->debug_mode);
+						0x88ea8a39, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x28,
-						0x89e6a6e9, rsc->debug_mode);
+						0xa6e9a920, rsc->debug_mode);
 
 	/* tcs sleep sequence */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x2c,
-						0xa7e9a920, rsc->debug_mode);
+						0xa92089e6, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x30,
-						0x002089e7, rsc->debug_mode);
+						0x89e7a7e9, rsc->debug_mode);
+	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_MEM_0_DRV0 + 0x34,
+						0x00000020, rsc->debug_mode);
 
 	/* branch address */
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_0_DRV0,
-						0x27, rsc->debug_mode);
+						0x29, rsc->debug_mode);
 	dss_reg_w(&rsc->drv_io, SDE_RSCC_SEQ_CFG_BR_ADDR_1_DRV0,
-						0x2d, rsc->debug_mode);
+						0x2f, rsc->debug_mode);
 
 	return 0;
 }
@@ -297,10 +301,13 @@
 	rc = regulator_set_mode(rsc->fs, REGULATOR_MODE_FAST);
 	if (rc) {
 		pr_err("vdd reg fast mode set failed rc:%d\n", rc);
-		goto end;
+		return rc;
 	}
 
 	rc = -EBUSY;
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC);
+
 	wrapper_status = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 				rsc->debug_mode);
 	wrapper_status |= BIT(3);
@@ -319,10 +326,20 @@
 		usleep_range(1, 2);
 	}
 
-	if (rc)
+	if (rc) {
 		pr_err("vdd fs is still enabled\n");
+		goto end;
+	}
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
+
+	return 0;
 
 end:
+	regulator_set_mode(rsc->fs, REGULATOR_MODE_NORMAL);
+
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -331,6 +348,8 @@
 	int rc = -EBUSY;
 	int count, reg;
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
+
 	// needs review with HPG sequence
 	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
 					0x0, rsc->debug_mode);
@@ -374,6 +393,8 @@
 	if (rc)
 		pr_err("vdd reg normal mode set failed rc:%d\n", rc);
 
+	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE);
+
 	return rc;
 }
 
@@ -407,6 +428,8 @@
 							reg, rsc->debug_mode);
 		/* make sure that solver is enabled */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_ENABLED);
 		break;
 
 	case SDE_RSC_VID_STATE:
@@ -424,6 +447,8 @@
 							0x1, rsc->debug_mode);
 		/* make sure that solver mode is override */
 		wmb();
+
+		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
 		break;
 
 	case SDE_RSC_IDLE_STATE:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 77a52b5..70f0344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -95,9 +95,11 @@
 nvkm-y += nvkm/engine/disp/cursgt215.o
 nvkm-y += nvkm/engine/disp/cursgf119.o
 nvkm-y += nvkm/engine/disp/cursgk104.o
+nvkm-y += nvkm/engine/disp/cursgp102.o
 
 nvkm-y += nvkm/engine/disp/oimmnv50.o
 nvkm-y += nvkm/engine/disp/oimmg84.o
 nvkm-y += nvkm/engine/disp/oimmgt215.o
 nvkm-y += nvkm/engine/disp/oimmgf119.o
 nvkm-y += nvkm/engine/disp/oimmgk104.o
+nvkm-y += nvkm/engine/disp/oimmgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
index dd2953b..9d90d8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c
@@ -82,7 +82,7 @@
 
 			if (mthd->addr) {
 				snprintf(cname_, sizeof(cname_), "%s %d",
-					 mthd->name, chan->chid);
+					 mthd->name, chan->chid.user);
 				cname = cname_;
 			}
 
@@ -139,7 +139,7 @@
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		notify->size  = sizeof(struct nvif_notify_uevent_rep);
 		notify->types = 1;
-		notify->index = chan->chid;
+		notify->index = chan->chid.user;
 		return 0;
 	}
 
@@ -159,7 +159,7 @@
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	*data = nvkm_rd32(device, 0x640000 + (chan->chid * 0x1000) + addr);
+	*data = nvkm_rd32(device, 0x640000 + (chan->chid.user * 0x1000) + addr);
 	return 0;
 }
 
@@ -169,7 +169,7 @@
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
-	nvkm_wr32(device, 0x640000 + (chan->chid * 0x1000) + addr, data);
+	nvkm_wr32(device, 0x640000 + (chan->chid.user * 0x1000) + addr, data);
 	return 0;
 }
 
@@ -196,7 +196,7 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_device *device = disp->base.engine.subdev.device;
 	*addr = device->func->resource_addr(device, 0) +
-		0x640000 + (chan->chid * 0x1000);
+		0x640000 + (chan->chid.user * 0x1000);
 	*size = 0x001000;
 	return 0;
 }
@@ -243,8 +243,8 @@
 {
 	struct nv50_disp_chan *chan = nv50_disp_chan(object);
 	struct nv50_disp *disp = chan->root->disp;
-	if (chan->chid >= 0)
-		disp->chan[chan->chid] = NULL;
+	if (chan->chid.user >= 0)
+		disp->chan[chan->chid.user] = NULL;
 	return chan->func->dtor ? chan->func->dtor(chan) : chan;
 }
 
@@ -263,7 +263,7 @@
 int
 nv50_disp_chan_ctor(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nv50_disp_chan *chan)
 {
@@ -273,21 +273,22 @@
 	chan->func = func;
 	chan->mthd = mthd;
 	chan->root = root;
-	chan->chid = chid;
+	chan->chid.ctrl = ctrl;
+	chan->chid.user = user;
 	chan->head = head;
 
-	if (disp->chan[chan->chid]) {
-		chan->chid = -1;
+	if (disp->chan[chan->chid.user]) {
+		chan->chid.user = -1;
 		return -EBUSY;
 	}
-	disp->chan[chan->chid] = chan;
+	disp->chan[chan->chid.user] = chan;
 	return 0;
 }
 
 int
 nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
 		    const struct nv50_disp_chan_mthd *mthd,
-		    struct nv50_disp_root *root, int chid, int head,
+		    struct nv50_disp_root *root, int ctrl, int user, int head,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
@@ -297,5 +298,6 @@
 		return -ENOMEM;
 	*pobject = &chan->object;
 
-	return nv50_disp_chan_ctor(func, mthd, root, chid, head, oclass, chan);
+	return nv50_disp_chan_ctor(func, mthd, root, ctrl, user,
+				   head, oclass, chan);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index f5f683d..737b38f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -7,7 +7,11 @@
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
 	struct nv50_disp_root *root;
-	int chid;
+
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 	int head;
 
 	struct nvkm_object object;
@@ -25,11 +29,11 @@
 
 int nv50_disp_chan_ctor(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nv50_disp_chan *);
 int nv50_disp_chan_new_(const struct nv50_disp_chan_func *,
 			const struct nv50_disp_chan_mthd *,
-			struct nv50_disp_root *, int chid, int head,
+			struct nv50_disp_root *, int ctrl, int user, int head,
 			const struct nvkm_oclass *, struct nvkm_object **);
 
 extern const struct nv50_disp_chan_func nv50_disp_pioc_func;
@@ -90,13 +94,16 @@
 struct nv50_disp_pioc_oclass {
 	int (*ctor)(const struct nv50_disp_chan_func *,
 		    const struct nv50_disp_chan_mthd *,
-		    struct nv50_disp_root *, int chid,
+		    struct nv50_disp_root *, int ctrl, int user,
 		    const struct nvkm_oclass *, void *data, u32 size,
 		    struct nvkm_object **);
 	struct nvkm_sclass base;
 	const struct nv50_disp_chan_func *func;
 	const struct nv50_disp_chan_mthd *mthd;
-	int chid;
+	struct {
+		int ctrl;
+		int user;
+	} chid;
 };
 
 extern const struct nv50_disp_pioc_oclass nv50_disp_oimm_oclass;
@@ -114,15 +121,17 @@
 extern const struct nv50_disp_pioc_oclass gk104_disp_oimm_oclass;
 extern const struct nv50_disp_pioc_oclass gk104_disp_curs_oclass;
 
+extern const struct nv50_disp_pioc_oclass gp102_disp_oimm_oclass;
+extern const struct nv50_disp_pioc_oclass gp102_disp_curs_oclass;
 
 int nv50_disp_curs_new(const struct nv50_disp_chan_func *,
 		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
 		       const struct nvkm_oclass *, void *data, u32 size,
 		       struct nvkm_object **);
 int nv50_disp_oimm_new(const struct nv50_disp_chan_func *,
 		       const struct nv50_disp_chan_mthd *,
-		       struct nv50_disp_root *, int chid,
+		       struct nv50_disp_root *, int ctrl, int user,
 		       const struct nvkm_oclass *, void *data, u32 size,
 		       struct nvkm_object **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
index dd99fc7..fa781b5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursg84.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
index 2a1574e..2be6fb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgf119.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 13,
+	.chid = { 13, 13 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
index 28e8f06..2a99db4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgk104.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 13,
+	.chid = { 13, 13 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
new file mode 100644
index 0000000..e958210
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_curs_oclass = {
+	.base.oclass = GK104_DISP_CURSOR,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_curs_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = { 13, 17 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
index d8a4b9c..00a7f35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgt215.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
index 8b13204..82ff82d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/cursnv50.c
@@ -33,7 +33,7 @@
 int
 nv50_disp_curs_new(const struct nv50_disp_chan_func *func,
 		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
+		   struct nv50_disp_root *root, int ctrl, int user,
 		   const struct nvkm_oclass *oclass, void *data, u32 size,
 		   struct nvkm_object **pobject)
 {
@@ -54,7 +54,7 @@
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
@@ -65,5 +65,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_curs_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 7,
+	.chid = { 7, 7 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index a57f7ce..ce7cd74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -32,8 +32,8 @@
 		     struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -9, handle,
-				 chan->base.chid << 27 | 0x00000001);
+				 chan->base.chid.user, -9, handle,
+				 chan->base.chid.user << 27 | 0x00000001);
 }
 
 void
@@ -42,22 +42,23 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -66,26 +67,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61049c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
index ad24c2c..d26d3b4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -32,26 +32,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
-	nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x611494 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x611498 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61149c + (ctrl * 0x0010), 0x00000001);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
index 9c6645a..0a1381a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.c
@@ -149,7 +149,7 @@
 	chan->func = func;
 
 	ret = nv50_disp_chan_ctor(&nv50_disp_dmac_func_, mthd, root,
-				  chid, head, oclass, &chan->base);
+				  chid, chid, head, oclass, &chan->base);
 	if (ret)
 		return ret;
 
@@ -179,9 +179,9 @@
 		    struct nvkm_object *object, u32 handle)
 {
 	return nvkm_ramht_insert(chan->base.root->ramht, object,
-				 chan->base.chid, -10, handle,
-				 chan->base.chid << 28 |
-				 chan->base.chid);
+				 chan->base.chid.user, -10, handle,
+				 chan->base.chid.user << 28 |
+				 chan->base.chid.user);
 }
 
 static void
@@ -190,21 +190,22 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* deactivate channel */
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00001010, 0x00001000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000003, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x001e0000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x001e0000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notifications */
-	nvkm_mask(device, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010001 << user, 0x00000000 << user);
 }
 
 static int
@@ -213,26 +214,27 @@
 	struct nv50_disp *disp = chan->base.root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->base.chid;
+	int ctrl = chan->base.chid.ctrl;
+	int user = chan->base.chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x610028, 0x00010000 << chid, 0x00010000 << chid);
+	nvkm_mask(device, 0x610028, 0x00010000 << user, 0x00010000 << user);
 
 	/* initialise channel for dma command submission */
-	nvkm_wr32(device, 0x610204 + (chid * 0x0010), chan->push);
-	nvkm_wr32(device, 0x610208 + (chid * 0x0010), 0x00010000);
-	nvkm_wr32(device, 0x61020c + (chid * 0x0010), chid);
-	nvkm_mask(device, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
-	nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
-	nvkm_wr32(device, 0x610200 + (chid * 0x0010), 0x00000013);
+	nvkm_wr32(device, 0x610204 + (ctrl * 0x0010), chan->push);
+	nvkm_wr32(device, 0x610208 + (ctrl * 0x0010), 0x00010000);
+	nvkm_wr32(device, 0x61020c + (ctrl * 0x0010), ctrl);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x0010), 0x00000010, 0x00000010);
+	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), 0x00000000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x0010), 0x00000013);
 
 	/* wait for it to go inactive */
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x80000000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x80000000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init timeout, %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init timeout, %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
index 54a4ae8..5ad5d0f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmg84.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
index c658db5..1f9fd34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgf119.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 9,
+	.chid = { 9, 9 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
index b1fde8c..0c09fe8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgk104.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &gf119_disp_pioc_func,
-	.chid = 9,
+	.chid = { 9, 9 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
new file mode 100644
index 0000000..abf8236
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgp102.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_pioc_oclass
+gp102_disp_oimm_oclass = {
+	.base.oclass = GK104_DISP_OVERLAY,
+	.base.minver = 0,
+	.base.maxver = 0,
+	.ctor = nv50_disp_oimm_new,
+	.func = &gf119_disp_pioc_func,
+	.chid = { 9, 13 },
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
index f4e7eb3..1281db2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmgt215.c
@@ -33,5 +33,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
index 3940b9c..07540f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/oimmnv50.c
@@ -33,7 +33,7 @@
 int
 nv50_disp_oimm_new(const struct nv50_disp_chan_func *func,
 		   const struct nv50_disp_chan_mthd *mthd,
-		   struct nv50_disp_root *root, int chid,
+		   struct nv50_disp_root *root, int ctrl, int user,
 		   const struct nvkm_oclass *oclass, void *data, u32 size,
 		   struct nvkm_object **pobject)
 {
@@ -54,7 +54,7 @@
 	} else
 		return ret;
 
-	return nv50_disp_chan_new_(func, mthd, root, chid + head,
+	return nv50_disp_chan_new_(func, mthd, root, ctrl + head, user + head,
 				   head, oclass, pobject);
 }
 
@@ -65,5 +65,5 @@
 	.base.maxver = 0,
 	.ctor = nv50_disp_oimm_new,
 	.func = &nv50_disp_pioc_func,
-	.chid = 5,
+	.chid = { 5, 5 },
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
index a625a98..0abaa64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocgf119.c
@@ -32,20 +32,21 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d fini: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d fini: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 	}
 
 	/* disable error reporting and completion notification */
-	nvkm_mask(device, 0x610090, 0x00000001 << chid, 0x00000000);
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000000);
+	nvkm_mask(device, 0x610090, 0x00000001 << user, 0x00000000);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000000);
 }
 
 static int
@@ -54,20 +55,21 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
 	/* enable error reporting */
-	nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+	nvkm_mask(device, 0x6100a0, 0x00000001 << user, 0x00000001 << user);
 
 	/* activate channel */
-	nvkm_wr32(device, 0x610490 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610490 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d init: %08x\n", chid,
-			   nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d init: %08x\n", user,
+			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
index 9d2618d..0211e0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/piocnv50.c
@@ -32,15 +32,16 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_mask(device, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
+	nvkm_mask(device, 0x610200 + (ctrl * 0x10), 0x00000001, 0x00000000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 	}
 }
 
@@ -50,26 +51,27 @@
 	struct nv50_disp *disp = chan->root->disp;
 	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
-	int chid = chan->chid;
+	int ctrl = chan->chid.ctrl;
+	int user = chan->chid.user;
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00002000);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00002000);
 	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x610200 + (chid * 0x10)) & 0x00030000))
+		if (!(nvkm_rd32(device, 0x610200 + (ctrl * 0x10)) & 0x00030000))
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout0: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout0: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
-	nvkm_wr32(device, 0x610200 + (chid * 0x10), 0x00000001);
+	nvkm_wr32(device, 0x610200 + (ctrl * 0x10), 0x00000001);
 	if (nvkm_msec(device, 2000,
-		u32 tmp = nvkm_rd32(device, 0x610200 + (chid * 0x10));
+		u32 tmp = nvkm_rd32(device, 0x610200 + (ctrl * 0x10));
 		if ((tmp & 0x00030000) == 0x00010000)
 			break;
 	) < 0) {
-		nvkm_error(subdev, "ch %d timeout1: %08x\n", chid,
-			   nvkm_rd32(device, 0x610200 + (chid * 0x10)));
+		nvkm_error(subdev, "ch %d timeout1: %08x\n", user,
+			   nvkm_rd32(device, 0x610200 + (ctrl * 0x10)));
 		return -EBUSY;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
index 8443e04..b053b29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
@@ -36,8 +36,8 @@
 		&gp104_disp_ovly_oclass,
 	},
 	.pioc = {
-		&gk104_disp_oimm_oclass,
-		&gk104_disp_curs_oclass,
+		&gp102_disp_oimm_oclass,
+		&gp102_disp_curs_oclass,
 	},
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 2f9cecd..05c829a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -207,8 +207,8 @@
 {
 	const struct nv50_disp_pioc_oclass *sclass = oclass->priv;
 	struct nv50_disp_root *root = nv50_disp_root(oclass->parent);
-	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid,
-			    oclass, data, size, pobject);
+	return sclass->ctor(sclass->func, sclass->mthd, root, sclass->chid.ctrl,
+			    sclass->chid.user, oclass, data, size, pobject);
 }
 
 static int
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index d544ff9..7aadce1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -83,8 +83,7 @@
 	/* Which channel of the HVS this pixelvalve sources from. */
 	int hvs_channel;
 
-	enum vc4_encoder_type encoder0_type;
-	enum vc4_encoder_type encoder1_type;
+	enum vc4_encoder_type encoder_types[4];
 };
 
 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
@@ -669,6 +668,14 @@
 	CRTC_WRITE(PV_INTEN, 0);
 }
 
+/* Must be called with the event lock held */
+bool vc4_event_pending(struct drm_crtc *crtc)
+{
+	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+
+	return !!vc4_crtc->event;
+}
+
 static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
 	struct drm_crtc *crtc = &vc4_crtc->base;
@@ -859,20 +866,26 @@
 
 static const struct vc4_crtc_data pv0_data = {
 	.hvs_channel = 0,
-	.encoder0_type = VC4_ENCODER_TYPE_DSI0,
-	.encoder1_type = VC4_ENCODER_TYPE_DPI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
+	},
 };
 
 static const struct vc4_crtc_data pv1_data = {
 	.hvs_channel = 2,
-	.encoder0_type = VC4_ENCODER_TYPE_DSI1,
-	.encoder1_type = VC4_ENCODER_TYPE_SMI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
+	},
 };
 
 static const struct vc4_crtc_data pv2_data = {
 	.hvs_channel = 1,
-	.encoder0_type = VC4_ENCODER_TYPE_VEC,
-	.encoder1_type = VC4_ENCODER_TYPE_HDMI,
+	.encoder_types = {
+		[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
+		[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
+	},
 };
 
 static const struct of_device_id vc4_crtc_dt_match[] = {
@@ -886,17 +899,20 @@
 					struct drm_crtc *crtc)
 {
 	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+	const struct vc4_crtc_data *crtc_data = vc4_crtc->data;
+	const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types;
 	struct drm_encoder *encoder;
 
 	drm_for_each_encoder(encoder, drm) {
 		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+		int i;
 
-		if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
-			vc4_encoder->clock_select = 0;
-			encoder->possible_crtcs |= drm_crtc_mask(crtc);
-		} else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
-			vc4_encoder->clock_select = 1;
-			encoder->possible_crtcs |= drm_crtc_mask(crtc);
+		for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) {
+			if (vc4_encoder->type == encoder_types[i]) {
+				vc4_encoder->clock_select = i;
+				encoder->possible_crtcs |= drm_crtc_mask(crtc);
+				break;
+			}
 		}
 	}
 }
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 7c1e4d9..50a55ef 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -194,6 +194,7 @@
 }
 
 enum vc4_encoder_type {
+	VC4_ENCODER_TYPE_NONE,
 	VC4_ENCODER_TYPE_HDMI,
 	VC4_ENCODER_TYPE_VEC,
 	VC4_ENCODER_TYPE_DSI0,
@@ -440,6 +441,7 @@
 extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
+bool vc4_event_pending(struct drm_crtc *crtc);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
 int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 			    unsigned int flags, int *vpos, int *hpos,
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index c1f65c6..67af2af 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -119,17 +119,34 @@
 
 	/* Make sure that any outstanding modesets have finished. */
 	if (nonblock) {
-		ret = down_trylock(&vc4->async_modeset);
-		if (ret) {
+		struct drm_crtc *crtc;
+		struct drm_crtc_state *crtc_state;
+		unsigned long flags;
+		bool busy = false;
+
+		/*
+		 * If there's an undispatched event to send then we're
+		 * obviously still busy.  If there isn't, then we can
+		 * unconditionally wait for the semaphore because it
+		 * shouldn't be contended (for long).
+		 *
+		 * This is to prevent a race where queuing a new flip
+		 * from userspace immediately on receipt of an event
+		 * beats our clean-up and returns EBUSY.
+		 */
+		spin_lock_irqsave(&dev->event_lock, flags);
+		for_each_crtc_in_state(state, crtc, crtc_state, i)
+			busy |= vc4_event_pending(crtc);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		if (busy) {
 			kfree(c);
 			return -EBUSY;
 		}
-	} else {
-		ret = down_interruptible(&vc4->async_modeset);
-		if (ret) {
-			kfree(c);
-			return ret;
-		}
+	}
+	ret = down_interruptible(&vc4->async_modeset);
+	if (ret) {
+		kfree(c);
+		return ret;
 	}
 
 	ret = drm_atomic_helper_prepare_planes(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 1aa44c2..39f6886 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -177,8 +177,9 @@
 # define PV_CONTROL_WAIT_HSTART			BIT(12)
 # define PV_CONTROL_PIXEL_REP_MASK		VC4_MASK(5, 4)
 # define PV_CONTROL_PIXEL_REP_SHIFT		4
-# define PV_CONTROL_CLK_SELECT_DSI_VEC		0
+# define PV_CONTROL_CLK_SELECT_DSI		0
 # define PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI	1
+# define PV_CONTROL_CLK_SELECT_VEC		2
 # define PV_CONTROL_CLK_SELECT_MASK		VC4_MASK(3, 2)
 # define PV_CONTROL_CLK_SELECT_SHIFT		2
 # define PV_CONTROL_FIFO_CLR			BIT(1)
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
index 35d8310..b65ed83 100644
--- a/drivers/gpu/msm/Kconfig
+++ b/drivers/gpu/msm/Kconfig
@@ -9,7 +9,6 @@
 	select DEVFREQ_GOV_PERFORMANCE
 	select DEVFREQ_GOV_QCOM_ADRENO_TZ
 	select DEVFREQ_GOV_QCOM_GPUBW_MON
-	select ONESHOT_SYNC if SYNC
 	---help---
 	  3D graphics driver for the Adreno family of GPUs from QTI.
 	  Required to use hardware accelerated OpenGL, compute and Vulkan
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 1b17a62..f513207 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/staging/android
+ccflags-y := -Iinclude/linux
 
 msm_kgsl_core-y = \
 	kgsl.o \
@@ -17,7 +17,7 @@
 
 msm_kgsl_core-$(CONFIG_QCOM_KGSL_IOMMU) += kgsl_iommu.o
 msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
-msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
+msm_kgsl_core-$(CONFIG_SYNC_FILE) += kgsl_sync.o
 msm_kgsl_core-$(CONFIG_COMPAT) += kgsl_compat.o
 
 msm_adreno-y += \
@@ -35,6 +35,7 @@
 	adreno_a3xx_snapshot.o \
 	adreno_a4xx_snapshot.o \
 	adreno_a5xx_snapshot.o \
+	adreno_a6xx_snapshot.o \
 	adreno_a4xx_preempt.o \
 	adreno_a5xx_preempt.o \
 	adreno_sysfs.o \
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 3907e24..1d42797 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -70,11 +70,44 @@
 #define A6XX_CP_ADDR_MODE_CNTL           0x842
 #define A6XX_CP_PROTECT_CNTL             0x84F
 #define A6XX_CP_PROTECT_REG              0x850
+#define A6XX_CP_PERFCTR_CP_SEL_0         0x8D0
+#define A6XX_CP_PERFCTR_CP_SEL_1         0x8D1
+#define A6XX_CP_PERFCTR_CP_SEL_2         0x8D2
+#define A6XX_CP_PERFCTR_CP_SEL_3         0x8D3
+#define A6XX_CP_PERFCTR_CP_SEL_4         0x8D4
+#define A6XX_CP_PERFCTR_CP_SEL_5         0x8D5
+#define A6XX_CP_PERFCTR_CP_SEL_6         0x8D6
+#define A6XX_CP_PERFCTR_CP_SEL_7         0x8D7
+#define A6XX_CP_PERFCTR_CP_SEL_8         0x8D8
+#define A6XX_CP_PERFCTR_CP_SEL_9         0x8D9
+#define A6XX_CP_PERFCTR_CP_SEL_10        0x8DA
+#define A6XX_CP_PERFCTR_CP_SEL_11        0x8DB
+#define A6XX_CP_PERFCTR_CP_SEL_12        0x8DC
+#define A6XX_CP_PERFCTR_CP_SEL_13        0x8DD
+#define A6XX_CP_CRASH_SCRIPT_BASE_LO     0x900
+#define A6XX_CP_CRASH_SCRIPT_BASE_HI     0x901
+#define A6XX_CP_CRASH_DUMP_CNTL          0x902
+#define A6XX_CP_CRASH_DUMP_STATUS        0x903
 #define A6XX_CP_SQE_STAT_ADDR            0x908
 #define A6XX_CP_SQE_STAT_DATA            0x909
+#define A6XX_CP_DRAW_STATE_ADDR          0x90A
+#define A6XX_CP_DRAW_STATE_DATA          0x90B
+#define A6XX_CP_ROQ_DBG_ADDR             0x90C
+#define A6XX_CP_ROQ_DBG_DATA             0x90D
+#define A6XX_CP_MEM_POOL_DBG_ADDR        0x90E
+#define A6XX_CP_MEM_POOL_DBG_DATA        0x90F
+#define A6XX_CP_SQE_UCODE_DBG_ADDR       0x910
+#define A6XX_CP_SQE_UCODE_DBG_DATA       0x911
+#define A6XX_CP_IB1_BASE                 0x928
+#define A6XX_CP_IB1_BASE_HI              0x929
+#define A6XX_CP_IB1_REM_SIZE             0x92A
+#define A6XX_CP_IB2_BASE                 0x92B
+#define A6XX_CP_IB2_BASE_HI              0x92C
+#define A6XX_CP_IB2_REM_SIZE             0x92D
 #define A6XX_CP_ALWAYS_ON_COUNTER_LO     0x980
 #define A6XX_CP_ALWAYS_ON_COUNTER_HI     0x981
 #define A6XX_CP_AHB_CNTL                 0x98D
+#define A6XX_CP_APERTURE_CNTL_HOST       0xA00
 #define A6XX_VSC_ADDR_MODE_CNTL          0xC01
 
 /* RBBM registers */
@@ -89,28 +122,401 @@
 #define A6XX_RBBM_INT_0_STATUS                   0x201
 #define A6XX_RBBM_STATUS                         0x210
 #define A6XX_RBBM_STATUS3                        0x213
+#define A6XX_RBBM_PERFCTR_CP_0_LO                0x400
+#define A6XX_RBBM_PERFCTR_CP_0_HI                0x401
+#define A6XX_RBBM_PERFCTR_CP_1_LO                0x402
+#define A6XX_RBBM_PERFCTR_CP_1_HI                0x403
+#define A6XX_RBBM_PERFCTR_CP_2_LO                0x404
+#define A6XX_RBBM_PERFCTR_CP_2_HI                0x405
+#define A6XX_RBBM_PERFCTR_CP_3_LO                0x406
+#define A6XX_RBBM_PERFCTR_CP_3_HI                0x407
+#define A6XX_RBBM_PERFCTR_CP_4_LO                0x408
+#define A6XX_RBBM_PERFCTR_CP_4_HI                0x409
+#define A6XX_RBBM_PERFCTR_CP_5_LO                0x40a
+#define A6XX_RBBM_PERFCTR_CP_5_HI                0x40b
+#define A6XX_RBBM_PERFCTR_CP_6_LO                0x40c
+#define A6XX_RBBM_PERFCTR_CP_6_HI                0x40d
+#define A6XX_RBBM_PERFCTR_CP_7_LO                0x40e
+#define A6XX_RBBM_PERFCTR_CP_7_HI                0x40f
+#define A6XX_RBBM_PERFCTR_CP_8_LO                0x410
+#define A6XX_RBBM_PERFCTR_CP_8_HI                0x411
+#define A6XX_RBBM_PERFCTR_CP_9_LO                0x412
+#define A6XX_RBBM_PERFCTR_CP_9_HI                0x413
+#define A6XX_RBBM_PERFCTR_CP_10_LO               0x414
+#define A6XX_RBBM_PERFCTR_CP_10_HI               0x415
+#define A6XX_RBBM_PERFCTR_CP_11_LO               0x416
+#define A6XX_RBBM_PERFCTR_CP_11_HI               0x417
+#define A6XX_RBBM_PERFCTR_CP_12_LO               0x418
+#define A6XX_RBBM_PERFCTR_CP_12_HI               0x419
+#define A6XX_RBBM_PERFCTR_CP_13_LO               0x41a
+#define A6XX_RBBM_PERFCTR_CP_13_HI               0x41b
+#define A6XX_RBBM_PERFCTR_RBBM_0_LO              0x41c
+#define A6XX_RBBM_PERFCTR_RBBM_0_HI              0x41d
+#define A6XX_RBBM_PERFCTR_RBBM_1_LO              0x41e
+#define A6XX_RBBM_PERFCTR_RBBM_1_HI              0x41f
+#define A6XX_RBBM_PERFCTR_RBBM_2_LO              0x420
+#define A6XX_RBBM_PERFCTR_RBBM_2_HI              0x421
+#define A6XX_RBBM_PERFCTR_RBBM_3_LO              0x422
+#define A6XX_RBBM_PERFCTR_RBBM_3_HI              0x423
+#define A6XX_RBBM_PERFCTR_PC_0_LO                0x424
+#define A6XX_RBBM_PERFCTR_PC_0_HI                0x425
+#define A6XX_RBBM_PERFCTR_PC_1_LO                0x426
+#define A6XX_RBBM_PERFCTR_PC_1_HI                0x427
+#define A6XX_RBBM_PERFCTR_PC_2_LO                0x428
+#define A6XX_RBBM_PERFCTR_PC_2_HI                0x429
+#define A6XX_RBBM_PERFCTR_PC_3_LO                0x42a
+#define A6XX_RBBM_PERFCTR_PC_3_HI                0x42b
+#define A6XX_RBBM_PERFCTR_PC_4_LO                0x42c
+#define A6XX_RBBM_PERFCTR_PC_4_HI                0x42d
+#define A6XX_RBBM_PERFCTR_PC_5_LO                0x42e
+#define A6XX_RBBM_PERFCTR_PC_5_HI                0x42f
+#define A6XX_RBBM_PERFCTR_PC_6_LO                0x430
+#define A6XX_RBBM_PERFCTR_PC_6_HI                0x431
+#define A6XX_RBBM_PERFCTR_PC_7_LO                0x432
+#define A6XX_RBBM_PERFCTR_PC_7_HI                0x433
+#define A6XX_RBBM_PERFCTR_VFD_0_LO               0x434
+#define A6XX_RBBM_PERFCTR_VFD_0_HI               0x435
+#define A6XX_RBBM_PERFCTR_VFD_1_LO               0x436
+#define A6XX_RBBM_PERFCTR_VFD_1_HI               0x437
+#define A6XX_RBBM_PERFCTR_VFD_2_LO               0x438
+#define A6XX_RBBM_PERFCTR_VFD_2_HI               0x439
+#define A6XX_RBBM_PERFCTR_VFD_3_LO               0x43a
+#define A6XX_RBBM_PERFCTR_VFD_3_HI               0x43b
+#define A6XX_RBBM_PERFCTR_VFD_4_LO               0x43c
+#define A6XX_RBBM_PERFCTR_VFD_4_HI               0x43d
+#define A6XX_RBBM_PERFCTR_VFD_5_LO               0x43e
+#define A6XX_RBBM_PERFCTR_VFD_5_HI               0x43f
+#define A6XX_RBBM_PERFCTR_VFD_6_LO               0x440
+#define A6XX_RBBM_PERFCTR_VFD_6_HI               0x441
+#define A6XX_RBBM_PERFCTR_VFD_7_LO               0x442
+#define A6XX_RBBM_PERFCTR_VFD_7_HI               0x443
+#define A6XX_RBBM_PERFCTR_HLSQ_0_LO              0x444
+#define A6XX_RBBM_PERFCTR_HLSQ_0_HI              0x445
+#define A6XX_RBBM_PERFCTR_HLSQ_1_LO              0x446
+#define A6XX_RBBM_PERFCTR_HLSQ_1_HI              0x447
+#define A6XX_RBBM_PERFCTR_HLSQ_2_LO              0x448
+#define A6XX_RBBM_PERFCTR_HLSQ_2_HI              0x449
+#define A6XX_RBBM_PERFCTR_HLSQ_3_LO              0x44a
+#define A6XX_RBBM_PERFCTR_HLSQ_3_HI              0x44b
+#define A6XX_RBBM_PERFCTR_HLSQ_4_LO              0x44c
+#define A6XX_RBBM_PERFCTR_HLSQ_4_HI              0x44d
+#define A6XX_RBBM_PERFCTR_HLSQ_5_LO              0x44e
+#define A6XX_RBBM_PERFCTR_HLSQ_5_HI              0x44f
+#define A6XX_RBBM_PERFCTR_VPC_0_LO               0x450
+#define A6XX_RBBM_PERFCTR_VPC_0_HI               0x451
+#define A6XX_RBBM_PERFCTR_VPC_1_LO               0x452
+#define A6XX_RBBM_PERFCTR_VPC_1_HI               0x453
+#define A6XX_RBBM_PERFCTR_VPC_2_LO               0x454
+#define A6XX_RBBM_PERFCTR_VPC_2_HI               0x455
+#define A6XX_RBBM_PERFCTR_VPC_3_LO               0x456
+#define A6XX_RBBM_PERFCTR_VPC_3_HI               0x457
+#define A6XX_RBBM_PERFCTR_VPC_4_LO               0x458
+#define A6XX_RBBM_PERFCTR_VPC_4_HI               0x459
+#define A6XX_RBBM_PERFCTR_VPC_5_LO               0x45a
+#define A6XX_RBBM_PERFCTR_VPC_5_HI               0x45b
+#define A6XX_RBBM_PERFCTR_CCU_0_LO               0x45c
+#define A6XX_RBBM_PERFCTR_CCU_0_HI               0x45d
+#define A6XX_RBBM_PERFCTR_CCU_1_LO               0x45e
+#define A6XX_RBBM_PERFCTR_CCU_1_HI               0x45f
+#define A6XX_RBBM_PERFCTR_CCU_2_LO               0x460
+#define A6XX_RBBM_PERFCTR_CCU_2_HI               0x461
+#define A6XX_RBBM_PERFCTR_CCU_3_LO               0x462
+#define A6XX_RBBM_PERFCTR_CCU_3_HI               0x463
+#define A6XX_RBBM_PERFCTR_CCU_4_LO               0x464
+#define A6XX_RBBM_PERFCTR_CCU_4_HI               0x465
+#define A6XX_RBBM_PERFCTR_TSE_0_LO               0x466
+#define A6XX_RBBM_PERFCTR_TSE_0_HI               0x467
+#define A6XX_RBBM_PERFCTR_TSE_1_LO               0x468
+#define A6XX_RBBM_PERFCTR_TSE_1_HI               0x469
+#define A6XX_RBBM_PERFCTR_TSE_2_LO               0x46a
+#define A6XX_RBBM_PERFCTR_TSE_2_HI               0x46b
+#define A6XX_RBBM_PERFCTR_TSE_3_LO               0x46c
+#define A6XX_RBBM_PERFCTR_TSE_3_HI               0x46d
+#define A6XX_RBBM_PERFCTR_RAS_0_LO               0x46e
+#define A6XX_RBBM_PERFCTR_RAS_0_HI               0x46f
+#define A6XX_RBBM_PERFCTR_RAS_1_LO               0x470
+#define A6XX_RBBM_PERFCTR_RAS_1_HI               0x471
+#define A6XX_RBBM_PERFCTR_RAS_2_LO               0x472
+#define A6XX_RBBM_PERFCTR_RAS_2_HI               0x473
+#define A6XX_RBBM_PERFCTR_RAS_3_LO               0x474
+#define A6XX_RBBM_PERFCTR_RAS_3_HI               0x475
+#define A6XX_RBBM_PERFCTR_UCHE_0_LO              0x476
+#define A6XX_RBBM_PERFCTR_UCHE_0_HI              0x477
+#define A6XX_RBBM_PERFCTR_UCHE_1_LO              0x478
+#define A6XX_RBBM_PERFCTR_UCHE_1_HI              0x479
+#define A6XX_RBBM_PERFCTR_UCHE_2_LO              0x47a
+#define A6XX_RBBM_PERFCTR_UCHE_2_HI              0x47b
+#define A6XX_RBBM_PERFCTR_UCHE_3_LO              0x47c
+#define A6XX_RBBM_PERFCTR_UCHE_3_HI              0x47d
+#define A6XX_RBBM_PERFCTR_UCHE_4_LO              0x47e
+#define A6XX_RBBM_PERFCTR_UCHE_4_HI              0x47f
+#define A6XX_RBBM_PERFCTR_UCHE_5_LO              0x480
+#define A6XX_RBBM_PERFCTR_UCHE_5_HI              0x481
+#define A6XX_RBBM_PERFCTR_UCHE_6_LO              0x482
+#define A6XX_RBBM_PERFCTR_UCHE_6_HI              0x483
+#define A6XX_RBBM_PERFCTR_UCHE_7_LO              0x484
+#define A6XX_RBBM_PERFCTR_UCHE_7_HI              0x485
+#define A6XX_RBBM_PERFCTR_UCHE_8_LO              0x486
+#define A6XX_RBBM_PERFCTR_UCHE_8_HI              0x487
+#define A6XX_RBBM_PERFCTR_UCHE_9_LO              0x488
+#define A6XX_RBBM_PERFCTR_UCHE_9_HI              0x489
+#define A6XX_RBBM_PERFCTR_UCHE_10_LO             0x48a
+#define A6XX_RBBM_PERFCTR_UCHE_10_HI             0x48b
+#define A6XX_RBBM_PERFCTR_UCHE_11_LO             0x48c
+#define A6XX_RBBM_PERFCTR_UCHE_11_HI             0x48d
+#define A6XX_RBBM_PERFCTR_TP_0_LO                0x48e
+#define A6XX_RBBM_PERFCTR_TP_0_HI                0x48f
+#define A6XX_RBBM_PERFCTR_TP_1_LO                0x490
+#define A6XX_RBBM_PERFCTR_TP_1_HI                0x491
+#define A6XX_RBBM_PERFCTR_TP_2_LO                0x492
+#define A6XX_RBBM_PERFCTR_TP_2_HI                0x493
+#define A6XX_RBBM_PERFCTR_TP_3_LO                0x494
+#define A6XX_RBBM_PERFCTR_TP_3_HI                0x495
+#define A6XX_RBBM_PERFCTR_TP_4_LO                0x496
+#define A6XX_RBBM_PERFCTR_TP_4_HI                0x497
+#define A6XX_RBBM_PERFCTR_TP_5_LO                0x498
+#define A6XX_RBBM_PERFCTR_TP_5_HI                0x499
+#define A6XX_RBBM_PERFCTR_TP_6_LO                0x49a
+#define A6XX_RBBM_PERFCTR_TP_6_HI                0x49b
+#define A6XX_RBBM_PERFCTR_TP_7_LO                0x49c
+#define A6XX_RBBM_PERFCTR_TP_7_HI                0x49d
+#define A6XX_RBBM_PERFCTR_TP_8_LO                0x49e
+#define A6XX_RBBM_PERFCTR_TP_8_HI                0x49f
+#define A6XX_RBBM_PERFCTR_TP_9_LO                0x4a0
+#define A6XX_RBBM_PERFCTR_TP_9_HI                0x4a1
+#define A6XX_RBBM_PERFCTR_TP_10_LO               0x4a2
+#define A6XX_RBBM_PERFCTR_TP_10_HI               0x4a3
+#define A6XX_RBBM_PERFCTR_TP_11_LO               0x4a4
+#define A6XX_RBBM_PERFCTR_TP_11_HI               0x4a5
+#define A6XX_RBBM_PERFCTR_SP_0_LO                0x4a6
+#define A6XX_RBBM_PERFCTR_SP_0_HI                0x4a7
+#define A6XX_RBBM_PERFCTR_SP_1_LO                0x4a8
+#define A6XX_RBBM_PERFCTR_SP_1_HI                0x4a9
+#define A6XX_RBBM_PERFCTR_SP_2_LO                0x4aa
+#define A6XX_RBBM_PERFCTR_SP_2_HI                0x4ab
+#define A6XX_RBBM_PERFCTR_SP_3_LO                0x4ac
+#define A6XX_RBBM_PERFCTR_SP_3_HI                0x4ad
+#define A6XX_RBBM_PERFCTR_SP_4_LO                0x4ae
+#define A6XX_RBBM_PERFCTR_SP_4_HI                0x4af
+#define A6XX_RBBM_PERFCTR_SP_5_LO                0x4b0
+#define A6XX_RBBM_PERFCTR_SP_5_HI                0x4b1
+#define A6XX_RBBM_PERFCTR_SP_6_LO                0x4b2
+#define A6XX_RBBM_PERFCTR_SP_6_HI                0x4b3
+#define A6XX_RBBM_PERFCTR_SP_7_LO                0x4b4
+#define A6XX_RBBM_PERFCTR_SP_7_HI                0x4b5
+#define A6XX_RBBM_PERFCTR_SP_8_LO                0x4b6
+#define A6XX_RBBM_PERFCTR_SP_8_HI                0x4b7
+#define A6XX_RBBM_PERFCTR_SP_9_LO                0x4b8
+#define A6XX_RBBM_PERFCTR_SP_9_HI                0x4b9
+#define A6XX_RBBM_PERFCTR_SP_10_LO               0x4ba
+#define A6XX_RBBM_PERFCTR_SP_10_HI               0x4bb
+#define A6XX_RBBM_PERFCTR_SP_11_LO               0x4bc
+#define A6XX_RBBM_PERFCTR_SP_11_HI               0x4bd
+#define A6XX_RBBM_PERFCTR_SP_12_LO               0x4be
+#define A6XX_RBBM_PERFCTR_SP_12_HI               0x4bf
+#define A6XX_RBBM_PERFCTR_SP_13_LO               0x4c0
+#define A6XX_RBBM_PERFCTR_SP_13_HI               0x4c1
+#define A6XX_RBBM_PERFCTR_SP_14_LO               0x4c2
+#define A6XX_RBBM_PERFCTR_SP_14_HI               0x4c3
+#define A6XX_RBBM_PERFCTR_SP_15_LO               0x4c4
+#define A6XX_RBBM_PERFCTR_SP_15_HI               0x4c5
+#define A6XX_RBBM_PERFCTR_SP_16_LO               0x4c6
+#define A6XX_RBBM_PERFCTR_SP_16_HI               0x4c7
+#define A6XX_RBBM_PERFCTR_SP_17_LO               0x4c8
+#define A6XX_RBBM_PERFCTR_SP_17_HI               0x4c9
+#define A6XX_RBBM_PERFCTR_SP_18_LO               0x4ca
+#define A6XX_RBBM_PERFCTR_SP_18_HI               0x4cb
+#define A6XX_RBBM_PERFCTR_SP_19_LO               0x4cc
+#define A6XX_RBBM_PERFCTR_SP_19_HI               0x4cd
+#define A6XX_RBBM_PERFCTR_SP_20_LO               0x4ce
+#define A6XX_RBBM_PERFCTR_SP_20_HI               0x4cf
+#define A6XX_RBBM_PERFCTR_SP_21_LO               0x4d0
+#define A6XX_RBBM_PERFCTR_SP_21_HI               0x4d1
+#define A6XX_RBBM_PERFCTR_SP_22_LO               0x4d2
+#define A6XX_RBBM_PERFCTR_SP_22_HI               0x4d3
+#define A6XX_RBBM_PERFCTR_SP_23_LO               0x4d4
+#define A6XX_RBBM_PERFCTR_SP_23_HI               0x4d5
+#define A6XX_RBBM_PERFCTR_RB_0_LO                0x4d6
+#define A6XX_RBBM_PERFCTR_RB_0_HI                0x4d7
+#define A6XX_RBBM_PERFCTR_RB_1_LO                0x4d8
+#define A6XX_RBBM_PERFCTR_RB_1_HI                0x4d9
+#define A6XX_RBBM_PERFCTR_RB_2_LO                0x4da
+#define A6XX_RBBM_PERFCTR_RB_2_HI                0x4db
+#define A6XX_RBBM_PERFCTR_RB_3_LO                0x4dc
+#define A6XX_RBBM_PERFCTR_RB_3_HI                0x4dd
+#define A6XX_RBBM_PERFCTR_RB_4_LO                0x4de
+#define A6XX_RBBM_PERFCTR_RB_4_HI                0x4df
+#define A6XX_RBBM_PERFCTR_RB_5_LO                0x4e0
+#define A6XX_RBBM_PERFCTR_RB_5_HI                0x4e1
+#define A6XX_RBBM_PERFCTR_RB_6_LO                0x4e2
+#define A6XX_RBBM_PERFCTR_RB_6_HI                0x4e3
+#define A6XX_RBBM_PERFCTR_RB_7_LO                0x4e4
+#define A6XX_RBBM_PERFCTR_RB_7_HI                0x4e5
+#define A6XX_RBBM_PERFCTR_VSC_0_LO               0x4e6
+#define A6XX_RBBM_PERFCTR_VSC_0_HI               0x4e7
+#define A6XX_RBBM_PERFCTR_VSC_1_LO               0x4e8
+#define A6XX_RBBM_PERFCTR_VSC_1_HI               0x4e9
+#define A6XX_RBBM_PERFCTR_LRZ_0_LO               0x4ea
+#define A6XX_RBBM_PERFCTR_LRZ_0_HI               0x4eb
+#define A6XX_RBBM_PERFCTR_LRZ_1_LO               0x4ec
+#define A6XX_RBBM_PERFCTR_LRZ_1_HI               0x4ed
+#define A6XX_RBBM_PERFCTR_LRZ_2_LO               0x4ee
+#define A6XX_RBBM_PERFCTR_LRZ_2_HI               0x4ef
+#define A6XX_RBBM_PERFCTR_LRZ_3_LO               0x4f0
+#define A6XX_RBBM_PERFCTR_LRZ_3_HI               0x4f1
+#define A6XX_RBBM_PERFCTR_CMP_0_LO               0x4f2
+#define A6XX_RBBM_PERFCTR_CMP_0_HI               0x4f3
+#define A6XX_RBBM_PERFCTR_CMP_1_LO               0x4f4
+#define A6XX_RBBM_PERFCTR_CMP_1_HI               0x4f5
+#define A6XX_RBBM_PERFCTR_CMP_2_LO               0x4f6
+#define A6XX_RBBM_PERFCTR_CMP_2_HI               0x4f7
+#define A6XX_RBBM_PERFCTR_CMP_3_LO               0x4f8
+#define A6XX_RBBM_PERFCTR_CMP_3_HI               0x4f9
+#define A6XX_RBBM_PERFCTR_CNTL                   0x500
+#define A6XX_RBBM_PERFCTR_LOAD_CMD0              0x501
+#define A6XX_RBBM_PERFCTR_LOAD_CMD1              0x502
+#define A6XX_RBBM_PERFCTR_LOAD_CMD2              0x503
+#define A6XX_RBBM_PERFCTR_LOAD_CMD3              0x504
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_LO          0x505
+#define A6XX_RBBM_PERFCTR_LOAD_VALUE_HI          0x506
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_0             0x507
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_1             0x508
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_2             0x509
+#define A6XX_RBBM_PERFCTR_RBBM_SEL_3             0x50A
+
 #define A6XX_RBBM_SECVID_TRUST_CNTL              0xF400
 #define A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL      0xF810
 
+/* DBGC_CFG registers */
+#define A6XX_DBGC_CFG_DBGBUS_SEL_A                  0x600
+#define A6XX_DBGC_CFG_DBGBUS_SEL_B                  0x601
+#define A6XX_DBGC_CFG_DBGBUS_SEL_C                  0x602
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D                  0x603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT   0x0
+#define A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT 0x8
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT                  0x604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT    0x0
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT      0xC
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT       0x1C
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM                  0x605
+#define A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT     0x18
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_0                 0x608
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_1                 0x609
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_2                 0x60a
+#define A6XX_DBGC_CFG_DBGBUS_IVTL_3                 0x60b
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_0                0x60c
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_1                0x60d
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_2                0x60e
+#define A6XX_DBGC_CFG_DBGBUS_MASKL_3                0x60f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0                0x610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1                0x611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT           0x0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT           0x4
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT           0x8
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT           0xC
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT           0x10
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT           0x14
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT           0x18
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT           0x1C
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT           0x0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT           0x4
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT          0x8
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT          0xC
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT          0x10
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT          0x14
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT          0x18
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT          0x1C
+#define A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1             0x62f
+#define A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2             0x630
+
 /* VSC registers */
+#define A6XX_VSC_PERFCTR_VSC_SEL_0          0xCD8
+#define A6XX_VSC_PERFCTR_VSC_SEL_1          0xCD9
+
+/* GRAS registers */
 #define A6XX_GRAS_ADDR_MODE_CNTL            0x8601
+#define A6XX_GRAS_PERFCTR_TSE_SEL_0         0x8610
+#define A6XX_GRAS_PERFCTR_TSE_SEL_1         0x8611
+#define A6XX_GRAS_PERFCTR_TSE_SEL_2         0x8612
+#define A6XX_GRAS_PERFCTR_TSE_SEL_3         0x8613
+#define A6XX_GRAS_PERFCTR_RAS_SEL_0         0x8614
+#define A6XX_GRAS_PERFCTR_RAS_SEL_1         0x8615
+#define A6XX_GRAS_PERFCTR_RAS_SEL_2         0x8616
+#define A6XX_GRAS_PERFCTR_RAS_SEL_3         0x8617
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_0         0x8618
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_1         0x8619
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_2         0x861A
+#define A6XX_GRAS_PERFCTR_LRZ_SEL_3         0x861B
 
 /* RB registers */
 #define A6XX_RB_ADDR_MODE_CNTL              0x8E05
 #define A6XX_RB_NC_MODE_CNTL                0x8E08
+#define A6XX_RB_PERFCTR_RB_SEL_0            0x8E10
+#define A6XX_RB_PERFCTR_RB_SEL_1            0x8E11
+#define A6XX_RB_PERFCTR_RB_SEL_2            0x8E12
+#define A6XX_RB_PERFCTR_RB_SEL_3            0x8E13
+#define A6XX_RB_PERFCTR_RB_SEL_4            0x8E14
+#define A6XX_RB_PERFCTR_RB_SEL_5            0x8E15
+#define A6XX_RB_PERFCTR_RB_SEL_6            0x8E16
+#define A6XX_RB_PERFCTR_RB_SEL_7            0x8E17
+#define A6XX_RB_PERFCTR_CCU_SEL_0           0x8E18
+#define A6XX_RB_PERFCTR_CCU_SEL_1           0x8E19
+#define A6XX_RB_PERFCTR_CCU_SEL_2           0x8E1A
+#define A6XX_RB_PERFCTR_CCU_SEL_3           0x8E1B
+#define A6XX_RB_PERFCTR_CCU_SEL_4           0x8E1C
+#define A6XX_RB_PERFCTR_CMP_SEL_0           0x8E2C
+#define A6XX_RB_PERFCTR_CMP_SEL_1           0x8E2D
+#define A6XX_RB_PERFCTR_CMP_SEL_2           0x8E2E
+#define A6XX_RB_PERFCTR_CMP_SEL_3           0x8E2F
 
 /* PC registers */
 #define A6XX_PC_DBG_ECO_CNTL                0x9E00
 #define A6XX_PC_ADDR_MODE_CNTL              0x9E01
+#define A6XX_PC_PERFCTR_PC_SEL_0            0x9E34
+#define A6XX_PC_PERFCTR_PC_SEL_1            0x9E35
+#define A6XX_PC_PERFCTR_PC_SEL_2            0x9E36
+#define A6XX_PC_PERFCTR_PC_SEL_3            0x9E37
+#define A6XX_PC_PERFCTR_PC_SEL_4            0x9E38
+#define A6XX_PC_PERFCTR_PC_SEL_5            0x9E39
+#define A6XX_PC_PERFCTR_PC_SEL_6            0x9E3A
+#define A6XX_PC_PERFCTR_PC_SEL_7            0x9E3B
 
 /* HLSQ registers */
 #define A6XX_HLSQ_ADDR_MODE_CNTL            0xBE05
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_0        0xBE10
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_1        0xBE11
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_2        0xBE12
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_3        0xBE13
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_4        0xBE14
+#define A6XX_HLSQ_PERFCTR_HLSQ_SEL_5        0xBE15
+#define A6XX_HLSQ_DBG_AHB_READ_APERTURE     0xC800
+#define A6XX_HLSQ_DBG_READ_SEL              0xD000
 
 /* VFD registers */
 #define A6XX_VFD_ADDR_MODE_CNTL             0xA601
+#define A6XX_VFD_PERFCTR_VFD_SEL_0          0xA610
+#define A6XX_VFD_PERFCTR_VFD_SEL_1          0xA611
+#define A6XX_VFD_PERFCTR_VFD_SEL_2          0xA612
+#define A6XX_VFD_PERFCTR_VFD_SEL_3          0xA613
+#define A6XX_VFD_PERFCTR_VFD_SEL_4          0xA614
+#define A6XX_VFD_PERFCTR_VFD_SEL_5          0xA615
+#define A6XX_VFD_PERFCTR_VFD_SEL_6          0xA616
+#define A6XX_VFD_PERFCTR_VFD_SEL_7          0xA617
 
 /* VPC registers */
 #define A6XX_VPC_ADDR_MODE_CNTL             0x9601
+#define A6XX_VPC_PERFCTR_VPC_SEL_0          0x9604
+#define A6XX_VPC_PERFCTR_VPC_SEL_1          0x9605
+#define A6XX_VPC_PERFCTR_VPC_SEL_2          0x9606
+#define A6XX_VPC_PERFCTR_VPC_SEL_3          0x9607
+#define A6XX_VPC_PERFCTR_VPC_SEL_4          0x9608
+#define A6XX_VPC_PERFCTR_VPC_SEL_5          0x9609
 
 /* UCHE registers */
 #define A6XX_UCHE_ADDR_MODE_CNTL            0xE00
@@ -127,20 +533,89 @@
 #define A6XX_UCHE_GMEM_RANGE_MAX_HI         0xE0E
 #define A6XX_UCHE_CACHE_WAYS                0xE17
 #define A6XX_UCHE_FILTER_CNTL               0xE18
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_0        0xE1C
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_1        0xE1D
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_2        0xE1E
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_3        0xE1F
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_4        0xE20
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_5        0xE21
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_6        0xE22
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_7        0xE23
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_8        0xE24
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_9        0xE25
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_10       0xE26
+#define A6XX_UCHE_PERFCTR_UCHE_SEL_11       0xE27
 
 /* SP registers */
 #define A6XX_SP_ADDR_MODE_CNTL              0xAE01
 #define A6XX_SP_NC_MODE_CNTL                0xAE02
+#define A6XX_SP_PERFCTR_SP_SEL_0            0xAE10
+#define A6XX_SP_PERFCTR_SP_SEL_1            0xAE11
+#define A6XX_SP_PERFCTR_SP_SEL_2            0xAE12
+#define A6XX_SP_PERFCTR_SP_SEL_3            0xAE13
+#define A6XX_SP_PERFCTR_SP_SEL_4            0xAE14
+#define A6XX_SP_PERFCTR_SP_SEL_5            0xAE15
+#define A6XX_SP_PERFCTR_SP_SEL_6            0xAE16
+#define A6XX_SP_PERFCTR_SP_SEL_7            0xAE17
+#define A6XX_SP_PERFCTR_SP_SEL_8            0xAE18
+#define A6XX_SP_PERFCTR_SP_SEL_9            0xAE19
+#define A6XX_SP_PERFCTR_SP_SEL_10           0xAE1A
+#define A6XX_SP_PERFCTR_SP_SEL_11           0xAE1B
+#define A6XX_SP_PERFCTR_SP_SEL_12           0xAE1C
+#define A6XX_SP_PERFCTR_SP_SEL_13           0xAE1D
+#define A6XX_SP_PERFCTR_SP_SEL_14           0xAE1E
+#define A6XX_SP_PERFCTR_SP_SEL_15           0xAE1F
+#define A6XX_SP_PERFCTR_SP_SEL_16           0xAE20
+#define A6XX_SP_PERFCTR_SP_SEL_17           0xAE21
+#define A6XX_SP_PERFCTR_SP_SEL_18           0xAE22
+#define A6XX_SP_PERFCTR_SP_SEL_19           0xAE23
+#define A6XX_SP_PERFCTR_SP_SEL_20           0xAE24
+#define A6XX_SP_PERFCTR_SP_SEL_21           0xAE25
+#define A6XX_SP_PERFCTR_SP_SEL_22           0xAE26
+#define A6XX_SP_PERFCTR_SP_SEL_23           0xAE27
 
 /* TP registers */
 #define A6XX_TPL1_ADDR_MODE_CNTL            0xB601
 #define A6XX_TPL1_NC_MODE_CNTL              0xB604
+#define A6XX_TPL1_PERFCTR_TP_SEL_0          0xB610
+#define A6XX_TPL1_PERFCTR_TP_SEL_1          0xB611
+#define A6XX_TPL1_PERFCTR_TP_SEL_2          0xB612
+#define A6XX_TPL1_PERFCTR_TP_SEL_3          0xB613
+#define A6XX_TPL1_PERFCTR_TP_SEL_4          0xB614
+#define A6XX_TPL1_PERFCTR_TP_SEL_5          0xB615
+#define A6XX_TPL1_PERFCTR_TP_SEL_6          0xB616
+#define A6XX_TPL1_PERFCTR_TP_SEL_7          0xB617
+#define A6XX_TPL1_PERFCTR_TP_SEL_8          0xB618
+#define A6XX_TPL1_PERFCTR_TP_SEL_9          0xB619
+#define A6XX_TPL1_PERFCTR_TP_SEL_10         0xB61A
+#define A6XX_TPL1_PERFCTR_TP_SEL_11         0xB61B
 
 /* VBIF registers */
 #define A6XX_VBIF_VERSION                       0x3000
 #define A6XX_VBIF_GATE_OFF_WRREQ_EN             0x302A
 #define A6XX_VBIF_XIN_HALT_CTRL0                0x3080
 #define A6XX_VBIF_XIN_HALT_CTRL1                0x3081
+#define A6XX_VBIF_PERF_CNT_SEL0                 0x30d0
+#define A6XX_VBIF_PERF_CNT_SEL1                 0x30d1
+#define A6XX_VBIF_PERF_CNT_SEL2                 0x30d2
+#define A6XX_VBIF_PERF_CNT_SEL3                 0x30d3
+#define A6XX_VBIF_PERF_CNT_LOW0                 0x30d8
+#define A6XX_VBIF_PERF_CNT_LOW1                 0x30d9
+#define A6XX_VBIF_PERF_CNT_LOW2                 0x30da
+#define A6XX_VBIF_PERF_CNT_LOW3                 0x30db
+#define A6XX_VBIF_PERF_CNT_HIGH0                0x30e0
+#define A6XX_VBIF_PERF_CNT_HIGH1                0x30e1
+#define A6XX_VBIF_PERF_CNT_HIGH2                0x30e2
+#define A6XX_VBIF_PERF_CNT_HIGH3                0x30e3
+#define A6XX_VBIF_PERF_PWR_CNT_EN0              0x3100
+#define A6XX_VBIF_PERF_PWR_CNT_EN1              0x3101
+#define A6XX_VBIF_PERF_PWR_CNT_EN2              0x3102
+#define A6XX_VBIF_PERF_PWR_CNT_LOW0             0x3110
+#define A6XX_VBIF_PERF_PWR_CNT_LOW1             0x3111
+#define A6XX_VBIF_PERF_PWR_CNT_LOW2             0x3112
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH0            0x3118
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH1            0x3119
+#define A6XX_VBIF_PERF_PWR_CNT_HIGH2            0x311a
 
 /* GMU control registers */
 #define A6XX_GMU_GX_SPTPRAC_POWER_CONTROL	0x1A881
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 52639e3..876ff0c 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -334,7 +334,7 @@
 		.gmem_size = SZ_1M,
 		.num_protected_regs = 0x20,
 		.busy_mask = 0xFFFFFFFE,
-		.gpmufw_name = "a540_gpmu.fw2",
+		.gpmufw_name = "a630_gmu.bin",
 		.gpmu_major = 0x0,
 		.gpmu_minor = 0x005,
 		.gpmu_tsens = 0x000C000D,
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index d172022..ec3cade 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1365,6 +1365,53 @@
 	}
 }
 
+static int adreno_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
+				struct adreno_ringbuffer *rb)
+{
+	unsigned int *cmds;
+	int ret;
+
+	cmds = adreno_ringbuffer_allocspace(rb, 2);
+	if (IS_ERR(cmds))
+		return PTR_ERR(cmds);
+	if (cmds == NULL)
+		return -ENOSPC;
+
+	cmds += cp_secure_mode(adreno_dev, cmds, 0);
+
+	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
+	if (ret)
+		adreno_spin_idle_debug(adreno_dev,
+				"Switch to unsecure failed to idle\n");
+
+	return ret;
+}
+
+int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
+		struct adreno_ringbuffer *rb)
+{
+	int ret = 0;
+
+	if (!adreno_is_a5xx(adreno_dev))
+		return -EINVAL;
+
+	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS) &&
+			adreno_is_a5xx(adreno_dev)) {
+		ret = a5xx_critical_packet_submit(adreno_dev, rb);
+		if (ret)
+			return ret;
+	}
+
+	/* GPU comes up in secured mode, make it unsecured by default */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+		ret = adreno_switch_to_unsecure_mode(adreno_dev, rb);
+	else
+		adreno_writereg(adreno_dev,
+				ADRENO_REG_RBBM_SECVID_TRUST_CONTROL, 0x0);
+
+	return ret;
+}
+
 /**
  * _adreno_start - Power up the GPU and prepare to accept commands
  * @adreno_dev: Pointer to an adreno_device structure
@@ -2325,6 +2372,34 @@
 	return adreno_hw_isidle(adreno_dev);
 }
 
+/* Print some key registers if a spin-for-idle times out */
+void adreno_spin_idle_debug(struct adreno_device *adreno_dev,
+		const char *str)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int rptr, wptr;
+	unsigned int status, status3, intstatus;
+	unsigned int hwfault;
+
+	dev_err(device->dev, str);
+
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS, &status);
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS3, &status3);
+	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &intstatus);
+	adreno_readreg(adreno_dev, ADRENO_REG_CP_HW_FAULT, &hwfault);
+
+	dev_err(device->dev,
+		"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
+		adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
+
+	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
+
+	kgsl_device_snapshot(device, NULL);
+}
+
 /**
  * adreno_spin_idle() - Spin wait for the GPU to idle
  * @adreno_dev: Pointer to an adreno device
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e2c189b..1e08a5e 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -951,6 +951,11 @@
 		unsigned int cmd, unsigned long arg,
 		const struct kgsl_ioctl *cmds, int len);
 
+int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
+		struct adreno_ringbuffer *rb);
+int adreno_set_unsecured_mode(struct adreno_device *adreno_dev,
+		struct adreno_ringbuffer *rb);
+void adreno_spin_idle_debug(struct adreno_device *adreno_dev, const char *str);
 int adreno_spin_idle(struct adreno_device *device, unsigned int timeout);
 int adreno_idle(struct kgsl_device *device);
 bool adreno_isidle(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index d1346c5..1e95e38 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -82,34 +82,6 @@
 #define A530_QFPROM_RAW_PTE_ROW0_MSB 0x134
 #define A530_QFPROM_RAW_PTE_ROW2_MSB 0x144
 
-/* Print some key registers if a spin-for-idle times out */
-static void spin_idle_debug(struct kgsl_device *device,
-		const char *str)
-{
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int rptr, wptr;
-	unsigned int status, status3, intstatus;
-	unsigned int hwfault;
-
-	dev_err(device->dev, str);
-
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
-	adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
-
-	kgsl_regread(device, A5XX_RBBM_STATUS, &status);
-	kgsl_regread(device, A5XX_RBBM_STATUS3, &status3);
-	kgsl_regread(device, A5XX_RBBM_INT_0_STATUS, &intstatus);
-	kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
-
-	dev_err(device->dev,
-		"rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
-		adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
-
-	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
-
-	kgsl_device_snapshot(device, NULL);
-}
-
 static void a530_efuse_leakage(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -752,7 +724,7 @@
 
 	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
 	if (ret != 0)
-		spin_idle_debug(&adreno_dev->dev,
+		adreno_spin_idle_debug(adreno_dev,
 				"gpmu initialization failed to idle\n");
 
 	return ret;
@@ -2153,7 +2125,7 @@
 
 	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
 	if (ret)
-		spin_idle_debug(KGSL_DEVICE(adreno_dev),
+		adreno_spin_idle_debug(adreno_dev,
 				"hw initialization failed to idle\n");
 
 	return ret;
@@ -2178,28 +2150,6 @@
 	return 0;
 }
 
-static int a5xx_switch_to_unsecure_mode(struct adreno_device *adreno_dev,
-				struct adreno_ringbuffer *rb)
-{
-	unsigned int *cmds;
-	int ret;
-
-	cmds = adreno_ringbuffer_allocspace(rb, 2);
-	if (IS_ERR(cmds))
-		return PTR_ERR(cmds);
-	if (cmds == NULL)
-		return -ENOSPC;
-
-	cmds += cp_secure_mode(adreno_dev, cmds, 0);
-
-	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
-	if (ret)
-		spin_idle_debug(KGSL_DEVICE(adreno_dev),
-				"Switch to unsecure failed to idle\n");
-
-	return ret;
-}
-
 /*
  * a5xx_microcode_load() - Load microcode
  * @adreno_dev: Pointer to adreno device
@@ -2360,7 +2310,7 @@
 		*cmds++ = 0x0;
 }
 
-static int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
+int a5xx_critical_packet_submit(struct adreno_device *adreno_dev,
 					struct adreno_ringbuffer *rb)
 {
 	unsigned int *cmds;
@@ -2379,7 +2329,7 @@
 
 	ret = adreno_ringbuffer_submit_spin(rb, NULL, 20);
 	if (ret)
-		spin_idle_debug(KGSL_DEVICE(adreno_dev),
+		adreno_spin_idle_debug(adreno_dev,
 			"Critical packet submission failed to idle\n");
 
 	return ret;
@@ -2410,33 +2360,12 @@
 
 	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
 	if (ret)
-		spin_idle_debug(KGSL_DEVICE(adreno_dev),
+		adreno_spin_idle_debug(adreno_dev,
 				"CP initialization failed to idle\n");
 
 	return ret;
 }
 
-static int a5xx_set_unsecured_mode(struct adreno_device *adreno_dev,
-		struct adreno_ringbuffer *rb)
-{
-	int ret = 0;
-
-	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CRITICAL_PACKETS)) {
-		ret = a5xx_critical_packet_submit(adreno_dev, rb);
-		if (ret)
-			return ret;
-	}
-
-	/* GPU comes up in secured mode, make it unsecured by default */
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
-		ret = a5xx_switch_to_unsecure_mode(adreno_dev, rb);
-	else
-		kgsl_regwrite(&adreno_dev->dev,
-				A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
-
-	return ret;
-}
-
 /*
  * a5xx_rb_start() - Start the ringbuffer
  * @adreno_dev: Pointer to adreno device
@@ -2480,7 +2409,7 @@
 		return ret;
 
 	/* GPU comes up in secured mode, make it unsecured by default */
-	ret = a5xx_set_unsecured_mode(adreno_dev, rb);
+	ret = adreno_set_unsecured_mode(adreno_dev, rb);
 	if (ret)
 		return ret;
 
@@ -3025,6 +2954,7 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_ADDR, A5XX_CP_MEQ_DBG_ADDR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_MEQ_DATA, A5XX_CP_MEQ_DBG_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_REG_0, A5XX_CP_PROTECT_REG_0),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A5XX_CP_HW_FAULT),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A5XX_CP_CONTEXT_SWITCH_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DEBUG, ADRENO_REG_SKIP),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT_DISABLE, ADRENO_REG_SKIP),
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 7208672..bea5707a 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -15,6 +15,7 @@
 
 #include "adreno.h"
 #include "a6xx_reg.h"
+#include "adreno_a6xx.h"
 #include "adreno_cp_parser.h"
 #include "adreno_trace.h"
 #include "adreno_pm4types.h"
@@ -91,30 +92,6 @@
 	{ 0xA630, 0x0, 1 },
 };
 
-/* Print some key registers if a spin-for-idle times out */
-static void spin_idle_debug(struct kgsl_device *device,
-		const char *str)
-{
-	unsigned int rptr, wptr;
-	unsigned int status, status3, intstatus;
-	unsigned int hwfault;
-
-	dev_err(device->dev, str);
-
-	kgsl_regread(device, A6XX_CP_RB_RPTR, &rptr);
-	kgsl_regread(device, A6XX_CP_RB_WPTR, &wptr);
-
-	kgsl_regread(device, A6XX_RBBM_STATUS, &status);
-	kgsl_regread(device, A6XX_RBBM_STATUS3, &status3);
-	kgsl_regread(device, A6XX_RBBM_INT_0_STATUS, &intstatus);
-	kgsl_regread(device, A6XX_CP_HW_FAULT, &hwfault);
-
-	dev_err(device->dev,
-		" rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
-		rptr, wptr, status, status3, intstatus);
-	dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
-}
-
 static void a6xx_platform_setup(struct adreno_device *adreno_dev)
 {
 	uint64_t addr;
@@ -125,6 +102,11 @@
 	adreno_dev->sp_pvt_gpuaddr = addr + SZ_64K;
 }
 
+static void a6xx_init(struct adreno_device *adreno_dev)
+{
+	a6xx_crashdump_init(adreno_dev);
+}
+
 /**
  * a6xx_protect_init() - Initializes register protection on a6xx
  * @device: Pointer to the device structure
@@ -134,16 +116,29 @@
 static void a6xx_protect_init(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-	int i;
+	struct kgsl_protected_registers *mmu_prot =
+		kgsl_mmu_get_prot_regs(&device->mmu);
+	int i, num_sets;
+	int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
+	int max_sets = adreno_dev->gpucore->num_protected_regs;
+	unsigned int mmu_base = 0, mmu_range = 0, cur_range;
 
 	/* enable access protection to privileged registers */
 	kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000007);
 
-	if (ARRAY_SIZE(a6xx_protected_regs_group) >
-			adreno_dev->gpucore->num_protected_regs)
+	if (mmu_prot) {
+		mmu_base = mmu_prot->base;
+		mmu_range = 1 << mmu_prot->range;
+		req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
+	}
+
+	if (req_sets > max_sets)
 		WARN(1, "Size exceeds the num of protection regs available\n");
 
-	for (i = 0; i < ARRAY_SIZE(a6xx_protected_regs_group); i++) {
+	/* Protect GPU registers */
+	num_sets = min_t(unsigned int,
+		ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
+	for (i = 0; i < num_sets; i++) {
 		struct a6xx_protected_regs *regs =
 					&a6xx_protected_regs_group[i];
 
@@ -152,6 +147,19 @@
 				(regs->read_protect << 31));
 	}
 
+	/* Protect MMU registers */
+	if (mmu_prot) {
+		while ((i < max_sets) && (mmu_range > 0)) {
+			cur_range = min_t(unsigned int, mmu_range,
+						0x2000);
+			kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
+				mmu_base | ((cur_range - 1) << 18) | (1 << 31));
+
+			mmu_base += cur_range;
+			mmu_range -= cur_range;
+			i++;
+		}
+	}
 }
 
 static void a6xx_enable_64bit(struct adreno_device *adreno_dev)
@@ -387,7 +395,7 @@
 
 	ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
 	if (ret)
-		spin_idle_debug(KGSL_DEVICE(adreno_dev),
+		adreno_spin_idle_debug(adreno_dev,
 				"CP initialization failed to idle\n");
 
 	return ret;
@@ -1518,10 +1526,381 @@
 	.mask = A6XX_INT_MASK,
 };
 
+static struct adreno_snapshot_sizes a6xx_snap_sizes = {
+	.cp_pfp = 0x33,
+	.roq = 0x400,
+};
+
+static struct adreno_snapshot_data a6xx_snapshot_data = {
+	.sect_sizes = &a6xx_snap_sizes,
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_cp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_0_LO,
+		A6XX_RBBM_PERFCTR_CP_0_HI, 0, A6XX_CP_PERFCTR_CP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_1_LO,
+		A6XX_RBBM_PERFCTR_CP_1_HI, 1, A6XX_CP_PERFCTR_CP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_2_LO,
+		A6XX_RBBM_PERFCTR_CP_2_HI, 2, A6XX_CP_PERFCTR_CP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_3_LO,
+		A6XX_RBBM_PERFCTR_CP_3_HI, 3, A6XX_CP_PERFCTR_CP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_4_LO,
+		A6XX_RBBM_PERFCTR_CP_4_HI, 4, A6XX_CP_PERFCTR_CP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_5_LO,
+		A6XX_RBBM_PERFCTR_CP_5_HI, 5, A6XX_CP_PERFCTR_CP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_6_LO,
+		A6XX_RBBM_PERFCTR_CP_6_HI, 6, A6XX_CP_PERFCTR_CP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_7_LO,
+		A6XX_RBBM_PERFCTR_CP_7_HI, 7, A6XX_CP_PERFCTR_CP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_8_LO,
+		A6XX_RBBM_PERFCTR_CP_8_HI, 8, A6XX_CP_PERFCTR_CP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_9_LO,
+		A6XX_RBBM_PERFCTR_CP_9_HI, 9, A6XX_CP_PERFCTR_CP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_10_LO,
+		A6XX_RBBM_PERFCTR_CP_10_HI, 10, A6XX_CP_PERFCTR_CP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_11_LO,
+		A6XX_RBBM_PERFCTR_CP_11_HI, 11, A6XX_CP_PERFCTR_CP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_12_LO,
+		A6XX_RBBM_PERFCTR_CP_12_HI, 12, A6XX_CP_PERFCTR_CP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CP_13_LO,
+		A6XX_RBBM_PERFCTR_CP_13_HI, 13, A6XX_CP_PERFCTR_CP_SEL_13 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rbbm[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_0_LO,
+		A6XX_RBBM_PERFCTR_RBBM_0_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_1_LO,
+		A6XX_RBBM_PERFCTR_RBBM_1_HI, 15, A6XX_RBBM_PERFCTR_RBBM_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_2_LO,
+		A6XX_RBBM_PERFCTR_RBBM_2_HI, 16, A6XX_RBBM_PERFCTR_RBBM_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RBBM_3_LO,
+		A6XX_RBBM_PERFCTR_RBBM_3_HI, 17, A6XX_RBBM_PERFCTR_RBBM_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_pc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_0_LO,
+		A6XX_RBBM_PERFCTR_PC_0_HI, 18, A6XX_PC_PERFCTR_PC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_1_LO,
+		A6XX_RBBM_PERFCTR_PC_1_HI, 19, A6XX_PC_PERFCTR_PC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_2_LO,
+		A6XX_RBBM_PERFCTR_PC_2_HI, 20, A6XX_PC_PERFCTR_PC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_3_LO,
+		A6XX_RBBM_PERFCTR_PC_3_HI, 21, A6XX_PC_PERFCTR_PC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_4_LO,
+		A6XX_RBBM_PERFCTR_PC_4_HI, 22, A6XX_PC_PERFCTR_PC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_5_LO,
+		A6XX_RBBM_PERFCTR_PC_5_HI, 23, A6XX_PC_PERFCTR_PC_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_6_LO,
+		A6XX_RBBM_PERFCTR_PC_6_HI, 24, A6XX_PC_PERFCTR_PC_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_PC_7_LO,
+		A6XX_RBBM_PERFCTR_PC_7_HI, 25, A6XX_PC_PERFCTR_PC_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vfd[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_0_LO,
+		A6XX_RBBM_PERFCTR_VFD_0_HI, 26, A6XX_VFD_PERFCTR_VFD_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_1_LO,
+		A6XX_RBBM_PERFCTR_VFD_1_HI, 27, A6XX_VFD_PERFCTR_VFD_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_2_LO,
+		A6XX_RBBM_PERFCTR_VFD_2_HI, 28, A6XX_VFD_PERFCTR_VFD_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_3_LO,
+		A6XX_RBBM_PERFCTR_VFD_3_HI, 29, A6XX_VFD_PERFCTR_VFD_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_4_LO,
+		A6XX_RBBM_PERFCTR_VFD_4_HI, 30, A6XX_VFD_PERFCTR_VFD_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_5_LO,
+		A6XX_RBBM_PERFCTR_VFD_5_HI, 31, A6XX_VFD_PERFCTR_VFD_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_6_LO,
+		A6XX_RBBM_PERFCTR_VFD_6_HI, 32, A6XX_VFD_PERFCTR_VFD_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VFD_7_LO,
+		A6XX_RBBM_PERFCTR_VFD_7_HI, 33, A6XX_VFD_PERFCTR_VFD_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_hlsq[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_0_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_0_HI, 34, A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_1_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_1_HI, 35, A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_2_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_2_HI, 36, A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_3_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_3_HI, 37, A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_4_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_4_HI, 38, A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_HLSQ_5_LO,
+		A6XX_RBBM_PERFCTR_HLSQ_5_HI, 39, A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vpc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_0_LO,
+		A6XX_RBBM_PERFCTR_VPC_0_HI, 40, A6XX_VPC_PERFCTR_VPC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_1_LO,
+		A6XX_RBBM_PERFCTR_VPC_1_HI, 41, A6XX_VPC_PERFCTR_VPC_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_2_LO,
+		A6XX_RBBM_PERFCTR_VPC_2_HI, 42, A6XX_VPC_PERFCTR_VPC_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_3_LO,
+		A6XX_RBBM_PERFCTR_VPC_3_HI, 43, A6XX_VPC_PERFCTR_VPC_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_4_LO,
+		A6XX_RBBM_PERFCTR_VPC_4_HI, 44, A6XX_VPC_PERFCTR_VPC_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VPC_5_LO,
+		A6XX_RBBM_PERFCTR_VPC_5_HI, 45, A6XX_VPC_PERFCTR_VPC_SEL_5 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ccu[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_0_LO,
+		A6XX_RBBM_PERFCTR_CCU_0_HI, 46, A6XX_RB_PERFCTR_CCU_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_1_LO,
+		A6XX_RBBM_PERFCTR_CCU_1_HI, 47, A6XX_RB_PERFCTR_CCU_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_2_LO,
+		A6XX_RBBM_PERFCTR_CCU_2_HI, 48, A6XX_RB_PERFCTR_CCU_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_3_LO,
+		A6XX_RBBM_PERFCTR_CCU_3_HI, 49, A6XX_RB_PERFCTR_CCU_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CCU_4_LO,
+		A6XX_RBBM_PERFCTR_CCU_4_HI, 50, A6XX_RB_PERFCTR_CCU_SEL_4 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tse[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_0_LO,
+		A6XX_RBBM_PERFCTR_TSE_0_HI, 51, A6XX_GRAS_PERFCTR_TSE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_1_LO,
+		A6XX_RBBM_PERFCTR_TSE_1_HI, 52, A6XX_GRAS_PERFCTR_TSE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_2_LO,
+		A6XX_RBBM_PERFCTR_TSE_2_HI, 53, A6XX_GRAS_PERFCTR_TSE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TSE_3_LO,
+		A6XX_RBBM_PERFCTR_TSE_3_HI, 54, A6XX_GRAS_PERFCTR_TSE_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_ras[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_0_LO,
+		A6XX_RBBM_PERFCTR_RAS_0_HI, 55, A6XX_GRAS_PERFCTR_RAS_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_1_LO,
+		A6XX_RBBM_PERFCTR_RAS_1_HI, 56, A6XX_GRAS_PERFCTR_RAS_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_2_LO,
+		A6XX_RBBM_PERFCTR_RAS_2_HI, 57, A6XX_GRAS_PERFCTR_RAS_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RAS_3_LO,
+		A6XX_RBBM_PERFCTR_RAS_3_HI, 58, A6XX_GRAS_PERFCTR_RAS_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_uche[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_0_LO,
+		A6XX_RBBM_PERFCTR_UCHE_0_HI, 59, A6XX_UCHE_PERFCTR_UCHE_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_1_LO,
+		A6XX_RBBM_PERFCTR_UCHE_1_HI, 60, A6XX_UCHE_PERFCTR_UCHE_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_2_LO,
+		A6XX_RBBM_PERFCTR_UCHE_2_HI, 61, A6XX_UCHE_PERFCTR_UCHE_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_3_LO,
+		A6XX_RBBM_PERFCTR_UCHE_3_HI, 62, A6XX_UCHE_PERFCTR_UCHE_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_4_LO,
+		A6XX_RBBM_PERFCTR_UCHE_4_HI, 63, A6XX_UCHE_PERFCTR_UCHE_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_5_LO,
+		A6XX_RBBM_PERFCTR_UCHE_5_HI, 64, A6XX_UCHE_PERFCTR_UCHE_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_6_LO,
+		A6XX_RBBM_PERFCTR_UCHE_6_HI, 65, A6XX_UCHE_PERFCTR_UCHE_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_7_LO,
+		A6XX_RBBM_PERFCTR_UCHE_7_HI, 66, A6XX_UCHE_PERFCTR_UCHE_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_8_LO,
+		A6XX_RBBM_PERFCTR_UCHE_8_HI, 67, A6XX_UCHE_PERFCTR_UCHE_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_9_LO,
+		A6XX_RBBM_PERFCTR_UCHE_9_HI, 68, A6XX_UCHE_PERFCTR_UCHE_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_10_LO,
+		A6XX_RBBM_PERFCTR_UCHE_10_HI, 69,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_UCHE_11_LO,
+		A6XX_RBBM_PERFCTR_UCHE_11_HI, 70,
+					A6XX_UCHE_PERFCTR_UCHE_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_tp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_0_LO,
+		A6XX_RBBM_PERFCTR_TP_0_HI, 71, A6XX_TPL1_PERFCTR_TP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_1_LO,
+		A6XX_RBBM_PERFCTR_TP_1_HI, 72, A6XX_TPL1_PERFCTR_TP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_2_LO,
+		A6XX_RBBM_PERFCTR_TP_2_HI, 73, A6XX_TPL1_PERFCTR_TP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_3_LO,
+		A6XX_RBBM_PERFCTR_TP_3_HI, 74, A6XX_TPL1_PERFCTR_TP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_4_LO,
+		A6XX_RBBM_PERFCTR_TP_4_HI, 75, A6XX_TPL1_PERFCTR_TP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_5_LO,
+		A6XX_RBBM_PERFCTR_TP_5_HI, 76, A6XX_TPL1_PERFCTR_TP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_6_LO,
+		A6XX_RBBM_PERFCTR_TP_6_HI, 77, A6XX_TPL1_PERFCTR_TP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_7_LO,
+		A6XX_RBBM_PERFCTR_TP_7_HI, 78, A6XX_TPL1_PERFCTR_TP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_8_LO,
+		A6XX_RBBM_PERFCTR_TP_8_HI, 79, A6XX_TPL1_PERFCTR_TP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_9_LO,
+		A6XX_RBBM_PERFCTR_TP_9_HI, 80, A6XX_TPL1_PERFCTR_TP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_10_LO,
+		A6XX_RBBM_PERFCTR_TP_10_HI, 81, A6XX_TPL1_PERFCTR_TP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_TP_11_LO,
+		A6XX_RBBM_PERFCTR_TP_11_HI, 82, A6XX_TPL1_PERFCTR_TP_SEL_11 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_sp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_0_LO,
+		A6XX_RBBM_PERFCTR_SP_0_HI, 83, A6XX_SP_PERFCTR_SP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_1_LO,
+		A6XX_RBBM_PERFCTR_SP_1_HI, 84, A6XX_SP_PERFCTR_SP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_2_LO,
+		A6XX_RBBM_PERFCTR_SP_2_HI, 85, A6XX_SP_PERFCTR_SP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_3_LO,
+		A6XX_RBBM_PERFCTR_SP_3_HI, 86, A6XX_SP_PERFCTR_SP_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_4_LO,
+		A6XX_RBBM_PERFCTR_SP_4_HI, 87, A6XX_SP_PERFCTR_SP_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_5_LO,
+		A6XX_RBBM_PERFCTR_SP_5_HI, 88, A6XX_SP_PERFCTR_SP_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_6_LO,
+		A6XX_RBBM_PERFCTR_SP_6_HI, 89, A6XX_SP_PERFCTR_SP_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_7_LO,
+		A6XX_RBBM_PERFCTR_SP_7_HI, 90, A6XX_SP_PERFCTR_SP_SEL_7 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_8_LO,
+		A6XX_RBBM_PERFCTR_SP_8_HI, 91, A6XX_SP_PERFCTR_SP_SEL_8 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_9_LO,
+		A6XX_RBBM_PERFCTR_SP_9_HI, 92, A6XX_SP_PERFCTR_SP_SEL_9 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_10_LO,
+		A6XX_RBBM_PERFCTR_SP_10_HI, 93, A6XX_SP_PERFCTR_SP_SEL_10 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_11_LO,
+		A6XX_RBBM_PERFCTR_SP_11_HI, 94, A6XX_SP_PERFCTR_SP_SEL_11 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_12_LO,
+		A6XX_RBBM_PERFCTR_SP_12_HI, 95, A6XX_SP_PERFCTR_SP_SEL_12 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_13_LO,
+		A6XX_RBBM_PERFCTR_SP_13_HI, 96, A6XX_SP_PERFCTR_SP_SEL_13 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_14_LO,
+		A6XX_RBBM_PERFCTR_SP_14_HI, 97, A6XX_SP_PERFCTR_SP_SEL_14 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_15_LO,
+		A6XX_RBBM_PERFCTR_SP_15_HI, 98, A6XX_SP_PERFCTR_SP_SEL_15 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_16_LO,
+		A6XX_RBBM_PERFCTR_SP_16_HI, 99, A6XX_SP_PERFCTR_SP_SEL_16 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_17_LO,
+		A6XX_RBBM_PERFCTR_SP_17_HI, 100, A6XX_SP_PERFCTR_SP_SEL_17 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_18_LO,
+		A6XX_RBBM_PERFCTR_SP_18_HI, 101, A6XX_SP_PERFCTR_SP_SEL_18 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_19_LO,
+		A6XX_RBBM_PERFCTR_SP_19_HI, 102, A6XX_SP_PERFCTR_SP_SEL_19 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_20_LO,
+		A6XX_RBBM_PERFCTR_SP_20_HI, 103, A6XX_SP_PERFCTR_SP_SEL_20 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_21_LO,
+		A6XX_RBBM_PERFCTR_SP_21_HI, 104, A6XX_SP_PERFCTR_SP_SEL_21 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_22_LO,
+		A6XX_RBBM_PERFCTR_SP_22_HI, 105, A6XX_SP_PERFCTR_SP_SEL_22 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_SP_23_LO,
+		A6XX_RBBM_PERFCTR_SP_23_HI, 106, A6XX_SP_PERFCTR_SP_SEL_23 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_rb[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_0_LO,
+		A6XX_RBBM_PERFCTR_RB_0_HI, 107, A6XX_RB_PERFCTR_RB_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_1_LO,
+		A6XX_RBBM_PERFCTR_RB_1_HI, 108, A6XX_RB_PERFCTR_RB_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_2_LO,
+		A6XX_RBBM_PERFCTR_RB_2_HI, 109, A6XX_RB_PERFCTR_RB_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_3_LO,
+		A6XX_RBBM_PERFCTR_RB_3_HI, 110, A6XX_RB_PERFCTR_RB_SEL_3 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_4_LO,
+		A6XX_RBBM_PERFCTR_RB_4_HI, 111, A6XX_RB_PERFCTR_RB_SEL_4 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_5_LO,
+		A6XX_RBBM_PERFCTR_RB_5_HI, 112, A6XX_RB_PERFCTR_RB_SEL_5 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_6_LO,
+		A6XX_RBBM_PERFCTR_RB_6_HI, 113, A6XX_RB_PERFCTR_RB_SEL_6 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_RB_7_LO,
+		A6XX_RBBM_PERFCTR_RB_7_HI, 114, A6XX_RB_PERFCTR_RB_SEL_7 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vsc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_0_LO,
+		A6XX_RBBM_PERFCTR_VSC_0_HI, 115, A6XX_VSC_PERFCTR_VSC_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_VSC_1_LO,
+		A6XX_RBBM_PERFCTR_VSC_1_HI, 116, A6XX_VSC_PERFCTR_VSC_SEL_1 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_lrz[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_0_LO,
+		A6XX_RBBM_PERFCTR_LRZ_0_HI, 117, A6XX_GRAS_PERFCTR_LRZ_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_1_LO,
+		A6XX_RBBM_PERFCTR_LRZ_1_HI, 118, A6XX_GRAS_PERFCTR_LRZ_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_2_LO,
+		A6XX_RBBM_PERFCTR_LRZ_2_HI, 119, A6XX_GRAS_PERFCTR_LRZ_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_LRZ_3_LO,
+		A6XX_RBBM_PERFCTR_LRZ_3_HI, 120, A6XX_GRAS_PERFCTR_LRZ_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_cmp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_0_LO,
+		A6XX_RBBM_PERFCTR_CMP_0_HI, 121, A6XX_RB_PERFCTR_CMP_SEL_0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_1_LO,
+		A6XX_RBBM_PERFCTR_CMP_1_HI, 122, A6XX_RB_PERFCTR_CMP_SEL_1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_2_LO,
+		A6XX_RBBM_PERFCTR_CMP_2_HI, 123, A6XX_RB_PERFCTR_CMP_SEL_2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_RBBM_PERFCTR_CMP_3_LO,
+		A6XX_RBBM_PERFCTR_CMP_3_HI, 124, A6XX_RB_PERFCTR_CMP_SEL_3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW0,
+		A6XX_VBIF_PERF_CNT_HIGH0, -1, A6XX_VBIF_PERF_CNT_SEL0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW1,
+		A6XX_VBIF_PERF_CNT_HIGH1, -1, A6XX_VBIF_PERF_CNT_SEL1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW2,
+		A6XX_VBIF_PERF_CNT_HIGH2, -1, A6XX_VBIF_PERF_CNT_SEL2 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_CNT_LOW3,
+		A6XX_VBIF_PERF_CNT_HIGH3, -1, A6XX_VBIF_PERF_CNT_SEL3 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_vbif_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW0,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH0, -1, A6XX_VBIF_PERF_PWR_CNT_EN0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW1,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH1, -1, A6XX_VBIF_PERF_PWR_CNT_EN1 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_VBIF_PERF_PWR_CNT_LOW2,
+		A6XX_VBIF_PERF_PWR_CNT_HIGH2, -1, A6XX_VBIF_PERF_PWR_CNT_EN2 },
+};
+
+static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
+		A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
+};
+
+#define A6XX_PERFCOUNTER_GROUP(offset, name) \
+	ADRENO_PERFCOUNTER_GROUP(a6xx, offset, name)
+
+#define A6XX_PERFCOUNTER_GROUP_FLAGS(offset, name, flags) \
+	ADRENO_PERFCOUNTER_GROUP_FLAGS(a6xx, offset, name, flags)
+
+static struct adreno_perfcount_group a6xx_perfcounter_groups
+				[KGSL_PERFCOUNTER_GROUP_MAX] = {
+	A6XX_PERFCOUNTER_GROUP(CP, cp),
+	A6XX_PERFCOUNTER_GROUP(RBBM, rbbm),
+	A6XX_PERFCOUNTER_GROUP(PC, pc),
+	A6XX_PERFCOUNTER_GROUP(VFD, vfd),
+	A6XX_PERFCOUNTER_GROUP(HLSQ, hlsq),
+	A6XX_PERFCOUNTER_GROUP(VPC, vpc),
+	A6XX_PERFCOUNTER_GROUP(CCU, ccu),
+	A6XX_PERFCOUNTER_GROUP(CMP, cmp),
+	A6XX_PERFCOUNTER_GROUP(TSE, tse),
+	A6XX_PERFCOUNTER_GROUP(RAS, ras),
+	A6XX_PERFCOUNTER_GROUP(LRZ, lrz),
+	A6XX_PERFCOUNTER_GROUP(UCHE, uche),
+	A6XX_PERFCOUNTER_GROUP(TP, tp),
+	A6XX_PERFCOUNTER_GROUP(SP, sp),
+	A6XX_PERFCOUNTER_GROUP(RB, rb),
+	A6XX_PERFCOUNTER_GROUP(VSC, vsc),
+	A6XX_PERFCOUNTER_GROUP(VBIF, vbif),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+	A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
+		ADRENO_PERFCOUNTER_GROUP_FIXED),
+};
+
+static struct adreno_perfcounters a6xx_perfcounters = {
+	a6xx_perfcounter_groups,
+	ARRAY_SIZE(a6xx_perfcounter_groups),
+};
+
 /* Register offset defines for A6XX, in order of enum adreno_regs */
 static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
 
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A6XX_CP_RB_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A6XX_CP_RB_BASE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
 				A6XX_CP_RB_RPTR_ADDR_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
@@ -1529,9 +1908,28 @@
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A6XX_CP_RB_RPTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A6XX_CP_RB_WPTR),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A6XX_CP_RB_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A6XX_CP_SQE_CNTL),
 	ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A6XX_CP_MISC_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_HW_FAULT, A6XX_CP_HW_FAULT),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A6XX_CP_IB1_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, A6XX_CP_IB1_BASE_HI),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A6XX_CP_IB1_REM_SIZE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A6XX_CP_IB2_BASE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, A6XX_CP_IB2_BASE_HI),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR),
+	ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+					A6XX_RBBM_PERFCTR_LOAD_CMD0),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+					A6XX_RBBM_PERFCTR_LOAD_CMD1),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
+					A6XX_RBBM_PERFCTR_LOAD_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD3,
+					A6XX_RBBM_PERFCTR_LOAD_CMD3),
 
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A6XX_RBBM_INT_0_MASK),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A6XX_RBBM_INT_0_STATUS),
@@ -1543,6 +1941,10 @@
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
 					  A6XX_RBBM_BLOCK_SW_RESET_CMD2),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
+				A6XX_RBBM_PERFCTR_LOAD_VALUE_HI),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
 				A6XX_CP_ALWAYS_ON_COUNTER_LO),
 	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
@@ -1592,13 +1994,17 @@
 struct adreno_gpudev adreno_a6xx_gpudev = {
 	.reg_offsets = &a6xx_reg_offsets,
 	.start = a6xx_start,
+	.snapshot = a6xx_snapshot,
 	.irq = &a6xx_irq,
+	.snapshot_data = &a6xx_snapshot_data,
 	.irq_trace = trace_kgsl_a5xx_irq_status,
 	.num_prio_levels = KGSL_PRIORITY_MAX_RB_LEVELS,
 	.platform_setup = a6xx_platform_setup,
+	.init = a6xx_init,
 	.rb_start = a6xx_rb_start,
 	.regulator_enable = a6xx_sptprac_enable,
 	.regulator_disable = a6xx_sptprac_disable,
+	.perfcounters = &a6xx_perfcounters,
 	.microcode_read = a6xx_microcode_read,
 	.enable_64bit = a6xx_enable_64bit,
 	.llc_configure_gpu_scid = a6xx_llc_configure_gpu_scid,
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
new file mode 100644
index 0000000..4b96f56
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ADRENO_A6XX_H_
+#define _ADRENO_A6XX_H_
+
+#include "a6xx_reg.h"
+
+#define CP_CLUSTER_FE		0x0
+#define CP_CLUSTER_SP_VS	0x1
+#define CP_CLUSTER_PC_VS	0x2
+#define CP_CLUSTER_GRAS		0x3
+#define CP_CLUSTER_SP_PS	0x4
+#define CP_CLUSTER_PS		0x5
+
+
+void a6xx_snapshot(struct adreno_device *adreno_dev,
+		struct kgsl_snapshot *snapshot);
+
+void a6xx_crashdump_init(struct adreno_device *adreno_dev);
+
+#endif
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
new file mode 100644
index 0000000..7d87096
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -0,0 +1,1294 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+#include "adreno_snapshot.h"
+#include "a6xx_reg.h"
+#include "adreno_a6xx.h"
+#include "kgsl_gmu.h"
+
+#define A6XX_NUM_CTXTS 2
+
+static const unsigned int a6xx_gras_cluster[] = {
+	0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809D, 0x80A0, 0x80A6,
+	0x80AF, 0x80F1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+	0x8400, 0x840B,
+};
+
+static const unsigned int a6xx_ps_cluster[] = {
+	0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881E, 0x8820, 0x8865,
+	0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+	0x88C0, 0x88c1, 0x88D0, 0x88E3, 0x88F0, 0x88F3, 0x8900, 0x891A,
+	0x8927, 0x8928, 0x8C00, 0x8C01, 0x8C17, 0x8C33, 0x9200, 0x9216,
+	0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const unsigned int a6xx_fe_cluster[] = {
+	0x9300, 0x9306, 0x9800, 0x9806, 0x9B00, 0x9B07, 0xA000, 0xA009,
+	0xA00E, 0xA0EF, 0xA0F8, 0xA0F8,
+};
+
+static const unsigned int a6xx_pc_vs_cluster[] = {
+	0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9B00, 0x9B07,
+};
+
+static struct a6xx_cluster_registers {
+	unsigned int id;
+	const unsigned int *regs;
+	unsigned int num_sets;
+	unsigned int offset0;
+	unsigned int offset1;
+} a6xx_clusters[] = {
+	{ CP_CLUSTER_GRAS, a6xx_gras_cluster, ARRAY_SIZE(a6xx_gras_cluster)/2 },
+	{ CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2 },
+	{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2 },
+	{ CP_CLUSTER_PC_VS, a6xx_pc_vs_cluster,
+					ARRAY_SIZE(a6xx_pc_vs_cluster)/2 },
+};
+
+struct a6xx_cluster_regs_info {
+	struct a6xx_cluster_registers *cluster;
+	unsigned int ctxt_id;
+};
+
+static const unsigned int a6xx_sp_vs_hlsq_cluster[] = {
+	0xB800, 0xB803, 0xB820, 0xB822,
+};
+
+static const unsigned int a6xx_sp_vs_sp_cluster[] = {
+	0xA800, 0xA824, 0xA830, 0xA83C, 0xA840, 0xA864, 0xA870, 0xA895,
+	0xA8A0, 0xA8AF, 0xA8C0, 0xA8C3,
+};
+
+static const unsigned int a6xx_hlsq_duplicate_cluster[] = {
+	0xBB10, 0xBB11, 0xBB20, 0xBB29,
+};
+
+static const unsigned int a6xx_hlsq_2d_duplicate_cluster[] = {
+	0xBD80, 0xBD80,
+};
+
+static const unsigned int a6xx_sp_duplicate_cluster[] = {
+	0xAB00, 0xAB00, 0xAB04, 0xAB05, 0xAB10, 0xAB1B, 0xAB20, 0xAB20,
+};
+
+static const unsigned int a6xx_tp_duplicate_cluster[] = {
+	0xB300, 0xB307, 0xB309, 0xB309, 0xB380, 0xB382,
+};
+
+static const unsigned int a6xx_sp_ps_hlsq_cluster[] = {
+	0xB980, 0xB980, 0xB982, 0xB987, 0xB990, 0xB99B, 0xB9A0, 0xB9A2,
+	0xB9C0, 0xB9C9,
+};
+
+static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
+	0xBD80, 0xBD80,
+};
+
+static const unsigned int a6xx_sp_ps_sp_cluster[] = {
+	0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
+	0xAA00, 0xAA00, 0xAA30, 0xAA31,
+};
+
+static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
+	0xACC0, 0xACC0,
+};
+
+static const unsigned int a6xx_sp_ps_tp_cluster[] = {
+	0xB180, 0xB183, 0xB190, 0xB191,
+};
+
+static const unsigned int a6xx_sp_ps_tp_2d_cluster[] = {
+	0xB4C0, 0xB4D1,
+};
+
+static struct a6xx_cluster_dbgahb_registers {
+	unsigned int id;
+	unsigned int regbase;
+	unsigned int statetype;
+	const unsigned int *regs;
+	unsigned int num_sets;
+} a6xx_dbgahb_ctx_clusters[] = {
+	{ CP_CLUSTER_SP_VS, 0x0002E000, 0x41, a6xx_sp_vs_hlsq_cluster,
+		ARRAY_SIZE(a6xx_sp_vs_hlsq_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002A000, 0x21, a6xx_sp_vs_sp_cluster,
+		ARRAY_SIZE(a6xx_sp_vs_sp_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002EC00, 0x41, a6xx_hlsq_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002F000, 0x45, a6xx_hlsq_2d_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_2d_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002AC00, 0x21, a6xx_sp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002CC00, 0x1, a6xx_tp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002E600, 0x42, a6xx_sp_ps_hlsq_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_hlsq_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002F300, 0x46, a6xx_sp_ps_hlsq_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_hlsq_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002A600, 0x22, a6xx_sp_ps_sp_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_sp_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002B300, 0x26, a6xx_sp_ps_sp_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_sp_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002C600, 0x2, a6xx_sp_ps_tp_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_tp_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002D300, 0x6, a6xx_sp_ps_tp_2d_cluster,
+		ARRAY_SIZE(a6xx_sp_ps_tp_2d_cluster) / 2 },
+	{ CP_CLUSTER_SP_PS, 0x0002EC00, 0x42, a6xx_hlsq_duplicate_cluster,
+		ARRAY_SIZE(a6xx_hlsq_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002AC00, 0x22, a6xx_sp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_sp_duplicate_cluster) / 2 },
+	{ CP_CLUSTER_SP_VS, 0x0002CC00, 0x2, a6xx_tp_duplicate_cluster,
+		ARRAY_SIZE(a6xx_tp_duplicate_cluster) / 2 },
+};
+
+struct a6xx_cluster_dbgahb_regs_info {
+	struct a6xx_cluster_dbgahb_registers *cluster;
+	unsigned int ctxt_id;
+};
+
+static const unsigned int a6xx_hlsq_non_ctx_registers[] = {
+	0xBE00, 0xBE01, 0xBE04, 0xBE05, 0xBE08, 0xBE09, 0xBE10, 0xBE15,
+	0xBE20, 0xBE23,
+};
+
+static const unsigned int a6xx_sp_non_ctx_registers[] = {
+	0xAE00, 0xAE04, 0xAE0C, 0xAE0C, 0xAE0F, 0xAE2B, 0xAE30, 0xAE32,
+	0xAE35, 0xAE35, 0xAE3A, 0xAE3F, 0xAE50, 0xAE52,
+};
+
+static const unsigned int a6xx_tp_non_ctx_registers[] = {
+	0xB600, 0xB601, 0xB604, 0xB605, 0xB610, 0xB61B, 0xB620, 0xB623,
+};
+
+static struct a6xx_non_ctx_dbgahb_registers {
+	unsigned int regbase;
+	unsigned int statetype;
+	const unsigned int *regs;
+	unsigned int num_sets;
+} a6xx_non_ctx_dbgahb[] = {
+	{ 0x0002F800, 0x40, a6xx_hlsq_non_ctx_registers,
+		ARRAY_SIZE(a6xx_hlsq_non_ctx_registers) / 2 },
+	{ 0x0002B800, 0x20, a6xx_sp_non_ctx_registers,
+		ARRAY_SIZE(a6xx_sp_non_ctx_registers) / 2 },
+	{ 0x0002D800, 0x0, a6xx_tp_non_ctx_registers,
+		ARRAY_SIZE(a6xx_tp_non_ctx_registers) / 2 },
+};
+
+static const unsigned int a6xx_vbif_ver_20xxxxxx_registers[] = {
+	/* VBIF */
+	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302D, 0x3030, 0x3031,
+	0x3034, 0x3036, 0x303C, 0x303D, 0x3040, 0x3040, 0x3042, 0x3042,
+	0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061, 0x3064, 0x3068,
+	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
+	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
+	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
+	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+	0x3156, 0x3156, 0x3158, 0x3158, 0x315A, 0x315A, 0x315C, 0x315C,
+	0x315E, 0x315E, 0x3160, 0x3160, 0x3162, 0x3162, 0x340C, 0x340C,
+	0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const unsigned int a6xx_gmu_registers[] = {
+	/* GMU */
+	0x1B400, 0x1C3FF, 0x1C400, 0x1D3FF,
+};
+
+static const struct adreno_vbif_snapshot_registers
+a6xx_vbif_snapshot_registers[] = {
+	{ 0x20040000, 0xFF000000, a6xx_vbif_ver_20xxxxxx_registers,
+				ARRAY_SIZE(a6xx_vbif_ver_20xxxxxx_registers)/2},
+};
+
+/*
+ * Set of registers to dump for A6XX on snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+static const unsigned int a6xx_registers[] = {
+	/* RBBM */
+	0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0014, 0x0014,
+	0x0018, 0x001B, 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042,
+	0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE,
+	0x00B0, 0x00FB, 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213,
+	0x0218, 0x023D, 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B,
+	0x050E, 0x0511, 0x0533, 0x0533, 0x0540, 0x0555,
+	/* CP */
+	0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827,
+	0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A,
+	0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3,
+	0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D,
+	0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6,
+	0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03,
+	/* VSC */
+	0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
+	/* UCHE */
+	0x0E10, 0x0E13, 0x0E17, 0x0E19, 0x0E1C, 0x0E2B, 0x0E30, 0x0E32,
+	0x0E38, 0x0E39,
+	/* GRAS */
+	0x8600, 0x8601, 0x8604, 0x8605, 0x8610, 0x861B, 0x8620, 0x8620,
+	0x8628, 0x862B, 0x8630, 0x8637,
+	/* RB */
+	0x8E01, 0x8E01, 0x8E04, 0x8E05, 0x8E07, 0x8E08, 0x8E0C, 0x8E0C,
+	0x8E10, 0x8E1C, 0x8E20, 0x8E25, 0x8E28, 0x8E28, 0x8E2C, 0x8E2F,
+	0x8E3B, 0x8E3E, 0x8E40, 0x8E43, 0x8E50, 0x8E5E, 0x8E70, 0x8E77,
+	/* VPC */
+	0x9600, 0x9604, 0x9624, 0x9637,
+	/* PC */
+	0x9E00, 0x9E01, 0x9E03, 0x9E0E, 0x9E11, 0x9E16, 0x9E19, 0x9E19,
+	0x9E1C, 0x9E1C, 0x9E20, 0x9E23, 0x9E30, 0x9E31, 0x9E34, 0x9E34,
+	0x9E70, 0x9E72, 0x9E78, 0x9E79, 0x9E80, 0x9FFF,
+	/* VFD */
+	0xA600, 0xA601, 0xA603, 0xA603, 0xA60A, 0xA60A, 0xA610, 0xA617,
+	0xA630, 0xA630, 0xD200, 0xD263,
+};
+
+enum a6xx_debugbus_id {
+	A6XX_DBGBUS_CP           = 0x1,
+	A6XX_DBGBUS_RBBM         = 0x2,
+	A6XX_DBGBUS_VBIF         = 0x3,
+	A6XX_DBGBUS_HLSQ         = 0x4,
+	A6XX_DBGBUS_UCHE         = 0x5,
+	A6XX_DBGBUS_DPM          = 0x6,
+	A6XX_DBGBUS_TESS         = 0x7,
+	A6XX_DBGBUS_PC           = 0x8,
+	A6XX_DBGBUS_VFDP         = 0x9,
+	A6XX_DBGBUS_VPC          = 0xa,
+	A6XX_DBGBUS_TSE          = 0xb,
+	A6XX_DBGBUS_RAS          = 0xc,
+	A6XX_DBGBUS_VSC          = 0xd,
+	A6XX_DBGBUS_COM          = 0xe,
+	A6XX_DBGBUS_LRZ          = 0x10,
+	A6XX_DBGBUS_A2D          = 0x11,
+	A6XX_DBGBUS_CCUFCHE      = 0x12,
+	A6XX_DBGBUS_GMU          = 0x13,
+	A6XX_DBGBUS_RBP          = 0x14,
+	A6XX_DBGBUS_DCS          = 0x15,
+	A6XX_DBGBUS_RBBM_CFG     = 0x16,
+	A6XX_DBGBUS_CX           = 0x17,
+	A6XX_DBGBUS_TPFCHE       = 0x19,
+	A6XX_DBGBUS_GPC          = 0x1d,
+	A6XX_DBGBUS_LARC         = 0x1e,
+	A6XX_DBGBUS_HLSQ_SPTP    = 0x1f,
+	A6XX_DBGBUS_RB_0         = 0x20,
+	A6XX_DBGBUS_RB_1         = 0x21,
+	A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
+	A6XX_DBGBUS_CCU_0        = 0x28,
+	A6XX_DBGBUS_CCU_1        = 0x29,
+	A6XX_DBGBUS_VFD_0        = 0x38,
+	A6XX_DBGBUS_VFD_1        = 0x39,
+	A6XX_DBGBUS_VFD_2        = 0x3a,
+	A6XX_DBGBUS_VFD_3        = 0x3b,
+	A6XX_DBGBUS_SP_0         = 0x40,
+	A6XX_DBGBUS_SP_1         = 0x41,
+	A6XX_DBGBUS_TPL1_0       = 0x48,
+	A6XX_DBGBUS_TPL1_1       = 0x49,
+	A6XX_DBGBUS_TPL1_2       = 0x4a,
+	A6XX_DBGBUS_TPL1_3       = 0x4b,
+};
+
+static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
+	{ A6XX_DBGBUS_CP, 0x100, },
+	{ A6XX_DBGBUS_RBBM, 0x100, },
+	{ A6XX_DBGBUS_HLSQ, 0x100, },
+	{ A6XX_DBGBUS_UCHE, 0x100, },
+	{ A6XX_DBGBUS_DPM, 0x100, },
+	{ A6XX_DBGBUS_TESS, 0x100, },
+	{ A6XX_DBGBUS_PC, 0x100, },
+	{ A6XX_DBGBUS_VFDP, 0x100, },
+	{ A6XX_DBGBUS_VPC, 0x100, },
+	{ A6XX_DBGBUS_TSE, 0x100, },
+	{ A6XX_DBGBUS_RAS, 0x100, },
+	{ A6XX_DBGBUS_VSC, 0x100, },
+	{ A6XX_DBGBUS_COM, 0x100, },
+	{ A6XX_DBGBUS_LRZ, 0x100, },
+	{ A6XX_DBGBUS_A2D, 0x100, },
+	{ A6XX_DBGBUS_CCUFCHE, 0x100, },
+	{ A6XX_DBGBUS_RBP, 0x100, },
+	{ A6XX_DBGBUS_DCS, 0x100, },
+	{ A6XX_DBGBUS_RBBM_CFG, 0x100, },
+	{ A6XX_DBGBUS_TPFCHE, 0x100, },
+	{ A6XX_DBGBUS_GPC, 0x100, },
+	{ A6XX_DBGBUS_LARC, 0x100, },
+	{ A6XX_DBGBUS_HLSQ_SPTP, 0x100, },
+	{ A6XX_DBGBUS_RB_0, 0x100, },
+	{ A6XX_DBGBUS_RB_1, 0x100, },
+	{ A6XX_DBGBUS_UCHE_WRAPPER, 0x100, },
+	{ A6XX_DBGBUS_CCU_0, 0x100, },
+	{ A6XX_DBGBUS_CCU_1, 0x100, },
+	{ A6XX_DBGBUS_VFD_0, 0x100, },
+	{ A6XX_DBGBUS_VFD_1, 0x100, },
+	{ A6XX_DBGBUS_VFD_2, 0x100, },
+	{ A6XX_DBGBUS_VFD_3, 0x100, },
+	{ A6XX_DBGBUS_SP_0, 0x100, },
+	{ A6XX_DBGBUS_SP_1, 0x100, },
+	{ A6XX_DBGBUS_TPL1_0, 0x100, },
+	{ A6XX_DBGBUS_TPL1_1, 0x100, },
+	{ A6XX_DBGBUS_TPL1_2, 0x100, },
+	{ A6XX_DBGBUS_TPL1_3, 0x100, },
+};
+
+#define A6XX_NUM_SHADER_BANKS 3
+#define A6XX_SHADER_STATETYPE_SHIFT 8
+
+enum a6xx_shader_obj {
+	A6XX_TP0_TMO_DATA               = 0x9,
+	A6XX_TP0_SMO_DATA               = 0xa,
+	A6XX_TP0_MIPMAP_BASE_DATA       = 0xb,
+	A6XX_TP1_TMO_DATA               = 0x19,
+	A6XX_TP1_SMO_DATA               = 0x1a,
+	A6XX_TP1_MIPMAP_BASE_DATA       = 0x1b,
+	A6XX_SP_INST_DATA               = 0x29,
+	A6XX_SP_LB_0_DATA               = 0x2a,
+	A6XX_SP_LB_1_DATA               = 0x2b,
+	A6XX_SP_LB_2_DATA               = 0x2c,
+	A6XX_SP_LB_3_DATA               = 0x2d,
+	A6XX_SP_LB_4_DATA               = 0x2e,
+	A6XX_SP_LB_5_DATA               = 0x2f,
+	A6XX_SP_CB_BINDLESS_DATA        = 0x30,
+	A6XX_SP_CB_LEGACY_DATA          = 0x31,
+	A6XX_SP_UAV_DATA                = 0x32,
+	A6XX_SP_INST_TAG                = 0x33,
+	A6XX_SP_CB_BINDLESS_TAG         = 0x34,
+	A6XX_SP_TMO_UMO_TAG             = 0x35,
+	A6XX_SP_SMO_TAG                 = 0x36,
+	A6XX_SP_STATE_DATA              = 0x37,
+	A6XX_HLSQ_CHUNK_CVS_RAM         = 0x49,
+	A6XX_HLSQ_CHUNK_CPS_RAM         = 0x4a,
+	A6XX_HLSQ_CHUNK_CVS_RAM_TAG     = 0x4b,
+	A6XX_HLSQ_CHUNK_CPS_RAM_TAG     = 0x4c,
+	A6XX_HLSQ_ICB_CVS_CB_BASE_TAG   = 0x4d,
+	A6XX_HLSQ_ICB_CPS_CB_BASE_TAG   = 0x4e,
+	A6XX_HLSQ_CVS_MISC_RAM          = 0x50,
+	A6XX_HLSQ_CPS_MISC_RAM          = 0x51,
+	A6XX_HLSQ_INST_RAM              = 0x52,
+	A6XX_HLSQ_GFX_CVS_CONST_RAM     = 0x53,
+	A6XX_HLSQ_GFX_CPS_CONST_RAM     = 0x54,
+	A6XX_HLSQ_CVS_MISC_RAM_TAG      = 0x55,
+	A6XX_HLSQ_CPS_MISC_RAM_TAG      = 0x56,
+	A6XX_HLSQ_INST_RAM_TAG          = 0x57,
+	A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 0x58,
+	A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 0x59,
+	A6XX_HLSQ_PWR_REST_RAM          = 0x5a,
+	A6XX_HLSQ_PWR_REST_TAG          = 0x5b,
+	A6XX_HLSQ_DATAPATH_META         = 0x60,
+	A6XX_HLSQ_FRONTEND_META         = 0x61,
+	A6XX_HLSQ_INDIRECT_META         = 0x62,
+	A6XX_HLSQ_BACKEND_META          = 0x63
+};
+
+struct a6xx_shader_block {
+	unsigned int statetype;
+	unsigned int sz;
+	uint64_t offset;
+};
+
+struct a6xx_shader_block_info {
+	struct a6xx_shader_block *block;
+	unsigned int bank;
+	uint64_t offset;
+};
+
+static struct a6xx_shader_block a6xx_shader_blocks[] = {
+	{A6XX_TP0_TMO_DATA,               0x200},
+	{A6XX_TP0_SMO_DATA,               0x80,},
+	{A6XX_TP0_MIPMAP_BASE_DATA,       0x3C0},
+	{A6XX_TP1_TMO_DATA,               0x200},
+	{A6XX_TP1_SMO_DATA,               0x80,},
+	{A6XX_TP1_MIPMAP_BASE_DATA,       0x3C0},
+	{A6XX_SP_INST_DATA,               0x800},
+	{A6XX_SP_LB_0_DATA,               0x800},
+	{A6XX_SP_LB_1_DATA,               0x800},
+	{A6XX_SP_LB_2_DATA,               0x800},
+	{A6XX_SP_LB_3_DATA,               0x800},
+	{A6XX_SP_LB_4_DATA,               0x800},
+	{A6XX_SP_LB_5_DATA,               0x200},
+	{A6XX_SP_CB_BINDLESS_DATA,        0x2000},
+	{A6XX_SP_CB_LEGACY_DATA,          0x280,},
+	{A6XX_SP_UAV_DATA,                0x80,},
+	{A6XX_SP_INST_TAG,                0x80,},
+	{A6XX_SP_CB_BINDLESS_TAG,         0x80,},
+	{A6XX_SP_TMO_UMO_TAG,             0x80,},
+	{A6XX_SP_SMO_TAG,                 0x80},
+	{A6XX_SP_STATE_DATA,              0x3F},
+	{A6XX_HLSQ_CHUNK_CVS_RAM,         0x1C0},
+	{A6XX_HLSQ_CHUNK_CPS_RAM,         0x280},
+	{A6XX_HLSQ_CHUNK_CVS_RAM_TAG,     0x40,},
+	{A6XX_HLSQ_CHUNK_CPS_RAM_TAG,     0x40,},
+	{A6XX_HLSQ_ICB_CVS_CB_BASE_TAG,   0x4,},
+	{A6XX_HLSQ_ICB_CPS_CB_BASE_TAG,   0x4,},
+	{A6XX_HLSQ_CVS_MISC_RAM,          0x1C0},
+	{A6XX_HLSQ_CPS_MISC_RAM,          0x580},
+	{A6XX_HLSQ_INST_RAM,              0x800},
+	{A6XX_HLSQ_GFX_CVS_CONST_RAM,     0x800},
+	{A6XX_HLSQ_GFX_CPS_CONST_RAM,     0x800},
+	{A6XX_HLSQ_CVS_MISC_RAM_TAG,      0x8,},
+	{A6XX_HLSQ_CPS_MISC_RAM_TAG,      0x4,},
+	{A6XX_HLSQ_INST_RAM_TAG,          0x80,},
+	{A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xC,},
+	{A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10},
+	{A6XX_HLSQ_PWR_REST_RAM,          0x28},
+	{A6XX_HLSQ_PWR_REST_TAG,          0x14},
+	{A6XX_HLSQ_DATAPATH_META,         0x40,},
+	{A6XX_HLSQ_FRONTEND_META,         0x40},
+	{A6XX_HLSQ_INDIRECT_META,         0x40,}
+};
+
+static struct kgsl_memdesc a6xx_capturescript;
+static struct kgsl_memdesc a6xx_crashdump_registers;
+static bool crash_dump_valid;
+
+static size_t a6xx_legacy_snapshot_registers(struct kgsl_device *device,
+		u8 *buf, size_t remain)
+{
+	struct kgsl_snapshot_registers regs = {
+		.regs = a6xx_registers,
+		.count = ARRAY_SIZE(a6xx_registers) / 2,
+	};
+
+	return kgsl_snapshot_dump_registers(device, buf, remain, &regs);
+}
+
+static struct cdregs {
+	const unsigned int *regs;
+	unsigned int size;
+} _a6xx_cd_registers[] = {
+	{ a6xx_registers, ARRAY_SIZE(a6xx_registers) },
+};
+
+#define REG_PAIR_COUNT(_a, _i) \
+	(((_a)[(2 * (_i)) + 1] - (_a)[2 * (_i)]) + 1)
+
+static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf,
+		size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int *src = (unsigned int *)a6xx_crashdump_registers.hostptr;
+	unsigned int i, j, k;
+	unsigned int count = 0;
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_registers(device, buf, remain);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		for (j = 0; j < regs->size / 2; j++) {
+			unsigned int start = regs->regs[2 * j];
+			unsigned int end = regs->regs[(2 * j) + 1];
+
+			if (remain < ((end - start) + 1) * 8) {
+				SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+				goto out;
+			}
+
+			remain -= ((end - start) + 1) * 8;
+
+			for (k = start; k <= end; k++, count++) {
+				*data++ = k;
+				*data++ = *src++;
+			}
+		}
+	}
+
+out:
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{	/* Dump one shader block bank, pre-read by the crash dumper */
+	struct kgsl_snapshot_shader *header =
+		(struct kgsl_snapshot_shader *) buf;
+	struct a6xx_shader_block_info *info =
+		(struct a6xx_shader_block_info *) priv;
+	struct a6xx_shader_block *block = info->block;
+	unsigned int *data = (unsigned int *) (buf + sizeof(*header));
+
+	if (remain < SHADER_SECTION_SZ(block->sz)) {
+		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
+		return 0;
+	}
+
+	header->type = block->statetype;
+	header->index = info->bank; /* which of the A6XX_NUM_SHADER_BANKS */
+	header->size = block->sz;
+
+	memcpy(data, a6xx_crashdump_registers.hostptr + info->offset,
+		block->sz); /* NOTE(review): if sz is dwords (as SHADER_SECTION_SZ implies), length should be sz * 4 — confirm */
+
+	return SHADER_SECTION_SZ(block->sz);
+}
+
+static void a6xx_snapshot_shader(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	unsigned int i, j;
+	struct a6xx_shader_block_info info;
+
+	/* Shader blocks can only be read by the crash dumper */
+	if (crash_dump_valid == false)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
+			info.block = &a6xx_shader_blocks[i];
+			info.bank = j;
+			info.offset = a6xx_shader_blocks[i].offset +
+				(j * a6xx_shader_blocks[i].sz);
+
+			/* Shader working/shadow memory */
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_SHADER,
+				snapshot, a6xx_snapshot_shader_memory, &info);
+		}
+	}
+}
+
+static void a6xx_snapshot_mempool(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	unsigned int pool_size;
+
+	/* Save the mempool size, then set it to 0 to stabilize it for dump */
+	kgsl_regread(device, A6XX_CP_MEM_POOL_SIZE, &pool_size);
+	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 0);
+
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_MEM_POOL_DBG_ADDR, A6XX_CP_MEM_POOL_DBG_DATA,
+		0, 0x2060);
+
+	/* Restore the saved mempool size */
+	kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, pool_size);
+}
+
+static inline unsigned int a6xx_read_dbgahb(struct kgsl_device *device,
+				unsigned int regbase, unsigned int reg)
+{
+	/* regbase is in bytes, reg in dwords: scale base before the delta */
+	unsigned int val;
+	unsigned int aperture_reg = A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+				(reg - (regbase / 4));
+	kgsl_regread(device, aperture_reg, &val);
+	return val;
+}
+
+static size_t a6xx_snapshot_cluster_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+				(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_dbgahb_regs_info *info =
+				(struct a6xx_cluster_dbgahb_regs_info *)priv;
+	struct a6xx_cluster_dbgahb_registers *cur_cluster = info->cluster;
+	unsigned int read_sel;
+	unsigned int data_size = 0;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int i, j;
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cur_cluster->id;
+
+	read_sel = ((cur_cluster->statetype + info->ctxt_id * 2) & 0xff) << 8; /* statetype steps by 2 per context */
+	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
+
+	for (i = 0; i < cur_cluster->num_sets; i++) {
+		unsigned int start = cur_cluster->regs[2 * i];
+		unsigned int end = cur_cluster->regs[2 * i + 1];
+
+		if (remain < (end - start + 3) * 4) { /* 2 hdr dwords + values */
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		*data++ = start | (1 << 31); /* bit 31 marks a start/end pair */
+		*data++ = end;
+
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			val = a6xx_read_dbgahb(device, cur_cluster->regbase, j);
+			*data++ = val;
+
+		}
+	}
+
+out:
+	return data_size + sizeof(*header);
+}
+
+static size_t a6xx_snapshot_non_ctx_dbgahb(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header =
+				(struct kgsl_snapshot_regs *)buf;
+	struct a6xx_non_ctx_dbgahb_registers *regs =
+				(struct a6xx_non_ctx_dbgahb_registers *)priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0;
+	unsigned int read_sel;
+	int i, j;
+
+	/* Figure out how many registers we are going to dump */
+	for (i = 0; i < regs->num_sets; i++) {
+		int start = regs->regs[i * 2];
+		int end = regs->regs[i * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	header->count = count;
+
+	read_sel = (regs->statetype & 0xff) << 8;
+	kgsl_regwrite(device, A6XX_HLSQ_DBG_READ_SEL, read_sel);
+
+	for (i = 0; i < regs->num_sets; i++) {
+		unsigned int start = regs->regs[2 * i];
+		unsigned int end = regs->regs[2 * i + 1];
+
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			val = a6xx_read_dbgahb(device, regs->regbase, j);
+			*data++ = j;
+			*data++ = val;
+
+		}
+	}
+	return (count * 8) + sizeof(*header);
+}
+
+/* Add snapshot sections for every register reachable over the debug AHB */
+static void a6xx_snapshot_dbgahb_regs(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	struct a6xx_cluster_dbgahb_regs_info info;
+	int idx, ctxt;
+
+	/* One MVC section per (cluster, context) pair */
+	for (idx = 0; idx < ARRAY_SIZE(a6xx_dbgahb_ctx_clusters); idx++) {
+		info.cluster = &a6xx_dbgahb_ctx_clusters[idx];
+
+		for (ctxt = 0; ctxt < A6XX_NUM_CTXTS; ctxt++) {
+			info.ctxt_id = ctxt;
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
+				a6xx_snapshot_cluster_dbgahb, &info);
+		}
+	}
+
+	/* Non-context registers go out as plain register sections */
+	for (idx = 0; idx < ARRAY_SIZE(a6xx_non_ctx_dbgahb); idx++)
+		kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+			a6xx_snapshot_non_ctx_dbgahb, &a6xx_non_ctx_dbgahb[idx]);
+}
+
+/*
+ * a6xx_legacy_snapshot_mvc() - Dump one MVC cluster/context by reading
+ * the registers directly over the register bus (the fallback path used
+ * when the crash dumper did not produce valid data)
+ * @device: Device being snapshotted
+ * @buf: Snapshot memory to write the section into
+ * @remain: Number of bytes left in the snapshot region
+ * @priv: Pointer to the a6xx_cluster_regs_info to dump
+ *
+ * Returns the number of bytes written (header + data), or 0 if even the
+ * header did not fit. If space runs out mid-cluster the section is
+ * truncated and the bytes written so far are returned.
+ */
+static size_t a6xx_legacy_snapshot_mvc(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+					(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_regs_info *info =
+					(struct a6xx_cluster_regs_info *)priv;
+	struct a6xx_cluster_registers *cur_cluster = info->cluster;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int ctxt = info->ctxt_id;
+	unsigned int start, end, i, j, aperture_cntl = 0;
+	unsigned int data_size = 0;
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cur_cluster->id;
+
+	/*
+	 * Set the AHB control for the Host to read from the
+	 * cluster/context for this iteration.
+	 */
+	aperture_cntl = ((cur_cluster->id & 0x7) << 8) | (ctxt << 4) | ctxt;
+	kgsl_regwrite(device, A6XX_CP_APERTURE_CNTL_HOST, aperture_cntl);
+
+	for (i = 0; i < cur_cluster->num_sets; i++) {
+		start = cur_cluster->regs[2 * i];
+		end = cur_cluster->regs[2 * i + 1];
+
+		/* Each range needs 2 header dwords plus one dword per reg */
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		/* Range header: start address (with bit 31 set) then end */
+		*data++ = start | (1 << 31);
+		*data++ = end;
+		for (j = start; j <= end; j++) {
+			unsigned int val;
+
+			kgsl_regread(device, j, &val);
+			*data++ = val;
+		}
+	}
+out:
+	return data_size + sizeof(*header);
+}
+
+/*
+ * a6xx_snapshot_mvc() - Dump one MVC cluster/context from the crash
+ * dumper's capture buffer
+ * @device: Device being snapshotted
+ * @buf: Snapshot memory to write the section into
+ * @remain: Number of bytes left in the snapshot region
+ * @priv: Pointer to the a6xx_cluster_regs_info to dump
+ *
+ * Falls back to a6xx_legacy_snapshot_mvc() (direct register reads) when
+ * the crash dumper data is not valid. Returns the number of bytes
+ * written (header + data), truncating the section if space runs out.
+ */
+static size_t a6xx_snapshot_mvc(struct kgsl_device *device, u8 *buf,
+				size_t remain, void *priv)
+{
+	struct kgsl_snapshot_mvc_regs *header =
+				(struct kgsl_snapshot_mvc_regs *)buf;
+	struct a6xx_cluster_regs_info *info =
+				(struct a6xx_cluster_regs_info *)priv;
+	struct a6xx_cluster_registers *cluster = info->cluster;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int *src;
+	int i, j;
+	unsigned int start, end;
+	size_t data_size = 0;
+
+	if (crash_dump_valid == false)
+		return a6xx_legacy_snapshot_mvc(device, buf, remain, info);
+
+	if (remain < sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+		return 0;
+	}
+
+	remain -= sizeof(*header);
+
+	header->ctxt_id = info->ctxt_id;
+	header->cluster_id = cluster->id;
+
+	/*
+	 * The crash dumper stored each context's values at the per-cluster
+	 * offsets recorded when the capture script was built.
+	 */
+	src = (unsigned int *)(a6xx_crashdump_registers.hostptr +
+		(header->ctxt_id ? cluster->offset1 : cluster->offset0));
+
+	for (i = 0; i < cluster->num_sets; i++) {
+		start = cluster->regs[2 * i];
+		end = cluster->regs[2 * i + 1];
+
+		/* Each range needs 2 header dwords plus one dword per reg */
+		if (remain < (end - start + 3) * 4) {
+			SNAPSHOT_ERR_NOMEM(device, "MVC REGISTERS");
+			goto out;
+		}
+
+		remain -= (end - start + 3) * 4;
+		data_size += (end - start + 3) * 4;
+
+		/* Range header: start address (with bit 31 set) then end */
+		*data++ = start | (1 << 31);
+		*data++ = end;
+		for (j = start; j <= end; j++)
+			*data++ = *src++;
+	}
+
+out:
+	return data_size + sizeof(*header);
+
+}
+
+/* Add one MVC snapshot section per (cluster, context) pair */
+static void a6xx_snapshot_mvc_regs(struct kgsl_device *device,
+				struct kgsl_snapshot *snapshot)
+{
+	struct a6xx_cluster_regs_info info;
+	int idx, ctxt;
+
+	for (idx = 0; idx < ARRAY_SIZE(a6xx_clusters); idx++) {
+		info.cluster = &a6xx_clusters[idx];
+
+		for (ctxt = 0; ctxt < A6XX_NUM_CTXTS; ctxt++) {
+			info.ctxt_id = ctxt;
+			kgsl_snapshot_add_section(device,
+				KGSL_SNAPSHOT_SECTION_MVC, snapshot,
+				a6xx_snapshot_mvc, &info);
+		}
+	}
+}
+
+/*
+ * a6xx_dbgc_debug_bus_read() - Read one data unit from the debug trace bus
+ * @device: Device being snapshotted
+ * @block_id: Debug bus block to select
+ * @index: Index within the selected block
+ * @val: Output pointer; receives two dwords (TRACE_BUF2, then TRACE_BUF1)
+ */
+static void a6xx_dbgc_debug_bus_read(struct kgsl_device *device,
+	unsigned int block_id, unsigned int index, unsigned int *val)
+{
+	unsigned int reg;
+
+	reg = (block_id << A6XX_DBGC_CFG_DBGBUS_SEL_PING_BLK_SEL_SHIFT) |
+			(index << A6XX_DBGC_CFG_DBGBUS_SEL_PING_INDEX_SHIFT);
+
+	/* Program the same selector into all four select registers */
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2, val);
+	val++;
+	kgsl_regread(device, A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1, val);
+}
+
+/*
+ * a6xx_snapshot_dbgc_debugbus_block() - Capture debug data for a gpu block
+ * @device: Device being snapshotted
+ * @buf: Snapshot memory to write the section into
+ * @remain: Number of bytes left in the snapshot region
+ * @priv: Pointer to the adreno_debugbus_block to capture
+ *
+ * Returns the size of the section written, or 0 if it did not fit.
+ */
+static size_t a6xx_snapshot_dbgc_debugbus_block(struct kgsl_device *device,
+	u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_debugbus *header =
+		(struct kgsl_snapshot_debugbus *)buf;
+	struct adreno_debugbus_block *block = priv;
+	int i;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	unsigned int dwords;
+	size_t size;
+
+	dwords = block->dwords;
+
+	/* For a6xx each debug bus data unit is 2 DWORDS */
+	size = (dwords * sizeof(unsigned int) * 2) + sizeof(*header);
+
+	if (remain < size) {
+		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+		return 0;
+	}
+
+	header->id = block->block_id;
+	header->count = dwords * 2;
+
+	/* Each read fills two consecutive dwords in the section */
+	for (i = 0; i < dwords; i++)
+		a6xx_dbgc_debug_bus_read(device, block->block_id, i,
+					&data[i*2]);
+
+	return size;
+}
+
+/*
+ * a6xx_snapshot_debugbus() - Capture debug bus data
+ * @device: Device being snapshotted
+ * @snapshot: Snapshot instance to add the sections to
+ *
+ * Configure the debug bus controller, then add one DEBUGBUS section per
+ * entry in a6xx_dbgc_debugbus_blocks.
+ */
+static void a6xx_snapshot_debugbus(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	int i;
+
+	/* Set segment/granularity fields and enable tracing */
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLT,
+		(0xf << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT_SHIFT) |
+		(0x4 << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU_SHIFT) |
+		(0x20 << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN_SHIFT));
+
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_CNTLM,
+		0xf << A6XX_DBGC_CFG_DBGBUS_CTLTM_ENABLE_SHIFT);
+
+	/* Zero the IVTL select registers */
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+	/* Route byte lanes 0-15 through in identity order */
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+		(0 << A6XX_DBGC_CFG_DBGBUS_BYTEL0_SHIFT) |
+		(1 << A6XX_DBGC_CFG_DBGBUS_BYTEL1_SHIFT) |
+		(2 << A6XX_DBGC_CFG_DBGBUS_BYTEL2_SHIFT) |
+		(3 << A6XX_DBGC_CFG_DBGBUS_BYTEL3_SHIFT) |
+		(4 << A6XX_DBGC_CFG_DBGBUS_BYTEL4_SHIFT) |
+		(5 << A6XX_DBGC_CFG_DBGBUS_BYTEL5_SHIFT) |
+		(6 << A6XX_DBGC_CFG_DBGBUS_BYTEL6_SHIFT) |
+		(7 << A6XX_DBGC_CFG_DBGBUS_BYTEL7_SHIFT));
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+		(8 << A6XX_DBGC_CFG_DBGBUS_BYTEL8_SHIFT) |
+		(9 << A6XX_DBGC_CFG_DBGBUS_BYTEL9_SHIFT) |
+		(10 << A6XX_DBGC_CFG_DBGBUS_BYTEL10_SHIFT) |
+		(11 << A6XX_DBGC_CFG_DBGBUS_BYTEL11_SHIFT) |
+		(12 << A6XX_DBGC_CFG_DBGBUS_BYTEL12_SHIFT) |
+		(13 << A6XX_DBGC_CFG_DBGBUS_BYTEL13_SHIFT) |
+		(14 << A6XX_DBGC_CFG_DBGBUS_BYTEL14_SHIFT) |
+		(15 << A6XX_DBGC_CFG_DBGBUS_BYTEL15_SHIFT));
+
+	/* Clear all the mask registers */
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+	kgsl_regwrite(device, A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_dbgc_debugbus_blocks); i++) {
+		kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+			snapshot, a6xx_snapshot_dbgc_debugbus_block,
+			(void *) &a6xx_dbgc_debugbus_blocks[i]);
+	}
+}
+
+/*
+ * a6xx_snapshot_dump_gmu_registers() - Dump GMU registers into a snapshot
+ * register section
+ * @device: Device being snapshotted
+ * @buf: Snapshot memory to write the section into
+ * @remain: Number of bytes left in the snapshot region
+ * @priv: Pointer to the kgsl_snapshot_registers range list to dump
+ *
+ * Reads are done with kgsl_gmu_regread(); each register is written as an
+ * (address, value) pair. Returns the section size, or 0 if it did not fit.
+ */
+static size_t a6xx_snapshot_dump_gmu_registers(struct kgsl_device *device,
+		u8 *buf, size_t remain, void *priv)
+{
+	struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+	struct kgsl_snapshot_registers *regs = priv;
+	unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+	int count = 0, j, k;
+
+	/* Figure out how many registers we are going to dump */
+	for (j = 0; j < regs->count; j++) {
+		int start = regs->regs[j * 2];
+		int end = regs->regs[j * 2 + 1];
+
+		count += (end - start + 1);
+	}
+
+	/* 8 bytes per register: one dword address + one dword value */
+	if (remain < (count * 8) + sizeof(*header)) {
+		SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+		return 0;
+	}
+
+	for (j = 0; j < regs->count; j++) {
+		unsigned int start = regs->regs[j * 2];
+		unsigned int end = regs->regs[j * 2 + 1];
+
+		for (k = start; k <= end; k++) {
+			unsigned int val;
+
+			kgsl_gmu_regread(device, k, &val);
+			*data++ = k;
+			*data++ = val;
+		}
+	}
+
+	header->count = count;
+
+	/* Return the size of the section */
+	return (count * 8) + sizeof(*header);
+}
+
+/*
+ * a6xx_snapshot_gmu() - Add a register section for the GMU registers
+ * @device: Device being snapshotted
+ * @snapshot: Snapshot instance to add the section to
+ *
+ * Does nothing when no GMU is enabled on this target.
+ */
+static void a6xx_snapshot_gmu(struct kgsl_device *device,
+		struct kgsl_snapshot *snapshot)
+{
+	/* a6xx_gmu_registers holds start/end pairs, hence the divide by 2 */
+	struct kgsl_snapshot_registers gmu_regs = {
+		.regs = a6xx_gmu_registers,
+		.count = ARRAY_SIZE(a6xx_gmu_registers) / 2,
+	};
+
+	if (!kgsl_gmu_isenabled(device))
+		return;
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+			snapshot, a6xx_snapshot_dump_gmu_registers, &gmu_regs);
+}
+
+/*
+ * _a6xx_do_crashdump() - Run the CP crash dump capture script
+ * @device: Device being snapshotted
+ *
+ * Point the CP crash dumper at the prebuilt capture script, kick it off
+ * and poll (bounded by CP_CRASH_DUMPER_TIMEOUT) for completion. Sets
+ * crash_dump_valid so later snapshot callbacks know whether the data in
+ * a6xx_crashdump_registers can be trusted.
+ */
+static void _a6xx_do_crashdump(struct kgsl_device *device)
+{
+	unsigned long wait_time;
+	unsigned int reg = 0;
+	unsigned int val;
+
+	crash_dump_valid = false;
+
+	/* Nothing to do if the script/data buffers were never allocated */
+	if (a6xx_capturescript.gpuaddr == 0 ||
+		a6xx_crashdump_registers.gpuaddr == 0)
+		return;
+
+	/* IF the SMMU is stalled we cannot do a crash dump */
+	kgsl_regread(device, A6XX_RBBM_STATUS3, &val);
+	if (val & BIT(24))
+		return;
+
+	/* Turn on APRIV so we can access the buffers */
+	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 1);
+
+	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_LO,
+			lower_32_bits(a6xx_capturescript.gpuaddr));
+	kgsl_regwrite(device, A6XX_CP_CRASH_SCRIPT_BASE_HI,
+			upper_32_bits(a6xx_capturescript.gpuaddr));
+	kgsl_regwrite(device, A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+	/* Poll the status register for the completion bit (bit 1) */
+	wait_time = jiffies + msecs_to_jiffies(CP_CRASH_DUMPER_TIMEOUT);
+	while (!time_after(jiffies, wait_time)) {
+		kgsl_regread(device, A6XX_CP_CRASH_DUMP_STATUS, &reg);
+		if (reg & 0x2)
+			break;
+		cpu_relax();
+	}
+
+	/* Turn APRIV back off */
+	kgsl_regwrite(device, A6XX_CP_MISC_CNTL, 0);
+
+	if (!(reg & 0x2)) {
+		KGSL_CORE_ERR("Crash dump timed out: 0x%X\n", reg);
+		return;
+	}
+
+	crash_dump_valid = true;
+}
+
+/*
+ * a6xx_snapshot() - A6XX GPU snapshot function
+ * @adreno_dev: Device being snapshotted
+ * @snapshot: Pointer to the snapshot instance
+ *
+ * This is where all of the A6XX specific bits and pieces are grabbed
+ * into the snapshot memory
+ */
+void a6xx_snapshot(struct adreno_device *adreno_dev,
+		struct kgsl_snapshot *snapshot)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+	struct adreno_snapshot_data *snap_data = gpudev->snapshot_data;
+
+	/* Try to run the crash dumper; sections below use its data if valid */
+	_a6xx_do_crashdump(device);
+
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+		snapshot, a6xx_snapshot_registers, NULL);
+
+	adreno_snapshot_vbif_registers(device, snapshot,
+		a6xx_vbif_snapshot_registers,
+		ARRAY_SIZE(a6xx_vbif_snapshot_registers));
+
+	/* CP_SQE indexed registers */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA,
+		0, snap_data->sect_sizes->cp_pfp);
+
+	/* CP_DRAW_STATE */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
+		0, 0x100);
+
+	 /* SQE_UCODE Cache */
+	kgsl_snapshot_indexed_registers(device, snapshot,
+		A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
+		0, 0x6000);
+
+	/* CP ROQ */
+	kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_DEBUG,
+		snapshot, adreno_snapshot_cp_roq,
+		&snap_data->sect_sizes->roq);
+
+	/* Mempool debug data */
+	a6xx_snapshot_mempool(device, snapshot);
+
+	/* Shader memory */
+	a6xx_snapshot_shader(device, snapshot);
+
+	/* MVC register section */
+	a6xx_snapshot_mvc_regs(device, snapshot);
+
+	/* registers dumped through DBG AHB */
+	a6xx_snapshot_dbgahb_regs(device, snapshot);
+
+	a6xx_snapshot_debugbus(device, snapshot);
+
+	/* GMU TCM data dumped through AHB */
+	a6xx_snapshot_gmu(device, snapshot);
+}
+
+/*
+ * _a6xx_crashdump_init_mvc() - Append crash dump script commands that
+ * capture every MVC register cluster for both contexts
+ * @ptr: Script buffer to append qword commands to
+ * @offset: In/out byte offset into the crash dump data buffer
+ *
+ * Returns the number of qwords written to the script.
+ */
+static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int i, j, k;
+	unsigned int count;
+
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
+		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+
+		/* Record where each context's data will land for the parser */
+		cluster->offset0 = *offset;
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+			if (j == 1)
+				cluster->offset1 = *offset;
+
+			/*
+			 * Program the aperture register with the
+			 * cluster/context select value. NOTE(review): bit 21
+			 * appears to mark a register-write command in the
+			 * script format — confirm against the CP crash
+			 * dumper documentation.
+			 */
+			ptr[qwords++] = (cluster->id << 8) | (j << 4) | j;
+			ptr[qwords++] =
+				((uint64_t)A6XX_CP_APERTURE_CNTL_HOST << 44) |
+				(1 << 21) | 1;
+
+			for (k = 0; k < cluster->num_sets; k++) {
+				count = REG_PAIR_COUNT(cluster->regs, k);
+				/* Read 'count' regs into the data buffer */
+				ptr[qwords++] =
+				a6xx_crashdump_registers.gpuaddr + *offset;
+				ptr[qwords++] =
+				(((uint64_t)cluster->regs[2 * k]) << 44) |
+						count;
+
+				*offset += count * sizeof(unsigned int);
+			}
+		}
+	}
+
+	return qwords;
+}
+
+/*
+ * _a6xx_crashdump_init_shader() - Append crash dump script commands that
+ * capture every bank of one shader block
+ * @block: Shader block to capture
+ * @ptr: Script buffer to append qword commands to
+ * @offset: In/out byte offset into the crash dump data buffer
+ *
+ * Returns the number of qwords written to the script.
+ */
+static int _a6xx_crashdump_init_shader(struct a6xx_shader_block *block,
+		uint64_t *ptr, uint64_t *offset)
+{
+	int qwords = 0;
+	unsigned int j;
+
+	/* Capture each bank in the block */
+	for (j = 0; j < A6XX_NUM_SHADER_BANKS; j++) {
+		/* Program the aperture */
+		ptr[qwords++] =
+			(block->statetype << A6XX_SHADER_STATETYPE_SHIFT) | j;
+		ptr[qwords++] = (((uint64_t) A6XX_HLSQ_DBG_READ_SEL << 44)) |
+			(1 << 21) | 1;
+
+		/* Read all the data in one chunk */
+		ptr[qwords++] = a6xx_crashdump_registers.gpuaddr + *offset;
+		ptr[qwords++] =
+			(((uint64_t) A6XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
+			block->sz;
+
+		/* Remember the offset of the first bank for easy access */
+		if (j == 0)
+			block->offset = *offset;
+
+		*offset += block->sz * sizeof(unsigned int);
+	}
+
+	return qwords;
+}
+
+/*
+ * a6xx_crashdump_init() - Allocate and build the CP crash dump script
+ * @adreno_dev: Pointer to the adreno device
+ *
+ * Size and allocate the capture script and data buffers, then fill the
+ * script with read commands covering the global registers, the shader
+ * blocks and the MVC clusters. Returns early if both buffers already
+ * exist, so repeat calls are cheap.
+ */
+void a6xx_crashdump_init(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int script_size = 0;
+	unsigned int data_size = 0;
+	unsigned int i, j, k;
+	uint64_t *ptr;
+	uint64_t offset = 0;
+
+	if (a6xx_capturescript.gpuaddr != 0 &&
+		a6xx_crashdump_registers.gpuaddr != 0)
+		return;
+
+	/*
+	 * We need to allocate two buffers:
+	 * 1 - the buffer to hold the draw script
+	 * 2 - the buffer to hold the data
+	 */
+
+	/*
+	 * To save the registers, we need 16 bytes per register pair for the
+	 * script and a dword for each register in the data
+	 */
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		/* Each pair needs 16 bytes (2 qwords) */
+		script_size += (regs->size / 2) * 16;
+
+		/* Each register needs a dword in the data */
+		for (j = 0; j < regs->size / 2; j++)
+			data_size += REG_PAIR_COUNT(regs->regs, j) *
+				sizeof(unsigned int);
+
+	}
+
+	/*
+	 * To save the shader blocks for each block in each type we need 32
+	 * bytes for the script (16 bytes to program the aperture and 16 to
+	 * read the data) and then a block specific number of bytes to hold
+	 * the data
+	 */
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		script_size += 32 * A6XX_NUM_SHADER_BANKS;
+		data_size += a6xx_shader_blocks[i].sz * sizeof(unsigned int) *
+			A6XX_NUM_SHADER_BANKS;
+	}
+
+	/* Calculate the script and data size for MVC registers */
+	for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
+		struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+
+		for (j = 0; j < A6XX_NUM_CTXTS; j++) {
+
+			/* 16 bytes for programming the aperture */
+			script_size += 16;
+
+			/* Reading each pair of registers takes 16 bytes */
+			script_size += 16 * cluster->num_sets;
+
+			/* A dword per register read from the cluster list */
+			for (k = 0; k < cluster->num_sets; k++)
+				data_size += REG_PAIR_COUNT(cluster->regs, k) *
+						sizeof(unsigned int);
+		}
+	}
+
+	/* Now allocate the script and data buffers */
+
+	/* The script buffers needs 2 extra qwords on the end */
+	if (kgsl_allocate_global(device, &a6xx_capturescript,
+		script_size + 16, KGSL_MEMFLAGS_GPUREADONLY,
+		KGSL_MEMDESC_PRIVILEGED, "capturescript"))
+		return;
+
+	if (kgsl_allocate_global(device, &a6xx_crashdump_registers, data_size,
+		0, KGSL_MEMDESC_PRIVILEGED, "capturescript_regs")) {
+		kgsl_free_global(KGSL_DEVICE(adreno_dev), &a6xx_capturescript);
+		return;
+	}
+
+	/* Build the crash script */
+
+	ptr = (uint64_t *)a6xx_capturescript.hostptr;
+
+	/* For the registers, program a read command for each pair */
+	for (i = 0; i < ARRAY_SIZE(_a6xx_cd_registers); i++) {
+		struct cdregs *regs = &_a6xx_cd_registers[i];
+
+		for (j = 0; j < regs->size / 2; j++) {
+			unsigned int r = REG_PAIR_COUNT(regs->regs, j);
+			*ptr++ = a6xx_crashdump_registers.gpuaddr + offset;
+			*ptr++ = (((uint64_t) regs->regs[2 * j]) << 44) | r;
+			offset += r * sizeof(unsigned int);
+		}
+	}
+
+	/* Program each shader block */
+	for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++) {
+		ptr += _a6xx_crashdump_init_shader(&a6xx_shader_blocks[i], ptr,
+							&offset);
+	}
+
+	/* Program the capturescript for the MVC registers */
+	ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
+
+	/* Terminate the script with a pair of zero qwords */
+	*ptr++ = 0;
+	*ptr++ = 0;
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 2d38a1a..b1f832f 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -136,11 +136,14 @@
 				sync_event->context->id, sync_event->timestamp);
 		break;
 	}
-	case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
-		seq_printf(s, "sync: [%pK] %s", sync_event->handle,
-		(sync_event->handle && sync_event->handle->fence)
-				? sync_event->handle->fence->name : "NULL");
+	case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+		char fence_str[128];
+
+		kgsl_dump_fence(sync_event->handle,
+				fence_str, sizeof(fence_str));
+		seq_printf(s, "sync: [%pK] %s", sync_event->handle, fence_str);
 		break;
+	}
 	default:
 		seq_printf(s, "sync: type: %d", sync_event->type);
 		break;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b5f0fca..d1c84f1 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -628,6 +628,8 @@
 	/* Remove the event group from the list */
 	kgsl_del_event_group(&context->events);
 
+	kgsl_sync_timeline_put(context->ktimeline);
+
 	kgsl_context_put(context);
 }
 
@@ -962,7 +964,7 @@
 		if (syncsource == NULL)
 			break;
 
-		kgsl_syncsource_put(syncsource);
+		kgsl_syncsource_cleanup(private, syncsource);
 		next = next + 1;
 	}
 }
@@ -1928,7 +1930,7 @@
 static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
 		struct kgsl_mem_entry *entry, struct kgsl_gpuobj_free *param)
 {
-	struct kgsl_sync_fence_waiter *handle;
+	struct kgsl_sync_fence_cb *handle;
 	struct kgsl_gpu_event_fence event;
 	long ret;
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 0b368a8..b4725c1 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -382,7 +382,7 @@
 	unsigned long priv;
 	struct kgsl_device *device;
 	unsigned int reset_status;
-	struct sync_timeline *timeline;
+	struct kgsl_sync_timeline *ktimeline;
 	struct kgsl_event_group events;
 	unsigned int flags;
 	struct kgsl_pwr_constraint pwr_constraint;
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 910f405..3a87e6e 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -100,15 +100,16 @@
 				retired);
 			break;
 		}
-		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
-			if (event->handle)
-				dev_err(device->dev, "  fence: [%pK] %s\n",
-					event->handle->fence,
-					event->handle->name);
-			else
-				dev_err(device->dev, "  fence: invalid\n");
+		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+			char fence_str[128];
+
+			kgsl_dump_fence(event->handle,
+					fence_str, sizeof(fence_str));
+			dev_err(device->dev,
+				"  fence: %s\n", fence_str);
 			break;
 		}
+		}
 	}
 }
 
@@ -146,15 +147,16 @@
 			dev_err(device->dev, "       [%d] TIMESTAMP %d:%d\n",
 				i, event->context->id, event->timestamp);
 			break;
-		case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
-			if (event->handle != NULL) {
-				dev_err(device->dev, "       [%d] FENCE %s\n",
-				i, event->handle->fence ?
-					event->handle->fence->name : "NULL");
-				kgsl_sync_fence_log(event->handle->fence);
-			}
+		case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+			char fence_str[128];
+
+			kgsl_dump_fence(event->handle,
+					fence_str, sizeof(fence_str));
+			dev_err(device->dev, "       [%d] FENCE %s\n",
+				i, fence_str);
 			break;
 		}
+		}
 	}
 
 	dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
@@ -326,9 +328,10 @@
 static void drawobj_sync_fence_func(void *priv)
 {
 	struct kgsl_drawobj_sync_event *event = priv;
+	char fence_str[128];
 
-	trace_syncpoint_fence_expire(event->syncobj,
-		event->handle ? event->handle->name : "unknown");
+	kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
+	trace_syncpoint_fence_expire(event->syncobj, fence_str);
 
 	drawobj_sync_expire(event->device, event);
 
@@ -349,6 +352,7 @@
 	struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
 	struct kgsl_drawobj_sync_event *event;
 	unsigned int id;
+	char fence_str[128];
 
 	kref_get(&drawobj->refcount);
 
@@ -385,7 +389,8 @@
 		return ret;
 	}
 
-	trace_syncpoint_fence(syncobj, event->handle->name);
+	kgsl_dump_fence(event->handle, fence_str, sizeof(fence_str));
+	trace_syncpoint_fence(syncobj, fence_str);
 
 	return 0;
 }
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index fd9d2bc..5ec98ed 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -122,7 +122,7 @@
 	struct kgsl_drawobj_sync *syncobj;
 	struct kgsl_context *context;
 	unsigned int timestamp;
-	struct kgsl_sync_fence_waiter *handle;
+	struct kgsl_sync_fence_cb *handle;
 	struct kgsl_device *device;
 };
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 49630e6..56e4f23 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1345,12 +1345,19 @@
 
 	gmu_stop(device);
 
-	disable_irq(gmu->gmu_interrupt_num);
-	disable_irq(hfi->hfi_interrupt_num);
-	devm_free_irq(&gmu->pdev->dev,
-			gmu->gmu_interrupt_num, gmu);
-	devm_free_irq(&gmu->pdev->dev,
-			hfi->hfi_interrupt_num, gmu);
+	if (gmu->gmu_interrupt_num) {
+		disable_irq(gmu->gmu_interrupt_num);
+		devm_free_irq(&gmu->pdev->dev,
+				gmu->gmu_interrupt_num, gmu);
+		gmu->gmu_interrupt_num = 0;
+	}
+
+	if (hfi->hfi_interrupt_num) {
+		disable_irq(hfi->hfi_interrupt_num);
+		devm_free_irq(&gmu->pdev->dev,
+				hfi->hfi_interrupt_num, gmu);
+		hfi->hfi_interrupt_num = 0;
+	}
 
 	if (gmu->ccl) {
 		msm_bus_scale_unregister_client(gmu->ccl);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index b0e9292..4d38794 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -26,6 +26,7 @@
 #include "kgsl.h"
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
+#include "kgsl_gmu.h"
 #include "kgsl_trace.h"
 
 #define KGSL_PWRFLAGS_POWER_ON 0
@@ -65,7 +66,8 @@
 	"alwayson_clk",
 	"isense_clk",
 	"rbcpr_clk",
-	"iref_clk"
+	"iref_clk",
+	"gmu_clk"
 };
 
 static unsigned int ib_votes[KGSL_MAX_BUSLEVELS];
@@ -214,6 +216,69 @@
 #endif
 
 /**
+ * kgsl_bus_scale_request() - set GPU BW vote
+ * @device: Pointer to the kgsl_device struct
+ * @buslevel: index of bw vector[] table
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int kgsl_bus_scale_request(struct kgsl_device *device,
+		unsigned int buslevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret;
+
+	/* GMU scales BW */
+	if (kgsl_gmu_isenabled(device)) {
+		/* Drop the vote silently if the GMU HFI is not up yet */
+		if (!(gmu->flags & GMU_HFI_ON))
+			return 0;
+
+		ret = gmu_dcvs_set(gmu, INVALID_DCVS_IDX, buslevel);
+	} else {
+		/* Linux bus driver scales BW */
+		ret = msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU BW scaling failure\n");
+
+	return ret;
+}
+
+/**
+ * kgsl_clk_set_rate() - set GPU clock rate
+ * @device: Pointer to the kgsl_device struct
+ * @pwrlevel: power level in pwrlevels[] table
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int kgsl_clk_set_rate(struct kgsl_device *device,
+		unsigned int pwrlevel)
+{
+	struct gmu_device *gmu = &device->gmu;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret = 0;
+
+	if (kgsl_gmu_isenabled(device)) {
+		/*
+		 * The GMU owns frequency scaling. If HFI is not running
+		 * yet, stash the level so it is applied at wakeup.
+		 */
+		if (!(gmu->flags & GMU_HFI_ON)) {
+			gmu->wakeup_pwrlevel = pwrlevel;
+			return 0;
+		}
+
+		ret = gmu_dcvs_set(gmu, pwrlevel, INVALID_DCVS_IDX);
+	} else {
+		/* No GMU: program the core clock through the clk driver */
+		ret = clk_set_rate(pwr->grp_clks[0],
+				pwr->pwrlevels[pwrlevel].gpu_freq);
+	}
+
+	if (ret)
+		KGSL_PWR_ERR(device, "GPU clk freq set failure\n");
+
+	return ret;
+}
+
+/**
  * kgsl_pwrctrl_buslevel_update() - Recalculate the bus vote and send it
  * @device: Pointer to the kgsl_device struct
  * @on: true for setting and active bus vote, false to turn off the vote
@@ -259,7 +324,7 @@
 
 	/* vote for bus if gpubw-dev support is not enabled */
 	if (pwr->pcl)
-		msm_bus_scale_client_update_request(pwr->pcl, buslevel);
+		kgsl_bus_scale_request(device, buslevel);
 
 	kgsl_pwrctrl_vbif_update(ab);
 }
@@ -388,7 +453,7 @@
 	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
 	/* Change register settings if any  BEFORE pwrlevel change*/
 	kgsl_pwrctrl_pwrlevel_change_settings(device, 0);
-	clk_set_rate(pwr->grp_clks[0], pwrlevel->gpu_freq);
+	kgsl_clk_set_rate(device, pwr->active_pwrlevel);
 	_isense_clk_set_rate(pwr, pwr->active_pwrlevel);
 
 	trace_kgsl_pwrlevel(device,
@@ -1631,9 +1696,8 @@
 				(requested_state != KGSL_STATE_NAP)) {
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					clk_unprepare(pwr->grp_clks[i]);
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1645,9 +1709,8 @@
 			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 				clk_unprepare(pwr->grp_clks[i]);
 			if ((pwr->pwrlevels[0].gpu_freq > 0)) {
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
+				kgsl_clk_set_rate(device,
+						pwr->num_pwrlevels - 1);
 				_isense_clk_set_rate(pwr,
 					pwr->num_pwrlevels - 1);
 			}
@@ -1660,10 +1723,8 @@
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
 				if (pwr->pwrlevels[0].gpu_freq > 0) {
-					clk_set_rate(pwr->grp_clks[0],
-						pwr->pwrlevels
-						[pwr->active_pwrlevel].
-						gpu_freq);
+					kgsl_clk_set_rate(device,
+							pwr->active_pwrlevel);
 					_isense_clk_set_rate(pwr,
 						pwr->active_pwrlevel);
 				}
@@ -2101,11 +2162,11 @@
 		if (freq > 0)
 			freq = clk_round_rate(pwr->grp_clks[0], freq);
 
-		pwr->pwrlevels[i].gpu_freq = freq;
+		if (freq >= pwr->pwrlevels[i].gpu_freq)
+			pwr->pwrlevels[i].gpu_freq = freq;
 	}
 
-	clk_set_rate(pwr->grp_clks[0],
-		pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+	kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
 
 	clk_set_rate(pwr->grp_clks[6],
 		clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ));
@@ -2362,8 +2423,12 @@
 	/* In order to touch a register you must hold the device mutex */
 	WARN_ON(!mutex_is_locked(&device->mutex));
 
-	/* A register access without device power will cause a fatal timeout */
-	BUG_ON(!kgsl_pwrctrl_isenabled(device));
+	/*
+	 * A register access without device power will cause a fatal timeout.
+	 * This is not valid for targets with a GMU.
+	 */
+	if (!kgsl_gmu_isenabled(device))
+		WARN_ON(!kgsl_pwrctrl_isenabled(device));
 }
 EXPORT_SYMBOL(kgsl_pre_hwaccess);
 
@@ -2383,6 +2448,9 @@
 
 	kgsl_pwrctrl_pwrlevel_change(device, level);
 
+	if (kgsl_gmu_isenabled(device))
+		return gmu_start(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	status = kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
 	if (status)
@@ -2394,6 +2462,9 @@
 
 static void kgsl_pwrctrl_disable(struct kgsl_device *device)
 {
+	if (kgsl_gmu_isenabled(device))
+		return gmu_stop(device);
+
 	/* Order pwrrail/clk sequence based upon platform */
 	device->ftbl->regulator_disable(device);
 	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 58f16e8..62ee597 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -25,7 +25,7 @@
 
 #define KGSL_PWR_ON	0xFFFF
 
-#define KGSL_MAX_CLKS 14
+#define KGSL_MAX_CLKS 15
 #define KGSL_MAX_REGULATORS 2
 
 #define KGSL_MAX_PWRLEVELS 10
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 2cb8b8f..d2ff8f1 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -58,6 +58,7 @@
 #define KGSL_SNAPSHOT_SECTION_MEMLIST      0x0E01
 #define KGSL_SNAPSHOT_SECTION_MEMLIST_V2   0x0E02
 #define KGSL_SNAPSHOT_SECTION_SHADER       0x1201
+#define KGSL_SNAPSHOT_SECTION_MVC          0x1501
 
 #define KGSL_SNAPSHOT_SECTION_END          0xFFFF
 
@@ -196,6 +197,12 @@
 	int count;     /* Number of dwords in the data */
 } __packed;
 
+/* MVC register sub-section header */
+struct kgsl_snapshot_mvc_regs {
+	int ctxt_id;
+	int cluster_id;
+} __packed;
+
 /* Istore sub-section header */
 struct kgsl_snapshot_istore {
 	int count;   /* Number of instructions in the istore */
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index 6752f3b7..3b57b73 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -21,73 +21,86 @@
 
 #include "kgsl_sync.h"
 
-static void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
+static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *timeline,
 	unsigned int timestamp);
 
-static struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
-	struct kgsl_context *context, unsigned int timestamp)
+static const struct fence_ops kgsl_sync_fence_ops;
+
+static struct kgsl_sync_fence *kgsl_sync_fence_create(
+					struct kgsl_context *context,
+					unsigned int timestamp)
 {
-	struct sync_pt *pt;
+	struct kgsl_sync_fence *kfence;
+	struct kgsl_sync_timeline *ktimeline = context->ktimeline;
+	unsigned long flags;
 
-	pt = sync_pt_create(timeline, (int) sizeof(struct kgsl_sync_pt));
-	if (pt) {
-		struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
+	/* Get a refcount to the timeline. Put when released */
+	if (!kref_get_unless_zero(&ktimeline->kref))
+		return NULL;
 
-		kpt->context = context;
-		kpt->timestamp = timestamp;
+	kfence = kzalloc(sizeof(*kfence), GFP_KERNEL);
+	if (kfence == NULL) {
+		kgsl_sync_timeline_put(ktimeline);
+		KGSL_DRV_ERR(context->device, "Couldn't allocate fence\n");
+		return NULL;
 	}
-	return pt;
+
+	kfence->parent = ktimeline;
+	kfence->context_id = context->id;
+	kfence->timestamp = timestamp;
+
+	fence_init(&kfence->fence, &kgsl_sync_fence_ops, &ktimeline->lock,
+		ktimeline->fence_context, timestamp);
+
+	kfence->sync_file = sync_file_create(&kfence->fence);
+
+	if (kfence->sync_file == NULL) {
+		kgsl_sync_timeline_put(ktimeline);
+		KGSL_DRV_ERR(context->device, "Create sync_file failed\n");
+		kfree(kfence);
+		return NULL;
+	}
+
+	/* Get a refcount to the fence. Put when signaled */
+	fence_get(&kfence->fence);
+
+	spin_lock_irqsave(&ktimeline->lock, flags);
+	list_add_tail(&kfence->child_list, &ktimeline->child_list_head);
+	spin_unlock_irqrestore(&ktimeline->lock, flags);
+
+	return kfence;
 }
 
-/*
- * This should only be called on sync_pts which have been created but
- * not added to a fence.
- */
-static void kgsl_sync_pt_destroy(struct sync_pt *pt)
+static void kgsl_sync_fence_release(struct fence *fence)
 {
-	sync_pt_free(pt);
+	struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence;
+
+	kgsl_sync_timeline_put(kfence->parent);
+	kfree(kfence);
 }
 
-static struct sync_pt *kgsl_sync_pt_dup(struct sync_pt *pt)
+/* Called with ktimeline->lock held */
+bool kgsl_sync_fence_has_signaled(struct fence *fence)
 {
-	struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
+	struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence;
+	struct kgsl_sync_timeline *ktimeline = kfence->parent;
+	unsigned int ts = kfence->timestamp;
 
-	return kgsl_sync_pt_create(sync_pt_parent(pt),
-				kpt->context, kpt->timestamp);
+	return (timestamp_cmp(ktimeline->last_timestamp, ts) >= 0);
 }
 
-static int kgsl_sync_pt_has_signaled(struct sync_pt *pt)
+bool kgsl_enable_signaling(struct fence *fence)
 {
-	struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
-	struct kgsl_sync_timeline *ktimeline =
-		 (struct kgsl_sync_timeline *) sync_pt_parent(pt);
-	unsigned int ts = kpt->timestamp;
-	int ret = 0;
-
-	spin_lock(&ktimeline->lock);
-	ret = (timestamp_cmp(ktimeline->last_timestamp, ts) >= 0);
-	spin_unlock(&ktimeline->lock);
-
-	return ret;
+	return !kgsl_sync_fence_has_signaled(fence);
 }
 
-static int kgsl_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
-{
-	struct kgsl_sync_pt *kpt_a = (struct kgsl_sync_pt *) a;
-	struct kgsl_sync_pt *kpt_b = (struct kgsl_sync_pt *) b;
-	unsigned int ts_a = kpt_a->timestamp;
-	unsigned int ts_b = kpt_b->timestamp;
-
-	return timestamp_cmp(ts_a, ts_b);
-}
-
-struct kgsl_fence_event_priv {
+struct kgsl_sync_fence_event_priv {
 	struct kgsl_context *context;
 	unsigned int timestamp;
 };
 
 /**
- * kgsl_fence_event_cb - Event callback for a fence timestamp event
+ * kgsl_sync_fence_event_cb - Event callback for a fence timestamp event
  * @device - The KGSL device that expired the timestamp
  * @context- Pointer to the context that owns the event
  * @priv: Private data for the callback
@@ -96,12 +109,12 @@
  * Signal a fence following the expiration of a timestamp
  */
 
-static void kgsl_fence_event_cb(struct kgsl_device *device,
+static void kgsl_sync_fence_event_cb(struct kgsl_device *device,
 		struct kgsl_event_group *group, void *priv, int result)
 {
-	struct kgsl_fence_event_priv *ev = priv;
+	struct kgsl_sync_fence_event_priv *ev = priv;
 
-	kgsl_sync_timeline_signal(ev->context->timeline, ev->timestamp);
+	kgsl_sync_timeline_signal(ev->context->ktimeline, ev->timestamp);
 	kgsl_context_put(ev->context);
 	kfree(ev);
 }
@@ -109,7 +122,7 @@
 static int _add_fence_event(struct kgsl_device *device,
 	struct kgsl_context *context, unsigned int timestamp)
 {
-	struct kgsl_fence_event_priv *event;
+	struct kgsl_sync_fence_event_priv *event;
 	int ret;
 
 	event = kmalloc(sizeof(*event), GFP_KERNEL);
@@ -127,10 +140,9 @@
 
 	event->context = context;
 	event->timestamp = timestamp;
-	event->context = context;
 
 	ret = kgsl_add_event(device, &context->events, timestamp,
-		kgsl_fence_event_cb, event);
+		kgsl_sync_fence_event_cb, event);
 
 	if (ret) {
 		kgsl_context_put(context);
@@ -159,10 +171,8 @@
 {
 	struct kgsl_timestamp_event_fence priv;
 	struct kgsl_context *context;
-	struct sync_pt *pt;
-	struct sync_fence *fence = NULL;
+	struct kgsl_sync_fence *kfence = NULL;
 	int ret = -EINVAL;
-	char fence_name[sizeof(fence->name)] = {};
 	unsigned int cur;
 
 	priv.fence_fd = -1;
@@ -178,23 +188,10 @@
 	if (test_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv))
 		goto out;
 
-	pt = kgsl_sync_pt_create(context->timeline, context, timestamp);
-	if (pt == NULL) {
-		KGSL_DRV_CRIT_RATELIMIT(device, "kgsl_sync_pt_create failed\n");
-		ret = -ENOMEM;
-		goto out;
-	}
-	snprintf(fence_name, sizeof(fence_name),
-		"%s-pid-%d-ctx-%d-ts-%d",
-		device->name, current->group_leader->pid,
-		context_id, timestamp);
-
-
-	fence = sync_fence_create(fence_name, pt);
-	if (fence == NULL) {
-		/* only destroy pt when not added to fence */
-		kgsl_sync_pt_destroy(pt);
-		KGSL_DRV_CRIT_RATELIMIT(device, "sync_fence_create failed\n");
+	kfence = kgsl_sync_fence_create(context, timestamp);
+	if (kfence == NULL) {
+		KGSL_DRV_CRIT_RATELIMIT(device,
+					"kgsl_sync_fence_create failed\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -218,7 +215,7 @@
 
 	if (timestamp_cmp(cur, timestamp) >= 0) {
 		ret = 0;
-		kgsl_sync_timeline_signal(context->timeline, cur);
+		kgsl_sync_timeline_signal(context->ktimeline, cur);
 	} else {
 		ret = _add_fence_event(device, context, timestamp);
 		if (ret)
@@ -229,43 +226,50 @@
 		ret = -EFAULT;
 		goto out;
 	}
-	sync_fence_install(fence, priv.fence_fd);
+	fd_install(priv.fence_fd, kfence->sync_file->file);
+
 out:
 	kgsl_context_put(context);
 	if (ret) {
 		if (priv.fence_fd >= 0)
 			put_unused_fd(priv.fence_fd);
 
-		if (fence)
-			sync_fence_put(fence);
+		if (kfence) {
+			/*
+			 * Put the refcount of sync file. This will release
+			 * kfence->fence as well.
+			 */
+			fput(kfence->sync_file->file);
+		}
 	}
 	return ret;
 }
 
-static unsigned int kgsl_sync_get_timestamp(
-	struct kgsl_sync_timeline *ktimeline, enum kgsl_timestamp_type type)
+static unsigned int kgsl_sync_fence_get_timestamp(
+					struct kgsl_sync_timeline *ktimeline,
+					enum kgsl_timestamp_type type)
 {
 	unsigned int ret = 0;
-	struct kgsl_context *context;
 
 	if (ktimeline->device == NULL)
 		return 0;
 
-	context = kgsl_context_get(ktimeline->device,
-			ktimeline->context_id);
+	kgsl_readtimestamp(ktimeline->device, ktimeline->context, type, &ret);
 
-	if (context)
-		kgsl_readtimestamp(ktimeline->device, context, type, &ret);
-
-	kgsl_context_put(context);
 	return ret;
 }
 
-static void kgsl_sync_timeline_value_str(struct sync_timeline *sync_timeline,
-					 char *str, int size)
+static void kgsl_sync_timeline_value_str(struct fence *fence,
+					char *str, int size)
 {
-	struct kgsl_sync_timeline *ktimeline =
-		(struct kgsl_sync_timeline *) sync_timeline;
+	struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence;
+	struct kgsl_sync_timeline *ktimeline = kfence->parent;
+
+	unsigned int timestamp_retired;
+	unsigned int timestamp_queued;
+
+	if (!kref_get_unless_zero(&ktimeline->kref))
+		return;
 
 	/*
 	 * This callback can be called before the device and spinlock are
@@ -274,55 +278,37 @@
 	 * timestamp of the context will be reported as 0, which is correct
 	 * because the context and timeline are just getting initialized.
 	 */
-	unsigned int timestamp_retired = kgsl_sync_get_timestamp(ktimeline,
-		KGSL_TIMESTAMP_RETIRED);
-	unsigned int timestamp_queued = kgsl_sync_get_timestamp(ktimeline,
-		KGSL_TIMESTAMP_QUEUED);
+	timestamp_retired = kgsl_sync_fence_get_timestamp(ktimeline,
+					KGSL_TIMESTAMP_RETIRED);
+	timestamp_queued = kgsl_sync_fence_get_timestamp(ktimeline,
+					KGSL_TIMESTAMP_QUEUED);
 
 	snprintf(str, size, "%u queued:%u retired:%u",
 		ktimeline->last_timestamp,
 		timestamp_queued, timestamp_retired);
+
+	kgsl_sync_timeline_put(ktimeline);
 }
 
-static void kgsl_sync_pt_value_str(struct sync_pt *sync_pt,
-				   char *str, int size)
+static void kgsl_sync_fence_value_str(struct fence *fence, char *str, int size)
 {
-	struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) sync_pt;
+	struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence;
 
-	snprintf(str, size, "%u", kpt->timestamp);
+	snprintf(str, size, "%u", kfence->timestamp);
 }
 
-static int kgsl_sync_fill_driver_data(struct sync_pt *sync_pt, void *data,
-					int size)
+static const char *kgsl_sync_fence_driver_name(struct fence *fence)
 {
-	struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) sync_pt;
-
-	if (size < sizeof(kpt->timestamp))
-		return -ENOMEM;
-
-	memcpy(data, &kpt->timestamp, sizeof(kpt->timestamp));
-	return sizeof(kpt->timestamp);
+	return "kgsl-timeline";
 }
 
-static void kgsl_sync_timeline_release_obj(struct sync_timeline *sync_timeline)
+static const char *kgsl_sync_timeline_name(struct fence *fence)
 {
-	/*
-	 * Make sure to free the timeline only after destroy flag is set.
-	 * This is to avoid further accessing to the timeline from KGSL and
-	 * also to catch any unbalanced kref of timeline.
-	 */
-	BUG_ON(sync_timeline && (sync_timeline->destroyed != true));
+	struct kgsl_sync_fence *kfence = (struct kgsl_sync_fence *)fence;
+	struct kgsl_sync_timeline *ktimeline = kfence->parent;
+
+	return ktimeline->name;
 }
-static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
-	.driver_name = "kgsl-timeline",
-	.dup = kgsl_sync_pt_dup,
-	.has_signaled = kgsl_sync_pt_has_signaled,
-	.compare = kgsl_sync_pt_compare,
-	.timeline_value_str = kgsl_sync_timeline_value_str,
-	.pt_value_str = kgsl_sync_pt_value_str,
-	.fill_driver_data = kgsl_sync_fill_driver_data,
-	.release_obj = kgsl_sync_timeline_release_obj,
-};
 
 int kgsl_sync_timeline_create(struct kgsl_context *context)
 {
@@ -333,7 +319,11 @@
 	 * name, process id, and context id. This makes it possible to
 	 * identify the context of a timeline in the sync dump.
 	 */
-	char ktimeline_name[sizeof(context->timeline->name)] = {};
+	char ktimeline_name[sizeof(ktimeline->name)] = {};
+
+	/* Put context when timeline is released */
+	if (!_kgsl_context_get(context))
+		return -ENOENT;
 
 	snprintf(ktimeline_name, sizeof(ktimeline_name),
 		"%s_%.15s(%d)-%.15s(%d)-%d",
@@ -341,116 +331,162 @@
 		current->group_leader->comm, current->group_leader->pid,
 		current->comm, current->pid, context->id);
 
-	context->timeline = sync_timeline_create(&kgsl_sync_timeline_ops,
-		(int) sizeof(struct kgsl_sync_timeline), ktimeline_name);
-	if (context->timeline == NULL)
-		return -EINVAL;
+	ktimeline = kzalloc(sizeof(*ktimeline), GFP_KERNEL);
+	if (ktimeline == NULL) {
+		kgsl_context_put(context);
+		return -ENOMEM;
+	}
 
-	ktimeline = (struct kgsl_sync_timeline *) context->timeline;
+	kref_init(&ktimeline->kref);
+	strlcpy(ktimeline->name, ktimeline_name, KGSL_TIMELINE_NAME_LEN);
+	ktimeline->fence_context = fence_context_alloc(1);
 	ktimeline->last_timestamp = 0;
-	ktimeline->device = context->device;
-	ktimeline->context_id = context->id;
-
+	INIT_LIST_HEAD(&ktimeline->child_list_head);
 	spin_lock_init(&ktimeline->lock);
+	ktimeline->device = context->device;
+	ktimeline->context = context;
+
+	context->ktimeline = ktimeline;
+
 	return 0;
 }
 
-static void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
-	unsigned int timestamp)
+static void kgsl_sync_timeline_signal(struct kgsl_sync_timeline *ktimeline,
+					unsigned int timestamp)
 {
-	struct kgsl_sync_timeline *ktimeline =
-		(struct kgsl_sync_timeline *) timeline;
+	unsigned long flags;
+	struct kgsl_sync_fence *kfence, *next;
 
-	spin_lock(&ktimeline->lock);
+	kref_get(&ktimeline->kref);
+
+	spin_lock_irqsave(&ktimeline->lock, flags);
 	if (timestamp_cmp(timestamp, ktimeline->last_timestamp) > 0)
 		ktimeline->last_timestamp = timestamp;
-	spin_unlock(&ktimeline->lock);
 
-	sync_timeline_signal(timeline);
+	list_for_each_entry_safe(kfence, next, &ktimeline->child_list_head,
+				child_list) {
+		if (fence_is_signaled_locked(&kfence->fence)) {
+			list_del(&kfence->child_list);
+			fence_put(&kfence->fence);
+		}
+	}
+
+	spin_unlock_irqrestore(&ktimeline->lock, flags);
+	kgsl_sync_timeline_put(ktimeline);
 }
 
 void kgsl_sync_timeline_destroy(struct kgsl_context *context)
 {
-	sync_timeline_destroy(context->timeline);
+	kfree(context->ktimeline);
 }
 
-static void kgsl_sync_callback(struct sync_fence *fence,
-	struct sync_fence_waiter *waiter)
+static void kgsl_sync_timeline_release(struct kref *kref)
 {
-	struct kgsl_sync_fence_waiter *kwaiter =
-		(struct kgsl_sync_fence_waiter *) waiter;
-	kwaiter->func(kwaiter->priv);
-	sync_fence_put(kwaiter->fence);
-	kfree(kwaiter);
+	struct kgsl_sync_timeline *ktimeline =
+		container_of(kref, struct kgsl_sync_timeline, kref);
+
+	/*
+	 * Only put the context refcount here. The context destroy function
+	 * will call kgsl_sync_timeline_destroy() to kfree it
+	 */
+	kgsl_context_put(ktimeline->context);
 }
 
-struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline)
+{
+	if (ktimeline)
+		kref_put(&ktimeline->kref, kgsl_sync_timeline_release);
+}
+
+static const struct fence_ops kgsl_sync_fence_ops = {
+	.get_driver_name = kgsl_sync_fence_driver_name,
+	.get_timeline_name = kgsl_sync_timeline_name,
+	.enable_signaling = kgsl_enable_signaling,
+	.signaled = kgsl_sync_fence_has_signaled,
+	.wait = fence_default_wait,
+	.release = kgsl_sync_fence_release,
+
+	.fence_value_str = kgsl_sync_fence_value_str,
+	.timeline_value_str = kgsl_sync_timeline_value_str,
+};
+
+static void kgsl_sync_fence_callback(struct fence *fence, struct fence_cb *cb)
+{
+	struct kgsl_sync_fence_cb *kcb = (struct kgsl_sync_fence_cb *)cb;
+
+	kcb->func(kcb->priv);
+	fence_put(kcb->fence);
+	kfree(kcb);
+}
+
+struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
 	void (*func)(void *priv), void *priv)
 {
-	struct kgsl_sync_fence_waiter *kwaiter;
-	struct sync_fence *fence;
+	struct kgsl_sync_fence_cb *kcb;
+	struct fence *fence;
 	int status;
 
-	fence = sync_fence_fdget(fd);
+	fence = sync_file_get_fence(fd);
 	if (fence == NULL)
 		return ERR_PTR(-EINVAL);
 
-	/* create the waiter */
-	kwaiter = kzalloc(sizeof(*kwaiter), GFP_ATOMIC);
-	if (kwaiter == NULL) {
-		sync_fence_put(fence);
+	/* create the callback */
+	kcb = kzalloc(sizeof(*kcb), GFP_ATOMIC);
+	if (kcb == NULL) {
+		fence_put(fence);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	kwaiter->fence = fence;
-	kwaiter->priv = priv;
-	kwaiter->func = func;
-
-	strlcpy(kwaiter->name, fence->name, sizeof(kwaiter->name));
-
-	sync_fence_waiter_init((struct sync_fence_waiter *) kwaiter,
-		kgsl_sync_callback);
+	kcb->fence = fence;
+	kcb->priv = priv;
+	kcb->func = func;
 
 	/* if status then error or signaled */
-	status = sync_fence_wait_async(fence,
-		(struct sync_fence_waiter *) kwaiter);
+	status = fence_add_callback(fence, &kcb->fence_cb,
+				kgsl_sync_fence_callback);
+
 	if (status) {
-		kfree(kwaiter);
-		sync_fence_put(fence);
-		if (status < 0)
-			kwaiter = ERR_PTR(status);
+		kfree(kcb);
+		if (fence_is_signaled(fence))
+			kcb = NULL;
 		else
-			kwaiter = NULL;
+			kcb = ERR_PTR(status);
+		fence_put(fence);
 	}
 
-	return kwaiter;
+	return kcb;
 }
 
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *kwaiter)
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb)
 {
-	if (kwaiter == NULL)
+	if (kcb == NULL)
 		return 0;
 
-	if (sync_fence_cancel_async(kwaiter->fence,
-		(struct sync_fence_waiter *) kwaiter) == 0) {
-		sync_fence_put(kwaiter->fence);
-		kfree(kwaiter);
+	if (fence_remove_callback(kcb->fence, &kcb->fence_cb)) {
+		fence_put(kcb->fence);
+		kfree(kcb);
 		return 1;
 	}
 	return 0;
 }
 
-#ifdef CONFIG_ONESHOT_SYNC
-
-#include "oneshot_sync.h"
-
 struct kgsl_syncsource {
 	struct kref refcount;
+	char name[KGSL_TIMELINE_NAME_LEN];
 	int id;
 	struct kgsl_process_private *private;
-	struct oneshot_sync_timeline *oneshot;
+	struct list_head child_list_head;
+	spinlock_t lock;
 };
 
+struct kgsl_syncsource_fence {
+	struct fence fence;
+	struct kgsl_syncsource *parent;
+	struct list_head child_list;
+};
+
+static const struct fence_ops kgsl_syncsource_fence_ops;
+
 long kgsl_ioctl_syncsource_create(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data)
 {
@@ -459,7 +495,10 @@
 	int ret = -EINVAL;
 	int id = 0;
 	struct kgsl_process_private *private = dev_priv->process_priv;
-	char name[32];
+	char name[KGSL_TIMELINE_NAME_LEN];
+
+	if (!kgsl_process_private_get(private))
+		return ret;
 
 	syncsource = kzalloc(sizeof(*syncsource), GFP_KERNEL);
 	if (syncsource == NULL) {
@@ -470,14 +509,11 @@
 	snprintf(name, sizeof(name), "kgsl-syncsource-pid-%d",
 			current->group_leader->pid);
 
-	syncsource->oneshot = oneshot_timeline_create(name);
-	if (syncsource->oneshot == NULL) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	kref_init(&syncsource->refcount);
+	strlcpy(syncsource->name, name, KGSL_TIMELINE_NAME_LEN);
 	syncsource->private = private;
+	INIT_LIST_HEAD(&syncsource->child_list_head);
+	spin_lock_init(&syncsource->lock);
 
 	idr_preload(GFP_KERNEL);
 	spin_lock(&private->syncsource_lock);
@@ -495,8 +531,7 @@
 
 out:
 	if (ret) {
-		if (syncsource && syncsource->oneshot)
-			oneshot_timeline_destroy(syncsource->oneshot);
+		kgsl_process_private_put(private);
 		kfree(syncsource);
 	}
 
@@ -528,13 +563,8 @@
 
 	struct kgsl_process_private *private = syncsource->private;
 
-	spin_lock(&private->syncsource_lock);
-	if (syncsource->id != 0) {
-		idr_remove(&private->syncsource_idr, syncsource->id);
-		syncsource->id = 0;
-	}
-	oneshot_timeline_destroy(syncsource->oneshot);
-	spin_unlock(&private->syncsource_lock);
+	/* Done with process private. Release the refcount */
+	kgsl_process_private_put(private);
 
 	kfree(syncsource);
 }
@@ -545,6 +575,33 @@
 		kref_put(&syncsource->refcount, kgsl_syncsource_destroy);
 }
 
+void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
+				struct kgsl_syncsource *syncsource)
+{
+	struct kgsl_syncsource_fence *sfence, *next;
+
+	spin_lock(&private->syncsource_lock);
+	if (syncsource->id != 0) {
+		idr_remove(&private->syncsource_idr, syncsource->id);
+		syncsource->id = 0;
+	}
+	spin_unlock(&private->syncsource_lock);
+
+	/* Signal all fences to release any callbacks */
+	spin_lock(&syncsource->lock);
+
+	list_for_each_entry_safe(sfence, next, &syncsource->child_list_head,
+				child_list) {
+		fence_signal_locked(&sfence->fence);
+		list_del_init(&sfence->child_list);
+	}
+
+	spin_unlock(&syncsource->lock);
+
+	/* put reference from syncsource creation */
+	kgsl_syncsource_put(syncsource);
+}
+
 long kgsl_ioctl_syncsource_destroy(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data)
 {
@@ -554,19 +611,12 @@
 
 	spin_lock(&private->syncsource_lock);
 	syncsource = idr_find(&private->syncsource_idr, param->id);
-
-	if (syncsource) {
-		idr_remove(&private->syncsource_idr, param->id);
-		syncsource->id = 0;
-	}
-
 	spin_unlock(&private->syncsource_lock);
 
 	if (syncsource == NULL)
 		return -EINVAL;
 
-	/* put reference from syncsource creation */
-	kgsl_syncsource_put(syncsource);
+	kgsl_syncsource_cleanup(private, syncsource);
 	return 0;
 }
 
@@ -576,21 +626,34 @@
 	struct kgsl_syncsource_create_fence *param = data;
 	struct kgsl_syncsource *syncsource = NULL;
 	int ret = -EINVAL;
-	struct sync_fence *fence = NULL;
+	struct kgsl_syncsource_fence *sfence = NULL;
+	struct sync_file *sync_file = NULL;
 	int fd = -1;
-	char name[32];
 
-
+	/*
+	 * Take a refcount that is released when the fence is released
+	 * (or if fence can't be added to the syncsource).
+	 */
 	syncsource = kgsl_syncsource_get(dev_priv->process_priv,
 					param->id);
 	if (syncsource == NULL)
 		goto out;
 
-	snprintf(name, sizeof(name), "kgsl-syncsource-pid-%d-%d",
-			current->group_leader->pid, syncsource->id);
+	sfence = kzalloc(sizeof(*sfence), GFP_KERNEL);
+	if (sfence == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sfence->parent = syncsource;
 
-	fence = oneshot_fence_create(syncsource->oneshot, name);
-	if (fence == NULL) {
+	/* Use a new fence context for each fence */
+	fence_init(&sfence->fence, &kgsl_syncsource_fence_ops,
+		&syncsource->lock, fence_context_alloc(1), 1);
+
+	sync_file = sync_file_create(&sfence->fence);
+
+	if (sync_file == NULL) {
+		KGSL_DRV_ERR(dev_priv->device, "Create sync_file failed\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -602,18 +665,46 @@
 	}
 	ret = 0;
 
-	sync_fence_install(fence, fd);
+	fd_install(fd, sync_file->file);
 
 	param->fence_fd = fd;
+
+	spin_lock(&syncsource->lock);
+	list_add_tail(&sfence->child_list, &syncsource->child_list_head);
+	spin_unlock(&syncsource->lock);
 out:
 	if (ret) {
-		if (fence)
-			sync_fence_put(fence);
-		if (fd >= 0)
-			put_unused_fd(fd);
-
+		if (sync_file)
+			fput(sync_file->file);
+		else if (sfence)
+			fence_put(&sfence->fence);
+		kgsl_syncsource_put(syncsource);
 	}
-	kgsl_syncsource_put(syncsource);
+
+	return ret;
+}
+
+static int kgsl_syncsource_signal(struct kgsl_syncsource *syncsource,
+					struct fence *fence)
+{
+	struct kgsl_syncsource_fence *sfence, *next;
+	int ret = -EINVAL;
+
+	spin_lock(&syncsource->lock);
+
+	list_for_each_entry_safe(sfence, next, &syncsource->child_list_head,
+				child_list) {
+		if (fence == &sfence->fence) {
+			fence_signal_locked(fence);
+			list_del_init(&sfence->child_list);
+
+			ret = 0;
+			break;
+		}
+	}
+
+	spin_unlock(&syncsource->lock);
+
 	return ret;
 }
 
@@ -623,24 +714,106 @@
 	int ret = -EINVAL;
 	struct kgsl_syncsource_signal_fence *param = data;
 	struct kgsl_syncsource *syncsource = NULL;
-	struct sync_fence *fence = NULL;
+	struct fence *fence = NULL;
 
 	syncsource = kgsl_syncsource_get(dev_priv->process_priv,
 					param->id);
 	if (syncsource == NULL)
 		goto out;
 
-	fence = sync_fence_fdget(param->fence_fd);
+	fence = sync_file_get_fence(param->fence_fd);
 	if (fence == NULL) {
 		ret = -EBADF;
 		goto out;
 	}
 
-	ret = oneshot_fence_signal(syncsource->oneshot, fence);
+	ret = kgsl_syncsource_signal(syncsource, fence);
 out:
 	if (fence)
-		sync_fence_put(fence);
-	kgsl_syncsource_put(syncsource);
+		fence_put(fence);
+	if (syncsource)
+		kgsl_syncsource_put(syncsource);
 	return ret;
 }
-#endif
+
+static void kgsl_syncsource_fence_release(struct fence *fence)
+{
+	struct kgsl_syncsource_fence *sfence =
+			(struct kgsl_syncsource_fence *)fence;
+
+	/* Signal if it's not signaled yet */
+	kgsl_syncsource_signal(sfence->parent, fence);
+
+	/* Release the refcount on the syncsource */
+	kgsl_syncsource_put(sfence->parent);
+
+	kfree(sfence);
+}
+
+static const char *kgsl_syncsource_get_timeline_name(struct fence *fence)
+{
+	struct kgsl_syncsource_fence *sfence =
+			(struct kgsl_syncsource_fence *)fence;
+	struct kgsl_syncsource *syncsource = sfence->parent;
+
+	return syncsource->name;
+}
+
+static bool kgsl_syncsource_enable_signaling(struct fence *fence)
+{
+	return true;
+}
+
+static const char *kgsl_syncsource_driver_name(struct fence *fence)
+{
+	return "kgsl-syncsource-timeline";
+}
+
+static const struct fence_ops kgsl_syncsource_fence_ops = {
+	.get_driver_name = kgsl_syncsource_driver_name,
+	.get_timeline_name = kgsl_syncsource_get_timeline_name,
+	.enable_signaling = kgsl_syncsource_enable_signaling,
+	.wait = fence_default_wait,
+	.release = kgsl_syncsource_fence_release,
+};
+
+void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
+				char *fence_str, int len)
+{
+	struct fence *fence;
+	char *ptr = fence_str;
+	char *last = fence_str + len;
+
+	if (!handle || !handle->fence) {
+		snprintf(fence_str, len, "NULL");
+		return;
+	}
+
+	fence = handle->fence;
+
+	ptr += snprintf(ptr, last - ptr, "%s %s",
+			fence->ops->get_timeline_name(fence),
+			fence->ops->get_driver_name(fence));
+	if (ptr >= last)
+		return;
+
+	if (fence->ops->timeline_value_str &&
+		fence->ops->fence_value_str) {
+		char value[64];
+		bool success;
+
+		fence->ops->fence_value_str(fence, value, sizeof(value));
+		success = !!strlen(value);
+
+		if (success) {
+			ptr += snprintf(ptr, last - ptr, ": %s", value);
+			if (ptr >= last)
+				return;
+
+			fence->ops->timeline_value_str(fence, value,
+							sizeof(value));
+			ptr += snprintf(ptr, last - ptr, " / %s", value);
+		}
+	}
+}
+
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 68b4246..dc84c54 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2014, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,83 +13,88 @@
 #ifndef __KGSL_SYNC_H
 #define __KGSL_SYNC_H
 
-#include "sync.h"
+#include "sync_file.h"
 #include "kgsl_device.h"
 
+#define KGSL_TIMELINE_NAME_LEN 32
+
+/**
+ * struct kgsl_sync_timeline - A sync timeline associated with a kgsl context
+ * @kref: Refcount to keep the struct alive until all its fences are released
+ * @name: String to describe this timeline
+ * @fence_context: Used by the fence driver to identify fences belonging to
+ *		   this context
+ * @child_list_head: List head for all fences on this timeline
+ * @lock: Spinlock to protect this timeline
+ * @last_timestamp: Last timestamp when signaling fences
+ * @device: kgsl device
+ * @context: kgsl context
+ */
 struct kgsl_sync_timeline {
-	struct sync_timeline timeline;
+	struct kref kref;
+	char name[KGSL_TIMELINE_NAME_LEN];
+
+	u64 fence_context;
+
+	struct list_head child_list_head;
+
+	spinlock_t lock;
 	unsigned int last_timestamp;
 	struct kgsl_device *device;
-	u32 context_id;
-	spinlock_t lock;
+	struct kgsl_context *context;
 };
 
-struct kgsl_sync_pt {
-	struct sync_pt pt;
-	struct kgsl_context *context;
+/**
+ * struct kgsl_sync_fence - A struct containing a fence and other data
+ *				associated with it
+ * @fence: The fence struct
+ * @sync_file: Pointer to the sync file
+ * @parent: Pointer to the kgsl sync timeline this fence is on
+ * @child_list: List of fences on the same timeline
+ * @context_id: kgsl context id
+ * @timestamp: Context timestamp that this fence is associated with
+ */
+struct kgsl_sync_fence {
+	struct fence fence;
+	struct sync_file *sync_file;
+	struct kgsl_sync_timeline *parent;
+	struct list_head child_list;
+	u32 context_id;
 	unsigned int timestamp;
 };
 
-struct kgsl_sync_fence_waiter {
-	struct sync_fence_waiter waiter;
-	struct sync_fence *fence;
-	char name[32];
-	void (*func)(void *priv);
+/**
+ * struct kgsl_sync_fence_cb - Used for fence callbacks
+ * @fence_cb: Fence callback struct
+ * @fence: Pointer to the fence for which the callback is done
+ * @priv: Private data for the callback
+ * @func: Pointer to the kgsl function to call
+ */
+struct kgsl_sync_fence_cb {
+	struct fence_cb fence_cb;
+	struct fence *fence;
 	void *priv;
+	void (*func)(void *priv);
 };
 
 struct kgsl_syncsource;
 
-#if defined(CONFIG_SYNC)
+#if defined(CONFIG_SYNC_FILE)
 int kgsl_add_fence_event(struct kgsl_device *device,
 	u32 context_id, u32 timestamp, void __user *data, int len,
 	struct kgsl_device_private *owner);
+
 int kgsl_sync_timeline_create(struct kgsl_context *context);
+
 void kgsl_sync_timeline_destroy(struct kgsl_context *context);
-struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
-	void (*func)(void *priv), void *priv);
-int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter);
-static inline void kgsl_sync_fence_log(struct sync_fence *fence)
-{
-}
-#else
-static inline int kgsl_add_fence_event(struct kgsl_device *device,
-	u32 context_id, u32 timestamp, void __user *data, int len,
-	struct kgsl_device_private *owner)
-{
-	return -EINVAL;
-}
 
-static inline int kgsl_sync_timeline_create(struct kgsl_context *context)
-{
-	context->timeline = NULL;
-	return 0;
-}
+void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline);
 
-static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
-{
-}
+struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
+					void (*func)(void *priv), void *priv);
 
-static inline struct
-kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
-	void (*func)(void *priv), void *priv)
-{
-	return NULL;
-}
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
 
-static inline int
-kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter)
-{
-	return 1;
-}
-
-static inline void kgsl_sync_fence_log(struct sync_fence *fence)
-{
-}
-
-#endif
-
-#ifdef CONFIG_ONESHOT_SYNC
 long kgsl_ioctl_syncsource_create(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data);
 long kgsl_ioctl_syncsource_destroy(struct kgsl_device_private *dev_priv,
@@ -101,7 +106,46 @@
 
 void kgsl_syncsource_put(struct kgsl_syncsource *syncsource);
 
+void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
+					struct kgsl_syncsource *syncsource);
+
+void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
+				char *fence_str, int len);
+
 #else
+static inline int kgsl_add_fence_event(struct kgsl_device *device,
+	u32 context_id, u32 timestamp, void __user *data, int len,
+	struct kgsl_device_private *owner)
+{
+	return -EINVAL;
+}
+
+static inline int kgsl_sync_timeline_create(struct kgsl_context *context)
+{
+	context->ktimeline = NULL;
+	return 0;
+}
+
+static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
+{
+}
+
+static inline void kgsl_sync_timeline_put(struct kgsl_sync_timeline *ktimeline)
+{
+}
+
+static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
+					void (*func)(void *priv), void *priv)
+{
+	return NULL;
+}
+
+static inline int
+kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb)
+{
+	return 1;
+}
+
 static inline long
 kgsl_ioctl_syncsource_create(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data)
@@ -134,6 +178,18 @@
 {
 
 }
-#endif
+
+static inline void kgsl_syncsource_cleanup(struct kgsl_process_private *private,
+					struct kgsl_syncsource *syncsource)
+{
+
+}
+
+/* Stub for !CONFIG_SYNC_FILE; must be static inline in a header */
+static inline void kgsl_dump_fence(struct kgsl_sync_fence_cb *handle,
+				char *fence_str, int len)
+{
+}
+
+#endif /* CONFIG_SYNC_FILE */
 
 #endif /* __KGSL_SYNC_H */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 8e38a24..d7b4363 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/qcom-geni-se.h>
 
 #define SE_I2C_TX_TRANS_LEN		(0x26C)
@@ -57,6 +58,7 @@
 	struct i2c_adapter adap;
 	struct completion xfer;
 	struct i2c_msg *cur;
+	struct se_geni_rsc i2c_rsc;
 	int cur_wr;
 	int cur_rd;
 };
@@ -153,7 +155,15 @@
 	gi2c->err = 0;
 	gi2c->cur = &msgs[0];
 	reinit_completion(&gi2c->xfer);
-	enable_irq(gi2c->irq);
+	ret = pm_runtime_get_sync(gi2c->dev);
+	if (ret < 0) {
+		dev_err(gi2c->dev, "error turning SE resources:%d\n", ret);
+		pm_runtime_put_noidle(gi2c->dev);
+		/* Set device in suspended since resume failed */
+		pm_runtime_set_suspended(gi2c->dev);
+		return ret;
+	}
+	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 	qcom_geni_i2c_conf(gi2c->base, 0, 2);
 	se_config_packing(gi2c->base, 8, 4, true);
 	dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
@@ -206,7 +216,7 @@
 	}
 	if (ret == 0)
 		ret = i;
-	disable_irq(gi2c->irq);
+	pm_runtime_put_sync(gi2c->dev);
 	gi2c->cur = NULL;
 	gi2c->err = 0;
 	dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
@@ -239,10 +249,54 @@
 	if (!res)
 		return -EINVAL;
 
+	gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.m_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		return ret;
+	}
+
+	gi2c->i2c_rsc.s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(gi2c->i2c_rsc.s_ahb_clk)) {
+		ret = PTR_ERR(gi2c->i2c_rsc.s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		return ret;
+	}
+
 	gi2c->base = devm_ioremap_resource(gi2c->dev, res);
 	if (IS_ERR(gi2c->base))
 		return PTR_ERR(gi2c->base);
 
+	gi2c->i2c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_pinctrl);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_active =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_active);
+		return ret;
+	}
+	gi2c->i2c_rsc.geni_gpio_sleep =
+		pinctrl_lookup_state(gi2c->i2c_rsc.geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(gi2c->i2c_rsc.geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified\n");
+		ret = PTR_ERR(gi2c->i2c_rsc.geni_gpio_sleep);
+		return ret;
+	}
+
 	gi2c->irq = platform_get_irq(pdev, 0);
 	if (gi2c->irq < 0) {
 		dev_err(gi2c->dev, "IRQ error for i2c-geni\n");
@@ -266,8 +320,9 @@
 
 	strlcpy(gi2c->adap.name, "Geni-I2C", sizeof(gi2c->adap.name));
 
+	pm_runtime_set_suspended(gi2c->dev);
+	pm_runtime_enable(gi2c->dev);
 	i2c_add_adapter(&gi2c->adap);
-	geni_se_init(gi2c->base, FIFO_MODE, 0xF, 0x10);
 
 	return 0;
 }
@@ -276,27 +331,67 @@
 {
 	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
 
-	disable_irq(gi2c->irq);
+	pm_runtime_disable(gi2c->dev);
 	i2c_del_adapter(&gi2c->adap);
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int geni_i2c_suspend(struct device *device)
+static int geni_i2c_resume_noirq(struct device *device)
 {
 	return 0;
 }
 
-static int geni_i2c_resume(struct device *device)
+#ifdef CONFIG_PM
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	disable_irq(gi2c->irq);
+	se_geni_resources_off(&gi2c->i2c_rsc);
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	int ret;
+	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+
+	ret = se_geni_resources_on(&gi2c->i2c_rsc);
+	if (ret)
+		return ret;
+
+	enable_irq(gi2c->irq);
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
+{
+	if (!pm_runtime_status_suspended(device))
+		return -EBUSY;
+	return 0;
+}
+#else
+static int geni_i2c_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static int geni_i2c_suspend_noirq(struct device *device)
 {
 	return 0;
 }
 #endif
 
 static const struct dev_pm_ops geni_i2c_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(
-		geni_i2c_suspend,
-		geni_i2c_resume)
+	.suspend_noirq		= geni_i2c_suspend_noirq,
+	.resume_noirq		= geni_i2c_resume_noirq,
+	.runtime_suspend	= geni_i2c_runtime_suspend,
+	.runtime_resume		= geni_i2c_runtime_resume,
 };
 
 static const struct of_device_id geni_i2c_dt_match[] = {
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 83768e8..2178266 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -429,6 +429,7 @@
 	while (muxc->num_adapters) {
 		struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
 		struct i2c_mux_priv *priv = adap->algo_data;
+		struct device_node *np = adap->dev.of_node;
 
 		muxc->adapter[muxc->num_adapters] = NULL;
 
@@ -438,6 +439,7 @@
 
 		sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
 		i2c_del_adapter(adap);
+		of_node_put(np);
 		kfree(priv);
 	}
 }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4cab29e..11bfa27 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3141,9 +3141,11 @@
 	if (err)
 		goto err_rsrc;
 
-	err = mlx5_ib_alloc_q_counters(dev);
-	if (err)
-		goto err_odp;
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
+		err = mlx5_ib_alloc_q_counters(dev);
+		if (err)
+			goto err_odp;
+	}
 
 	err = ib_register_device(&dev->ib_dev, NULL);
 	if (err)
@@ -3171,7 +3173,8 @@
 	ib_unregister_device(&dev->ib_dev);
 
 err_q_cnt:
-	mlx5_ib_dealloc_q_counters(dev);
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		mlx5_ib_dealloc_q_counters(dev);
 
 err_odp:
 	mlx5_ib_odp_remove_one(dev);
@@ -3201,7 +3204,8 @@
 
 	mlx5_remove_roce_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
-	mlx5_ib_dealloc_q_counters(dev);
+	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+		mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);
 	mlx5_ib_odp_remove_one(dev);
 	destroy_dev_resources(&dev->devr);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 5a10a02..37dfe0a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -548,6 +548,8 @@
 static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
 static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
 
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain);
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
@@ -574,6 +576,17 @@
 	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
 }
 
+static bool is_iommu_pt_coherent(struct arm_smmu_domain *smmu_domain)
+{
+	if (smmu_domain->attributes &
+			(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT))
+		return true;
+	else if (smmu_domain->smmu && smmu_domain->smmu->dev)
+		return smmu_domain->smmu->dev->archdata.dma_coherent;
+	else
+		return false;
+}
+
 static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
 {
 	return (smmu_domain->secure_vmid != VMID_INVAL);
@@ -1424,8 +1437,10 @@
 
 	/* SCTLR */
 	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE;
-	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
-	    !stage1)
+
+	if ((!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) &&
+	     !(smmu_domain->attributes & (1 << DOMAIN_ATTR_EARLY_MAP))) ||
+								!stage1)
 		reg |= SCTLR_M;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
@@ -1599,6 +1614,8 @@
 
 	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
 		quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
+	if (is_iommu_pt_coherent(smmu_domain))
+		quirks |= IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT;
 
 	/* Dynamic domains must set cbndx through domain attribute */
 	if (!dynamic) {
@@ -2578,6 +2595,22 @@
 				   (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_EARLY_MAP:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_EARLY_MAP));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
+		if (!smmu_domain->smmu)
+			return -ENODEV;
+		*((int *)data) = is_iommu_pt_coherent(smmu_domain);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
+		*((int *)data) = !!(smmu_domain->attributes
+			& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
+		ret = 0;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -2716,6 +2749,45 @@
 				1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
 		ret = 0;
 		break;
+	case DOMAIN_ATTR_EARLY_MAP: {
+		int early_map = *((int *)data);
+
+		ret = 0;
+		if (early_map) {
+			smmu_domain->attributes |=
+						1 << DOMAIN_ATTR_EARLY_MAP;
+		} else {
+			if (smmu_domain->smmu)
+				ret = arm_smmu_enable_s1_translations(
+								smmu_domain);
+
+			if (!ret)
+				smmu_domain->attributes &=
+					~(1 << DOMAIN_ATTR_EARLY_MAP);
+		}
+		break;
+	}
+	case DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT: {
+		int force_coherent = *((int *)data);
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change force coherent attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (force_coherent)
+			smmu_domain->attributes |=
+			    1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
+		else
+			smmu_domain->attributes &=
+			    ~(1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT);
+
+		ret = 0;
+		break;
+	}
+
 	default:
 		ret = -ENODEV;
 	}
@@ -2738,6 +2810,44 @@
 	return iommu_fwspec_add_ids(dev, &fwid, 1);
 }
 
+static int arm_smmu_enable_s1_translations(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *cb_base;
+	u32 reg;
+	int ret;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	ret = arm_smmu_power_on(smmu->pwr);
+	if (ret)
+		return ret;
+
+	reg = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	reg |= SCTLR_M;
+
+	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
+	arm_smmu_power_off(smmu->pwr);
+	return ret;
+}
+
+static bool arm_smmu_is_iova_coherent(struct iommu_domain *domain,
+					 dma_addr_t iova)
+{
+	bool ret;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return false;
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->is_iova_coherent(ops, iova);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+	return ret;
+}
+
 static void arm_smmu_trigger_fault(struct iommu_domain *domain,
 					unsigned long flags)
 {
@@ -2867,6 +2977,7 @@
 	.tlbi_domain		= arm_smmu_tlbi_domain,
 	.enable_config_clocks	= arm_smmu_enable_config_clocks,
 	.disable_config_clocks	= arm_smmu_disable_config_clocks,
+	.is_iova_coherent	= arm_smmu_is_iova_coherent,
 };
 
 #define IMPL_DEF1_MICRO_MMU_CTRL	0
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index c5cbdb7..34c7381 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -25,6 +25,36 @@
 #define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
 #define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
 
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
+				 bool coherent)
+{
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		return pgprot_noncached(prot);
+	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
+		return pgprot_writecombine(prot);
+	return prot;
+}
+
+static int __get_iommu_pgprot(unsigned long attrs, int prot,
+			      bool coherent)
+{
+	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
+		prot |= IOMMU_NOEXEC;
+	if ((attrs & DMA_ATTR_STRONGLY_ORDERED))
+		prot |= IOMMU_MMIO;
+	if (coherent)
+		prot |= IOMMU_CACHE;
+
+	return prot;
+}
+
+static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
+				  void *start, void *end)
+{
+	if (!mapping->is_smmu_pt_coherent)
+		dmac_clean_range(start, end);
+}
+
 /*
  * Checks if the allocated range (ending at @end) covered the upcoming
  * stale bit.  We don't need to know exactly where the range starts since
@@ -282,11 +312,11 @@
 	int nptes = len >> FAST_PAGE_SHIFT;
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
 	int prot = __fast_dma_direction_to_prot(dir);
+	bool is_coherent = is_device_dma_coherent(dev);
 
-	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
-		prot |= IOMMU_MMIO;
+	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
 
-	if (!skip_sync)
+	if (!skip_sync && !is_coherent)
 		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
 					   offset_from_phys_to_map, size, dir);
 
@@ -302,8 +332,7 @@
 	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
 		goto fail_free_iova;
 
-	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
-		dmac_clean_range(pmd, pmd + nptes);
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
 
 	spin_unlock_irqrestore(&mapping->lock, flags);
 	return iova + offset_from_phys_to_map;
@@ -327,18 +356,42 @@
 	int nptes = len >> FAST_PAGE_SHIFT;
 	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
+	bool is_coherent = is_device_dma_coherent(dev);
 
-	if (!skip_sync)
+	if (!skip_sync && !is_coherent)
 		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
 
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(pmd, len);
-	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
-		dmac_clean_range(pmd, pmd + nptes);
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
 	__fast_smmu_free_iova(mapping, iova, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
+static void fast_smmu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+	if (!is_device_dma_coherent(dev))
+		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void fast_smmu_sync_single_for_device(struct device *dev,
+		dma_addr_t iova, size_t size, enum dma_data_direction dir)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+
+	if (!is_device_dma_coherent(dev))
+		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
 static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
 			    int nents, enum dma_data_direction dir,
 			    unsigned long attrs)
@@ -354,6 +407,18 @@
 	WARN_ON_ONCE(1);
 }
 
+static void fast_smmu_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+	WARN_ON_ONCE(1);
+}
+
+static void fast_smmu_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+	WARN_ON_ONCE(1);
+}
+
 static void __fast_smmu_free_pages(struct page **pages, int count)
 {
 	int i;
@@ -403,9 +468,12 @@
 	struct sg_mapping_iter miter;
 	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
 	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
-	pgprot_t remap_prot = pgprot_writecombine(PAGE_KERNEL);
+	bool is_coherent = is_device_dma_coherent(dev);
+	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	struct page **pages;
 
+	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
+
 	*handle = DMA_ERROR_CODE;
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
@@ -420,7 +488,7 @@
 		goto out_free_pages;
 	}
 
-	if (!(prot & IOMMU_CACHE)) {
+	if (!is_coherent) {
 		/*
 		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
 		 * sufficient here, so skip it by using the "wrong" direction.
@@ -453,7 +521,7 @@
 			/* TODO: unwind previously successful mappings */
 			goto out_free_iova;
 		}
-		dmac_clean_range(ptep, ptep + nptes);
+		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
 		iova_iter += miter.length;
 	}
 	sg_miter_stop(&miter);
@@ -475,7 +543,7 @@
 	spin_lock_irqsave(&mapping->lock, flags);
 	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
 	av8l_fast_unmap_public(ptep, size);
-	dmac_clean_range(ptep, ptep + count);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
 out_free_iova:
 	__fast_smmu_free_iova(mapping, dma_addr, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
@@ -508,7 +576,7 @@
 	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(ptep, size);
-	dmac_clean_range(ptep, ptep + count);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 	__fast_smmu_free_pages(pages, count);
@@ -522,9 +590,10 @@
 	unsigned long uaddr = vma->vm_start;
 	struct page **pages;
 	int i, nr_pages, ret = 0;
+	bool coherent = is_device_dma_coherent(dev);
 
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     coherent);
 	area = find_vm_area(cpu_addr);
 	if (!area)
 		return -EINVAL;
@@ -591,8 +660,12 @@
 	.mmap = fast_smmu_mmap_attrs,
 	.map_page = fast_smmu_map_page,
 	.unmap_page = fast_smmu_unmap_page,
+	.sync_single_for_cpu = fast_smmu_sync_single_for_cpu,
+	.sync_single_for_device = fast_smmu_sync_single_for_device,
 	.map_sg = fast_smmu_map_sg,
 	.unmap_sg = fast_smmu_unmap_sg,
+	.sync_sg_for_cpu = fast_smmu_sync_sg_for_cpu,
+	.sync_sg_for_device = fast_smmu_sync_sg_for_device,
 	.dma_supported = fast_smmu_dma_supported,
 	.mapping_error = fast_smmu_mapping_error,
 };
@@ -679,6 +752,10 @@
 	}
 	mapping->fast->pgtbl_pmds = info.pmds;
 
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				  &mapping->fast->is_smmu_pt_coherent))
+		return -EINVAL;
+
 	mapping->fast->notifier.notifier_call = fast_smmu_notify;
 	av8l_register_notify(&mapping->fast->notifier);
 
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 37429c4..393e20c4 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -69,9 +69,12 @@
 #define ARM_LPAE_PGD_IDX(l,d)						\
 	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
 
+#define ARM_LPAE_LVL_MASK(l, d)						\
+	((l) == ARM_LPAE_START_LVL(d) ?	(1 << (d)->pgd_bits) - 1 :	\
+					(1 << (d)->bits_per_level) - 1)
 #define ARM_LPAE_LVL_IDX(a,l,d)						\
 	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
-	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
+	 ARM_LPAE_LVL_MASK(l, d))
 
 /* Calculate the block/page mapping size at level l for pagetable in d. */
 #define ARM_LPAE_BLOCK_SIZE(l,d)					\
@@ -106,6 +109,7 @@
 #define ARM_LPAE_PTE_AP_RW		(((arm_lpae_iopte)1) << 6)
 #define ARM_LPAE_PTE_AP_PRIV_RO		(((arm_lpae_iopte)2) << 6)
 #define ARM_LPAE_PTE_AP_RO		(((arm_lpae_iopte)3) << 6)
+#define ARM_LPAE_PTE_ATTRINDX_MASK	0x7
 #define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
 #define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
 
@@ -196,6 +200,7 @@
 	struct io_pgtable	iop;
 
 	int			levels;
+	unsigned int		pgd_bits;
 	size_t			pgd_size;
 	unsigned long		pg_shift;
 	unsigned long		bits_per_level;
@@ -283,6 +288,16 @@
 	return (dma_addr_t)virt_to_phys(pages);
 }
 
+static inline void pgtable_dma_sync_single_for_device(
+				struct io_pgtable_cfg *cfg,
+				dma_addr_t addr, size_t size,
+				enum dma_data_direction dir)
+{
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT))
+		dma_sync_single_for_device(cfg->iommu_dev, addr, size,
+								dir);
+}
+
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 				    struct io_pgtable_cfg *cfg, void *cookie)
 {
@@ -332,7 +347,7 @@
 	*ptep = pte;
 
 	if (!selftest_running)
-		dma_sync_single_for_device(cfg->iommu_dev,
+		pgtable_dma_sync_single_for_device(cfg,
 					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
 }
@@ -406,8 +421,7 @@
 
 		if (lvl == MAP_STATE_LVL) {
 			if (ms->pgtable)
-				dma_sync_single_for_device(
-					cfg->iommu_dev,
+				pgtable_dma_sync_single_for_device(cfg,
 					__arm_lpae_dma_addr(ms->pte_start),
 					ms->num_pte * sizeof(*ptep),
 					DMA_TO_DEVICE);
@@ -425,8 +439,7 @@
 			 * mapping.  Flush out the previous page mappings.
 			 */
 			if (ms->pgtable)
-				dma_sync_single_for_device(
-					cfg->iommu_dev,
+				pgtable_dma_sync_single_for_device(cfg,
 					__arm_lpae_dma_addr(ms->pte_start),
 					ms->num_pte * sizeof(*ptep),
 					DMA_TO_DEVICE);
@@ -597,9 +610,10 @@
 	}
 
 	if (ms.pgtable)
-		dma_sync_single_for_device(
-			cfg->iommu_dev, __arm_lpae_dma_addr(ms.pte_start),
-			ms.num_pte * sizeof(*ptep), DMA_TO_DEVICE);
+		pgtable_dma_sync_single_for_device(cfg,
+			__arm_lpae_dma_addr(ms.pte_start),
+			ms.num_pte * sizeof(*ms.pte_start),
+			DMA_TO_DEVICE);
 
 	return mapped;
 
@@ -736,7 +750,7 @@
 		table += tl_offset;
 
 		memset(table, 0, table_len);
-		dma_sync_single_for_device(iop->cfg.iommu_dev,
+		pgtable_dma_sync_single_for_device(&iop->cfg,
 					   __arm_lpae_dma_addr(table),
 					   table_len, DMA_TO_DEVICE);
 
@@ -804,39 +818,91 @@
 	return unmapped;
 }
 
-static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
-					 unsigned long iova)
+static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
+				unsigned long iova, int *plvl_ret,
+				arm_lpae_iopte *ptep_ret)
 {
-	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte pte, *ptep = data->pgd;
-	int lvl = ARM_LPAE_START_LVL(data);
+	*plvl_ret = ARM_LPAE_START_LVL(data);
+	*ptep_ret = 0;
 
 	do {
 		/* Valid IOPTE pointer? */
 		if (!ptep)
-			return 0;
+			return -EINVAL;
 
 		/* Grab the IOPTE we're interested in */
-		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
+		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, *plvl_ret, data));
 
 		/* Valid entry? */
 		if (!pte)
-			return 0;
+			return -EINVAL;
 
 		/* Leaf entry? */
-		if (iopte_leaf(pte,lvl))
+		if (iopte_leaf(pte, *plvl_ret))
 			goto found_translation;
 
 		/* Take it to the next level */
 		ptep = iopte_deref(pte, data);
-	} while (++lvl < ARM_LPAE_MAX_LEVELS);
+	} while (++(*plvl_ret) < ARM_LPAE_MAX_LEVELS);
 
 	/* Ran out of page tables to walk */
-	return 0;
+	return -EINVAL;
 
 found_translation:
-	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
-	return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
+	*ptep_ret = pte;
+	return 0;
+}
+
+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+	phys_addr_t phys = 0;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte)) {
+		iova &= ((1 << ARM_LPAE_LVL_SHIFT(lvl, data)) - 1);
+		phys = ((phys_addr_t)iopte_to_pfn(pte, data)
+				<< data->pg_shift) | iova;
+	}
+
+	return phys;
+}
+
+static bool __arm_lpae_is_iova_coherent(struct arm_lpae_io_pgtable *data,
+				    arm_lpae_iopte *ptep)
+{
+	if (data->iop.fmt == ARM_64_LPAE_S1 ||
+	    data->iop.fmt == ARM_32_LPAE_S1) {
+		int attr_idx = (*ptep & (ARM_LPAE_PTE_ATTRINDX_MASK <<
+					ARM_LPAE_PTE_ATTRINDX_SHIFT)) >>
+					ARM_LPAE_PTE_ATTRINDX_SHIFT;
+		if ((attr_idx == ARM_LPAE_MAIR_ATTR_IDX_CACHE) &&
+		    ((*ptep & ARM_LPAE_PTE_SH_IS) ||
+		     (*ptep & ARM_LPAE_PTE_SH_OS)))
+			return true;
+	} else {
+		if (*ptep & ARM_LPAE_PTE_MEMATTR_OIWB)
+			return true;
+	}
+
+	return false;
+}
+
+static bool arm_lpae_is_iova_coherent(struct io_pgtable_ops *ops,
+					 unsigned long iova)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte pte;
+	int lvl;
+	bool ret = false;
+
+	if (!arm_lpae_iova_to_pte(data, iova, &lvl, &pte))
+		ret = __arm_lpae_is_iova_coherent(data, &pte);
+
+	return ret;
 }
 
 static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
@@ -908,6 +974,7 @@
 
 	/* Calculate the actual size of our pgd (without concatenation) */
 	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
+	data->pgd_bits = pgd_bits;
 	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
 
 	data->iop.ops = (struct io_pgtable_ops) {
@@ -915,6 +982,7 @@
 		.map_sg		= arm_lpae_map_sg,
 		.unmap		= arm_lpae_unmap,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
+		.is_iova_coherent = arm_lpae_is_iova_coherent,
 	};
 
 	return data;
@@ -931,7 +999,7 @@
 		return NULL;
 
 	/* TCR */
-	if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+	if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
 		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
 			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index bc8e956..85fe317 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -198,7 +198,7 @@
 		| AV8L_FAST_PTE_TYPE_PAGE
 		| AV8L_FAST_PTE_AF
 		| AV8L_FAST_PTE_nG
-		| AV8L_FAST_PTE_SH_IS;
+		| AV8L_FAST_PTE_SH_OS;
 
 	if (prot & IOMMU_MMIO)
 		pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV
@@ -437,10 +437,14 @@
 		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
 			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
+	else if (cfg->quirks & IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT)
+		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_IRGN0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
 	else
-		reg = (AV8L_FAST_TCR_SH_IS << AV8L_FAST_TCR_SH0_SHIFT) |
-		      (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
-		      (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);
+		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);
 
 	reg |= AV8L_FAST_TCR_TG0_4K;
 
@@ -579,6 +583,7 @@
 	av8l_fast_iopte *pmds;
 
 	cfg = (struct io_pgtable_cfg) {
+		.quirks = 0,
 		.tlb = &dummy_tlb_ops,
 		.ias = 32,
 		.oas = 32,
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 33e0879..1599121 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -78,12 +78,16 @@
 	 * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes
 	 *	set in TCR for the page table walker. Use attributes specified
 	 *	by the upstream hw instead.
+	 *
+	 * IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT: Set the page table as
+	 *	coherent.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
 	#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(4)
+	#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT BIT(5)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
@@ -143,6 +147,9 @@
 		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
 				    unsigned long iova);
+	bool (*is_iova_coherent)(struct io_pgtable_ops *ops,
+				unsigned long iova);
+
 };
 
 /**
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 504d46c..45ffb40 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -69,6 +69,8 @@
 		return "DOMAIN_ATTR_S1_BYPASS";
 	case DOMAIN_ATTR_FAST:
 		return "DOMAIN_ATTR_FAST";
+	case DOMAIN_ATTR_EARLY_MAP:
+		return "DOMAIN_ATTR_EARLY_MAP";
 	default:
 		return "Unknown attr!";
 	}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ba7d6f1..c90fbf0 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1286,6 +1286,14 @@
 	return domain->ops->iova_to_phys_hard(domain, iova);
 }
 
+bool iommu_is_iova_coherent(struct iommu_domain *domain, dma_addr_t iova)
+{
+	if (unlikely(domain->ops->is_iova_coherent == NULL))
+		return 0;
+
+	return domain->ops->is_iova_coherent(domain, iova);
+}
+
 size_t iommu_pgsize(unsigned long pgsize_bitmap,
 		    unsigned long addr_merge, size_t size)
 {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee30..acb9d25 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1598,6 +1598,14 @@
 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
 }
 
+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+	struct its_node *its = data;
+
+	/* On QDF2400, the size of the ITE is 16Bytes */
+	its->ite_size = 16;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
@@ -1615,6 +1623,14 @@
 		.init	= its_enable_quirk_cavium_23144,
 	},
 #endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+	{
+		.desc	= "ITS: QDF2400 erratum 0065",
+		.iidr	= 0x00001070, /* QDF2400 ITS rev 1.x */
+		.mask	= 0xffffffff,
+		.init	= its_enable_quirk_qdf2400_e0065,
+	},
+#endif
 	{
 	}
 };
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 08809a9..01e553c 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -677,7 +677,8 @@
 #define VIN_FLASH_MIN_UV	3300000LL
 static int qpnp_flash_led_calc_max_current(struct qpnp_flash_led *led)
 {
-	int ocv_uv, rbatt_uohm, ibat_now, voltage_hdrm_mv, rc;
+	int ocv_uv, ibat_now, voltage_hdrm_mv, rc;
+	int rbatt_uohm = 0;
 	int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
 	int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv, vph_flash_vdip;
 
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index 1e24c79..c31d2e1 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -2076,11 +2076,12 @@
 	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
 	if (!wled)
 		return -ENOMEM;
-		wled->regmap = dev_get_regmap(pdev->dev.parent, NULL);
-		if (!wled->regmap) {
-			dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
-			return -EINVAL;
-		}
+
+	wled->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!wled->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
 
 	wled->pdev = pdev;
 
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
index bcd5fb9..5b114cb 100644
--- a/drivers/mailbox/qti-tcs.c
+++ b/drivers/mailbox/qti-tcs.c
@@ -670,7 +670,7 @@
 	int d = drv->drv_id;
 	struct tcs_mbox *tcs;
 	int i, slot, offset, m, n;
-	struct tcs_response *resp;
+	struct tcs_response *resp = NULL;
 
 	tcs = get_tcs_for_msg(drv, msg);
 	if (IS_ERR(tcs))
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 1e66909..3b53f34 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -516,15 +516,15 @@
 	  If unsure, say N.
 
 config DM_ANDROID_VERITY
-	tristate "Android verity target support"
-	depends on DM_VERITY
+	bool "Android verity target support"
+	depends on DM_VERITY=y
 	depends on X509_CERTIFICATE_PARSER
 	depends on SYSTEM_TRUSTED_KEYRING
 	depends on PUBLIC_KEY_ALGO_RSA
 	depends on KEYS
 	depends on ASYMMETRIC_KEY_TYPE
 	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
-	depends on MD_LINEAR
+	depends on MD_LINEAR=y
 	select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
 	---help---
 	  This device-mapper target is virtually a VERITY target. This
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
index 16ce734..3b19017 100644
--- a/drivers/md/dm-android-verity.c
+++ b/drivers/md/dm-android-verity.c
@@ -115,6 +115,12 @@
 	return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
 }
 
+static inline bool is_unlocked(void)
+{
+	static const char unlocked[] = "orange";
+
+	return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked));
+}
 
 static int table_extract_mpi_array(struct public_key_signature *pks,
 				const void *data, size_t len)
@@ -651,6 +657,28 @@
 	return err;
 }
 
+static int create_linear_device(struct dm_target *ti, dev_t dev,
+				char *target_device)
+{
+	u64 device_size = 0;
+	int err = find_size(dev, &device_size);
+
+	if (err) {
+		DMERR("error finding bdev size");
+		handle_error();
+		return err;
+	}
+
+	ti->len = device_size;
+	err = add_as_linear_device(ti, target_device);
+	if (err) {
+		handle_error();
+		return err;
+	}
+	verity_enabled = false;
+	return 0;
+}
+
 /*
  * Target parameters:
  *	<key id>	Key id of the public key in the system keyring.
@@ -674,7 +702,6 @@
 	struct fec_ecc_metadata uninitialized_var(ecc);
 	char buf[FEC_ARG_LENGTH], *buf_ptr;
 	unsigned long long tmpll;
-	u64  uninitialized_var(device_size);
 
 	if (argc == 1) {
 		/* Use the default keyid */
@@ -702,23 +729,8 @@
 		return -EINVAL;
 	}
 
-	if (is_eng()) {
-		err = find_size(dev, &device_size);
-		if (err) {
-			DMERR("error finding bdev size");
-			handle_error();
-			return err;
-		}
-
-		ti->len = device_size;
-		err = add_as_linear_device(ti, target_device);
-		if (err) {
-			handle_error();
-			return err;
-		}
-		verity_enabled = false;
-		return 0;
-	}
+	if (is_eng())
+		return create_linear_device(ti, dev, target_device);
 
 	strreplace(key_id, '#', ' ');
 
@@ -733,6 +745,11 @@
 	err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
 
 	if (err) {
+		/* Allow invalid metadata when the device is unlocked */
+		if (is_unlocked()) {
+			DMWARN("Allow invalid metadata when unlocked");
+			return create_linear_device(ti, dev, target_device);
+		}
 		DMERR("Error while extracting metadata");
 		handle_error();
 		goto free_metadata;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ef7bf1d..628ba00 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -972,10 +972,61 @@
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
+/*
+ * Flush current->bio_list when the target map method blocks.
+ * This fixes deadlocks in snapshot and possibly in other targets.
+ */
+struct dm_offload {
+	struct blk_plug plug;
+	struct blk_plug_cb cb;
+};
+
+static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
+{
+	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
+	struct bio_list list;
+	struct bio *bio;
+
+	INIT_LIST_HEAD(&o->cb.list);
+
+	if (unlikely(!current->bio_list))
+		return;
+
+	list = *current->bio_list;
+	bio_list_init(current->bio_list);
+
+	while ((bio = bio_list_pop(&list))) {
+		struct bio_set *bs = bio->bi_pool;
+		if (unlikely(!bs) || bs == fs_bio_set) {
+			bio_list_add(current->bio_list, bio);
+			continue;
+		}
+
+		spin_lock(&bs->rescue_lock);
+		bio_list_add(&bs->rescue_list, bio);
+		queue_work(bs->rescue_workqueue, &bs->rescue_work);
+		spin_unlock(&bs->rescue_lock);
+	}
+}
+
+static void dm_offload_start(struct dm_offload *o)
+{
+	blk_start_plug(&o->plug);
+	o->cb.callback = flush_current_bio_list;
+	list_add(&o->cb.list, &current->plug->cb_list);
+}
+
+static void dm_offload_end(struct dm_offload *o)
+{
+	list_del(&o->cb.list);
+	blk_finish_plug(&o->plug);
+}
+
 static void __map_bio(struct dm_target_io *tio)
 {
 	int r;
 	sector_t sector;
+	struct dm_offload o;
 	struct bio *clone = &tio->clone;
 	struct dm_target *ti = tio->ti;
 
@@ -988,7 +1039,11 @@
 	 */
 	atomic_inc(&tio->io->io_count);
 	sector = clone->bi_iter.bi_sector;
+
+	dm_offload_start(&o);
 	r = ti->type->map(ti, clone);
+	dm_offload_end(&o);
+
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
 
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
index bd925f4..faba819 100644
--- a/drivers/media/platform/msm/camera/Makefile
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -1,3 +1,5 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
 
 obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
new file mode 100644
index 0000000..6f9525e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
new file mode 100644
index 0000000..78cd9d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "cam_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/* Relaxed 32-bit register write; -EINVAL on a NULL mapping. */
+int cam_io_w(uint32_t data, void __iomem *addr)
+{
+	if (addr == NULL)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	writel_relaxed(data, addr);
+	return 0;
+}
+
+/*
+ * Register write preceded by a full write barrier: all prior writes are
+ * ordered before this store.  The store itself is relaxed; a caller
+ * needing THIS write flushed must issue its own wmb() afterwards (see
+ * the header comment in cam_io_util.h).
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CDBG("0x%pK %08x\n", addr, data);
+	/* Ensure previous writes are done */
+	wmb();
+	writel_relaxed(data, addr);
+
+	return 0;
+}
+
+/*
+ * Relaxed 32-bit register read; logs and returns the value.  A NULL
+ * @addr is reported via pr_err and reads back as 0.
+ */
+uint32_t cam_io_r(void __iomem *addr)
+{
+	uint32_t val;
+
+	if (addr == NULL) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	val = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, val);
+	return val;
+}
+
+/*
+ * Register read preceded by a read barrier: prior reads are ordered
+ * before this load.  Returns 0 (not an error code) when @addr is NULL,
+ * so callers cannot distinguish a NULL map from a register reading 0.
+ */
+uint32_t cam_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		pr_err("Invalid args\n");
+		return 0;
+	}
+
+	/* Ensure previous read is done */
+	rmb();
+	data = readl_relaxed(addr);
+	CDBG("0x%pK %08x\n", addr, data);
+
+	return data;
+}
+
+/*
+ * Copy @len bytes from @src_addr to the register block at @dest_addr as
+ * whole 32-bit relaxed writes; any remainder beyond a multiple of 4
+ * bytes is ignored.
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	uint32_t idx, words;
+	uint32_t *dst = (uint32_t *) dest_addr;
+	uint32_t *src = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	words = len / 4;
+	for (idx = 0; idx < words; idx++) {
+		CDBG("0x%pK %08x\n", dst, *src);
+		writel_relaxed(*src++, dst++);
+	}
+
+	return 0;
+}
+
+/*
+ * Word-wise copy to register space with a single write barrier issued
+ * once before the loop (prior writes are ordered before this block of
+ * relaxed stores).  @len is in bytes; only len/4 whole words are
+ * copied, any remainder is ignored.
+ */
+int  cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CDBG("%pK %pK %d\n", dest_addr, src_addr, len);
+
+	/*
+	 * Do not use cam_io_w_mb to avoid double wmb() after a write
+	 * and before the next write.
+	 */
+	wmb();
+	for (i = 0; i < (len / 4); i++) {
+		CDBG("0x%pK %08x\n", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+/*
+ * Poll @addr until it reads back @wait_data, retrying up to @retry
+ * times with an optional usleep_range(@min_usecs, @max_usecs) between
+ * reads.  Returns 0 on match, -EINVAL on timeout or NULL @addr.
+ * May sleep; do not call from atomic context.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs)
+{
+	uint32_t val, attempts = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	val = readl_relaxed(addr);
+	while (val != wait_data) {
+		if (attempts++ >= retry)
+			break;
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		val = readl_relaxed(addr);
+	}
+
+	if (attempts > retry) {
+		pr_debug("Poll failed by value\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Poll (*addr & @bmask) until it equals @wait_data, retrying up to
+ * @retry times with an optional usleep_range() between reads.  Returns
+ * 0 on match, -EINVAL on timeout or NULL @addr.  May sleep.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	/* on timeout the final cnt++ leaves cnt == retry + 1 */
+	if (cnt > retry) {
+		pr_debug("Poll failed with mask\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Write @len values from @data to the single register @addr, in order,
+ * using relaxed writes.
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int idx;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (idx = 0; idx < len; idx++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			idx, len, data[idx], addr);
+		writel_relaxed(data[idx], addr);
+	}
+
+	return 0;
+}
+
+/*
+ * Write @len values from @data to the single register @addr with a
+ * wmb() before EVERY store, so each value is ordered after all writes
+ * that preceded it.
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr =%pK\n",
+			i, len, data[i], addr);
+		/* Ensure previous writes are done */
+		wmb();
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+#define __OFFSET(__i)   (data[__i][0])
+#define __VAL(__i)      (data[__i][1])
+/*
+ * Write a table of {offset, value} pairs relative to @addr_base using
+ * relaxed writes.  @len is the number of pairs.
+ */
+int cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int idx;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	for (idx = 0; idx < len; idx++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			idx, len, __VAL(idx), addr_base, __OFFSET(idx));
+		writel_relaxed(__VAL(idx), addr_base + __OFFSET(idx));
+	}
+
+	return 0;
+}
+
+/*
+ * Write a table of {offset, value} pairs relative to @addr_base with a
+ * single wmb() before the loop; the individual stores are relaxed, so
+ * (per the header documentation) the offsets must be distinct
+ * registers.
+ */
+int cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	/* Ensure write is done */
+	wmb();
+	for (i = 0; i < len; i++) {
+		CDBG("i= %d len =%d val=%x addr_base =%pK reg=%x\n",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    (__start + (__i * BYTES_PER_REGISTER))
+/*
+ * Dump @size 32-bit registers starting at @start_offset from
+ * @base_addr, NUM_REGISTER_PER_LINE values per pr_err line.
+ *
+ * Fix: the original passed snprintf sizes of 12 and 9, equal to the
+ * formatted lengths ("0x%08x: " is 12 chars, "%08x " is 9), so the
+ * output was truncated by one char and a NUL was left embedded in
+ * line_str, cutting every printed line after the offset prefix.  The
+ * sizes must include room for the terminator; the NUL is then
+ * overwritten by the next snprintf.  Worst-case line length is
+ * 12 + 4 * 9 + 1 = 49 bytes, well within line_str[128].
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	CDBG("addr=%pK offset=0x%x size=%d\n", base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			/* 12 chars + NUL */
+			snprintf(p_str, 13, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 12;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		/* 9 chars + NUL */
+		snprintf(p_str, 10, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			pr_err("%s\n", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	/* flush a trailing partial line */
+	if (line_str[0] != '\0')
+		pr_err("%s\n", line_str);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
new file mode 100644
index 0000000..e4f73ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
@@ -0,0 +1,239 @@
+/* Copyright (c) 2011-2014, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_IO_UTIL_H_
+#define _CAM_IO_UTIL_H_
+
+#include <linux/types.h>
+
+/**
+ * cam_io_w()
+ *
+ * @brief:              Camera IO util for register write
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_w_mb()
+ *
+ * @brief:              Camera IO util for register write with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_r()
+ *
+ * @brief:              Camera IO util for register read
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r(void __iomem *addr);
+
+/**
+ * cam_io_r_mb()
+ *
+ * @brief:              Camera IO util for register read with memory barrier.
+ *                      Memory Barrier is only before the read to ensure the
+ *                      order. If need to ensure this read is also complete
+ *                      call rmb() independently in the caller.
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r_mb(void __iomem *addr);
+
+/**
+ * cam_io_memcpy()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+		void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_memcpy_mb()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *                      with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_poll_value_wmask()
+ *
+ * @brief:              Poll register value with bitmask.
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until @bmask read from @addr matches this data
+ * @bmask:              Bit mask
+ * @retry:              Number of retry
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs);
+
+/**
+ * cam_io_poll_value()
+ *
+ * @brief:              Poll register value
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until value read from @addr matches this data
+ * @retry:              Number of retry
+ * @min_usecs:          Minimum time to wait for retry
+ * @max_usecs:          Maximum time to wait for retry
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs);
+
+/**
+ * cam_io_w_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address
+ *
+ * @data:               Block data to be written
+ * @addr:               Register offset to be written.
+ * @len:                Number of the data to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_mb_same_offset_block()
+ *
+ * @brief:              Write a block of data to same address with barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *
+ * @data:               Block data to be written
+ * @addr:               Register offset to be written.
+ * @len:                Number of the data to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Length of offset-value pair array to be written in
+ *                      number of uint32_t
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_w_mb_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *                      The OFFSETS NEED to be different because of the way
+ *                      barrier is used here.
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which offset will be added to
+ *                      get the register address
+ * @len:                Length of offset-value pair array to be written in
+ *                      number of uint32_t
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_dump()
+ *
+ * @brief:              Camera IO util for dumping a range of register
+ *
+ * @base_addr:          Start register address for the dumping
+ * @start_offset:       Start register offset for the dump
+ * @size:               Size specifying the range for dumping
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size);
+
+#endif /* _CAM_IO_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
new file mode 100644
index 0000000..d396d4f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -0,0 +1,598 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include "cam_soc_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+/*
+ * Enable the IRQ line described by @soc_info.  Returns -EINVAL for a
+ * NULL argument and -ENODEV when no IRQ resource was parsed from DT.
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	enable_irq(soc_info->irq_line->start);
+	return 0;
+}
+
+/*
+ * Disable the IRQ line described by @soc_info.  Returns -EINVAL for a
+ * NULL argument and -ENODEV when no IRQ resource was parsed from DT.
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		pr_err("Invalid arguments\n");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		pr_err("No IRQ line available\n");
+		return -ENODEV;
+	}
+
+	disable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+/*
+ * Set (optionally) and enable one clock.
+ *
+ * @clk_rate > 0:           round to the closest supported rate, set it,
+ *                          then prepare+enable;
+ * @clk_rate == INIT_RATE:  re-assert the clock's current rate (or the
+ *                          rounded minimum when the current rate is 0);
+ * any other negative rate (e.g. NO_SET_RATE): prepare+enable without
+ *                          touching the rate.
+ * A @clk_rate of 0 is rejected as -EINVAL by the argument check below.
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+	long clk_rate_round;
+
+	if (!clk || !clk_name || !clk_rate)
+		return -EINVAL;
+
+	CDBG("enable %s, clk %pK rate %d\n",
+		clk_name, clk, clk_rate);
+	if (clk_rate > 0) {
+		clk_rate_round = clk_round_rate(clk, clk_rate);
+		CDBG("new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round < 0) {
+			pr_err("%s: round failed for clock %s rc = %ld\n",
+				__func__, clk_name, clk_rate_round);
+			/* NOTE(review): long narrowed to int on return;
+			 * fine for small negative errnos. */
+			return clk_rate_round;
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	} else if (clk_rate == INIT_RATE) {
+		clk_rate_round = clk_get_rate(clk);
+		CDBG("init new_rate %ld\n", clk_rate_round);
+		if (clk_rate_round == 0) {
+			/* current rate unknown; fall back to the minimum */
+			clk_rate_round = clk_round_rate(clk, 0);
+			if (clk_rate_round <= 0) {
+				pr_err("round rate failed on %s\n", clk_name);
+				return clk_rate_round;
+			}
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			pr_err("set_rate failed on %s\n", clk_name);
+			return rc;
+		}
+	}
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		pr_err("enable failed for %s\n", clk_name);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* Disable and unprepare @clk; @clk_name is used only for logging. */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
+{
+	if (clk == NULL || clk_name == NULL)
+		return -EINVAL;
+
+	CDBG("disable %s\n", clk_name);
+	clk_disable_unprepare(clk);
+	return 0;
+}
+
+/**
+ * cam_soc_util_clk_enable_default()
+ *
+ * @brief:              Enable every clock listed in soc_info at its
+ *                      configured rate, rolling back already-enabled
+ *                      clocks on failure
+ *
+ * @soc_info:           device soc struct to be populated
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info)
+{
+	int idx, rc = 0;
+
+	for (idx = 0; idx < soc_info->num_clk; idx++) {
+		rc = cam_soc_util_clk_enable(soc_info->clk[idx],
+			soc_info->clk_name[idx], soc_info->clk_rate[idx]);
+		if (rc) {
+			/* unwind the clocks enabled so far, newest first */
+			while (--idx >= 0)
+				cam_soc_util_clk_disable(soc_info->clk[idx],
+					soc_info->clk_name[idx]);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_clk_disable_default()
+ *
+ * @brief:              Disable every clock in soc_info, in reverse
+ *                      order of enabling
+ *
+ * @soc_info:           device soc struct to be populated
+ *
+ * @return:             none
+ */
+static void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+{
+	int idx;
+
+	for (idx = soc_info->num_clk - 1; idx >= 0; idx--) {
+		CDBG("disable %s\n", soc_info->clk_name[idx]);
+		cam_soc_util_clk_disable(soc_info->clk[idx],
+			soc_info->clk_name[idx]);
+	}
+}
+
+/**
+ * cam_soc_util_get_dt_clk_info()
+ *
+ * @brief:              Parse the DT and populate the Clock properties:
+ *                      clock-names, clock-rates, and the optional
+ *                      src-clock-name that identifies the
+ *                      rate-controlling clock (src_clk_idx is -1 when
+ *                      absent)
+ *
+ * @soc_info:           device soc struct to be populated
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count;
+	int i, rc;
+	struct platform_device *pdev = NULL;
+	const char *src_clk_str = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	count = of_property_count_strings(of_node, "clock-names");
+	CDBG("count = %d\n", count);
+	if (count > CAM_SOC_MAX_CLK) {
+		pr_err("invalid count of clocks, count=%d", count);
+		return -EINVAL;
+	}
+	if (count <= 0) {
+		CDBG("No clock-names found\n");
+		soc_info->num_clk = 0;
+		return 0;
+	}
+	soc_info->num_clk = count;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(soc_info->clk_name[i]));
+		CDBG("clock-names[%d] = %s\n", i, soc_info->clk_name[i]);
+		if (rc) {
+			pr_err("i= %d count= %d reading clock-names failed\n",
+				i, count);
+			return rc;
+		}
+	}
+
+	rc = of_property_read_u32_array(of_node, "clock-rates",
+		soc_info->clk_rate, count);
+	if (rc) {
+		pr_err("reading clock-rates failed");
+		return rc;
+	}
+
+	/*
+	 * Fix: read index 0, not the stale loop index `i` (which equals
+	 * count after the loop above), so the property is actually found.
+	 */
+	rc = of_property_read_string_index(of_node, "src-clock-name",
+				0, &src_clk_str);
+	if (rc) {
+		CDBG("No src_clk_str found\n");
+		soc_info->src_clk_idx = -1;
+		src_clk_str = NULL;
+		rc = 0;
+	}
+
+	/*
+	 * Always normalize a DT rate of 0 to NO_SET_RATE; locate the
+	 * rate-controlling source clock only when one was named.
+	 */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk_rate[i] = (soc_info->clk_rate[i] == 0) ?
+			NO_SET_RATE : soc_info->clk_rate[i];
+		if (src_clk_str &&
+			(strcmp(soc_info->clk_name[i], src_clk_str) == 0)) {
+			soc_info->src_clk_idx = i;
+		}
+		CDBG("clk_rate[%d] = %d\n", i, soc_info->clk_rate[i]);
+	}
+
+	return rc;
+}
+
+/*
+ * Parse common camera DT properties into @soc_info: cell-index (stored
+ * into pdev->id), regulator names, memory-mapped register blocks with
+ * their camera-space base offsets, the (optional) IRQ resource, and
+ * clock information via cam_soc_util_get_dt_clk_info().
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count = 0, i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	if (rc) {
+		pr_err("device %s failed to read cell-index\n", pdev->name);
+		return rc;
+	}
+
+	/* regulators are optional; a missing list just means num_rgltr 0 */
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count <= 0) {
+		pr_err("no regulators found\n");
+		count = 0;
+	}
+	soc_info->num_rgltr = count;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		CDBG("rgltr_name[%d] = %s\n", i, soc_info->rgltr_name[i]);
+		if (rc) {
+			pr_err("no regulator resource at cnt=%d\n", i);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	/* register blocks are likewise optional as a set */
+	count = of_property_count_strings(of_node, "reg-names");
+	if (count <= 0) {
+		pr_err("no reg-names found\n");
+		count = 0;
+	}
+	soc_info->num_mem_block = count;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		rc = of_property_read_string_index(of_node, "reg-names", i,
+			&soc_info->mem_block_name[i]);
+		if (rc) {
+			pr_err("failed to read reg-names at %d\n", i);
+			return rc;
+		}
+		soc_info->mem_block[i] =
+			platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			soc_info->mem_block_name[i]);
+
+		if (!soc_info->mem_block[i]) {
+			pr_err("no mem resource by name %s\n",
+				soc_info->mem_block_name[i]);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	/* one camera-space base offset per register block */
+	rc = of_property_read_u32_array(of_node, "reg-cam-base",
+		soc_info->mem_block_cam_base, soc_info->num_mem_block);
+	if (rc) {
+		pr_err("Error reading register offsets\n");
+		return rc;
+	}
+
+	/* IRQ is optional: only a warning when the name is absent */
+	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
+		&soc_info->irq_name);
+	if (rc) {
+		pr_warn("No interrupt line present\n");
+	} else {
+		soc_info->irq_line = platform_get_resource_byname(pdev,
+			IORESOURCE_IRQ, soc_info->irq_name);
+		if (!soc_info->irq_line) {
+			pr_err("no irq resource\n");
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = cam_soc_util_get_dt_clk_info(soc_info);
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_regulator()
+ *
+ * @brief:              Look up a regulator by name for the device
+ *
+ * @pdev:               Platform device associated with regulator
+ * @reg:                Return pointer to be filled with regulator on success
+ * @rgltr_name:         Name of regulator to get
+ *
+ * @return:             0 for Success, negative value for failure
+ */
+static int cam_soc_util_get_regulator(struct platform_device *pdev,
+	struct regulator **reg, const char *rgltr_name)
+{
+	int rc = 0;
+
+	*reg = regulator_get(&pdev->dev, rgltr_name);
+	if (IS_ERR_OR_NULL(*reg)) {
+		rc = PTR_ERR(*reg);
+		if (!rc)
+			rc = -EINVAL;
+		pr_err("Regulator %s get failed %d\n", rgltr_name, rc);
+		*reg = NULL;
+	}
+	return rc;
+}
+
+/*
+ * Acquire the platform resources parsed by
+ * cam_soc_util_get_dt_properties(): ioremap each register block, get
+ * each regulator, request (and leave disabled) the IRQ, and get each
+ * clock.  The error labels below deliberately fall through: each
+ * unwind loop finishes with i == -1, which the next label detects to
+ * re-seed i and clean up its entire resource set.
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data)
+{
+	int i = 0, rc = 0;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		soc_info->reg_map[i].mem_base = ioremap(
+			soc_info->mem_block[i]->start,
+			resource_size(soc_info->mem_block[i]));
+		if (!soc_info->reg_map[i].mem_base) {
+			pr_err("i= %d base NULL\n", i);
+			rc = -ENOMEM;
+			goto unmap_base;
+		}
+		soc_info->reg_map[i].mem_cam_base =
+			soc_info->mem_block_cam_base[i];
+		soc_info->reg_map[i].size =
+			resource_size(soc_info->mem_block[i]);
+		soc_info->num_reg_map++;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = cam_soc_util_get_regulator(pdev, &soc_info->rgltr[i],
+			soc_info->rgltr_name[i]);
+		if (rc)
+			goto put_regulator;
+	}
+
+	if (soc_info->irq_line) {
+		rc = devm_request_irq(&pdev->dev, soc_info->irq_line->start,
+			handler, IRQF_TRIGGER_RISING,
+			soc_info->irq_name, irq_data);
+		if (rc < 0) {
+			pr_err("irq request fail\n");
+			rc = -EBUSY;
+			goto put_regulator;
+		}
+		/* leave disabled until cam_soc_util_irq_enable() */
+		disable_irq(soc_info->irq_line->start);
+	}
+
+	/* Get Clock */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = clk_get(&soc_info->pdev->dev,
+			soc_info->clk_name[i]);
+		/* NOTE(review): clk_get() returns ERR_PTR() on failure,
+		 * not NULL, so this check likely never fires — confirm
+		 * and consider IS_ERR(). */
+		if (!soc_info->clk[i]) {
+			pr_err("get failed for %s\n", soc_info->clk_name[i]);
+			rc = -ENOENT;
+			goto put_clk;
+		}
+	}
+
+	return rc;
+
+put_clk:
+	if (i == -1)
+		i = soc_info->num_clk;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->clk[i]) {
+			clk_put(soc_info->clk[i]);
+			soc_info->clk[i] = NULL;
+		}
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		/* NOTE(review): the IRQ was requested via
+		 * devm_request_irq() with dev_id == irq_data; free_irq()
+		 * here passes soc_info, so the dev_id mismatch means the
+		 * action may not be found, and a devm-managed IRQ is not
+		 * normally freed manually — confirm intent. */
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+put_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			/* NOTE(review): regulators here were only
+			 * regulator_get(), never enabled; disabling an
+			 * un-enabled regulator will warn — confirm. */
+			regulator_disable(soc_info->rgltr[i]);
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+unmap_base:
+	if (i == -1)
+		i = soc_info->num_reg_map;
+	for (i = i - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * Release everything acquired by
+ * cam_soc_util_request_platform_resource(): clocks, regulators,
+ * register mappings, and the IRQ line.
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int idx;
+	struct platform_device *pdev = NULL;
+
+	if (!soc_info || !soc_info->pdev)
+		return -EINVAL;
+
+	pdev = soc_info->pdev;
+
+	for (idx = soc_info->num_clk - 1; idx >= 0; idx--) {
+		clk_put(soc_info->clk[idx]);
+		soc_info->clk[idx] = NULL;
+	}
+
+	for (idx = soc_info->num_rgltr - 1; idx >= 0; idx--) {
+		if (soc_info->rgltr[idx]) {
+			regulator_put(soc_info->rgltr[idx]);
+			soc_info->rgltr[idx] = NULL;
+		}
+	}
+
+	for (idx = soc_info->num_reg_map - 1; idx >= 0; idx--) {
+		iounmap(soc_info->reg_map[idx].mem_base);
+		soc_info->reg_map[idx].mem_base = NULL;
+		soc_info->reg_map[idx].size = 0;
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		/* NOTE(review): dev_id here is soc_info but the IRQ was
+		 * requested with irq_data via devm_request_irq() —
+		 * confirm these cookies match. */
+		free_irq(soc_info->irq_line->start, soc_info);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable regulators, then (optionally) the default clocks, then
+ * (optionally) the IRQ, unwinding in reverse order on failure.
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq)
+{
+	int i, rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_enable(soc_info->rgltr[i]);
+		if (rc) {
+			pr_err("Regulator enable %s failed\n",
+				soc_info->rgltr_name[i]);
+			goto disable_regulator;
+		}
+	}
+
+	if (enable_clocks) {
+		rc = cam_soc_util_clk_enable_default(soc_info);
+		if (rc)
+			goto disable_regulator;
+	}
+
+	if (enable_irq) {
+		rc  = cam_soc_util_irq_enable(soc_info);
+		if (rc)
+			goto disable_clk;
+	}
+
+	return rc;
+
+disable_clk:
+	if (enable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+disable_regulator:
+	/*
+	 * On the clk/irq failure paths i == num_rgltr (the regulator
+	 * loop completed), so every regulator is disabled below; when
+	 * regulator_enable() itself failed, i indexes the failing
+	 * regulator and only the earlier ones are disabled.  The
+	 * i == -1 check is never true on these paths.
+	 */
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i])
+			regulator_disable(soc_info->rgltr[i]);
+	}
+
+	return rc;
+}
+
+/*
+ * Disable clocks, regulators, and optionally the IRQ for @soc_info.
+ * Regulator failures are logged per regulator but do not stop the
+ * remaining teardown; any error is reflected in the return value.
+ *
+ * Fix: the original OR-ed each regulator_disable() result into the
+ * accumulated rc and then tested rc, so once any regulator failed
+ * every later iteration logged a (spurious) failure.  Test each
+ * regulator's own result instead.
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disble_irq)
+{
+	int i, rc = 0, ret;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	if (disable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		ret = regulator_disable(soc_info->rgltr[i]);
+		if (ret) {
+			rc |= ret;
+			pr_err("Regulator disble %s failed\n",
+				soc_info->rgltr_name[i]);
+		}
+	}
+
+	if (disble_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
+	return rc;
+}
+
+/*
+ * Dump @size registers starting at @offset within register block
+ * @base_index of @soc_info via cam_io_dump().
+ *
+ * NOTE(review): cam_io_dump() treats @size as a count of 32-bit
+ * registers, but the bound check below compares (offset + size)
+ * directly against the map size (bytes) — confirm the intended units;
+ * a register count would need offset + size * 4 <= map size.
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size)
+{
+	void __iomem     *base_addr = NULL;
+
+	CDBG("base_idx %u size=%d\n", base_index, size);
+
+	if (!soc_info || base_index >= soc_info->num_reg_map ||
+		size <= 0 || (offset + size) >=
+		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
+		return -EINVAL;
+
+	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/*
+	 * All error checking already done above,
+	 * hence ignoring the return value below.
+	 */
+	cam_io_dump(base_addr, offset, size);
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
new file mode 100644
index 0000000..0baa9e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -0,0 +1,386 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAM_SOC_UTIL_H_
+#define _CAM_SOC_UTIL_H_
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "cam_io_util.h"
+
+#define NO_SET_RATE  -1
+#define INIT_RATE    -2
+
+/* maximum number of device block */
+#define CAM_SOC_MAX_BLOCK           4
+
+/* maximum number of device base */
+#define CAM_SOC_MAX_BASE            CAM_SOC_MAX_BLOCK
+
+/* maximum number of device regulator */
+#define CAM_SOC_MAX_REGULATOR       4
+
+/* maximum number of device clock */
+#define CAM_SOC_MAX_CLK             32
+
+/**
+ * struct cam_soc_reg_map:   Information about the mapped register space
+ *
+ * @mem_base:               Starting location of MAPPED register space
+ * @mem_cam_base:           Starting offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @size:                   Size of register space
+ **/
+struct cam_soc_reg_map {
+	void __iomem                   *mem_base;
+	uint32_t                        mem_cam_base;
+	resource_size_t                 size;
+};
+
+/**
+ * struct cam_hw_soc_info:  Soc information pertaining to specific instance of
+ *                          Camera hardware driver module
+ *
+ * @pdev:                   Platform device pointer
+ * @hw_version:             Camera device version
+ * @index:                  Instance id for the camera device
+ * @irq_name:               Name of the irq associated with the device
+ * @irq_line:               Irq resource
+ * @num_mem_block:          Number of entry in the "reg-names"
+ * @mem_block_name:         Array of the reg block name
+ * @mem_block_cam_base:     Array of offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @mem_block:              Associated resource structs
+ * @reg_map:                Array of Mapped register info for the "reg-names"
+ * @num_reg_map:            Number of mapped register space associated
+ *                          with mem_block. num_reg_map = num_mem_block in
+ *                          most cases
+ * @num_rgltr:              Number of regulators
+ * @rgltr_name:             Array of regulator names
+ * @rgltr:                  Array of associated regulator resources
+ * @num_clk:                Number of clocks
+ * @clk_name:               Array of clock names
+ * @clk:                    Array of associated clock resources
+ * @clk_rate:               Array of default clock rates
+ * @src_clk_idx:            Source clock index that is rate-controllable
+ * @soc_private:            Soc private data
+ *
+ */
+struct cam_hw_soc_info {
+	struct platform_device         *pdev;
+	uint32_t                        hw_version;
+	uint32_t                        index;
+
+	const char                     *irq_name;
+	struct resource                *irq_line;
+
+	uint32_t                        num_mem_block;
+	const char                     *mem_block_name[CAM_SOC_MAX_BLOCK];
+	uint32_t                        mem_block_cam_base[CAM_SOC_MAX_BLOCK];
+	struct resource                *mem_block[CAM_SOC_MAX_BLOCK];
+	struct cam_soc_reg_map          reg_map[CAM_SOC_MAX_BASE];
+	uint32_t                        num_reg_map;
+
+	uint32_t                        num_rgltr;
+	const char                     *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	struct regulator               *rgltr[CAM_SOC_MAX_REGULATOR];
+
+	uint32_t                        num_clk;
+	const char                     *clk_name[CAM_SOC_MAX_CLK];
+	struct clk                     *clk[CAM_SOC_MAX_CLK];
+	int32_t                         clk_rate[CAM_SOC_MAX_CLK];
+	int32_t                         src_clk_idx;
+
+	void                           *soc_private;
+};
+
+/*
+ * CAM_SOC_GET_REG_MAP_START
+ *
+ * @brief:              This MACRO will get the mapped starting address
+ *                      where the register space can be accessed
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a pointer to the mapped register memory
+ */
+/* Arguments fully parenthesized to keep expression arguments safe. */
+#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index)              \
+	((!(__soc_info) || (__base_index) >= (__soc_info)->num_reg_map) ? \
+		NULL : (__soc_info)->reg_map[(__base_index)].mem_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_CAM_BASE
+ *
+ * @brief:              This MACRO will get the cam_base of the
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns an int32_t value.
+ *                        Failure: -1
+ *                        Success: Starting offset of register space compared
+ *                                 to entire Camera Register Map
+ */
+/* Arguments fully parenthesized to keep expression arguments safe. */
+#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index)           \
+	((!(__soc_info) || (__base_index) >= (__soc_info)->num_reg_map) ? \
+		-1 : (__soc_info)->reg_map[(__base_index)].mem_cam_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_SIZE
+ *
+ * @brief:              This MACRO will get the size of the mapped
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a uint32_t value.
+ *                        Failure: 0
+ *                        Success: Non-zero size of mapped register space
+ */
+/* Arguments fully parenthesized to keep expression arguments safe. */
+#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index)               \
+	((!(__soc_info) || (__base_index) >= (__soc_info)->num_reg_map) ? \
+		0 : (__soc_info)->reg_map[(__base_index)].size)
+
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief:              Parse the DT and populate the common properties that
+ *                      are part of the soc_info structure - register map,
+ *                      clocks, regulators, irq, etc.
+ *
+ * @soc_info:           Device soc struct to be populated
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
+
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief:              Request regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ * @handler:            Irq handler function pointer
+ * @irq_data:           Irq handler function CB data
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data);
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief:              Release regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief:              Enable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @enable_clocks:      Boolean flag:
+ *                          TRUE: Enable all clocks in soc_info Now.
+ *                          False: Don't enable clocks Now. Driver will
+ *                                 enable independently.
+ * @enable_irq:         Boolean flag:
+ *                          TRUE: Enable IRQ in soc_info Now.
+ *                          False: Don't enable IRQ Now. Driver will
+ *                                 enable independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, bool enable_irq);
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Disable regulator, irq resources
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     Boolean flag:
+ *                          TRUE: Disable all clocks in soc_info Now.
+ *                          False: Don't disable clocks Now. Driver will
+ *                                 disable independently.
+ * @disable_irq:        Boolean flag:
+ *                          TRUE: Disable IRQ in soc_info Now.
+ *                          False: Don't disable IRQ Now. Driver will
+ *                                 disable independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+
+/**
+ * cam_soc_util_clk_enable()
+ *
+ * @brief:              Enable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned ON
+ * @clk_name:           Clocks name associated with clk
+ * @clk_rate:           Clocks rate associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate);
+
+/**
+ * cam_soc_util_clk_disable()
+ *
+ * @brief:              Disable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned OFF
+ * @clk_name:           Clocks name associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name);
+
+/**
+ * cam_soc_util_irq_enable()
+ *
+ * @brief:              Enable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_irq_disable()
+ *
+ * @brief:              Disable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_w()
+ *
+ * @brief:              Camera SOC util for register write
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	void __iomem *mem_base =
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/* Fail if the requested register space is not mapped */
+	if (!mem_base)
+		return -EINVAL;
+
+	return cam_io_w(data, mem_base + offset);
+}
+
+/**
+ * cam_soc_util_w_mb()
+ *
+ * @brief:              Camera SOC util for register write with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call wmb() independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	void __iomem *mem_base =
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/* Fail if the requested register space is not mapped */
+	if (!mem_base)
+		return -EINVAL;
+
+	return cam_io_w_mb(data, mem_base + offset);
+}
+
+/**
+ * cam_soc_util_r()
+ *
+ * @brief:              Camera SOC util for register read
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	void __iomem *mem_base =
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/* Read back as 0 if the requested register space is not mapped */
+	if (!mem_base)
+		return 0;
+
+	return cam_io_r(mem_base + offset);
+}
+
+/**
+ * cam_soc_util_r_mb()
+ *
+ * @brief:              Camera SOC util for register read with memory barrier.
+ *                      Memory Barrier is only before the write to ensure the
+ *                      order. If need to ensure this write is also flushed
+ *                      call rmb() independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	void __iomem *mem_base =
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/* Read back as 0 if the requested register space is not mapped */
+	if (!mem_base)
+		return 0;
+
+	return cam_io_r_mb(mem_base + offset);
+}
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief:              Camera SOC util for dumping a range of register
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Start register offset for the dump
+ * @size:               Size specifying the range for dump
+ *
+ * @return:             Success or Failure
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size);
+
+#endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index ef3846c..c7abd9d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,12 +96,16 @@
  * @SDE_CAPS_R1_WB: MDSS V1.x WB block
  * @SDE_CAPS_R3_WB: MDSS V3.x WB block
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
+ * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
+ * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
 	SDE_CAPS_R3_WB,
 	SDE_CAPS_R3_1P5_DOWNSCALE,
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
+	SDE_CAPS_MIN_BUS_VOTE,
+	SDE_CAPS_SBUF_1,
 	SDE_CAPS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 10858b3..643e8a0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -67,6 +67,8 @@
 #define ROT_OVERHEAD_NUMERATOR		27
 #define ROT_OVERHEAD_DENOMINATOR	10000
 
+/* default minimum bandwidth vote */
+#define ROT_ENABLE_BW_VOTE		64000
 /*
  * Max rotator hw blocks possible. Used for upper array limits instead of
  * alloc and freeing small array
@@ -96,6 +98,9 @@
 	.active_only = 1,
 };
 
+/* forward prototype */
+static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
+
 static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
 		u64 quota)
 {
@@ -292,6 +297,7 @@
 
 static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	int ret;
 
 	if (WARN_ON(mgr->regulator_enable == on)) {
@@ -302,6 +308,11 @@
 	SDEROT_EVTLOG(on);
 	SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && on) {
+		mgr->minimum_bw_vote = mgr->enable_bw_vote;
+		sde_rotator_update_perf(mgr);
+	}
+
 	if (mgr->ops_hw_pre_pmevent)
 		mgr->ops_hw_pre_pmevent(mgr, on);
 
@@ -316,6 +327,11 @@
 	if (mgr->ops_hw_post_pmevent)
 		mgr->ops_hw_post_pmevent(mgr, on);
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && !on) {
+		mgr->minimum_bw_vote = 0;
+		sde_rotator_update_perf(mgr);
+	}
+
 	mgr->regulator_enable = on;
 }
 
@@ -532,6 +548,10 @@
 	if (!input)
 		dir = DMA_FROM_DEVICE;
 
+	data->sbuf = buffer->sbuf;
+	data->scid = buffer->scid;
+	data->writeback = buffer->writeback;
+
 	memset(planes, 0, sizeof(planes));
 
 	for (i = 0; i < buffer->plane_count; i++) {
@@ -539,6 +559,8 @@
 		planes[i].offset = buffer->planes[i].offset;
 		planes[i].buffer = buffer->planes[i].buffer;
 		planes[i].handle = buffer->planes[i].handle;
+		planes[i].addr = buffer->planes[i].addr;
+		planes[i].len = buffer->planes[i].len;
 	}
 
 	ret =  sde_mdp_data_get_and_validate_size(data, planes,
@@ -760,6 +782,9 @@
 	if (entry->item.flags & SDE_ROTATION_EXT_DMA_BUF)
 		flag |= SDE_ROT_EXT_DMA_BUF;
 
+	if (entry->item.flags & SDE_ROTATION_EXT_IOVA)
+		flag |= SDE_ROT_EXT_IOVA;
+
 	if (entry->item.flags & SDE_ROTATION_SECURE_CAMERA)
 		flag |= SDE_SECURE_CAMERA_SESSION;
 
@@ -800,6 +825,10 @@
 			entry->perf->wrot_limit != mgr->wrot_limit))
 		return true;
 
+	/* sbuf mode is exclusive and may impact queued entries */
+	if (!mgr->sbuf_ctx && entry->perf && entry->perf->config.output.sbuf)
+		return true;
+
 	return false;
 }
 
@@ -855,6 +884,9 @@
 				entry->item.session_id,
 				entry->item.sequence_id);
 		return sde_rotator_is_hw_idle(mgr, hw);
+	} else if (mgr->sbuf_ctx && mgr->sbuf_ctx != entry->private) {
+		SDEROT_DBG("wait until sbuf mode is off\n");
+		return false;
 	} else {
 		return (atomic_read(&hw->num_active) < hw->max_active);
 	}
@@ -907,6 +939,14 @@
 			entry->item.session_id, entry->item.sequence_id);
 	mgr->rdot_limit = entry->perf->rdot_limit;
 	mgr->wrot_limit = entry->perf->wrot_limit;
+
+	if (!mgr->sbuf_ctx && entry->perf->config.output.sbuf) {
+		SDEROT_DBG("acquire sbuf s:%d.%d\n", entry->item.session_id,
+				entry->item.sequence_id);
+		SDEROT_EVTLOG(entry->item.session_id, entry->item.sequence_id);
+		mgr->sbuf_ctx = entry->private;
+	}
+
 	return hw;
 }
 
@@ -1233,8 +1273,12 @@
 				(mgr->overhead.denom - max_fps *
 				mgr->overhead.numer));
 
+	/* use client provided clock if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->clk_rate = config->clk_rate;
+
 	/*
-	 * check for Override clock calcualtion
+	 * check for Override clock calculation
 	 */
 	if (rot_dev->min_rot_clk > perf->clk_rate)
 		perf->clk_rate = rot_dev->min_rot_clk;
@@ -1258,6 +1302,10 @@
 	if (rot_dev->min_bw > perf->bw)
 		perf->bw = rot_dev->min_bw;
 
+	/* use client provided bandwidth if specified */
+	if (config->flags & SDE_ROTATION_EXT_PERF)
+		perf->bw = config->data_bw;
+
 	perf->rdot_limit = sde_mdp_get_ot_limit(
 			config->input.width, config->input.height,
 			config->input.format, config->frame_rate, true);
@@ -1291,6 +1339,7 @@
 	}
 
 	total_bw += mgr->pending_close_bw_vote;
+	total_bw = max_t(u64, total_bw, mgr->minimum_bw_vote);
 	sde_rotator_enable_reg_bus(mgr, total_bw);
 	ATRACE_INT("bus_quota", total_bw);
 	sde_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
@@ -1560,7 +1609,11 @@
 	if ((in_fmt->is_yuv != out_fmt->is_yuv) ||
 		(in_fmt->pixel_mode != out_fmt->pixel_mode) ||
 		(in_fmt->unpack_tight != out_fmt->unpack_tight)) {
-		SDEROT_ERR("Rotator does not support CSC\n");
+		SDEROT_ERR(
+			"Rotator does not support CSC yuv:%d/%d pm:%d/%d ut:%d/%d\n",
+			in_fmt->is_yuv, out_fmt->is_yuv,
+			in_fmt->pixel_mode, out_fmt->pixel_mode,
+			in_fmt->unpack_tight, out_fmt->unpack_tight);
 		goto verify_error;
 	}
 
@@ -1887,8 +1940,9 @@
 
 		INIT_WORK(&entry->commit_work, sde_rotator_commit_handler);
 		INIT_WORK(&entry->done_work, sde_rotator_done_handler);
-		SDEROT_DBG("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
-			"dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
+		SDEROT_DBG(
+			"Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%x dst{%u,%u,%u,%u}f=%x session_id=%u\n",
+			item->wb_idx,
 			item->src_rect.x, item->src_rect.y,
 			item->src_rect.w, item->src_rect.h, item->input.format,
 			item->dst_rect.x, item->dst_rect.y,
@@ -1967,8 +2021,7 @@
 	struct sde_rot_entry_container *req, *req_next;
 
 	list_for_each_entry_safe(req, req_next, &private->req_list, list) {
-		if ((atomic_read(&req->pending_count) == 0) &&
-				(!req->retire_work && !req->retireq)) {
+		if ((atomic_read(&req->pending_count) == 0) && req->finished) {
 			list_del_init(&req->list);
 			devm_kfree(&mgr->pdev->dev, req);
 		}
@@ -2029,6 +2082,34 @@
 	return ret;
 }
 
+/*
+ * sde_rotator_commit_request - commit the request to hardware
+ * @mgr: pointer to rotator manager
+ * @ctx: pointer to per file context
+ * @req: pointer to rotation request
+ *
+ * This differs from sde_rotator_queue_request in that this
+ * function will wait until request is committed to hardware.
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
+	struct sde_rot_entry_container *req)
+{
+	int i;
+
+	if (!mgr || !ctx || !req || !req->entries) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	sde_rotator_queue_request(mgr, ctx, req);
+
+	/*
+	 * Release the manager lock while waiting for the commit work of
+	 * each entry to flush - presumably the commit handlers need the
+	 * lock themselves; confirm against sde_rotator_commit_handler.
+	 */
+	sde_rot_mgr_unlock(mgr);
+	for (i = 0; i < req->count; i++)
+		flush_work(&req->entries[i].commit_work);
+	sde_rot_mgr_lock(mgr);
+}
+
 static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private, u32 session_id)
 {
@@ -2139,7 +2220,13 @@
 	sde_rotator_update_clk(mgr);
 	sde_rotator_resource_ctrl(mgr, false);
 done:
-	SDEROT_DBG("Closed session id:%u", id);
+	if (mgr->sbuf_ctx == private) {
+		SDEROT_DBG("release sbuf session id:%u\n", id);
+		SDEROT_EVTLOG(id);
+		mgr->sbuf_ctx = NULL;
+	}
+
+	SDEROT_DBG("Closed session id:%u\n", id);
 	return 0;
 }
 
@@ -2183,6 +2270,11 @@
 		goto done;
 	}
 
+	if (config->output.sbuf && mgr->sbuf_ctx != private && mgr->sbuf_ctx) {
+		SDEROT_ERR("too many sbuf sessions\n");
+		goto done;
+	}
+
 	SDEROT_DBG(
 		"reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u fps:%d clk:%lu, bw:%llu\n",
 		config->session_id, config->input.width, config->input.height,
@@ -2230,14 +2322,25 @@
 	return req;
 }
 
+/*
+ * sde_rotator_req_finish - mark the given request as finished by the caller
+ * @mgr: pointer to rotator manager
+ * @private: pointer to rotator manager per file context
+ * @req: pointer to rotation request
+ *
+ * Setting req->finished makes the request eligible for removal once its
+ * pending_count drops to zero (see the request cleanup path that checks
+ * pending_count together with the finished flag).
+ */
+void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rot_entry_container *req)
+{
+	if (!mgr || !private || !req) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	req->finished = true;
+}
+
 int sde_rotator_handle_request_common(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
-	struct sde_rot_entry_container *req,
-	struct sde_rotation_item *items)
+	struct sde_rot_entry_container *req)
 {
 	int ret;
 
-	if (!mgr || !private || !req || !items) {
+	if (!mgr || !private || !req) {
 		SDEROT_ERR("null parameters\n");
 		return -EINVAL;
 	}
@@ -2708,6 +2811,7 @@
 	mgr->pdev = pdev;
 	mgr->device = &pdev->dev;
 	mgr->pending_close_bw_vote = 0;
+	mgr->enable_bw_vote = ROT_ENABLE_BW_VOTE;
 	mgr->hwacquire_timeout = ROT_HW_ACQUIRE_TIMEOUT_IN_MS;
 	mgr->queue_count = 1;
 	mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
@@ -2927,6 +3031,7 @@
 	sde_rot_mgr_lock(mgr);
 	atomic_inc(&mgr->device_suspended);
 	sde_rotator_suspend_cancel_rot_work(mgr);
+	mgr->minimum_bw_vote = 0;
 	sde_rotator_update_perf(mgr);
 	ATRACE_END("pm_active");
 	SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 16eaae1..0818917 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -62,22 +62,49 @@
 /* secure camera operation*/
 #define SDE_ROTATION_SECURE_CAMERA	0x40000
 
+/* use client mapped i/o virtual address */
+#define SDE_ROTATION_EXT_IOVA		0x80000
+
+/* use client provided clock/bandwidth parameters */
+#define SDE_ROTATION_EXT_PERF		0x100000
+
 /**********************************************************************
  * configuration structures
  **********************************************************************/
 
+/*
+ * struct sde_rotation_buf_info - input/output buffer configuration
+ * @width: width of buffer region to be processed
+ * @height: height of buffer region to be processed
+ * @format: pixel format of buffer
+ * @comp_ratio: compression ratio for the session
+ * @sbuf: true if buffer is streaming buffer
+ */
 struct sde_rotation_buf_info {
 	uint32_t width;
 	uint32_t height;
 	uint32_t format;
 	struct sde_mult_factor comp_ratio;
+	bool sbuf;
 };
 
+/*
+ * struct sde_rotation_config - rotation configuration for given session
+ * @session_id: identifier of the given session
+ * @input: input buffer information
+ * @output: output buffer information
+ * @frame_rate: session frame rate in fps
+ * @clk_rate: requested rotator clock rate if SDE_ROTATION_EXT_PERF is set
+ * @data_bw: requested data bus bandwidth if SDE_ROTATION_EXT_PERF is set
+ * @flags: configuration flags, e.g. rotation angle, flip, etc...
+ */
 struct sde_rotation_config {
 	uint32_t	session_id;
 	struct sde_rotation_buf_info	input;
 	struct sde_rotation_buf_info	output;
 	uint32_t	frame_rate;
+	uint64_t	clk_rate;
+	uint64_t	data_bw;
 	uint32_t	flags;
 };
 
@@ -106,10 +133,22 @@
 	SDE_ROTATOR_CLK_MAX
 };
 
+enum sde_rotator_trigger {
+	SDE_ROTATOR_TRIGGER_IMMEDIATE,
+	SDE_ROTATOR_TRIGGER_VIDEO,
+	SDE_ROTATOR_TRIGGER_COMMAND,
+};
+
 struct sde_rotation_item {
 	/* rotation request flag */
 	uint32_t	flags;
 
+	/* rotation trigger mode */
+	uint32_t	trigger;
+
+	/* prefill bandwidth in Bps */
+	uint64_t	prefill_bw;
+
 	/* Source crop rectangle */
 	struct sde_rect	src_rect;
 
@@ -191,6 +230,18 @@
 	struct sde_rot_hw_resource *hw;
 };
 
+/*
+ * struct sde_rot_entry_container - rotation request
+ * @list: list of active requests managed by rotator manager
+ * @flags: reserved
+ * @count: size of rotation entries
+ * @pending_count: count of entries pending completion
+ * @failed_count: count of entries failed completion
+ * @finished: true if client is finished with the request
+ * @retireq: workqueue to post completion notification
+ * @retire_work: work for completion notification
+ * @entries: array of rotation entries
+ */
 struct sde_rot_entry_container {
 	struct list_head list;
 	u32 flags;
@@ -199,12 +250,33 @@
 	atomic_t failed_count;
 	struct workqueue_struct *retireq;
 	struct work_struct *retire_work;
+	bool finished;
 	struct sde_rot_entry *entries;
 };
 
 struct sde_rot_mgr;
 struct sde_rot_file_private;
 
+/*
+ * struct sde_rot_entry - rotation entry
+ * @item: rotation item
+ * @commit_work: work descriptor for commit handler
+ * @done_work: work descriptor for done handler
+ * @commitq: pointer to commit handler rotator queue
+ * @fenceq: pointer to fence signaling rotator queue
+ * @doneq: pointer to done handler rotator queue
+ * @request: pointer to containing request
+ * @src_buf: descriptor of source buffer
+ * @dst_buf: descriptor of destination buffer
+ * @input_fence: pointer to input fence for when input content is available
+ * @output_fence: pointer to output fence for when output content is available
+ * @output_signaled: true if output fence of this entry has been signaled
+ * @dnsc_factor_w: calculated width downscale factor for this entry
+ * @dnsc_factor_h: calculated height downscale factor for this entry
+ * @perf: pointer to performance configuration associated with this entry
+ * @work_assigned: true if this item is assigned to h/w queue/unit
+ * @private: pointer to controlling session context
+ */
 struct sde_rot_entry {
 	struct sde_rotation_item item;
 	struct work_struct commit_work;
@@ -230,6 +302,18 @@
 	struct sde_rot_file_private *private;
 };
 
+/*
+ * struct sde_rot_perf - rotator session performance configuration
+ * @list: list of performance configuration under one session
+ * @config: current rotation configuration
+ * @clk_rate: current clock rate in Hz
+ * @bw: current bandwidth in byte per second
+ * @work_dis_lock: serialization lock for updating work distribution (not used)
+ * @work_distribution: work distribution among multiple hardware queue/unit
+ * @last_wb_idx: last queue/unit index, used to account for pre-distributed work
+ * @rdot_limit: read OT limit of this session
+ * @wrot_limit: write OT limit of this session
+ */
 struct sde_rot_perf {
 	struct list_head list;
 	struct sde_rotation_config config;
@@ -242,6 +326,14 @@
 	u32 wrot_limit;
 };
 
+/*
+ * struct sde_rot_file_private - rotator manager per session context
+ * @list: list of all session context
+ * @req_list: list of rotation request for this session
+ * @perf_list: list of performance configuration for this session (only one)
+ * @mgr: pointer to the controlling rotator manager
+ * @fenceq: pointer to rotator queue to signal when entry is done
+ */
 struct sde_rot_file_private {
 	struct list_head list;
 	struct list_head req_list;
@@ -250,6 +342,13 @@
 	struct sde_rot_queue *fenceq;
 };
 
+/*
+ * struct sde_rot_bus_data_type - rotator bus scaling configuration
+ * @bus_scale_pdata: pointer to bus scaling configuration table
+ * @bus_hdl: msm bus scaling handle
+ * @curr_bw_uc_idx: current usecase index into configuration table
+ * @curr_quota_val: current bandwidth request in byte per second
+ */
 struct sde_rot_bus_data_type {
 	struct msm_bus_scale_pdata *bus_scale_pdata;
 	u32 bus_hdl;
@@ -257,6 +356,37 @@
 	u64 curr_quota_val;
 };
 
+/*
+ * struct sde_rot_mgr - core rotator manager
+ * @lock: serialization lock to rotator manager functions
+ * @device_suspended: 0 if device is not suspended; non-zero suspended
+ * @pdev: pointer to controlling platform device
+ * @device: pointer to controlling device
+ * @queue_count: number of hardware queue/unit available
+ * @commitq: array of rotator commit queue corresponding to hardware queue
+ * @doneq: array of rotator done queue corresponding to hardware queue
+ * @file_list: list of all sessions managed by rotator manager
+ * @pending_close_bw_vote: bandwidth of closed sessions with pending work
+ * @minimum_bw_vote: minimum bandwidth required for current use case
+ * @enable_bw_vote: minimum bandwidth required for power enable
+ * @data_bus: data bus configuration state
+ * @reg_bus: register bus configuration state
+ * @module_power: power/clock configuration state
+ * @regulator_enable: true if foot switch is enabled; false otherwise
+ * @res_ref_cnt: reference count of how many times resource is requested
+ * @rot_enable_clk_cnt: reference count of how many times clock is requested
+ * @rot_clk: array of rotator and periphery clocks
+ * @num_rot_clk: size of the rotator clock array
+ * @rdot_limit: current read OT limit
+ * @wrot_limit: current write OT limit
+ * @hwacquire_timeout: maximum wait time for hardware availability in msec
+ * @pixel_per_clk: rotator hardware performance in pixel for clock
+ * @fudge_factor: fudge factor for clock calculation
+ * @overhead: software overhead for offline rotation in msec
+ * @sbuf_ctx: pointer to sbuf session context
+ * @ops_xxx: function pointers of rotator HAL layer
+ * @hw_data: private handle of rotator HAL layer
+ */
 struct sde_rot_mgr {
 	struct mutex lock;
 	atomic_t device_suspended;
@@ -278,6 +408,8 @@
 	struct list_head file_list;
 
 	u64 pending_close_bw_vote;
+	u64 minimum_bw_vote;
+	u64 enable_bw_vote;
 	struct sde_rot_bus_data_type data_bus;
 	struct sde_rot_bus_data_type reg_bus;
 
@@ -297,6 +429,8 @@
 	struct sde_mult_factor fudge_factor;
 	struct sde_mult_factor overhead;
 
+	struct sde_rot_file_private *sbuf_ctx;
+
 	int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
 			struct sde_rot_entry *entry);
 	int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
@@ -323,6 +457,8 @@
 			bool input);
 	int (*ops_hw_is_valid_pixfmt)(struct sde_rot_mgr *mgr, u32 pixfmt,
 			bool input);
+	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
+			int len);
 
 	void *hw_data;
 };
@@ -345,6 +481,15 @@
 	return 0;
 }
 
+/*
+ * sde_rotator_get_downscale_caps - query downscale capabilities from the
+ *	HAL layer
+ * @mgr: pointer to rotator manager
+ * @caps: buffer receiving the capability string
+ * @len: length of the caps buffer
+ *
+ * Return: result of the HAL op, or 0 when the manager or the
+ * ops_hw_get_downscale_caps hook is unavailable.
+ */
+static inline int sde_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	if (mgr && mgr->ops_hw_get_downscale_caps)
+		return mgr->ops_hw_get_downscale_caps(mgr, caps, len);
+
+	return 0;
+}
+
 static inline int __compare_session_item_rect(
 	struct sde_rotation_buf_info *s_rect,
 	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
@@ -380,61 +525,179 @@
 	return 0;
 }
 
+/*
+ * sde_rotator_core_init - initialize rotator manager for the given platform
+ *	device
+ * @pmgr: Pointer to pointer of the newly initialized rotator manager
+ * @pdev: Pointer to platform device
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
 		struct platform_device *pdev);
 
+/*
+ * sde_rotator_core_destroy - destroy given rotator manager
+ * @mgr: Pointer to rotator manager
+ * return: none
+ */
 void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
 
+/*
+ * sde_rotator_session_open - open a new rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @pprivate: Pointer to pointer of the newly initialized per file session
+ * @session_id: identifier of the newly created session
+ * @queue: Pointer to fence queue of the new session
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_session_open(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private **pprivate, int session_id,
 	struct sde_rot_queue *queue);
 
+/*
+ * sde_rotator_session_close - close the given rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per file session
+ * @session_id: identifier of the session
+ * return: none
+ */
 void sde_rotator_session_close(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private, int session_id);
 
+/*
+ * sde_rotator_session_config - configure the given rotator per file session
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per file session
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_session_config(struct sde_rot_mgr *mgr,
 	struct sde_rot_file_private *private,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_req_init - allocate a new request and initialize with given
+ *	array of rotation items
+ * @rot_dev: Pointer to rotator device
+ * @private: Pointer to rotator manager per file context
+ * @items: Pointer to array of rotation item
+ * @count: size of rotation item array
+ * @flags: rotation request flags
+ * return: Pointer to new rotation request if success; ERR_PTR otherwise
+ */
 struct sde_rot_entry_container *sde_rotator_req_init(
 	struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *private,
 	struct sde_rotation_item *items,
 	u32 count, u32 flags);
 
+/*
+ * sde_rotator_req_finish - notify manager that client is finished with the
+ *	given request and manager can release the request as required
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
+void sde_rotator_req_finish(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rot_entry_container *req);
+
+/*
+ * sde_rotator_handle_request_common - add the given request to rotator
+ *	manager and clean up completed requests
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_handle_request_common(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
-	struct sde_rot_entry_container *req,
-	struct sde_rotation_item *items);
+	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_queue_request - queue/schedule the given request for h/w commit
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
 void sde_rotator_queue_request(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
-void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
-	struct sde_rot_file_private *private,
+/*
+ * sde_rotator_commit_request - queue/schedule the given request and wait
+ *	until h/w commit
+ * @mgr: Pointer to rotator manager
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: none
+ */
+void sde_rotator_commit_request(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_verify_config_all - verify given rotation configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_all(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_verify_config_input - verify rotation input configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_input(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_verify_config_output - verify rotation output configuration
+ * @rot_dev: Pointer to rotator device
+ * @config: Pointer to rotator configuration
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_verify_config_output(struct sde_rot_mgr *rot_dev,
 	struct sde_rotation_config *config);
 
+/*
+ * sde_rotator_validate_request - validates given rotation request with
+ *	previous rotator configuration
+ * @rot_dev: Pointer to rotator device
+ * @ctx: Pointer to rotator manager per file context
+ * @req: Pointer to rotation request
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
 	struct sde_rot_file_private *ctx,
 	struct sde_rot_entry_container *req);
 
+/*
+ * sde_rotator_clk_ctrl - enable/disable rotator clock with reference counting
+ * @mgr: Pointer to rotator manager
+ * @enable: true to enable clock; false to disable clock
+ * return: 0 if success; error code otherwise
+ */
 int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable);
 
+/*
+ * sde_rot_mgr_lock - serialization lock prior to rotator manager calls
+ * @mgr: Pointer to rotator manager
+ */
 static inline void sde_rot_mgr_lock(struct sde_rot_mgr *mgr)
 {
 	mutex_lock(&mgr->lock);
 }
 
+/*
+ * sde_rot_mgr_unlock - serialization unlock after rotator manager calls
+ * @mgr: Pointer to rotator manager
+ */
 static inline void sde_rot_mgr_unlock(struct sde_rot_mgr *mgr)
 {
 	mutex_unlock(&mgr->lock);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index a41c450..86e04d6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -868,6 +868,12 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u64("enable_bw_vote", 0644,
+			debugfs_root, &mgr->enable_bw_vote)) {
+		SDEROT_WARN("failed to create enable_bw_vote\n");
+		return -EINVAL;
+	}
+
 	if (mgr->ops_hw_create_debugfs) {
 		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
 		if (ret)
@@ -1283,6 +1289,13 @@
 		return NULL;
 	}
 
+	if (!debugfs_create_u32("disable_syscache", 0644,
+			debugfs_root, &rot_dev->disable_syscache)) {
+		SDEROT_ERR("fail create disable_syscache\n");
+		debugfs_remove_recursive(debugfs_root);
+		return NULL;
+	}
+
 	if (!debugfs_create_u32("streamoff_timeout", 0644,
 			debugfs_root, &rot_dev->streamoff_timeout)) {
 		SDEROT_ERR("fail create streamoff_timeout\n");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 6db0923..47f4cb0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -27,6 +27,7 @@
 #include <media/videobuf2-v4l2.h>
 #include <media/v4l2-mem2mem.h>
 
+#include "sde_rotator_inline.h"
 #include "sde_rotator_base.h"
 #include "sde_rotator_core.h"
 #include "sde_rotator_dev.h"
@@ -413,16 +414,15 @@
 	SDEDEV_DBG(rot_dev->dev, "start streaming s:%d t:%d\n",
 			ctx->session_id, q->type);
 
-	if (!IS_ERR_OR_NULL(ctx->request) ||
-				atomic_read(&ctx->command_pending))
+	if (!list_empty(&ctx->pending_list)) {
 		SDEDEV_ERR(rot_dev->dev,
 				"command pending error s:%d t:%d p:%d\n",
 				ctx->session_id, q->type,
-				atomic_read(&ctx->command_pending));
+				!list_empty(&ctx->pending_list));
+		return -EINVAL;
+	}
 
-	ctx->request = NULL;
 	ctx->abort_pending = 0;
-	atomic_set(&ctx->command_pending, 0);
 
 	return 0;
 }
@@ -443,18 +443,18 @@
 
 	SDEDEV_DBG(rot_dev->dev, "stop streaming s:%d t:%d p:%d\n",
 			ctx->session_id, q->type,
-			atomic_read(&ctx->command_pending));
+			!list_empty(&ctx->pending_list));
 	ctx->abort_pending = 1;
 	mutex_unlock(q->lock);
 	ret = wait_event_timeout(ctx->wait_queue,
-			(atomic_read(&ctx->command_pending) == 0),
+			list_empty(&ctx->pending_list),
 			msecs_to_jiffies(rot_dev->streamoff_timeout));
 	mutex_lock(q->lock);
 	if (!ret)
 		SDEDEV_ERR(rot_dev->dev,
 				"timeout to stream off s:%d t:%d p:%d\n",
 				ctx->session_id, q->type,
-				atomic_read(&ctx->command_pending));
+				!list_empty(&ctx->pending_list));
 
 	sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
 
@@ -737,9 +737,7 @@
 			ctx->format_cap.fmt.pix.bytesperline,
 			ctx->format_cap.fmt.pix.sizeimage);
 	SPRINT("abort_pending=%d\n", ctx->abort_pending);
-	SPRINT("command_pending=%d\n", atomic_read(&ctx->command_pending));
-	SPRINT("submit_work=%d\n", work_busy(&ctx->submit_work));
-	SPRINT("retire_work=%d\n", work_busy(&ctx->retire_work));
+	SPRINT("command_pending=%d\n", !list_empty(&ctx->pending_list));
 	SPRINT("sequence=%u\n",
 		sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline));
 	SPRINT("timestamp=%u\n",
@@ -848,24 +846,26 @@
 }
 
 /*
- * sde_rotator_open - Rotator device open method.
- * @file: Pointer to file struct.
+ * sde_rotator_ctx_open - Rotator device open method.
+ * @rot_dev: Pointer to rotator device structure
+ * @file: Pointer to file struct (optional)
+ * return: Pointer to rotator context if success; ptr error code, otherwise.
  */
-static int sde_rotator_open(struct file *file)
+struct sde_rotator_ctx *sde_rotator_ctx_open(
+		struct sde_rotator_device *rot_dev, struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
-	struct video_device *video = video_devdata(file);
+	struct video_device *video = file ? video_devdata(file) : NULL;
 	struct sde_rotator_ctx *ctx;
 	struct v4l2_ctrl_handler *ctrl_handler;
 	char name[32];
-	int ret;
+	int i, ret;
 
 	if (atomic_read(&rot_dev->mgr->device_suspended))
-		return -EPERM;
+		return ERR_PTR(-EPERM);
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (mutex_lock_interruptible(&rot_dev->lock)) {
 		ret = -ERESTARTSYS;
@@ -873,6 +873,7 @@
 	}
 
 	ctx->rot_dev = rot_dev;
+	ctx->file = file;
 
 	/* Set context defaults */
 	ctx->session_id = rot_dev->session_id++;
@@ -883,7 +884,6 @@
 	ctx->vflip = 0;
 	ctx->rotate = 0;
 	ctx->secure = 0;
-	atomic_set(&ctx->command_pending, 0);
 	ctx->abort_pending = 0;
 	ctx->format_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 	ctx->format_cap.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
@@ -898,18 +898,33 @@
 	ctx->crop_out.width = 640;
 	ctx->crop_out.height = 480;
 	init_waitqueue_head(&ctx->wait_queue);
-	INIT_WORK(&ctx->submit_work, sde_rotator_submit_handler);
-	INIT_WORK(&ctx->retire_work, sde_rotator_retire_handler);
+	spin_lock_init(&ctx->list_lock);
+	INIT_LIST_HEAD(&ctx->pending_list);
+	INIT_LIST_HEAD(&ctx->retired_list);
 
-	v4l2_fh_init(&ctx->fh, video);
-	file->private_data = &ctx->fh;
-	v4l2_fh_add(&ctx->fh);
+	for (i = 0 ; i < ARRAY_SIZE(ctx->requests); i++) {
+		struct sde_rotator_request *request = &ctx->requests[i];
 
-	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
-		ctx, sde_rotator_queue_init);
-	if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
-		ret = PTR_ERR(ctx->fh.m2m_ctx);
-		goto error_m2m_init;
+		INIT_WORK(&request->submit_work,
+				sde_rotator_submit_handler);
+		INIT_WORK(&request->retire_work,
+				sde_rotator_retire_handler);
+		request->ctx = ctx;
+		INIT_LIST_HEAD(&request->list);
+		list_add_tail(&request->list, &ctx->retired_list);
+	}
+
+	if (ctx->file) {
+		v4l2_fh_init(&ctx->fh, video);
+		file->private_data = &ctx->fh;
+		v4l2_fh_add(&ctx->fh);
+
+		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
+			ctx, sde_rotator_queue_init);
+		if (IS_ERR_OR_NULL(ctx->fh.m2m_ctx)) {
+			ret = PTR_ERR(ctx->fh.m2m_ctx);
+			goto error_m2m_init;
+		}
 	}
 
 	ret = kobject_init_and_add(&ctx->kobj, &sde_rotator_fs_ktype,
@@ -954,33 +969,34 @@
 	sde_rot_mgr_unlock(rot_dev->mgr);
 
 	/* Create control */
-	ctrl_handler = &ctx->ctrl_handler;
-	v4l2_ctrl_handler_init(ctrl_handler, 4);
-	v4l2_ctrl_new_std(ctrl_handler,
+	if (ctx->file) {
+		ctrl_handler = &ctx->ctrl_handler;
+		v4l2_ctrl_handler_init(ctrl_handler, 4);
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
-	v4l2_ctrl_new_std(ctrl_handler,
+		v4l2_ctrl_new_std(ctrl_handler,
 			&sde_rotator_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure, NULL);
-	v4l2_ctrl_new_custom(ctrl_handler,
+		v4l2_ctrl_new_custom(ctrl_handler,
 			&sde_rotator_ctrl_secure_camera, NULL);
-	if (ctrl_handler->error) {
-		ret = ctrl_handler->error;
-		v4l2_ctrl_handler_free(ctrl_handler);
-		goto error_ctrl_handler;
+		if (ctrl_handler->error) {
+			ret = ctrl_handler->error;
+			v4l2_ctrl_handler_free(ctrl_handler);
+			goto error_ctrl_handler;
+		}
+		ctx->fh.ctrl_handler = ctrl_handler;
+		v4l2_ctrl_handler_setup(ctrl_handler);
 	}
-	ctx->fh.ctrl_handler = ctrl_handler;
-	v4l2_ctrl_handler_setup(ctrl_handler);
-
 	mutex_unlock(&rot_dev->lock);
 
 	SDEDEV_DBG(ctx->rot_dev->dev, "SDE v4l2 rotator open success\n");
 
 	ATRACE_BEGIN(ctx->kobj.name);
 
-	return 0;
+	return ctx;
 error_ctrl_handler:
 error_open_session:
 	sde_rot_mgr_unlock(rot_dev->mgr);
@@ -992,11 +1008,655 @@
 	kobject_put(&ctx->kobj);
 error_kobj_init:
 error_m2m_init:
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
+	if (ctx->file) {
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
 	mutex_unlock(&rot_dev->lock);
 error_lock:
 	kfree(ctx);
+	return ERR_PTR(ret);
+}
+
+/*
+ * sde_rotator_ctx_release - Rotator device release method.
+ * @ctx: Pointer to rotator context.
+ * @file: Pointer to file struct (optional)
+ * return: 0 if success; error code, otherwise
+ */
+static int sde_rotator_ctx_release(struct sde_rotator_ctx *ctx,
+		struct file *file)
+{
+	struct sde_rotator_device *rot_dev = ctx->rot_dev;
+	u32 session_id = ctx->session_id;
+	struct list_head *curr, *next;
+
+	ATRACE_END(ctx->kobj.name);
+
+	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
+	mutex_lock(&rot_dev->lock);
+	if (ctx->file) {
+		v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+		SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_OUTPUT);
+		v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx,
+				V4L2_BUF_TYPE_VIDEO_CAPTURE);
+	}
+	mutex_unlock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n", session_id);
+	list_for_each_safe(curr, next, &ctx->pending_list) {
+		struct sde_rotator_request *request =
+			container_of(curr, struct sde_rotator_request, list);
+
+		SDEDEV_DBG(rot_dev->dev, "release submit work s:%d\n",
+				session_id);
+		cancel_work_sync(&request->submit_work);
+	}
+	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
+	sde_rot_mgr_lock(rot_dev->mgr);
+	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n", session_id);
+	list_for_each_safe(curr, next, &ctx->pending_list) {
+		struct sde_rotator_request *request =
+			container_of(curr, struct sde_rotator_request, list);
+
+		SDEDEV_DBG(rot_dev->dev, "release retire work s:%d\n",
+				session_id);
+		cancel_work_sync(&request->retire_work);
+	}
+	mutex_lock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
+	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
+	destroy_workqueue(ctx->work_queue.rot_work_queue);
+	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
+	kobject_put(&ctx->kobj);
+	if (ctx->file) {
+		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+		v4l2_fh_del(&ctx->fh);
+		v4l2_fh_exit(&ctx->fh);
+	}
+	kfree(ctx->vbinfo_out);
+	kfree(ctx->vbinfo_cap);
+	kfree(ctx);
+	mutex_unlock(&rot_dev->lock);
+	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
+	return 0;
+}
+
+/*
+ * sde_rotator_update_retire_sequence - update retired sequence of the context
+ *	referenced in the request, and wake up any waiting for update event
+ * @request: Pointer to rotator request
+ */
+static void sde_rotator_update_retire_sequence(
+		struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rot_entry_container *req;
+
+	if (!request || !request->ctx) {
+		SDEROT_ERR("invalid parameters\n");
+		return;
+	}
+
+	ctx = request->ctx;
+	req = request->req;
+
+	if (req && req->entries && req->count)
+		ctx->retired_sequence_id =
+				req->entries[req->count - 1].item.sequence_id;
+
+	wake_up(&ctx->wait_queue);
+
+	SDEROT_DBG("update sequence s:%d.%d\n",
+				ctx->session_id, ctx->retired_sequence_id);
+}
+
+/*
+ * sde_rotator_retire_request - retire the given rotator request with
+ *	device mutex locked
+ * @request: Pointer to rotator request
+ */
+static void sde_rotator_retire_request(struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+
+	if (!request || !request->ctx) {
+		SDEROT_ERR("invalid parameters\n");
+		return;
+	}
+
+	ctx = request->ctx;
+
+	request->req = NULL;
+	request->committed = false;
+	spin_lock(&ctx->list_lock);
+	list_del_init(&request->list);
+	list_add_tail(&request->list, &ctx->retired_list);
+	spin_unlock(&ctx->list_lock);
+
+	SDEROT_DBG("retire request s:%d.%d\n",
+				ctx->session_id, ctx->retired_sequence_id);
+}
+
+/*
+ * sde_rotator_is_request_retired - Return true if given request already expired
+ * @request: Pointer to rotator request
+ */
+static bool sde_rotator_is_request_retired(struct sde_rotator_request *request)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rot_entry_container *req;
+	u32 sequence_id;
+	s32 retire_delta;
+
+	if (!request || !request->ctx || !request->req ||
+			!request->req->entries || !request->req->count)
+		return true;
+
+	ctx = request->ctx;
+	req = request->req;
+	sequence_id = req->entries[req->count - 1].item.sequence_id;
+
+	retire_delta = (s32) (ctx->retired_sequence_id - sequence_id);
+
+	SDEROT_DBG("sequence:%u/%u\n", sequence_id, ctx->retired_sequence_id);
+
+	return retire_delta >= 0;
+}
+
+/*
+ * sde_rotator_inline_open - open inline rotator session
+ * @pdev: Pointer to rotator platform device
+ * return: Pointer to new rotator session context if success;
+ *	error pointer otherwise
+ */
+void *sde_rotator_inline_open(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctx = sde_rotator_ctx_open(rot_dev, NULL);
+	if (IS_ERR_OR_NULL(ctx)) {
+		rc = PTR_ERR(ctx);
+		SDEROT_ERR("failed to open rotator context %d\n", rc);
+		goto rotator_open_error;
+	}
+
+	ctx->slice = llcc_slice_getd(rot_dev->dev, "rotator");
+	if (IS_ERR(ctx->slice)) {
+		rc = PTR_ERR(ctx->slice);
+		SDEROT_ERR("failed to get system cache %d\n", rc);
+		goto slice_getd_error;
+	}
+
+	if (!rot_dev->disable_syscache) {
+		rc = llcc_slice_activate(ctx->slice);
+		if (rc) {
+			SDEROT_ERR("failed to activate slice %d\n", rc);
+			goto activate_error;
+		}
+		SDEROT_DBG("scid %d size %zukb\n",
+				llcc_get_slice_id(ctx->slice),
+				llcc_get_slice_size(ctx->slice));
+	} else {
+		SDEROT_DBG("syscache bypassed\n");
+	}
+
+	SDEROT_EVTLOG(ctx->session_id, llcc_get_slice_id(ctx->slice),
+			llcc_get_slice_size(ctx->slice),
+			rot_dev->disable_syscache);
+
+	return ctx;
+
+activate_error:
+	llcc_slice_putd(ctx->slice);
+	ctx->slice = NULL;
+slice_getd_error:
+	sde_rotator_ctx_release(ctx, NULL);
+rotator_open_error:
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL(sde_rotator_inline_open);
+
+int sde_rotator_inline_release(void *handle)
+{
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_ctx *ctx;
+
+	if (!handle) {
+		SDEROT_ERR("invalid rotator ctx\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	if (ctx->slice) {
+		if (!rot_dev->disable_syscache)
+			llcc_slice_deactivate(ctx->slice);
+		llcc_slice_putd(ctx->slice);
+		ctx->slice = NULL;
+	}
+
+	SDEROT_EVTLOG(ctx->session_id);
+
+	return sde_rotator_ctx_release(ctx, NULL);
+}
+EXPORT_SYMBOL(sde_rotator_inline_release);
+
+/*
+ * sde_rotator_inline_get_dst_pixfmt - determine output pixel format
+ * @pdev: Pointer to platform device
+ * @src_pixfmt: input pixel format
+ * @dst_pixfmt: Pointer to output pixel format (output)
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	return sde_rot_get_base_tilea5x_pixfmt(src_pixfmt, dst_pixfmt);
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_dst_pixfmt);
+
+/*
+ * sde_rotator_inline_get_downscale_caps - get scaling capability
+ * @pdev: Pointer to platform device
+ * @caps: string buffer for capability
+ * @len: length of string buffer
+ * return: length of capability string
+ */
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *caps, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	int rc;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	rc = sde_rotator_get_downscale_caps(rot_dev->mgr, caps, len);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
+
+/*
+ * sde_rotator_inline_get_pixfmt_caps - get pixel format capability
+ * @pdev: Pointer to platform device
+ * @pixfmt: array of pixel format buffer
+ * @len: length of pixel format buffer
+ * return: length of pixel format capability if success; error code otherwise
+ */
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmts, int len)
+{
+	struct sde_rotator_device *rot_dev;
+	u32 i, pixfmt;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *) platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	for (i = 0;; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, input);
+		if (!pixfmt)
+			break;
+		if (pixfmts && i < len)
+			pixfmts[i] = pixfmt;
+	}
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return i;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_pixfmt_caps);
+
+/*
+ * sde_rotator_inline_commit - commit given rotator command
+ * @handle: Pointer to rotator context
+ * @cmd: Pointer to rotator command
+ * @cmd_type: command type - validate/prepare/commit/cleanup
+ * return: 0 if success; error code otherwise
+ */
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type)
+{
+	struct sde_rotator_ctx *ctx;
+	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request = NULL;
+	struct sde_rot_entry_container *req = NULL;
+	ktime_t *ts;
+	u32 flags = 0;
+	int i, ret;
+
+	if (!handle || !cmd) {
+		SDEROT_ERR("invalid rotator handle/cmd\n");
+		return -EINVAL;
+	}
+
+	ctx = handle;
+	rot_dev = ctx->rot_dev;
+
+	if (!rot_dev) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	SDEROT_DBG(
+		"s:%d.%u src:(%u,%u,%u,%u)/%ux%u/%c%c%c%c dst:(%u,%u,%u,%u)/%c%c%c%c r:%d f:%d/%d s:%d fps:%u clk:%llu bw:%llu wb:%d vid:%d cmd:%d\n",
+		ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt >> 0, cmd->src_pixfmt >> 8,
+		cmd->src_pixfmt >> 16, cmd->src_pixfmt >> 24,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt >> 0, cmd->dst_pixfmt >> 8,
+		cmd->dst_pixfmt >> 16, cmd->dst_pixfmt >> 24,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+	SDEROT_EVTLOG(ctx->session_id, cmd->sequence_id,
+		cmd->src_rect_x, cmd->src_rect_y,
+		cmd->src_rect_w, cmd->src_rect_h,
+		cmd->src_width, cmd->src_height,
+		cmd->src_pixfmt,
+		cmd->dst_rect_x, cmd->dst_rect_y,
+		cmd->dst_rect_w, cmd->dst_rect_h,
+		cmd->dst_pixfmt,
+		cmd->rot90, cmd->hflip, cmd->vflip, cmd->secure, cmd->fps,
+		cmd->clkrate, cmd->data_bw,
+		cmd->dst_writeback, cmd->video_mode, cmd_type);
+
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE ||
+			cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		struct sde_rotation_item item;
+		struct sde_rotator_statistics *stats = &rot_dev->stats;
+		int scid = llcc_get_slice_id(ctx->slice);
+
+		/* allocate slot for timestamp */
+		ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+
+		if (cmd->rot90)
+			flags |= SDE_ROTATION_90;
+		if (cmd->hflip)
+			flags |= SDE_ROTATION_FLIP_LR;
+		if (cmd->vflip)
+			flags |= SDE_ROTATION_FLIP_UD;
+		if (cmd->secure)
+			flags |= SDE_ROTATION_SECURE;
+
+		flags |= SDE_ROTATION_EXT_PERF;
+
+		/* fill in item work structure */
+		memset(&item, 0, sizeof(struct sde_rotation_item));
+		item.flags = flags | SDE_ROTATION_EXT_IOVA;
+		item.trigger = cmd->video_mode ? SDE_ROTATOR_TRIGGER_VIDEO :
+				SDE_ROTATOR_TRIGGER_COMMAND;
+		item.prefill_bw = cmd->prefill_bw;
+		item.session_id = ctx->session_id;
+		item.sequence_id = cmd->sequence_id;
+		item.src_rect.x = cmd->src_rect_x;
+		item.src_rect.y = cmd->src_rect_y;
+		item.src_rect.w = cmd->src_rect_w;
+		item.src_rect.h = cmd->src_rect_h;
+		item.input.width = cmd->src_width;
+		item.input.height = cmd->src_height;
+		item.input.format = cmd->src_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.input.planes[i].addr = cmd->src_addr[i];
+			item.input.planes[i].len = cmd->src_len[i];
+			item.input.planes[i].fd = -1;
+		}
+		item.input.plane_count = cmd->src_planes;
+		item.input.comp_ratio.numer = 1;
+		item.input.comp_ratio.denom = 1;
+
+		item.output.width = cmd->dst_rect_x + cmd->dst_rect_w;
+		item.output.height = cmd->dst_rect_y + cmd->dst_rect_h;
+		item.dst_rect.x = cmd->dst_rect_x;
+		item.dst_rect.y = cmd->dst_rect_y;
+		item.dst_rect.w = cmd->dst_rect_w;
+		item.dst_rect.h = cmd->dst_rect_h;
+		item.output.sbuf = true;
+		item.output.scid = scid;
+		item.output.writeback = cmd->dst_writeback;
+		item.output.format = cmd->dst_pixfmt;
+
+		for (i = 0; i < SDE_ROTATOR_INLINE_PLANE_MAX; i++) {
+			item.output.planes[i].addr = cmd->dst_addr[i];
+			item.output.planes[i].len = cmd->dst_len[i];
+			item.output.planes[i].fd = -1;
+		}
+		item.output.plane_count = cmd->dst_planes;
+		item.output.comp_ratio.numer = 1;
+		item.output.comp_ratio.denom = 1;
+		item.sequence_id = ++(ctx->commit_sequence_id);
+		item.ts = ts;
+
+		req = sde_rotator_req_init(rot_dev->mgr, ctx->private,
+				&item, 1, 0);
+		if (IS_ERR_OR_NULL(req)) {
+			SDEROT_ERR("fail allocate request s:%d\n",
+					ctx->session_id);
+			ret = -ENOMEM;
+			goto error_init_request;
+		}
+	}
+
+	if (cmd_type == SDE_ROTATOR_INLINE_CMD_VALIDATE) {
+		struct sde_rotation_config rotcfg;
+
+		memset(&rotcfg, 0, sizeof(struct sde_rotation_config));
+		rotcfg.flags = flags;
+		rotcfg.frame_rate = cmd->fps;
+		rotcfg.clk_rate = cmd->clkrate;
+		rotcfg.data_bw = cmd->data_bw;
+		rotcfg.session_id = ctx->session_id;
+		rotcfg.input.width = cmd->src_rect_w;
+		rotcfg.input.height = cmd->src_rect_h;
+		rotcfg.input.format = cmd->src_pixfmt;
+		rotcfg.input.comp_ratio.numer = 1;
+		rotcfg.input.comp_ratio.denom = 1;
+		rotcfg.output.width = cmd->dst_rect_w;
+		rotcfg.output.height = cmd->dst_rect_h;
+		rotcfg.output.format = cmd->dst_pixfmt;
+		rotcfg.output.comp_ratio.numer = 1;
+		rotcfg.output.comp_ratio.denom = 1;
+		rotcfg.output.sbuf = true;
+
+		if (memcmp(&rotcfg, &ctx->rotcfg, sizeof(rotcfg))) {
+			ret = sde_rotator_session_config(rot_dev->mgr,
+					ctx->private, &rotcfg);
+			if (ret) {
+				SDEROT_ERR("fail session config s:%d\n",
+						ctx->session_id);
+				goto error_session_config;
+			}
+
+			ctx->rotcfg = rotcfg;
+		}
+
+		ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private,
+				req);
+		if (ret) {
+			SDEROT_ERR("fail validate request s:%d\n",
+					ctx->session_id);
+			goto error_validate_request;
+		}
+
+		devm_kfree(rot_dev->dev, req);
+		req = NULL;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_COMMIT) {
+
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			/* should not happen */
+			ret = -ENOMEM;
+			SDEROT_ERR("no free request s:%d\n", ctx->session_id);
+			goto error_retired_list;
+		}
+
+		request->req = req;
+
+		spin_lock(&ctx->list_lock);
+		list_del_init(&request->list);
+		list_add_tail(&request->list, &ctx->pending_list);
+		spin_unlock(&ctx->list_lock);
+
+		ts = req->entries[0].item.ts;
+		if (ts) {
+			ts[SDE_ROTATOR_TS_SRCQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_DSTQB] = ktime_get();
+			ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
+		} else {
+			SDEROT_ERR("invalid stats timestamp\n");
+		}
+		req->retireq = ctx->work_queue.rot_work_queue;
+		req->retire_work = &request->retire_work;
+
+		trace_rot_entry_fence(
+			ctx->session_id, cmd->sequence_id,
+			req->entries[0].item.wb_idx,
+			req->entries[0].item.flags,
+			req->entries[0].item.input.format,
+			req->entries[0].item.input.width,
+			req->entries[0].item.input.height,
+			req->entries[0].item.src_rect.x,
+			req->entries[0].item.src_rect.y,
+			req->entries[0].item.src_rect.w,
+			req->entries[0].item.src_rect.h,
+			req->entries[0].item.output.format,
+			req->entries[0].item.output.width,
+			req->entries[0].item.output.height,
+			req->entries[0].item.dst_rect.x,
+			req->entries[0].item.dst_rect.y,
+			req->entries[0].item.dst_rect.w,
+			req->entries[0].item.dst_rect.h);
+
+		ret = sde_rotator_handle_request_common(
+				rot_dev->mgr, ctx->private, req);
+		if (ret) {
+			SDEROT_ERR("fail handle request s:%d\n",
+					ctx->session_id);
+			goto error_handle_request;
+		}
+
+		sde_rotator_commit_request(rot_dev->mgr, ctx->private, req);
+
+		request->committed = true;
+
+		/* save request in private handle */
+		cmd->priv_handle = request;
+
+	} else if (cmd_type == SDE_ROTATOR_INLINE_CMD_CLEANUP) {
+		if (!cmd->priv_handle) {
+			ret = -EINVAL;
+			SDEROT_ERR("invalid private handle\n");
+			goto error_invalid_handle;
+		}
+
+		request = cmd->priv_handle;
+		req = request->req;
+
+		if (request->committed) {
+			/* wait until request is finished */
+			sde_rot_mgr_unlock(rot_dev->mgr);
+			ret = wait_event_timeout(ctx->wait_queue,
+				sde_rotator_is_request_retired(request),
+				msecs_to_jiffies(rot_dev->streamoff_timeout));
+			if (!ret)
+				SDEROT_ERR("timeout w/o retire s:%d\n",
+						ctx->session_id);
+			else if (ret == 1)
+				SDEROT_ERR("timeout w/ retire s:%d\n",
+						ctx->session_id);
+
+			sde_rot_mgr_lock(rot_dev->mgr);
+		}
+
+		sde_rotator_req_finish(rot_dev->mgr, ctx->private, req);
+		sde_rotator_retire_request(request);
+	}
+
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return 0;
+
+error_handle_request:
+	sde_rotator_update_retire_sequence(request);
+	sde_rotator_retire_request(request);
+error_retired_list:
+error_validate_request:
+error_session_config:
+	devm_kfree(rot_dev->dev, req);
+error_invalid_handle:
+error_init_request:
+	sde_rot_mgr_unlock(rot_dev->mgr);
+	return ret;
+}
+EXPORT_SYMBOL(sde_rotator_inline_commit);
+
+/*
+ * sde_rotator_open - Rotator device open method.
+ * @file: Pointer to file struct.
+ */
+static int sde_rotator_open(struct file *file)
+{
+	struct sde_rotator_device *rot_dev = video_drvdata(file);
+	struct sde_rotator_ctx *ctx;
+	int ret = 0;
+
+	ctx = sde_rotator_ctx_open(rot_dev, file);
+	if (IS_ERR_OR_NULL(ctx)) {
+		SDEDEV_DBG(rot_dev->dev, "failed to open %d\n", ret);
+		ret = PTR_ERR(ctx);
+	}
+
 	return ret;
 }
 
@@ -1006,45 +1666,10 @@
  */
 static int sde_rotator_release(struct file *file)
 {
-	struct sde_rotator_device *rot_dev = video_drvdata(file);
 	struct sde_rotator_ctx *ctx =
 			sde_rotator_ctx_from_fh(file->private_data);
-	u32 session_id = ctx->session_id;
 
-	ATRACE_END(ctx->kobj.name);
-
-	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
-	mutex_lock(&rot_dev->lock);
-	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
-	SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
-	v4l2_m2m_streamoff(file, ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d w:%x\n",
-			session_id, work_busy(&ctx->submit_work));
-	cancel_work_sync(&ctx->submit_work);
-	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
-	sde_rot_mgr_lock(rot_dev->mgr);
-	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
-	sde_rot_mgr_unlock(rot_dev->mgr);
-	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d w:%x\n",
-			session_id, work_busy(&ctx->retire_work));
-	cancel_work_sync(&ctx->retire_work);
-	mutex_lock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
-	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
-	destroy_workqueue(ctx->work_queue.rot_work_queue);
-	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
-	kobject_put(&ctx->kobj);
-	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
-	v4l2_fh_del(&ctx->fh);
-	v4l2_fh_exit(&ctx->fh);
-	kfree(ctx->vbinfo_out);
-	kfree(ctx->vbinfo_cap);
-	kfree(ctx);
-	mutex_unlock(&rot_dev->lock);
-	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
-	return 0;
+	return sde_rotator_ctx_release(ctx, file);
 }
 
 /*
@@ -1109,14 +1734,30 @@
 	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_mdp_format_params *fmt;
-	u32 pixfmt;
+	u32 i, index, pixfmt;
+	bool found = false;
 
-	pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, f->index, false);
-	if (!pixfmt)
-		return -EINVAL;
+	for (i = 0, index = 0; index <= f->index; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, false);
+		if (!pixfmt)
+			return -EINVAL;
 
-	fmt = sde_get_format_params(pixfmt);
-	if (!fmt)
+		fmt = sde_get_format_params(pixfmt);
+		if (!fmt)
+			return -EINVAL;
+
+		if (sde_mdp_is_private_format(fmt))
+			continue;
+
+		if (index == f->index) {
+			found = true;
+			break;
+		}
+
+		index++;
+	}
+
+	if (!found)
 		return -EINVAL;
 
 	f->pixelformat = pixfmt;
@@ -1137,14 +1778,30 @@
 	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_mdp_format_params *fmt;
-	u32 pixfmt;
+	u32 i, index, pixfmt;
+	bool found = false;
 
-	pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, f->index, true);
-	if (!pixfmt)
-		return -EINVAL;
+	for (i = 0, index = 0; index <= f->index; i++) {
+		pixfmt = sde_rotator_get_pixfmt(rot_dev->mgr, i, true);
+		if (!pixfmt)
+			return -EINVAL;
 
-	fmt = sde_get_format_params(pixfmt);
-	if (!fmt)
+		fmt = sde_get_format_params(pixfmt);
+		if (!fmt)
+			return -EINVAL;
+
+		if (sde_mdp_is_private_format(fmt))
+			continue;
+
+		if (index == f->index) {
+			found = true;
+			break;
+		}
+
+		index++;
+	}
+
+	if (!found)
 		return -EINVAL;
 
 	f->pixelformat = pixfmt;
@@ -1516,6 +2173,7 @@
 				ctx->session_id, buf_type, ret);
 			return ret;
 		}
+		ctx->rotcfg = config;
 	}
 
 	ret = v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, buf_type);
@@ -1992,8 +2650,10 @@
 	struct vb2_v4l2_buffer *dst_buf;
 	struct sde_rotator_ctx *ctx;
 	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request;
 
-	ctx = container_of(work, struct sde_rotator_ctx, retire_work);
+	request = container_of(work, struct sde_rotator_request, retire_work);
+	ctx = request->ctx;
 
 	if (!ctx || !ctx->rot_dev) {
 		SDEROT_ERR("null context/device\n");
@@ -2008,15 +2668,16 @@
 	if (ctx->abort_pending) {
 		SDEDEV_DBG(rot_dev->dev, "abort command in retire s:%d\n",
 				ctx->session_id);
-		ctx->request = ERR_PTR(-EINTR);
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		mutex_unlock(&rot_dev->lock);
 		return;
 	}
 
-	if (rot_dev->early_submit) {
-		if (IS_ERR_OR_NULL(ctx->request)) {
+	if (!ctx->file) {
+		sde_rotator_update_retire_sequence(request);
+	} else if (rot_dev->early_submit) {
+		if (IS_ERR_OR_NULL(request->req)) {
 			/* fail pending request or something wrong */
 			SDEDEV_ERR(rot_dev->dev,
 					"pending request fail in retire s:%d\n",
@@ -2037,9 +2698,8 @@
 				src_buf, dst_buf);
 		}
 
-		ctx->request = NULL;
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 		v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
 		v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
@@ -2052,9 +2712,11 @@
  * @ctx: Pointer rotator context.
  * @src_buf: Pointer to Vb2 source buffer.
  * @dst_buf: Pointer to Vb2 destination buffer.
+ * @request: Pointer to rotator request
  */
 static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx,
-	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf)
+	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf,
+	struct sde_rotator_request *request)
 {
 	struct sde_rotator_device *rot_dev = ctx->rot_dev;
 	struct sde_rotation_item item;
@@ -2173,17 +2835,17 @@
 	}
 
 	req->retireq = ctx->work_queue.rot_work_queue;
-	req->retire_work = &ctx->retire_work;
+	req->retire_work = &request->retire_work;
 
 	ret = sde_rotator_handle_request_common(
-			rot_dev->mgr, ctx->private, req, &item);
+			rot_dev->mgr, ctx->private, req);
 	if (ret) {
 		SDEDEV_ERR(rot_dev->dev, "fail handle request\n");
 		goto error_handle_request;
 	}
 
 	sde_rotator_queue_request(rot_dev->mgr, ctx->private, req);
-	ctx->request = req;
+	request->req = req;
 
 	return 0;
 error_handle_request:
@@ -2191,7 +2853,7 @@
 error_init_request:
 error_fence_wait:
 error_null_buffer:
-	ctx->request = ERR_PTR(ret);
+	request->req = NULL;
 	return ret;
 }
 
@@ -2207,11 +2869,13 @@
 	struct sde_rotator_device *rot_dev;
 	struct vb2_v4l2_buffer *src_buf;
 	struct vb2_v4l2_buffer *dst_buf;
+	struct sde_rotator_request *request;
 	int ret;
 
-	ctx = container_of(work, struct sde_rotator_ctx, submit_work);
+	request = container_of(work, struct sde_rotator_request, submit_work);
+	ctx = request->ctx;
 
-	if (!ctx->rot_dev) {
+	if (!ctx || !ctx->rot_dev) {
 		SDEROT_ERR("null device\n");
 		return;
 	}
@@ -2223,9 +2887,8 @@
 	if (ctx->abort_pending) {
 		SDEDEV_DBG(rot_dev->dev, "abort command in submit s:%d\n",
 				ctx->session_id);
-		ctx->request = ERR_PTR(-EINTR);
-		atomic_dec(&ctx->command_pending);
-		wake_up(&ctx->wait_queue);
+		sde_rotator_update_retire_sequence(request);
+		sde_rotator_retire_request(request);
 		mutex_unlock(&rot_dev->lock);
 		return;
 	}
@@ -2235,7 +2898,7 @@
 	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 	sde_rot_mgr_lock(rot_dev->mgr);
 	ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-			&dst_buf->vb2_buf);
+			&dst_buf->vb2_buf, request);
 	sde_rot_mgr_unlock(rot_dev->mgr);
 	if (ret) {
 		SDEDEV_ERR(rot_dev->dev,
@@ -2258,6 +2921,7 @@
 	struct sde_rotator_device *rot_dev;
 	struct vb2_v4l2_buffer *src_buf;
 	struct vb2_v4l2_buffer *dst_buf;
+	struct sde_rotator_request *request;
 	int ret;
 
 	if (!ctx || !ctx->rot_dev) {
@@ -2269,8 +2933,11 @@
 	SDEDEV_DBG(rot_dev->dev, "device run s:%d\n", ctx->session_id);
 
 	if (rot_dev->early_submit) {
+		request = list_first_entry_or_null(&ctx->pending_list,
+				struct sde_rotator_request, list);
+
 		/* pending request mode, check for completion */
-		if (IS_ERR_OR_NULL(ctx->request)) {
+		if (!request || IS_ERR_OR_NULL(request->req)) {
 			/* pending request fails or something wrong. */
 			SDEDEV_ERR(rot_dev->dev,
 				"pending request fail in device run s:%d\n",
@@ -2278,19 +2945,19 @@
 			rot_dev->stats.fail_count++;
 			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
 			goto error_process_buffers;
-		} else if (!atomic_read(&ctx->request->pending_count)) {
+
+		} else if (!atomic_read(&request->req->pending_count)) {
 			/* pending request completed. signal done. */
 			int failed_count =
-				atomic_read(&ctx->request->failed_count);
+				atomic_read(&request->req->failed_count);
 			SDEDEV_DBG(rot_dev->dev,
 				"pending request completed in device run s:%d\n",
 				ctx->session_id);
 
 			/* disconnect request (will be freed by core layer) */
 			sde_rot_mgr_lock(rot_dev->mgr);
-			ctx->request->retireq = NULL;
-			ctx->request->retire_work = NULL;
-			ctx->request = NULL;
+			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
+					request->req);
 			sde_rot_mgr_unlock(rot_dev->mgr);
 
 			if (failed_count) {
@@ -2314,8 +2981,8 @@
 				goto error_process_buffers;
 			}
 
-			atomic_dec(&ctx->command_pending);
-			wake_up(&ctx->wait_queue);
+			sde_rotator_update_retire_sequence(request);
+			sde_rotator_retire_request(request);
 			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
 			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
 			v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->fh.m2m_ctx);
@@ -2327,22 +2994,33 @@
 
 			/* disconnect request (will be freed by core layer) */
 			sde_rot_mgr_lock(rot_dev->mgr);
-			ctx->request->retireq = NULL;
-			ctx->request->retire_work = NULL;
-			ctx->request = ERR_PTR(-EIO);
+			sde_rotator_req_finish(rot_dev->mgr, ctx->private,
+					request->req);
 			sde_rot_mgr_unlock(rot_dev->mgr);
 
 			goto error_process_buffers;
 		}
 	} else {
-		/* no pending request. submit buffer the usual way. */
-		atomic_inc(&ctx->command_pending);
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			SDEDEV_ERR(rot_dev->dev,
+				"no free request in device run s:%d\n",
+				ctx->session_id);
+			goto error_retired_list;
+		}
 
+		spin_lock(&ctx->list_lock);
+		list_del_init(&request->list);
+		list_add_tail(&request->list, &ctx->pending_list);
+		spin_unlock(&ctx->list_lock);
+
+		/* no pending request. submit buffer the usual way. */
 		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 		if (!src_buf || !dst_buf) {
 			SDEDEV_ERR(rot_dev->dev,
-				"null buffer in device run s:%d sb:%p db:%p\n",
+				"null buffer in device run s:%d sb:%pK db:%pK\n",
 				ctx->session_id,
 				src_buf, dst_buf);
 			goto error_empty_buffer;
@@ -2350,13 +3028,12 @@
 
 		sde_rot_mgr_lock(rot_dev->mgr);
 		ret = sde_rotator_process_buffers(ctx, &src_buf->vb2_buf,
-				&dst_buf->vb2_buf);
+				&dst_buf->vb2_buf, request);
 		sde_rot_mgr_unlock(rot_dev->mgr);
 		if (ret) {
 			SDEDEV_ERR(rot_dev->dev,
 				"fail process buffer in device run s:%d\n",
 				ctx->session_id);
-			ctx->request = ERR_PTR(ret);
 			rot_dev->stats.fail_count++;
 			ATRACE_INT("fail_count", rot_dev->stats.fail_count);
 			goto error_process_buffers;
@@ -2366,8 +3043,9 @@
 	return;
 error_process_buffers:
 error_empty_buffer:
-	atomic_dec(&ctx->command_pending);
-	wake_up(&ctx->wait_queue);
+error_retired_list:
+	sde_rotator_update_retire_sequence(request);
+	sde_rotator_retire_request(request);
 	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 	if (src_buf)
@@ -2406,6 +3084,7 @@
 {
 	struct sde_rotator_ctx *ctx = priv;
 	struct sde_rotator_device *rot_dev;
+	struct sde_rotator_request *request;
 	int ret = 0;
 
 	if (!ctx || !ctx->rot_dev) {
@@ -2416,26 +3095,43 @@
 	rot_dev = ctx->rot_dev;
 	SDEDEV_DBG(rot_dev->dev, "job ready s:%d\n", ctx->session_id);
 
+	request = list_first_entry_or_null(&ctx->pending_list,
+			struct sde_rotator_request, list);
+
 	if (!rot_dev->early_submit) {
 		/* always ready in normal mode. */
 		ret = 1;
-	} else if (IS_ERR(ctx->request)) {
+	} else if (request && IS_ERR_OR_NULL(request->req)) {
 		/* if pending request fails, forward to device run state. */
 		SDEDEV_DBG(rot_dev->dev,
 				"pending request fail in job ready s:%d\n",
 				ctx->session_id);
 		ret = 1;
-	} else if (!ctx->request) {
+	} else if (list_empty(&ctx->pending_list)) {
 		/* if no pending request, submit a new request. */
 		SDEDEV_DBG(rot_dev->dev,
 				"submit job s:%d sc:%d dc:%d p:%d\n",
 				ctx->session_id,
 				v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
 				v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx),
-				atomic_read(&ctx->command_pending));
-		atomic_inc(&ctx->command_pending);
-		queue_work(ctx->work_queue.rot_work_queue, &ctx->submit_work);
-	} else if (!atomic_read(&ctx->request->pending_count)) {
+				!list_empty(&ctx->pending_list));
+
+		request = list_first_entry_or_null(&ctx->retired_list,
+				struct sde_rotator_request, list);
+		if (!request) {
+			/* should not happen */
+			SDEDEV_ERR(rot_dev->dev,
+					"no free request in job ready s:%d\n",
+					ctx->session_id);
+		} else {
+			spin_lock(&ctx->list_lock);
+			list_del_init(&request->list);
+			list_add_tail(&request->list, &ctx->pending_list);
+			spin_unlock(&ctx->list_lock);
+			queue_work(ctx->work_queue.rot_work_queue,
+					&request->submit_work);
+		}
+	} else if (request && !atomic_read(&request->req->pending_count)) {
 		/* if pending request completed, forward to device run state */
 		SDEDEV_DBG(rot_dev->dev,
 				"pending request completed in job ready s:%d\n",
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
index a46c0b5..100ce27 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,8 @@
 #include <linux/iommu.h>
 #include <linux/dma-buf.h>
 #include <linux/msm-bus.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-fh.h>
 #include <media/v4l2-ctrls.h>
@@ -36,6 +38,9 @@
 #define SDE_ROTATOR_NUM_EVENTS		4096
 #define SDE_ROTATOR_NUM_TIMESTAMPS	SDE_ROTATOR_TS_MAX
 
+/* maximum number of outstanding requests per ctx session */
+#define SDE_ROTATOR_REQUEST_MAX		2
+
 struct sde_rotator_device;
 struct sde_rotator_ctx;
 
@@ -80,9 +85,28 @@
 };
 
 /*
+ * struct sde_rotator_request - device layer rotation request
+ * @list: list head for submit/retire list
+ * @submit_work: submit work structure
+ * @retire_work: retire work structure
+ * @request: Pointer to core layer rotator manager request
+ * @ctx: Pointer to parent context
+ * @committed: true if request committed to hardware
+ */
+struct sde_rotator_request {
+	struct list_head list;
+	struct work_struct submit_work;
+	struct work_struct retire_work;
+	struct sde_rot_entry_container *req;
+	struct sde_rotator_ctx *ctx;
+	bool committed;
+};
+
+/*
  * struct sde_rotator_ctx - Structure contains per open file handle context.
  * @kobj: kernel object of this context
  * @rot_dev: Pointer to rotator device.
+ * @file: Pointer to device file handle
  * @fh: V4l2 file handle.
  * @ctrl_handler: control handler
  * @format_cap: Current capture format.
@@ -95,22 +119,27 @@
  * @vflip: vertical flip (1-flip)
  * @rotate: rotation angle (0,90,180,270)
  * @secure: Non-secure (0) / Secure processing
- * @command_pending: Number of pending transaction in h/w
  * @abort_pending: True if abort is requested for async handling.
  * @nbuf_cap: Number of requested buffer for capture queue
  * @nbuf_out: Number of requested buffer for output queue
  * @fence_cap: Fence info for each requested capture buffer
  * @fence_out: Fence info for each requested output buffer
  * @wait_queue: Wait queue for signaling end of job
- * @submit_work: Work structure for submitting work
- * @retire_work: Work structure for retiring work
  * @work_queue: work queue for submit and retire processing
- * @request: current service request
  * @private: Pointer to session private information
+ * @slice: Pointer to system cache slice descriptor
+ * @commit_sequence_id: last committed sequence id
+ * @retired_sequence_id: last retired sequence id
+ * @list_lock: lock for pending/retired list
+ * @pending_list: list of pending request
+ * @retired_list: list of retired/free request
+ * @requests: static allocation of free requests
+ * @rotcfg: current core rotation configuration
  */
 struct sde_rotator_ctx {
 	struct kobject kobj;
 	struct sde_rotator_device *rot_dev;
+	struct file *file;
 	struct v4l2_fh fh;
 	struct v4l2_ctrl_handler ctrl_handler;
 	struct v4l2_format format_cap;
@@ -124,18 +153,22 @@
 	s32 rotate;
 	s32 secure;
 	s32 secure_camera;
-	atomic_t command_pending;
 	int abort_pending;
 	int nbuf_cap;
 	int nbuf_out;
 	struct sde_rotator_vbinfo *vbinfo_cap;
 	struct sde_rotator_vbinfo *vbinfo_out;
 	wait_queue_head_t wait_queue;
-	struct work_struct submit_work;
-	struct work_struct retire_work;
 	struct sde_rot_queue work_queue;
-	struct sde_rot_entry_container *request;
 	struct sde_rot_file_private *private;
+	struct llcc_slice_desc *slice;
+	u32 commit_sequence_id;
+	u32 retired_sequence_id;
+	spinlock_t list_lock;
+	struct list_head pending_list;
+	struct list_head retired_list;
+	struct sde_rotator_request requests[SDE_ROTATOR_REQUEST_MAX];
+	struct sde_rotation_config rotcfg;
 };
 
 /*
@@ -160,6 +193,7 @@
  * @pdev: Pointer to platform device.
  * @drvdata: Pointer to driver data.
  * @early_submit: flag enable job submission in ready state.
+ * @disable_syscache: true to disable system cache
  * @mgr: Pointer to core rotator manager.
  * @mdata: Pointer to common rotator data/resource.
  * @session_id: Next context session identifier
@@ -180,6 +214,7 @@
 	struct platform_device *pdev;
 	const void *drvdata;
 	u32 early_submit;
+	u32 disable_syscache;
 	struct sde_rot_mgr *mgr;
 	struct sde_rot_data_type *mdata;
 	u32 session_id;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index 3b36b6b..c78c513 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -31,9 +31,9 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2) },		\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_5BIT,			\
-			[C0_G_Y] = COLOR_6BIT,			\
-			[C1_B_Cb] = COLOR_5BIT,			\
+			[C2_R_Cr] = SDE_COLOR_5BIT,		\
+			[C0_G_Y] = SDE_COLOR_6BIT,		\
+			[C1_B_Cb] = SDE_COLOR_5BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -53,9 +53,9 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2) },		\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -76,10 +76,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_8BIT,		\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C3_ALPHA] = SDE_COLOR_8BIT,		\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -88,9 +88,9 @@
 		.format = (fmt),				\
 		.is_yuv = 1,					\
 		.bits = {					\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.alpha_enable = 0,				\
 		.unpack_tight = 1,				\
@@ -143,10 +143,10 @@
 		.frame_format = SDE_MDP_FMT_LINEAR,		\
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_ALPHA_1BIT,		\
-			[C2_R_Cr] = COLOR_5BIT,			\
-			[C0_G_Y] = COLOR_5BIT,			\
-			[C1_B_Cb] = COLOR_5BIT,			\
+			[C3_ALPHA] = SDE_COLOR_ALPHA_1BIT,	\
+			[C2_R_Cr] = SDE_COLOR_5BIT,		\
+			[C0_G_Y] = SDE_COLOR_5BIT,		\
+			[C1_B_Cb] = SDE_COLOR_5BIT,		\
 		},						\
 		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
 	}
@@ -166,10 +166,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_NORMAL,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_ALPHA_4BIT,		\
-			[C2_R_Cr] = COLOR_4BIT,			\
-			[C0_G_Y] = COLOR_4BIT,			\
-			[C1_B_Cb] = COLOR_4BIT,			\
+			[C3_ALPHA] = SDE_COLOR_ALPHA_4BIT,	\
+			[C2_R_Cr] = SDE_COLOR_4BIT,		\
+			[C0_G_Y] = SDE_COLOR_4BIT,		\
+			[C1_B_Cb] = SDE_COLOR_4BIT,		\
 		},						\
 		.is_ubwc = SDE_MDP_COMPRESS_NONE,		\
 	}
@@ -190,10 +190,10 @@
 		.pixel_mode = SDE_MDP_PIXEL_10BIT,		\
 		.element = { (e0), (e1), (e2), (e3) },		\
 		.bits = {					\
-			[C3_ALPHA] = COLOR_8BIT,		\
-			[C2_R_Cr] = COLOR_8BIT,			\
-			[C0_G_Y] = COLOR_8BIT,			\
-			[C1_B_Cb] = COLOR_8BIT,			\
+			[C3_ALPHA] = SDE_COLOR_8BIT,		\
+			[C2_R_Cr] = SDE_COLOR_8BIT,		\
+			[C0_G_Y] = SDE_COLOR_8BIT,		\
+			[C1_B_Cb] = SDE_COLOR_8BIT,		\
 		},						\
 		.is_ubwc = isubwc,				\
 	}
@@ -283,6 +283,240 @@
 			.tile_width = 48,
 		},
 	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
+			"SDE/RGBA_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_TILE,
+			"SDE/RGBX_1010102102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102_TILE,
+			"SDE/BGRA_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102_TILE,
+			"SDE/BGRX_1010102_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010_TILE,
+			"SDE/ARGB_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010_TILE,
+			"SDE/XRGB_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010_TILE,
+			"SDE/ABGR_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010_TILE,
+			"SDE/XBGR_2101010_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+			"Y_CRCB_H2V2_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 8,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+			"Y_CBCR_H2V2_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 8,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_ABGR_8888_TILE,
+			"SDE/ABGR_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_XRGB_8888_TILE,
+			"SDE/XRGB_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_ARGB_8888_TILE,
+			"SDE/ARGB_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_TILE,
+			"SDE/RGBA_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_TILE,
+			"SDE/RGBX_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_BGRA_8888_TILE,
+			"SDE/BGRA_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_BGRX_8888_TILE,
+			"SDE/BGRX_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
+	{
+		.mdp_format =
+			FMT_RGB_8888(SDE_PIX_FMT_XBGR_8888_TILE,
+			"SDE/XBGR_8888_TILE",
+			SDE_MDP_FMT_TILE_A5X,
+			SDE_MDP_FORMAT_FLAG_PRIVATE,
+			0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+			SDE_MDP_COMPRESS_NONE),
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 16,
+		},
+	},
 };
 
 static struct sde_mdp_format_params sde_mdp_format_map[] = {
@@ -543,3 +777,93 @@
 
 	return 0;
 }
+
+/*
+ * sde_rot_get_tilea5x_pixfmt - get base a5x tile format of given source format
+ * @src_pixfmt: source pixel format to be converted
+ * @dst_pixfmt: pointer to base a5x tile pixel format
+ * return: 0 if success; error code otherwise
+ */
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	int rc = 0;
+
+	if (!dst_pixfmt) {
+		SDEROT_ERR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	switch (src_pixfmt) {
+	case SDE_PIX_FMT_Y_CBCR_H2V2:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CRCB_H2V2:
+	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_8888:
+	case SDE_PIX_FMT_RGBA_8888_UBWC:
+	case SDE_PIX_FMT_RGBA_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBA_8888_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_8888:
+	case SDE_PIX_FMT_RGBX_8888_UBWC:
+	case SDE_PIX_FMT_RGBX_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_RGBX_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_8888:
+	case SDE_PIX_FMT_ARGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_8888:
+	case SDE_PIX_FMT_XRGB_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_8888:
+	case SDE_PIX_FMT_ABGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_8888:
+	case SDE_PIX_FMT_XBGR_8888_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
+		break;
+	case SDE_PIX_FMT_ARGB_2101010:
+	case SDE_PIX_FMT_ARGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XRGB_2101010:
+	case SDE_PIX_FMT_XRGB_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_ABGR_2101010:
+	case SDE_PIX_FMT_ABGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010:
+	case SDE_PIX_FMT_XBGR_2101010_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102:
+	case SDE_PIX_FMT_BGRA_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102:
+	case SDE_PIX_FMT_BGRX_1010102_TILE:
+		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
+		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+		break;
+	default:
+		SDEROT_ERR("invalid src pixel format %c%c%c%c\n",
+				src_pixfmt >> 0, src_pixfmt >> 8,
+				src_pixfmt >> 16, src_pixfmt >> 24);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
index bdd16a9..5bb6198 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
@@ -17,6 +17,27 @@
 #include <linux/types.h>
 #include <media/msm_sde_rotator.h>
 
+/* Internal rotator pixel formats */
+#define SDE_PIX_FMT_RGBA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '0')
+#define SDE_PIX_FMT_RGBX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '1')
+#define SDE_PIX_FMT_BGRA_8888_TILE	v4l2_fourcc('Q', 'T', '0', '2')
+#define SDE_PIX_FMT_BGRX_8888_TILE	v4l2_fourcc('Q', 'T', '0', '3')
+#define SDE_PIX_FMT_ARGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '4')
+#define SDE_PIX_FMT_XRGB_8888_TILE	v4l2_fourcc('Q', 'T', '0', '5')
+#define SDE_PIX_FMT_ABGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '6')
+#define SDE_PIX_FMT_XBGR_8888_TILE	v4l2_fourcc('Q', 'T', '0', '7')
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '8')
+#define SDE_PIX_FMT_Y_CRCB_H2V2_TILE	v4l2_fourcc('Q', 'T', '0', '9')
+#define SDE_PIX_FMT_ARGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'A')
+#define SDE_PIX_FMT_XRGB_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'B')
+#define SDE_PIX_FMT_ABGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'C')
+#define SDE_PIX_FMT_XBGR_2101010_TILE	v4l2_fourcc('Q', 'T', '0', 'D')
+#define SDE_PIX_FMT_BGRA_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'E')
+#define SDE_PIX_FMT_BGRX_1010102_TILE	v4l2_fourcc('Q', 'T', '0', 'F')
+#define SDE_PIX_FMT_RGBA_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '0')
+#define SDE_PIX_FMT_RGBX_1010102_TILE	v4l2_fourcc('Q', 'T', '1', '1')
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE	v4l2_fourcc('Q', 'T', '1', '2')
+
 #define SDE_ROT_MAX_PLANES		4
 
 #define UBWC_META_MACRO_W_H		16
@@ -27,12 +48,12 @@
  * expected by the HW programming.
  */
 enum {
-	COLOR_4BIT,
-	COLOR_5BIT,
-	COLOR_6BIT,
-	COLOR_8BIT,
-	COLOR_ALPHA_1BIT = 0,
-	COLOR_ALPHA_4BIT = 1,
+	SDE_COLOR_4BIT,
+	SDE_COLOR_5BIT,
+	SDE_COLOR_6BIT,
+	SDE_COLOR_8BIT,
+	SDE_COLOR_ALPHA_1BIT = 0,
+	SDE_COLOR_ALPHA_4BIT = 1,
 };
 
 #define C3_ALPHA	3	/* alpha */
@@ -69,6 +90,10 @@
 	SDE_MDP_CHROMA_420
 };
 
+enum sde_mdp_format_flag_type {
+	SDE_MDP_FORMAT_FLAG_PRIVATE = BIT(0)
+};
+
 struct sde_mdp_format_params {
 	u32 format;
 	const char *description;
@@ -104,6 +129,8 @@
 
 int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
 
+int sde_rot_get_base_tilea5x_pixfmt(u32 src_pixfmt, u32 *dst_pixfmt);
+
 static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
 {
 	return fmt && (fmt->frame_format == SDE_MDP_FMT_TILE_A4X);
@@ -158,4 +185,19 @@
 {
 	return fmt && fmt->is_yuv;
 }
+
+static inline bool sde_mdp_is_rgb_format(struct sde_mdp_format_params *fmt)
+{
+	return !sde_mdp_is_yuv_format(fmt);
+}
+
+static inline bool sde_mdp_is_private_format(struct sde_mdp_format_params *fmt)
+{
+	return fmt && (fmt->flag & SDE_MDP_FORMAT_FLAG_PRIVATE);
+}
+
+static inline int sde_mdp_format_blk_size(struct sde_mdp_format_params *fmt)
+{
+	return sde_mdp_is_tp10_format(fmt) ? 96 : 128;
+}
 #endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
new file mode 100644
index 0000000..ec89785
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_INLINE_H__
+#define __SDE_ROTATOR_INLINE_H__
+
+#include <linux/types.h>
+#include <linux/dma-buf.h>
+#include <linux/platform_device.h>
+
+#include "sde_rotator_formats.h"
+
+#define SDE_ROTATOR_INLINE_PLANE_MAX	4
+
+/*
+ * enum sde_rotator_inline_cmd_type - inline rotator command stages
+ * @SDE_ROTATOR_INLINE_CMD_VALIDATE: validate command only
+ * @SDE_ROTATOR_INLINE_CMD_COMMIT: commit command to hardware
+ * @SDE_ROTATOR_INLINE_CMD_CLEANUP: cleanup after commit is done
+ */
+enum sde_rotator_inline_cmd_type {
+	SDE_ROTATOR_INLINE_CMD_VALIDATE,
+	SDE_ROTATOR_INLINE_CMD_COMMIT,
+	SDE_ROTATOR_INLINE_CMD_CLEANUP,
+};
+
+/**
+ * sde_rotator_inline_cmd - inline rotation command
+ * @sequence_id: unique command sequence identifier
+ * @video_mode: true if video interface is connected
+ * @fps: frame rate in frame-per-second
+ * @rot90: rotate 90 counterclockwise
+ * @hflip: horizontal flip prior to rotation
+ * @vflip: vertical flip prior to rotation
+ * @secure: true if buffer is in secure domain
+ * @prefill_bw: prefill bandwidth in Bps
+ * @clkrate: clock rate in Hz
+ * @data_bw: data bus bandwidth in Bps
+ * @src_addr: source i/o buffer virtual address
+ * @src_len: source i/o buffer length
+ * @src_planes: source plane number
+ * @src_pixfmt: v4l2 fourcc pixel format of source buffer
+ * @src_width: width of source buffer
+ * @src_height: height of source buffer
+ * @src_rect_x: roi x coordinate of source buffer
+ * @src_rect_y: roi y coordinate of source buffer
+ * @src_rect_w: roi width of source buffer
+ * @src_rect_h: roi height of source buffer
+ * @dst_addr: destination i/o virtual buffer address
+ * @dst_len: destination i/o buffer length
+ * @dst_planes: destination plane number
+ * @dst_pixfmt: v4l2 fourcc pixel format of destination buffer
+ * @dst_rect_x: roi x coordinate of destination buffer
+ * @dst_rect_y: roi y coordinate of destination buffer
+ * @dst_rect_w: roi width of destination buffer
+ * @dst_rect_h: roi height of destination buffer
+ * @dst_writeback: true if cache writeback is required
+ * @priv_handle: private handle of rotator session
+ */
+struct sde_rotator_inline_cmd {
+	u32 sequence_id;
+	bool video_mode;
+	u32 fps;
+	bool rot90;
+	bool hflip;
+	bool vflip;
+	bool secure;
+	u64 prefill_bw;
+	u64 clkrate;
+	u64 data_bw;
+	dma_addr_t src_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 src_planes;
+	u32 src_pixfmt;
+	u32 src_width;
+	u32 src_height;
+	u32 src_rect_x;
+	u32 src_rect_y;
+	u32 src_rect_w;
+	u32 src_rect_h;
+	dma_addr_t dst_addr[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_len[SDE_ROTATOR_INLINE_PLANE_MAX];
+	u32 dst_planes;
+	u32 dst_pixfmt;
+	u32 dst_rect_x;
+	u32 dst_rect_y;
+	u32 dst_rect_w;
+	u32 dst_rect_h;
+	bool dst_writeback;
+	void *priv_handle;
+};
+
+void *sde_rotator_inline_open(struct platform_device *pdev);
+int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
+		u32 src_pixfmt, u32 *dst_pixfmt);
+int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
+		char *downscale_caps, int len);
+int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
+		bool input, u32 *pixfmt, int len);
+int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
+		enum sde_rotator_inline_cmd_type cmd_type);
+int sde_rotator_inline_release(void *handle);
+
+#endif /* __SDE_ROTATOR_INLINE_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
index 2fb4669..9f4a854 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
@@ -144,7 +144,7 @@
 	struct sde_mdp_hw_resource *mdp_hw;
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	int pipe_ndx, offset = ctl_id;
-	int ret;
+	int ret = 0;
 
 	mdp_hw = devm_kzalloc(&mgr->pdev->dev,
 			sizeof(struct sde_mdp_hw_resource), GFP_KERNEL);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
index fef4a85..74f3628 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@
 
 int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe)
 {
-	u32 flush_bits;
+	u32 flush_bits = 0;
 
 	if (pipe->type == SDE_MDP_PIPE_TYPE_DMA)
 		flush_bits |= BIT(pipe->num) << 5;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 0512083..4278b6d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -41,17 +41,23 @@
 /* traffic shaping clock ticks = finish_time x 19.2MHz */
 #define TRAFFIC_SHAPE_CLKTICK_14MS   268800
 #define TRAFFIC_SHAPE_CLKTICK_12MS   230400
+#define TRAFFIC_SHAPE_VSYNC_CLK      19200000
 
 /* XIN mapping */
 #define XIN_SSPP		0
 #define XIN_WRITEBACK		1
 
 /* wait for at most 2 vsync for lowest refresh rate (24hz) */
-#define KOFF_TIMEOUT msecs_to_jiffies(42 * 32)
+#define KOFF_TIMEOUT		(42 * 32)
+
+/* default stream buffer headroom in lines */
+#define DEFAULT_SBUF_HEADROOM	20
 
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
 	do { \
+		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGWRITE | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (data); \
@@ -59,6 +65,8 @@
 
 #define SDE_REGDMA_MODIFY(p, off, mask, data) \
 	do { \
+		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
+				(u32)(data));\
 		*p++ = REGDMA_OP_REGMODIFY | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (mask); \
@@ -67,6 +75,8 @@
 
 #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
 	do { \
+		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
+				(u32)(len));\
 		*p++ = REGDMA_OP_BLKWRITE_INC | \
 			((off) & REGDMA_ADDR_OFFSET_MASK); \
 		*p++ = (len); \
@@ -74,18 +84,23 @@
 
 #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
 	do { \
+		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
 		*(p) = (data); \
 		(p)++; \
 	} while (0)
 
 /* Macro for directly accessing mapped registers */
 #define SDE_ROTREG_WRITE(base, off, data) \
-	writel_relaxed(data, (base + (off)))
+	do { \
+		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
+				, (u32)(data));\
+		writel_relaxed(data, (base + (off))); \
+	} while (0)
 
 #define SDE_ROTREG_READ(base, off) \
 	readl_relaxed(base + (off))
 
-static u32 sde_hw_rotator_input_pixfmts[] = {
+static u32 sde_hw_rotator_v3_inpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -145,7 +160,7 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
-static u32 sde_hw_rotator_output_pixfmts[] = {
+static u32 sde_hw_rotator_v3_outpixfmts[] = {
 	SDE_PIX_FMT_XRGB_8888,
 	SDE_PIX_FMT_ARGB_8888,
 	SDE_PIX_FMT_ABGR_8888,
@@ -205,6 +220,162 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
 };
 
+static u32 sde_hw_rotator_v4_inpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	SDE_PIX_FMT_Y_CB_CR_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_H2V2,
+	SDE_PIX_FMT_Y_CR_CB_GH2V2,
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	SDE_PIX_FMT_YCBYCR_H2V1,
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	SDE_PIX_FMT_ARGB_2101010,
+	SDE_PIX_FMT_XRGB_2101010,
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	SDE_PIX_FMT_ABGR_2101010,
+	SDE_PIX_FMT_XBGR_2101010,
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
+static u32 sde_hw_rotator_v4_outpixfmts[] = {
+	SDE_PIX_FMT_XRGB_8888,
+	SDE_PIX_FMT_ARGB_8888,
+	SDE_PIX_FMT_ABGR_8888,
+	SDE_PIX_FMT_RGBA_8888,
+	SDE_PIX_FMT_BGRA_8888,
+	SDE_PIX_FMT_RGBX_8888,
+	SDE_PIX_FMT_BGRX_8888,
+	SDE_PIX_FMT_XBGR_8888,
+	SDE_PIX_FMT_RGBA_5551,
+	SDE_PIX_FMT_ARGB_1555,
+	SDE_PIX_FMT_ABGR_1555,
+	SDE_PIX_FMT_BGRA_5551,
+	SDE_PIX_FMT_BGRX_5551,
+	SDE_PIX_FMT_RGBX_5551,
+	SDE_PIX_FMT_XBGR_1555,
+	SDE_PIX_FMT_XRGB_1555,
+	SDE_PIX_FMT_ARGB_4444,
+	SDE_PIX_FMT_RGBA_4444,
+	SDE_PIX_FMT_BGRA_4444,
+	SDE_PIX_FMT_ABGR_4444,
+	SDE_PIX_FMT_RGBX_4444,
+	SDE_PIX_FMT_XRGB_4444,
+	SDE_PIX_FMT_BGRX_4444,
+	SDE_PIX_FMT_XBGR_4444,
+	SDE_PIX_FMT_RGB_888,
+	SDE_PIX_FMT_BGR_888,
+	SDE_PIX_FMT_RGB_565,
+	SDE_PIX_FMT_BGR_565,
+	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
+	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
+	SDE_PIX_FMT_Y_CBCR_H2V2,
+	SDE_PIX_FMT_Y_CRCB_H2V2,
+	SDE_PIX_FMT_Y_CBCR_H1V2,
+	SDE_PIX_FMT_Y_CRCB_H1V2,
+	SDE_PIX_FMT_Y_CBCR_H2V1,
+	SDE_PIX_FMT_Y_CRCB_H2V1,
+	/* SDE_PIX_FMT_YCBYCR_H2V1 */
+	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+	SDE_PIX_FMT_RGBA_8888_UBWC,
+	SDE_PIX_FMT_RGBX_8888_UBWC,
+	SDE_PIX_FMT_RGB_565_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+	SDE_PIX_FMT_RGBA_1010102,
+	SDE_PIX_FMT_RGBX_1010102,
+	/* SDE_PIX_FMT_ARGB_2101010 */
+	/* SDE_PIX_FMT_XRGB_2101010 */
+	SDE_PIX_FMT_BGRA_1010102,
+	SDE_PIX_FMT_BGRX_1010102,
+	/* SDE_PIX_FMT_ABGR_2101010 */
+	/* SDE_PIX_FMT_XBGR_2101010 */
+	SDE_PIX_FMT_RGBA_1010102_UBWC,
+	SDE_PIX_FMT_RGBX_1010102_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
+	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
+	SDE_PIX_FMT_XRGB_8888_TILE,
+	SDE_PIX_FMT_ARGB_8888_TILE,
+	SDE_PIX_FMT_ABGR_8888_TILE,
+	SDE_PIX_FMT_XBGR_8888_TILE,
+	SDE_PIX_FMT_RGBA_8888_TILE,
+	SDE_PIX_FMT_BGRA_8888_TILE,
+	SDE_PIX_FMT_RGBX_8888_TILE,
+	SDE_PIX_FMT_BGRX_8888_TILE,
+	SDE_PIX_FMT_RGBA_1010102_TILE,
+	SDE_PIX_FMT_RGBX_1010102_TILE,
+	SDE_PIX_FMT_ARGB_2101010_TILE,
+	SDE_PIX_FMT_XRGB_2101010_TILE,
+	SDE_PIX_FMT_BGRA_1010102_TILE,
+	SDE_PIX_FMT_BGRX_1010102_TILE,
+	SDE_PIX_FMT_ABGR_2101010_TILE,
+	SDE_PIX_FMT_XBGR_2101010_TILE,
+};
+
 static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
 	{0x214, 0x21c, 16, 1, 0x10}, /* arb clients */
 	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
@@ -284,6 +455,30 @@
 }
 
 /**
+ * sde_hw_rotator_update_swts - update software timestamp with given value
+ * @rot: Pointer to hw rotator
+ * @ctx: Pointer to rotator contxt
+ * @swts: new software timestamp
+ * @return: new combined swts
+ */
+static u32 sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
+		struct sde_hw_rotator_context *ctx, u32 swts)
+{
+	u32 mask = SDE_REGDMA_SWTS_MASK;
+
+	swts &= SDE_REGDMA_SWTS_MASK;
+	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY) {
+		swts <<= SDE_REGDMA_SWTS_SHIFT;
+		mask <<= SDE_REGDMA_SWTS_SHIFT;
+	}
+
+	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
+	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
+
+	return swts;
+}
+
+/**
  * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
  *				Also, clear rotator/regdma irq status.
  * @rot: Pointer to hw rotator
@@ -376,6 +571,13 @@
 	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
 		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
 		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
+
+	SDEROT_ERR(
+		"sbuf_status_plane0 = %x, sbuf_status_plane1 = %x\n",
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE0),
+		SDE_ROTREG_READ(rot->mdss_base,
+			ROT_WB_SBUF_STATUS_PLANE1));
 }
 
 /**
@@ -540,6 +742,17 @@
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
+	/*
+	 * initialize start control trigger selection first
+	 */
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (ctx->sbuf_mode)
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
+					ctx->start_ctrl);
+		else
+			SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
+	}
+
 	/* source image setup */
 	if ((flags & SDE_ROT_FLAG_DEINTERLACE)
 			&& !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
@@ -618,6 +831,9 @@
 	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
 		src_format |= BIT(14); /* UNPACK_DX_FORMAT */
 
+	if (rot->solid_fill)
+		src_format |= BIT(22); /* SOLID_FILL */
+
 	/* SRC_FORMAT */
 	SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
 
@@ -652,6 +868,10 @@
 			fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
 	}
 
+	if (rot->solid_fill)
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
+				rot->constant_color);
+
 	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
 			fetch_blocksize |
 			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
@@ -697,6 +917,7 @@
 		struct sde_hw_rot_wb_cfg *cfg,
 		u32 flags)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	struct sde_mdp_format_params *fmt;
 	u32 *wrptr;
 	u32 pack = 0;
@@ -784,17 +1005,19 @@
 			cfg->v_downscale_factor |
 			(cfg->h_downscale_factor << 16));
 
-	/* write config setup for bank configration */
+	/* write config setup for bank configuration */
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
 			(ctx->rot->highest_bank & 0x3) << 8);
 
-	if (flags & SDE_ROT_FLAG_ROT_90)
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x3);
-	else
-		SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 0x1);
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
+				ctx->sys_cache_mode);
 
-	/* setup traffic shaper for 4k 30fps content */
-	if (ctx->is_traffic_shaping) {
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
+			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
+
+	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
+	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
 		u32 bw;
 
 		/*
@@ -813,10 +1036,16 @@
 			bw *= fmt->bpp;
 
 		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
+
+		/* use prefill bandwidth instead if specified */
+		if (cfg->prefill_bw)
+			bw = DIV_ROUND_UP(cfg->prefill_bw,
+					TRAFFIC_SHAPE_VSYNC_CLK);
+
 		if (bw > 0xFF)
 			bw = 0xFF;
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
-				BIT(31) | bw);
+				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
 		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
 	} else {
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
@@ -852,7 +1081,7 @@
 		sde_hw_rotator_enable_irq(rot);
 	}
 
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
 
 	/* Update command queue write ptr */
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
@@ -942,13 +1171,15 @@
 	u32  enableInt;
 	u32  swts = 0;
 	u32  mask = 0;
+	u32  trig_sel;
 
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
 	/*
 	 * Last ROT command must be ROT_START before REGDMA start
 	 */
-	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
+	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
+
 	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
 
 	/*
@@ -959,6 +1190,8 @@
 	offset = (u32)(ctx->regdma_base - (u32 *)(rot->mdss_base +
 				REGDMA_RAM_REGDMA_CMD_RAM));
 	enableInt = ((ctx->timestamp & 1) + 1) << 30;
+	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
+			REGDMA_CMD_TRIG_SEL_SW_START;
 
 	SDEROT_DBG(
 		"regdma(%d)[%d] <== INT:0x%X|length:%ld|offset:0x%X, ts:%X\n",
@@ -972,34 +1205,39 @@
 	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp;
 		mask = ~SDE_REGDMA_SWTS_MASK;
 	} else {
 		SDE_ROTREG_WRITE(rot->mdss_base,
 				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				(length << 14) | offset);
+				(ctx->sbuf_mode ? enableInt : 0) | trig_sel |
+				((length & 0x3ff) << 14) | offset);
 		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
 		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
 	}
 
-	/* Write timestamp after previous rotator job finished */
-	sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
-	offset += length;
-	ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
-	WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
+	/* timestamp update can only be used in offline multi-context mode */
+	if (!ctx->sbuf_mode) {
+		/* Write timestamp after previous rotator job finished */
+		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
+		offset += length;
+		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
+		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
 
-	/* ensure command packet is issue before the submit command */
-	wmb();
+		/* ensure command packet is issue before the submit command */
+		wmb();
 
-	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
-	} else {
-		SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
-				enableInt | (ts_length << 14) | offset);
+		if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		} else {
+			SDE_ROTREG_WRITE(rot->mdss_base,
+					REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
+					enableInt | (ts_length << 14) | offset);
+		}
 	}
 
 	/* Update command queue write ptr */
@@ -1027,7 +1265,7 @@
 	if (rot->irq_num >= 0) {
 		SDEROT_DBG("Wait for Rotator completion\n");
 		rc = wait_for_completion_timeout(&ctx->rot_comp,
-					KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
 		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
@@ -1098,7 +1336,7 @@
 				ctx, ctx->timestamp);
 		rc = wait_event_timeout(ctx->regdma_waitq,
 				!sde_hw_rotator_pending_swts(rot, ctx, &swts),
-				KOFF_TIMEOUT);
+				msecs_to_jiffies(rot->koff_timeout));
 
 		ATRACE_INT("sde_rot_done", 0);
 		spin_lock_irqsave(&rot->rotisr_lock, flags);
@@ -1506,13 +1744,15 @@
  * @rot: Pointer to rotator hw
  * @hw: Pointer to rotator resource
  * @session_id: Session identifier of this context
+ * @sbuf_mode: true if stream buffer is requested
  *
  * This function allocates a new rotator context for the given session id.
  */
 static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
 		struct sde_hw_rotator *rot,
 		struct sde_rot_hw_resource *hw,
-		u32    session_id)
+		u32    session_id,
+		bool   sbuf_mode)
 {
 	struct sde_hw_rotator_context *ctx;
 
@@ -1530,6 +1770,8 @@
 	ctx->timestamp  = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
 	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
 	ctx->is_secure  = false;
+	ctx->sbuf_mode  = sbuf_mode;
+	INIT_LIST_HEAD(&ctx->list);
 
 	ctx->regdma_base  = rot->cmd_wr_ptr[ctx->q_id]
 		[sde_hw_rotator_get_regdma_ctxidx(ctx)];
@@ -1547,10 +1789,11 @@
 	sde_hw_rotator_put_ctx(ctx);
 
 	SDEROT_DBG(
-		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"New rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	return ctx;
 }
@@ -1567,10 +1810,11 @@
 		return;
 
 	SDEROT_DBG(
-		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d\n",
+		"Free rot CTX:%p, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
 		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
 		ctx->q_id, ctx->timestamp,
-		atomic_read(&ctx->hwres->num_active));
+		atomic_read(&ctx->hwres->num_active),
+		ctx->sbuf_mode);
 
 	/* Clear rotator context from lookup purpose */
 	sde_hw_rotator_clr_ctx(ctx);
@@ -1599,6 +1843,7 @@
 	u32 safe_lut = 0;	/* applicable for realtime client only */
 	u32 flags = 0;
 	struct sde_rotation_item *item;
+	int ret;
 
 	if (!hw || !entry) {
 		SDEROT_ERR("null hw resource/entry\n");
@@ -1609,12 +1854,65 @@
 	rot = resinfo->rot;
 	item = &entry->item;
 
-	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id);
+	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
+			item->output.sbuf);
 	if (!ctx) {
 		SDEROT_ERR("Failed allocating rotator context!!\n");
 		return -EINVAL;
 	}
 
+	/* save entry for debugging purposes */
+	ctx->last_entry = entry;
+
+	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		if (entry->dst_buf.sbuf) {
+			u32 op_mode;
+
+			if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_COMMAND)
+				ctx->start_ctrl = (rot->cmd_trigger << 4);
+			else if (entry->item.trigger ==
+					SDE_ROTATOR_TRIGGER_VIDEO)
+				ctx->start_ctrl = (rot->vid_trigger << 4);
+			else
+				ctx->start_ctrl = 0;
+
+			ctx->sys_cache_mode = BIT(15) |
+					((item->output.scid & 0x1f) << 8) |
+					(item->output.writeback ? 0x5 : 0);
+
+			ctx->op_mode = BIT(4) |
+				((ctx->rot->sbuf_headroom & 0xff) << 8);
+
+			/* detect transition to inline mode */
+			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
+					ROTTOP_OP_MODE) >> 4) & 0x3;
+			if (!op_mode) {
+				u32 status;
+
+				status = SDE_ROTREG_READ(rot->mdss_base,
+						ROTTOP_STATUS);
+				if (status & BIT(0)) {
+					SDEROT_ERR("rotator busy 0x%x\n",
+							status);
+					sde_hw_rotator_dump_status(rot);
+					SDEROT_EVTLOG_TOUT_HANDLER("rot",
+							"vbif_dbg_bus",
+							"panic");
+				}
+			}
+
+		} else {
+			ctx->start_ctrl = BIT(0);
+			ctx->sys_cache_mode = 0;
+			ctx->op_mode = 0;
+		}
+	} else  {
+		ctx->start_ctrl = BIT(0);
+	}
+
+	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
+
 	if (rot->reset_hw_ts) {
 		SDEROT_EVTLOG(rot->last_hw_ts);
 		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
@@ -1645,7 +1943,8 @@
 	sspp_cfg.fmt = sde_get_format_params(item->input.format);
 	if (!sspp_cfg.fmt) {
 		SDEROT_ERR("null format\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto error;
 	}
 	sspp_cfg.src_rect = &item->src_rect;
 	sspp_cfg.data = &entry->src_buf;
@@ -1673,6 +1972,7 @@
 
 	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
 	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
+	wb_cfg.prefill_bw = item->prefill_bw;
 
 	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
 
@@ -1778,6 +2078,10 @@
 			BIT(XIN_WRITEBACK));
 
 	return 0;
+
+error:
+	sde_hw_rotator_free_rotctx(rot, ctx);
+	return ret;
 }
 
 /*
@@ -1887,6 +2191,7 @@
 
 	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
 
+	/* features exposed via rotator top h/w version */
 	if (hw_version != SDE_ROT_TYPE_V1_0) {
 		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
 		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE,  mdata->sde_caps_map);
@@ -1901,6 +2206,28 @@
 	mdata->regdump = sde_rot_r3_regdump;
 	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
 	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
+
+	/* features exposed via mdss h/w version */
+	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
+		SDEROT_DBG("Supporting sys cache inline rotation\n");
+		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
+		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
+		rot->downscale_caps =
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	} else {
+		rot->inpixfmts = sde_hw_rotator_v3_inpixfmts;
+		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
+		rot->outpixfmts = sde_hw_rotator_v3_outpixfmts;
+		rot->num_outpixfmt = ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
+		rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
+			"LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
+			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
+	}
+
 	return 0;
 }
 
@@ -1989,6 +2316,23 @@
 			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
 			goto done_isr_handle;
 		}
+
+		/*
+		 * Timestamp packet is not available in sbuf mode.
+		 * Simulate timestamp update in the handler instead.
+		 */
+		if (!list_empty(&rot->sbuf_ctx[q_id])) {
+			ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id],
+					struct sde_hw_rotator_context, list);
+			if (ctx) {
+				ts = ctx->timestamp;
+				sde_hw_rotator_update_swts(rot, ctx, ts);
+				SDEROT_DBG("update swts:0x%X\n", ts);
+			} else {
+				SDEROT_ERR("invalid swts ctx\n");
+			}
+		}
+
 		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
 
 		/*
@@ -2076,6 +2420,12 @@
 	entry->dnsc_factor_w = 0;
 	entry->dnsc_factor_h = 0;
 
+	if (item->output.sbuf &&
+			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
+		SDEROT_ERR("stream buffer not supported\n");
+		return -EINVAL;
+	}
+
 	if ((src_w != dst_w) || (src_h != dst_h)) {
 		if ((src_w % dst_w) || (src_h % dst_h)) {
 			SDEROT_DBG("non integral scale not support\n");
@@ -2183,6 +2533,9 @@
 
 	SPRINT("downscale_compression=1\n");
 
+	if (hw_data->downscale_caps)
+		SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
+
 #undef SPRINT
 	return cnt;
 }
@@ -2253,14 +2606,23 @@
 static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
 		int index, bool input)
 {
+	struct sde_hw_rotator *rot;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return 0;
+	}
+
+	rot = mgr->hw_data;
+
 	if (input) {
-		if (index < ARRAY_SIZE(sde_hw_rotator_input_pixfmts))
-			return sde_hw_rotator_input_pixfmts[index];
+		if ((index < rot->num_inpixfmt) && rot->inpixfmts)
+			return rot->inpixfmts[index];
 		else
 			return 0;
 	} else {
-		if (index < ARRAY_SIZE(sde_hw_rotator_output_pixfmts))
-			return sde_hw_rotator_output_pixfmts[index];
+		if ((index < rot->num_outpixfmt) && rot->outpixfmts)
+			return rot->outpixfmts[index];
 		else
 			return 0;
 	}
@@ -2275,22 +2637,69 @@
 static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
 		bool input)
 {
+	struct sde_hw_rotator *rot;
+	u32 *pixfmts;
+	u32 num_pixfmt;
 	int i;
 
-	if (input) {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_input_pixfmts); i++)
-			if (sde_hw_rotator_input_pixfmts[i] == pixfmt)
-				return true;
-	} else {
-		for (i = 0; i < ARRAY_SIZE(sde_hw_rotator_output_pixfmts); i++)
-			if (sde_hw_rotator_output_pixfmts[i] == pixfmt)
-				return true;
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return false;
 	}
 
+	rot = mgr->hw_data;
+
+	if (input) {
+		pixfmts = rot->inpixfmts;
+		num_pixfmt = rot->num_inpixfmt;
+	} else {
+		pixfmts = rot->outpixfmts;
+		num_pixfmt = rot->num_outpixfmt;
+	}
+
+	if (!pixfmts || !num_pixfmt) {
+		SDEROT_ERR("invalid pixel format tables\n");
+		return false;
+	}
+
+	for (i = 0; i < num_pixfmt; i++)
+		if (pixfmts[i] == pixfmt)
+			return true;
+
 	return false;
 }
 
 /*
+ * sde_hw_rotator_get_downscale_caps - get scaling capability string
+ * @mgr: Pointer to rotator manager
+ * @caps: Pointer to capability string buffer; NULL to return maximum length
+ * @len: length of capability string buffer
+ * return: length of capability string
+ */
+static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
+		char *caps, int len)
+{
+	struct sde_hw_rotator *rot;
+	int rc = 0;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	rot = mgr->hw_data;
+
+	if (rot->downscale_caps) {
+		if (caps)
+			rc = snprintf(caps, len, "%s", rot->downscale_caps);
+		else
+			rc = strlen(rot->downscale_caps);
+	}
+
+	return rc;
+}
+
+/*
  * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  * @hw_data: Pointer to rotator hw
  * @dev: Pointer to platform device
@@ -2329,6 +2738,16 @@
 		hw_data->highest_bank = data;
 	}
 
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-sbuf-headroom", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
+	} else {
+		SDEROT_DBG("set sbuf headroom to %d\n", data);
+		hw_data->sbuf_headroom = data;
+	}
+
 	return ret;
 }
 
@@ -2356,6 +2775,9 @@
 
 	rot->mdss_base = mdata->sde_io.base;
 	rot->pdev      = mgr->pdev;
+	rot->koff_timeout = KOFF_TIMEOUT;
+	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
+	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
 
 	/* Assign ops */
 	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
@@ -2372,6 +2794,7 @@
 	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
 	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
 	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
+	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
 
 	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
 	if (ret)
@@ -2425,8 +2848,10 @@
 					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX));
 	}
 
-	atomic_set(&rot->timestamp[0], 0);
-	atomic_set(&rot->timestamp[1], 0);
+	for (i = 0; i < ROT_QUEUE_MAX; i++) {
+		atomic_set(&rot->timestamp[i], 0);
+		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
+	}
 
 	ret = sde_rotator_hw_rev_init(rot);
 	if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
index 987e61c..da67527 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -44,5 +44,41 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u32("koff_timeout", 0644,
+			debugfs_root, &hw_data->koff_timeout)) {
+		SDEROT_ERR("fail create koff_timeout\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("vid_trigger", 0644,
+			debugfs_root, &hw_data->vid_trigger)) {
+		SDEROT_ERR("fail create vid_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("cmd_trigger", 0644,
+			debugfs_root, &hw_data->cmd_trigger)) {
+		SDEROT_ERR("fail create cmd_trigger\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("sbuf_headroom", 0644,
+			debugfs_root, &hw_data->sbuf_headroom)) {
+		SDEROT_ERR("fail create sbuf_headroom\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("solid_fill", 0644,
+			debugfs_root, &hw_data->solid_fill)) {
+		SDEROT_ERR("fail create solid_fill\n");
+		return -EINVAL;
+	}
+
+	if (!debugfs_create_u32("constant_color", 0644,
+			debugfs_root, &hw_data->constant_color)) {
+		SDEROT_ERR("fail create constant_color\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index fedade1..f86f54b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -45,6 +45,11 @@
 #define ROTTOP_ROT_UBWC_DEC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x58)
 #define ROTTOP_ROT_UBWC_ENC_VERSION             (SDE_ROT_ROTTOP_OFFSET+0x5C)
 
+#define ROTTOP_START_CTRL_TRIG_SEL_SW           0
+#define ROTTOP_START_CTRL_TRIG_SEL_DONE         1
+#define ROTTOP_START_CTRL_TRIG_SEL_REGDMA       2
+#define ROTTOP_START_CTRL_TRIG_SEL_MDP          3
+
 /* SDE_ROT_SSPP:
  * OFFSET=0x0A8900
  */
@@ -160,6 +165,10 @@
 #define ROT_WB_SAFE_LUT                         (SDE_ROT_WB_OFFSET+0x088)
 #define ROT_WB_CREQ_LUT                         (SDE_ROT_WB_OFFSET+0x08C)
 #define ROT_WB_QOS_CTRL                         (SDE_ROT_WB_OFFSET+0x090)
+#define ROT_WB_SYS_CACHE_MODE                   (SDE_ROT_WB_OFFSET+0x094)
+#define ROT_WB_UBWC_STATIC_CTRL                 (SDE_ROT_WB_OFFSET+0x144)
+#define ROT_WB_SBUF_STATUS_PLANE0               (SDE_ROT_WB_OFFSET+0x148)
+#define ROT_WB_SBUF_STATUS_PLANE1               (SDE_ROT_WB_OFFSET+0x14C)
 #define ROT_WB_CSC_MATRIX_COEFF_0               (SDE_ROT_WB_OFFSET+0x260)
 #define ROT_WB_CSC_MATRIX_COEFF_1               (SDE_ROT_WB_OFFSET+0x264)
 #define ROT_WB_CSC_MATRIX_COEFF_2               (SDE_ROT_WB_OFFSET+0x268)
@@ -251,6 +260,10 @@
 /* REGDMA ADDR offset Mask */
 #define REGDMA_ADDR_OFFSET_MASK         0xFFFFF
 
+/* REGDMA command trigger select */
+#define REGDMA_CMD_TRIG_SEL_SW_START    (0 << 27)
+#define REGDMA_CMD_TRIG_SEL_MDP_FLUSH   (1 << 27)
+
 /* General defines */
 #define ROT_DONE_MASK                   0x1
 #define ROT_DONE_CLEAR                  0x1
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index 5502cc0..c011d7a 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -85,6 +85,7 @@
  *  @dest_rect: dest ROI, caller takes into account the different operations
  *              such as decimation, flip etc to program this field
  *  @addr:      destination surface address
+ *  @prefill_bw: prefill bandwidth in Bps
  */
 struct sde_hw_rot_wb_cfg {
 	struct sde_mdp_format_params   *fmt;
@@ -97,6 +98,7 @@
 	u32                             h_downscale_factor;
 	u32                             fps;
 	u64                             bw;
+	u64                             prefill_bw;
 };
 
 
@@ -200,9 +202,16 @@
  * struct sde_hw_rotator_context : Each rotator context ties to each priority
  * queue. Max number of concurrent contexts in regdma is limited to regdma
  * ram segment size allocation. Each rotator context can be any priority. A
- * incrementatl timestamp is used to identify and assigne to each context.
+ * incremental timestamp is used to identify and is assigned to each context.
+ * @list: list of pending context
+ * @sbuf_mode: true if stream buffer is requested
+ * @start_ctrl: start control register update value
+ * @sys_cache_mode: sys cache mode register update value
+ * @op_mode: rot top op mode selection
+ * @last_entry: pointer to last configured entry (for debugging purposes)
  */
 struct sde_hw_rotator_context {
+	struct list_head list;
 	struct sde_hw_rotator *rot;
 	struct sde_rot_hw_resource *hwres;
 	enum   sde_rot_queue_prio q_id;
@@ -219,6 +228,11 @@
 	dma_addr_t ts_addr;
 	bool   is_secure;
 	bool   is_traffic_shaping;
+	bool   sbuf_mode;
+	u32    start_ctrl;
+	u32    sys_cache_mode;
+	u32    op_mode;
+	struct sde_rot_entry *last_entry;
 };
 
 /**
@@ -234,6 +248,17 @@
  * struct sde_hw_rotator : Rotator description
  * @hw:           mdp register mapped offset
  * @ops:          pointer to operations possible for the rotator HW
+ * @sbuf_headroom: stream buffer headroom in lines
+ * @solid_fill: true if solid fill is requested
+ * @constant_color: solid fill constant color
+ * @sbuf_ctx: list of active sbuf context in FIFO order
+ * @vid_trigger: video mode trigger select
+ * @cmd_trigger: command mode trigger select
+ * @inpixfmts: array of supported input pixel formats in fourcc
+ * @num_inpixfmt: size of the supported input pixel format array
+ * @outpixfmts: array of supported output pixel formats in fourcc
+ * @num_outpixfmt: size of the supported output pixel formats array
+ * @downscale_caps: capability string of scaling
  */
 struct sde_hw_rotator {
 	/* base */
@@ -271,6 +296,9 @@
 	void *swts_buffer;
 
 	u32    highest_bank;
+	u32    sbuf_headroom;
+	u32    solid_fill;
+	u32    constant_color;
 
 	spinlock_t rotctx_lock;
 	spinlock_t rotisr_lock;
@@ -278,6 +306,17 @@
 	bool    dbgmem;
 	bool reset_hw_ts;
 	u32 last_hw_ts;
+	u32 koff_timeout;
+	u32 vid_trigger;
+	u32 cmd_trigger;
+
+	struct list_head sbuf_ctx[ROT_QUEUE_MAX];
+
+	u32 *inpixfmts;
+	u32 num_inpixfmt;
+	u32 *outpixfmts;
+	u32 num_outpixfmt;
+	const char *downscale_caps;
 };
 
 /**
@@ -349,15 +388,17 @@
  */
 static inline void sde_hw_rotator_put_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = ctx;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = ctx;
+	if (ctx->sbuf_mode)
+		list_add_tail(&rot->sbuf_ctx[ctx->q_id], &ctx->list);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== ctx:%p | session-id:%d\n",
 			 ctx->q_id, idx, ctx, ctx->session_id);
 }
 
@@ -367,15 +408,17 @@
  */
 static inline void sde_hw_rotator_clr_ctx(struct sde_hw_rotator_context *ctx)
 {
-	 struct sde_hw_rotator *rot = ctx->rot;
-	 u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
-	 unsigned long flags;
+	struct sde_hw_rotator *rot = ctx->rot;
+	u32 idx = sde_hw_rotator_get_regdma_ctxidx(ctx);
+	unsigned long flags;
 
-	 spin_lock_irqsave(&rot->rotisr_lock, flags);
-	 rot->rotCtx[ctx->q_id][idx] = NULL;
-	 spin_unlock_irqrestore(&rot->rotisr_lock, flags);
+	spin_lock_irqsave(&rot->rotisr_lock, flags);
+	rot->rotCtx[ctx->q_id][idx] = NULL;
+	if (ctx->sbuf_mode)
+		list_del_init(&ctx->list);
+	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
 
-	 SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
+	SDEROT_DBG("rotCtx[%d][%d] <== null | session-id:%d\n",
 			 ctx->q_id, idx, ctx->session_id);
 }
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 20d578f..4cf9dfc 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -179,13 +179,13 @@
 	return 0;
 }
 
-static int sde_mdp_get_ubwc_plane_size(struct sde_mdp_format_params *fmt,
+static int sde_mdp_get_a5x_plane_size(struct sde_mdp_format_params *fmt,
 	u32 width, u32 height, struct sde_mdp_plane_sizes *ps)
 {
 	int rc = 0;
 
-	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
-		ps->num_planes = 4;
+	if (sde_mdp_is_nv12_8b_format(fmt)) {
+		ps->num_planes = 2;
 		/* Y bitstream stride and plane size */
 		ps->ystride[0] = ALIGN(width, 128);
 		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 32),
@@ -196,6 +196,11 @@
 		ps->plane_size[1] = ALIGN(ps->ystride[1] *
 			ALIGN(height / 2, 32), 4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
 		/* Y meta data stride and plane size */
 		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
 		ps->plane_size[2] = ALIGN(ps->ystride[2] *
@@ -205,13 +210,13 @@
 		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
-	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+	} else if (sde_mdp_is_tp10_format(fmt)) {
 		u32 yWidth   = sde_mdp_general_align(width, 192);
 		u32 yHeight  = ALIGN(height, 16);
 		u32 uvWidth  = sde_mdp_general_align(width, 192);
 		u32 uvHeight = ALIGN(height, 32);
 
-		ps->num_planes = 4;
+		ps->num_planes = 2;
 
 		/* Y bitstream stride and plane size */
 		ps->ystride[0]    = yWidth * TILEWIDTH_SIZE / Y_TILEWIDTH;
@@ -225,6 +230,11 @@
 				(uvHeight * TILEHEIGHT_SIZE / UV_TILEHEIGHT),
 				4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
 		/* Y meta data stride and plane size */
 		ps->ystride[2]    = ALIGN(yWidth / Y_TILEWIDTH, 64);
 		ps->plane_size[2] = ALIGN(ps->ystride[2] *
@@ -234,11 +244,7 @@
 		ps->ystride[3]    = ALIGN(uvWidth / UV_TILEWIDTH, 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 				ALIGN((uvHeight / UV_TILEHEIGHT), 16), 4096);
-	} else if (fmt->format == SDE_PIX_FMT_RGBA_8888_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGBX_8888_UBWC    ||
-		fmt->format == SDE_PIX_FMT_RGBA_1010102_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGBX_1010102_UBWC ||
-		fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
+	} else if (sde_mdp_is_rgb_format(fmt)) {
 		uint32_t stride_alignment, bpp, aligned_bitstream_width;
 
 		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
@@ -248,7 +254,8 @@
 			stride_alignment = 64;
 			bpp = 4;
 		}
-		ps->num_planes = 2;
+
+		ps->num_planes = 1;
 
 		/* RGB bitstream stride and plane size */
 		aligned_bitstream_width = ALIGN(width, stride_alignment);
@@ -256,6 +263,11 @@
 		ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
 			ALIGN(height, 16), 4096);
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 1;
+
 		/* RGB meta data stride and plane size */
 		ps->ystride[2] = ALIGN(DIV_ROUND_UP(aligned_bitstream_width,
 			16), 64);
@@ -266,7 +278,7 @@
 			__func__, fmt->format);
 		rc = -EINVAL;
 	}
-
+done:
 	return rc;
 }
 
@@ -285,8 +297,8 @@
 	bpp = fmt->bpp;
 	memset(ps, 0, sizeof(struct sde_mdp_plane_sizes));
 
-	if (sde_mdp_is_ubwc_format(fmt)) {
-		rc = sde_mdp_get_ubwc_plane_size(fmt, w, h, ps);
+	if (sde_mdp_is_tilea5x_format(fmt)) {
+		rc = sde_mdp_get_a5x_plane_size(fmt, w, h, ps);
 	} else if (bwc_mode) {
 		u32 height, meta_size;
 
@@ -394,7 +406,7 @@
 	return rc;
 }
 
-static int sde_mdp_ubwc_data_check(struct sde_mdp_data *data,
+static int sde_mdp_a5x_data_check(struct sde_mdp_data *data,
 			struct sde_mdp_plane_sizes *ps,
 			struct sde_mdp_format_params *fmt)
 {
@@ -416,8 +428,7 @@
 
 	base_addr = data->p[0].addr;
 
-	if ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ||
-		(fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC)) {
+	if (sde_mdp_is_yuv_format(fmt)) {
 		/************************************************/
 		/*      UBWC            **                      */
 		/*      buffer          **      MDP PLANE       */
@@ -447,6 +458,9 @@
 			+ ps->plane_size[2] + ps->plane_size[3];
 		data->p[1].len = ps->plane_size[1];
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
 		/* configure Y metadata plane */
 		data->p[2].addr = base_addr;
 		data->p[2].len = ps->plane_size[2];
@@ -477,10 +491,14 @@
 		data->p[0].addr = base_addr + ps->plane_size[2];
 		data->p[0].len = ps->plane_size[0];
 
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
 		/* configure RGB metadata plane */
 		data->p[2].addr = base_addr;
 		data->p[2].len = ps->plane_size[2];
 	}
+done:
 	data->num_planes = ps->num_planes;
 
 end:
@@ -490,7 +508,7 @@
 		return -EINVAL;
 	}
 
-	inc = ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ? 1 : 2);
+	inc = (sde_mdp_is_yuv_format(fmt) ? 1 : 2);
 	for (i = 0; i < SDE_ROT_MAX_PLANES; i += inc) {
 		if (data->p[i].len != ps->plane_size[i]) {
 			SDEROT_ERR(
@@ -517,8 +535,8 @@
 	if (!data || data->num_planes == 0)
 		return -ENOMEM;
 
-	if (sde_mdp_is_ubwc_format(fmt))
-		return sde_mdp_ubwc_data_check(data, ps, fmt);
+	if (sde_mdp_is_tilea5x_format(fmt))
+		return sde_mdp_a5x_data_check(data, ps, fmt);
 
 	SDEROT_DBG("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
 		data->p[0].len, ps->total_size);
@@ -574,7 +592,7 @@
 	return ret;
 }
 
-/* x and y are assumednt to be valid, expected to line up with start of tiles */
+/* x and y are assumed to be valid, expected to line up with start of tiles */
 void sde_rot_ubwc_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
 	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
 {
@@ -589,7 +607,7 @@
 	}
 	macro_w = 4 * micro_w;
 
-	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
+	if (sde_mdp_is_nv12_8b_format(fmt)) {
 		u16 chroma_macro_w = macro_w / 2;
 		u16 chroma_micro_w = micro_w / 2;
 
@@ -631,9 +649,11 @@
 			ret = 4;
 			goto done;
 		}
-	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+	} else if (sde_mdp_is_nv12_10b_format(fmt)) {
 		/* TODO: */
-		SDEROT_ERR("UBWC TP10 format not implemented yet");
+		SDEROT_ERR("%c%c%c%c format not implemented yet",
+				fmt->format >> 0, fmt->format >> 8,
+				fmt->format >> 16, fmt->format >> 24);
 		ret = 1;
 		goto done;
 	} else {
@@ -670,7 +690,7 @@
 	if ((x == 0) && (y == 0))
 		return;
 
-	if (sde_mdp_is_ubwc_format(fmt)) {
+	if (sde_mdp_is_tilea5x_format(fmt)) {
 		sde_rot_ubwc_data_calc_offset(data, x, y, ps, fmt);
 		return;
 	}
@@ -715,6 +735,12 @@
 {
 	u32 domain;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		SDEROT_DBG("ion hdl=%p buf=0x%pa\n", data->srcp_dma_buf,
 							&data->addr);
@@ -767,9 +793,14 @@
 	len = &data->len;
 	data->flags |= img->flags;
 	data->offset = img->offset;
-	if (data->flags & SDE_ROT_EXT_DMA_BUF)
+	if (data->flags & SDE_ROT_EXT_DMA_BUF) {
 		data->srcp_dma_buf = img->buffer;
-	else if (IS_ERR(data->srcp_dma_buf)) {
+	} else if (data->flags & SDE_ROT_EXT_IOVA) {
+		data->addr = img->addr;
+		data->len = img->len;
+		SDEROT_DBG("use client %pad/%lx\n", &data->addr, data->len);
+		return 0;
+	} else if (IS_ERR(data->srcp_dma_buf)) {
 		SDEROT_ERR("error on ion_import_fd\n");
 		ret = PTR_ERR(data->srcp_dma_buf);
 		data->srcp_dma_buf = NULL;
@@ -871,6 +902,12 @@
 	if (data->addr && data->len)
 		return 0;
 
+	if (data->flags & SDE_ROT_EXT_IOVA) {
+		SDEROT_DBG("buffer %pad/%lx is client mapped\n",
+				&data->addr, data->len);
+		return 0;
+	}
+
 	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
 		if (sde_mdp_is_map_needed(data)) {
 			domain = sde_smmu_get_domain_type(data->flags,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
index 3f94a15..cc367cd 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -66,6 +66,7 @@
 #define SDE_SECURE_OVERLAY_SESSION	0x00008000
 #define SDE_ROT_EXT_DMA_BUF		0x00010000
 #define SDE_SECURE_CAMERA_SESSION	0x00020000
+#define SDE_ROT_EXT_IOVA			0x00040000
 
 struct sde_rot_data_type;
 
@@ -77,7 +78,8 @@
 	int id;
 	uint32_t flags;
 	uint32_t priv;
-	uint32_t iova;
+	dma_addr_t addr;
+	u32 len;
 };
 
 struct sde_layer_plane {
@@ -86,6 +88,10 @@
 	struct dma_buf *buffer;
 	struct ion_handle *handle;
 
+	/* i/o virtual address & length */
+	dma_addr_t addr;
+	u32 len;
+
 	/* Pixel offset in the dma buffer. */
 	uint32_t offset;
 
@@ -127,6 +133,15 @@
 	 * for new content.
 	 */
 	struct sde_rot_sync_fence *fence;
+
+	/* indicate if this is a stream (inline) buffer */
+	bool sbuf;
+
+	/* specify the system cache id in stream buffer mode */
+	int scid;
+
+	/* indicate if system cache writeback is required */
+	bool writeback;
 };
 
 struct sde_mdp_plane_sizes {
@@ -151,22 +166,12 @@
 	struct sg_table *srcp_table;
 };
 
-enum sde_data_state {
-	SDE_BUF_STATE_UNUSED,
-	SDE_BUF_STATE_READY,
-	SDE_BUF_STATE_ACTIVE,
-	SDE_BUF_STATE_CLEANUP,
-};
-
 struct sde_mdp_data {
-	enum sde_data_state state;
 	u8 num_planes;
 	struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
-	struct list_head buf_list;
-	struct list_head pipe_list;
-	struct list_head chunk_list;
-	u64 last_alloc;
-	u64 last_freed;
+	bool sbuf;
+	int scid;
+	bool writeback;
 };
 
 void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 90ec313..49fe5fb 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1069,6 +1069,7 @@
 		hfi = (struct hfi_buffer_count_actual *)
 			&pkt->rg_property_data[1];
 		hfi->buffer_count_actual = prop->buffer_count_actual;
+		hfi->buffer_count_min_host = prop->buffer_count_min_host;
 
 		buffer_type = get_hfi_buffer(prop->buffer_type);
 		if (buffer_type)
@@ -1480,21 +1481,13 @@
 		case HAL_INTRA_REFRESH_NONE:
 			hfi->mode = HFI_INTRA_REFRESH_NONE;
 			break;
-		case HAL_INTRA_REFRESH_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
-			break;
 		case HAL_INTRA_REFRESH_CYCLIC:
 			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
-			hfi->mbs = prop->cir_mbs;
-			break;
-		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		case HAL_INTRA_REFRESH_RANDOM:
 			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		default:
 			dprintk(VIDC_ERR,
@@ -1720,14 +1713,6 @@
 		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
 		break;
 	}
-	case HAL_PARAM_VENC_H264_NAL_SVC_EXT:
-	{
-		create_pkt_enable(pkt->rg_property_data,
-			HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT,
-			((struct hal_enable *)pdata)->enable);
-		pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
-		break;
-	}
 	case HAL_CONFIG_VENC_PERF_MODE:
 	{
 		u32 hfi_perf_mode = 0;
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 4760723a..00830cc 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -790,7 +790,7 @@
 {
 	enum vidc_status status = VIDC_ERR_NONE;
 	u32 prop_id, next_offset;
-	u32 codecs, domain;
+	u32 codecs = 0, domain = 0;
 
 	while (status == VIDC_ERR_NONE && num_properties &&
 			rem_bytes >= sizeof(u32)) {
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 2db245e..c82db74 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -73,11 +73,6 @@
 
 	trace_msm_v4l2_vidc_close_start("msm_v4l2_close start");
 	vidc_inst = get_vidc_inst(filp, NULL);
-	rc = msm_vidc_release_buffers(vidc_inst,
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-	if (rc)
-		dprintk(VIDC_WARN,
-			"Failed in %s for release output buffers\n", __func__);
 
 	rc = msm_vidc_close(vidc_inst);
 	trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 33821c7..abc6cc8 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -20,10 +20,7 @@
 #include "msm_vidc_clocks.h"
 
 #define MSM_VDEC_DVC_NAME "msm_vdec_8974"
-#define MIN_NUM_OUTPUT_BUFFERS 4
-#define MIN_NUM_CAPTURE_BUFFERS 6
-#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS 1
-#define MAX_NUM_OUTPUT_BUFFERS VB2_MAX_FRAME
+#define MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS MIN_NUM_CAPTURE_BUFFERS
 #define DEFAULT_VIDEO_CONCEAL_COLOR_BLACK 0x8010
 #define MB_SIZE_IN_PIXEL (16 * 16)
 #define MAX_OPERATING_FRAME_RATE (300 << 16)
@@ -323,6 +320,30 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+		.name = "CAPTURE Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_CAPTURE_BUFFERS,
+		.maximum = MAX_NUM_CAPTURE_BUFFERS,
+		.default_value = MIN_NUM_CAPTURE_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+		.name = "OUTPUT Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_OUTPUT_BUFFERS,
+		.maximum = MAX_NUM_OUTPUT_BUFFERS,
+		.default_value = MIN_NUM_OUTPUT_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT,
 		.name = "Video decoder dpb color format",
 		.type = V4L2_CTRL_TYPE_MENU,
@@ -477,7 +498,6 @@
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12,
 		.type = CAPTURE_PORT,
 	},
@@ -485,7 +505,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0",
 		.description = "UBWC Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12_ubwc,
 		.type = CAPTURE_PORT,
 	},
@@ -493,7 +512,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0 10bit",
 		.description = "UBWC Y/CbCr 4:2:0 10bit",
 		.fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC,
-		.num_planes = 2,
 		.get_frame_size = get_frame_size_nv12_ubwc_10bit,
 		.type = CAPTURE_PORT,
 	},
@@ -501,7 +519,6 @@
 		.name = "Mpeg4",
 		.description = "Mpeg4 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG4,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -510,7 +527,6 @@
 		.name = "Mpeg2",
 		.description = "Mpeg2 compressed format",
 		.fourcc = V4L2_PIX_FMT_MPEG2,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -519,7 +535,6 @@
 		.name = "H263",
 		.description = "H263 compressed format",
 		.fourcc = V4L2_PIX_FMT_H263,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -528,7 +543,6 @@
 		.name = "VC1",
 		.description = "VC-1 compressed format",
 		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -537,7 +551,6 @@
 		.name = "VC1 SP",
 		.description = "VC-1 compressed format G",
 		.fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -546,7 +559,6 @@
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -555,7 +567,6 @@
 		.name = "H264_MVC",
 		.description = "H264_MVC compressed format",
 		.fourcc = V4L2_PIX_FMT_H264_MVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -564,7 +575,6 @@
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -573,7 +583,6 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = OUTPUT_PORT,
 		.defer_outputs = false,
@@ -582,122 +591,12 @@
 		.name = "VP9",
 		.description = "VP9 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP9,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed_full_yuv,
 		.type = OUTPUT_PORT,
 		.defer_outputs = true,
 	},
 };
 
-int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
-{
-	const struct msm_vidc_format *fmt = NULL;
-	struct hfi_device *hdev;
-	int rc = 0, i = 0, stride = 0, scanlines = 0, color_format = 0;
-	unsigned int *plane_sizes = NULL, extra_idx = 0;
-
-	if (!inst || !f || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR,
-			"Invalid input, inst = %pK, format = %pK\n", inst, f);
-		return -EINVAL;
-	}
-
-	hdev = inst->core->device;
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-		fmt = &inst->fmts[CAPTURE_PORT];
-	else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-		fmt = &inst->fmts[OUTPUT_PORT];
-	else
-		return -ENOTSUPP;
-
-	f->fmt.pix_mp.pixelformat = fmt->fourcc;
-	f->fmt.pix_mp.num_planes = fmt->num_planes;
-	if (inst->in_reconfig) {
-		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
-		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
-
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"%s: unsupported session\n", __func__);
-			goto exit;
-		}
-	}
-
-	f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
-	f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
-	stride = inst->prop.width[CAPTURE_PORT];
-	scanlines = inst->prop.height[CAPTURE_PORT];
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		plane_sizes = &inst->bufq[OUTPUT_PORT].plane_sizes[0];
-		for (i = 0; i < fmt->num_planes; ++i) {
-			if (!plane_sizes[i]) {
-				f->fmt.pix_mp.plane_fmt[i].sizeimage =
-					get_frame_size(inst, fmt, f->type, i);
-				plane_sizes[i] = f->fmt.pix_mp.plane_fmt[i].
-					sizeimage;
-			} else
-				f->fmt.pix_mp.plane_fmt[i].sizeimage =
-					plane_sizes[i];
-		}
-		f->fmt.pix_mp.height = inst->prop.height[OUTPUT_PORT];
-		f->fmt.pix_mp.width = inst->prop.width[OUTPUT_PORT];
-		f->fmt.pix_mp.plane_fmt[0].bytesperline =
-			(__u16)inst->prop.width[OUTPUT_PORT];
-		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
-			(__u16)inst->prop.height[OUTPUT_PORT];
-	} else {
-		switch (fmt->fourcc) {
-		case V4L2_PIX_FMT_NV12:
-			color_format = COLOR_FMT_NV12;
-			break;
-		case V4L2_PIX_FMT_NV12_UBWC:
-			color_format = COLOR_FMT_NV12_UBWC;
-			break;
-		case V4L2_PIX_FMT_NV12_TP10_UBWC:
-			color_format = COLOR_FMT_NV12_BPP10_UBWC;
-			break;
-		default:
-			dprintk(VIDC_WARN, "Color format not recognized\n");
-			rc = -ENOTSUPP;
-			goto exit;
-		}
-
-		stride = VENUS_Y_STRIDE(color_format,
-				inst->prop.width[CAPTURE_PORT]);
-		scanlines = VENUS_Y_SCANLINES(color_format,
-				inst->prop.height[CAPTURE_PORT]);
-
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			fmt->get_frame_size(0,
-			inst->prop.height[CAPTURE_PORT],
-			inst->prop.width[CAPTURE_PORT]);
-
-		extra_idx = EXTRADATA_IDX(fmt->num_planes);
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-				VENUS_EXTRADATA_SIZE(
-					inst->prop.height[CAPTURE_PORT],
-					inst->prop.width[CAPTURE_PORT]);
-		}
-
-		for (i = 0; i < fmt->num_planes; ++i)
-			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-
-		f->fmt.pix_mp.height = inst->prop.height[CAPTURE_PORT];
-		f->fmt.pix_mp.width = inst->prop.width[CAPTURE_PORT];
-		f->fmt.pix_mp.plane_fmt[0].bytesperline =
-			(__u16)stride;
-		f->fmt.pix_mp.plane_fmt[0].reserved[0] =
-			(__u16)scanlines;
-	}
-
-exit:
-	return rc;
-}
-
 int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
 {
 	struct msm_vidc_format *fmt = NULL;
@@ -724,6 +623,14 @@
 			rc = -EINVAL;
 			goto err_invalid_fmt;
 		}
+
+		if (inst->fmts[fmt->type].fourcc == f->fmt.pix_mp.pixelformat &&
+			inst->prop.width[CAPTURE_PORT] == f->fmt.pix_mp.width &&
+			inst->prop.height[CAPTURE_PORT] ==
+				f->fmt.pix_mp.height) {
+			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			return 0;
+		}
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
@@ -750,7 +657,7 @@
 			inst->fmts[fmt->type].get_frame_size(0,
 			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
 
-		extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
 		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
 			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
 				VENUS_EXTRADATA_SIZE(
@@ -758,14 +665,12 @@
 					inst->prop.width[CAPTURE_PORT]);
 		}
 
-		f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
 			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
-		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
 		fmt = msm_comm_get_pixel_fmt_fourcc(vdec_formats,
 				ARRAY_SIZE(vdec_formats),
@@ -781,34 +686,21 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
-		if (rc) {
-			dprintk(VIDC_ERR, "Failed to initialize instance\n");
-			goto err_invalid_fmt;
-		}
-
-		if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) &
-			inst->core->dec_codec_supported)) {
-			dprintk(VIDC_ERR,
-				"Codec(%#x) is not present in the supported codecs list(%#x)\n",
-				get_hal_codec(inst->fmts[fmt->type].fourcc),
-				inst->core->dec_codec_supported);
-			rc = -EINVAL;
-			goto err_invalid_fmt;
-		}
-
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
 			goto err_invalid_fmt;
 		}
 
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto err_invalid_fmt;
+		if (inst->fmts[fmt->type].fourcc == f->fmt.pix_mp.pixelformat &&
+			inst->prop.width[OUTPUT_PORT] == f->fmt.pix_mp.width &&
+			inst->prop.height[OUTPUT_PORT] ==
+				f->fmt.pix_mp.height) {
+			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			return 0;
 		}
+		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
+		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width[OUTPUT_PORT];
@@ -820,18 +712,19 @@
 		msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
 
 		max_input_size = get_frame_size(
-inst, &inst->fmts[fmt->type], f->type, 0);
+			inst, &inst->fmts[fmt->type], f->type, 0);
 		if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size ||
 			!f->fmt.pix_mp.plane_fmt[0].sizeimage) {
 			f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size;
 		}
 
-		f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; ++i) {
 			inst->bufq[OUTPUT_PORT].plane_sizes[i] =
 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 
+		rc = msm_comm_try_get_bufreqs(inst);
 	}
 err_invalid_fmt:
 	return rc;
@@ -868,516 +761,6 @@
 	return rc;
 }
 
-static int set_actual_buffer_count(struct msm_vidc_inst *inst,
-			int count, enum hal_buffer type)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct hal_buffer_count_actual buf_count;
-
-	hdev = inst->core->device;
-
-	buf_count.buffer_type = type;
-	buf_count.buffer_count_actual = count;
-	rc = call_hfi_op(hdev, session_set_property,
-		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to set actual buffer count %d for buffer type %d\n",
-			count, type);
-	return rc;
-}
-
-static int msm_vdec_queue_setup(
-	struct vb2_queue *q,
-	unsigned int *num_buffers, unsigned int *num_planes,
-	unsigned int sizes[], struct device *alloc_devs[])
-{
-	int i, rc = 0;
-	struct msm_vidc_inst *inst;
-	struct hal_buffer_requirements *bufreq;
-	int extra_idx = 0;
-	int min_buff_count = 0;
-
-	if (!q || !num_buffers || !num_planes
-		|| !sizes || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK, %pK, %pK\n",
-			q, num_buffers, num_planes);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"%s: Failed : Buffer requirements\n", __func__);
-		goto exit;
-	}
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		*num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
-				*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
-			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
-		for (i = 0; i < *num_planes; i++) {
-			sizes[i] = get_frame_size(inst,
-					&inst->fmts[OUTPUT_PORT], q->type, i);
-		}
-		rc = set_actual_buffer_count(inst, *num_buffers,
-			HAL_BUFFER_INPUT);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n");
-		*num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-		if (rc) {
-			dprintk(VIDC_ERR, "Failed to open instance\n");
-			break;
-		}
-		rc = msm_comm_try_get_bufreqs(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to get buffer requirements: %d\n", rc);
-			break;
-		}
-
-		bufreq = get_buff_req_buffer(inst,
-			msm_comm_get_hal_output_buffer(inst));
-		if (!bufreq) {
-			dprintk(VIDC_ERR,
-				"No buffer requirement for buffer type %x\n",
-				HAL_BUFFER_OUTPUT);
-			rc = -EINVAL;
-			break;
-		}
-
-		/* Pretend as if FW itself is asking for
-		 * additional buffers.
-		 * *num_buffers += MSM_VIDC_ADDITIONAL_BUFS_FOR_DCVS
-		 * is wrong since it will end up increasing the count
-		 * on every call to reqbufs if *num_bufs is larger
-		 * than min requirement.
-		 */
-		*num_buffers = max(*num_buffers, bufreq->buffer_count_min
-			+ msm_dcvs_get_extra_buff_count(inst));
-
-		min_buff_count = (!!(inst->flags & VIDC_THUMBNAIL)) ?
-			MIN_NUM_THUMBNAIL_MODE_CAPTURE_BUFFERS :
-				MIN_NUM_CAPTURE_BUFFERS;
-
-		*num_buffers = clamp_val(*num_buffers,
-			min_buff_count, VB2_MAX_FRAME);
-
-		dprintk(VIDC_DBG, "Set actual output buffer count: %d\n",
-				*num_buffers);
-		rc = set_actual_buffer_count(inst, *num_buffers,
-					msm_comm_get_hal_output_buffer(inst));
-		if (rc)
-			break;
-
-		if (*num_buffers != bufreq->buffer_count_actual) {
-			rc = msm_comm_try_get_bufreqs(inst);
-			if (rc) {
-				dprintk(VIDC_WARN,
-					"Failed to get buf req, %d\n", rc);
-				break;
-			}
-		}
-		dprintk(VIDC_DBG, "count =  %d, size = %d, alignment = %d\n",
-				inst->buff_req.buffer[1].buffer_count_actual,
-				inst->buff_req.buffer[1].buffer_size,
-				inst->buff_req.buffer[1].buffer_alignment);
-		sizes[0] = inst->bufq[CAPTURE_PORT].plane_sizes[0];
-
-		/*
-		 * Set actual buffer count to firmware for DPB buffers.
-		 * Firmware mandates setting of minimum buffer size
-		 * and actual buffer count for both OUTPUT and OUTPUT2.
-		 * Hence we are setting back the same buffer size
-		 * information back to firmware.
-		 */
-		if (msm_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-			bufreq = get_buff_req_buffer(inst,
-					HAL_BUFFER_OUTPUT);
-			if (!bufreq) {
-				rc = -EINVAL;
-				break;
-			}
-
-			rc = set_actual_buffer_count(inst,
-				bufreq->buffer_count_actual,
-				HAL_BUFFER_OUTPUT);
-			if (rc)
-				break;
-		}
-
-		extra_idx =
-			EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
-		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
-			sizes[extra_idx] =
-				VENUS_EXTRADATA_SIZE(
-					inst->prop.height[CAPTURE_PORT],
-					inst->prop.width[CAPTURE_PORT]);
-		}
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
-		rc = -EINVAL;
-	}
-exit:
-	return rc;
-}
-
-static inline int set_max_internal_buffers_size(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct {
-		enum hal_buffer type;
-		struct hal_buffer_requirements *req;
-		size_t size;
-	} internal_buffers[] = {
-		{ HAL_BUFFER_INTERNAL_SCRATCH, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_SCRATCH_1, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_SCRATCH_2, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_PERSIST, NULL, 0},
-		{ HAL_BUFFER_INTERNAL_PERSIST_1, NULL, 0},
-	};
-
-	struct hal_frame_size frame_sz;
-	int i;
-
-	frame_sz.buffer_type = HAL_BUFFER_INPUT;
-	frame_sz.width = inst->capability.width.max;
-	frame_sz.height =
-		(inst->capability.mbs_per_frame.max * 256) /
-		inst->capability.width.max;
-
-	dprintk(VIDC_DBG,
-		"Max buffer reqs, buffer type = %d width = %d, height = %d, max_mbs_per_frame = %d\n",
-		frame_sz.buffer_type, frame_sz.width,
-		frame_sz.height, inst->capability.mbs_per_frame.max);
-
-	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"%s Failed to get max buf req, %d\n", __func__, rc);
-		return 0;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
-		internal_buffers[i].req =
-			get_buff_req_buffer(inst, internal_buffers[i].type);
-		internal_buffers[i].size = internal_buffers[i].req ?
-			internal_buffers[i].req->buffer_size : 0;
-	}
-
-	frame_sz.buffer_type = HAL_BUFFER_INPUT;
-	frame_sz.width = inst->prop.width[OUTPUT_PORT];
-	frame_sz.height = inst->prop.height[OUTPUT_PORT];
-
-	msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"%s Failed to get back old buf req, %d\n",
-			__func__, rc);
-		return rc;
-	}
-
-	dprintk(VIDC_DBG,
-			"Old buffer reqs, buffer type = %d width = %d, height = %d\n",
-			frame_sz.buffer_type, frame_sz.width,
-			frame_sz.height);
-
-	for (i = 0; i < ARRAY_SIZE(internal_buffers); i++) {
-		if (internal_buffers[i].req) {
-			internal_buffers[i].req->buffer_size =
-				internal_buffers[i].size;
-			dprintk(VIDC_DBG,
-				"Changing buffer type : %d size to : %zd\n",
-				internal_buffers[i].type,
-				internal_buffers[i].size);
-		}
-	}
-	return 0;
-}
-
-static inline int start_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	bool slave_side_cp = inst->core->resources.slave_side_cp;
-	struct hal_buffer_size_minimum b;
-	unsigned int buffer_size;
-	struct msm_vidc_format *fmt = NULL;
-
-	fmt = &inst->fmts[CAPTURE_PORT];
-	buffer_size = fmt->get_frame_size(0,
-		inst->prop.height[CAPTURE_PORT],
-		inst->prop.width[CAPTURE_PORT]);
-	hdev = inst->core->device;
-
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_vidc_check_scaling_supported(inst);
-		b.buffer_type = HAL_BUFFER_OUTPUT2;
-	} else {
-		b.buffer_type = HAL_BUFFER_OUTPUT;
-	}
-
-	b.buffer_size = buffer_size;
-	rc = call_hfi_op(hdev, session_set_property,
-		 inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
-		 &b);
-	if (rc) {
-		dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
-		return -EINVAL;
-	}
-	if ((inst->flags & VIDC_SECURE) && !inst->in_reconfig &&
-		!slave_side_cp) {
-		rc = set_max_internal_buffers_size(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set max scratch buffer size: %d\n",
-				rc);
-			goto fail_start;
-		}
-	}
-	rc = msm_comm_set_scratch_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to set scratch buffers: %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_persist_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to set persist buffers: %d\n", rc);
-		goto fail_start;
-	}
-
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_comm_set_output_buffers(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set output buffers: %d\n", rc);
-			goto fail_start;
-		}
-	}
-
-	/*
-	 * For seq_changed_insufficient, driver should set session_continue
-	 * to firmware after the following sequence
-	 * - driver raises insufficient event to v4l2 client
-	 * - all output buffers have been flushed and freed
-	 * - v4l2 client queries buffer requirements and splits/combines OPB-DPB
-	 * - v4l2 client sets new set of buffers to firmware
-	 * - v4l2 client issues CONTINUE to firmware to resume decoding of
-	 *   submitted ETBs.
-	 */
-	if (inst->in_reconfig) {
-		dprintk(VIDC_DBG, "send session_continue after reconfig\n");
-		rc = call_hfi_op(hdev, session_continue,
-			(void *) inst->session);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s - failed to send session_continue\n",
-				__func__);
-			goto fail_start;
-		}
-	}
-	inst->in_reconfig = false;
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-		goto fail_start;
-	}
-	msm_dcvs_init_load(inst);
-	if (msm_comm_get_stream_output_mode(inst) ==
-		HAL_VIDEO_DECODER_SECONDARY) {
-		rc = msm_comm_queue_output_buffers(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to queue output buffers: %d\n", rc);
-			goto fail_start;
-		}
-	}
-
-fail_start:
-	return rc;
-}
-
-static inline int stop_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-	return rc;
-}
-
-static int msm_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-	struct hfi_device *hdev;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
-		q->type, inst);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		goto stream_start_failed;
-	}
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Streamon failed on: %d capability for inst: %pK\n",
-			q->type, inst);
-		goto stream_start_failed;
-	}
-
-	rc = msm_comm_qbuf(inst, NULL);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
-				rc);
-		goto stream_start_failed;
-	}
-
-stream_start_failed:
-	return rc;
-}
-
-static void msm_vdec_stop_streaming(struct vb2_queue *q)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return;
-	}
-
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (!inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = stop_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = stop_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR,
-			"Q-type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK, cap = %d to state: %d\n",
-			inst, q->type, MSM_VIDC_RELEASE_RESOURCES_DONE);
-}
-
-static void msm_vdec_buf_queue(struct vb2_buffer *vb)
-{
-	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
-
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
-}
-
-static void msm_vdec_buf_cleanup(struct vb2_buffer *vb)
-{
-	int rc = 0;
-	struct buf_queue *q = NULL;
-	struct msm_vidc_inst *inst = NULL;
-
-	if (!vb) {
-		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
-			__func__, vb);
-		return;
-	}
-
-	inst = vb2_get_drv_priv(vb->vb2_queue);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
-			__func__);
-		return;
-	}
-
-	q = msm_comm_get_vb2q(inst, vb->type);
-	if (!q) {
-		dprintk(VIDC_ERR,
-			"%s : Failed to find buffer queue for type = %d\n",
-				__func__, vb->type);
-		return;
-	}
-
-	if (q->vb2_bufq.streaming) {
-		dprintk(VIDC_DBG, "%d PORT is streaming\n",
-			vb->type);
-		return;
-	}
-
-	rc = msm_vidc_release_buffers(inst, vb->type);
-	if (rc)
-		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
-			__func__, rc);
-}
-
-static const struct vb2_ops msm_vdec_vb2q_ops = {
-	.queue_setup = msm_vdec_queue_setup,
-	.start_streaming = msm_vdec_start_streaming,
-	.buf_queue = msm_vdec_buf_queue,
-	.buf_cleanup = msm_vdec_buf_cleanup,
-	.stop_streaming = msm_vdec_stop_streaming,
-};
-
-const struct vb2_ops *msm_vdec_get_vb2q_ops(void)
-{
-	return &msm_vdec_vb2q_ops;
-}
-
 int msm_vdec_inst_init(struct msm_vidc_inst *inst)
 {
 	int rc = 0;
@@ -1398,6 +781,9 @@
 	inst->capability.secure_output2_threshold.max = 0;
 	inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_DYNAMIC;
+	/* To start with, both ports are 1 plane each */
+	inst->bufq[OUTPUT_PORT].num_planes = 1;
+	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->prop.fps = DEFAULT_FPS;
 	inst->operating_rate = 0;
 	memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
@@ -1497,6 +883,7 @@
 		property_id = HAL_PARAM_VDEC_SYNC_FRAME_DECODE;
 		hal_property.enable = ctrl->val;
 		pdata = &hal_property;
+		msm_dcvs_try_enable(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_SECURE:
 		inst->flags |= VIDC_SECURE;
@@ -1506,6 +893,36 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
 		property_id = HAL_PARAM_INDEX_EXTRADATA;
 		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_EXTRADATA_MB_QUANTIZATION:
+		case V4L2_MPEG_VIDC_EXTRADATA_INTERLACE_VIDEO:
+		case V4L2_MPEG_VIDC_EXTRADATA_TIMESTAMP:
+		case V4L2_MPEG_VIDC_EXTRADATA_S3D_FRAME_PACKING:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_RATE:
+		case V4L2_MPEG_VIDC_EXTRADATA_PANSCAN_WINDOW:
+		case V4L2_MPEG_VIDC_EXTRADATA_RECOVERY_POINT_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+		case V4L2_MPEG_VIDC_EXTRADATA_MPEG2_SEQDISP:
+		case V4L2_MPEG_VIDC_EXTRADATA_STREAM_USERDATA:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_QP:
+		case V4L2_MPEG_VIDC_EXTRADATA_FRAME_BITS_INFO:
+		case V4L2_MPEG_VIDC_EXTRADATA_VQZIP_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_OUTPUT_CROP:
+		case V4L2_MPEG_VIDC_EXTRADATA_DISPLAY_COLOUR_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
+		case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY:
+		case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE:
+			inst->bufq[CAPTURE_PORT].num_planes = 2;
+			inst->bufq[CAPTURE_PORT].plane_sizes[EXTRADATA_IDX(2)] =
+				VENUS_EXTRADATA_SIZE(
+				inst->prop.height[CAPTURE_PORT],
+				inst->prop.width[CAPTURE_PORT]);
+			break;
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
 		extra.enable = 1;
 		pdata = &extra;
 		break;
@@ -1606,6 +1023,7 @@
 				V4L2_CID_MPEG_VIDEO_H264_LEVEL,
 				temp_ctrl->val);
 		pdata = &profile_level;
+		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
@@ -1617,6 +1035,7 @@
 				V4L2_CID_MPEG_VIDEO_H264_PROFILE,
 				temp_ctrl->val);
 		pdata = &profile_level;
+		rc = msm_comm_try_get_bufreqs(inst);
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_BUFFER_SIZE_LIMIT:
 		dprintk(VIDC_DBG,
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.h b/drivers/media/platform/msm/vidc/msm_vdec.h
index a209dd5..44ba4fd 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.h
+++ b/drivers/media/platform/msm/vidc/msm_vdec.h
@@ -22,10 +22,8 @@
 	const struct v4l2_ctrl_ops *ctrl_ops);
 int msm_vdec_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
 int msm_vdec_s_fmt(void *instance, struct v4l2_format *f);
-int msm_vdec_g_fmt(void *instance, struct v4l2_format *f);
 int msm_vdec_s_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_vdec_g_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_vdec_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-struct vb2_ops *msm_vdec_get_vb2q_ops(void);
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 7526fb7..2ec5155 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -18,8 +18,6 @@
 #include "msm_vidc_clocks.h"
 
 #define MSM_VENC_DVC_NAME "msm_venc_8974"
-#define MIN_NUM_OUTPUT_BUFFERS 4
-#define MIN_NUM_CAPTURE_BUFFERS 4
 #define MIN_BIT_RATE 32000
 #define MAX_BIT_RATE 300000000
 #define DEFAULT_BIT_RATE 64000
@@ -326,6 +324,31 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
+		.name = "CAPTURE Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_CAPTURE_BUFFERS,
+		.maximum = MAX_NUM_CAPTURE_BUFFERS,
+		.default_value = MIN_NUM_CAPTURE_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+	{
+		.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+		.name = "OUTPUT Count",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = MIN_NUM_OUTPUT_BUFFERS,
+		.maximum = MAX_NUM_OUTPUT_BUFFERS,
+		.default_value = MIN_NUM_OUTPUT_BUFFERS,
+		.step = 1,
+		.menu_skip_mask = 0,
+		.qmenu = NULL,
+		.flags = V4L2_CTRL_FLAG_VOLATILE,
+	},
+
+	{
 		.id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME,
 		.name = "Request I Frame",
 		.type = V4L2_CTRL_TYPE_BUTTON,
@@ -887,6 +910,16 @@
 		.qmenu = NULL,
 	},
 	{
+		.id = V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID,
+		.name = "Layer ID for different settings",
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.minimum = 0,
+		.maximum = 6,
+		.default_value = 0,
+		.step = 1,
+		.qmenu = NULL,
+	},
+	{
 		.id = V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH,
 		.name = "SAR Width",
 		.type = V4L2_CTRL_TYPE_INTEGER,
@@ -1088,7 +1121,6 @@
 		.name = "YCbCr Semiplanar 4:2:0",
 		.description = "Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv12,
 		.type = OUTPUT_PORT,
 	},
@@ -1096,7 +1128,6 @@
 		.name = "UBWC YCbCr Semiplanar 4:2:0",
 		.description = "UBWC Y/CbCr 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV12_UBWC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv12_ubwc,
 		.type = OUTPUT_PORT,
 	},
@@ -1104,7 +1135,6 @@
 		.name = "RGBA 8:8:8:8",
 		.description = "RGBA 8:8:8:8",
 		.fourcc = V4L2_PIX_FMT_RGB32,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_rgba,
 		.type = OUTPUT_PORT,
 	},
@@ -1112,7 +1142,6 @@
 		.name = "H264",
 		.description = "H264 compressed format",
 		.fourcc = V4L2_PIX_FMT_H264,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1120,7 +1149,6 @@
 		.name = "VP8",
 		.description = "VP8 compressed format",
 		.fourcc = V4L2_PIX_FMT_VP8,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1128,7 +1156,6 @@
 		.name = "HEVC",
 		.description = "HEVC compressed format",
 		.fourcc = V4L2_PIX_FMT_HEVC,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_compressed,
 		.type = CAPTURE_PORT,
 	},
@@ -1136,222 +1163,13 @@
 		.name = "YCrCb Semiplanar 4:2:0",
 		.description = "Y/CrCb 4:2:0",
 		.fourcc = V4L2_PIX_FMT_NV21,
-		.num_planes = 1,
 		.get_frame_size = get_frame_size_nv21,
 		.type = OUTPUT_PORT,
 	},
 };
 
-static void msm_venc_update_plane_count(struct msm_vidc_inst *inst, int type)
-{
-	struct v4l2_ctrl *ctrl = NULL;
-	u32 extradata = 0;
-
-	if (!inst)
-		return;
-
-	inst->fmts[type].num_planes = 1;
-
-	ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
-		V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
-
-	if (ctrl)
-		extradata = v4l2_ctrl_g_ctrl(ctrl);
-
-	if (type == CAPTURE_PORT) {
-		switch (extradata) {
-		case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
-		case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
-		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
-		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
-		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
-			inst->fmts[CAPTURE_PORT].num_planes = 2;
-		default:
-			break;
-		}
-	} else if (type == OUTPUT_PORT) {
-		switch (extradata) {
-		case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
-		case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
-		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
-		case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
-		case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
-		case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
-			inst->fmts[OUTPUT_PORT].num_planes = 2;
-			break;
-		default:
-			break;
-		}
-	}
-}
-
 static int msm_venc_set_csc(struct msm_vidc_inst *inst);
 
-static int msm_venc_queue_setup(struct vb2_queue *q,
-	unsigned int *num_buffers, unsigned int *num_planes,
-	unsigned int sizes[], struct device *alloc_devs[])
-{
-	int i, temp, rc = 0;
-	struct msm_vidc_inst *inst;
-	struct hal_buffer_count_actual new_buf_count;
-	enum hal_property property_id;
-	struct hfi_device *hdev;
-	struct hal_buffer_requirements *buff_req;
-	u32 extra_idx = 0;
-	struct hal_buffer_requirements *buff_req_buffer = NULL;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input\n");
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	hdev = inst->core->device;
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to open instance\n");
-		return rc;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to get buffer requirements: %d\n", rc);
-		return rc;
-	}
-
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		*num_planes = 1;
-
-		buff_req = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-		if (buff_req) {
-			/*
-			 * Pretend as if the FW itself is asking for additional
-			 * buffers, which are required for DCVS
-			 */
-			unsigned int min_req_buffers =
-				buff_req->buffer_count_min +
-				msm_dcvs_get_extra_buff_count(inst);
-			*num_buffers = max(*num_buffers, min_req_buffers);
-		}
-
-		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
-				*num_buffers > VB2_MAX_FRAME) {
-			int temp = *num_buffers;
-
-			*num_buffers = clamp_val(*num_buffers,
-					MIN_NUM_CAPTURE_BUFFERS,
-					VB2_MAX_FRAME);
-			dprintk(VIDC_INFO,
-				"Changing buffer count on CAPTURE_MPLANE from %d to %d for best effort encoding\n",
-				temp, *num_buffers);
-		}
-
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		*num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-
-		for (i = 0; i < *num_planes; i++) {
-			int extra_idx = EXTRADATA_IDX(*num_planes);
-
-			buff_req_buffer = get_buff_req_buffer(inst,
-					HAL_BUFFER_OUTPUT);
-
-			sizes[i] = buff_req_buffer ?
-				buff_req_buffer->buffer_size : 0;
-
-			if (extra_idx && i == extra_idx &&
-					extra_idx < VIDEO_MAX_PLANES) {
-				buff_req_buffer = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_OUTPUT);
-				if (!buff_req_buffer) {
-					dprintk(VIDC_ERR,
-						"%s: failed - invalid buffer req\n",
-						__func__);
-					return -EINVAL;
-				}
-
-				sizes[i] = buff_req_buffer->buffer_size;
-			}
-		}
-
-		dprintk(VIDC_DBG, "actual output buffer count set to fw = %d\n",
-				*num_buffers);
-		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
-		new_buf_count.buffer_type = HAL_BUFFER_OUTPUT;
-		new_buf_count.buffer_count_actual = *num_buffers;
-		rc = call_hfi_op(hdev, session_set_property, inst->session,
-			property_id, &new_buf_count);
-
-		break;
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		*num_planes = 1;
-
-		*num_buffers = inst->buff_req.buffer[0].buffer_count_actual =
-			max(*num_buffers, inst->buff_req.buffer[0].
-				buffer_count_min);
-
-		temp = *num_buffers;
-
-		*num_buffers = clamp_val(*num_buffers,
-				MIN_NUM_OUTPUT_BUFFERS,
-				VB2_MAX_FRAME);
-		dprintk(VIDC_INFO,
-			"Changing buffer count on OUTPUT_MPLANE from %d to %d for best effort encoding\n",
-			temp, *num_buffers);
-
-		property_id = HAL_PARAM_BUFFER_COUNT_ACTUAL;
-		new_buf_count.buffer_type = HAL_BUFFER_INPUT;
-		new_buf_count.buffer_count_actual = *num_buffers;
-
-		dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
-				*num_buffers);
-
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		*num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-
-		rc = call_hfi_op(hdev, session_set_property, inst->session,
-					property_id, &new_buf_count);
-		if (rc)
-			dprintk(VIDC_ERR, "failed to set count to fw\n");
-
-		dprintk(VIDC_DBG, "size = %d, alignment = %d, count = %d\n",
-				inst->buff_req.buffer[0].buffer_size,
-				inst->buff_req.buffer[0].buffer_alignment,
-				inst->buff_req.buffer[0].buffer_count_actual);
-		sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size(
-				0, inst->prop.height[OUTPUT_PORT],
-				inst->prop.width[OUTPUT_PORT]);
-
-		extra_idx =
-			EXTRADATA_IDX(inst->fmts[OUTPUT_PORT].num_planes);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			buff_req_buffer = get_buff_req_buffer(inst,
-				HAL_BUFFER_EXTRADATA_INPUT);
-			if (!buff_req_buffer) {
-				dprintk(VIDC_ERR,
-					"%s: failed - invalid buffer req\n",
-					__func__);
-				return -EINVAL;
-			}
-
-			sizes[extra_idx] = buff_req_buffer->buffer_size;
-		}
-
-		break;
-	default:
-		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-	return rc;
-}
-
 static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers)
 {
 	int num_enh_layers = 0;
@@ -1426,190 +1244,6 @@
 	return rc;
 }
 
-static inline int start_streaming(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
-		return -EINVAL;
-	}
-	msm_venc_power_save_mode_enable(inst);
-	if (inst->capability.pixelprocess_capabilities &
-		HAL_VIDEO_ENCODER_SCALING_CAPABILITY)
-		rc = msm_vidc_check_scaling_supported(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "H/w scaling is not in valid range\n");
-		return -EINVAL;
-	}
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to get Buffer Requirements : %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_scratch_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to set scratch buffers: %d\n", rc);
-		goto fail_start;
-	}
-	rc = msm_comm_set_persist_buffers(inst);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to set persist buffers: %d\n", rc);
-		goto fail_start;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK to start done state\n", inst);
-		goto fail_start;
-	}
-	msm_dcvs_init_load(inst);
-
-fail_start:
-	return rc;
-}
-
-static int msm_venc_start_streaming(struct vb2_queue *q, unsigned int count)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
-		return -EINVAL;
-	}
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
-		q->type, inst);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
-			rc = start_streaming(inst);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		goto stream_start_failed;
-	}
-	if (rc) {
-		dprintk(VIDC_ERR,
-			"Streamon failed on: %d capability for inst: %pK\n",
-			q->type, inst);
-		goto stream_start_failed;
-	}
-
-	rc = msm_comm_qbuf(inst, NULL);
-	if (rc) {
-		dprintk(VIDC_ERR,
-				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
-				rc);
-		goto stream_start_failed;
-	}
-
-stream_start_failed:
-	return rc;
-}
-
-static void msm_venc_stop_streaming(struct vb2_queue *q)
-{
-	struct msm_vidc_inst *inst;
-	int rc = 0;
-
-	if (!q || !q->drv_priv) {
-		dprintk(VIDC_ERR, "%s - Invalid input, q = %pK\n", __func__, q);
-		return;
-	}
-
-	inst = q->drv_priv;
-	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
-	switch (q->type) {
-	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
-		break;
-	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
-		rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
-		break;
-	default:
-		dprintk(VIDC_ERR, "Q-type is not supported: %d\n", q->type);
-		rc = -EINVAL;
-		break;
-	}
-
-	msm_comm_scale_clocks_and_bus(inst);
-
-	if (rc)
-		dprintk(VIDC_ERR,
-			"Failed to move inst: %pK, cap = %d to state: %d\n",
-			inst, q->type, MSM_VIDC_CLOSE_DONE);
-}
-
-static void msm_venc_buf_queue(struct vb2_buffer *vb)
-{
-	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
-
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
-}
-
-static void msm_venc_buf_cleanup(struct vb2_buffer *vb)
-{
-	int rc = 0;
-	struct buf_queue *q = NULL;
-	struct msm_vidc_inst *inst = NULL;
-
-	if (!vb) {
-		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
-			__func__, vb);
-		return;
-	}
-
-	inst = vb2_get_drv_priv(vb->vb2_queue);
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
-			__func__);
-		return;
-	}
-
-	q = msm_comm_get_vb2q(inst, vb->type);
-	if (!q) {
-		dprintk(VIDC_ERR,
-			"%s : Failed to find buffer queue for type = %d\n",
-				__func__, vb->type);
-		return;
-	}
-
-	if (q->vb2_bufq.streaming) {
-		dprintk(VIDC_DBG, "%d PORT is streaming\n",
-			vb->type);
-		return;
-	}
-
-	rc = msm_vidc_release_buffers(inst, vb->type);
-	if (rc)
-		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
-			__func__, rc);
-}
-
-static const struct vb2_ops msm_venc_vb2q_ops = {
-	.queue_setup = msm_venc_queue_setup,
-	.start_streaming = msm_venc_start_streaming,
-	.buf_queue = msm_venc_buf_queue,
-	.buf_cleanup = msm_venc_buf_cleanup,
-	.stop_streaming = msm_venc_stop_streaming,
-};
-
-const struct vb2_ops *msm_venc_get_vb2q_ops(void)
-{
-	return &msm_venc_vb2q_ops;
-}
-
 static struct v4l2_ctrl *get_ctrl_from_cluster(int id,
 		struct v4l2_ctrl **cluster, int ncontrols)
 {
@@ -1813,6 +1447,7 @@
 		bitrate.bit_rate = ctrl->val;
 		bitrate.layer_id = 0;
 		pdata = &bitrate;
+		inst->bitrate = ctrl->val;
 		break;
 	}
 	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
@@ -2011,38 +1646,31 @@
 		pdata = &enable;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
-		struct v4l2_ctrl *air_mbs, *air_ref, *cir_mbs;
-		bool is_cont_intra_supported = false;
+	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+	{
+		struct v4l2_ctrl *ir_mbs;
 
-		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
-
-		is_cont_intra_supported =
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
+		ir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.mode = ctrl->val;
-		intra_refresh.air_mbs = air_mbs->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ctrl->val;
+		intra_refresh.ir_mbs = ir_mbs->val;
 
 		pdata = &intra_refresh;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS: {
-		struct v4l2_ctrl *ir_mode, *air_ref, *cir_mbs;
+	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS:
+	{
+		struct v4l2_ctrl *ir_mode;
 
 		ir_mode = TRY_GET_CTRL(
 				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.air_mbs = ctrl->val;
-		intra_refresh.mode = ir_mode->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ir_mode->val;
+		intra_refresh.ir_mbs = ctrl->val;
 
 		pdata = &intra_refresh;
 		break;
@@ -2119,11 +1747,63 @@
 		dprintk(VIDC_INFO, "Setting secure mode to: %d\n",
 				!!(inst->flags & VIDC_SECURE));
 		break;
-	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA:
+	case V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA: {
+		struct hal_buffer_requirements *buff_req_buffer = NULL;
+		int extra_idx = 0;
+
 		property_id = HAL_PARAM_INDEX_EXTRADATA;
 		extra.index = msm_comm_get_hal_extradata_index(ctrl->val);
 		extra.enable = 1;
+
+		switch (ctrl->val) {
+		case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+		case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+		case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+		case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+		case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+		case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+			inst->bufq[OUTPUT_PORT].num_planes = 2;
+			break;
+		case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+		case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+		case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+			inst->bufq[CAPTURE_PORT].num_planes = 2;
+			break;
+		default:
+			rc = -ENOTSUPP;
+			break;
+		}
+
 		pdata = &extra;
+		rc = call_hfi_op(hdev, session_set_property,
+				(void *)inst->session, property_id, pdata);
+
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+			break;
+		}
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_EXTRADATA_INPUT);
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes);
+
+		inst->bufq[OUTPUT_PORT].plane_sizes[extra_idx] =
+			buff_req_buffer ?
+			buff_req_buffer->buffer_size : 0;
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_EXTRADATA_OUTPUT);
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
+		inst->bufq[CAPTURE_PORT].plane_sizes[extra_idx] =
+			buff_req_buffer ?
+			buff_req_buffer->buffer_size : 0;
+
+		property_id = 0;
+		}
 		break;
 	case V4L2_CID_MPEG_VIDC_VIDEO_AU_DELIMITER:
 		property_id = HAL_PARAM_VENC_GENERATE_AUDNAL;
@@ -2437,6 +2117,9 @@
 		return -EINVAL;
 	}
 
+	/* This will check the range for controls and clip if necessary */
+	v4l2_try_ext_ctrls(&inst->ctrl_handler, ctrl);
+
 	hdev = inst->core->device;
 	cap = &inst->capability;
 
@@ -2681,6 +2364,9 @@
 	inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
 	inst->prop.fps = DEFAULT_FPS;
 	inst->capability.pixelprocess_capabilities = 0;
+	/* To start with, both ports are 1 plane each */
+	inst->bufq[OUTPUT_PORT].num_planes = 1;
+	inst->bufq[CAPTURE_PORT].num_planes = 1;
 	inst->operating_rate = 0;
 
 	memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
@@ -2750,8 +2436,9 @@
 {
 	struct msm_vidc_format *fmt = NULL;
 	int rc = 0;
-	int i;
 	struct hfi_device *hdev;
+	int extra_idx = 0, i = 0;
+	struct hal_buffer_requirements *buff_req_buffer;
 
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
@@ -2766,6 +2453,7 @@
 	hdev = inst->core->device;
 
 	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
 		fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats,
 			ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat,
 			CAPTURE_PORT);
@@ -2780,9 +2468,6 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		fmt->num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-
 		rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
 		if (rc) {
 			dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -2791,11 +2476,45 @@
 
 		inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
 		inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
-		rc = msm_vidc_check_session_supported(inst);
+
+		rc = msm_comm_try_get_bufreqs(inst);
 		if (rc) {
 			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto exit;
+				"Failed to get buffer requirements: %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Get CAPTURE plane size from HW. This may change based on
+		 * settings like Slice delivery mode. HW should decide how much
+		 * it needs.
+		 */
+
+		buff_req_buffer = get_buff_req_buffer(inst,
+			HAL_BUFFER_OUTPUT);
+
+		f->fmt.pix_mp.plane_fmt[0].sizeimage = buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+
+		/*
+		 * Get CAPTURE plane Extradata size from HW. This may change
+		 * with the number of extradatas enabled. HW should decide how much
+		 * it needs.
+		 */
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_OUTPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
+
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
+			inst->bufq[fmt->type].plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
 		}
 	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		struct hal_frame_size frame_sz;
@@ -2803,13 +2522,6 @@
 		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
 		inst->prop.height[OUTPUT_PORT] = f->fmt.pix_mp.height;
 
-		rc = msm_vidc_check_session_supported(inst);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: session not supported\n", __func__);
-			goto exit;
-		}
-
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width[OUTPUT_PORT];
 		frame_sz.height = inst->prop.height[OUTPUT_PORT];
@@ -2836,8 +2548,38 @@
 		memcpy(&inst->fmts[fmt->type], fmt,
 				sizeof(struct msm_vidc_format));
 
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		fmt->num_planes = inst->fmts[OUTPUT_PORT].num_planes;
+		f->fmt.pix_mp.plane_fmt[0].sizeimage =
+			inst->fmts[fmt->type].get_frame_size(0,
+			f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+		rc = msm_comm_try_get_bufreqs(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to get buffer requirements: %d\n", rc);
+			return rc;
+		}
+
+		/*
+		 * Get OUTPUT plane Extradata size from HW. This may change
+	 * with the number of extradatas enabled. HW should decide how much
+		 * it needs.
+		 */
+
+		extra_idx = EXTRADATA_IDX(inst->bufq[fmt->type].num_planes);
+		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+			buff_req_buffer = get_buff_req_buffer(inst,
+					HAL_BUFFER_EXTRADATA_INPUT);
+			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+				buff_req_buffer ?
+				buff_req_buffer->buffer_size : 0;
+		}
+
+		f->fmt.pix_mp.num_planes = inst->bufq[fmt->type].num_planes;
+
+		for (i = 0; i < inst->bufq[fmt->type].num_planes; i++) {
+			inst->bufq[fmt->type].plane_sizes[i] =
+				f->fmt.pix_mp.plane_fmt[i].sizeimage;
+		}
 
 		msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
 	} else {
@@ -2846,137 +2588,10 @@
 		rc = -EINVAL;
 		goto exit;
 	}
-
-	f->fmt.pix_mp.num_planes = fmt->num_planes;
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		struct hal_frame_size frame_sz = {0};
-		struct hal_buffer_requirements *bufreq = NULL;
-
-		frame_sz.width = inst->prop.width[CAPTURE_PORT];
-		frame_sz.height = inst->prop.height[CAPTURE_PORT];
-		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
-		rc = call_hfi_op(hdev, session_set_property, (void *)
-				inst->session, HAL_PARAM_FRAME_SIZE,
-				&frame_sz);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"Failed to set OUTPUT framesize\n");
-			goto exit;
-		}
-		rc = msm_comm_try_get_bufreqs(inst);
-		if (rc) {
-			dprintk(VIDC_WARN,
-				"%s : Getting buffer reqs failed: %d\n",
-					__func__, rc);
-			goto exit;
-		}
-		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_OUTPUT);
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		struct hal_buffer_requirements *bufreq = NULL;
-		int extra_idx = 0;
-
-		for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
-			f->fmt.pix_mp.plane_fmt[i].sizeimage =
-				inst->fmts[fmt->type].get_frame_size(i,
-				f->fmt.pix_mp.height, f->fmt.pix_mp.width);
-		}
-		extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
-		if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-			bufreq = get_buff_req_buffer(inst,
-					HAL_BUFFER_EXTRADATA_INPUT);
-			f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-				bufreq ? bufreq->buffer_size : 0;
-		}
-	}
 exit:
 	return rc;
 }
 
-int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
-{
-	const struct msm_vidc_format *fmt = NULL;
-	int rc = 0;
-	int i;
-	u32 height, width, num_planes;
-	unsigned int extra_idx = 0;
-	struct hal_buffer_requirements *bufreq = NULL;
-
-	if (!inst || !f) {
-		dprintk(VIDC_ERR,
-			"Invalid input, inst = %pK, format = %pK\n", inst, f);
-		return -EINVAL;
-	}
-
-	rc = msm_comm_try_get_bufreqs(inst);
-	if (rc) {
-		dprintk(VIDC_WARN, "Getting buffer requirements failed: %d\n",
-				rc);
-		return rc;
-	}
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		fmt = &inst->fmts[CAPTURE_PORT];
-		height = inst->prop.height[CAPTURE_PORT];
-		width = inst->prop.width[CAPTURE_PORT];
-		msm_venc_update_plane_count(inst, CAPTURE_PORT);
-		num_planes = inst->fmts[CAPTURE_PORT].num_planes;
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		fmt = &inst->fmts[OUTPUT_PORT];
-		height = inst->prop.height[OUTPUT_PORT];
-		width = inst->prop.width[OUTPUT_PORT];
-		msm_venc_update_plane_count(inst, OUTPUT_PORT);
-		num_planes = inst->fmts[OUTPUT_PORT].num_planes;
-	} else {
-		dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
-		return -ENOTSUPP;
-	}
-
-	f->fmt.pix_mp.pixelformat = fmt->fourcc;
-	f->fmt.pix_mp.height = height;
-	f->fmt.pix_mp.width = width;
-	f->fmt.pix_mp.num_planes = num_planes;
-
-	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-		for (i = 0; i < num_planes; ++i) {
-			f->fmt.pix_mp.plane_fmt[i].sizeimage =
-				fmt->get_frame_size(i, height, width);
-		}
-	} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-		bufreq = get_buff_req_buffer(inst,
-				HAL_BUFFER_OUTPUT);
-
-		f->fmt.pix_mp.plane_fmt[0].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	}
-	extra_idx = EXTRADATA_IDX(num_planes);
-	if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
-		if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-			bufreq = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_OUTPUT);
-		else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
-			bufreq = get_buff_req_buffer(inst,
-						HAL_BUFFER_EXTRADATA_INPUT);
-
-		f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
-			bufreq ? bufreq->buffer_size : 0;
-	}
-
-	for (i = 0; i < num_planes; ++i) {
-		if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
-			inst->bufq[OUTPUT_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-		} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
-			inst->bufq[CAPTURE_PORT].plane_sizes[i] =
-				f->fmt.pix_mp.plane_fmt[i].sizeimage;
-		}
-	}
-
-	return rc;
-}
-
 int msm_venc_ctrl_init(struct msm_vidc_inst *inst,
 	const struct v4l2_ctrl_ops *ctrl_ops)
 {
diff --git a/drivers/media/platform/msm/vidc/msm_venc.h b/drivers/media/platform/msm/vidc/msm_venc.h
index 0bb7de77a..6fe1db3 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.h
+++ b/drivers/media/platform/msm/vidc/msm_venc.h
@@ -22,9 +22,7 @@
 	const struct v4l2_ctrl_ops *ctrl_ops);
 int msm_venc_enum_fmt(void *instance, struct v4l2_fmtdesc *f);
 int msm_venc_s_fmt(void *instance, struct v4l2_format *f);
-int msm_venc_g_fmt(void *instance, struct v4l2_format *f);
 int msm_venc_s_ctrl(void *instance, struct v4l2_ctrl *ctrl);
 int msm_venc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
-struct vb2_ops *msm_venc_get_vb2q_ops(void);
 
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index e93b771..270fc31 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -187,15 +187,55 @@
 int msm_vidc_g_fmt(void *instance, struct v4l2_format *f)
 {
 	struct msm_vidc_inst *inst = instance;
+	int i, rc = 0, color_format = 0;
+	enum vidc_ports port;
+	u32 num_planes;
 
-	if (!inst || !f)
+	if (!inst || !f) {
+		dprintk(VIDC_ERR,
+			"Invalid input, inst = %pK, format = %pK\n", inst, f);
 		return -EINVAL;
+	}
+	if (inst->in_reconfig) {
+		inst->prop.height[OUTPUT_PORT] = inst->reconfig_height;
+		inst->prop.width[OUTPUT_PORT] = inst->reconfig_width;
+	}
 
-	if (inst->session_type == MSM_VIDC_DECODER)
-		return msm_vdec_g_fmt(instance, f);
-	else if (inst->session_type == MSM_VIDC_ENCODER)
-		return msm_venc_g_fmt(instance, f);
-	return -EINVAL;
+	port = f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+
+	f->fmt.pix_mp.pixelformat = inst->fmts[port].fourcc;
+	f->fmt.pix_mp.height = inst->prop.height[port];
+	f->fmt.pix_mp.width = inst->prop.width[port];
+	num_planes = f->fmt.pix_mp.num_planes = inst->bufq[port].num_planes;
+	for (i = 0; i < num_planes; ++i)
+		f->fmt.pix_mp.plane_fmt[i].sizeimage =
+			inst->bufq[port].plane_sizes[i];
+	switch (inst->fmts[port].fourcc) {
+	case V4L2_PIX_FMT_NV12:
+		color_format = COLOR_FMT_NV12;
+		break;
+	case V4L2_PIX_FMT_NV12_UBWC:
+		color_format = COLOR_FMT_NV12_UBWC;
+		break;
+	case V4L2_PIX_FMT_NV12_TP10_UBWC:
+		color_format = COLOR_FMT_NV12_BPP10_UBWC;
+		break;
+	default:
+		dprintk(VIDC_DBG,
+			"Invalid : g_fmt called on %s port with Invalid fourcc 0x%x\n",
+			port == OUTPUT_PORT ? "OUTPUT" : "CAPTURE",
+			inst->fmts[port].fourcc);
+		goto exit;
+	}
+
+	f->fmt.pix_mp.plane_fmt[0].bytesperline = VENUS_Y_STRIDE(color_format,
+			inst->prop.width[port]);
+	f->fmt.pix_mp.plane_fmt[0].reserved[0] = VENUS_Y_SCANLINES(color_format,
+			inst->prop.height[port]);
+
+exit:
+	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_g_fmt);
 
@@ -457,11 +497,12 @@
 		return -EINVAL;
 }
 
-static inline bool is_dynamic_output_buffer_mode(struct v4l2_buffer *b,
+static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b,
 				struct msm_vidc_inst *inst)
 {
-	return b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-		inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC;
+	enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
+		OUTPUT_PORT : CAPTURE_PORT;
+	return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC;
 }
 
 
@@ -518,7 +559,7 @@
 		}
 		mutex_lock(&inst->registeredbufs.lock);
 		temp = get_registered_buf(inst, b, i, &plane);
-		if (temp && !is_dynamic_output_buffer_mode(b, inst)) {
+		if (temp && !is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_DBG,
 				"This memory region has already been prepared\n");
 			rc = 0;
@@ -526,7 +567,7 @@
 			goto exit;
 		}
 
-		if (temp && is_dynamic_output_buffer_mode(b, inst) && !i) {
+		if (temp && is_dynamic_buffer_mode(b, inst) && !i) {
 			/*
 			 * Buffer is already present in registered list
 			 * increment ref_count, populate new values of v4l2
@@ -559,7 +600,7 @@
 		if (rc == 1) {
 			rc = 0;
 			goto exit;
-		} else if (rc == 2) {
+		} else if (rc >= 2) {
 			rc = -EEXIST;
 			goto exit;
 		}
@@ -589,7 +630,7 @@
 		}
 
 		/* We maintain one ref count for all planes*/
-		if (!i && is_dynamic_output_buffer_mode(b, inst)) {
+		if (!i && is_dynamic_buffer_mode(b, inst)) {
 			rc = buf_ref_get(inst, binfo);
 			if (rc < 0)
 				goto exit;
@@ -769,14 +810,16 @@
 								MAX_PORT_NUM;
 
 	return port != MAX_PORT_NUM &&
-		inst->fmts[port].num_planes == b->length;
+		inst->bufq[port].num_planes == b->length;
 }
 
-int msm_vidc_release_buffers(void *instance, int buffer_type)
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index)
 {
 	struct msm_vidc_inst *inst = instance;
 	struct buffer_info *bi, *dummy;
 	int i, rc = 0;
+	int found_buf = 0;
 
 	if (!inst)
 		return -EINVAL;
@@ -794,7 +837,8 @@
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == buffer_type) {
+		if (bi->type == buffer_type && bi->v4l2_index == buffer_index) {
+			found_buf = 1;
 			list_del(&bi->list);
 			for (i = 0; i < bi->num_planes; i++) {
 				if (bi->handle[i] && bi->mapped[i]) {
@@ -805,15 +849,38 @@
 						bi->buff_off[i], bi->mapped[i]);
 					msm_comm_smem_free(inst,
 							bi->handle[i]);
+					found_buf = 2;
 				}
 			}
 			kfree(bi);
+			break;
 		}
 	}
 	mutex_unlock(&inst->registeredbufs.lock);
+
+	switch (found_buf) {
+	case 0:
+		dprintk(VIDC_WARN,
+			"%s: No buffer(type: %d) found for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	case 1:
+		dprintk(VIDC_WARN,
+			"%s: Buffer(type: %d) found for index %d.",
+			__func__, buffer_type, buffer_index);
+		dprintk(VIDC_WARN, "zero planes mapped.\n");
+		break;
+	case 2:
+		dprintk(VIDC_DBG,
+			"%s: Released buffer(type: %d) for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	default:
+		break;
+	}
 	return rc;
 }
-EXPORT_SYMBOL(msm_vidc_release_buffers);
+EXPORT_SYMBOL(msm_vidc_release_buffer);
 
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
 {
@@ -834,7 +901,7 @@
 	rc = map_and_register_buf(inst, b);
 	if (rc == -EEXIST) {
 		if (atomic_read(&inst->in_flush) &&
-			is_dynamic_output_buffer_mode(b, inst)) {
+			is_dynamic_buffer_mode(b, inst)) {
 			dprintk(VIDC_ERR,
 				"Flush in progress, do not hold any buffers in driver\n");
 			msm_comm_flush_dynamic_buffers(inst);
@@ -958,7 +1025,7 @@
 		return rc;
 
 
-	if (is_dynamic_output_buffer_mode(b, inst)) {
+	if (is_dynamic_buffer_mode(b, inst)) {
 		buffer_info->dequeued = true;
 
 		dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n",
@@ -1062,6 +1129,433 @@
 	.put_userptr = vidc_put_userptr,
 };
 
+static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb)
+{
+	int rc = 0;
+	struct buf_queue *q = NULL;
+	struct msm_vidc_inst *inst = NULL;
+
+	if (!vb) {
+		dprintk(VIDC_ERR, "%s : Invalid vb pointer %pK",
+			__func__, vb);
+		return;
+	}
+
+	inst = vb2_get_drv_priv(vb->vb2_queue);
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s : Invalid inst pointer",
+			__func__);
+		return;
+	}
+
+	q = msm_comm_get_vb2q(inst, vb->type);
+	if (!q) {
+		dprintk(VIDC_ERR,
+			"%s : Failed to find buffer queue for type = %d\n",
+			__func__, vb->type);
+		return;
+	}
+
+	if (q->vb2_bufq.streaming) {
+		dprintk(VIDC_DBG, "%d PORT is streaming\n",
+			vb->type);
+		return;
+	}
+
+	rc = msm_vidc_release_buffer(inst, vb->type, vb->index);
+	if (rc)
+		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
+			__func__, rc);
+}
+
+static int set_buffer_count(struct msm_vidc_inst *inst,
+	int host_count, int act_count, enum hal_buffer type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_count_actual buf_count;
+
+	hdev = inst->core->device;
+
+	buf_count.buffer_type = type;
+	buf_count.buffer_count_actual = act_count;
+	buf_count.buffer_count_min_host = host_count;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, &buf_count);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set actual buffer count %d for buffer type %d\n",
+			act_count, type);
+	return rc;
+}
+
+static int msm_vidc_queue_setup(struct vb2_queue *q,
+	unsigned int *num_buffers, unsigned int *num_planes,
+	unsigned int sizes[], struct device *alloc_devs[])
+{
+	struct msm_vidc_inst *inst;
+	int i, rc = 0;
+	struct hal_buffer_requirements *bufreq;
+	enum hal_buffer buffer_type;
+
+	if (!q || !num_buffers || !num_planes
+		|| !sizes || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK, %pK, %pK\n",
+			q, num_buffers, num_planes);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: {
+		bufreq = get_buff_req_buffer(inst,
+			HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+				HAL_BUFFER_INPUT);
+			return -EINVAL;
+		}
+		if (*num_buffers < bufreq->buffer_count_actual) {
+			dprintk(VIDC_ERR,
+				"Invalid parameters : Req = %d Act = %d\n",
+				*num_buffers, bufreq->buffer_count_actual);
+			return -EINVAL;
+		}
+		*num_planes = inst->bufq[OUTPUT_PORT].num_planes;
+		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
+			*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
+			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+		for (i = 0; i < *num_planes; i++)
+			sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
+
+		bufreq->buffer_count_actual = *num_buffers;
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			*num_buffers, HAL_BUFFER_INPUT);
+		}
+
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
+		buffer_type = msm_comm_get_hal_output_buffer(inst);
+		bufreq = get_buff_req_buffer(inst,
+			buffer_type);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed : No buffer requirements : %x\n",
+				buffer_type);
+			return -EINVAL;
+		}
+		if (*num_buffers < bufreq->buffer_count_actual) {
+			dprintk(VIDC_ERR,
+				"Invalid parameters : Req = %d Act = %d\n",
+				*num_buffers, bufreq->buffer_count_actual);
+			return -EINVAL;
+		}
+		*num_planes = inst->bufq[CAPTURE_PORT].num_planes;
+		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
+			*num_buffers > MAX_NUM_CAPTURE_BUFFERS)
+			*num_buffers = MIN_NUM_CAPTURE_BUFFERS;
+
+		for (i = 0; i < *num_planes; i++)
+			sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
+
+		bufreq->buffer_count_actual = *num_buffers;
+		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+			*num_buffers, buffer_type);
+		}
+		break;
+	default:
+		dprintk(VIDC_ERR, "Invalid q type = %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static inline int msm_vidc_decide_core_and_power_mode(
+	struct msm_vidc_inst *inst)
+{
+	dprintk(VIDC_DBG,
+		"Core selection is not yet implemented for inst = %pK\n",
+			inst);
+	return 0;
+}
+static inline int msm_vidc_verify_buffer_counts(struct msm_vidc_inst *inst)
+{
+	int rc = 0, i = 0;
+
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		struct hal_buffer_requirements *req = &inst->buff_req.buffer[i];
+
+		dprintk(VIDC_DBG, "Verifying Buffer : %d\n", req->buffer_type);
+		if (!req ||
+			req->buffer_count_actual < req->buffer_count_min_host ||
+			req->buffer_count_min_host < req->buffer_count_min) {
+			dprintk(VIDC_ERR, "Invalid data : Counts mismatch\n");
+			dprintk(VIDC_ERR,
+				"Min Count = %d ", req->buffer_count_min);
+			dprintk(VIDC_ERR,
+				"Min Host Count = %d ",
+					req->buffer_count_min_host);
+			dprintk(VIDC_ERR,
+				"Min Actual Count = %d\n",
+					req->buffer_count_actual);
+			rc = -EINVAL;
+			break;
+		}
+	}
+	return rc;
+}
+
+static inline int start_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_size_minimum b;
+	struct vb2_buf_entry *temp, *next;
+
+	hdev = inst->core->device;
+
+	/* Check if current session is under HW capability */
+	rc = msm_vidc_check_session_supported(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session is not supported %pK\n", inst);
+		goto fail_start;
+	}
+
+	/* Assign Core and LP mode for current session */
+	rc = msm_vidc_decide_core_and_power_mode(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session can't be submitted to HW%pK\n", inst);
+		goto fail_start;
+	}
+
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		b.buffer_type = HAL_BUFFER_OUTPUT2;
+	} else {
+		b.buffer_type = HAL_BUFFER_OUTPUT;
+	}
+
+	b.buffer_size = inst->bufq[CAPTURE_PORT].plane_sizes[0];
+	rc = call_hfi_op(hdev, session_set_property,
+			inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM,
+			&b);
+
+	rc = msm_comm_try_get_bufreqs(inst);
+
+	/* Check if current session is under HW capability */
+	rc = msm_vidc_verify_buffer_counts(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"This session has mis-match buffer counts%pK\n", inst);
+		goto fail_start;
+	}
+
+	rc = msm_comm_set_scratch_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to set scratch buffers: %d\n", rc);
+		goto fail_start;
+	}
+	rc = msm_comm_set_persist_buffers(inst);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to set persist buffers: %d\n", rc);
+		goto fail_start;
+	}
+
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_set_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+	/*
+	 * For seq_changed_insufficient, driver should set session_continue
+	 * to firmware after the following sequence
+	 * - driver raises insufficient event to v4l2 client
+	 * - all output buffers have been flushed and freed
+	 * - v4l2 client queries buffer requirements and splits/combines OPB-DPB
+	 * - v4l2 client sets new set of buffers to firmware
+	 * - v4l2 client issues CONTINUE to firmware to resume decoding of
+	 *   submitted ETBs.
+	 */
+	if (inst->in_reconfig) {
+		dprintk(VIDC_DBG, "send session_continue after reconfig\n");
+		rc = call_hfi_op(hdev, session_continue,
+				(void *) inst->session);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"%s - failed to send session_continue\n",
+				__func__);
+			goto fail_start;
+		}
+	}
+	inst->in_reconfig = false;
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE);
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to start done state\n", inst);
+		goto fail_start;
+	}
+	msm_dcvs_init(inst);
+	if (msm_comm_get_stream_output_mode(inst) ==
+			HAL_VIDEO_DECODER_SECONDARY) {
+		rc = msm_comm_queue_output_buffers(inst);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to queue output buffers: %d\n", rc);
+			goto fail_start;
+		}
+	}
+
+fail_start:
+	if (rc) {
+		mutex_lock(&inst->pendingq.lock);
+		list_for_each_entry_safe(temp, next, &inst->pendingq.list,
+				list) {
+			vb2_buffer_done(temp->vb,
+					VB2_BUF_STATE_QUEUED);
+			list_del(&temp->list);
+			kfree(temp);
+		}
+		mutex_unlock(&inst->pendingq.lock);
+	}
+	return rc;
+}
+
+
+static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+	struct hfi_device *hdev;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return -EINVAL;
+	}
+	inst = q->drv_priv;
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
+		return -EINVAL;
+	}
+	hdev = inst->core->device;
+	dprintk(VIDC_DBG, "Streamon called on: %d capability for inst: %pK\n",
+		q->type, inst);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = start_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR, "Queue type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		goto stream_start_failed;
+	}
+	if (rc) {
+		dprintk(VIDC_ERR,
+			"Streamon failed on: %d capability for inst: %pK\n",
+			q->type, inst);
+		goto stream_start_failed;
+	}
+
+	rc = msm_comm_qbuf(inst, NULL);
+	if (rc) {
+		dprintk(VIDC_ERR,
+				"Failed to commit buffers queued before STREAM_ON to hardware: %d\n",
+				rc);
+		goto stream_start_failed;
+	}
+
+stream_start_failed:
+	return rc;
+}
+
+static inline int stop_streaming(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+
+	rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to move inst: %pK to state %d\n",
+				inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
+	return rc;
+}
+
+static void msm_vidc_stop_streaming(struct vb2_queue *q)
+{
+	struct msm_vidc_inst *inst;
+	int rc = 0;
+
+	if (!q || !q->drv_priv) {
+		dprintk(VIDC_ERR, "Invalid input, q = %pK\n", q);
+		return;
+	}
+
+	inst = q->drv_priv;
+	dprintk(VIDC_DBG, "Streamoff called on: %d capability\n", q->type);
+	switch (q->type) {
+	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+		if (!inst->bufq[CAPTURE_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+		if (!inst->bufq[OUTPUT_PORT].vb2_bufq.streaming)
+			rc = stop_streaming(inst);
+		break;
+	default:
+		dprintk(VIDC_ERR,
+			"Q-type is not supported: %d\n", q->type);
+		rc = -EINVAL;
+		break;
+	}
+
+	msm_comm_scale_clocks_and_bus(inst);
+
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed STOP Streaming inst = %pK on cap = %d\n",
+			inst, q->type);
+}
+
+static void msm_vidc_buf_queue(struct vb2_buffer *vb)
+{
+	int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb);
+
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc);
+}
+
+static const struct vb2_ops msm_vidc_vb2q_ops = {
+	.queue_setup = msm_vidc_queue_setup,
+	.start_streaming = msm_vidc_start_streaming,
+	.buf_queue = msm_vidc_buf_queue,
+	.buf_cleanup = msm_vidc_cleanup_buffer,
+	.stop_streaming = msm_vidc_stop_streaming,
+};
+
 static inline int vb2_bufq_init(struct msm_vidc_inst *inst,
 		enum v4l2_buf_type type, enum session_type sess)
 {
@@ -1079,11 +1573,8 @@
 	q->type = type;
 	q->io_modes = VB2_MMAP | VB2_USERPTR;
 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+	q->ops = &msm_vidc_vb2q_ops;
 
-	if (sess == MSM_VIDC_DECODER)
-		q->ops = msm_vdec_get_vb2q_ops();
-	else if (sess == MSM_VIDC_ENCODER)
-		q->ops = msm_venc_get_vb2q_ops();
 	q->mem_ops = &msm_vidc_vb2_mem_ops;
 	q->drv_priv = inst;
 	q->allow_zero_bytesused = 1;
@@ -1214,9 +1705,152 @@
 	return rc;
 }
 
+static int set_actual_buffer_count(struct msm_vidc_inst *inst,
+	int count, enum hal_buffer type)
+{
+	int rc = 0;
+	struct hfi_device *hdev;
+	struct hal_buffer_count_actual buf_count;
+
+	hdev = inst->core->device;
+
+	buf_count.buffer_type = type;
+	buf_count.buffer_count_min_host = count;
+	buf_count.buffer_count_actual = count;
+	rc = call_hfi_op(hdev, session_set_property,
+		inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL,
+		&buf_count);
+	if (rc)
+		dprintk(VIDC_ERR,
+			"Failed to set actual count %d for buffer type %d\n",
+			count, type);
+	return rc;
+}
+
+
+static int msm_vdec_get_count(struct msm_vidc_inst *inst,
+	struct v4l2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hal_buffer_requirements *bufreq, *newreq;
+	enum hal_buffer buffer_type;
+
+	if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT) {
+		bufreq = get_buff_req_buffer(inst, HAL_BUFFER_INPUT);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					HAL_BUFFER_INPUT);
+			return 0;
+		}
+		if (inst->bufq[OUTPUT_PORT].vb2_bufq.streaming) {
+			ctrl->val = bufreq->buffer_count_min_host;
+			return 0;
+		}
+		if (ctrl->val > bufreq->buffer_count_min_host) {
+			dprintk(VIDC_DBG,
+				"Interesting : Usually shouldn't happen\n");
+			bufreq->buffer_count_min_host = ctrl->val;
+		}
+		rc = set_actual_buffer_count(inst, ctrl->val,
+			HAL_BUFFER_INPUT);
+		return rc;
+
+	} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
+		int count = 0;
+
+		buffer_type = msm_comm_get_hal_output_buffer(inst);
+		bufreq = get_buff_req_buffer(inst,
+			buffer_type);
+		if (!bufreq) {
+			dprintk(VIDC_ERR,
+				"Failed to find bufreqs for buffer type = %d\n",
+					buffer_type);
+			return 0;
+		}
+		if (inst->bufq[CAPTURE_PORT].vb2_bufq.streaming) {
+			if (ctrl->val != bufreq->buffer_count_min_host)
+				return -EINVAL;
+			else
+				return 0;
+		}
+		count = bufreq->buffer_count_min_host;
+
+		if (inst->in_reconfig) {
+			rc = msm_comm_try_get_bufreqs(inst);
+			newreq = get_buff_req_buffer(inst,
+				buffer_type);
+			if (!newreq) {
+				dprintk(VIDC_ERR,
+					"Failed to find new bufreqs = %d\n",
+					buffer_type);
+				return 0;
+			}
+			newreq->buffer_count_min_host = count =
+				newreq->buffer_count_min +
+				msm_dcvs_get_extra_buff_count(inst);
+		}
+		if (!inst->in_reconfig &&
+			inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
+			dprintk(VIDC_DBG, "Clients will correct this\n");
+			rc = set_actual_buffer_count(inst, ctrl->val,
+				buffer_type);
+			bufreq->buffer_count_min_host = ctrl->val;
+			return 0;
+		}
+		bufreq->buffer_count_min_host = ctrl->val = count;
+		rc = set_actual_buffer_count(inst, ctrl->val,
+			buffer_type);
+
+		return rc;
+	}
+	return -EINVAL;
+}
+
 static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
 {
-	return 0;
+	int rc = 0;
+
+	/*
+	 * HACK: unlock the control prior to querying the hardware.  Otherwise
+	 * lower level code that attempts to do g_ctrl() will end up deadlocking
+	 * us.
+	 */
+	v4l2_ctrl_unlock(ctrl);
+
+	switch (ctrl->id) {
+
+	case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+	case V4L2_CID_MPEG_VIDC_VIDEO_VP8_PROFILE_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_PROFILE:
+		ctrl->val = inst->profile;
+	break;
+
+	case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+	case V4L2_CID_MPEG_VIDC_VIDEO_MPEG2_LEVEL:
+		ctrl->val = inst->level;
+	break;
+
+	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+		ctrl->val = inst->entropy_mode;
+	break;
+
+	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+		rc = msm_vdec_get_count(inst, ctrl);
+		break;
+	default:
+		/*
+		 * Other controls aren't really volatile, shouldn't need to
+		 * modify ctrl->value
+		 */
+		break;
+	}
+	v4l2_ctrl_lock(ctrl);
+
+	return rc;
 }
 
 static int msm_vidc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
@@ -1301,6 +1935,7 @@
 
 	INIT_MSM_VIDC_LIST(&inst->pendingq);
 	INIT_MSM_VIDC_LIST(&inst->scratchbufs);
+	INIT_MSM_VIDC_LIST(&inst->freqs);
 	INIT_MSM_VIDC_LIST(&inst->persistbufs);
 	INIT_MSM_VIDC_LIST(&inst->pending_getpropq);
 	INIT_MSM_VIDC_LIST(&inst->outputbufs);
@@ -1311,8 +1946,9 @@
 	inst->session_type = session_type;
 	inst->state = MSM_VIDC_CORE_UNINIT_DONE;
 	inst->core = core;
+	inst->freq = 0;
 	inst->bit_depth = MSM_VIDC_BIT_DEPTH_8;
-	inst->instant_bitrate = 0;
+	inst->bitrate = 0;
 	inst->pic_struct = MSM_VIDC_PIC_STRUCT_PROGRESSIVE;
 	inst->colour_space = MSM_VIDC_BT601_6_525;
 	inst->profile = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
@@ -1341,7 +1977,6 @@
 	if (rc)
 		goto fail_bufq_capture;
 
-	msm_dcvs_init(inst);
 	rc = vb2_bufq_init(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
 			session_type);
 	if (rc) {
@@ -1427,6 +2062,8 @@
 		}
 		mutex_unlock(&inst->pendingq.lock);
 
+		msm_comm_free_freq_table(inst);
+
 		if (msm_comm_release_scratch_buffers(inst, false)) {
 			dprintk(VIDC_ERR,
 				"Failed to release scratch buffers\n");
@@ -1517,10 +2154,17 @@
 	if (!inst || !inst->core)
 		return -EINVAL;
 
+	/*
+	 * Make sure that HW stop working on these buffers that
+	 * we are going to free.
+	 */
+	if (inst->state != MSM_VIDC_CORE_INVALID &&
+		inst->core->state != VIDC_CORE_INVALID)
+		rc = msm_comm_try_state(inst,
+				MSM_VIDC_RELEASE_RESOURCES_DONE);
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			int i = 0;
 
 			list_del(&bi->list);
@@ -1533,7 +2177,6 @@
 
 			kfree(bi);
 		}
-	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	cleanup_instance(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index d891644..70427d3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -16,32 +16,250 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
-#define IS_VALID_DCVS_SESSION(__cur_mbpf, __min_mbpf) \
-		((__cur_mbpf) >= (__min_mbpf))
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst);
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst);
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd);
-
-int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+int msm_comm_vote_bus(struct msm_vidc_core *core)
 {
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
+	int rc = 0, vote_data_count = 0, i = 0;
+	struct hfi_device *hdev;
+	struct msm_vidc_inst *inst = NULL;
+	struct vidc_bus_vote_data *vote_data = NULL;
+
+	if (!core) {
+		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
 		return -EINVAL;
 	}
-	inst->dcvs_mode = msm_dcvs_check_supported(inst);
-	return 0;
+
+	hdev = core->device;
+	if (!hdev) {
+		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
+				__func__, hdev);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core->lock);
+	list_for_each_entry(inst, &core->instances, list)
+		++vote_data_count;
+
+	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
+			GFP_TEMPORARY);
+	if (!vote_data) {
+		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	list_for_each_entry(inst, &core->instances, list) {
+		int codec = 0, yuv = 0;
+
+		codec = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[OUTPUT_PORT].fourcc :
+			inst->fmts[CAPTURE_PORT].fourcc;
+
+		yuv = inst->session_type == MSM_VIDC_DECODER ?
+			inst->fmts[CAPTURE_PORT].fourcc :
+			inst->fmts[OUTPUT_PORT].fourcc;
+
+		vote_data[i].domain = get_hal_domain(inst->session_type);
+		vote_data[i].codec = get_hal_codec(codec);
+		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
+				inst->prop.width[OUTPUT_PORT]);
+		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
+				inst->prop.height[OUTPUT_PORT]);
+
+		if (inst->operating_rate)
+			vote_data[i].fps = (inst->operating_rate >> 16) ?
+				inst->operating_rate >> 16 : 1;
+		else
+			vote_data[i].fps = inst->prop.fps;
+
+		/*
+		 * TODO: support for OBP-DBP split mode hasn't been yet
+		 * implemented, once it is, this part of code needs to be
+		 * revisited since passing in accurate information to the bus
+		 * governor will drastically reduce bandwidth
+		 */
+		//vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
+		vote_data[i].num_formats = 1;
+		i++;
+	}
+	mutex_unlock(&core->lock);
+
+	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
+			vote_data_count);
+	if (rc)
+		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
+
+	kfree(vote_data);
+	return rc;
+
+fail_alloc:
+	mutex_unlock(&core->lock);
+	return rc;
 }
 
+static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
+{
+	int fw_out_qsize = 0, buffers_in_driver = 0;
+
+	/*
+	 * DCVS always operates on Uncompressed buffers.
+	 * For Decoders, FTB and Encoders, ETB.
+	 */
+
+	if (inst->state >= MSM_VIDC_OPEN_DONE &&
+			inst->state < MSM_VIDC_STOP_DONE) {
+		if (inst->session_type == MSM_VIDC_DECODER)
+			fw_out_qsize = inst->count.ftb - inst->count.fbd;
+		else
+			fw_out_qsize = inst->count.etb - inst->count.ebd;
+
+		buffers_in_driver = inst->buffers_held_in_driver;
+	}
+
+	return fw_out_qsize + buffers_in_driver;
+}
+
+static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst)
+{
+	int rc = 0;
+	int fw_pending_bufs = 0;
+	int total_output_buf = 0;
+	int buffers_outside_fw = 0;
+	struct msm_vidc_core *core;
+	struct hal_buffer_requirements *output_buf_req;
+	struct dcvs_stats *dcvs;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	if (!inst->dcvs_mode) {
+		dprintk(VIDC_DBG, "DCVS is not enabled\n");
+		return 0;
+	}
+
+	dcvs = &inst->dcvs;
+
+	core = inst->core;
+	mutex_lock(&inst->lock);
+	fw_pending_bufs = get_pending_bufs_fw(inst);
+
+	output_buf_req = get_buff_req_buffer(inst,
+			dcvs->buffer_type);
+	mutex_unlock(&inst->lock);
+	if (!output_buf_req) {
+		dprintk(VIDC_ERR,
+				"%s: No buffer requirement for buffer type %x\n",
+				__func__, dcvs->buffer_type);
+		return -EINVAL;
+	}
+
+	/* Total number of output buffers */
+	total_output_buf = output_buf_req->buffer_count_actual;
+
+	/* Buffers outside FW are with display */
+	buffers_outside_fw = total_output_buf - fw_pending_bufs;
+	dprintk(VIDC_DBG,
+		"Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n",
+		total_output_buf, fw_pending_bufs, buffers_outside_fw);
+
+	if (buffers_outside_fw >=  dcvs->min_threshold &&
+			dcvs->load > dcvs->load_low) {
+		dcvs->load = dcvs->load_low;
+	} else if (buffers_outside_fw < dcvs->min_threshold &&
+			dcvs->load == dcvs->load_low) {
+		dcvs->load = dcvs->load_high;
+	}
+	return rc;
+}
+
+static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst,
+	unsigned long freq, ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+	bool found = false;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr) {
+			temp->freq = freq;
+			found = true;
+			break;
+		}
+	}
+
+	if (!found &&
+		(temp = kzalloc(sizeof(*temp), GFP_KERNEL))) {
+		temp->freq = freq;
+		temp->device_addr = device_addr;
+		list_add_tail(&temp->list, &inst->freqs.list);
+	}
+	mutex_unlock(&inst->freqs.lock);
+}
+
+/* TODO: remove this later and use queued_list instead */
+
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr)
+{
+	struct vidc_freq_data *temp, *next;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		if (temp->device_addr == device_addr)
+			temp->freq = 0;
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	inst->dcvs.buffer_counter++;
+}
+
+
+static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp;
+	unsigned long freq = 0;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry(temp, &inst->freqs.list, list) {
+		freq = max(freq, temp->freq);
+	}
+	mutex_unlock(&inst->freqs.lock);
+
+	/* If current requirement is within DCVS limits, try DCVS. */
+
+	if (freq < inst->dcvs.load_high) {
+		dprintk(VIDC_DBG, "Calling DCVS now\n");
+		// TODO calling DCVS here may reduce the residency. Re-visit.
+		msm_dcvs_scale_clocks(inst);
+		freq = inst->dcvs.load;
+	}
+
+	return freq;
+}
+
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst)
+{
+	struct vidc_freq_data *temp, *next;
+
+	mutex_lock(&inst->freqs.lock);
+	list_for_each_entry_safe(temp, next, &inst->freqs.list, list) {
+		list_del(&temp->list);
+		kfree(temp);
+	}
+	INIT_LIST_HEAD(&inst->freqs.list);
+	mutex_unlock(&inst->freqs.lock);
+}
+
+
 static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
 {
 	int height, width;
 
 	if (!inst->in_reconfig) {
 		height = max(inst->prop.height[CAPTURE_PORT],
-				inst->prop.height[OUTPUT_PORT]);
+			inst->prop.height[OUTPUT_PORT]);
 		width = max(inst->prop.width[CAPTURE_PORT],
-				inst->prop.width[OUTPUT_PORT]);
+			inst->prop.width[OUTPUT_PORT]);
 	} else {
 		height = inst->reconfig_height;
 		width = inst->reconfig_width;
@@ -50,31 +268,174 @@
 	return NUM_MBS_PER_FRAME(height, width);
 }
 
-static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core,
-	enum session_type session_type)
+static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst,
+	u32 filled_len)
 {
-	int active_instances = 0;
-	struct msm_vidc_inst *temp = NULL;
+	unsigned long freq = 0;
+	unsigned long vpp_cycles = 0, vsp_cycles = 0;
+	u32 vpp_cycles_per_mb;
+	u32 mbs_per_frame;
 
-	if (!core) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, core);
+	mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+
+	/*
+	 * Calculate vpp, vsp cycles separately for encoder and decoder.
+	 * Even though, most part is common now, in future it may change
+	 * between them.
+	 */
+
+	if (inst->session_type == MSM_VIDC_ENCODER) {
+		vpp_cycles_per_mb = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			inst->entry->vpp_cycles;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->bitrate * 10) / 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		vpp_cycles = mbs_per_frame * inst->entry->vpp_cycles;
+
+		vsp_cycles = mbs_per_frame * inst->entry->vsp_cycles;
+		/* 10 / 7 is overhead factor */
+		vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7;
+
+	} else {
+		// TODO return Min or Max ?
+		dprintk(VIDC_ERR, "%s: Unknown session type\n", __func__);
+		return freq;
+	}
+
+	freq = max(vpp_cycles, vsp_cycles);
+
+	return freq;
+}
+
+static int msm_vidc_set_clocks(struct msm_vidc_core *core)
+{
+	struct hfi_device *hdev;
+	unsigned long freq = 0, rate = 0;
+	struct msm_vidc_inst *temp = NULL;
+	int rc = 0, i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+
+	hdev = core->device;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	if (!hdev || !allowed_clks_tbl) {
+		dprintk(VIDC_ERR,
+			"%s Invalid parameters\n", __func__);
 		return -EINVAL;
 	}
 
-	/* DCVS condition is as following
-	 * Decoder DCVS : Only for ONE decoder session.
-	 * Encoder DCVS : Only for ONE encoder session + ONE decoder session
-	 */
 	mutex_lock(&core->lock);
 	list_for_each_entry(temp, &core->instances, list) {
-		if (temp->state >= MSM_VIDC_OPEN_DONE &&
-			temp->state < MSM_VIDC_STOP_DONE &&
-			(temp->session_type == session_type ||
-			 temp->session_type == MSM_VIDC_ENCODER))
-			active_instances++;
+		freq += temp->freq;
+	}
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= freq)
+			break;
 	}
 	mutex_unlock(&core->lock);
-	return active_instances;
+
+	core->freq = rate;
+	dprintk(VIDC_PROF, "Voting for freq = %lu", freq);
+	rc = call_hfi_op(hdev, scale_clocks,
+			hdev->hfi_device_data, rate);
+
+	return rc;
+}
+
+static unsigned long msm_vidc_max_freq(struct msm_vidc_inst *inst)
+{
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	unsigned long freq = 0;
+
+	allowed_clks_tbl = inst->core->resources.allowed_clks_tbl;
+	freq = allowed_clks_tbl[0].clock_rate;
+	dprintk(VIDC_PROF, "Max rate = %lu", freq);
+
+	return freq;
+}
+
+int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
+{
+	struct vb2_buf_entry *temp, *next;
+	unsigned long freq = 0;
+	u32 filled_len = 0;
+	ion_phys_addr_t device_addr = 0;
+
+	if (inst->dcvs.buffer_counter < DCVS_FTB_WINDOW) {
+		freq = msm_vidc_max_freq(inst);
+		goto decision_done;
+	}
+
+	mutex_lock(&inst->pendingq.lock);
+	list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) {
+		if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+			filled_len = max(filled_len,
+				temp->vb->planes[0].bytesused);
+			device_addr = temp->vb->planes[0].m.userptr;
+		}
+	}
+	mutex_unlock(&inst->pendingq.lock);
+
+	if (!filled_len || !device_addr) {
+		freq = inst->freq;
+		goto decision_done;
+	}
+
+	freq = msm_vidc_calc_freq(inst, filled_len);
+
+	msm_vidc_update_freq_entry(inst, freq, device_addr);
+
+	freq = msm_vidc_adjust_freq(inst);
+
+decision_done:
+	inst->freq = freq;
+	msm_vidc_set_clocks(inst->core);
+	return 0;
+}
+
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
+{
+	struct msm_vidc_core *core;
+	struct hfi_device *hdev;
+
+	if (!inst || !inst->core || !inst->core->device) {
+		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
+		return -EINVAL;
+	}
+	core = inst->core;
+	hdev = core->device;
+
+	if (msm_comm_scale_clocks(inst)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale clocks. Performance might be impacted\n");
+	}
+	if (msm_comm_vote_bus(core)) {
+		dprintk(VIDC_WARN,
+			"Failed to scale DDR bus. Performance might be impacted\n");
+	}
+	return 0;
+}
+
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+{
+	if (!inst) {
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, inst);
+		return -EINVAL;
+	}
+	if (inst->flags & VIDC_THUMBNAIL) {
+		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
+			inst);
+		return false;
+	}
+	inst->dcvs_mode = true;
+
+	// TODO : Update with proper number based on on-target tuning.
+	inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	return true;
 }
 
 static bool msm_dcvs_check_codec_supported(int fourcc,
@@ -104,90 +465,41 @@
 	return codec_type && session_type;
 }
 
-static void msm_dcvs_update_dcvs_params(int idx, struct msm_vidc_inst *inst)
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst)
 {
-	struct dcvs_stats *dcvs = NULL;
-	struct msm_vidc_platform_resources *res = NULL;
-	struct dcvs_table *table = NULL;
+	int rc = 0, j = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct clock_profile_entry *entry = NULL;
+	int fourcc;
 
-	if (!inst || !inst->core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
+	clk_freq_tbl = &inst->core->resources.clock_freq_tbl;
+	fourcc = inst->session_type == MSM_VIDC_DECODER ?
+		inst->fmts[OUTPUT_PORT].fourcc :
+		inst->fmts[CAPTURE_PORT].fourcc;
 
-	dcvs = &inst->dcvs;
-	res = &inst->core->resources;
-	table = res->dcvs_tbl;
+	for (j = 0; j < clk_freq_tbl->count; j++) {
+		bool matched = false;
 
-	dcvs->load_low = table[idx].load_low;
-	dcvs->load_high = table[idx].load_high;
-	dcvs->supported_codecs = table[idx].supported_codecs;
-}
+		entry = &clk_freq_tbl->clk_prof_entries[j];
 
-static void msm_dcvs_enc_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
+		matched = msm_dcvs_check_codec_supported(
+				fourcc,
+				entry->codec_mask,
+				inst->session_type);
 
-	if (inst->session_type == MSM_VIDC_ENCODER &&
-		msm_vidc_enc_dcvs_mode) {
-		rc = msm_dcvs_enc_scale_clocks(inst);
-		if (rc) {
-			dprintk(VIDC_DBG,
-				"ENC_DCVS: error while scaling clocks\n");
+		if (matched) {
+			inst->entry = entry;
+			break;
 		}
 	}
-}
 
-static void msm_dcvs_dec_check_and_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0;
-
-	if (inst->session_type == MSM_VIDC_DECODER &&
-		msm_vidc_dec_dcvs_mode) {
-		msm_dcvs_monitor_buffer(inst);
-		rc = msm_dcvs_dec_scale_clocks(inst, false);
-		if (rc) {
-			dprintk(VIDC_ERR,
-					"%s: Failed to scale clocks in DCVS: %d\n",
-					__func__, rc);
-		}
-	}
-}
-
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb)
-{
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	msm_dcvs_try_enable(inst);
-	if (!inst->dcvs_mode) {
-		dprintk(VIDC_DBG, "DCVS is not enabled\n");
-		return;
+	if (j == clk_freq_tbl->count) {
+		dprintk(VIDC_ERR,
+			"Failed : No matching clock entry found\n");
+		rc = -EINVAL;
 	}
 
-	if (is_etb)
-		msm_dcvs_enc_check_and_scale_clocks(inst);
-	else
-		msm_dcvs_dec_check_and_scale_clocks(inst);
-}
-
-static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst)
-{
-	int fw_out_qsize = 0, buffers_in_driver = 0;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
-		return -EINVAL;
-	}
-
-	if (inst->state >= MSM_VIDC_OPEN_DONE &&
-		inst->state < MSM_VIDC_STOP_DONE) {
-		fw_out_qsize = inst->count.ftb - inst->count.fbd;
-		buffers_in_driver = inst->buffers_held_in_driver;
-	}
-
-	return fw_out_qsize + buffers_in_driver;
+	return rc;
 }
 
 static inline void msm_dcvs_print_dcvs_stats(struct dcvs_stats *dcvs)
@@ -198,23 +510,18 @@
 		dcvs->load_high);
 
 	dprintk(VIDC_DBG,
-		"DCVS: ThrDispBufLow %d, ThrDispBufHigh %d\n",
-		dcvs->threshold_disp_buf_low,
-		dcvs->threshold_disp_buf_high);
-
-	dprintk(VIDC_DBG,
 		"DCVS: min_threshold %d, max_threshold %d\n",
 		dcvs->min_threshold, dcvs->max_threshold);
 }
 
-void msm_dcvs_init_load(struct msm_vidc_inst *inst)
+void msm_dcvs_init(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
+	int i = 0;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u64 total_freq = 0, rate = 0, load;
+	int cycles;
 	struct dcvs_stats *dcvs;
-	struct dcvs_table *table;
-	struct msm_vidc_platform_resources *res = NULL;
-	int i, num_rows, fourcc;
 
 	dprintk(VIDC_DBG, "Init DCVS Load\n");
 
@@ -225,414 +532,38 @@
 
 	core = inst->core;
 	dcvs = &inst->dcvs;
-	res = &core->resources;
-	dcvs->load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-
-	num_rows = res->dcvs_tbl_size;
-	table = res->dcvs_tbl;
-
-	if (!num_rows || !table) {
-		dprintk(VIDC_ERR,
-				"%s: Dcvs table entry not found.\n", __func__);
-		return;
-	}
-
-	fourcc = inst->session_type == MSM_VIDC_DECODER ?
-				inst->fmts[OUTPUT_PORT].fourcc :
-				inst->fmts[CAPTURE_PORT].fourcc;
-
-	for (i = 0; i < num_rows; i++) {
-		bool matches = msm_dcvs_check_codec_supported(
-					fourcc,
-					table[i].supported_codecs,
-					inst->session_type);
-		if (!matches)
-			continue;
-
-		if (dcvs->load > table[i].load) {
-			msm_dcvs_update_dcvs_params(i, inst);
-			break;
-		}
-	}
-
-	if (inst->session_type == MSM_VIDC_ENCODER)
-		goto print_stats;
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return;
-	}
-
-	dcvs->transition_turbo = false;
-
-	/* calculating the min and max threshold */
-	if (output_buf_req->buffer_count_actual) {
-		dcvs->min_threshold = output_buf_req->buffer_count_actual -
-			output_buf_req->buffer_count_min -
-			msm_dcvs_get_extra_buff_count(inst) + 1;
-		dcvs->max_threshold = output_buf_req->buffer_count_actual;
-		if (dcvs->max_threshold <= dcvs->min_threshold)
-			dcvs->max_threshold =
-				dcvs->min_threshold + DCVS_BUFFER_SAFEGUARD;
-		dcvs->threshold_disp_buf_low = dcvs->min_threshold;
-		dcvs->threshold_disp_buf_high = dcvs->max_threshold;
-	}
-
-print_stats:
-	msm_dcvs_print_dcvs_stats(dcvs);
-}
-
-void msm_dcvs_init(struct msm_vidc_inst *inst)
-{
-	dprintk(VIDC_DBG, "Init DCVS Struct\n");
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-
-	inst->dcvs = (struct dcvs_stats){ {0} };
-	inst->dcvs.threshold_disp_buf_high = DCVS_NOMINAL_THRESHOLD;
-	inst->dcvs.threshold_disp_buf_low = DCVS_TURBO_THRESHOLD;
-}
-
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst)
-{
-	int new_ftb, i, prev_buf_count;
-	int fw_pending_bufs, total_output_buf, buffers_outside_fw;
-	struct dcvs_stats *dcvs;
-	struct hal_buffer_requirements *output_buf_req;
-
-	if (!inst) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
-		return;
-	}
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR, "%s : Get output buffer req failed %pK\n",
-			__func__, inst);
-		mutex_unlock(&inst->lock);
-		return;
-	}
-
-	total_output_buf = output_buf_req->buffer_count_actual;
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-	mutex_unlock(&inst->lock);
-
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-	dcvs->num_ftb[dcvs->ftb_index] = buffers_outside_fw;
-	dcvs->ftb_index = (dcvs->ftb_index + 1) % DCVS_FTB_WINDOW;
-
-	if (dcvs->ftb_counter < DCVS_FTB_WINDOW)
-		dcvs->ftb_counter++;
-
-	dprintk(VIDC_PROF,
-		"DCVS: ftb_counter %d\n", dcvs->ftb_counter);
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW) {
-		new_ftb = 0;
-		for (i = 0; i < dcvs->ftb_counter; i++) {
-			if (dcvs->num_ftb[i] > new_ftb)
-				new_ftb = dcvs->num_ftb[i];
-		}
-
-		dcvs->threshold_disp_buf_high = new_ftb;
-		if (dcvs->threshold_disp_buf_high <=
-			dcvs->threshold_disp_buf_low +
-			DCVS_BUFFER_SAFEGUARD) {
-			dcvs->threshold_disp_buf_high =
-				dcvs->threshold_disp_buf_low +
-				DCVS_BUFFER_SAFEGUARD
-				+ (DCVS_BUFFER_SAFEGUARD == 0);
-		}
-
-		dcvs->threshold_disp_buf_high =
-			clamp(dcvs->threshold_disp_buf_high,
-				dcvs->min_threshold,
-				dcvs->max_threshold);
-	}
-
-	if (dcvs->ftb_counter == DCVS_FTB_WINDOW &&
-			dcvs->load == dcvs->load_low) {
-		prev_buf_count =
-			dcvs->num_ftb[((dcvs->ftb_index - 2 +
-				DCVS_FTB_WINDOW) % DCVS_FTB_WINDOW)];
-		if (prev_buf_count == dcvs->threshold_disp_buf_low &&
-			buffers_outside_fw <= dcvs->threshold_disp_buf_low) {
-			dcvs->transition_turbo = true;
-		} else if (buffers_outside_fw > dcvs->threshold_disp_buf_low &&
-			(buffers_outside_fw -
-			 (prev_buf_count - buffers_outside_fw))
-			< dcvs->threshold_disp_buf_low){
-			dcvs->transition_turbo = true;
-		}
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_output_buf %d buffers_outside_fw %d load %d transition_turbo %d\n",
-		total_output_buf, buffers_outside_fw, dcvs->load_low,
-		dcvs->transition_turbo);
-}
-
-static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst)
-{
-	int rc = 0, fw_pending_bufs = 0, total_input_buf = 0;
-	struct msm_vidc_core *core;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-
-	mutex_lock(&inst->lock);
-	total_input_buf = inst->buff_req.buffer[0].buffer_count_actual;
-	fw_pending_bufs = (inst->count.etb - inst->count.ebd);
-	mutex_unlock(&inst->lock);
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d\n",
-		total_input_buf, fw_pending_bufs);
-
-	if (dcvs->etb_counter < total_input_buf) {
-		dcvs->etb_counter++;
-		if (dcvs->etb_counter != total_input_buf)
-			return rc;
-	}
-
-	dprintk(VIDC_PROF,
-		"DCVS: total_input_buf %d, fw_pending_bufs %d etb_counter %d  dcvs->load %d\n",
-		total_input_buf, fw_pending_bufs,
-		dcvs->etb_counter, dcvs->load);
-
-	if (fw_pending_bufs <= DCVS_ENC_LOW_THR &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-	} else {
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (fw_pending_bufs >= DCVS_ENC_HIGH_THR &&
-		dcvs->load <= dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-	} else {
-		dcvs->prev_freq_increased = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: (Scaling Clock %s)  etb clock set = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->prev_freq_lowered ? "Lower" : "Higher",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_PROF,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: etb clock load_old = %d total_input_buf = %d fw_pending_bufs %d\n",
-			dcvs->load, total_input_buf, fw_pending_bufs);
-	}
-
-	return rc;
-}
-
-
-/*
- * In DCVS scale_clocks will be done both in qbuf and FBD
- * 1 indicates call made from fbd that lowers clock
- * 0 indicates call made from qbuf that increases clock
- * based on DCVS algorithm
- */
-
-static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd)
-{
-	int rc = 0;
-	int fw_pending_bufs = 0;
-	int total_output_buf = 0;
-	int buffers_outside_fw = 0;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	mutex_lock(&inst->lock);
-	fw_pending_bufs = get_pending_bufs_fw(inst);
-
-	output_buf_req = get_buff_req_buffer(inst,
-		msm_comm_get_hal_output_buffer(inst));
-	mutex_unlock(&inst->lock);
-	if (!output_buf_req) {
-		dprintk(VIDC_ERR,
-			"%s: No buffer requirement for buffer type %x\n",
-			__func__, HAL_BUFFER_OUTPUT);
-		return -EINVAL;
-	}
-
-	/* Total number of output buffers */
-	total_output_buf = output_buf_req->buffer_count_actual;
-
-	/* Buffers outside FW are with display */
-	buffers_outside_fw = total_output_buf - fw_pending_bufs;
-
-	if (buffers_outside_fw >= dcvs->threshold_disp_buf_high &&
-		!dcvs->prev_freq_increased &&
-		dcvs->load > dcvs->load_low) {
-		dcvs->load = dcvs->load_low;
-		dcvs->prev_freq_lowered = true;
-		dcvs->prev_freq_increased = false;
-	} else if (dcvs->transition_turbo && dcvs->load == dcvs->load_low) {
-		dcvs->load = dcvs->load_high;
-		dcvs->prev_freq_increased = true;
-		dcvs->prev_freq_lowered = false;
-		dcvs->transition_turbo = false;
-	} else {
-		dcvs->prev_freq_increased = false;
-		dcvs->prev_freq_lowered = false;
-	}
-
-	if (dcvs->prev_freq_lowered || dcvs->prev_freq_increased) {
-		dprintk(VIDC_PROF,
-			"DCVS: clock set = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-
-		rc = msm_comm_scale_clocks_load(core, dcvs->load,
-				LOAD_CALC_NO_QUIRKS);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed to set clock rate in FBD: %d\n", rc);
-		}
-	} else {
-		dprintk(VIDC_PROF,
-			"DCVS: clock old = %d tot_output_buf = %d buffers_outside_fw %d threshold_high %d transition_turbo %d\n",
-			dcvs->load, total_output_buf, buffers_outside_fw,
-			dcvs->threshold_disp_buf_high, dcvs->transition_turbo);
-	}
-	return rc;
-}
-
-static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
-{
-	int num_mbs_per_frame = 0, instance_count = 0;
-	long int instance_load = 0;
-	long int dcvs_limit = 0;
-	struct msm_vidc_inst *temp = NULL;
-	struct msm_vidc_core *core;
-	struct hal_buffer_requirements *output_buf_req;
-	struct dcvs_stats *dcvs;
-	bool is_codec_supported = false;
-	bool is_dcvs_supported = true;
-	struct msm_vidc_platform_resources *res = NULL;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
-		return -EINVAL;
-	}
-
-	core = inst->core;
-	dcvs = &inst->dcvs;
-	res = &core->resources;
-
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_WARN,
-				"%s: dcvs limit table not found\n", __func__);
-		return false;
-	}
-	instance_count = msm_dcvs_count_active_instances(core,
-		inst->session_type);
-	num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
-	instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
-	dcvs_limit =
-		(long int)res->dcvs_limit[inst->session_type].min_mbpf *
-		res->dcvs_limit[inst->session_type].fps;
-	inst->dcvs.extra_buffer_count = 0;
-
-	if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
-				res->dcvs_limit[inst->session_type].min_mbpf)) {
-		inst->dcvs.extra_buffer_count = 0;
-		is_dcvs_supported = false;
-		goto dcvs_decision_done;
-
-	}
-
-	if (inst->session_type == MSM_VIDC_DECODER) {
-		inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
-		output_buf_req = get_buff_req_buffer(inst,
-				msm_comm_get_hal_output_buffer(inst));
-		if (!output_buf_req) {
-			dprintk(VIDC_ERR,
-					"%s: No buffer requirement for buffer type %x\n",
-					__func__, HAL_BUFFER_OUTPUT);
-			return false;
-		}
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[OUTPUT_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_dec_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-			instance_count > 1)
-			is_dcvs_supported = false;
-	}
+	inst->dcvs = (struct dcvs_stats){0};
+	load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
+	cycles = inst->entry->vpp_cycles;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
 	if (inst->session_type == MSM_VIDC_ENCODER) {
-		inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
-		is_codec_supported =
-			msm_dcvs_check_codec_supported(
-				inst->fmts[CAPTURE_PORT].fourcc,
-				inst->dcvs.supported_codecs,
-				inst->session_type);
-		if (!is_codec_supported ||
-				!msm_vidc_enc_dcvs_mode) {
-			inst->dcvs.extra_buffer_count = 0;
-			is_dcvs_supported = false;
-			goto dcvs_decision_done;
-		}
-		if (msm_comm_turbo_session(inst) ||
-			!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
-				instance_count > 1)
-			is_dcvs_supported = false;
+		cycles = inst->flags & VIDC_LOW_POWER ?
+			inst->entry->low_power_cycles :
+			cycles;
+
+		dcvs->buffer_type = HAL_BUFFER_INPUT;
+		/* TODO: update with proper number based on buffer count changes */
+		dcvs->min_threshold = 7;
+	} else if (inst->session_type == MSM_VIDC_DECODER) {
+		dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst);
+		/* TODO: update with proper number based on buffer count changes */
+		dcvs->min_threshold = 4;
+	} else {
+		return;
 	}
-dcvs_decision_done:
-	if (!is_dcvs_supported) {
-		msm_comm_scale_clocks(core);
-		if (instance_count > 1) {
-			mutex_lock(&core->lock);
-			list_for_each_entry(temp, &core->instances, list)
-				temp->dcvs_mode = false;
-			mutex_unlock(&core->lock);
-		}
+
+	total_freq = cycles * load;
+
+	for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+		rate = allowed_clks_tbl[i].clock_rate;
+		if (rate >= total_freq)
+			break;
 	}
-	return is_dcvs_supported;
+
+	dcvs->load = dcvs->load_high = rate;
+	dcvs->load_low = i + 1 < (int)core->resources.allowed_clks_tbl_size ? allowed_clks_tbl[i + 1].clock_rate : rate;
+
+	msm_dcvs_print_dcvs_stats(dcvs);
 }
 
 int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 383c27e1..0229ccbb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -32,9 +32,12 @@
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
 void msm_dcvs_init(struct msm_vidc_inst *inst);
-void msm_dcvs_init_load(struct msm_vidc_inst *inst);
-void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst);
-void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb);
 int  msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
+int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
+int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
+void msm_comm_free_freq_table(struct msm_vidc_inst *inst);
+void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
+	ion_phys_addr_t device_addr);
+
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 302c306..4aaa525 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -773,103 +773,6 @@
 	return format;
 }
 
-static int msm_comm_vote_bus(struct msm_vidc_core *core)
-{
-	int rc = 0, vote_data_count = 0, i = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	struct vidc_bus_vote_data *vote_data = NULL;
-	unsigned long core_freq = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list)
-		++vote_data_count;
-
-	vote_data = kcalloc(vote_data_count, sizeof(*vote_data),
-			GFP_TEMPORARY);
-	if (!vote_data) {
-		dprintk(VIDC_ERR, "%s: failed to allocate memory\n", __func__);
-		rc = -ENOMEM;
-		goto fail_alloc;
-	}
-
-	core_freq = call_hfi_op(hdev, get_core_clock_rate,
-			hdev->hfi_device_data, 0);
-
-	list_for_each_entry(inst, &core->instances, list) {
-		int codec = 0, yuv = 0;
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		yuv = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[CAPTURE_PORT].fourcc :
-			inst->fmts[OUTPUT_PORT].fourcc;
-
-		vote_data[i].domain = get_hal_domain(inst->session_type);
-		vote_data[i].codec = get_hal_codec(codec);
-		vote_data[i].width =  max(inst->prop.width[CAPTURE_PORT],
-			inst->prop.width[OUTPUT_PORT]);
-		vote_data[i].height = max(inst->prop.height[CAPTURE_PORT],
-			inst->prop.height[OUTPUT_PORT]);
-
-		if (inst->operating_rate)
-			vote_data[i].fps = (inst->operating_rate >> 16) ?
-				inst->operating_rate >> 16 : 1;
-		else
-			vote_data[i].fps = inst->prop.fps;
-
-		if (msm_comm_turbo_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			vote_data[i].power_mode = VIDC_POWER_LOW;
-		else
-			vote_data[i].power_mode = VIDC_POWER_NORMAL;
-		if (i == 0) {
-			vote_data[i].imem_ab_tbl = core->resources.imem_ab_tbl;
-			vote_data[i].imem_ab_tbl_size =
-				core->resources.imem_ab_tbl_size;
-			vote_data[i].core_freq = core_freq;
-		}
-
-		/*
-		 * TODO: support for OBP-DBP split mode hasn't been yet
-		 * implemented, once it is, this part of code needs to be
-		 * revisited since passing in accurate information to the bus
-		 * governor will drastically reduce bandwidth
-		 */
-		vote_data[i].color_formats[0] = get_hal_uncompressed(yuv);
-		vote_data[i].num_formats = 1;
-		i++;
-	}
-	mutex_unlock(&core->lock);
-
-	rc = call_hfi_op(hdev, vote_bus, hdev->hfi_device_data, vote_data,
-			vote_data_count);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to scale bus: %d\n", rc);
-
-	kfree(vote_data);
-	return rc;
-
-fail_alloc:
-	mutex_unlock(&core->lock);
-	return rc;
-}
-
 struct msm_vidc_core *get_vidc_core(int core_id)
 {
 	struct msm_vidc_core *core;
@@ -1472,31 +1375,7 @@
 
 	switch (event_notify->hal_event_type) {
 	case HAL_EVENT_SEQ_CHANGED_SUFFICIENT_RESOURCES:
-		event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
-
-		if (msm_comm_get_stream_output_mode(inst) ==
-			HAL_VIDEO_DECODER_SECONDARY) {
-			struct hal_frame_size frame_sz;
-
-			frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
-			frame_sz.width = event_notify->width;
-			frame_sz.height = event_notify->height;
-			dprintk(VIDC_DBG,
-				"Update OPB dimensions to firmware if buffer requirements are sufficient\n");
-			rc = msm_comm_try_set_prop(inst,
-				HAL_PARAM_FRAME_SIZE, &frame_sz);
-		}
-
-		dprintk(VIDC_DBG,
-			"send session_continue after sufficient event\n");
-		rc = call_hfi_op(hdev, session_continue,
-				(void *) inst->session);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s - failed to send session_continue\n",
-				__func__);
-			goto err_bad_event;
-		}
+		event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
 		break;
 	case HAL_EVENT_SEQ_CHANGED_INSUFFICIENT_RESOURCES:
 		event = V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT;
@@ -1646,9 +1525,6 @@
 		inst->prop.width[OUTPUT_PORT] = event_notify->width;
 	}
 
-	if (inst->session_type == MSM_VIDC_DECODER)
-		msm_dcvs_init_load(inst);
-
 	rc = msm_vidc_check_session_supported(inst);
 	if (!rc) {
 		seq_changed_event.type = event;
@@ -2187,6 +2063,43 @@
 	return vb;
 }
 
+static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
+		ion_phys_addr_t device_addr, u32 flags)
+{
+	struct buffer_info *binfo = NULL, *temp = NULL;
+
+	/*
+	 * Update reference count and release OR queue back the buffer,
+	 * only when firmware is not holding a reference.
+	 */
+	binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
+	if (!binfo) {
+		dprintk(VIDC_ERR,
+			"%s buffer not found in registered list\n",
+			__func__);
+		return;
+	}
+	if (flags & HAL_BUFFERFLAG_READONLY) {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+	} else {
+		dprintk(VIDC_DBG,
+			"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
+			binfo->fd[0], &device_addr);
+
+		mutex_lock(&inst->registeredbufs.lock);
+		list_for_each_entry(temp, &inst->registeredbufs.list,
+				list) {
+			if (temp == binfo) {
+				buf_ref_put(inst, binfo);
+				break;
+			}
+		}
+		mutex_unlock(&inst->registeredbufs.lock);
+	}
+}
+
 static void handle_ebd(enum hal_command_response cmd, void *data)
 {
 	struct msm_vidc_cb_data_done *response = data;
@@ -2206,6 +2119,9 @@
 		dprintk(VIDC_WARN, "Got a response for an inactive session\n");
 		return;
 	}
+	if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC)
+		handle_dynamic_buffer(inst,
+			response->input_done.packet_buffer, 0);
 
 	vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT],
 			response->input_done.packet_buffer);
@@ -2245,6 +2161,8 @@
 			empty_buf_done->alloc_len, empty_buf_done->status,
 			empty_buf_done->picture_type, empty_buf_done->flags);
 
+		msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer);
+
 		mutex_lock(&inst->bufq[OUTPUT_PORT].lock);
 		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 		mutex_unlock(&inst->bufq[OUTPUT_PORT].lock);
@@ -2263,11 +2181,7 @@
 
 	atomic_inc(&binfo->ref_count);
 	cnt = atomic_read(&binfo->ref_count);
-	if (cnt > 2) {
-		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
-		cnt = -EINVAL;
-	}
-	if (cnt == 2)
+	if (cnt >= 2)
 		inst->buffers_held_in_driver++;
 
 	dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
@@ -2290,7 +2204,7 @@
 	dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]);
 	if (!cnt)
 		release_buf = true;
-	else if (cnt == 1)
+	else if (cnt >= 1)
 		qbuf_again = true;
 	else {
 		dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt);
@@ -2321,45 +2235,6 @@
 	return cnt;
 }
 
-static void handle_dynamic_buffer(struct msm_vidc_inst *inst,
-		ion_phys_addr_t device_addr, u32 flags)
-{
-	struct buffer_info *binfo = NULL, *temp = NULL;
-
-	/*
-	 * Update reference count and release OR queue back the buffer,
-	 * only when firmware is not holding a reference.
-	 */
-	if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_DYNAMIC) {
-		binfo = device_to_uvaddr(&inst->registeredbufs, device_addr);
-		if (!binfo) {
-			dprintk(VIDC_ERR,
-				"%s buffer not found in registered list\n",
-				__func__);
-			return;
-		}
-		if (flags & HAL_BUFFERFLAG_READONLY) {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> Reference with f/w, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-		} else {
-			dprintk(VIDC_DBG,
-				"FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n",
-				binfo->fd[0], &device_addr);
-
-			mutex_lock(&inst->registeredbufs.lock);
-			list_for_each_entry(temp, &inst->registeredbufs.list,
-							list) {
-				if (temp == binfo) {
-					buf_ref_put(inst, binfo);
-					break;
-				}
-			}
-			mutex_unlock(&inst->registeredbufs.lock);
-		}
-	}
-}
-
 static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
 		ion_phys_addr_t dev_addr)
 {
@@ -2410,7 +2285,7 @@
 	struct vidc_hal_fbd *fill_buf_done;
 	enum hal_buffer buffer_type;
 	int extra_idx = 0;
-	u64 time_nsec = 0;
+	u64 time_usec = 0;
 	struct vb2_v4l2_buffer *vbuf = NULL;
 
 	if (!response) {
@@ -2458,11 +2333,11 @@
 				vb->planes[0].length);
 		if (!(fill_buf_done->flags1 &
 			HAL_BUFFERFLAG_TIMESTAMPINVALID)) {
-			time_nsec = fill_buf_done->timestamp_hi;
-			time_nsec = (time_nsec << 32) |
+			time_usec = fill_buf_done->timestamp_hi;
+			time_usec = (time_usec << 32) |
 				fill_buf_done->timestamp_lo;
 		} else {
-			time_nsec = 0;
+			time_usec = 0;
 			dprintk(VIDC_DBG,
 					"Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n",
 					&fill_buf_done->packet_buffer1,
@@ -2471,10 +2346,10 @@
 					fill_buf_done->timestamp_lo);
 		}
 		vbuf->flags = 0;
-		vb->timestamp = time_nsec;
+		vb->timestamp = (time_usec * NSEC_PER_USEC);
 
 		extra_idx =
-			EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
+			EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
 		if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
 			vb->planes[extra_idx].m.userptr =
 				(unsigned long)fill_buf_done->extra_data_buffer;
@@ -2483,6 +2358,8 @@
 			vb->planes[extra_idx].data_offset = 0;
 		}
 
+		if (inst->buffer_mode_set[CAPTURE_PORT] ==
+			HAL_BUFFER_MODE_DYNAMIC)
 		handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1,
 					fill_buf_done->flags1);
 		if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY)
@@ -2537,7 +2414,7 @@
 		dprintk(VIDC_DBG,
 		"Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n",
 		&fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1,
-		fill_buf_done->filled_len1, fill_buf_done->offset1, time_nsec,
+		fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec,
 		fill_buf_done->flags1, fill_buf_done->start_x_coord,
 		fill_buf_done->start_y_coord, fill_buf_done->frame_width,
 		fill_buf_done->frame_height, fill_buf_done->picture_type,
@@ -2665,127 +2542,6 @@
 	}
 }
 
-int msm_comm_scale_clocks(struct msm_vidc_core *core)
-{
-	int num_mbs_per_sec, enc_mbs_per_sec, dec_mbs_per_sec;
-
-	enc_mbs_per_sec =
-		msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS);
-	dec_mbs_per_sec	=
-		msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
-
-	if (enc_mbs_per_sec >= dec_mbs_per_sec) {
-	/*
-	 * If Encoder load is higher, use that load. Encoder votes for higher
-	 * clock. Since Encoder and Deocder run on parallel cores, this clock
-	 * should suffice decoder usecases.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec;
-	} else {
-	/*
-	 * If Decoder load is higher, it's tricky to decide clock. Decoder
-	 * higher load might results less clocks than Encoder smaller load.
-	 * At this point driver doesn't know which clock to vote. Hence use
-	 * total load.
-	 */
-		num_mbs_per_sec = enc_mbs_per_sec + dec_mbs_per_sec;
-	}
-
-	return msm_comm_scale_clocks_load(core, num_mbs_per_sec,
-			LOAD_CALC_NO_QUIRKS);
-}
-
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks)
-{
-	int rc = 0;
-	struct hfi_device *hdev;
-	struct msm_vidc_inst *inst = NULL;
-	unsigned long instant_bitrate = 0;
-	int num_sessions = 0;
-	struct vidc_clk_scale_data clk_scale_data = { {0} };
-	int codec = 0;
-
-	if (!core) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, core);
-		return -EINVAL;
-	}
-
-	hdev = core->device;
-	if (!hdev) {
-		dprintk(VIDC_ERR, "%s Invalid device handle: %pK\n",
-			__func__, hdev);
-		return -EINVAL;
-	}
-
-	mutex_lock(&core->lock);
-	list_for_each_entry(inst, &core->instances, list) {
-
-		codec = inst->session_type == MSM_VIDC_DECODER ?
-			inst->fmts[OUTPUT_PORT].fourcc :
-			inst->fmts[CAPTURE_PORT].fourcc;
-
-		if (msm_comm_turbo_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_TURBO;
-		else if (is_low_power_session(inst))
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_LOW;
-		else
-			clk_scale_data.power_mode[num_sessions] =
-				VIDC_POWER_NORMAL;
-
-		if (inst->dcvs_mode)
-			clk_scale_data.load[num_sessions] = inst->dcvs.load;
-		else
-			clk_scale_data.load[num_sessions] =
-				msm_comm_get_inst_load(inst, quirks);
-
-		clk_scale_data.session[num_sessions] =
-				VIDC_VOTE_DATA_SESSION_VAL(
-				get_hal_codec(codec),
-				get_hal_domain(inst->session_type));
-		num_sessions++;
-
-		if (inst->instant_bitrate > instant_bitrate)
-			instant_bitrate = inst->instant_bitrate;
-
-	}
-	clk_scale_data.num_sessions = num_sessions;
-	mutex_unlock(&core->lock);
-
-
-	rc = call_hfi_op(hdev, scale_clocks,
-		hdev->hfi_device_data, num_mbs_per_sec,
-		&clk_scale_data, instant_bitrate);
-	if (rc)
-		dprintk(VIDC_ERR, "Failed to set clock rate: %d\n", rc);
-
-	return rc;
-}
-
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst)
-{
-	struct msm_vidc_core *core;
-	struct hfi_device *hdev;
-
-	if (!inst || !inst->core || !inst->core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return;
-	}
-	core = inst->core;
-	hdev = core->device;
-
-	if (msm_comm_scale_clocks(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-	}
-	if (msm_comm_vote_bus(core)) {
-		dprintk(VIDC_WARN,
-				"Failed to scale DDR bus. Performance might be impacted\n");
-	}
-}
-
 static inline enum msm_vidc_thermal_level msm_comm_vidc_thermal_level(int level)
 {
 	switch (level) {
@@ -2800,33 +2556,16 @@
 	}
 }
 
-static unsigned long msm_comm_get_clock_rate(struct msm_vidc_core *core)
-{
-	struct hfi_device *hdev;
-	unsigned long freq = 0;
-
-	if (!core || !core->device) {
-		dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
-		return -EINVAL;
-	}
-	hdev = core->device;
-
-	freq = call_hfi_op(hdev, get_core_clock_rate, hdev->hfi_device_data, 1);
-	dprintk(VIDC_DBG, "clock freq %ld\n", freq);
-
-	return freq;
-}
-
 static bool is_core_turbo(struct msm_vidc_core *core, unsigned long freq)
 {
 	int i = 0;
-	struct msm_vidc_platform_resources *res = &core->resources;
-	struct load_freq_table *table = res->load_freq_tbl;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
 	u32 max_freq = 0;
 
-	for (i = 0; i < res->load_freq_tbl_size; i++) {
-		if (max_freq < table[i].freq)
-			max_freq = table[i].freq;
+	allowed_clks_tbl = core->resources.allowed_clks_tbl;
+	for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
+		if (max_freq < allowed_clks_tbl[i].clock_rate)
+			max_freq = allowed_clks_tbl[i].clock_rate;
 	}
 	return freq >= max_freq;
 }
@@ -2848,7 +2587,7 @@
 	}
 
 	tl = msm_comm_vidc_thermal_level(vidc_driver->thermal_level);
-	freq = msm_comm_get_clock_rate(core);
+	freq = core->freq;
 
 	is_turbo = is_core_turbo(core, freq);
 	dprintk(VIDC_DBG,
@@ -3066,6 +2805,8 @@
 core_already_inited:
 	change_inst_state(inst, MSM_VIDC_CORE_INIT);
 	mutex_unlock(&core->lock);
+
+	rc = msm_comm_scale_clocks_and_bus(inst);
 	return rc;
 
 fail_core_init:
@@ -3159,6 +2900,8 @@
 		return -EINVAL;
 	}
 
+	msm_comm_init_clocks_and_bus_data(inst);
+
 	rc = call_hfi_op(hdev, session_init, hdev->hfi_device_data,
 			inst, get_hal_domain(inst->session_type),
 			get_hal_codec(fourcc),
@@ -3985,15 +3728,19 @@
 static void populate_frame_data(struct vidc_frame_data *data,
 		const struct vb2_buffer *vb, struct msm_vidc_inst *inst)
 {
+	u64 time_usec;
 	int extra_idx;
 	enum v4l2_buf_type type = vb->type;
 	enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ?
 		OUTPUT_PORT : CAPTURE_PORT;
 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 
+	time_usec = vb->timestamp;
+	do_div(time_usec, NSEC_PER_USEC);
+
 	data->alloc_len = vb->planes[0].length;
 	data->device_addr = vb->planes[0].m.userptr;
-	data->timestamp = vb->timestamp;
+	data->timestamp = time_usec;
 	data->flags = 0;
 	data->clnt_data = data->device_addr;
 
@@ -4029,7 +3776,7 @@
 		data->buffer_type = msm_comm_get_hal_output_buffer(inst);
 	}
 
-	extra_idx = EXTRADATA_IDX(inst->fmts[port].num_planes);
+	extra_idx = EXTRADATA_IDX(inst->bufq[port].num_planes);
 	if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
 			vb->planes[extra_idx].m.userptr) {
 		data->extradata_addr = vb->planes[extra_idx].m.userptr;
@@ -4085,6 +3832,7 @@
 static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data,
 		enum v4l2_buf_type type)
 {
+
 	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n",
@@ -4092,13 +3840,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
 
-		if (msm_vidc_bitrate_clock_scaling &&
-			inst->session_type == MSM_VIDC_DECODER &&
-			!inst->dcvs_mode)
-			inst->instant_bitrate =
-				data->filled_len * 8 * inst->prop.fps;
-		else
-			inst->instant_bitrate = 0;
 	} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 		dprintk(VIDC_DBG,
 			"Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n",
@@ -4106,20 +3847,6 @@
 			data->timestamp, data->flags);
 		msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);
 	}
-
-	msm_dcvs_check_and_scale_clocks(inst,
-			type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
-
-	if (msm_vidc_bitrate_clock_scaling && !inst->dcvs_mode &&
-		type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
-		inst->session_type == MSM_VIDC_DECODER)
-		if (msm_comm_scale_clocks(inst->core))
-			dprintk(VIDC_WARN,
-				"Failed to scale clocks. Performance might be impacted\n");
-
-	if (msm_comm_vote_bus(inst->core))
-		dprintk(VIDC_WARN,
-			"Failed to scale bus. Performance might be impacted\n");
 }
 
 /*
@@ -4202,6 +3929,8 @@
 		return 0;
 	}
 
+	rc = msm_comm_scale_clocks_and_bus(inst);
+
 	dprintk(VIDC_DBG, "%sing %d etbs and %d ftbs\n",
 			batch_mode ? "Batch" : "Process",
 			output_count, capture_count);
@@ -4874,20 +4603,22 @@
 	 * driver should not queue any new buffer it has been holding.
 	 *
 	 * Each dynamic o/p buffer can have one of following ref_count:
-	 * ref_count : 0 - f/w has released reference and sent fbd back.
-	 *		  The buffer has been returned back to client.
+	 * ref_count : 0   - f/w has released reference and sent dynamic
+	 *                   buffer back. The buffer has been returned
+	 *                   back to client.
 	 *
-	 * ref_count : 1 - f/w is holding reference. f/w may have released
-	 *                 fbd as read_only OR fbd is pending. f/w will
-	 *		  release reference before sending flush_done.
+	 * ref_count : 1   - f/w is holding reference. f/w may have released
+	 *                   dynamic buffer as read_only OR dynamic buffer is
+	 *                   pending. f/w will release reference before sending
+	 *                   flush_done.
 	 *
-	 * ref_count : 2 - f/w is holding reference, f/w has released fbd as
-	 *                 read_only, which client has queued back to driver.
-	 *                 driver holds this buffer and will queue back
-	 *                 only when f/w releases the reference. During
-	 *		  flush_done, f/w will release the reference but driver
-	 *		  should not queue back the buffer to f/w.
-	 *		  Flush all buffers with ref_count 2.
+	 * ref_count : >=2 - f/w is holding reference, f/w has released dynamic
+	 *                   buffer as read_only, which client has queued back
+	 *                   to driver. Driver holds this buffer and will queue
+	 *                   back only when f/w releases the reference. During
+	 *                   flush_done, f/w will release the reference but
+	 *                   driver should not queue back the buffer to f/w.
+	 *                   Flush all buffers with ref_count >= 2.
 	 */
 	mutex_lock(&inst->registeredbufs.lock);
 	if (!list_empty(&inst->registeredbufs.list)) {
@@ -4896,7 +4627,7 @@
 
 		list_for_each_entry(binfo, &inst->registeredbufs.list, list) {
 			if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
-				atomic_read(&binfo->ref_count) == 2) {
+				atomic_read(&binfo->ref_count) >= 2) {
 
 				atomic_dec(&binfo->ref_count);
 				buf_event.type =
@@ -4988,6 +4719,10 @@
 		return 0;
 	}
 
+	/* Finish the flush as soon as possible. */
+	inst->dcvs.buffer_counter = 0;
+	msm_comm_scale_clocks_and_bus(inst);
+
 	msm_comm_flush_dynamic_buffers(inst);
 
 	if (inst->state == MSM_VIDC_CORE_INVALID ||
@@ -5323,9 +5058,6 @@
 		return -ENOTSUPP;
 	}
 
-	if (!rc)
-		msm_dcvs_try_enable(inst);
-
 	if (!rc) {
 		if (inst->prop.width[CAPTURE_PORT] < capability->width.min ||
 			inst->prop.height[CAPTURE_PORT] <
@@ -5642,11 +5374,7 @@
 			if (rc)
 				dprintk(VIDC_WARN,
 					"Failed to set frame rate %d\n", rc);
-		} else {
-			msm_dcvs_init_load(inst);
 		}
-		msm_comm_scale_clocks_and_bus(inst);
-		msm_dcvs_try_enable(inst);
 	}
 exit:
 	return rc;
@@ -5730,8 +5458,7 @@
 	}
 	core = inst->core;
 
-	dprintk(VIDC_ERR, "Venus core frequency = %lu",
-		msm_comm_get_clock_rate(core));
+	dprintk(VIDC_ERR, "Venus core frequency = %lu", core->freq);
 	mutex_lock(&core->lock);
 	dprintk(VIDC_ERR, "Printing instance info that caused Error\n");
 	msm_comm_print_inst_info(inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index d898682..39a28b3 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -46,10 +46,6 @@
 int msm_comm_set_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst);
 int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb);
-void msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
-int msm_comm_scale_clocks(struct msm_vidc_core *core);
-int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
-		int num_mbs_per_sec, enum load_calc_quirks quirks);
 void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst);
 int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags);
 int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
@@ -101,5 +97,4 @@
 void msm_comm_print_inst_info(struct msm_vidc_inst *inst);
 int msm_comm_v4l2_to_hal(int id, int value);
 int msm_comm_hal_to_v4l2(int id, int value);
-
 #endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index f418260..15ee8a8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -50,7 +50,7 @@
 })
 
 #define DYNAMIC_BUF_OWNER(__binfo) ({ \
-	atomic_read(&__binfo->ref_count) == 2 ? "video driver" : "firmware";\
+	atomic_read(&__binfo->ref_count) >= 2 ? "video driver" : "firmware";\
 })
 
 static int core_info_open(struct inode *inode, struct file *file)
@@ -296,7 +296,7 @@
 		write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ?
 			"Output" : "Capture");
 		write_str(&dbg_buf, "name : %s\n", inst->fmts[i].name);
-		write_str(&dbg_buf, "planes : %d\n", inst->fmts[i].num_planes);
+		write_str(&dbg_buf, "planes : %d\n", inst->bufq[i].num_planes);
 		write_str(
 		&dbg_buf, "type: %s\n", inst->fmts[i].type == OUTPUT_PORT ?
 		"Output" : "Capture");
@@ -314,7 +314,7 @@
 		write_str(&dbg_buf, "count: %u\n",
 				inst->bufq[i].vb2_bufq.num_buffers);
 
-		for (j = 0; j < inst->fmts[i].num_planes; j++)
+		for (j = 0; j < inst->bufq[i].num_planes; j++)
 			write_str(&dbg_buf, "size for plane %d: %u\n", j,
 			inst->bufq[i].plane_sizes[j]);
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 4a14ca3..8562e8f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -45,6 +45,11 @@
 #define MIN_SUPPORTED_WIDTH 32
 #define MIN_SUPPORTED_HEIGHT 32
 #define DEFAULT_FPS 15
+#define MIN_NUM_OUTPUT_BUFFERS 1
+#define MIN_NUM_CAPTURE_BUFFERS 1
+#define MAX_NUM_OUTPUT_BUFFERS VIDEO_MAX_FRAME /* same as VB2_MAX_FRAME */
+#define MAX_NUM_CAPTURE_BUFFERS VIDEO_MAX_FRAME /* same as VB2_MAX_FRAME */
+
 
 /* Maintains the number of FTB's between each FBD over a window */
 #define DCVS_FTB_WINDOW 32
@@ -134,6 +139,12 @@
 	MAX_OWNER
 };
 
+struct vidc_freq_data {
+	struct list_head list;
+	ion_phys_addr_t device_addr;
+	unsigned long freq;
+};
+
 struct internal_buf {
 	struct list_head list;
 	enum hal_buffer buffer_type;
@@ -145,7 +156,6 @@
 	char name[MAX_NAME_LENGTH];
 	u8 description[32];
 	u32 fourcc;
-	int num_planes;
 	int type;
 	u32 (*get_frame_size)(int plane, u32 height, u32 width);
 	bool defer_outputs;
@@ -175,7 +185,8 @@
 struct buf_queue {
 	struct vb2_queue vb2_bufq;
 	struct mutex lock;
-	unsigned int	plane_sizes[VB2_MAX_PLANES];
+	unsigned int plane_sizes[VB2_MAX_PLANES];
+	int num_planes;
 };
 
 enum profiling_points {
@@ -195,23 +206,14 @@
 };
 
 struct dcvs_stats {
-	int num_ftb[DCVS_FTB_WINDOW];
-	bool transition_turbo;
-	int ftb_index;
-	int ftb_counter;
-	bool prev_freq_lowered;
-	bool prev_freq_increased;
-	int threshold_disp_buf_high;
-	int threshold_disp_buf_low;
+	int buffer_counter;
 	int load;
 	int load_low;
 	int load_high;
 	int min_threshold;
 	int max_threshold;
-	int etb_counter;
-	bool is_power_save_mode;
 	unsigned int extra_buffer_count;
-	u32 supported_codecs;
+	enum hal_buffer buffer_type;
 };
 
 struct profile_data {
@@ -256,6 +258,7 @@
 	struct msm_vidc_capability *capabilities;
 	struct delayed_work fw_unload_work;
 	bool smmu_fault_handled;
+	unsigned long freq;
 };
 
 struct msm_vidc_inst {
@@ -269,6 +272,7 @@
 	struct msm_vidc_format fmts[MAX_PORT_NUM];
 	struct buf_queue bufq[MAX_PORT_NUM];
 	struct msm_vidc_list pendingq;
+	struct msm_vidc_list freqs;
 	struct msm_vidc_list scratchbufs;
 	struct msm_vidc_list persistbufs;
 	struct msm_vidc_list pending_getpropq;
@@ -297,7 +301,8 @@
 	bool dcvs_mode;
 	enum msm_vidc_pixel_depth bit_depth;
 	struct kref kref;
-	unsigned long instant_bitrate;
+	unsigned long bitrate;
+	unsigned long freq;
 	u32 buffers_held_in_driver;
 	atomic_t in_flush;
 	u32 pic_struct;
@@ -306,6 +311,7 @@
 	u32 profile;
 	u32 level;
 	u32 entropy_mode;
+	struct clock_profile_entry *entry;
 };
 
 extern struct msm_vidc_drv *vidc_driver;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 97a625b..8b9018c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -90,24 +90,6 @@
 	res->pf_ver_tbl = NULL;
 }
 
-static inline void msm_vidc_free_freq_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->load_freq_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_table(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_tbl = NULL;
-}
-
-static inline void msm_vidc_free_dcvs_limit(
-		struct msm_vidc_platform_resources *res)
-{
-	res->dcvs_limit = NULL;
-}
-
 static inline void msm_vidc_free_imem_ab_table(
 		struct msm_vidc_platform_resources *res)
 {
@@ -168,10 +150,7 @@
 {
 	msm_vidc_free_clock_table(res);
 	msm_vidc_free_regulator_table(res);
-	msm_vidc_free_freq_table(res);
 	msm_vidc_free_platform_version_table(res);
-	msm_vidc_free_dcvs_table(res);
-	msm_vidc_free_dcvs_limit(res);
 	msm_vidc_free_cycles_per_mb_table(res);
 	msm_vidc_free_allowed_clocks_table(res);
 	msm_vidc_free_reg_table(res);
@@ -411,6 +390,14 @@
 	int rc = 0;
 	struct platform_device *pdev = res->pdev;
 
+	/* A comparator to compare loads (needed later on) */
+	int cmp(const void *a, const void *b)
+	{
+		/* want to sort in reverse so flip the comparison */
+		return ((struct allowed_clock_rates_table *)b)->clock_rate -
+			((struct allowed_clock_rates_table *)a)->clock_rate;
+	}
+
 	if (!of_find_property(pdev->dev.of_node,
 			"qcom,allowed-clock-rates", NULL)) {
 		dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n");
@@ -428,6 +415,9 @@
 		return rc;
 	}
 
+	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
+		 sizeof(*res->allowed_clks_tbl), cmp, NULL);
+
 	return 0;
 }
 
@@ -490,34 +480,51 @@
 		}
 		dprintk(VIDC_DBG, "codec_mask %#x\n", entry->codec_mask);
 
-		if (of_find_property(child_node, "qcom,cycles-per-mb", NULL)) {
+		if (of_find_property(child_node,
+				"qcom,vsp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,cycles-per-mb", &entry->cycles);
+					"qcom,vsp-cycles-per-mb",
+					&entry->vsp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,cycles-per-mb not found\n");
+					"qcom,vsp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->cycles = 0;
+			entry->vsp_cycles = 0;
 		}
-		dprintk(VIDC_DBG, "cycles_per_mb %d\n", entry->cycles);
+		dprintk(VIDC_DBG, "vsp cycles_per_mb %d\n", entry->vsp_cycles);
 
 		if (of_find_property(child_node,
-				"qcom,low-power-mode-factor", NULL)) {
+				"qcom,vpp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
-					"qcom,low-power-mode-factor",
-					&entry->low_power_factor);
+					"qcom,vpp-cycles-per-mb",
+					&entry->vpp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"qcom,low-power-mode-factor not found\n");
+					"qcom,vpp-cycles-per-mb not found\n");
 				goto error;
 			}
 		} else {
-			entry->low_power_factor = 0;
+			entry->vpp_cycles = 0;
+		}
+		dprintk(VIDC_DBG, "vpp cycles_per_mb %d\n", entry->vpp_cycles);
+
+		if (of_find_property(child_node,
+				"qcom,low-power-cycles-per-mb", NULL)) {
+			rc = of_property_read_u32(child_node,
+					"qcom,low-power-cycles-per-mb",
+					&entry->low_power_cycles);
+			if (rc) {
+				dprintk(VIDC_ERR,
+					"qcom,low-power-cycles-per-mb not found\n");
+				goto error;
+			}
+		} else {
+			entry->low_power_cycles = 0;
 		}
 		dprintk(VIDC_DBG, "low_power_factor %d\n",
-				entry->low_power_factor);
+				entry->low_power_cycles);
 
 		i++;
 	}
@@ -526,155 +533,6 @@
 	return rc;
 }
 
-static int msm_vidc_load_freq_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	/* A comparator to compare loads (needed later on) */
-	int cmp(const void *a, const void *b)
-	{
-		/* want to sort in reverse so flip the comparison */
-		return ((struct load_freq_table *)b)->load -
-			((struct load_freq_table *)a)->load;
-	}
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,load-freq-tbl", NULL)) {
-		/*
-		 * qcom,load-freq-tbl is an optional property.  It likely won't
-		 * be present on cores that we can't clock scale on.
-		 */
-		dprintk(VIDC_DBG, "qcom,load-freq-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,load-freq-tbl");
-	num_elements /= sizeof(*res->load_freq_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in frequency table\n");
-		return rc;
-	}
-
-	res->load_freq_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->load_freq_tbl), GFP_KERNEL);
-	if (!res->load_freq_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc load_freq_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,load-freq-tbl", (u32 *)res->load_freq_tbl,
-		num_elements * sizeof(*res->load_freq_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read frequency table\n");
-		msm_vidc_free_freq_table(res);
-		return -EINVAL;
-	}
-
-	res->load_freq_tbl_size = num_elements;
-
-	/* The entries in the DT might not be sorted (for aesthetic purposes).
-	 * Given that we expect the loads in descending order for our scaling
-	 * logic to work, just sort it ourselves
-	 */
-	sort(res->load_freq_tbl, res->load_freq_tbl_size,
-			sizeof(*res->load_freq_tbl), cmp, NULL);
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_table(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-tbl", NULL)) {
-		/*
-		 * qcom,dcvs-tbl is an optional property. Incase qcom,dcvs-limit
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-tbl not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-tbl");
-	num_elements /= sizeof(*res->dcvs_tbl) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs table\n");
-		return rc;
-	}
-
-	res->dcvs_tbl = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_tbl), GFP_KERNEL);
-	if (!res->dcvs_tbl) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_tbl\n",
-				__func__);
-		return -ENOMEM;
-	}
-
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-tbl", (u32 *)res->dcvs_tbl,
-		num_elements * sizeof(*res->dcvs_tbl) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs table\n");
-		msm_vidc_free_dcvs_table(res);
-		return -EINVAL;
-	}
-	res->dcvs_tbl_size = num_elements;
-
-	return rc;
-}
-
-static int msm_vidc_load_dcvs_limit(struct msm_vidc_platform_resources *res)
-{
-	int rc = 0;
-	int num_elements = 0;
-	struct platform_device *pdev = res->pdev;
-
-	if (!of_find_property(pdev->dev.of_node, "qcom,dcvs-limit", NULL)) {
-		/*
-		 * qcom,dcvs-limit is an optional property. Incase qcom,dcvs-tbl
-		 * property is present, it becomes mandatory. It likely won't
-		 * be present on targets that does not support the feature
-		 */
-		dprintk(VIDC_DBG, "qcom,dcvs-limit not found\n");
-		return 0;
-	}
-
-	num_elements = get_u32_array_num_elements(pdev->dev.of_node,
-			"qcom,dcvs-limit");
-	num_elements /= sizeof(*res->dcvs_limit) / sizeof(u32);
-	if (!num_elements) {
-		dprintk(VIDC_ERR, "no elements in dcvs limit\n");
-		res->dcvs_limit = NULL;
-		return rc;
-	}
-
-	res->dcvs_limit = devm_kzalloc(&pdev->dev, num_elements *
-			sizeof(*res->dcvs_limit), GFP_KERNEL);
-	if (!res->dcvs_limit) {
-		dprintk(VIDC_ERR,
-				"%s Failed to alloc dcvs_limit\n",
-				__func__);
-		return -ENOMEM;
-	}
-	if (of_property_read_u32_array(pdev->dev.of_node,
-		"qcom,dcvs-limit", (u32 *)res->dcvs_limit,
-		num_elements * sizeof(*res->dcvs_limit) / sizeof(u32))) {
-		dprintk(VIDC_ERR, "Failed to read dcvs limit\n");
-		msm_vidc_free_dcvs_limit(res);
-		return -EINVAL;
-	}
-
-	return rc;
-}
-
-
 static int msm_vidc_populate_bus(struct device *dev,
 		struct msm_vidc_platform_resources *res)
 {
@@ -952,11 +810,8 @@
 
 		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
 			vc->has_scaling = true;
-			vc->count = res->load_freq_tbl_size;
-			vc->load_freq_tbl = res->load_freq_tbl;
 		} else {
 			vc->count = 0;
-			vc->load_freq_tbl = NULL;
 			vc->has_scaling = false;
 		}
 
@@ -1016,7 +871,7 @@
 			&res->fw_name);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed to read firmware name: %d\n", rc);
-		goto err_load_freq_table;
+		goto err_load_reg_table;
 	}
 	dprintk(VIDC_DBG, "Firmware filename: %s\n", res->fw_name);
 
@@ -1029,20 +884,6 @@
 	if (rc)
 		dprintk(VIDC_ERR, "Failed to load pf version table: %d\n", rc);
 
-	rc = msm_vidc_load_freq_table(res);
-	if (rc) {
-		dprintk(VIDC_ERR, "Failed to load freq table: %d\n", rc);
-		goto err_load_freq_table;
-	}
-
-	rc = msm_vidc_load_dcvs_table(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs table: %d\n", rc);
-
-	rc = msm_vidc_load_dcvs_limit(res);
-	if (rc)
-		dprintk(VIDC_WARN, "Failed to load dcvs limit: %d\n", rc);
-
 	rc = msm_vidc_load_imem_ab_table(res);
 	if (rc)
 		dprintk(VIDC_WARN, "Failed to load freq table: %d\n", rc);
@@ -1157,8 +998,6 @@
 err_load_buffer_usage_table:
 	msm_vidc_free_reg_table(res);
 err_load_reg_table:
-	msm_vidc_free_freq_table(res);
-err_load_freq_table:
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 4f152fb..8fd43006 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -24,12 +24,6 @@
 	u32 version_shift;
 };
 
-struct load_freq_table {
-	u32 load;
-	u32 freq;
-	u32 supported_codecs;
-};
-
 struct dcvs_table {
 	u32 load;
 	u32 load_low;
@@ -101,7 +95,6 @@
 struct clock_info {
 	const char *name;
 	struct clk *clk;
-	struct load_freq_table *load_freq_tbl;
 	u32 count;
 	bool has_scaling;
 	bool has_mem_retention;
@@ -142,8 +135,9 @@
 
 struct clock_profile_entry {
 	u32 codec_mask;
-	u32 cycles;
-	u32 low_power_factor;
+	u32 vpp_cycles;
+	u32 vsp_cycles;
+	u32 low_power_cycles;
 };
 
 struct clock_freq_table {
@@ -160,8 +154,6 @@
 	struct allowed_clock_rates_table *allowed_clks_tbl;
 	u32 allowed_clks_tbl_size;
 	struct clock_freq_table clock_freq_tbl;
-	struct load_freq_table *load_freq_tbl;
-	uint32_t load_freq_tbl_size;
 	struct dcvs_table *dcvs_tbl;
 	uint32_t dcvs_tbl_size;
 	struct dcvs_limit *dcvs_limit;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index aabf2d3..74e360e 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -85,13 +85,11 @@
 static int __enable_regulators(struct venus_hfi_device *device);
 static inline int __prepare_enable_clks(struct venus_hfi_device *device);
 static inline void __disable_unprepare_clks(struct venus_hfi_device *device);
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate);
 static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet);
 static int __initialize_packetization(struct venus_hfi_device *device);
 static struct hal_session *__get_session(struct venus_hfi_device *device,
 		u32 session_id);
+static int __set_clocks(struct venus_hfi_device *device, u32 freq);
 static int __iface_cmdq_write(struct venus_hfi_device *device,
 					void *pkt);
 static int __load_fw(struct venus_hfi_device *device);
@@ -1137,162 +1135,6 @@
 	return rc;
 }
 
-static struct clock_info *__get_clock(struct venus_hfi_device *device,
-		char *name)
-{
-	struct clock_info *vc;
-
-	venus_hfi_for_each_clock(device, vc) {
-		if (!strcmp(vc->name, name))
-			return vc;
-	}
-
-	dprintk(VIDC_WARN, "%s Clock %s not found\n", __func__, name);
-
-	return NULL;
-}
-
-static unsigned long __get_clock_rate(struct clock_info *clock,
-	int num_mbs_per_sec, struct vidc_clk_scale_data *data)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	int i = 0, j = 0;
-	unsigned long instance_freq[VIDC_MAX_SESSIONS] = {0};
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-
-		/* When no data is given, vote for the highest frequency. */
-
-		freq = table[0].freq;
-		goto print_clk;
-	}
-
-	for (i = 0; i < num_rows; i++) {
-		if (num_mbs_per_sec > table[i].load)
-			break;
-		for (j = 0; j < data->num_sessions; j++) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			instance_freq[j] = table[i].freq;
-		}
-	}
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(instance_freq[i], max_freq);
-
-	freq = max_freq ? : freq;
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long __get_clock_rate_with_bitrate(struct clock_info *clock,
-		int num_mbs_per_sec, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int num_rows = clock->count;
-	struct load_freq_table *table = clock->load_freq_tbl;
-	unsigned long freq = table[0].freq, max_freq = 0;
-	unsigned long base_freq, supported_clk[VIDC_MAX_SESSIONS] = {0};
-	int i, j;
-
-	if (!data && !num_rows) {
-		freq = 0;
-		goto print_clk;
-	}
-	if ((!num_mbs_per_sec || !data) && num_rows) {
-		freq = table[num_rows - 1].freq;
-		goto print_clk;
-	}
-
-	/* Get clock rate based on current load only */
-	base_freq = __get_clock_rate(clock, num_mbs_per_sec, data);
-
-	/*
-	 * Supported bitrate = 40% of clock frequency
-	 * Check if the instant bitrate is supported by the base frequency.
-	 * If not, move on to the next frequency which supports the bitrate.
-	 */
-
-	for (j = 0; j < data->num_sessions; j++) {
-		unsigned long supported_bitrate = 0;
-
-		for (i = num_rows - 1; i >= 0; i--) {
-			bool matches = __is_session_supported(
-				table[i].supported_codecs, data->session[j]);
-
-			if (!matches)
-				continue;
-			freq = table[i].freq;
-
-			supported_bitrate = freq * 40/100;
-			/*
-			 * Store this frequency for each instance, we need
-			 * to select the maximum freq among all the instances.
-			 */
-			if (freq >= base_freq &&
-				supported_bitrate >= instant_bitrate) {
-				supported_clk[j] = freq;
-				break;
-			}
-		}
-
-		/*
-		 * Current bitrate is higher than max supported load.
-		 * Select max frequency to handle this load.
-		 */
-		if (i < 0)
-			supported_clk[j] = table[0].freq;
-	}
-
-	for (i = 0; i < data->num_sessions; i++)
-		max_freq = max(supported_clk[i], max_freq);
-
-	freq = max_freq ? : base_freq;
-
-	if (base_freq == freq)
-		dprintk(VIDC_DBG, "Stay at base freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Move up clock freq: %lu bitrate = %lu\n",
-			freq, instant_bitrate);
-print_clk:
-	dprintk(VIDC_PROF, "Required clock rate = %lu num_mbs_per_sec %d\n",
-					freq, num_mbs_per_sec);
-	return freq;
-}
-
-static unsigned long venus_hfi_get_core_clock_rate(void *dev, bool actual_rate)
-{
-	struct venus_hfi_device *device = (struct venus_hfi_device *) dev;
-	struct clock_info *vc;
-
-	if (!device) {
-		dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, device);
-		return -EINVAL;
-	}
-
-	if (actual_rate) {
-		vc = __get_clock(device, "core_clk");
-		if (vc)
-			return clk_get_rate(vc->clk);
-		else
-			return 0;
-	} else {
-		return device->scaled_rate;
-	}
-}
-
 static int venus_hfi_suspend(void *dev)
 {
 	int rc = 0;
@@ -1390,167 +1232,31 @@
 	return rc;
 }
 
-static int __scale_clocks_cycles_per_mb(struct venus_hfi_device *device,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
-{
-	int rc = 0, i = 0, j = 0;
-	struct clock_info *cl;
-	struct clock_freq_table *clk_freq_tbl = NULL;
-	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
-	struct clock_profile_entry *entry = NULL;
-	u64 total_freq = 0, rate = 0;
-
-	clk_freq_tbl = &device->res->clock_freq_tbl;
-	allowed_clks_tbl = device->res->allowed_clks_tbl;
-
-	if (!data) {
-		dprintk(VIDC_DBG, "%s: NULL scale data\n", __func__);
-		total_freq = device->clk_freq;
-		goto get_clock_freq;
-	}
-
-	device->clk_bitrate = instant_bitrate;
-
-	for (i = 0; i < data->num_sessions; i++) {
-		/*
-		 * for each active session iterate through all possible
-		 * sessions and get matching session's cycles per mb
-		 * from dtsi and multiply with the session's load to
-		 * get the frequency required for the session.
-		 * accumulate all session's frequencies to get the
-		 * total clock frequency.
-		 */
-		for (j = 0; j < clk_freq_tbl->count; j++) {
-			bool matched = false;
-			u64 freq = 0;
-
-			entry = &clk_freq_tbl->clk_prof_entries[j];
-
-			matched = __is_session_supported(entry->codec_mask,
-					data->session[i]);
-			if (!matched)
-				continue;
-
-			freq = entry->cycles * data->load[i];
-
-			if (data->power_mode[i] == VIDC_POWER_LOW &&
-					entry->low_power_factor) {
-				/* low_power_factor is in Q16 format */
-				freq = (freq * entry->low_power_factor) >> 16;
-			}
-
-			total_freq += freq;
-
-			dprintk(VIDC_DBG,
-				"session[%d] %#x: cycles (%d), load (%d), freq (%llu), factor (%d)\n",
-				i, data->session[i], entry->cycles,
-				data->load[i], freq,
-				entry->low_power_factor);
-		}
-	}
-
-get_clock_freq:
-	/*
-	 * get required clock rate from allowed clock rates table
-	 */
-	for (i = device->res->allowed_clks_tbl_size - 1; i >= 0; i--) {
-		rate = allowed_clks_tbl[i].clock_rate;
-		if (rate >= total_freq)
-			break;
-	}
-
-	venus_hfi_for_each_clock(device, cl) {
-		if (!cl->has_scaling)
-			continue;
-
-		device->clk_freq = rate;
-		rc = clk_set_rate(cl->clk, rate);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"%s: Failed to set clock rate %llu %s: %d\n",
-				__func__, rate, cl->name, rc);
-			return rc;
-		}
-		if (!strcmp(cl->name, "core_clk"))
-			device->scaled_rate = rate;
-
-		dprintk(VIDC_DBG,
-			"scaling clock %s to %llu (required freq %llu)\n",
-			cl->name, rate, total_freq);
-	}
-
-	return rc;
-}
-
-static int __scale_clocks_load(struct venus_hfi_device *device, int load,
-		struct vidc_clk_scale_data *data, unsigned long instant_bitrate)
+static int __set_clocks(struct venus_hfi_device *device, u32 freq)
 {
 	struct clock_info *cl;
-
-	device->clk_bitrate = instant_bitrate;
+	int rc = 0;
 
 	venus_hfi_for_each_clock(device, cl) {
-		if (cl->has_scaling) {
-
-			unsigned long rate = 0;
-			int rc;
-			/*
-			 * load_fw and power_on needs to be addressed.
-			 * differently. Below check enforces the same.
-			 */
-			if (!device->clk_bitrate && !data && !load &&
-				device->clk_freq)
-				rate = device->clk_freq;
-
-			if (!rate) {
-				if (!device->clk_bitrate)
-					rate = __get_clock_rate(cl, load,
-							data);
-				else
-					rate = __get_clock_rate_with_bitrate(cl,
-							load, data,
-							instant_bitrate);
-			}
-			device->clk_freq = rate;
-			rc = clk_set_rate(cl->clk, rate);
+		if (cl->has_scaling) {/* has_scaling */
+			device->clk_freq = freq;
+			rc = clk_set_rate(cl->clk, freq);
 			if (rc) {
 				dprintk(VIDC_ERR,
-					"Failed to set clock rate %lu %s: %d\n",
-					rate, cl->name, rc);
+					"Failed to set clock rate %u %s: %d %s\n",
+					freq, cl->name, rc, __func__);
 				return rc;
 			}
 
-			if (!strcmp(cl->name, "core_clk"))
-				device->scaled_rate = rate;
-
-			dprintk(VIDC_PROF, "Scaling clock %s to %lu\n",
-					cl->name, rate);
+			dprintk(VIDC_PROF, "Scaling clock %s to %u\n",
+					cl->name, freq);
 		}
 	}
 
 	return 0;
 }
 
-static int __scale_clocks(struct venus_hfi_device *device,
-		int load, struct vidc_clk_scale_data *data,
-		unsigned long instant_bitrate)
-{
-	int rc = 0;
-
-	if (device->res->clock_freq_tbl.clk_prof_entries &&
-			device->res->allowed_clks_tbl)
-		rc = __scale_clocks_cycles_per_mb(device,
-				data, instant_bitrate);
-	else if (device->res->load_freq_tbl)
-		rc = __scale_clocks_load(device, load, data, instant_bitrate);
-	else
-		dprintk(VIDC_DBG, "Clock scaling is not supported\n");
-
-	return rc;
-}
-static int venus_hfi_scale_clocks(void *dev, int load,
-					struct vidc_clk_scale_data *data,
-					unsigned long instant_bitrate)
+static int venus_hfi_scale_clocks(void *dev, u32 freq)
 {
 	int rc = 0;
 	struct venus_hfi_device *device = dev;
@@ -1568,9 +1274,28 @@
 		goto exit;
 	}
 
-	rc = __scale_clocks(device, load, data, instant_bitrate);
+	rc = __set_clocks(device, freq);
 exit:
 	mutex_unlock(&device->lock);
+
+	return rc;
+}
+
+static int __scale_clocks(struct venus_hfi_device *device)
+{
+	int rc = 0;
+	struct clock_freq_table *clk_freq_tbl = NULL;
+	struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
+	u32 rate = 0;
+
+	clk_freq_tbl = &device->res->clock_freq_tbl;
+	allowed_clks_tbl = device->res->allowed_clks_tbl;
+
+	dprintk(VIDC_DBG, "%s: scale to current or default clock rate\n",
+		__func__);
+	rate = device->clk_freq ? device->clk_freq :
+		allowed_clks_tbl[0].clock_rate;
+
+	rc = __set_clocks(device, rate);
 	return rc;
 }
 
@@ -3672,17 +3397,9 @@
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
-		int i = 0;
 
 		dprintk(VIDC_DBG, "%s: scalable? %d, count %d\n",
 				cl->name, cl->has_scaling, cl->count);
-		for (i = 0; i < cl->count; ++i) {
-			dprintk(VIDC_DBG,
-				"\tload = %d, freq = %d codecs supported %#x\n",
-				cl->load_freq_tbl[i].load,
-				cl->load_freq_tbl[i].freq,
-				cl->load_freq_tbl[i].supported_codecs);
-		}
 	}
 
 	venus_hfi_for_each_clock(device, cl) {
@@ -4141,7 +3858,7 @@
 		goto fail_enable_clks;
 	}
 
-	rc = __scale_clocks(device, 0, NULL, 0);
+	rc = __scale_clocks(device);
 	if (rc) {
 		dprintk(VIDC_WARN,
 				"Failed to scale clocks, performance might be affected\n");
@@ -4624,7 +4341,6 @@
 	hdev->get_core_capabilities = venus_hfi_get_core_capabilities;
 	hdev->suspend = venus_hfi_suspend;
 	hdev->flush_debug_queue = venus_hfi_flush_debug_queue;
-	hdev->get_core_clock_rate = venus_hfi_get_core_clock_rate;
 	hdev->get_default_properties = venus_hfi_get_default_properties;
 }
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 3267999..7caff53 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -251,6 +251,7 @@
 struct hfi_buffer_count_actual {
 	u32 buffer_type;
 	u32 buffer_count_actual;
+	u32 buffer_count_min_host;
 };
 
 struct hfi_buffer_size_minimum {
@@ -262,8 +263,8 @@
 	u32 buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
-	u32 buffer_hold_count;
 	u32 buffer_count_min;
+	u32 buffer_count_min_host;
 	u32 buffer_count_actual;
 	u32 contiguous;
 	u32 buffer_alignment;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 1bbb730..eff16f2 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -220,7 +220,6 @@
 	HAL_PARAM_VENC_SEARCH_RANGE,
 	HAL_PARAM_VPE_COLOR_SPACE_CONVERSION,
 	HAL_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE,
-	HAL_PARAM_VENC_H264_NAL_SVC_EXT,
 	HAL_CONFIG_VENC_PERF_MODE,
 	HAL_PARAM_VENC_HIER_B_MAX_ENH_LAYERS,
 	HAL_PARAM_VDEC_NON_SECURE_OUTPUT2,
@@ -235,7 +234,6 @@
 	HAL_PARAM_VENC_BITRATE_TYPE,
 	HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
 	HAL_PARAM_VENC_LOW_LATENCY,
-	HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED,
 	HAL_CONFIG_VENC_BLUR_RESOLUTION,
 	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
 	HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
@@ -608,6 +606,7 @@
 struct hal_buffer_count_actual {
 	enum hal_buffer buffer_type;
 	u32 buffer_count_actual;
+	u32 buffer_count_min_host;
 };
 
 struct hal_buffer_size_minimum {
@@ -793,17 +792,13 @@
 enum hal_intra_refresh_mode {
 	HAL_INTRA_REFRESH_NONE,
 	HAL_INTRA_REFRESH_CYCLIC,
-	HAL_INTRA_REFRESH_ADAPTIVE,
-	HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE,
 	HAL_INTRA_REFRESH_RANDOM,
 	HAL_UNUSED_INTRA = 0x10000000,
 };
 
 struct hal_intra_refresh {
 	enum hal_intra_refresh_mode mode;
-	u32 air_mbs;
-	u32 air_ref;
-	u32 cir_mbs;
+	u32 ir_mbs;
 };
 
 enum hal_multi_slice {
@@ -827,8 +822,8 @@
 	enum hal_buffer buffer_type;
 	u32 buffer_size;
 	u32 buffer_region_size;
-	u32 buffer_hold_count;
 	u32 buffer_count_min;
+	u32 buffer_count_min_host;
 	u32 buffer_count_actual;
 	u32 contiguous;
 	u32 buffer_alignment;
@@ -1529,9 +1524,7 @@
 	int (*session_set_property)(void *sess, enum hal_property ptype,
 			void *pdata);
 	int (*session_get_property)(void *sess, enum hal_property ptype);
-	int (*scale_clocks)(void *dev, int load,
-			struct vidc_clk_scale_data *data,
-			unsigned long instant_bitrate);
+	int (*scale_clocks)(void *dev, u32 freq);
 	int (*vote_bus)(void *dev, struct vidc_bus_vote_data *data,
 			int num_data);
 	int (*get_fw_info)(void *dev, struct hal_fw_info *fw_info);
@@ -1539,7 +1532,6 @@
 	int (*get_core_capabilities)(void *dev);
 	int (*suspend)(void *dev);
 	int (*flush_debug_queue)(void *dev);
-	unsigned long (*get_core_clock_rate)(void *dev, bool actual_rate);
 	enum hal_default_properties (*get_default_properties)(void *dev);
 };
 
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 8ea5472..dc64ad2 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -273,8 +273,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x017)
 #define HFI_PROPERTY_PARAM_VENC_NUMREF					\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x018)
-#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT		\
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01B)
 #define HFI_PROPERTY_PARAM_VENC_LTRMODE		\
 	 (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01C)
 #define HFI_PROPERTY_PARAM_VENC_VIDEO_SIGNAL_INFO	\
@@ -456,9 +454,7 @@
 
 #define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
 #define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
-#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
-#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
-#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x3)
 
 struct hfi_intra_refresh {
 	u32 mode;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index d9c1f2f..aba7735 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1411,6 +1411,7 @@
 	int attr = 0;
 	int minor;
 	int rc;
+	u64 rc_type;
 
 	if (!dev || !dev->map_name)
 		return -EINVAL;
@@ -1496,14 +1497,18 @@
 			goto out_input;
 	}
 
+	rc_type = BIT_ULL(rc_map->rc_type);
+
 	if (dev->change_protocol) {
-		u64 rc_type = (1ll << rc_map->rc_type);
 		rc = dev->change_protocol(dev, &rc_type);
 		if (rc < 0)
 			goto out_raw;
 		dev->enabled_protocols = rc_type;
 	}
 
+	if (dev->driver_type == RC_DRIVER_IR_RAW)
+		ir_raw_load_modules(&rc_type);
+
 	/* Allow the RC sysfs nodes to be accessible */
 	atomic_set(&dev->initialized, 1);
 
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 2c720cb..c3e6734 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -68,6 +68,7 @@
 struct dw2102_state {
 	u8 initialized;
 	u8 last_lock;
+	u8 data[MAX_XFER_SIZE + 4];
 	struct i2c_client *i2c_client_demod;
 	struct i2c_client *i2c_client_tuner;
 
@@ -662,62 +663,72 @@
 								int num)
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
-	u8 obuf[0x40], ibuf[0x40];
+	struct dw2102_state *state;
 
 	if (!d)
 		return -ENODEV;
+
+	state = d->priv;
+
 	if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
 		return -EAGAIN;
+	if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+		mutex_unlock(&d->i2c_mutex);
+		return -EAGAIN;
+	}
 
 	switch (num) {
 	case 1:
 		switch (msg[0].addr) {
 		case SU3000_STREAM_CTRL:
-			obuf[0] = msg[0].buf[0] + 0x36;
-			obuf[1] = 3;
-			obuf[2] = 0;
-			if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+			state->data[0] = msg[0].buf[0] + 0x36;
+			state->data[1] = 3;
+			state->data[2] = 0;
+			if (dvb_usb_generic_rw(d, state->data, 3,
+					state->data, 0, 0) < 0)
 				err("i2c transfer failed.");
 			break;
 		case DW2102_RC_QUERY:
-			obuf[0] = 0x10;
-			if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+			state->data[0] = 0x10;
+			if (dvb_usb_generic_rw(d, state->data, 1,
+					state->data, 2, 0) < 0)
 				err("i2c transfer failed.");
-			msg[0].buf[1] = ibuf[0];
-			msg[0].buf[0] = ibuf[1];
+			msg[0].buf[1] = state->data[0];
+			msg[0].buf[0] = state->data[1];
 			break;
 		default:
 			/* always i2c write*/
-			obuf[0] = 0x08;
-			obuf[1] = msg[0].addr;
-			obuf[2] = msg[0].len;
+			state->data[0] = 0x08;
+			state->data[1] = msg[0].addr;
+			state->data[2] = msg[0].len;
 
-			memcpy(&obuf[3], msg[0].buf, msg[0].len);
+			memcpy(&state->data[3], msg[0].buf, msg[0].len);
 
-			if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
-						ibuf, 1, 0) < 0)
+			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+						state->data, 1, 0) < 0)
 				err("i2c transfer failed.");
 
 		}
 		break;
 	case 2:
 		/* always i2c read */
-		obuf[0] = 0x09;
-		obuf[1] = msg[0].len;
-		obuf[2] = msg[1].len;
-		obuf[3] = msg[0].addr;
-		memcpy(&obuf[4], msg[0].buf, msg[0].len);
+		state->data[0] = 0x09;
+		state->data[1] = msg[0].len;
+		state->data[2] = msg[1].len;
+		state->data[3] = msg[0].addr;
+		memcpy(&state->data[4], msg[0].buf, msg[0].len);
 
-		if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
-					ibuf, msg[1].len + 1, 0) < 0)
+		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+					state->data, msg[1].len + 1, 0) < 0)
 			err("i2c transfer failed.");
 
-		memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+		memcpy(msg[1].buf, &state->data[1], msg[1].len);
 		break;
 	default:
 		warn("more than 2 i2c messages at a time is not handled yet.");
 		break;
 	}
+	mutex_unlock(&d->data_mutex);
 	mutex_unlock(&d->i2c_mutex);
 	return num;
 }
@@ -845,17 +856,23 @@
 static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
 {
 	struct dw2102_state *state = (struct dw2102_state *)d->priv;
-	u8 obuf[] = {0xde, 0};
+	int ret = 0;
 
 	info("%s: %d, initialized %d", __func__, i, state->initialized);
 
 	if (i && !state->initialized) {
+		mutex_lock(&d->data_mutex);
+
+		state->data[0] = 0xde;
+		state->data[1] = 0;
+
 		state->initialized = 1;
 		/* reset board */
-		return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+		ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+		mutex_unlock(&d->data_mutex);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1310,49 +1327,57 @@
 	return 0;
 }
 
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[3] = { 0xe, 0x80, 0 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x02;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x02;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 	msleep(300);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
-					&d->dev->i2c_adap);
-	if (d->fe_adap[0].fe == NULL)
+	mutex_unlock(&d->data_mutex);
+
+	adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+					&d->i2c_adap);
+	if (adap->fe_adap[0].fe == NULL)
 		return -EIO;
 
-	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
 				&dw2104_ts2020_config,
-				&d->dev->i2c_adap)) {
+				&d->i2c_adap)) {
 		info("Attached DS3000/TS2020!");
 		return 0;
 	}
@@ -1361,47 +1386,55 @@
 	return -EIO;
 }
 
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[3] = { 0xe, 0x87, 0 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x87;
+	state->data[2] = 0x0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x86;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x86;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x80;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
 	msleep(50);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x80;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
-					&d->dev->i2c_adap, NULL);
-	if (d->fe_adap[0].fe != NULL) {
-		if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
-					&d->dev->i2c_adap, &tda18271_config)) {
+	mutex_unlock(&d->data_mutex);
+
+	adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+					&d->i2c_adap, NULL);
+	if (adap->fe_adap[0].fe != NULL) {
+		if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+					&d->i2c_adap, &tda18271_config)) {
 			info("Attached TDA18271HD/CXD2820R!");
 			return 0;
 		}
@@ -1411,23 +1444,30 @@
 	return -EIO;
 }
 
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
 {
-	u8 obuf[] = { 0x51 };
-	u8 ibuf[] = { 0 };
+	struct dvb_usb_device *d = adap->dev;
+	struct dw2102_state *state = d->priv;
 
-	if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0x51;
+
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
-	d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
-					&d->dev->i2c_adap);
+	mutex_unlock(&d->data_mutex);
 
-	if (d->fe_adap[0].fe == NULL)
+	adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+					&s421_m88rs2000_config,
+					&d->i2c_adap);
+
+	if (adap->fe_adap[0].fe == NULL)
 		return -EIO;
 
-	if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+	if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
 				&dw2104_ts2020_config,
-				&d->dev->i2c_adap)) {
+				&d->i2c_adap)) {
 		info("Attached RS2000/TS2020!");
 		return 0;
 	}
@@ -1440,44 +1480,50 @@
 {
 	struct dvb_usb_device *d = adap->dev;
 	struct dw2102_state *state = d->priv;
-	u8 obuf[3] = { 0xe, 0x80, 0 };
-	u8 ibuf[] = { 0 };
 	struct i2c_adapter *i2c_adapter;
 	struct i2c_client *client;
 	struct i2c_board_info board_info;
 	struct m88ds3103_platform_data m88ds3103_pdata = {};
 	struct ts2020_config ts2020_config = {};
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	mutex_lock(&d->data_mutex);
+
+	state->data[0] = 0xe;
+	state->data[1] = 0x80;
+	state->data[2] = 0x0;
+
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x02;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x02;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 	msleep(300);
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 0;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 0;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0xe;
-	obuf[1] = 0x83;
-	obuf[2] = 1;
+	state->data[0] = 0xe;
+	state->data[1] = 0x83;
+	state->data[2] = 1;
 
-	if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
 		err("command 0x0e transfer failed.");
 
-	obuf[0] = 0x51;
+	state->data[0] = 0x51;
 
-	if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+	if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
 		err("command 0x51 transfer failed.");
 
+	mutex_unlock(&d->data_mutex);
+
 	/* attach demod */
 	m88ds3103_pdata.clk = 27000000;
 	m88ds3103_pdata.i2c_wr_max = 33;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 302e284..cde43b6 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1595,6 +1595,114 @@
 	return buffer;
 }
 
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+
+	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+	if (chain == NULL)
+		return NULL;
+
+	INIT_LIST_HEAD(&chain->entities);
+	mutex_init(&chain->ctrl_mutex);
+	chain->dev = dev;
+	v4l2_prio_init(&chain->prio);
+
+	return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ *  - Acer Integrated Camera (5986:055a)
+ *  - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+	struct uvc_video_chain *chain;
+	struct uvc_entity *iterm = NULL;
+	struct uvc_entity *oterm = NULL;
+	struct uvc_entity *entity;
+	struct uvc_entity *prev;
+
+	/*
+	 * Start by locating the input and output terminals. We only support
+	 * devices with exactly one of each for now.
+	 */
+	list_for_each_entry(entity, &dev->entities, list) {
+		if (UVC_ENTITY_IS_ITERM(entity)) {
+			if (iterm)
+				return -EINVAL;
+			iterm = entity;
+		}
+
+		if (UVC_ENTITY_IS_OTERM(entity)) {
+			if (oterm)
+				return -EINVAL;
+			oterm = entity;
+		}
+	}
+
+	if (iterm == NULL || oterm == NULL)
+		return -EINVAL;
+
+	/* Allocate the chain and fill it. */
+	chain = uvc_alloc_chain(dev);
+	if (chain == NULL)
+		return -ENOMEM;
+
+	if (uvc_scan_chain_entity(chain, oterm) < 0)
+		goto error;
+
+	prev = oterm;
+
+	/*
+	 * Add all Processing and Extension Units with two pads. The order
+	 * doesn't matter much, use reverse list traversal to connect units in
+	 * UVC descriptor order as we build the chain from output to input. This
+	 * leads to units appearing in the order meant by the manufacturer for
+	 * the cameras known to require this heuristic.
+	 */
+	list_for_each_entry_reverse(entity, &dev->entities, list) {
+		if (entity->type != UVC_VC_PROCESSING_UNIT &&
+		    entity->type != UVC_VC_EXTENSION_UNIT)
+			continue;
+
+		if (entity->num_pads != 2)
+			continue;
+
+		if (uvc_scan_chain_entity(chain, entity) < 0)
+			goto error;
+
+		prev->baSourceID[0] = entity->id;
+		prev = entity;
+	}
+
+	if (uvc_scan_chain_entity(chain, iterm) < 0)
+		goto error;
+
+	prev->baSourceID[0] = iterm->id;
+
+	list_add_tail(&chain->list, &dev->chains);
+
+	uvc_trace(UVC_TRACE_PROBE,
+		  "Found a video chain by fallback heuristic (%s).\n",
+		  uvc_print_chain(chain));
+
+	return 0;
+
+error:
+	kfree(chain);
+	return -EINVAL;
+}
+
 /*
  * Scan the device for video chains and register video devices.
  *
@@ -1617,15 +1725,10 @@
 		if (term->chain.next || term->chain.prev)
 			continue;
 
-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+		chain = uvc_alloc_chain(dev);
 		if (chain == NULL)
 			return -ENOMEM;
 
-		INIT_LIST_HEAD(&chain->entities);
-		mutex_init(&chain->ctrl_mutex);
-		chain->dev = dev;
-		v4l2_prio_init(&chain->prio);
-
 		term->flags |= UVC_ENTITY_FLAG_DEFAULT;
 
 		if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1742,9 @@
 		list_add_tail(&chain->list, &dev->chains);
 	}
 
+	if (list_empty(&dev->chains))
+		uvc_scan_fallback(dev);
+
 	if (list_empty(&dev->chains)) {
 		uvc_printk(KERN_INFO, "No valid video chain found.\n");
 		return -1;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index f57700c..5aa3f09 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -617,6 +617,12 @@
 		card->ext_csd.ffu_capable =
 			(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
 			!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+
+		card->ext_csd.pre_eol_info = ext_csd[EXT_CSD_PRE_EOL_INFO];
+		card->ext_csd.device_life_time_est_typ_a =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A];
+		card->ext_csd.device_life_time_est_typ_b =
+			ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
 	}
 out:
 	return err;
@@ -746,6 +752,11 @@
 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
+MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev);
+MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info);
+MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n",
+	card->ext_csd.device_life_time_est_typ_a,
+	card->ext_csd.device_life_time_est_typ_b);
 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
 		card->ext_csd.enhanced_area_offset);
@@ -799,6 +810,9 @@
 	&dev_attr_name.attr,
 	&dev_attr_oemid.attr,
 	&dev_attr_prv.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_pre_eol_info.attr,
+	&dev_attr_life_time.attr,
 	&dev_attr_serial.attr,
 	&dev_attr_enhanced_area_offset.attr,
 	&dev_attr_enhanced_area_size.attr,
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index f9fa3fa..2051f28 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -139,15 +139,13 @@
 		}
 
 		msp_maps[i].bankwidth = 1;
-		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
+		msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
 		if (!msp_maps[i].name) {
 			iounmap(msp_maps[i].virt);
 			kfree(msp_parts[i]);
 			goto cleanup_loop;
 		}
 
-		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
-
 		for (j = 0; j < pcnt; j++) {
 			part_name[5] = '0' + i;
 			part_name[7] = '0' + j;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 5370909..08d91ef 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -913,6 +913,8 @@
 		priv->old_link = 0;
 		priv->old_duplex = -1;
 		priv->old_pause = -1;
+	} else {
+		phydev = NULL;
 	}
 
 	/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@
 	enet_dmac_writel(priv, priv->dma_chan_int_mask,
 			 ENETDMAC_IRMASK, priv->tx_chan);
 
-	if (priv->has_phy)
+	if (phydev)
 		phy_start(phydev);
 	else
 		bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@
 	free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-	if (priv->has_phy)
+	if (phydev)
 		phy_disconnect(phydev);
 
 	return ret;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a36022b..03dca73 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1181,7 +1181,9 @@
 
 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
 {
+	struct tcphdr *tcph;
 	int offset = 0;
+	int hdr_len;
 
 	/* only TCP packets will be aggregated */
 	if (skb->protocol == htons(ETH_P_IP)) {
@@ -1208,14 +1210,20 @@
 	/* if mss is not set through Large Packet bit/mss in rx buffer,
 	 * expect that the mss will be written to the tcp header checksum.
 	 */
+	tcph = (struct tcphdr *)(skb->data + offset);
 	if (lrg_pkt) {
 		skb_shinfo(skb)->gso_size = mss;
 	} else if (offset) {
-		struct tcphdr *tcph = (struct tcphdr *)(skb->data + offset);
-
 		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
 		tcph->check = 0;
 	}
+
+	if (skb_shinfo(skb)->gso_size) {
+		hdr_len = offset + tcph->doff * 4;
+		skb_shinfo(skb)->gso_segs =
+				DIV_ROUND_UP(skb->len - hdr_len,
+					     skb_shinfo(skb)->gso_size);
+	}
 }
 
 static int ibmveth_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 5b54254..2788a54 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@
 	s32 ret_val = 0;
 	u16 phy_id;
 
+	/* ensure PHY page selection to fix misconfigured i210 */
+	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
 	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
 	if (ret_val)
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b306713..d4fa851 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -81,6 +81,7 @@
 static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 {
 	priv->params.rq_wq_type = rq_type;
+	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
@@ -92,6 +93,10 @@
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+		/* Extra room needed for build_skb */
+		priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
 	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
 					       BIT(priv->params.log_rq_size));
@@ -3473,12 +3478,6 @@
 	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
 
-	priv->params.lro_wqe_sz =
-		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
-		/* Extra room needed for build_skb */
-		MLX5_RX_HEADROOM -
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
 	/* Initialize pflags */
 	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			    priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
@@ -3936,6 +3935,19 @@
 	}
 }
 
+static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	int vport;
+
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return;
+
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3983,6 +3995,7 @@
 		return err;
 	}
 
+	mlx5e_register_vport_rep(mdev);
 	return 0;
 }
 
@@ -3994,6 +4007,7 @@
 	if (!netif_device_present(netdev))
 		return;
 
+	mlx5e_unregister_vport_rep(mdev);
 	mlx5e_detach_netdev(mdev, netdev);
 	mlx5e_destroy_mdev_resources(mdev);
 }
@@ -4012,8 +4026,6 @@
 	if (err)
 		return NULL;
 
-	mlx5e_register_vport_rep(mdev);
-
 	if (MLX5_CAP_GEN(mdev, vport_group_manager))
 		ppriv = &esw->offloads.vport_reps[0];
 
@@ -4065,13 +4077,7 @@
 
 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 {
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	struct mlx5e_priv *priv = vpriv;
-	int vport;
-
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
 
 	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e7b2158..796bdf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -92,19 +92,18 @@
 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 					struct mlx5e_cq *cq, u32 cqcc)
 {
-	u16 wqe_cnt_step;
-
 	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
 	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
 	cq->title.op_own      &= 0xf0;
 	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
 	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
 
-	wqe_cnt_step =
-		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
-	cq->decmprs_wqe_counter =
-		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		cq->decmprs_wqe_counter +=
+			mpwrq_get_cqe_consumed_strides(&cq->title);
+	else
+		cq->decmprs_wqe_counter =
+			(cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
 }
 
 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e83072d..6905630 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -500,30 +500,40 @@
 mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
 			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
 {
-	struct mlxsw_sp_lpm_tree *lpm_tree;
+	struct mlxsw_sp_lpm_tree *lpm_tree = vr->lpm_tree;
+	struct mlxsw_sp_lpm_tree *new_tree;
+	int err;
 
-	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
-				     &vr->lpm_tree->prefix_usage))
+	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
 		return 0;
 
-	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
 					 vr->proto, false);
-	if (IS_ERR(lpm_tree)) {
+	if (IS_ERR(new_tree)) {
 		/* We failed to get a tree according to the required
 		 * prefix usage. However, the current tree might be still good
 		 * for us if our requirement is subset of the prefixes used
 		 * in the tree.
 		 */
 		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
-						 &vr->lpm_tree->prefix_usage))
+						 &lpm_tree->prefix_usage))
 			return 0;
-		return PTR_ERR(lpm_tree);
+		return PTR_ERR(new_tree);
 	}
 
-	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
-	mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+	/* Prevent packet loss by overwriting existing binding */
+	vr->lpm_tree = new_tree;
+	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	if (err)
+		goto err_tree_bind;
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
+
+	return 0;
+
+err_tree_bind:
 	vr->lpm_tree = lpm_tree;
-	return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
+	return err;
 }
 
 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 28097be..5127b7e 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1211,7 +1211,7 @@
 		goto fail_alloc;
 	}
 
-#warning FIXME: unhardcode gpio&reset bits
+	/* FIXME: unhardcode gpio&reset bits */
 	ar7_gpio_disable(26);
 	ar7_gpio_disable(27);
 	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8b4822a..3c1f89a 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,16 +1039,22 @@
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct ip_tunnel_info *info = NULL;
+	int err;
 
 	if (geneve->collect_md)
 		info = skb_tunnel_info(skb);
 
+	rcu_read_lock();
 #if IS_ENABLED(CONFIG_IPV6)
 	if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
 	    (!info && geneve->remote.sa.sa_family == AF_INET6))
-		return geneve6_xmit_skb(skb, dev, info);
+		err = geneve6_xmit_skb(skb, dev, info);
+	else
 #endif
-	return geneve_xmit_skb(skb, dev, info);
+		err = geneve_xmit_skb(skb, dev, info);
+	rcu_read_unlock();
+
+	return err;
 }
 
 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f424b86..201ffa5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -611,14 +611,18 @@
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: There has been a change in state which requires that the
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-	cancel_delayed_work_sync(&phydev->state_queue);
+	if (sync)
+		cancel_delayed_work_sync(&phydev->state_queue);
+	else
+		cancel_delayed_work(&phydev->state_queue);
 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -655,7 +659,7 @@
 	phydev->state = PHY_HALTED;
 	mutex_unlock(&phydev->lock);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -817,7 +821,7 @@
 	}
 
 	/* reschedule state queue work to run as soon as possible */
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 	return;
 
 ignore:
@@ -907,7 +911,7 @@
 	if (do_resume)
 		phy_resume(phydev);
 
-	phy_trigger_machine(phydev);
+	phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e686b70..4b7a363 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -819,7 +819,18 @@
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
+	struct tun_struct *tun = netdev_priv(dev);
+	int i;
+
 	netif_tx_start_all_queues(dev);
+
+	for (i = 0; i < tun->numqueues; i++) {
+		struct tun_file *tfile;
+
+		tfile = rtnl_dereference(tun->tfiles[i]);
+		tfile->socket.sk->sk_write_space(tfile->socket.sk);
+	}
+
 	return 0;
 }
 
@@ -1116,9 +1127,10 @@
 	if (!skb_array_empty(&tfile->tx_array))
 		mask |= POLLIN | POLLRDNORM;
 
-	if (sock_writeable(sk) ||
-	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
-	     sock_writeable(sk)))
+	if (tun->dev->flags & IFF_UP &&
+	    (sock_writeable(sk) ||
+	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+	      sock_writeable(sk))))
 		mask |= POLLOUT | POLLWRNORM;
 
 	if (tun->dev->reg_state != NETREG_REGISTERED)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 95cf1d8..bc744ac 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -346,6 +346,7 @@
 
 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	int len = skb->len;
 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -353,7 +354,7 @@
 
 		u64_stats_update_begin(&dstats->syncp);
 		dstats->tx_pkts++;
-		dstats->tx_bytes += skb->len;
+		dstats->tx_bytes += len;
 		u64_stats_update_end(&dstats->syncp);
 	} else {
 		this_cpu_inc(dev->dstats->tx_drps);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d4f495b..3c4c2cf 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1942,7 +1942,6 @@
 	const struct iphdr *old_iph;
 	union vxlan_addr *dst;
 	union vxlan_addr remote_ip, local_ip;
-	union vxlan_addr *src;
 	struct vxlan_metadata _md;
 	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
@@ -1956,11 +1955,12 @@
 
 	info = skb_tunnel_info(skb);
 
+	rcu_read_lock();
 	if (rdst) {
 		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
 		vni = rdst->remote_vni;
 		dst = &rdst->remote_ip;
-		src = &vxlan->cfg.saddr;
+		local_ip = vxlan->cfg.saddr;
 		dst_cache = &rdst->dst_cache;
 	} else {
 		if (!info) {
@@ -1979,7 +1979,6 @@
 			local_ip.sin6.sin6_addr = info->key.u.ipv6.src;
 		}
 		dst = &remote_ip;
-		src = &local_ip;
 		dst_cache = &info->dst_cache;
 	}
 
@@ -1987,7 +1986,7 @@
 		if (did_rsc) {
 			/* short-circuited back to local bridge */
 			vxlan_encap_bypass(skb, vxlan, vxlan);
-			return;
+			goto out_unlock;
 		}
 		goto drop;
 	}
@@ -2028,7 +2027,7 @@
 		rt = vxlan_get_route(vxlan, skb,
 				     rdst ? rdst->remote_ifindex : 0, tos,
 				     dst->sin.sin_addr.s_addr,
-				     &src->sin.sin_addr.s_addr,
+				     &local_ip.sin.sin_addr.s_addr,
 				     dst_cache, info);
 		if (IS_ERR(rt)) {
 			netdev_dbg(dev, "no route to %pI4\n",
@@ -2056,7 +2055,7 @@
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-			return;
+			goto out_unlock;
 		}
 
 		if (!info)
@@ -2071,7 +2070,7 @@
 		if (err < 0)
 			goto xmit_tx_error;
 
-		udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr,
+		udp_tunnel_xmit_skb(rt, sk, skb, local_ip.sin.sin_addr.s_addr,
 				    dst->sin.sin_addr.s_addr, tos, ttl, df,
 				    src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -2087,7 +2086,7 @@
 		ndst = vxlan6_get_route(vxlan, skb,
 					rdst ? rdst->remote_ifindex : 0, tos,
 					label, &dst->sin6.sin6_addr,
-					&src->sin6.sin6_addr,
+					&local_ip.sin6.sin6_addr,
 					dst_cache, info);
 		if (IS_ERR(ndst)) {
 			netdev_dbg(dev, "no route to %pI6\n",
@@ -2117,7 +2116,7 @@
 			if (!dst_vxlan)
 				goto tx_error;
 			vxlan_encap_bypass(skb, vxlan, dst_vxlan);
-			return;
+			goto out_unlock;
 		}
 
 		if (!info)
@@ -2131,15 +2130,16 @@
 		if (err < 0) {
 			dst_release(ndst);
 			dev->stats.tx_errors++;
-			return;
+			goto out_unlock;
 		}
 		udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
-				     &src->sin6.sin6_addr,
+				     &local_ip.sin6.sin6_addr,
 				     &dst->sin6.sin6_addr, tos, ttl,
 				     label, src_port, dst_port, !udp_sum);
 #endif
 	}
-
+out_unlock:
+	rcu_read_unlock();
 	return;
 
 drop:
@@ -2155,6 +2155,7 @@
 	dev->stats.tx_errors++;
 tx_free:
 	dev_kfree_skb(skb);
+	rcu_read_unlock();
 }
 
 /* Transmit local packets over Vxlan
@@ -2637,7 +2638,7 @@
 
 	if (data[IFLA_VXLAN_ID]) {
 		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
-		if (id >= VXLAN_VID_MASK)
+		if (id >= VXLAN_N_VID)
 			return -ERANGE;
 	}
 
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 133f6b5..2c48419 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -887,6 +887,10 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n");
+	wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP |
+	      BIT_USER_SUPPORT_T_POWER_ON_0);
+
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
 					      WIL_PLATFORM_EVT_PRE_RESET);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index bfffc0e..4bccef3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -140,6 +140,9 @@
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
+#define RGF_USER_USAGE_8		(0x880020)
+	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
+	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e30f05c..4722782 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -306,13 +306,6 @@
 			return rc;
 	}
 
-	pci_iov_set_numvfs(dev, nr_virtfn);
-	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
-	pci_cfg_access_lock(dev);
-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
-	msleep(100);
-	pci_cfg_access_unlock(dev);
-
 	iov->initial_VFs = initial;
 	if (nr_virtfn < initial)
 		initial = nr_virtfn;
@@ -323,6 +316,13 @@
 		goto err_pcibios;
 	}
 
+	pci_iov_set_numvfs(dev, nr_virtfn);
+	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+	pci_cfg_access_lock(dev);
+	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+	msleep(100);
+	pci_cfg_access_unlock(dev);
+
 	for (i = 0; i < initial; i++) {
 		rc = pci_iov_add_virtfn(dev, i, 0);
 		if (rc)
@@ -554,21 +554,61 @@
 }
 
 /**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
  * @resno: the resource number
  *
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
  */
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
 {
-	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
-		return 0;
+	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+	struct resource *res = dev->resource + resno;
+	int vf_bar = resno - PCI_IOV_RESOURCES;
+	struct pci_bus_region region;
+	u16 cmd;
+	u32 new;
+	int reg;
 
-	BUG_ON(!dev->is_physfn);
+	/*
+	 * The generic pci_restore_bars() path calls this for all devices,
+	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
+	 * have nothing to do.
+	 */
+	if (!iov)
+		return;
 
-	return dev->sriov->pos + PCI_SRIOV_BAR +
-		4 * (resno - PCI_IOV_RESOURCES);
+	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+			 vf_bar, res);
+		return;
+	}
+
+	/*
+	 * Ignore unimplemented BARs, unused resource slots for 64-bit
+	 * BARs, and non-movable resources, e.g., those described via
+	 * Enhanced Allocation.
+	 */
+	if (!res->flags)
+		return;
+
+	if (res->flags & IORESOURCE_UNSET)
+		return;
+
+	if (res->flags & IORESOURCE_PCI_FIXED)
+		return;
+
+	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
+	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+	pci_write_config_dword(dev, reg, new);
+	if (res->flags & IORESOURCE_MEM_64) {
+		new = region.start >> 16 >> 16;
+		pci_write_config_dword(dev, reg + 4, new);
+	}
 }
 
 resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index eda6a7c..6922964 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -564,10 +564,6 @@
 {
 	int i;
 
-	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
-	if (dev->is_virtfn)
-		return;
-
 	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 		pci_update_resource(dev, i);
 }
@@ -4835,36 +4831,6 @@
 }
 EXPORT_SYMBOL(pci_select_bars);
 
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
-	int reg;
-
-	if (resno < PCI_ROM_RESOURCE) {
-		*type = pci_bar_unknown;
-		return PCI_BASE_ADDRESS_0 + 4 * resno;
-	} else if (resno == PCI_ROM_RESOURCE) {
-		*type = pci_bar_mem32;
-		return dev->rom_base_reg;
-	} else if (resno < PCI_BRIDGE_RESOURCES) {
-		/* device specific resource */
-		*type = pci_bar_unknown;
-		reg = pci_iov_resource_bar(dev, resno);
-		if (reg)
-			return reg;
-	}
-
-	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
-	return 0;
-}
-
 /* Some architectures require additional programming to enable VGA */
 static arch_set_vga_state_t arch_set_vga_state;
 
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4518562..a5d37f6 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -245,7 +245,6 @@
 int pci_setup_device(struct pci_dev *dev);
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		    struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
 void pci_configure_ari(struct pci_dev *dev);
 void __pci_bus_size_bridges(struct pci_bus *bus,
 			struct list_head *realloc_head);
@@ -289,7 +288,7 @@
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
 int pci_iov_bus_range(struct pci_bus *bus);
@@ -303,10 +302,6 @@
 
 {
 }
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
-	return 0;
-}
 static inline void pci_restore_iov_state(struct pci_dev *dev)
 {
 }
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 300770c..d266d80 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -227,7 +227,8 @@
 			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
 		}
 	} else {
-		res->flags |= (l & IORESOURCE_ROM_ENABLE);
+		if (l & PCI_ROM_ADDRESS_ENABLE)
+			res->flags |= IORESOURCE_ROM_ENABLE;
 		l64 = l & PCI_ROM_ADDRESS_MASK;
 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
 		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 3a035e07..087a218 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2173,6 +2173,7 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
 		quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
 
 /*
  * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 06663d3..b6edb18 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -35,6 +35,11 @@
 	if (res->flags & IORESOURCE_ROM_SHADOW)
 		return 0;
 
+	/*
+	 * Ideally pci_update_resource() would update the ROM BAR address,
+	 * and we would only set the enable bit here.  But apparently some
+	 * devices have buggy ROM BARs that read as zero when disabled.
+	 */
 	pcibios_resource_to_bus(pdev->bus, &region, res);
 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
 	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 9526e34..4bc589e 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
 #include <linux/slab.h>
 #include "pci.h"
 
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
 {
 	struct pci_bus_region region;
 	bool disable;
 	u16 cmd;
 	u32 new, check, mask;
 	int reg;
-	enum pci_bar_type type;
 	struct resource *res = dev->resource + resno;
 
-	if (dev->is_virtfn) {
-		dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+	if (dev->is_virtfn)
 		return;
-	}
 
 	/*
 	 * Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@
 		return;
 
 	pcibios_resource_to_bus(dev->bus, &region, res);
+	new = region.start;
 
-	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (res->flags & IORESOURCE_IO)
+	if (res->flags & IORESOURCE_IO) {
 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
-	else
+		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+	} else if (resno == PCI_ROM_RESOURCE) {
+		mask = (u32)PCI_ROM_ADDRESS_MASK;
+	} else {
 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+	}
 
-	reg = pci_resource_bar(dev, resno, &type);
-	if (!reg)
-		return;
-	if (type != pci_bar_unknown) {
+	if (resno < PCI_ROM_RESOURCE) {
+		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+	} else if (resno == PCI_ROM_RESOURCE) {
+
+		/*
+		 * Apparently some Matrox devices have ROM BARs that read
+		 * as zero when disabled, so don't update ROM BARs unless
+		 * they're enabled.  See https://lkml.org/lkml/2005/8/30/138.
+		 */
 		if (!(res->flags & IORESOURCE_ROM_ENABLE))
 			return;
+
+		reg = dev->rom_base_reg;
 		new |= PCI_ROM_ADDRESS_ENABLE;
-	}
+	} else
+		return;
 
 	/*
 	 * We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 }
 
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+	if (resno <= PCI_ROM_RESOURCE)
+		pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+	else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+		pci_iov_update_resource(dev, resno);
+#endif
+}
+
 int pci_claim_resource(struct pci_dev *dev, int resource)
 {
 	struct resource *res = &dev->resource[resource];
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 1817585..fff8966 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -568,7 +568,7 @@
 		if (!type)
 			break;
 
-		GSIDBG("type %x\n", type);
+		GSIDBG_LOW("type %x\n", type);
 
 		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
 			gsi_handle_ch_ctrl(ee);
@@ -2758,9 +2758,10 @@
 		unsigned long *size)
 {
 	if (base_offset)
-		*base_offset = GSI_GSI_INST_RAM_BASE_OFFS;
+		*base_offset = GSI_GSI_INST_RAM_n_OFFS(0);
 	if (size)
-		*size = GSI_GSI_INST_RAM_SIZE;
+		*size = GSI_GSI_INST_RAM_n_WORD_SZ *
+			(GSI_GSI_INST_RAM_n_MAXn + 1);
 }
 EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
 
@@ -2836,6 +2837,13 @@
 		return -ENOMEM;
 	}
 
+	gsi_ctx->ipc_logbuf = ipc_log_context_create(GSI_IPC_LOG_PAGES,
+		"gsi", 0);
+	if (gsi_ctx->ipc_logbuf == NULL) {
+		GSIERR("failed to get ipc_logbuf\n");
+		return -ENOMEM;
+	}
+
 	gsi_ctx->dev = dev;
 	init_completion(&gsi_ctx->gen_ee_cmd_compl);
 	gsi_debugfs_init();
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index d0eb162..f53a4bd 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <linux/msm_gsi.h>
+#include <linux/ipc_logging.h>
 
 #define GSI_CHAN_MAX      31
 #define GSI_EVT_RING_MAX  23
@@ -26,10 +27,48 @@
 #define gsi_readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
 #define gsi_writel(v, c)	({ __iowmb(); writel_relaxed((v), (c)); })
 
-#define GSIERR(fmt, args...) \
-		dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, ## args)
+#define GSI_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
+
 #define GSIDBG(fmt, args...) \
-		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, ## args)
+	do { \
+		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
+				"%s:%d " fmt, ## args); \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define GSIDBG_LOW(fmt, args...) \
+	do { \
+		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define GSIERR(fmt, args...) \
+	do { \
+		dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
+		## args);\
+		if (gsi_ctx) { \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
+				"%s:%d " fmt, ## args); \
+			GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
+				"%s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define GSI_IPC_LOG_PAGES 50
 
 enum gsi_evt_ring_state {
 	GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
@@ -163,6 +202,8 @@
 	u32 max_ch;
 	u32 max_ev;
 	struct completion gen_ee_cmd_compl;
+	void *ipc_logbuf;
+	void *ipc_logbuf_low;
 };
 
 enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 5eb9084..717c8917 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -747,6 +747,45 @@
 	return -EFAULT;
 }
 
+static ssize_t gsi_enable_ipc_low(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option) {
+		if (!gsi_ctx->ipc_logbuf_low) {
+			gsi_ctx->ipc_logbuf_low =
+				ipc_log_context_create(GSI_IPC_LOG_PAGES,
+					"gsi_low", 0);
+		}
+
+		if (gsi_ctx->ipc_logbuf_low == NULL) {
+			TERR("failed to get ipc_logbuf_low\n");
+			return -EFAULT;
+		}
+	} else {
+		if (gsi_ctx->ipc_logbuf_low)
+			ipc_log_context_destroy(gsi_ctx->ipc_logbuf_low);
+		gsi_ctx->ipc_logbuf_low = NULL;
+	}
+
+	return count;
+}
+
+
+
 const struct file_operations gsi_ev_dump_ops = {
 	.write = gsi_dump_evt,
 };
@@ -783,6 +822,10 @@
 	.write = gsi_print_dp_stats,
 };
 
+const struct file_operations gsi_ipc_low_ops = {
+	.write = gsi_enable_ipc_low,
+};
+
 void gsi_debugfs_init(void)
 {
 	static struct dentry *dfile;
@@ -858,6 +901,13 @@
 		goto fail;
 	}
 
+	dfile = debugfs_create_file("ipc_low", write_only_mode,
+		dent, 0, &gsi_ipc_low_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("could not create ipc_low\n");
+		goto fail;
+	}
+
 	return;
 fail:
 	debugfs_remove_recursive(dent);
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index d0462aa..653cdd4 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -688,8 +688,9 @@
 #define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_BMSK 0xfff
 #define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_SHFT 0x0
 
+#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4
 #define GSI_GSI_INST_RAM_n_OFFS(n) \
-	(GSI_GSI_REG_BASE_OFFS + 0x00004000 + 0x4 * (n))
+	(GSI_GSI_REG_BASE_OFFS + 0x00004000 + GSI_GSI_INST_RAM_n_WORD_SZ * (n))
 #define GSI_GSI_INST_RAM_n_RMSK 0xffffffff
 #define GSI_GSI_INST_RAM_n_MAXn 4095
 #define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
@@ -1842,7 +1843,5 @@
 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
 
-#define GSI_GSI_INST_RAM_BASE_OFFS	0x4000
-#define GSI_GSI_INST_RAM_SIZE		0x4000
 
 #endif /* __GSI_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 069f0a2..51c930a 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -74,6 +74,10 @@
 
 static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
 
+static int ipa_uc_ntn_cons_release(void);
+static int ipa_uc_ntn_cons_request(void);
+static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
+
 static int ipa_commit_partial_hdr(
 	struct ipa_ioc_add_hdr *hdr,
 	const char *netdev_name,
@@ -115,16 +119,37 @@
 	struct ipa_uc_offload_out_params *outp,
 	struct ipa_uc_offload_ctx *ntn_ctx)
 {
-	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_ioc_add_hdr *hdr = NULL;
 	struct ipa_tx_intf tx;
 	struct ipa_rx_intf rx;
 	struct ipa_ioc_tx_intf_prop tx_prop[2];
 	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	struct ipa_rm_create_params param;
 	u32 len;
 	int ret = 0;
 
 	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
 					 inp->netdev_name);
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	param.reg_params.user_data = ntn_ctx;
+	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+	param.floor_voltage = IPA_VOLTAGE_SVS;
+	ret = ipa_rm_create_resource(&param);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	param.request_resource = ipa_uc_ntn_cons_request;
+	param.release_resource = ipa_uc_ntn_cons_release;
+	ret = ipa_rm_create_resource(&param);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+		goto fail_create_rm_cons;
+	}
 
 	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
 	ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
@@ -136,7 +161,8 @@
 	hdr = kzalloc(len, GFP_KERNEL);
 	if (hdr == NULL) {
 		IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail_alloc;
 	}
 
 	if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
@@ -197,8 +223,15 @@
 	init_completion(&ntn_ctx->ntn_completion);
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
 
+	kfree(hdr);
+	return ret;
+
 fail:
 	kfree(hdr);
+fail_alloc:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 	return ret;
 }
 
@@ -301,9 +334,10 @@
 			struct ipa_ntn_conn_out_params *outp,
 			struct ipa_uc_offload_ctx *ntn_ctx)
 {
-	struct ipa_rm_create_params param;
 	int result = 0;
+	enum ipa_uc_offload_state prev_state;
 
+	prev_state = ntn_ctx->state;
 	if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
 		inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
 		IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
@@ -315,42 +349,13 @@
 		return -EINVAL;
 	}
 
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
-	param.reg_params.user_data = ntn_ctx;
-	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
-	param.floor_voltage = IPA_VOLTAGE_SVS;
-	result = ipa_rm_create_resource(&param);
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
 	if (result) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
-		return -EFAULT;
+		IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result);
+		return result;
 	}
 
-	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
-	param.request_resource = ipa_uc_ntn_cons_request;
-	param.release_resource = ipa_uc_ntn_cons_release;
-	result = ipa_rm_create_resource(&param);
-	if (result) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
-		goto fail_create_rm_cons;
-	}
-
-	if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-		IPA_RM_RESOURCE_APPS_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
-		ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
-		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
 	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
 	if (result == -EINPROGRESS) {
 		if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
@@ -365,13 +370,22 @@
 		goto fail;
 	}
 
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+	result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+		ntn_ctx->priv, ntn_ctx->hdr_len, outp);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
+				result);
+		ntn_ctx->state = prev_state;
+		result = -EFAULT;
+		goto fail;
+	}
+
 	return 0;
 
 fail:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
-fail_create_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
-
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
 	return result;
 }
 
@@ -399,7 +413,8 @@
 		return -EINVAL;
 	}
 
-	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED &&
+		offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
 		IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
 		return -EPERM;
 	}
@@ -454,32 +469,34 @@
 static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 {
 	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+	int ret = 0;
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
-	if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
-				IPA_RM_RESOURCE_APPS_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to release ODU_ADAPT_PROD res: %d\n",
+						  ret);
 		return -EFAULT;
 	}
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
-		return -EFAULT;
-	}
-
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to del dep ODU->APPS, %d\n", ret);
 		return -EFAULT;
 	}
 
 	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
 	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
-	if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
-		IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+	ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl);
+	if (ret) {
+		IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
+						 ret);
 		return -EFAULT;
 	}
 
-	return 0;
+	return ret;
 }
 
 int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
@@ -524,6 +541,16 @@
 	int len, result = 0;
 	struct ipa_ioc_del_hdr *hdr;
 
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+		return -EFAULT;
+	}
+
 	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
 	hdr = kzalloc(len, GFP_KERNEL);
 	if (hdr == NULL) {
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
index 54cad88..e10c75a 100644
--- a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -227,7 +227,7 @@
 	if (ipa_rm_dep_graph_get_resource(graph,
 					  resource_name,
 					  &dependent)) {
-		IPA_RM_ERR("%s does not exist\n",
+		IPA_RM_DBG("%s does not exist\n",
 					ipa_rm_resource_str(resource_name));
 		result = -EINVAL;
 		goto bail;
@@ -236,7 +236,7 @@
 	if (ipa_rm_dep_graph_get_resource(graph,
 					  depends_on_name,
 					  &dependency)) {
-		IPA_RM_ERR("%s does not exist\n",
+		IPA_RM_DBG("%s does not exist\n",
 					ipa_rm_resource_str(depends_on_name));
 		result = -EINVAL;
 		goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index ad5b799..e474a40 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1463,18 +1463,18 @@
 
 	mutex_lock(&ipa_ctx->lock);
 	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
 			&ep->dflt_flt4_rule_hdl);
 	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+	tbl->sticky_rear = true;
 
 	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
-	tbl->sticky_rear = true;
 	rule.action = IPA_PASS_TO_EXCEPTION;
-	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
 			&ep->dflt_flt6_rule_hdl);
 	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+	tbl->sticky_rear = true;
 	mutex_unlock(&ipa_ctx->lock);
 }
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index 00d52d0..6f59ebd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -299,12 +299,6 @@
 	/* setup ul ep cfg */
 	ep_ul->valid = 1;
 	ep_ul->client = in->ul.client;
-	result = ipa_enable_data_path(ipa_ep_idx_ul);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_ul);
-		return -EFAULT;
-	}
 	ep_ul->client_notify = notify;
 	ep_ul->priv = priv;
 
@@ -333,14 +327,6 @@
 	/* setup dl ep cfg */
 	ep_dl->valid = 1;
 	ep_dl->client = in->dl.client;
-	result = ipa_enable_data_path(ipa_ep_idx_dl);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_dl);
-		result = -EFAULT;
-		goto fail;
-	}
-
 	memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
 	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
 	ep_dl->cfg.hdr.hdr_len = hdr_len;
@@ -359,9 +345,16 @@
 	}
 	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
 	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+
+	result = ipa_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("Enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
 	IPAERR("client %d (ep: %d) connected\n", in->dl.client,
 		ipa_ep_idx_dl);
-	ipa_inc_acquire_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
 
 fail:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -403,11 +396,31 @@
 	}
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	/* teardown the UL pipe */
 	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
 	cmd_data->protocol = IPA_HW_FEATURE_NTN;
-
 	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+
+	/* teardown the DL pipe */
+	ipa_disable_data_path(ipa_ep_idx_dl);
+	/*
+	 * Reset the ep before sending the cmd; otherwise a disconnect
+	 * during data transfer will result in a flood of
+	 * suspend interrupts
+	 */
+	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* teardown the UL pipe */
 	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
 	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
@@ -418,27 +431,11 @@
 		result = -EFAULT;
 		goto fail;
 	}
-	ipa_disable_data_path(ipa_ep_idx_ul);
+
 	ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
 	memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
 	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
 
-	/* teardown the DL pipe */
-	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
-	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
-				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		IPAERR("fail to tear down ul pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa_disable_data_path(ipa_ep_idx_dl);
-	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
-	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
-	ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
-
 fail:
 	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 513d7bb..db732c5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1072,6 +1072,8 @@
 		IPAWANDBG
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1113,6 +1115,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1128,7 +1132,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
@@ -2637,7 +2640,7 @@
  *
  * Return codes:
  * 0: Success
- * -EFAULT: Invalid interface name provided
+ * -EFAULT: Invalid src/dst pipes provided
  * other: See ipa_qmi_set_data_quota
  */
 int rmnet_ipa_set_tether_client_pipe(
@@ -2645,6 +2648,23 @@
 {
 	int number, i;
 
+	/* Validate that ul_src_pipe_len is within range */
+	if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
+		data->ul_src_pipe_len < 0) {
+		IPAWANERR("UL src pipes %d exceeding max %d\n",
+			data->ul_src_pipe_len,
+			QMI_IPA_MAX_PIPES_V01);
+		return -EFAULT;
+	}
+	/* Validate that dl_dst_pipe_len is within range */
+	if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
+		data->dl_dst_pipe_len < 0) {
+		IPAWANERR("DL dst pipes %d exceeding max %d\n",
+			data->dl_dst_pipe_len,
+			QMI_IPA_MAX_PIPES_V01);
+		return -EFAULT;
+	}
+
 	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
 	data->ipa_client,
 	data->ul_src_pipe_len,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index ed14033..20b73d8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2934,14 +2934,17 @@
 	}
 
 	/* LAN OUT (AP->IPA) */
-	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
-	sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
-	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
-	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
-	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_out)) {
-		IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
-		result = -EPERM;
-		goto fail_lan_data_out;
+	if (!ipa3_ctx->ipa_config_is_mhi) {
+		memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+		sys_in.client = IPA_CLIENT_APPS_LAN_PROD;
+		sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+		sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+		if (ipa3_setup_sys_pipe(&sys_in,
+			&ipa3_ctx->clnt_hdl_data_out)) {
+			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
+			result = -EPERM;
+			goto fail_lan_data_out;
+		}
 	}
 
 	return 0;
@@ -2962,7 +2965,8 @@
 
 static void ipa3_teardown_apps_pipes(void)
 {
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+	if (!ipa3_ctx->ipa_config_is_mhi)
+		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
 	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
@@ -3122,6 +3126,12 @@
 
 static int ipa3_get_clks(struct device *dev)
 {
+	if (ipa3_res.use_bw_vote) {
+		IPADBG("Vote IPA clock by bw voting via bus scaling driver\n");
+		ipa3_clk = NULL;
+		return 0;
+	}
+
 	ipa3_clk = clk_get(dev, "core_clk");
 	if (IS_ERR(ipa3_clk)) {
 		if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
@@ -3136,17 +3146,15 @@
  */
 void _ipa_enable_clks_v3_0(void)
 {
-	IPADBG_LOW("enabling gcc_ipa_clk\n");
+	IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
 	if (ipa3_clk) {
+		IPADBG_LOW("enabling gcc_ipa_clk\n");
 		clk_prepare(ipa3_clk);
 		clk_enable(ipa3_clk);
-		IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
 		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
-		ipa3_uc_notify_clk_state(true);
-	} else {
-		WARN_ON(1);
 	}
 
+	ipa3_uc_notify_clk_state(true);
 	ipa3_suspend_apps_pipes(false);
 }
 
@@ -3184,12 +3192,11 @@
 {
 	IPADBG("enabling IPA clocks and bus voting\n");
 
-	ipa3_ctx->ctrl->ipa3_enable_clks();
+	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+	    ipa3_get_bus_vote()))
+		WARN_ON(1);
 
-	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
-		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
-		    ipa3_get_bus_vote()))
-			WARN_ON(1);
+	ipa3_ctx->ctrl->ipa3_enable_clks();
 }
 
 
@@ -3198,13 +3205,12 @@
  */
 void _ipa_disable_clks_v3_0(void)
 {
-	IPADBG_LOW("disabling gcc_ipa_clk\n");
 	ipa3_suspend_apps_pipes(true);
 	ipa3_uc_notify_clk_state(false);
-	if (ipa3_clk)
+	if (ipa3_clk) {
+		IPADBG_LOW("disabling gcc_ipa_clk\n");
 		clk_disable_unprepare(ipa3_clk);
-	else
-		WARN_ON(1);
+	}
 }
 
 /**
@@ -3219,10 +3225,8 @@
 
 	ipa3_ctx->ctrl->ipa3_disable_clks();
 
-	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
-		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
-		    0))
-			WARN_ON(1);
+	if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl, 0))
+		WARN_ON(1);
 }
 
 /**
@@ -3524,11 +3528,11 @@
 	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
 	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
 	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
-		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
-		if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
-			if (msm_bus_scale_client_update_request(
-			    ipa3_ctx->ipa_bus_hdl, ipa3_get_bus_vote()))
-				WARN_ON(1);
+		if (ipa3_clk)
+			clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+			ipa3_get_bus_vote()))
+			WARN_ON(1);
 	} else {
 		IPADBG_LOW("clocks are gated, not setting rate\n");
 	}
@@ -3889,6 +3893,10 @@
  * @pdev:	The platform device structure representing the IPA driver
  *
  * Function initialization process:
+ * - Initialize endpoints bitmaps
+ * - Initialize resource groups min and max values
+ * - Initialize filtering lists heads and idr
+ * - Initialize interrupts
  * - Register GSI
  * - Setup APPS pipes
  * - Initialize tethering bridge
@@ -3906,6 +3914,61 @@
 	int result;
 	struct gsi_per_props gsi_props;
 	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+	struct ipa3_flt_tbl *flt_tbl;
+	int i;
+
+	/*
+	 * Whether we work in MHI or non-MHI config is indicated in
+	 * ipa3_write, which runs before ipa3_post_init; i.e. from this
+	 * point it is safe to use the ipa3_ep_mapping array and the
+	 * correct entry will be returned from ipa3_get_hw_type_index()
+	 */
+	ipa_init_ep_flt_bitmap();
+	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+	/* Assign resource limitation to each group */
+	ipa3_set_resorce_groups_min_max_limits();
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_init_interrupts();
+		if (result) {
+			IPAERR("ipa initialization of interrupts failed\n");
+			result = -ENODEV;
+			goto fail_register_device;
+		}
+	} else {
+		IPADBG("Initialization of ipa interrupts skipped\n");
+	}
+
+	/*
+	 * IPAv3.5 and above requires disabling prefetch for USB in order
+	 * to allow MBIM to work; currently MBIM is not needed in MHI mode.
+	 */
+	if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) &&
+		(!ipa3_ctx->ipa_config_is_mhi))
+		ipa3_disable_prefetch(IPA_CLIENT_USB_CONS);
 
 	memset(&gsi_props, 0, sizeof(gsi_props));
 	gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
@@ -4008,6 +4071,9 @@
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
 	iounmap(ipa3_ctx->mmio);
 	ipa3_disable_clks();
+	if (ipa3_clk)
+		clk_put(ipa3_clk);
+	ipa3_clk = NULL;
 	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
 	if (ipa3_bus_scale_table) {
 		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
@@ -4100,10 +4166,19 @@
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 
-	if (ipa3_is_msm_device())
+	if (ipa3_is_msm_device()) {
 		result = ipa3_trigger_fw_loading_msms();
-	else
+	} else {
+		if (!strcasecmp(dbg_buff, "MHI")) {
+			ipa3_ctx->ipa_config_is_mhi = true;
+			pr_info(
+			"IPA is loading with MHI configuration\n");
+		} else {
+			pr_info(
+			"IPA is loading with non MHI configuration\n");
+		}
 		result = ipa3_trigger_fw_loading_mdms();
+	}
 	/* No IPAv3.x chipsets that don't support FW loading */
 
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -4173,35 +4248,34 @@
 * @pdev:	The platform device structure representing the IPA driver
 *
 * Function initialization process:
-* - Allocate memory for the driver context data struct
-* - Initializing the ipa3_ctx with:
+* Allocate memory for the driver context data struct
+* Initializing the ipa3_ctx with :
 *    1)parsed values from the dts file
 *    2)parameters passed to the module initialization
 *    3)read HW values(such as core memory size)
-* - Map IPA core registers to CPU memory
-* - Restart IPA core(HW reset)
-* - Initialize the look-aside caches(kmem_cache/slab) for filter,
+* Map IPA core registers to CPU memory
+* Restart IPA core(HW reset)
+* Initialize the look-aside caches(kmem_cache/slab) for filter,
 *   routing and IPA-tree
-* - Create memory pool with 4 objects for DMA operations(each object
+* Create memory pool with 4 objects for DMA operations(each object
 *   is 512Bytes long), this object will be use for tx(A5->IPA)
-* - Initialize lists head(routing,filter,hdr,system pipes)
-* - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
-* - Initialize spinlocks (for list related to A5<->IPA pipes)
-* - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
-* - Initialize Red-Black-Tree(s) for handles of header,routing rule,
-*   routing table ,filtering rule
-* - Initialize the filter block by committing IPV4 and IPV6 default rules
-* - Create empty routing table in system memory(no committing)
-* - Create a char-device for IPA
-* - Initialize IPA RM (resource manager)
-* - Configure GSI registers (in GSI case)
+* Initialize lists head(routing, hdr, system pipes)
+* Initialize mutexes (for ipa_ctx and NAT memory mutexes)
+* Initialize spinlocks (for list related to A5<->IPA pipes)
+* Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
+* Initialize Red-Black-Tree(s) for handles of header,routing rule,
+*  routing table ,filtering rule
+* Initialize the filter block by committing IPV4 and IPV6 default rules
+* Create empty routing table in system memory(no committing)
+* Create a char-device for IPA
+* Initialize IPA RM (resource manager)
+* Configure GSI registers (in GSI case)
 */
 static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		struct device *ipa_dev)
 {
 	int result = 0;
 	int i;
-	struct ipa3_flt_tbl *flt_tbl;
 	struct ipa3_rt_tbl_set *rset;
 	struct ipa_active_client_logging_info log_info;
 
@@ -4293,22 +4367,19 @@
 	}
 
 	if (ipa3_bus_scale_table) {
-		IPADBG("Use bus scaling info from device tree\n");
+		IPADBG("Use bus scaling info from device tree #usecases=%d\n",
+			ipa3_bus_scale_table->num_usecases);
 		ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
 	}
 
-	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
-		/* get BUS handle */
-		ipa3_ctx->ipa_bus_hdl =
-			msm_bus_scale_register_client(
-				ipa3_ctx->ctrl->msm_bus_data_ptr);
-		if (!ipa3_ctx->ipa_bus_hdl) {
-			IPAERR("fail to register with bus mgr!\n");
-			result = -ENODEV;
-			goto fail_bus_reg;
-		}
-	} else {
-		IPADBG("Skipping bus scaling registration on Virtual plat\n");
+	/* get BUS handle */
+	ipa3_ctx->ipa_bus_hdl =
+		msm_bus_scale_register_client(
+			ipa3_ctx->ctrl->msm_bus_data_ptr);
+	if (!ipa3_ctx->ipa_bus_hdl) {
+		IPAERR("fail to register with bus mgr!\n");
+		result = -ENODEV;
+		goto fail_bus_reg;
 	}
 
 	/* get IPA clocks */
@@ -4362,10 +4433,6 @@
 		goto fail_init_hw;
 	}
 
-	ipa_init_ep_flt_bitmap();
-	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
-		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
-
 	ipa3_ctx->ctrl->ipa_sram_read_settings();
 	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
 		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
@@ -4398,9 +4465,6 @@
 	ipa3_active_clients_log_inc(&log_info, false);
 	ipa3_ctx->ipa3_active_clients.cnt = 1;
 
-	/* Assign resource limitation to each group */
-	ipa3_set_resorce_groups_min_max_limits();
-
 	/* Create workqueues for power management */
 	ipa3_ctx->power_mgmt_wq =
 		create_singlethread_workqueue("ipa_power_mgmt");
@@ -4503,26 +4567,6 @@
 	}
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
 	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
-	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
-		if (!ipa_is_ep_support_flt(i))
-			continue;
-
-		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
-		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
-		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
-			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
-		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
-			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
-
-		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
-		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
-		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
-			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
-		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
-			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
-		idr_init(&flt_tbl->rule_ids);
-	}
 
 	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
 	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
@@ -4604,17 +4648,6 @@
 		goto fail_create_apps_resource;
 	}
 
-	if (!ipa3_ctx->apply_rg10_wa) {
-		result = ipa3_init_interrupts();
-		if (result) {
-			IPAERR("ipa initialization of interrupts failed\n");
-			result = -ENODEV;
-			goto fail_ipa_init_interrupts;
-		}
-	} else {
-		IPADBG("Initialization of ipa interrupts skipped\n");
-	}
-
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
 		ipa3_enable_dcd();
 
@@ -4653,7 +4686,6 @@
 fail_device_create:
 	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
 fail_alloc_chrdev_region:
-	ipa3_destroy_flt_tbl_idrs();
 	idr_destroy(&ipa3_ctx->ipa_idr);
 	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
 fail_rx_pkt_wrapper_cache:
@@ -4684,9 +4716,16 @@
 	ipa3_disable_clks();
 	ipa3_active_clients_log_destroy();
 fail_init_active_client:
+	if (ipa3_clk)
+		clk_put(ipa3_clk);
+	ipa3_clk = NULL;
 fail_clk:
 	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
 fail_bus_reg:
+	if (ipa3_bus_scale_table) {
+		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
+		ipa3_bus_scale_table = NULL;
+	}
 fail_init_mem_partition:
 fail_bind:
 	kfree(ipa3_ctx->ctrl);
@@ -4717,6 +4756,7 @@
 	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
 	ipa_drv_res->ipa_wdi2 = false;
 	ipa_drv_res->use_64_bit_dma_mask = false;
+	ipa_drv_res->use_bw_vote = false;
 	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
 	ipa_drv_res->apply_rg10_wa = false;
@@ -4796,6 +4836,13 @@
 			ipa_drv_res->use_64_bit_dma_mask
 			? "True" : "False");
 
+	ipa_drv_res->use_bw_vote =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,bandwidth-vote-for-ipa");
+	IPADBG(": use_bw_vote = %s\n",
+			ipa_drv_res->use_bw_vote
+			? "True" : "False");
+
 	ipa_drv_res->skip_uc_pipe_reset =
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,skip-uc-pipe-reset");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 9e72f67..244c80c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1054,7 +1054,7 @@
  * @ctrl: holds the core specific operations based on
  *  core version (vtable like)
  * @enable_clock_scaling: clock scaling is enabled ?
- * @curr_ipa_clk_rate: ipa3_clk current rate
+ * @curr_ipa_clk_rate: IPA current clock rate
  * @wcstats: wlan common buffer stats
  * @uc_ctx: uC interface context
  * @uc_wdi_ctx: WDI specific fields for uC interface
@@ -1141,6 +1141,7 @@
 	wait_queue_head_t msg_waitq;
 	enum ipa_hw_type ipa_hw_type;
 	enum ipa3_hw_mode ipa3_hw_mode;
+	bool ipa_config_is_mhi;
 	bool use_ipa_teth_bridge;
 	bool modem_cfg_emb_pipe_flt;
 	bool ipa_wdi2;
@@ -1209,6 +1210,7 @@
 	bool modem_cfg_emb_pipe_flt;
 	bool ipa_wdi2;
 	bool use_64_bit_dma_mask;
+	bool use_bw_vote;
 	u32 wan_rx_ring_size;
 	u32 lan_rx_ring_size;
 	bool skip_uc_pipe_reset;
@@ -1960,4 +1962,5 @@
 bool ipa3_is_msm_device(void);
 struct device *ipa3_get_pdev(void);
 void ipa3_enable_dcd(void);
+void ipa3_disable_prefetch(enum ipa_client_type client);
 #endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 7b89184..30243da 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -265,12 +265,6 @@
 	/* setup ul ep cfg */
 	ep_ul->valid = 1;
 	ep_ul->client = in->ul.client;
-	result = ipa3_enable_data_path(ipa_ep_idx_ul);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_ul);
-		return -EFAULT;
-	}
 	ep_ul->client_notify = notify;
 	ep_ul->priv = priv;
 
@@ -299,14 +293,6 @@
 	/* setup dl ep cfg */
 	ep_dl->valid = 1;
 	ep_dl->client = in->dl.client;
-	result = ipa3_enable_data_path(ipa_ep_idx_dl);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_dl);
-		result = -EFAULT;
-		goto fail;
-	}
-
 	memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
 	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
 	ep_dl->cfg.hdr.hdr_len = hdr_len;
@@ -325,6 +311,14 @@
 	}
 	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
 	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("Enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
 	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
 		ipa_ep_idx_dl);
 
@@ -368,11 +362,31 @@
 	}
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	/* teardown the UL pipe */
 	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
 	cmd_data->protocol = IPA_HW_FEATURE_NTN;
-
 	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+
+	/* teardown the DL pipe */
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	/*
+	 * Reset the EP before sending the command; otherwise a disconnect
+	 * during an active data transfer will result in a flood of
+	 * suspend interrupts.
+	 */
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	/* teardown the UL pipe */
 	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
 				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
@@ -383,25 +397,9 @@
 		result = -EFAULT;
 		goto fail;
 	}
-	ipa3_disable_data_path(ipa_ep_idx_ul);
 	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
-	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
-	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
-
-	/* teardown the DL pipe */
-	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
-	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
-				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		IPAERR("fail to tear down ul pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa3_disable_data_path(ipa_ep_idx_dl);
 	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
-	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
 
 fail:
 	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 836e3e8..0519563 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -112,11 +112,12 @@
 #define IPA_v3_0_GROUP_Q6ZIP_ENGINE	IPA_v3_0_GROUP_UC_RX_Q
 #define IPA_v3_0_GROUP_MAX		(6)
 
-#define IPA_v3_5_GROUP_LWA_DL		(0)
-#define IPA_v3_5_GROUP_PCIE		(0)
+#define IPA_v3_5_GROUP_LWA_DL		(0) /* currently not used */
+#define IPA_v3_5_MHI_GROUP_PCIE	IPA_v3_5_GROUP_LWA_DL
 #define IPA_v3_5_GROUP_UL_DL		(1)
-#define IPA_v3_5_GROUP_DMA		(2)
-#define IPA_v3_5_GROUP_UC_RX_Q		(3)
+#define IPA_v3_5_MHI_GROUP_DDR		IPA_v3_5_GROUP_UL_DL
+#define IPA_v3_5_MHI_GROUP_DMA		(2)
+#define IPA_v3_5_GROUP_UC_RX_Q		(3) /* currently not used */
 #define IPA_v3_5_SRC_GROUP_MAX		(4)
 #define IPA_v3_5_DST_GROUP_MAX		(3)
 
@@ -167,6 +168,7 @@
 enum ipa_ver {
 	IPA_3_0,
 	IPA_3_5,
+	IPA_3_5_MHI,
 	IPA_3_5_1,
 	IPA_VER_MAX,
 };
@@ -195,6 +197,19 @@
 	[IPA_3_5] = {
 		/* LWA_DL  UL_DL    not used  UC_RX_Q, other are invalid */
 		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{0, 0}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{0, 0}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{0, 0}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{0, 0}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA  not used, other are invalid */
+		[IPA_v3_5_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
 		{4, 4}, {5, 5}, {1, 1}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
 		{10, 10}, {10, 10}, {8, 8}, {0, 0}, {0, 0}, {0, 0} },
@@ -223,7 +238,7 @@
 static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
 	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
 	[IPA_3_0] = {
-		/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
+		/* UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng */
 		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
 		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
@@ -232,14 +247,21 @@
 		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
 	},
 	[IPA_3_5] = {
-		/*LWA_DL UL/DL/DPL not used, other are invalid */
+		/* unused UL/DL/DPL unused N/A    N/A     N/A */
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	},
+	[IPA_3_5_MHI] = {
+		/* PCIE  DDR     DMA     N/A     N/A     N/A */
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
 		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
 	},
 	[IPA_3_5_1] = {
-		/*LWA_DL UL/DL/DPL not used, other are invalid */
+		/* LWA_DL UL/DL/DPL unused N/A   N/A     N/A */
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
 		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
 		[IPA_v3_5_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
@@ -250,17 +272,22 @@
 static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
 	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
 	[IPA_3_0] = {
-		/*UL	DL	DIAG	DMA	Unused	uC Rx*/
+		/* UL	DL	DIAG	DMA	Unused	uC Rx */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
 	},
 	[IPA_3_5] = {
-		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		/* unused UL_DL	unused UC_RX_Q   N/A     N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
-		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+		{0, 0}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
 	},
+	[IPA_3_5_MHI] = {
+		/* PCIE   DDR	     DMA       unused   N/A        N/A */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{ 3, 3 }, { 7, 7 }, { 2, 2 }, { 0, 0 }, { 0, 0 }, { 0, 0 } },
+},
 	[IPA_3_5_1] = {
-		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		/* LWA_DL UL_DL	unused   UC_RX_Q N/A     N/A */
 		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
 		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
 	},
@@ -519,15 +546,11 @@
 
 	/* IPA_3_5 */
 	[IPA_3_5][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
-	/*
-	 * for WLAN1_PROD this configuration is temporal and needs to be updated
-	 * according to documentation.
-	 */
 	[IPA_3_5][IPA_CLIENT_WLAN1_PROD]          = {
 			6, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 6, 1, 12, 30, IPA_EE_UC } },
+			{ 6, 1, 8, 16, IPA_EE_UC } },
 	[IPA_3_5][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
@@ -563,22 +586,14 @@
 			1, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MHI_PROD]            = {
-			1, IPA_v3_5_GROUP_PCIE, true,
-			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_PCIE,
-			{ 1, 0, 8, 16, IPA_EE_AP } },
+			{ 1, 0, 8, 16, IPA_EE_UC } },
+	[IPA_3_5][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_PROD]         = {
-			3, IPA_v3_5_GROUP_UL_DL, false,
+			3, IPA_v3_5_GROUP_UL_DL, true,
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 3, 0, 16, 32, IPA_EE_Q6 } },
-	[IPA_3_5][IPA_CLIENT_Q6_WAN_PROD]         = {
-			6, IPA_v3_5_GROUP_UL_DL, true,
-			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
-			QMB_MASTER_SELECT_DDR,
-			{ 6, 4, 10, 30, IPA_EE_Q6 } },
+	[IPA_3_5][IPA_CLIENT_Q6_WAN_PROD]         = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_CMD_PROD]	  = {
 			4, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
@@ -586,16 +601,8 @@
 			{ 4, 1, 20, 23, IPA_EE_Q6 } },
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
-			7, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
-			QMB_MASTER_SELECT_PCIE,
-			{ 7, 8, 8, 16, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
-			8, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
-			QMB_MASTER_SELECT_PCIE,
-			{ 8, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5][IPA_CLIENT_TEST_PROD]           = {
 			0, IPA_v3_5_GROUP_UL_DL, true,
@@ -650,7 +657,7 @@
 	[IPA_3_5][IPA_CLIENT_USB_CONS]            = {
 			17, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_USB_DPL_CONS]        = {
 			14, IPA_v3_5_GROUP_UL_DL, false,
@@ -676,11 +683,7 @@
 			QMB_MASTER_SELECT_DDR,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MHI_CONS]            = {
-			15, IPA_v3_5_GROUP_PCIE, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LAN_CONS]         = {
 			13, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
@@ -694,22 +697,15 @@
 	[IPA_3_5][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP_CONS]	  = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_CONS]	  = IPA_CLIENT_NOT_USED,
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
-			18, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 18, 12, 8, 8, IPA_EE_AP } },
-	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
-			19, IPA_v3_5_GROUP_DMA, false,
-			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_PCIE,
-			{ 19, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
+	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_3_5][IPA_CLIENT_TEST_CONS]           = {
 			15, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 15, 1, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST1_CONS]           = {
 			15, IPA_v3_5_GROUP_UL_DL, false,
@@ -719,7 +715,7 @@
 	[IPA_3_5][IPA_CLIENT_TEST2_CONS]          = {
 			17, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 17, 11, 8, 8, IPA_EE_AP } },
 	[IPA_3_5][IPA_CLIENT_TEST3_CONS]          = {
 			18, IPA_v3_5_GROUP_UL_DL, false,
@@ -729,9 +725,195 @@
 	[IPA_3_5][IPA_CLIENT_TEST4_CONS]          = {
 			19, IPA_v3_5_GROUP_UL_DL, false,
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
-			QMB_MASTER_SELECT_DDR,
+			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
 
+	/* IPA_3_5_MHI */
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_PROD]   = {
+			2, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 2, 3, 16, 32, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_CMD_PROD]	  = {
+			5, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 5, 4, 20, 23, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_PROD]            = {
+			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_PROD]         = {
+			3, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 3, 0, 16, 32, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_PROD]         = {
+			6, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 6, 4, 10, 30, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_CMD_PROD]	  = {
+			4, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 4, 1, 20, 23, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = {
+			7, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = {
+			8, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v3_5_MHI_GROUP_DDR, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{0, 7, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_PROD]          = {
+			1, IPA_v3_5_MHI_GROUP_PCIE, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE,
+			{ 1, 0, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_PROD]          = {
+			7, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{7, 8, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_PROD]          = {
+			8, IPA_v3_5_MHI_GROUP_DMA, true,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR,
+			{ 8, 9, 8, 16, IPA_EE_AP } },
+
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN1_CONS]          = {
+			16, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 16, 3, 8, 8, IPA_EE_UC } },
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_USB_CONS]            = {
+			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_USB_DPL_CONS]        = {
+			14, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 14, 10, 4, 6, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_LAN_CONS]       = {
+			9, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 9, 5, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_APPS_WAN_CONS]       = {
+			10, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 6, 8, 12, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MHI_CONS]            = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LAN_CONS]         = {
+			13, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 13, 3, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_WAN_CONS]         = {
+			12, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 12, 2, 8, 12, IPA_EE_Q6 } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DUN_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_DECOMP2_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = {
+			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = {
+			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]	= IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS]           = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST1_CONS]           = {
+			15, IPA_v3_5_MHI_GROUP_PCIE, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 15, 1, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST2_CONS]          = {
+			17, IPA_v3_5_MHI_GROUP_DDR, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 17, 11, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST3_CONS]          = {
+			18, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 18, 12, 8, 8, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_TEST4_CONS]          = {
+			19, IPA_v3_5_MHI_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE,
+			{ 19, 13, 8, 8, IPA_EE_AP } },
 
 	/* IPA_3_5_1 */
 	[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
@@ -1383,6 +1565,11 @@
 	int qsb_max_writes[2] = { 8, 2 };
 	int qsb_max_reads[2] = { 8, 8 };
 
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) {
+		qsb_max_writes[1] = 4;
+		qsb_max_reads[1] = 12;
+	}
+
 	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
 	ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
 }
@@ -1441,6 +1628,12 @@
 		break;
 	case IPA_HW_v3_5:
 		hw_type_index = IPA_3_5;
+		/*
+		 * This flag is initialized only after the FW load is
+		 * triggered from user space (ipa3_write).
+		 */
+		if (ipa3_ctx->ipa_config_is_mhi)
+			hw_type_index = IPA_3_5_MHI;
 		break;
 	case IPA_HW_v3_5_1:
 		hw_type_index = IPA_3_5_1;
@@ -3729,6 +3922,7 @@
 		}
 		break;
 	case IPA_3_5:
+	case IPA_3_5_MHI:
 	case IPA_3_5_1:
 		if (src) {
 			switch (group_index) {
@@ -3738,7 +3932,7 @@
 					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
 				break;
-			case IPA_v3_5_GROUP_DMA:
+			case IPA_v3_5_MHI_GROUP_DMA:
 			case IPA_v3_5_GROUP_UC_RX_Q:
 				ipahal_write_reg_n_fields(
 					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
@@ -3758,7 +3952,7 @@
 					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
 					n, val);
 				break;
-			case IPA_v3_5_GROUP_DMA:
+			case IPA_v3_5_MHI_GROUP_DMA:
 				ipahal_write_reg_n_fields(
 					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
 					n, val);
@@ -3838,6 +4032,7 @@
 		dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
 		break;
 	case IPA_3_5:
+	case IPA_3_5_MHI:
 	case IPA_3_5_1:
 		src_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_SRC_MAX;
 		dst_rsrc_type_max = IPA_v3_5_RSRC_GRP_TYPE_DST_MAX;
@@ -4047,11 +4242,6 @@
 
 	memset(&mem, 0, sizeof(mem));
 
-	if (IPA_CLIENT_IS_PROD(ep->client)) {
-		res = gsi_stop_channel(ep->gsi_chan_hdl);
-		goto end_sequence;
-	}
-
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
 		IPADBG("Calling gsi_stop_channel\n");
 		res = gsi_stop_channel(ep->gsi_chan_hdl);
@@ -4059,12 +4249,14 @@
 		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
 			goto end_sequence;
 
-		IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
-		/* Send a 1B packet DMA_TASK to IPA and try again */
-		res = ipa3_inject_dma_task_for_gsi();
-		if (res) {
-			IPAERR("Failed to inject DMA TASk for GSI\n");
-			goto end_sequence;
+		if (IPA_CLIENT_IS_CONS(ep->client)) {
+			IPADBG("Inject a DMA_TASK with 1B packet to IPA\n");
+			/* Send a 1B packet DMA_TASK to IPA and try again */
+			res = ipa3_inject_dma_task_for_gsi();
+			if (res) {
+				IPAERR("Failed to inject DMA TASk for GSI\n");
+				goto end_sequence;
+			}
 		}
 
 		/* sleep for short period to flush IPA */
@@ -4266,6 +4458,33 @@
 }
 
 /**
+* ipa3_disable_prefetch() - disable/enable TX prefetch
+*
+* @client: the client which is related to the TX where prefetch will be
+*          disabled
+*
+* Return value: Non applicable
+*
+*/
+void ipa3_disable_prefetch(enum ipa_client_type client)
+{
+	struct ipahal_reg_tx_cfg cfg;
+	u8 qmb;
+
+	qmb = ipa3_get_qmb_master_sel(client);
+
+	IPADBG("disabling prefetch for qmb %d\n", (int)qmb);
+
+	ipahal_read_reg_fields(IPA_TX_CFG, &cfg);
+	/* QMB0 (DDR) correlates with TX0, QMB1(PCIE) correlates with TX1 */
+	if (qmb == QMB_MASTER_SELECT_DDR)
+		cfg.tx0_prefetch_disable = true;
+	else
+		cfg.tx1_prefetch_disable = true;
+	ipahal_write_reg_fields(IPA_TX_CFG, &cfg);
+}
+
+/**
  * ipa3_get_pdev() - return a pointer to IPA dev struct
  *
  * Return value: a pointer to IPA dev struct
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 2a780b6..3c8688e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -901,6 +901,26 @@
 			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
 }
 
+static void ipareg_parse_tx_cfg(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	tx_cfg->tx0_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->tx1_prefetch_disable = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	tx_cfg->prefetch_almost_empty_size = IPA_GETFIELD_FROM_REG(val,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
 static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
 	const void *fields, u32 *val)
 {
@@ -1174,7 +1194,7 @@
 
 	/* IPAv3.5 */
 	[IPA_HW_v3_5][IPA_TX_CFG] = {
-		ipareg_construct_tx_cfg, ipareg_parse_dummy,
+		ipareg_construct_tx_cfg, ipareg_parse_tx_cfg,
 		0x000001FC, 0},
 	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
 		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 3e1a8fb..d747771 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1067,6 +1067,8 @@
 		IPAWANDBG_LOW
 		("SW filtering out none QMAP packet received from %s",
 		current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
 
@@ -1078,7 +1080,8 @@
 			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
 			goto send;
 		} else {
-			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			pr_err("[%s]fatal: ipa3_wwan_xmit stopped\n",
+				  dev->name);
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -1108,6 +1111,8 @@
 	if (ret) {
 		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
 		       dev->name, ret);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
 		return -EFAULT;
 	}
 	/* IPA_RM checking end */
@@ -1124,7 +1129,6 @@
 
 	if (ret) {
 		ret = NETDEV_TX_BUSY;
-		dev->stats.tx_dropped++;
 		goto out;
 	}
 
@@ -2681,6 +2685,23 @@
 {
 	int number, i;
 
+	/* error checking if ul_src_pipe_len is valid or not */
+	if (data->ul_src_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
+		data->ul_src_pipe_len < 0) {
+		IPAWANERR("UL src pipes %d exceeding max %d\n",
+			data->ul_src_pipe_len,
+			QMI_IPA_MAX_PIPES_V01);
+		return -EFAULT;
+	}
+	/* error checking if dl_dst_pipe_len is valid or not */
+	if (data->dl_dst_pipe_len > QMI_IPA_MAX_PIPES_V01 ||
+		data->dl_dst_pipe_len < 0) {
+		IPAWANERR("DL dst pipes %d exceeding max %d\n",
+			data->dl_dst_pipe_len,
+			QMI_IPA_MAX_PIPES_V01);
+		return -EFAULT;
+	}
+
 	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
 	data->ipa_client,
 	data->ul_src_pipe_len,
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 3330595..47da1b3 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -55,6 +55,8 @@
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
 
+#define PCIE20_CAP_LINKCTRLSTATUS 0x80
+
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
 struct device;
@@ -87,6 +89,7 @@
 	u32 rc_index; /* PCIE root complex index */
 	struct pci_dev *pcidev;
 	struct pci_saved_state *pristine_state;
+	bool l1_enabled_in_enum;
 
 	/* SMMU */
 	bool use_smmu; /* have SMMU enabled? */
@@ -476,6 +479,47 @@
 	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
 }
 
+int msm_11ad_ctrl_aspm_l1(struct msm11ad_ctx *ctx, bool enable)
+{
+	int rc;
+	u32 val;
+	struct pci_dev *pdev = ctx->pcidev;
+	bool l1_enabled;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pdev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n", rc);
+		return rc;
+	}
+	dev_dbg(ctx->dev, "PCIE20_CAP_LINKCTRLSTATUS read returns 0x%x\n", val);
+
+	l1_enabled = val & PCI_EXP_LNKCTL_ASPM_L1;
+	if (l1_enabled == enable) {
+		dev_dbg(ctx->dev, "ASPM_L1 is already %s\n",
+			l1_enabled ? "enabled" : "disabled");
+		return 0;
+	}
+
+	if (enable)
+		val |= PCI_EXP_LNKCTL_ASPM_L1; /* enable bit 1 */
+	else
+		val &= ~PCI_EXP_LNKCTL_ASPM_L1; /* disable bit 1 */
+
+	dev_dbg(ctx->dev, "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x)\n",
+		val);
+	rc = pci_write_config_dword(pdev,
+				    PCIE20_CAP_LINKCTRLSTATUS, val);
+	if (rc)
+		dev_err(ctx->dev,
+			"writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x) failed:%d\n",
+			val, rc);
+
+	return rc;
+}
+
 static int ops_suspend(void *handle)
 {
 	int rc;
@@ -561,6 +605,16 @@
 		goto err_suspend_rc;
 	}
 
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto err_suspend_rc;
+		}
+	}
+
 	return 0;
 
 err_suspend_rc:
@@ -847,6 +901,7 @@
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
 	int rc;
+	u32 val;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -965,6 +1020,31 @@
 		goto out_rc;
 	}
 	ctx->pcidev = pcidev;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pcidev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
+			rc);
+		goto out_rc;
+	}
+
+	ctx->l1_enabled_in_enum = val & PCI_EXP_LNKCTL_ASPM_L1;
+	dev_dbg(ctx->dev, "L1 is %s in enumeration\n",
+		ctx->l1_enabled_in_enum ? "enabled" : "disabled");
+
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto out_rc;
+		}
+	}
+
 	rc = pci_save_state(pcidev);
 	if (rc) {
 		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
@@ -1212,6 +1292,13 @@
 		 * TODO: Enable rf_clk3 clock before resetting the device to
 		 * ensure stable ref clock during the device reset
 		 */
+		/* Re-enable L1 in case it was enabled in enumeration */
+		if (ctx->l1_enabled_in_enum) {
+			rc = msm_11ad_ctrl_aspm_l1(ctx, true);
+			if (rc)
+				dev_err(ctx->dev,
+					"failed to enable L1, rc %d\n", rc);
+		}
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 566b884..07a0aef 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -280,6 +280,18 @@
 	((vband) == 0 ? CPR4_REG_MARGIN_TEMP_CORE(core) \
 			: 0x3AB0 + 0x40 * ((vband) - 1) + 0x4 * (core))
 
+#define CPRH_REG_MISC_REG2	0x3AAC
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK	GENMASK(31, 29)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT	29
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK	GENMASK(28, 24)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT	24
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK	GENMASK(23, 22)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT	22
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK	GENMASK(21, 20)
+#define CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT	20
+#define CPRH_MISC_REG2_ACD_AVG_EN_MASK	BIT(12)
+#define CPRH_MISC_REG2_ACD_AVG_ENABLE	BIT(12)
+
 /* SAW module registers */
 #define SAW_REG_AVS_CTL				0x904
 #define SAW_REG_AVS_LIMIT			0x908
@@ -1399,6 +1411,33 @@
 	}
 
 	/*
+	 * Configure CPRh ACD AVG registers on controllers
+	 * that support this feature.
+	 */
+	if (ctrl->cpr_hw_version >= CPRH_CPR_VERSION_4P5
+	    && ctrl->acd_avg_enabled) {
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_MASK,
+				  ctrl->acd_adj_up_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_UP_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_MASK,
+				  ctrl->acd_adj_down_step_limit <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_DOWN_LIMIT_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_MASK,
+				  ctrl->acd_adj_up_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_UP_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_MASK,
+				  ctrl->acd_adj_down_step_size <<
+				  CPRH_MISC_REG2_ACD_ADJ_STEP_SIZE_DOWN_SHIFT);
+		cpr3_masked_write(ctrl, CPRH_REG_MISC_REG2,
+				  CPRH_MISC_REG2_ACD_AVG_EN_MASK,
+				  CPRH_MISC_REG2_ACD_AVG_ENABLE);
+	}
+
+	/*
 	 * Program base voltage and voltage multiplier values which
 	 * are used for floor and initial voltage calculations by the
 	 * CPRh controller.
@@ -3188,7 +3227,8 @@
 	struct cpr4_sdelta *sdelta;
 	bool valid = false;
 	bool thread_valid;
-	int i, j, rc, new_volt, vdd_volt, dynamic_floor_volt, last_corner_volt;
+	int i, j, rc, new_volt, vdd_volt, dynamic_floor_volt;
+	int last_corner_volt = 0;
 	u32 reg_last_measurement = 0, sdelta_size;
 	int *sdelta_table, *boost_table;
 
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index 31d737ca..570ddfc 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -649,6 +649,20 @@
  *			defines the maximum number of VDD supply regulator steps
  *			that the voltage may be increased as the result of a
  *			single CPR measurement.
+ * @acd_adj_down_step_limit: Limits the number of PMIC steps to go down within
+ *			a given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_up_step_limit: Limits the number of PMIC steps to go up within a
+ *			given corner due to all ACD adjustments on some CPRh
+ *			controllers.
+ * @acd_adj_down_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD down
+ *			recommendation.
+ * @acd_adj_up_step_size: ACD step size in units of PMIC steps used for
+ *			target quotient adjustment due to an ACD up
+ *			recommendation.
+ * @acd_avg_enabled:	Boolean defining the enable state of the ACD AVG
+ *			feature.
  * @count_mode:		CPR controller count mode
  * @count_repeat:	Number of times to perform consecutive sensor
  *			measurements when using all-at-once count modes.
@@ -804,6 +818,11 @@
 	int			step_volt;
 	u32			down_error_step_limit;
 	u32			up_error_step_limit;
+	u32			acd_adj_down_step_limit;
+	u32			acd_adj_up_step_limit;
+	u32			acd_adj_down_step_size;
+	u32			acd_adj_up_step_size;
+	bool			acd_avg_enabled;
 	enum cpr3_count_mode	count_mode;
 	u32			count_repeat;
 	u32			proc_clock_throttle;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 892f704..a93e7d8 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -544,25 +544,25 @@
 		[CPRH_KBSS_POWER_CLUSTER_THREAD_ID] = {
 			688000,
 			812000,
-			884000,
+			896000,
 		},
 		[CPRH_KBSS_L3_THREAD_ID] = {
 			688000,
 			812000,
-			884000,
+			896000,
 		},
 	},
 	[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
 		[CPRH_KBSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
 			 756000,
 			 828000,
-			1056000,
+			1000000,
 		},
 	},
 };
 
 #define CPRH_KBSS_FUSE_STEP_VOLT		10000
-#define CPRH_SDM845_KBSS_FUSE_STEP_VOLT		11000
+#define CPRH_SDM845_KBSS_FUSE_STEP_VOLT		8000
 #define CPRH_KBSS_VOLTAGE_FUSE_SIZE		6
 #define CPRH_KBSS_QUOT_OFFSET_SCALE		5
 #define CPRH_KBSS_AGING_INIT_QUOT_DIFF_SIZE	8
@@ -2068,7 +2068,7 @@
 static int cprh_kbss_init_aging(struct cpr3_controller *ctrl)
 {
 	struct cprh_kbss_fuses *fuse = NULL;
-	struct cpr3_regulator *vreg;
+	struct cpr3_regulator *vreg = NULL;
 	u32 aging_ro_scale;
 	int i, j, rc = 0;
 
@@ -2221,6 +2221,46 @@
 		return rc;
 	}
 
+	ctrl->acd_avg_enabled = of_property_read_bool(ctrl->dev->of_node,
+					      "qcom,cpr-acd-avg-enable");
+	if (ctrl->acd_avg_enabled) {
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-limit",
+					  &ctrl->acd_adj_down_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-down-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-limit",
+					  &ctrl->acd_adj_up_step_limit);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-up-step-limit, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-down-step-size",
+					  &ctrl->acd_adj_down_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-down-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(ctrl->dev->of_node,
+					  "qcom,cpr-acd-adj-up-step-size",
+					  &ctrl->acd_adj_up_step_size);
+		if (rc) {
+			cpr3_err(ctrl, "error reading qcom,cpr-acd-adj-up-step-size, rc=%d\n",
+				 rc);
+			return rc;
+		}
+	}
+
 	rc = of_property_read_u32(ctrl->dev->of_node,
 				  "qcom,voltage-base",
 				  &ctrl->base_volt);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed92fb0..76b802c 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1712,6 +1712,9 @@
 		ap_dev->queue_depth = queue_depth;
 		ap_dev->raw_hwtype = device_type;
 		ap_dev->device_type = device_type;
+		/* CEX6 toleration: map to CEX5 */
+		if (device_type == AP_DEVICE_TYPE_CEX6)
+			ap_dev->device_type = AP_DEVICE_TYPE_CEX5;
 		ap_dev->functions = device_functions;
 		spin_lock_init(&ap_dev->lock);
 		INIT_LIST_HEAD(&ap_dev->pendingq);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d7fdf5c..fd66d2c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -105,6 +105,7 @@
 #define AP_DEVICE_TYPE_CEX3C	9
 #define AP_DEVICE_TYPE_CEX4	10
 #define AP_DEVICE_TYPE_CEX5	11
+#define AP_DEVICE_TYPE_CEX6	12
 
 /*
  * Known function facilities
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 91dfd58..c4fe95a 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -22,7 +22,7 @@
  *
  ****************************************************************************/
 
-#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -82,7 +82,7 @@
 		}
 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 		if (se_cmd->data_direction == DMA_TO_DEVICE) {
-			/*  residual data from an overflow write */
+			/* residual data from an overflow write */
 			rsp->flags = SRP_RSP_FLAG_DOOVER;
 			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
 		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
@@ -102,7 +102,7 @@
  * and the function returns TRUE.
  *
  * EXECUTION ENVIRONMENT:
- *      Interrupt or Process environment
+ *	Interrupt or Process environment
  */
 static bool connection_broken(struct scsi_info *vscsi)
 {
@@ -325,7 +325,7 @@
 }
 
 /**
- * ibmvscsis_send_init_message() -  send initialize message to the client
+ * ibmvscsis_send_init_message() - send initialize message to the client
  * @vscsi:	Pointer to our adapter structure
  * @format:	Which Init Message format to send
  *
@@ -383,13 +383,13 @@
 					      vscsi->cmd_q.base_addr);
 		if (crq) {
 			*format = (uint)(crq->format);
-			rc =  ERROR;
+			rc = ERROR;
 			crq->valid = INVALIDATE_CMD_RESP_EL;
 			dma_rmb();
 		}
 	} else {
 		*format = (uint)(crq->format);
-		rc =  ERROR;
+		rc = ERROR;
 		crq->valid = INVALIDATE_CMD_RESP_EL;
 		dma_rmb();
 	}
@@ -398,166 +398,6 @@
 }
 
 /**
- * ibmvscsis_establish_new_q() - Establish new CRQ queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state being established after resetting the queue
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_establish_new_q(struct scsi_info *vscsi,  uint new_state)
-{
-	long rc = ADAPT_SUCCESS;
-	uint format;
-
-	vscsi->flags &= PRESERVE_FLAG_FIELDS;
-	vscsi->rsp_q_timer.timer_pops = 0;
-	vscsi->debit = 0;
-	vscsi->credit = 0;
-
-	rc = vio_enable_interrupts(vscsi->dma_dev);
-	if (rc) {
-		pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc) {
-		dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
-			rc);
-		return rc;
-	}
-
-	if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_PARAMETER:
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_reset_queue() - Reset CRQ Queue
- * @vscsi:	Pointer to our adapter structure
- * @new_state:	New state to establish after resetting the queue
- *
- * This function calls h_free_q and then calls h_reg_q and does all
- * of the bookkeeping to get us back to where we can communicate.
- *
- * Actually, we don't always call h_free_crq.  A problem was discovered
- * where one partition would close and reopen his queue, which would
- * cause his partner to get a transport event, which would cause him to
- * close and reopen his queue, which would cause the original partition
- * to get a transport event, etc., etc.  To prevent this, we don't
- * actually close our queue if the client initiated the reset, (i.e.
- * either we got a transport event or we have detected that the client's
- * queue is gone)
- *
- * EXECUTION ENVIRONMENT:
- *	Process environment, called with interrupt lock held
- */
-static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
-{
-	int bytes;
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
-
-	/* don't reset, the client did it for us */
-	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
-		vscsi->flags &=  PRESERVE_FLAG_FIELDS;
-		vscsi->rsp_q_timer.timer_pops = 0;
-		vscsi->debit = 0;
-		vscsi->credit = 0;
-		vscsi->state = new_state;
-		vio_enable_interrupts(vscsi->dma_dev);
-	} else {
-		rc = ibmvscsis_free_command_q(vscsi);
-		if (rc == ADAPT_SUCCESS) {
-			vscsi->state = new_state;
-
-			bytes = vscsi->cmd_q.size * PAGE_SIZE;
-			rc = h_reg_crq(vscsi->dds.unit_id,
-				       vscsi->cmd_q.crq_token, bytes);
-			if (rc == H_CLOSED || rc == H_SUCCESS) {
-				rc = ibmvscsis_establish_new_q(vscsi,
-							       new_state);
-			}
-
-			if (rc != ADAPT_SUCCESS) {
-				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
-
-				vscsi->state = ERR_DISCONNECTED;
-				vscsi->flags |=  RESPONSE_Q_DOWN;
-				ibmvscsis_free_command_q(vscsi);
-			}
-		} else {
-			vscsi->state = ERR_DISCONNECTED;
-			vscsi->flags |= RESPONSE_Q_DOWN;
-		}
-	}
-}
-
-/**
- * ibmvscsis_free_cmd_resources() - Free command resources
- * @vscsi:	Pointer to our adapter structure
- * @cmd:	Command which is not longer in use
- *
- * Must be called with interrupt lock held.
- */
-static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
-					 struct ibmvscsis_cmd *cmd)
-{
-	struct iu_entry *iue = cmd->iue;
-
-	switch (cmd->type) {
-	case TASK_MANAGEMENT:
-	case SCSI_CDB:
-		/*
-		 * When the queue goes down this value is cleared, so it
-		 * cannot be cleared in this general purpose function.
-		 */
-		if (vscsi->debit)
-			vscsi->debit -= 1;
-		break;
-	case ADAPTER_MAD:
-		vscsi->flags &= ~PROCESSING_MAD;
-		break;
-	case UNSET_TYPE:
-		break;
-	default:
-		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
-			cmd->type);
-		break;
-	}
-
-	cmd->iue = NULL;
-	list_add_tail(&cmd->list, &vscsi->free_cmd);
-	srp_iu_put(iue);
-
-	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
-	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
-		vscsi->flags &= ~WAIT_FOR_IDLE;
-		complete(&vscsi->wait_idle);
-	}
-}
-
-/**
  * ibmvscsis_disconnect() - Helper function to disconnect
  * @work:	Pointer to work_struct, gives access to our adapter structure
  *
@@ -576,7 +416,6 @@
 					       proc_work);
 	u16 new_state;
 	bool wait_idle = false;
-	long rc = ADAPT_SUCCESS;
 
 	spin_lock_bh(&vscsi->intr_lock);
 	new_state = vscsi->new_state;
@@ -590,7 +429,7 @@
 	 * should transitition to the new state
 	 */
 	switch (vscsi->state) {
-	/*  Should never be called while in this state. */
+	/* Should never be called while in this state. */
 	case NO_QUEUE:
 	/*
 	 * Can never transition from this state;
@@ -629,30 +468,24 @@
 			vscsi->state = new_state;
 		break;
 
-	/*
-	 * If this is a transition into an error state.
-	 * a client is attempting to establish a connection
-	 * and has violated the RPA protocol.
-	 * There can be nothing pending on the adapter although
-	 * there can be requests in the command queue.
-	 */
 	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
 		switch (new_state) {
-		case ERR_DISCONNECT:
-			vscsi->flags |= RESPONSE_Q_DOWN;
+		case UNCONFIGURING:
 			vscsi->state = new_state;
+			vscsi->flags |= RESPONSE_Q_DOWN;
 			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
 					  DISCONNECT_SCHEDULED);
-			ibmvscsis_free_command_q(vscsi);
-			break;
-		case ERR_DISCONNECT_RECONNECT:
-			ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
+			dma_rmb();
+			if (vscsi->flags & CFG_SLEEPING) {
+				vscsi->flags &= ~CFG_SLEEPING;
+				complete(&vscsi->unconfig);
+			}
 			break;
 
 		/* should never happen */
+		case ERR_DISCONNECT:
+		case ERR_DISCONNECT_RECONNECT:
 		case WAIT_IDLE:
-			rc = ERROR;
 			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
 				vscsi->state);
 			break;
@@ -661,6 +494,13 @@
 
 	case WAIT_IDLE:
 		switch (new_state) {
+		case UNCONFIGURING:
+			vscsi->flags |= RESPONSE_Q_DOWN;
+			vscsi->state = new_state;
+			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
+					  DISCONNECT_SCHEDULED);
+			ibmvscsis_free_command_q(vscsi);
+			break;
 		case ERR_DISCONNECT:
 		case ERR_DISCONNECT_RECONNECT:
 			vscsi->state = new_state;
@@ -789,7 +629,6 @@
 			break;
 
 		case WAIT_ENABLED:
-		case PART_UP_WAIT_ENAB:
 		case WAIT_IDLE:
 		case WAIT_CONNECTION:
 		case CONNECTED:
@@ -807,6 +646,310 @@
 }
 
 /**
+ * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	case UNCONFIGURING:
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case WAIT_CONNECTION:
+		vscsi->state = CONNECTED;
+		break;
+
+	case WAIT_IDLE:
+	case SRP_PROCESSING:
+	case CONNECTED:
+	case WAIT_ENABLED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_handle_init_msg() - Respond to an Init Message
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+
+	switch (vscsi->state) {
+	case WAIT_CONNECTION:
+		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+			vscsi->state = CONNECTED;
+			break;
+
+		case H_PARAMETER:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+			break;
+
+		case H_DROPPED:
+			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+				rc);
+			rc = ERROR;
+			ibmvscsis_post_disconnect(vscsi,
+						  ERR_DISCONNECT_RECONNECT, 0);
+			break;
+
+		case H_CLOSED:
+			pr_warn("init_msg: failed to send, rc %ld\n", rc);
+			rc = 0;
+			break;
+		}
+		break;
+
+	case UNDEFINED:
+		rc = ERROR;
+		break;
+
+	case UNCONFIGURING:
+		break;
+
+	case WAIT_ENABLED:
+	case CONNECTED:
+	case SRP_PROCESSING:
+	case WAIT_IDLE:
+	case NO_QUEUE:
+	case ERR_DISCONNECT:
+	case ERR_DISCONNECT_RECONNECT:
+	case ERR_DISCONNECTED:
+	default:
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
+			vscsi->state);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_init_msg() - Respond to an init message
+ * @vscsi:	Pointer to our adapter structure
+ * @crq:	Pointer to CRQ element containing the Init Message
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Interrupt, interrupt lock held
+ */
+static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
+{
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+
+	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
+		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
+		      0);
+	if (rc == H_SUCCESS) {
+		vscsi->client_data.partition_number =
+			be64_to_cpu(*(u64 *)vscsi->map_buf);
+		pr_debug("init_msg, part num %d\n",
+			 vscsi->client_data.partition_number);
+	} else {
+		pr_debug("init_msg h_vioctl rc %ld\n", rc);
+		rc = ADAPT_SUCCESS;
+	}
+
+	if (crq->format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	} else if (crq->format == INIT_COMPLETE_MSG) {
+		rc = ibmvscsis_handle_init_compl_msg(vscsi);
+	} else {
+		rc = ERROR;
+		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
+			(uint)crq->format);
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_establish_new_q() - Establish new CRQ queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * Must be called with interrupt lock held.
+ */
+static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
+{
+	long rc = ADAPT_SUCCESS;
+	uint format;
+
+	vscsi->flags &= PRESERVE_FLAG_FIELDS;
+	vscsi->rsp_q_timer.timer_pops = 0;
+	vscsi->debit = 0;
+	vscsi->credit = 0;
+
+	rc = vio_enable_interrupts(vscsi->dma_dev);
+	if (rc) {
+		pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	rc = ibmvscsis_check_init_msg(vscsi, &format);
+	if (rc) {
+		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
+			rc);
+		return rc;
+	}
+
+	if (format == UNUSED_FORMAT) {
+		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
+		switch (rc) {
+		case H_SUCCESS:
+		case H_DROPPED:
+		case H_CLOSED:
+			rc = ADAPT_SUCCESS;
+			break;
+
+		case H_PARAMETER:
+		case H_HARDWARE:
+			break;
+
+		default:
+			vscsi->state = UNDEFINED;
+			rc = H_HARDWARE;
+			break;
+		}
+	} else if (format == INIT_MSG) {
+		rc = ibmvscsis_handle_init_msg(vscsi);
+	}
+
+	return rc;
+}
+
+/**
+ * ibmvscsis_reset_queue() - Reset CRQ Queue
+ * @vscsi:	Pointer to our adapter structure
+ *
+ * This function calls h_free_q and then calls h_reg_q and does all
+ * of the bookkeeping to get us back to where we can communicate.
+ *
+ * Actually, we don't always call h_free_crq.  A problem was discovered
+ * where one partition would close and reopen his queue, which would
+ * cause his partner to get a transport event, which would cause him to
+ * close and reopen his queue, which would cause the original partition
+ * to get a transport event, etc., etc.  To prevent this, we don't
+ * actually close our queue if the client initiated the reset, (i.e.
+ * either we got a transport event or we have detected that the client's
+ * queue is gone)
+ *
+ * EXECUTION ENVIRONMENT:
+ *	Process environment, called with interrupt lock held
+ */
+static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
+{
+	int bytes;
+	long rc = ADAPT_SUCCESS;
+
+	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+
+	/* don't reset, the client did it for us */
+	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
+		vscsi->flags &= PRESERVE_FLAG_FIELDS;
+		vscsi->rsp_q_timer.timer_pops = 0;
+		vscsi->debit = 0;
+		vscsi->credit = 0;
+		vscsi->state = WAIT_CONNECTION;
+		vio_enable_interrupts(vscsi->dma_dev);
+	} else {
+		rc = ibmvscsis_free_command_q(vscsi);
+		if (rc == ADAPT_SUCCESS) {
+			vscsi->state = WAIT_CONNECTION;
+
+			bytes = vscsi->cmd_q.size * PAGE_SIZE;
+			rc = h_reg_crq(vscsi->dds.unit_id,
+				       vscsi->cmd_q.crq_token, bytes);
+			if (rc == H_CLOSED || rc == H_SUCCESS) {
+				rc = ibmvscsis_establish_new_q(vscsi);
+			}
+
+			if (rc != ADAPT_SUCCESS) {
+				pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+
+				vscsi->state = ERR_DISCONNECTED;
+				vscsi->flags |= RESPONSE_Q_DOWN;
+				ibmvscsis_free_command_q(vscsi);
+			}
+		} else {
+			vscsi->state = ERR_DISCONNECTED;
+			vscsi->flags |= RESPONSE_Q_DOWN;
+		}
+	}
+}
+
+/**
+ * ibmvscsis_free_cmd_resources() - Free command resources
+ * @vscsi:	Pointer to our adapter structure
+ * @cmd:	Command which is no longer in use
+ *
+ * Must be called with interrupt lock held.
+ */
+static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
+					 struct ibmvscsis_cmd *cmd)
+{
+	struct iu_entry *iue = cmd->iue;
+
+	switch (cmd->type) {
+	case TASK_MANAGEMENT:
+	case SCSI_CDB:
+		/*
+		 * When the queue goes down this value is cleared, so it
+		 * cannot be cleared in this general purpose function.
+		 */
+		if (vscsi->debit)
+			vscsi->debit -= 1;
+		break;
+	case ADAPTER_MAD:
+		vscsi->flags &= ~PROCESSING_MAD;
+		break;
+	case UNSET_TYPE:
+		break;
+	default:
+		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
+			cmd->type);
+		break;
+	}
+
+	cmd->iue = NULL;
+	list_add_tail(&cmd->list, &vscsi->free_cmd);
+	srp_iu_put(iue);
+
+	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
+	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
+		vscsi->flags &= ~WAIT_FOR_IDLE;
+		complete(&vscsi->wait_idle);
+	}
+}
+
+/**
  * ibmvscsis_trans_event() - Handle a Transport Event
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ entry containing the Transport Event
@@ -864,10 +1007,6 @@
 						   TRANS_EVENT));
 			break;
 
-		case PART_UP_WAIT_ENAB:
-			vscsi->state = WAIT_ENABLED;
-			break;
-
 		case SRP_PROCESSING:
 			if ((vscsi->debit > 0) ||
 			    !list_empty(&vscsi->schedule_q) ||
@@ -896,7 +1035,7 @@
 		}
 	}
 
-	rc =  vscsi->flags & SCHEDULE_DISCONNECT;
+	rc = vscsi->flags & SCHEDULE_DISCONNECT;
 
 	pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
 		 vscsi->flags, vscsi->state, rc);
@@ -1067,16 +1206,28 @@
 		free_qs = true;
 
 	switch (vscsi->state) {
+	case UNCONFIGURING:
+		ibmvscsis_free_command_q(vscsi);
+		dma_rmb();
+		isync();
+		if (vscsi->flags & CFG_SLEEPING) {
+			vscsi->flags &= ~CFG_SLEEPING;
+			complete(&vscsi->unconfig);
+		}
+		break;
 	case ERR_DISCONNECT_RECONNECT:
-		ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
+		ibmvscsis_reset_queue(vscsi);
 		pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
 		break;
 
 	case ERR_DISCONNECT:
 		ibmvscsis_free_command_q(vscsi);
-		vscsi->flags &= ~DISCONNECT_SCHEDULED;
+		vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
 		vscsi->flags |= RESPONSE_Q_DOWN;
-		vscsi->state = ERR_DISCONNECTED;
+		if (vscsi->tport.enabled)
+			vscsi->state = ERR_DISCONNECTED;
+		else
+			vscsi->state = WAIT_ENABLED;
 		pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
 			 vscsi->flags, vscsi->state);
 		break;
@@ -1221,7 +1372,7 @@
  * @iue:	Information Unit containing the Adapter Info MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt adpater lock is held
+ *	Interrupt adapter lock is held
  */
 static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
 				   struct iu_entry *iue)
@@ -1621,8 +1772,8 @@
 					be64_to_cpu(msg_hi),
 					be64_to_cpu(cmd->rsp.tag));
 
-			pr_debug("send_messages: tag 0x%llx, rc %ld\n",
-				 be64_to_cpu(cmd->rsp.tag), rc);
+			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
 
 			/* if all ok free up the command element resources */
 			if (rc == H_SUCCESS) {
@@ -1692,7 +1843,7 @@
  * @crq:	Pointer to the CRQ entry containing the MAD request
  *
  * EXECUTION ENVIRONMENT:
- *	Interrupt  called with adapter lock held
+ *	Interrupt, called with adapter lock held
  */
 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
 {
@@ -1746,14 +1897,7 @@
 
 		pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
 
-		if (be16_to_cpu(mad->length) < 0) {
-			dev_err(&vscsi->dev, "mad: length is < 0\n");
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			rc = SRP_VIOLATION;
-		} else {
-			rc = ibmvscsis_process_mad(vscsi, iue);
-		}
+		rc = ibmvscsis_process_mad(vscsi, iue);
 
 		pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
 			 rc);
@@ -1865,7 +2009,7 @@
 		break;
 	case H_PERMISSION:
 		if (connection_broken(vscsi))
-			flag_bits =  RESPONSE_Q_DOWN | CLIENT_FAILED;
+			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
 		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
 			rc);
 		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
@@ -2188,156 +2332,6 @@
 }
 
 /**
- * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	case UNCONFIGURING:
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case WAIT_CONNECTION:
-		vscsi->state = CONNECTED;
-		break;
-
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-	case WAIT_ENABLED:
-	case PART_UP_WAIT_ENAB:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_handle_init_msg() - Respond to an Init Message
- * @vscsi:	Pointer to our adapter structure
- *
- * Must be called with interrupt lock held.
- */
-static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
-{
-	long rc = ADAPT_SUCCESS;
-
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		vscsi->state = PART_UP_WAIT_ENAB;
-		break;
-
-	case WAIT_CONNECTION:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			break;
-
-		case H_PARAMETER:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
-			break;
-
-		case H_DROPPED:
-			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
-				rc);
-			rc = ERROR;
-			ibmvscsis_post_disconnect(vscsi,
-						  ERR_DISCONNECT_RECONNECT, 0);
-			break;
-
-		case H_CLOSED:
-			pr_warn("init_msg: failed to send, rc %ld\n", rc);
-			rc = 0;
-			break;
-		}
-		break;
-
-	case UNDEFINED:
-		rc = ERROR;
-		break;
-
-	case UNCONFIGURING:
-		break;
-
-	case PART_UP_WAIT_ENAB:
-	case CONNECTED:
-	case SRP_PROCESSING:
-	case WAIT_IDLE:
-	case NO_QUEUE:
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	case ERR_DISCONNECTED:
-	default:
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
-			vscsi->state);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-		break;
-	}
-
-	return rc;
-}
-
-/**
- * ibmvscsis_init_msg() - Respond to an init message
- * @vscsi:	Pointer to our adapter structure
- * @crq:	Pointer to CRQ element containing the Init Message
- *
- * EXECUTION ENVIRONMENT:
- *	Interrupt, interrupt lock held
- */
-static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
-{
-	long rc = ADAPT_SUCCESS;
-
-	pr_debug("init_msg: state 0x%hx\n", vscsi->state);
-
-	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
-		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
-		      0);
-	if (rc == H_SUCCESS) {
-		vscsi->client_data.partition_number =
-			be64_to_cpu(*(u64 *)vscsi->map_buf);
-		pr_debug("init_msg, part num %d\n",
-			 vscsi->client_data.partition_number);
-	} else {
-		pr_debug("init_msg h_vioctl rc %ld\n", rc);
-		rc = ADAPT_SUCCESS;
-	}
-
-	if (crq->format == INIT_MSG) {
-		rc = ibmvscsis_handle_init_msg(vscsi);
-	} else if (crq->format == INIT_COMPLETE_MSG) {
-		rc = ibmvscsis_handle_init_compl_msg(vscsi);
-	} else {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
-			(uint)crq->format);
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	}
-
-	return rc;
-}
-
-/**
  * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
  * @vscsi:	Pointer to our adapter structure
  * @crq:	Pointer to CRQ element containing the SRP request
@@ -2392,7 +2386,7 @@
 		break;
 
 	case VALID_TRANS_EVENT:
-		rc =  ibmvscsis_trans_event(vscsi, crq);
+		rc = ibmvscsis_trans_event(vscsi, crq);
 		break;
 
 	case VALID_INIT_MSG:
@@ -2523,7 +2517,6 @@
 		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
 			srp->tag);
 		goto fail;
-		return;
 	}
 
 	cmd->rsp.sol_not = srp->sol_not;
@@ -2560,6 +2553,10 @@
 			       data_len, attr, dir, 0);
 	if (rc) {
 		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
+		spin_lock_bh(&vscsi->intr_lock);
+		list_del(&cmd->list);
+		ibmvscsis_free_cmd_resources(vscsi, cmd);
+		spin_unlock_bh(&vscsi->intr_lock);
 		goto fail;
 	}
 	return;
@@ -2639,6 +2636,9 @@
 		if (rc) {
 			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
 				rc);
+			spin_lock_bh(&vscsi->intr_lock);
+			list_del(&cmd->list);
+			spin_unlock_bh(&vscsi->intr_lock);
 			cmd->se_cmd.se_tmr_req->response =
 				TMR_FUNCTION_REJECTED;
 		}
@@ -2787,36 +2787,6 @@
 }
 
 /**
- * ibmvscsis_check_q() - Helper function to Check Init Message Valid
- * @vscsi:	Pointer to our adapter structure
- *
- * Checks if a initialize message was queued by the initiatior
- * while the timing window was open.  This function is called from
- * probe after the CRQ is created and interrupts are enabled.
- * It would only be used by adapters who wait for some event before
- * completing the init handshake with the client.  For ibmvscsi, this
- * event is waiting for the port to be enabled.
- *
- * EXECUTION ENVIRONMENT:
- *	Process level only, interrupt lock held
- */
-static long ibmvscsis_check_q(struct scsi_info *vscsi)
-{
-	uint format;
-	long rc;
-
-	rc = ibmvscsis_check_init_msg(vscsi, &format);
-	if (rc)
-		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
-	else if (format == UNUSED_FORMAT)
-		vscsi->state = WAIT_ENABLED;
-	else
-		vscsi->state = PART_UP_WAIT_ENAB;
-
-	return rc;
-}
-
-/**
  * ibmvscsis_enable_change_state() - Set new state based on enabled status
  * @vscsi:	Pointer to our adapter structure
  *
@@ -2827,77 +2797,19 @@
  */
 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
 {
+	int bytes;
 	long rc = ADAPT_SUCCESS;
 
-handle_state_change:
-	switch (vscsi->state) {
-	case WAIT_ENABLED:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state =  WAIT_CONNECTION;
-			rc = ADAPT_SUCCESS;
-			break;
+	bytes = vscsi->cmd_q.size * PAGE_SIZE;
+	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
+	if (rc == H_CLOSED || rc == H_SUCCESS) {
+		vscsi->state = WAIT_CONNECTION;
+		rc = ibmvscsis_establish_new_q(vscsi);
+	}
 
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			vscsi->state = UNDEFINED;
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-	case PART_UP_WAIT_ENAB:
-		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
-		switch (rc) {
-		case H_SUCCESS:
-			vscsi->state = CONNECTED;
-			rc = ADAPT_SUCCESS;
-			break;
-
-		case H_DROPPED:
-		case H_CLOSED:
-			vscsi->state = WAIT_ENABLED;
-			goto handle_state_change;
-
-		case H_PARAMETER:
-			break;
-
-		case H_HARDWARE:
-			break;
-
-		default:
-			rc = H_HARDWARE;
-			break;
-		}
-		break;
-
-	case WAIT_CONNECTION:
-	case WAIT_IDLE:
-	case SRP_PROCESSING:
-	case CONNECTED:
-		rc = ADAPT_SUCCESS;
-		break;
-		/* should not be able to get here */
-	case UNCONFIGURING:
-		rc = ERROR;
-		vscsi->state = UNDEFINED;
-		break;
-
-		/* driver should never allow this to happen */
-	case ERR_DISCONNECT:
-	case ERR_DISCONNECT_RECONNECT:
-	default:
-		dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n",
-			vscsi->state);
-		rc = ADAPT_SUCCESS;
-		break;
+	if (rc != ADAPT_SUCCESS) {
+		vscsi->state = ERR_DISCONNECTED;
+		vscsi->flags |= RESPONSE_Q_DOWN;
 	}
 
 	return rc;
@@ -2917,7 +2829,6 @@
  */
 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
 {
-	long rc = 0;
 	int pages;
 	struct vio_dev *vdev = vscsi->dma_dev;
 
@@ -2941,22 +2852,7 @@
 		return -ENOMEM;
 	}
 
-	rc =  h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE);
-	if (rc) {
-		if (rc == H_CLOSED) {
-			vscsi->state = WAIT_ENABLED;
-			rc = 0;
-		} else {
-			dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token,
-					 PAGE_SIZE, DMA_BIDIRECTIONAL);
-			free_page((unsigned long)vscsi->cmd_q.base_addr);
-			rc = -ENODEV;
-		}
-	} else {
-		vscsi->state = WAIT_ENABLED;
-	}
-
-	return rc;
+	return 0;
 }
 
 /**
@@ -3271,7 +3167,7 @@
 	/*
 	 * if we are in a path where we are waiting for all pending commands
 	 * to complete because we received a transport event and anything in
-	 * the command queue is for a new connection,  do nothing
+	 * the command queue is for a new connection, do nothing
 	 */
 	if (TARGET_STOP(vscsi)) {
 		vio_enable_interrupts(vscsi->dma_dev);
@@ -3315,7 +3211,7 @@
 				 * everything but transport events on the queue
 				 *
 				 * need to decrement the queue index so we can
-				 * look at the elment again
+				 * look at the element again
 				 */
 				if (vscsi->cmd_q.index)
 					vscsi->cmd_q.index -= 1;
@@ -3379,7 +3275,8 @@
 	INIT_LIST_HEAD(&vscsi->waiting_rsp);
 	INIT_LIST_HEAD(&vscsi->active_q);
 
-	snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev));
+	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
+		 dev_name(&vdev->dev));
 
 	pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
 
@@ -3394,6 +3291,9 @@
 	strncat(vscsi->eye, vdev->name, MAX_EYE);
 
 	vscsi->dds.unit_id = vdev->unit_address;
+	strncpy(vscsi->dds.partition_name, partition_name,
+		sizeof(vscsi->dds.partition_name));
+	vscsi->dds.partition_num = partition_number;
 
 	spin_lock_bh(&ibmvscsis_dev_lock);
 	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
@@ -3470,6 +3370,7 @@
 		     (unsigned long)vscsi);
 
 	init_completion(&vscsi->wait_idle);
+	init_completion(&vscsi->unconfig);
 
 	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
 	vscsi->work_q = create_workqueue(wq_name);
@@ -3486,31 +3387,12 @@
 		goto destroy_WQ;
 	}
 
-	spin_lock_bh(&vscsi->intr_lock);
-	vio_enable_interrupts(vdev);
-	if (rc) {
-		dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc);
-		rc = -ENODEV;
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto free_irq;
-	}
-
-	if (ibmvscsis_check_q(vscsi)) {
-		rc = ERROR;
-		dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc);
-		spin_unlock_bh(&vscsi->intr_lock);
-		goto disable_interrupt;
-	}
-	spin_unlock_bh(&vscsi->intr_lock);
+	vscsi->state = WAIT_ENABLED;
 
 	dev_set_drvdata(&vdev->dev, vscsi);
 
 	return 0;
 
-disable_interrupt:
-	vio_disable_interrupts(vdev);
-free_irq:
-	free_irq(vdev->irq, vscsi);
 destroy_WQ:
 	destroy_workqueue(vscsi->work_q);
 unmap_buf:
@@ -3544,10 +3426,11 @@
 
 	pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
 
-	/*
-	 * TBD: Need to handle if there are commands on the waiting_rsp q
-	 *      Actually, can there still be cmds outstanding to tcm?
-	 */
+	spin_lock_bh(&vscsi->intr_lock);
+	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
+	vscsi->flags |= CFG_SLEEPING;
+	spin_unlock_bh(&vscsi->intr_lock);
+	wait_for_completion(&vscsi->unconfig);
 
 	vio_disable_interrupts(vdev);
 	free_irq(vdev->irq, vscsi);
@@ -3556,7 +3439,6 @@
 			 DMA_BIDIRECTIONAL);
 	kfree(vscsi->map_buf);
 	tasklet_kill(&vscsi->work_task);
-	ibmvscsis_unregister_command_q(vscsi);
 	ibmvscsis_destroy_command_q(vscsi);
 	ibmvscsis_freetimer(vscsi);
 	ibmvscsis_free_cmds(vscsi);
@@ -3610,7 +3492,7 @@
 
 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
 	if (num)
-		partition_number = *num;
+		partition_number = of_read_number(num, 1);
 
 	of_node_put(rootdn);
 
@@ -3904,18 +3786,22 @@
 	}
 
 	if (tmp) {
-		tport->enabled = true;
 		spin_lock_bh(&vscsi->intr_lock);
+		tport->enabled = true;
 		lrc = ibmvscsis_enable_change_state(vscsi);
 		if (lrc)
 			pr_err("enable_change_state failed, rc %ld state %d\n",
 			       lrc, vscsi->state);
 		spin_unlock_bh(&vscsi->intr_lock);
 	} else {
+		spin_lock_bh(&vscsi->intr_lock);
 		tport->enabled = false;
+		/* This simulates the server going down */
+		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
+		spin_unlock_bh(&vscsi->intr_lock);
 	}
 
-	pr_debug("tpg_enable_store, state %d\n", vscsi->state);
+	pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
 
 	return count;
 }
@@ -3985,10 +3871,10 @@
 ATTRIBUTE_GROUPS(ibmvscsis_dev);
 
 static struct class ibmvscsis_class = {
-	.name           = "ibmvscsis",
-	.dev_release    = ibmvscsis_dev_release,
-	.class_attrs    = ibmvscsis_class_attrs,
-	.dev_groups     = ibmvscsis_dev_groups,
+	.name		= "ibmvscsis",
+	.dev_release	= ibmvscsis_dev_release,
+	.class_attrs	= ibmvscsis_class_attrs,
+	.dev_groups	= ibmvscsis_dev_groups,
 };
 
 static struct vio_device_id ibmvscsis_device_table[] = {
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
index 981a0c9..98b0ca7 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
@@ -204,8 +204,6 @@
 	struct list_head waiting_rsp;
 #define NO_QUEUE                    0x00
 #define WAIT_ENABLED                0X01
-	/* driver has received an initialize command */
-#define PART_UP_WAIT_ENAB           0x02
 #define WAIT_CONNECTION             0x04
 	/* have established a connection */
 #define CONNECTED                   0x08
@@ -259,6 +257,8 @@
 #define SCHEDULE_DISCONNECT           0x00400
 	/* disconnect handler is scheduled */
 #define DISCONNECT_SCHEDULED          0x00800
+	/* remove function is sleeping */
+#define CFG_SLEEPING                  0x01000
 	u32 flags;
 	/* adapter lock */
 	spinlock_t intr_lock;
@@ -287,6 +287,7 @@
 
 	struct workqueue_struct *work_q;
 	struct completion wait_idle;
+	struct completion unconfig;
 	struct device dev;
 	struct vio_dev *dma_dev;
 	struct srp_target target;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0a2eb98..7552357 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6617,8 +6617,8 @@
 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 {
 	int ret = 0;
-	struct scsi_device *sdev_rpmb;
-	struct scsi_device *sdev_boot;
+	struct scsi_device *sdev_rpmb = NULL;
+	struct scsi_device *sdev_boot = NULL;
 	bool is_bootable_dev = false;
 	bool is_embedded_dev = false;
 
diff --git a/drivers/soc/qcom/debug_core.c b/drivers/soc/qcom/debug_core.c
index 019360a..164a866 100644
--- a/drivers/soc/qcom/debug_core.c
+++ b/drivers/soc/qcom/debug_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -290,8 +290,8 @@
 
 int msm_core_debug_init(void)
 {
-	struct dentry *dir;
-	struct dentry *file;
+	struct dentry *dir = NULL;
+	struct dentry *file = NULL;
 	int i;
 
 	msm_core_data = get_cpu_pwr_stats();
diff --git a/drivers/soc/qcom/gladiator_hang_detect.c b/drivers/soc/qcom/gladiator_hang_detect.c
index 7fc2825..b0940ad 100644
--- a/drivers/soc/qcom/gladiator_hang_detect.c
+++ b/drivers/soc/qcom/gladiator_hang_detect.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,7 +71,7 @@
 		hang_dev->M1_threshold = threshold_val;
 	else if (offset == hang_dev->M2_offset)
 		hang_dev->M2_threshold = threshold_val;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		hang_dev->PCIO_threshold = threshold_val;
 }
 
@@ -86,7 +86,7 @@
 		*reg_value = hang_dev->M1_threshold;
 	else if (offset == hang_dev->M2_offset)
 		*reg_value = hang_dev->M2_threshold;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		*reg_value = hang_dev->PCIO_threshold;
 }
 
@@ -101,7 +101,7 @@
 		hang_dev->M1_enable = enabled;
 	else if (offset == hang_dev->M2_offset)
 		hang_dev->M2_enable = enabled;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		hang_dev->PCIO_enable = enabled;
 }
 
@@ -116,7 +116,7 @@
 		*reg_value = hang_dev->M1_enable;
 	else if (offset == hang_dev->M2_offset)
 		*reg_value = hang_dev->M2_enable;
-	else if (offset == hang_dev->PCIO_offset)
+	else
 		*reg_value = hang_dev->PCIO_enable;
 }
 
@@ -475,7 +475,7 @@
 	struct device_node *node = pdev->dev.of_node;
 	struct hang_detect *hang_det = NULL;
 	int i = 0, ret;
-	u32 NR_GLA_REG;
+	u32 NR_GLA_REG = 0;
 	u32 *treg;
 	u32 creg;
 
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 49a0173..8d2a758 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
 #include "glink_private.h"
 #include "glink_xprt_if.h"
 
+#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
 /* Number of internal IPC Logging log pages */
 #define NUM_LOG_PAGES	10
 #define GLINK_PM_QOS_HOLDOFF_MS		10
@@ -38,6 +39,8 @@
 #define GLINK_QOS_DEF_MTU		2048
 
 #define GLINK_KTHREAD_PRIO 1
+
+static rwlock_t magic_lock;
 /**
  * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
  * @max_rate_kBps:	Maximum rate supported by the priority bucket.
@@ -309,6 +312,7 @@
 	unsigned long req_rate_kBps;
 	uint32_t tx_intent_cnt;
 	uint32_t tx_cnt;
+	uint32_t magic_number;
 };
 
 static struct glink_core_if core_impl;
@@ -437,6 +441,37 @@
 #define GLINK_GET_CH_TX_STATE(ctx) \
 		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
 
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+	read_lock_irqsave(&magic_lock, flags);
+	if (ctx->magic_number != GLINK_CTX_CANARY) {
+		read_unlock_irqrestore(&magic_lock, flags);
+		return -EINVAL;
+	}
+	rwref_get(&ctx->ch_state_lhb2);
+	read_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
+static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+{
+	unsigned long flags;
+
+	if (!update_magic) {
+		rwref_put(&ctx->ch_state_lhb2);
+		return 0;
+	}
+	write_lock_irqsave(&magic_lock, flags);
+	ctx->magic_number = 0;
+	rwref_put(&ctx->ch_state_lhb2);
+	write_unlock_irqrestore(&magic_lock, flags);
+	return 0;
+}
+
 /**
  * glink_ssr() - Clean up locally for SSR by simulating remote close
  * @subsystem:	The name of the subsystem being restarted
@@ -2548,6 +2583,7 @@
 	ctx->notify_tx_abort = cfg->notify_tx_abort;
 	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
 	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+	ctx->magic_number = GLINK_CTX_CANARY;
 
 	if (!ctx->notify_rx_intent_req)
 		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2583,7 +2619,6 @@
 
 	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
 			__func__, ctx);
-
 	return ctx;
 }
 EXPORT_SYMBOL(glink_open);
@@ -2683,15 +2718,19 @@
 	unsigned long flags;
 	bool is_empty = false;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
-	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+		glink_put_ch_ctx(ctx, false);
 		return 0;
+	}
 
 	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
 		/* close already pending */
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -2756,6 +2795,7 @@
 
 	rwref_put(&ctx->ch_state_lhb2);
 	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+	glink_put_ch_ctx(ctx, true);
 	return ret;
 }
 EXPORT_SYMBOL(glink_close);
@@ -2814,29 +2854,30 @@
 	if (!size)
 		return -EINVAL;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
 	if (!(vbuf_provider || pbuf_provider)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (!ch_is_fully_opened(ctx)) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto glink_tx_common_err;
 	}
 
 	if (size > GLINK_MAX_PKT_SIZE) {
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto glink_tx_common_err;
 	}
 
 	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
 		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EOPNOTSUPP;
+			ret = -EOPNOTSUPP;
+			goto glink_tx_common_err;
 		}
 		tracer_pkt_log_event(data, GLINK_CORE_TX);
 	}
@@ -2848,16 +2889,16 @@
 			GLINK_ERR_CH(ctx,
 				"%s: R[%u]:%zu Intent not present for lcid\n",
 				__func__, riid, size);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto glink_tx_common_err;
 		}
 		if (is_atomic && !(ctx->transport_ptr->capabilities &
 					  GCAP_AUTO_QUEUE_RX_INT)) {
 			GLINK_ERR_CH(ctx,
 				"%s: Cannot request intent in atomic context\n",
 				__func__);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto glink_tx_common_err;
 		}
 
 		/* request intent of correct size */
@@ -2867,20 +2908,18 @@
 		if (ret) {
 			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
 					__func__, ret);
-			rwref_read_put(&ctx->ch_state_lhb2);
-			return ret;
+			goto glink_tx_common_err;
 		}
 
 		while (ch_pop_remote_rx_intent(ctx, size, &riid,
 						&intent_size, &cookie)) {
-			rwref_get(&ctx->ch_state_lhb2);
 			rwref_read_put(&ctx->ch_state_lhb2);
 			if (is_atomic) {
 				GLINK_ERR_CH(ctx,
 				    "%s Intent of size %zu not ready\n",
 				    __func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
@@ -2888,8 +2927,8 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Channel closed while waiting for intent\n",
 					__func__);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EBUSY;
+				ret = -EBUSY;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the remote intent req ack */
@@ -2899,8 +2938,8 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request ack with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			if (!ctx->int_req_ack) {
@@ -2908,8 +2947,8 @@
 				    "%s: Intent Request with size: %zu %s",
 				    __func__, size,
 				    "not granted for lcid\n");
-				rwref_put(&ctx->ch_state_lhb2);
-				return -EAGAIN;
+				ret = -EAGAIN;
+				goto glink_tx_common_err_2;
 			}
 
 			/* wait for the rx_intent from remote side */
@@ -2919,13 +2958,12 @@
 				GLINK_ERR_CH(ctx,
 					"%s: Intent request with size: %zu not granted for lcid\n",
 					__func__, size);
-				rwref_put(&ctx->ch_state_lhb2);
-				return -ETIMEDOUT;
+				ret = -ETIMEDOUT;
+				goto glink_tx_common_err_2;
 			}
 
 			reinit_completion(&ctx->int_req_complete);
 			rwref_read_get(&ctx->ch_state_lhb2);
-			rwref_put(&ctx->ch_state_lhb2);
 		}
 	}
 
@@ -2945,8 +2983,8 @@
 	if (!tx_info) {
 		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
 		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
-		rwref_read_put(&ctx->ch_state_lhb2);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto glink_tx_common_err;
 	}
 	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
 	INIT_LIST_HEAD(&tx_info->list_done);
@@ -2972,7 +3010,10 @@
 	else
 		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
 
+glink_tx_common_err:
 	rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 
@@ -3013,13 +3054,15 @@
 	struct glink_core_rx_intent *intent_ptr;
 	int ret = 0;
 
-	if (!ctx)
-		return -EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		/* Can only queue rx intents if channel is fully opened */
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3028,13 +3071,16 @@
 		GLINK_ERR_CH(ctx,
 			"%s: Intent pointer allocation failed size[%zu]\n",
 			__func__, size);
+		glink_put_ch_ctx(ctx, false);
 		return -ENOMEM;
 	}
 	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
 			intent_ptr->intent_size);
 
-	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		glink_put_ch_ctx(ctx, false);
 		return ret;
+	}
 
 	/* notify remote side of rx intent */
 	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
@@ -3042,7 +3088,7 @@
 	if (ret)
 		/* unable to transmit, dequeue intent */
 		ch_remove_local_rx_intent(ctx, intent_ptr->id);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3061,20 +3107,25 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	struct glink_core_rx_intent *intent;
 	unsigned long flags;
+	int ret;
 
 	if (!ctx || !ch_is_fully_opened(ctx))
 		return false;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return false;
 	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
 	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
 		if (size <= intent->intent_size) {
 			spin_unlock_irqrestore(
 				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			glink_put_ch_ctx(ctx, false);
 			return true;
 		}
 	}
 	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-
+	glink_put_ch_ctx(ctx, false);
 	return false;
 }
 EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3095,11 +3146,15 @@
 	uint32_t id;
 	int ret = 0;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
 
 	if (IS_ERR_OR_NULL(liid_ptr)) {
 		/* invalid pointer */
 		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		glink_put_ch_ctx(ctx, false);
 		return -EINVAL;
 	}
 
@@ -3125,7 +3180,7 @@
 	/* send rx done */
 	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
 			ctx->lcid, id, reuse);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_rx_done);
@@ -3173,12 +3228,13 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3188,6 +3244,7 @@
 			ctx->lcid, ctx->lsigs);
 	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
 
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_sigs_set);
@@ -3203,17 +3260,22 @@
 int glink_sigs_local_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->lsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3229,17 +3291,23 @@
 int glink_sigs_remote_get(void *handle, uint32_t *sigs)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx || !sigs)
+	if (!sigs)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	*sigs = ctx->rsigs;
+	glink_put_ch_ctx(ctx, false);
 	return 0;
 }
 EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3334,12 +3402,16 @@
 	int ret;
 	unsigned long req_rate_kBps;
 
-	if (!ctx || !latency_us || !pkt_size)
+	if (!latency_us || !pkt_size)
 		return -EINVAL;
 
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3349,7 +3421,7 @@
 	if (ret < 0)
 		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
 			     __func__, latency_us, pkt_size);
-
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_latency);
@@ -3367,16 +3439,18 @@
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
 	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
 	ret = glink_qos_reset_priority(ctx);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_cancel);
@@ -3397,12 +3471,13 @@
 	int ret;
 	unsigned long flags;
 
-	if (!ctx)
-		return -EINVAL;
-
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return -EBUSY;
 	}
 
@@ -3411,6 +3486,7 @@
 	ret = glink_qos_add_ch_tx_intent(ctx);
 	spin_unlock(&ctx->tx_lists_lock_lhc3);
 	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	glink_put_ch_ctx(ctx, false);
 	return ret;
 }
 EXPORT_SYMBOL(glink_qos_start);
@@ -3429,16 +3505,20 @@
 unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return (unsigned long)-EINVAL;
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return (unsigned long)ret;
 
 	if (!ch_is_fully_opened(ctx)) {
 		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
 			__func__);
+		glink_put_ch_ctx(ctx, false);
 		return (unsigned long)-EBUSY;
 	}
 
+	glink_put_ch_ctx(ctx, false);
 	return ctx->transport_ptr->ops->get_power_vote_ramp_time(
 			ctx->transport_ptr->ops,
 			glink_prio_to_power_state(ctx->transport_ptr,
@@ -3522,12 +3602,16 @@
 int glink_wait_link_down(void *handle)
 {
 	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
 
-	if (!ctx)
-		return -EINVAL;
-	if (!ctx->transport_ptr)
+	ret = glink_get_ch_ctx(ctx);
+	if (ret)
+		return ret;
+	if (!ctx->transport_ptr) {
+		glink_put_ch_ctx(ctx, false);
 		return -EOPNOTSUPP;
-
+	}
+	glink_put_ch_ctx(ctx, false);
 	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
 }
 EXPORT_SYMBOL(glink_wait_link_down);
@@ -4030,6 +4114,37 @@
 	return xprt_ptr;
 }
 
+static struct channel_ctx *get_first_ch_ctx(
+	struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct channel_ctx *ctx;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	if (!list_empty(&xprt_ctx->channels)) {
+		ctx = list_first_entry(&xprt_ctx->channels,
+					struct channel_ctx, port_list_node);
+		rwref_get(&ctx->ch_state_lhb2);
+	} else {
+		ctx = NULL;
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	return ctx;
+}
+
+static void glink_core_move_ch_node(struct glink_core_xprt_ctx *xprt_ptr,
+	struct glink_core_xprt_ctx *dummy_xprt_ctx, struct channel_ctx *ctx)
+{
+	unsigned long flags, d_flags;
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	list_move_tail(&ctx->port_list_node, &dummy_xprt_ctx->channels);
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+}
+
 /**
  * glink_core_channel_cleanup() - cleanup all channels for the transport
  *
@@ -4040,7 +4155,7 @@
 static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
 {
 	unsigned long flags, d_flags;
-	struct channel_ctx *ctx, *tmp_ctx;
+	struct channel_ctx *ctx;
 	struct channel_lcid *temp_lcid, *temp_lcid1;
 	struct glink_core_xprt_ctx *dummy_xprt_ctx;
 
@@ -4049,29 +4164,18 @@
 		GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
 		return;
 	}
-
 	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
 	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
-	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
-	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-
-	list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
-						port_list_node) {
+	ctx = get_first_ch_ctx(xprt_ptr);
+	while (ctx) {
 		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
 		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
 			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
-			rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
-			list_move_tail(&ctx->port_list_node,
-					&dummy_xprt_ctx->channels);
 			ctx->transport_ptr = dummy_xprt_ctx;
 			rwref_write_put(&ctx->ch_state_lhb2);
+			glink_core_move_ch_node(xprt_ptr, dummy_xprt_ctx, ctx);
 		} else {
 			/* local state is in either CLOSED or CLOSING */
-			spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
-							flags);
-			spin_unlock_irqrestore(
-					&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-					d_flags);
 			glink_core_remote_close_common(ctx, true);
 			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
 				glink_core_ch_close_ack_common(ctx, true);
@@ -4079,22 +4183,21 @@
 			if (ch_is_fully_closed(ctx))
 				glink_delete_ch_from_list(ctx, false);
 			rwref_write_put(&ctx->ch_state_lhb2);
-			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
-						d_flags);
-			spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 		}
+		rwref_put(&ctx->ch_state_lhb2);
+		ctx = get_first_ch_ctx(xprt_ptr);
 	}
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
 	list_for_each_entry_safe(temp_lcid, temp_lcid1,
 			&xprt_ptr->free_lcid_list, list_node) {
 		list_del(&temp_lcid->list_node);
 		kfree(&temp_lcid->list_node);
 	}
-	dummy_xprt_ctx->dummy_in_use = false;
 	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
-	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
 	rwref_read_put(&xprt_ptr->xprt_state_lhb0);
 
 	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	dummy_xprt_ctx->dummy_in_use = false;
 	while (!list_empty(&dummy_xprt_ctx->channels)) {
 		ctx = list_first_entry(&dummy_xprt_ctx->channels,
 					struct channel_ctx, port_list_node);
@@ -5275,7 +5378,7 @@
 			struct glink_core_xprt_ctx *xprt_ctx)
 {
 	unsigned long flags;
-	struct glink_core_tx_pkt *tx_info;
+	struct glink_core_tx_pkt *tx_info, *temp_tx_info;
 	size_t txd_len = 0;
 	size_t tx_len = 0;
 	uint32_t num_pkts = 0;
@@ -5310,6 +5413,20 @@
 						ctx->lcid, tx_info);
 		}
 		spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+		if (!list_empty(&ctx->tx_active)) {
+			/*
+			 * Verify that the same tx_info still exists in the
+			 * tx_active list and was not removed during the tx
+			 * operation. This can happen if SSR and tx done both
+			 * occur before tx_lists_lock_lhc3 is taken.
+			 */
+			temp_tx_info = list_first_entry(&ctx->tx_active,
+					struct glink_core_tx_pkt, list_node);
+			if (temp_tx_info != tx_info)
+				continue;
+		} else {
+			break;
+		}
 		if (ret == -EAGAIN) {
 			/*
 			 * transport unable to send at the moment and will call
@@ -5336,6 +5453,7 @@
 			 * Break out of the loop so that the scheduler can
 			 * continue with the next channel.
 			 */
+			rwref_put(&tx_info->pkt_ref);
 			break;
 		}
 
@@ -5343,8 +5461,8 @@
 		if (!tx_info->size_remaining) {
 			num_pkts++;
 			list_del_init(&tx_info->list_node);
-			rwref_put(&tx_info->pkt_ref);
 		}
+		rwref_put(&tx_info->pkt_ref);
 	}
 
 	ctx->txd_len += txd_len;
@@ -5369,7 +5487,7 @@
 {
 	struct channel_ctx *ch_ptr;
 	uint32_t prio;
-	uint32_t tx_ready_head_prio;
+	uint32_t tx_ready_head_prio = 0;
 	int ret;
 	struct channel_ctx *tx_ready_head = NULL;
 	bool transmitted_successfully = true;
@@ -5393,6 +5511,7 @@
 		glink_pm_qos_vote(xprt_ptr);
 		ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
 				struct channel_ctx, tx_ready_list_node);
+		rwref_get(&ch_ptr->ch_state_lhb2);
 		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
 
 		if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
@@ -5404,6 +5523,7 @@
 			GLINK_ERR_XPRT(xprt_ptr,
 				"%s: Unable to send data on this transport.\n",
 				__func__);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		}
 		transmitted_successfully = false;
@@ -5414,6 +5534,7 @@
 			 * transport unable to send at the moment and will call
 			 * tx_resume() when it can send again.
 			 */
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (ret < 0) {
 			/*
@@ -5426,6 +5547,7 @@
 			GLINK_ERR_XPRT(xprt_ptr,
 					"%s: unrecoverable xprt failure %d\n",
 					__func__, ret);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			break;
 		} else if (!ret) {
 			/*
@@ -5437,6 +5559,7 @@
 			list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
 			spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
 						flags);
+			rwref_put(&ch_ptr->ch_state_lhb2);
 			continue;
 		}
 
@@ -5454,6 +5577,7 @@
 
 		tx_ready_head = NULL;
 		transmitted_successfully = true;
+		rwref_put(&ch_ptr->ch_state_lhb2);
 	}
 	glink_pm_qos_unvote(xprt_ptr);
 	GLINK_PERF("%s: worker exiting\n", __func__);
@@ -6024,6 +6148,7 @@
 static int glink_init(void)
 {
 	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+	rwlock_init(&magic_lock);
 	if (!log_ctx)
 		GLINK_ERR("%s: unable to create log context\n", __func__);
 	glink_debugfs_init();
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index 6d04b61..f36e7fc 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -770,7 +770,7 @@
 	struct device_node *phandle_node;
 	struct restart_notifier_block *nb;
 	struct subsys_info *ss_info;
-	struct subsys_info_leaf *ss_info_leaf;
+	struct subsys_info_leaf *ss_info_leaf = NULL;
 	struct glink_link_info *link_info;
 	char *key;
 	const char *edge;
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index dd3e190..501b902 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -71,7 +71,6 @@
 /* Get the slice entry by index */
 static struct llcc_slice_desc *llcc_slice_get_entry(struct device *dev, int n)
 {
-	int id;
 	struct of_phandle_args phargs;
 	struct llcc_drv_data *drv;
 	const struct llcc_slice_config *llcc_data_ptr;
@@ -105,7 +104,7 @@
 	}
 
 	if (llcc_data_ptr == NULL) {
-		pr_err("can't find %d usecase id\n", id);
+		pr_err("can't find %d usecase id\n", phargs.args[0]);
 		return ERR_PTR(-ENODEV);
 	}
 
diff --git a/drivers/soc/qcom/msm-core.c b/drivers/soc/qcom/msm-core.c
index fa3ba1d..5a0b261 100644
--- a/drivers/soc/qcom/msm-core.c
+++ b/drivers/soc/qcom/msm-core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -52,7 +52,7 @@
 #define NUM_OF_CORNERS 10
 #define DEFAULT_SCALING_FACTOR 1
 
-#define ALLOCATE_2D_ARRAY(type) (\
+#define ALLOCATE_2D_ARRAY(type) \
 static type **allocate_2d_array_##type(int idx)\
 {\
 	int i;\
@@ -77,15 +77,13 @@
 		kfree(ptr[i]);\
 	kfree(ptr);\
 	return ERR_PTR(-ENOMEM);\
-})
+}
 
 struct cpu_activity_info {
 	int cpu;
 	int mpidr;
 	long temp;
 	int sensor_id;
-	struct sensor_threshold hi_threshold;
-	struct sensor_threshold low_threshold;
 	struct cpu_static_info *sp;
 };
 
@@ -123,79 +121,11 @@
 static int max_throttling_temp = 80; /* in C */
 module_param_named(throttling_temp, max_throttling_temp, int, 0664);
 
-/*
- * Cannot be called from an interrupt context
- */
-static void set_and_activate_threshold(uint32_t sensor_id,
-	struct sensor_threshold *threshold)
-{
-	if (sensor_set_trip(sensor_id, threshold)) {
-		pr_err("%s: Error in setting trip %d\n",
-			KBUILD_MODNAME, threshold->trip);
-		return;
-	}
-
-	if (sensor_activate_trip(sensor_id, threshold, true)) {
-		sensor_cancel_trip(sensor_id, threshold);
-		pr_err("%s: Error in enabling trip %d\n",
-			KBUILD_MODNAME, threshold->trip);
-		return;
-	}
-}
-
-static void set_threshold(struct cpu_activity_info *cpu_node)
-{
-	if (cpu_node->sensor_id < 0)
-		return;
-
-	/*
-	 * Before operating on the threshold structure which is used by
-	 * thermal core ensure that the sensor is disabled to prevent
-	 * incorrect operations on the sensor list maintained by thermal code.
-	 */
-	sensor_activate_trip(cpu_node->sensor_id,
-			&cpu_node->hi_threshold, false);
-	sensor_activate_trip(cpu_node->sensor_id,
-			&cpu_node->low_threshold, false);
-
-	cpu_node->hi_threshold.temp = (cpu_node->temp + high_hyst_temp) *
-					scaling_factor;
-	cpu_node->low_threshold.temp = (cpu_node->temp - low_hyst_temp) *
-					scaling_factor;
-
-	/*
-	 * Set the threshold only if we are below the hotplug limit
-	 * Adding more work at this high temperature range, seems to
-	 * fail hotplug notifications.
-	 */
-	if (cpu_node->hi_threshold.temp < (CPU_HOTPLUG_LIMIT * scaling_factor))
-		set_and_activate_threshold(cpu_node->sensor_id,
-			&cpu_node->hi_threshold);
-
-	set_and_activate_threshold(cpu_node->sensor_id,
-		&cpu_node->low_threshold);
-}
-
 static void samplequeue_handle(struct work_struct *work)
 {
 	complete(&sampling_completion);
 }
 
-/* May be called from an interrupt context */
-static void core_temp_notify(enum thermal_trip_type type,
-		int temp, void *data)
-{
-	struct cpu_activity_info *cpu_node =
-		(struct cpu_activity_info *) data;
-
-	trace_temp_notification(cpu_node->sensor_id,
-		type, temp, cpu_node->temp);
-
-	cpu_node->temp = temp / scaling_factor;
-
-	complete(&sampling_completion);
-}
-
 static void repopulate_stats(int cpu)
 {
 	int i;
@@ -226,7 +156,6 @@
 	int cpu;
 	static long prev_temp[NR_CPUS];
 	struct cpu_activity_info *cpu_node;
-	int temp;
 
 	if (disabled)
 		return;
@@ -238,11 +167,6 @@
 		if (cpu_node->sensor_id < 0)
 			continue;
 
-		if (cpu_node->temp == prev_temp[cpu]) {
-			sensor_get_temp(cpu_node->sensor_id, &temp);
-			cpu_node->temp = temp / scaling_factor;
-		}
-
 		prev_temp[cpu] = cpu_node->temp;
 
 		/*
@@ -276,7 +200,7 @@
 	int cpu, num_of_freqs;
 	struct cpufreq_frequency_table *table;
 
-	table = cpufreq_frequency_get_table(policy->cpu);
+	table = policy->freq_table;
 	if (!table) {
 		pr_err("Couldn't get freq table for cpu%d\n",
 				policy->cpu);
@@ -319,12 +243,6 @@
 			cpu_node = &activity[cpu];
 			if (prev_temp[cpu] != cpu_node->temp) {
 				prev_temp[cpu] = cpu_node->temp;
-				set_threshold(cpu_node);
-				trace_temp_threshold(cpu, cpu_node->temp,
-					cpu_node->hi_threshold.temp /
-					scaling_factor,
-					cpu_node->low_threshold.temp /
-					scaling_factor);
 			}
 		}
 		if (!poll_ms)
@@ -551,16 +469,6 @@
 	return 0;
 }
 
-static inline void init_sens_threshold(struct sensor_threshold *threshold,
-		enum thermal_trip_type trip, long temp,
-		void *data)
-{
-	threshold->trip = trip;
-	threshold->temp = temp;
-	threshold->data = data;
-	threshold->notify = (void *)core_temp_notify;
-}
-
 static int msm_core_stats_init(struct device *dev, int cpu)
 {
 	int i;
@@ -702,7 +610,6 @@
 	struct device_node *phandle;
 	const char *sensor_type = NULL;
 	struct cpu_activity_info *cpu_node = &activity[cpu];
-	int temp;
 
 	if (!node)
 		return -ENODEV;
@@ -730,7 +637,6 @@
 		return ret;
 	}
 
-	cpu_node->sensor_id = sensor_get_id((char *)sensor_type);
 	if (cpu_node->sensor_id < 0)
 		return cpu_node->sensor_id;
 
@@ -742,21 +648,6 @@
 		scaling_factor = DEFAULT_SCALING_FACTOR;
 	}
 
-	ret = sensor_get_temp(cpu_node->sensor_id, &temp);
-	if (ret)
-		return ret;
-
-	cpu_node->temp = temp / scaling_factor;
-
-	init_sens_threshold(&cpu_node->hi_threshold,
-			THERMAL_TRIP_CONFIGURABLE_HI,
-			(cpu_node->temp + high_hyst_temp) * scaling_factor,
-			(void *)cpu_node);
-	init_sens_threshold(&cpu_node->low_threshold,
-			THERMAL_TRIP_CONFIGURABLE_LOW,
-			(cpu_node->temp - low_hyst_temp) * scaling_factor,
-			(void *)cpu_node);
-
 	return ret;
 }
 
@@ -846,11 +737,6 @@
 		for_each_possible_cpu(cpu) {
 			if (activity[cpu].sensor_id < 0)
 				continue;
-
-			sensor_activate_trip(activity[cpu].sensor_id,
-				&activity[cpu].hi_threshold, false);
-			sensor_activate_trip(activity[cpu].sensor_id,
-				&activity[cpu].low_threshold, false);
 		}
 		break;
 	default:
@@ -1020,7 +906,6 @@
 	int ret = 0;
 	char *key = NULL;
 	struct device_node *node;
-	int cpu;
 	struct uio_info *info;
 
 	if (!pdev)
@@ -1071,9 +956,6 @@
 	if (ret)
 		goto failed;
 
-	for_each_possible_cpu(cpu)
-		set_threshold(&activity[cpu]);
-
 	INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle);
 	schedule_delayed_work(&sampling_work, msecs_to_jiffies(0));
 	cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER);
@@ -1096,11 +978,6 @@
 	for_each_possible_cpu(cpu) {
 		if (activity[cpu].sensor_id < 0)
 			continue;
-
-		sensor_cancel_trip(activity[cpu].sensor_id,
-				&activity[cpu].hi_threshold);
-		sensor_cancel_trip(activity[cpu].sensor_id,
-				&activity[cpu].low_threshold);
 	}
 	free_dyn_memory();
 	misc_deregister(&msm_core_device);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
index 8c6deb1..c977d1b 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -572,7 +572,7 @@
 		bcm_dev->lnode_list[lnode_idx].lnode_query_ab[ctx] =
 			msm_bus_div64(cur_dev->node_bw[ctx].sum_query_ab *
 					(uint64_t)bcm_dev->bcmdev->width,
-				cur_dev->node_info->agg_params.num_aggports,
+				cur_dev->node_info->agg_params.num_aggports *
 				cur_dev->node_info->agg_params.buswidth);
 
 		for (i = 0; i < bcm_dev->num_lnodes; i++) {
@@ -1298,7 +1298,7 @@
 					struct msm_bus_tcs_usecase *tcs_usecase)
 {
 	int lnode, src, dest, cur_idx;
-	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw;
 	int i, ret = 0;
 	struct msm_bus_scale_pdata *pdata;
 	struct device *src_dev;
@@ -1339,8 +1339,8 @@
 					curr_bw, curr_clk);
 		}
 
-		ret = query_path(src_dev, dest, req_clk, req_bw, slp_clk,
-			slp_bw, curr_clk, curr_bw, lnode);
+		ret = query_path(src_dev, dest, req_clk, req_bw, 0,
+			0, curr_clk, curr_bw, lnode);
 
 		if (ret) {
 			MSM_BUS_ERR("%s: Query path failed! %d ctx %d\n",
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
index 7a0fbc5..4911cf2 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_core.h
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -288,7 +288,7 @@
 	struct device **src_devs;
 };
 
-uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+uint64_t msm_bus_div64(uint64_t num, unsigned int base);
 int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
 void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
 struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
index 5b5159d..03042fa 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_rules.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -588,7 +588,7 @@
 static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
 					struct notifier_block *nb)
 {
-	int i;
+	int i = 0;
 	struct rule_node_info *node = NULL;
 	struct rule_node_info *node_tmp = NULL;
 	struct rules_def *node_rule;
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index 5b1e9de..c2fb37b 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1157,7 +1157,7 @@
 static void smem_init_security_partition(struct smem_toc_entry *entry,
 								uint32_t num)
 {
-	uint16_t remote_host;
+	uint16_t remote_host = 0;
 	struct smem_partition_header *hdr;
 	bool is_comm_partition = false;
 
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 03a6204..11e1b4d 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -928,7 +928,8 @@
 					desc->attrs);
 			priv->region = NULL;
 		}
-		pil_clear_segment(desc);
+		if (desc->clear_fw_region && priv->region_start)
+			pil_clear_segment(desc);
 		pil_release_mmap(desc);
 	}
 	return ret;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 752a6ce..af7249b 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@
  * This defaults to iounmap if not specified.
  * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
  * @modem_ssr: true if modem is restarting, false if booting for first time.
+ * @clear_fw_region: Clear fw region on failure in loading.
  * @subsys_vmid: memprot id for the subsystem.
  */
 struct pil_desc {
@@ -54,6 +55,7 @@
 	void *map_data;
 	bool shutdown_fail;
 	bool modem_ssr;
+	bool clear_fw_region;
 	u32 subsys_vmid;
 };
 
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index ffe72e6..fb3d7d9 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -320,6 +320,7 @@
 	struct modem_data *drv = dev_get_drvdata(pil->dev);
 	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
 	int ret = 0;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	s32 status;
 	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
 
@@ -349,7 +350,7 @@
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
-		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
 				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
 				drv->attrs_dma);
 		drv->q6->mba_dp_virt = NULL;
@@ -542,6 +543,7 @@
 	dma_addr_t mba_dp_phys, mba_dp_phys_end;
 	int ret, count;
 	const u8 *data;
+	struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
 
 	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
 	ret = request_firmware(&fw, fw_name_p, pil->dev);
@@ -560,11 +562,12 @@
 
 	drv->mba_dp_size = SZ_1M;
 
-	arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+	arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
 
-	md->mba_mem_dev.coherent_dma_mask =
-		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
 	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_SKIP_ZEROING;
 	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
 
 	ret = request_firmware(&dp_fw, dp_name, pil->dev);
@@ -581,10 +584,11 @@
 		drv->mba_dp_size += drv->dp_size;
 	}
 
-	mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
-			&mba_dp_phys, GFP_KERNEL, md->attrs_dma);
+	mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+				   GFP_KERNEL, md->attrs_dma);
 	if (!mba_dp_virt) {
-		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
+				 __func__, drv->mba_dp_size);
 		ret = -ENOMEM;
 		goto err_invalid_fw;
 	}
@@ -640,7 +644,7 @@
 		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
 							drv->mba_dp_size);
 err_mba_data:
-	dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+	dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
 				drv->mba_dp_phys, md->attrs_dma);
 err_invalid_fw:
 	if (dp_fw)
@@ -659,13 +663,14 @@
 	s32 status;
 	int ret;
 	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	unsigned long attrs = 0;
 
-	drv->mba_mem_dev.coherent_dma_mask =
-		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_SKIP_ZEROING;
 	attrs |= DMA_ATTR_STRONGLY_ORDERED;
 	/* Make metadata physically contiguous and 4K aligned. */
-	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
+	mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys,
 					GFP_KERNEL, attrs);
 	if (!mdata_virt) {
 		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
@@ -682,8 +687,8 @@
 		if (ret) {
 			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
 									ret);
-			dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
-							mdata_phys, attrs);
+			dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+									attrs);
 			goto fail;
 		}
 	}
@@ -709,7 +714,7 @@
 	if (pil->subsys_vmid > 0)
 		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
 
-	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, attrs);
+	dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, attrs);
 
 	if (!ret)
 		return ret;
@@ -721,7 +726,7 @@
 		if (pil->subsys_vmid > 0)
 			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
 						drv->q6->mba_dp_size);
-		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+		dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
 				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
 				drv->attrs_dma);
 		drv->q6->mba_dp_virt = NULL;
@@ -773,6 +778,7 @@
 	struct modem_data *drv = dev_get_drvdata(pil->dev);
 	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
 	int ret;
+	struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
 	s32 status;
 	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
 
@@ -794,9 +800,9 @@
 				pil_assign_mem_to_linux(pil,
 					drv->q6->mba_dp_phys,
 					drv->q6->mba_dp_size);
-			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
-					drv->q6->mba_dp_virt,
-					drv->q6->mba_dp_phys, drv->attrs_dma);
+			dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
 
 			drv->q6->mba_dp_virt = NULL;
 		}
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 3af6368..1789ba3 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@
 	struct clk *xo;
 	struct pil_desc desc;
 	struct device mba_mem_dev;
+	struct device *mba_mem_dev_fixed;
 	unsigned long attrs_dma;
 };
 
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index ec0187a..2cbbe2e 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/ioport.h>
@@ -379,6 +380,11 @@
 	}
 	init_completion(&drv->stop_ack);
 
+	/* Probe the MBA mem device if present */
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		return ret;
+
 	return pil_subsys_init(drv, pdev);
 }
 
@@ -392,6 +398,33 @@
 	return 0;
 }
 
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+
+	if (!pdev->dev.parent) {
+		pr_err("No parent found.\n");
+		return -EINVAL;
+	}
+	drv = dev_get_drvdata(pdev->dev.parent);
+	drv->mba_mem_dev_fixed = &pdev->dev;
+	return 0;
+}
+
+static const struct of_device_id mba_mem_match_table[] = {
+	{ .compatible = "qcom,pil-mba-mem" },
+	{}
+};
+
+static struct platform_driver pil_mba_mem_driver = {
+	.probe = pil_mba_mem_driver_probe,
+	.driver = {
+		.name = "pil-mba-mem",
+		.of_match_table = mba_mem_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
 static const struct of_device_id mss_match_table[] = {
 	{ .compatible = "qcom,pil-q6v5-mss" },
 	{ .compatible = "qcom,pil-q6v55-mss" },
@@ -411,7 +444,12 @@
 
 static int __init pil_mss_init(void)
 {
-	return platform_driver_register(&pil_mss_driver);
+	int ret;
+
+	ret = platform_driver_register(&pil_mba_mem_driver);
+	if (!ret)
+		ret = platform_driver_register(&pil_mss_driver);
+	return ret;
 }
 module_init(pil_mss_init);
 
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index fb4d0ea..d9d6c72 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -637,6 +637,7 @@
 	if (ret)
 		return ERR_PTR(ret);
 
+	desc->clear_fw_region = false;
 	desc->dev = &pdev->dev;
 
 	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
diff --git a/drivers/soc/qcom/qdsp6v2/adsp-loader.c b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
index 1bde1bf..d90267e 100644
--- a/drivers/soc/qcom/qdsp6v2/adsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
 #include <linux/qdsp6v2/apr.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define Q6_PIL_GET_DELAY_MS 100
@@ -44,12 +46,13 @@
 	NULL,
 };
 
+static struct work_struct adsp_ldr_work;
 static struct platform_device *adsp_private;
 static void adsp_loader_unload(struct platform_device *pdev);
 
-static void adsp_loader_do(struct platform_device *pdev)
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
 {
-
+	struct platform_device *pdev = adsp_private;
 	struct adsp_loader_private *priv = NULL;
 
 	const char *adsp_dt = "qcom,adsp-state";
@@ -146,6 +149,10 @@
 	dev_err(&pdev->dev, "%s: Q6 image loading failed\n", __func__);
 }
 
+static void adsp_loader_do(struct platform_device *pdev)
+{
+	schedule_work(&adsp_ldr_work);
+}
 
 static ssize_t adsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -272,6 +279,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
index 9bb4eb0..70977d3 100644
--- a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -19,6 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
 #include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
 #include <soc/qcom/subsystem_restart.h>
 
 #define BOOT_CMD 1
@@ -47,10 +49,12 @@
 
 static u32 cdsp_state = CDSP_SUBSYS_DOWN;
 static struct platform_device *cdsp_private;
+static struct work_struct cdsp_ldr_work;
 static void cdsp_loader_unload(struct platform_device *pdev);
 
-static int cdsp_loader_do(struct platform_device *pdev)
+static void cdsp_load_fw(struct work_struct *cdsp_ldr_work)
 {
+	struct platform_device *pdev = cdsp_private;
 	struct cdsp_loader_private *priv = NULL;
 
 	int rc = 0;
@@ -99,14 +103,17 @@
 		}
 
 		dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
-		return rc;
+		return;
 	}
 
 fail:
 	dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
-	return rc;
 }
 
+static void cdsp_loader_do(struct platform_device *pdev)
+{
+	schedule_work(&cdsp_ldr_work);
+}
 
 static ssize_t cdsp_boot_store(struct kobject *kobj,
 	struct kobj_attribute *attr,
@@ -124,7 +131,7 @@
 		pr_debug("%s: going to call cdsp_loader_do\n", __func__);
 		cdsp_loader_do(cdsp_private);
 	} else if (boot == IMAGE_UNLOAD_CMD) {
-		pr_debug("%s: going to call adsp_unloader\n", __func__);
+		pr_debug("%s: going to call cdsp_unloader\n", __func__);
 		cdsp_loader_unload(cdsp_private);
 	}
 	return count;
@@ -236,6 +243,8 @@
 		return ret;
 	}
 
+	INIT_WORK(&cdsp_ldr_work, cdsp_load_fw);
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index ba65e68..b2627f2 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -261,7 +261,7 @@
 			int *dest_vmids, int *dest_perms,
 			int dest_nelems)
 {
-	int ret;
+	int ret = 0;
 	struct scm_desc desc = {0};
 	u32 *source_vm_copy;
 	size_t source_vm_copy_size;
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 2f578c5..10caf22 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -111,6 +111,7 @@
 			qmi_handle_create(service_locator_clnt_notify, NULL);
 	if (!service_locator.clnt_handle) {
 		service_locator.clnt_handle = NULL;
+		complete_all(&service_locator.service_available);
 		mutex_unlock(&service_locator.service_mutex);
 		pr_err("Service locator QMI client handle alloc failed!\n");
 		return;
@@ -123,6 +124,7 @@
 	if (rc) {
 		qmi_handle_destroy(service_locator.clnt_handle);
 		service_locator.clnt_handle = NULL;
+		complete_all(&service_locator.service_available);
 		mutex_unlock(&service_locator.service_mutex);
 		pr_err("Unable to connnect to service rc:%d\n", rc);
 		return;
@@ -138,6 +140,7 @@
 	mutex_lock(&service_locator.service_mutex);
 	qmi_handle_destroy(service_locator.clnt_handle);
 	service_locator.clnt_handle = NULL;
+	complete_all(&service_locator.service_available);
 	mutex_unlock(&service_locator.service_mutex);
 	pr_info("Connection with service locator lost\n");
 }
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index bcd00b4..fca1c68 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -99,6 +99,7 @@
  */
 struct qmi_client_info {
 	int instance_id;
+	enum pd_subsys_state subsys_state;
 	struct work_struct svc_arrive;
 	struct work_struct svc_exit;
 	struct work_struct svc_rcv_msg;
@@ -436,7 +437,7 @@
 {
 	struct qmi_client_info *data = container_of(work,
 					struct qmi_client_info, svc_exit);
-	root_service_service_exit(data, ROOT_PD_DOWN);
+	root_service_service_exit(data, data->subsys_state);
 }
 
 static int service_event_notify(struct notifier_block *this,
@@ -453,6 +454,7 @@
 		break;
 	case QMI_SERVER_EXIT:
 		pr_debug("Root PD service DOWN\n");
+		data->subsys_state = ROOT_PD_DOWN;
 		queue_work(data->svc_event_wq, &data->svc_exit);
 		break;
 	default:
@@ -468,7 +470,6 @@
 	struct qmi_client_info *info = container_of(this,
 					struct qmi_client_info, ssr_notifier);
 	struct notif_data *notif = data;
-	enum pd_subsys_state state;
 
 	switch (code) {
 	case	SUBSYS_BEFORE_SHUTDOWN:
@@ -476,16 +477,16 @@
 						notif->crashed);
 		switch (notif->crashed) {
 		case CRASH_STATUS_ERR_FATAL:
-			state = ROOT_PD_ERR_FATAL;
+			info->subsys_state = ROOT_PD_ERR_FATAL;
 			break;
 		case CRASH_STATUS_WDOG_BITE:
-			state = ROOT_PD_WDOG_BITE;
+			info->subsys_state = ROOT_PD_WDOG_BITE;
 			break;
 		default:
-			state = ROOT_PD_SHUTDOWN;
+			info->subsys_state = ROOT_PD_SHUTDOWN;
 			break;
 		}
-		root_service_service_exit(info, state);
+		queue_work(info->svc_event_wq, &info->svc_exit);
 		break;
 	default:
 		break;
@@ -635,7 +636,13 @@
 		return rc;
 	}
 
-	/* Check the response */
+	/* Check response if PDR is disabled */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) == QMI_ERR_DISABLED_V01) {
+		pr_err("PD restart is disabled 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		return -EOPNOTSUPP;
+	}
+	/* Check the response for other error case*/
 	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
 		pr_err("QMI request for PD restart failed 0x%x\n",
 					QMI_RESP_BIT_SHIFT(resp.resp.error));
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 0063ae1..982dfae 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1035,6 +1035,7 @@
 	d->desc.ops = &pil_ops_trusted;
 
 	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+	d->desc.clear_fw_region = true;
 
 	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
 					&proxy_timeout);
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index a087ad6..f4c7779 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -153,10 +153,12 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_arrive);
 
+	mutex_lock(&sysmon_lock);
 	/* Create a Local client port for QMI communication */
 	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
 	if (!data->clnt_handle) {
 		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 
@@ -167,6 +169,7 @@
 								data->name);
 		qmi_handle_destroy(data->clnt_handle);
 		data->clnt_handle = NULL;
+		mutex_unlock(&sysmon_lock);
 		return;
 	}
 	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
@@ -177,6 +180,7 @@
 	if (rc < 0)
 		pr_warn("%s: Could not register the indication callback\n",
 								data->name);
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_svc_exit(struct work_struct *work)
@@ -184,8 +188,10 @@
 	struct sysmon_qmi_data *data = container_of(work,
 					struct sysmon_qmi_data, svc_exit);
 
+	mutex_lock(&sysmon_lock);
 	qmi_handle_destroy(data->clnt_handle);
 	data->clnt_handle = NULL;
+	mutex_unlock(&sysmon_lock);
 }
 
 static void sysmon_clnt_recv_msg(struct work_struct *work)
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index b9984f2..ea886c7 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -507,7 +507,7 @@
 {
 	struct swr_mstr_ctrl *swrm = swr_get_ctrl_data(master);
 	int ret = 0;
-	int val;
+	int val = 0;
 	u8 *reg_val = (u8 *)buf;
 
 	if (!swrm) {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b799547..fc96f62 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -533,6 +533,18 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called spi_qup.
 
+config SPI_QCOM_GENI
+	tristate "Qualcomm Technologies Inc.'s GENI based SPI controller"
+	depends on ARCH_QCOM
+	help
+	  SPI driver for Qualcomm Technologies Inc's GENI based controller.
+	  The controller can run up to 50 MHz, supports up to 4 CS lines,
+	  programmable bits per word from 4 to 32 and supports the various
+	  SPI modes. It can operate in FIFO mode (SW driven IO) and DMA mode.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called spi-geni-qcom.
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C24XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aa939d9..9d72f37 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -73,6 +73,7 @@
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
+obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
 obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
 obj-$(CONFIG_SPI_RB4XX)			+= spi-rb4xx.o
 obj-$(CONFIG_SPI_RSPI)			+= spi-rspi.o
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 0000000..4c86197
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,687 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/spi/spi.h>
+
+#define SPI_NUM_CHIPSELECT	(4)
+#define SPI_XFER_TIMEOUT_MS	(250)
+#define SPI_OVERSAMPLING	(2)
+/* SPI SE specific registers */
+#define SE_SPI_CPHA		(0x224)
+#define SE_SPI_LOOPBACK		(0x22C)
+#define SE_SPI_CPOL		(0x230)
+#define SE_SPI_DEMUX_OUTPUT_INV	(0x24C)
+#define SE_SPI_DEMUX_SEL	(0x250)
+#define SE_SPI_TRANS_CFG	(0x25C)
+#define SE_SPI_WORD_LEN		(0x268)
+#define SE_SPI_TX_TRANS_LEN	(0x26C)
+#define SE_SPI_RX_TRANS_LEN	(0x270)
+#define SE_SPI_PRE_POST_CMD_DLY	(0x274)
+#define SE_SPI_DELAY_COUNTERS	(0x278)
+
+/* SE_SPI_CPHA register fields */
+#define CPHA			(BIT(0))
+
+/* SE_SPI_LOOPBACK register fields */
+#define LOOPBACK_ENABLE		(0x1)
+#define NORMAL_MODE		(0x0)
+#define LOOPBACK_MSK		(GENMASK(1, 0))
+
+/* SE_SPI_CPOL register fields */
+#define CPOL			(BIT(2))
+
+/* SE_SPI_DEMUX_OUTPUT_INV register fields */
+#define CS_DEMUX_OUTPUT_INV_MSK	(GENMASK(3, 0))
+
+/* SE_SPI_DEMUX_SEL register fields */
+#define CS_DEMUX_OUTPUT_SEL	(GENMASK(3, 0))
+
+/* SE_SPI_TX_TRANS_CFG register fields */
+#define CS_TOGGLE		(BIT(0))
+
+/* SE_SPI_WORD_LEN register fields */
+#define WORD_LEN_MSK		(GENMASK(9, 0))
+#define MIN_WORD_LEN		(4)
+
+/* SPI_TX/SPI_RX_TRANS_LEN fields */
+#define TRANS_LEN_MSK		(GENMASK(23, 0))
+
+/* M_CMD OP codes for SPI */
+#define SPI_TX_ONLY		(1)
+#define SPI_RX_ONLY		(2)
+#define SPI_FULL_DUPLEX		(3)
+#define SPI_TX_RX		(7)
+#define SPI_CS_ASSERT		(8)
+#define SPI_CS_DEASSERT		(9)
+#define SPI_SCK_ONLY		(10)
+/* M_CMD params for SPI */
+#define SPI_PRE_CMD_DELAY	(0)
+#define TIMESTAMP_BEFORE	(1)
+#define FRAGMENTATION		(2)
+#define TIMESTAMP_AFTER		(3)
+#define POST_CMD_DELAY		(4)
+
/*
 * Per-controller driver state for a GENI-based SPI master.
 * Stored as the spi_master's devdata; one instance per probed device.
 */
struct spi_geni_master {
	struct se_geni_rsc spi_rsc;	/* SE core/AHB clocks and pinctrl states */
	resource_size_t phys_addr;	/* physical base of the SE register block */
	resource_size_t size;		/* size of the SE register region */
	void __iomem *base;		/* ioremapped SE registers */
	int irq;			/* SE interrupt line */
	struct device *dev;		/* backing platform device */
	int rx_fifo_depth;		/* RX FIFO depth, read from HW at first use */
	int tx_fifo_depth;		/* TX FIFO depth, read from HW at first use */
	int tx_fifo_width;		/* FIFO word width in bits */
	int tx_wm;			/* TX watermark programmed per transfer */
	bool setup;			/* one-time SE init done (prepare_transfer_hardware) */
	u32 cur_speed_hz;		/* SCK rate currently programmed */
	int cur_word_len;		/* bits-per-word currently programmed */
	unsigned int tx_rem_bytes;	/* bytes still to push for cur_xfer */
	unsigned int rx_rem_bytes;	/* bytes still to drain for cur_xfer */
	struct spi_transfer *cur_xfer;	/* FIFO-mode transfer in flight */
	struct completion xfer_done;	/* signalled by IRQ on done/cancel/abort */
};
+
/* Return the spi_master stored as this platform device's drvdata. */
static struct spi_master *get_spi_master(struct device *dev)
{
	return platform_get_drvdata(to_platform_device(dev));
}
+
+static int get_sclk(u32 speed_hz, unsigned long *sclk_freq)
+{
+	u32 root_freq[] = { 19200000 };
+
+	*sclk_freq = root_freq[0];
+	return 0;
+}
+
+static int do_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas)
+{
+	unsigned long sclk_freq;
+	int div = 0;
+	int idx;
+	struct se_geni_rsc *rsc = &mas->spi_rsc;
+	int ret = 0;
+	u32 clk_sel = geni_read_reg(mas->base, SE_GENI_CLK_SEL);
+	u32 m_clk_cfg = geni_read_reg(mas->base, GENI_SER_M_CLK_CFG);
+
+	clk_sel &= ~CLK_SEL_MSK;
+	m_clk_cfg &= ~CLK_DIV_MSK;
+
+	idx = get_sclk(speed_hz, &sclk_freq);
+	if (idx < 0) {
+		ret = -EINVAL;
+		goto spi_clk_cfg_exit;
+	}
+	div = (sclk_freq / (SPI_OVERSAMPLING / speed_hz));
+
+	clk_sel |= (idx & CLK_SEL_MSK);
+	m_clk_cfg |= ((div << CLK_DIV_SHFT) | SER_CLK_EN);
+	ret = clk_set_rate(rsc->se_clk, sclk_freq);
+	if (ret)
+		goto spi_clk_cfg_exit;
+
+	geni_write_reg(clk_sel, mas->base, SE_GENI_CLK_SEL);
+	geni_write_reg(m_clk_cfg, mas->base, GENI_SER_M_CLK_CFG);
+spi_clk_cfg_exit:
+	return ret;
+}
+
+static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
+						int bits_per_word)
+{
+	int pack_words = mas->tx_fifo_width / bits_per_word;
+	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
+	u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
+
+	word_len &= ~WORD_LEN_MSK;
+	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
+	se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
+	geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
+}
+
/*
 * spi_geni_prepare_message() - program per-message controller state.
 *
 * Configures loopback, clock polarity/phase, chip-select demux routing
 * and polarity, clock rate and word length from the target device's
 * mode before the message's transfers run. Returns 0 on success or a
 * negative errno from clock configuration.
 */
static int spi_geni_prepare_message(struct spi_master *spi_mas,
					struct spi_message *spi_msg)
{
	struct spi_device *spi_slv = spi_msg->spi;
	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
	u16 mode = spi_slv->mode;
	/* Read-modify-write each config register to preserve other fields. */
	u32 loopback_cfg = geni_read_reg(mas->base, SE_SPI_LOOPBACK);
	u32 cpol = geni_read_reg(mas->base, SE_SPI_CPOL);
	u32 cpha = geni_read_reg(mas->base, SE_SPI_CPHA);
	u32 demux_sel = geni_read_reg(mas->base, SE_SPI_DEMUX_SEL);
	u32 demux_output_inv =
			geni_read_reg(mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	int ret = 0;

	/* Clear the bits we own, then set them from the device's mode. */
	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;
	demux_output_inv &= ~BIT(spi_slv->chip_select);

	if (mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;

	if (mode & SPI_CPOL)
		cpol |= CPOL;

	if (mode & SPI_CPHA)
		cpha |= CPHA;

	/* Active-high CS is implemented by inverting that demux output. */
	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv |= BIT(spi_slv->chip_select);

	demux_sel |= BIT(spi_slv->chip_select);
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_word_len = spi_slv->bits_per_word;

	ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
	if (ret) {
		dev_err(&spi_mas->dev, "Err setting clks ret %d\n", ret);
		goto prepare_message_exit;
	}
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	geni_write_reg(loopback_cfg, mas->base, SE_SPI_LOOPBACK);
	geni_write_reg(demux_sel, mas->base, SE_SPI_DEMUX_SEL);
	geni_write_reg(cpha, mas->base, SE_SPI_CPHA);
	geni_write_reg(cpol, mas->base, SE_SPI_CPOL);
	geni_write_reg(demux_output_inv, mas->base, SE_SPI_DEMUX_OUTPUT_INV);
	/* Ensure message level attributes are written before returning */
	mb();
prepare_message_exit:
	return ret;
}
+
+static int spi_geni_unprepare_message(struct spi_master *spi_mas,
+					struct spi_message *spi_msg)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi_mas);
+
+	mas->cur_speed_hz = 0;
+	mas->cur_word_len = 0;
+	return 0;
+}
+
+static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret = 0;
+
+	ret = pm_runtime_get_sync(mas->dev);
+	if (ret < 0) {
+		dev_err(mas->dev, "Error enabling SE resources\n");
+		pm_runtime_put_noidle(mas->dev);
+		goto exit_prepare_transfer_hardware;
+	} else {
+		ret = 0;
+	}
+
+	if (unlikely(!mas->setup)) {
+		int proto = get_se_proto(mas->base);
+
+		if (unlikely(proto != SPI)) {
+			dev_err(mas->dev, "Invalid proto %d\n", proto);
+			return -ENXIO;
+		}
+		geni_se_init(mas->base, FIFO_MODE, 0x0,
+						(mas->tx_fifo_depth - 2));
+		mas->tx_fifo_depth = get_tx_fifo_depth(mas->base);
+		mas->rx_fifo_depth = get_rx_fifo_depth(mas->base);
+		mas->tx_fifo_width = get_tx_fifo_width(mas->base);
+		/* Transmit an entire FIFO worth of data per IRQ */
+		mas->tx_wm = 1;
+		dev_dbg(mas->dev, "tx_fifo %d rx_fifo %d tx_width %d\n",
+			mas->tx_fifo_depth, mas->rx_fifo_depth,
+			mas->tx_fifo_width);
+		mas->setup = true;
+	}
+exit_prepare_transfer_hardware:
+	return ret;
+}
+
+static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
+{
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+	pm_runtime_put_sync(mas->dev);
+	return 0;
+}
+
/*
 * setup_fifo_xfer() - program the SE for one FIFO-mode transfer and kick it.
 *
 * Chooses the M_CMD opcode from the buffers present, programs transfer
 * lengths, records remaining-byte counters for the IRQ handlers, then
 * issues the command and arms the TX watermark interrupt.
 */
static void setup_fifo_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas, u16 mode,
				struct spi_master *spi)
{
	u32 m_cmd = 0;
	u32 m_param = 0;
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 trans_len = 0;

	/* Reprogram word length only when this transfer overrides it. */
	if (xfer->bits_per_word != mas->cur_word_len) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_word_len = xfer->bits_per_word;
	}

	if (xfer->tx_buf && xfer->rx_buf)
		m_cmd = SPI_FULL_DUPLEX;
	else if (xfer->tx_buf)
		m_cmd = SPI_TX_ONLY;
	else if (xfer->rx_buf)
		m_cmd = SPI_RX_ONLY;

	spi_tx_cfg &= ~CS_TOGGLE;
	if (xfer->cs_change)
		spi_tx_cfg |= CS_TOGGLE;
	/*
	 * Length in words. NOTE(review): cur_word_len >> 3 is zero for
	 * bits_per_word < 8 (min is 4) — would divide by zero; confirm
	 * callers never request sub-byte word lengths in FIFO mode.
	 */
	trans_len = ((xfer->len / (mas->cur_word_len >> 3)) & TRANS_LEN_MSK);
	/* Mark all but the final transfer as fragments of one message. */
	if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
		m_param |= FRAGMENTATION;

	mas->cur_xfer = xfer;
	/* SPI_FULL_DUPLEX (3) has both the TX_ONLY and RX_ONLY bits set. */
	if (m_cmd & SPI_TX_ONLY) {
		mas->tx_rem_bytes = xfer->len;
		geni_write_reg(trans_len, mas->base, SE_SPI_TX_TRANS_LEN);
	}

	if (m_cmd & SPI_RX_ONLY) {
		geni_write_reg(trans_len, mas->base, SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}
	geni_write_reg(spi_tx_cfg, mas->base, SE_SPI_TRANS_CFG);
	geni_setup_m_cmd(mas->base, m_cmd, m_param);
	/* Arm the watermark last: it is what triggers the first TX IRQ. */
	geni_write_reg(mas->tx_wm, mas->base, SE_GENI_TX_WATERMARK_REG);
	/* Ensure all writes are done before the WM interrupt */
	mb();
}
+
/*
 * handle_fifo_timeout() - recover from a FIFO transfer that never completed.
 *
 * Dumps the relevant transfer state, then tries to cancel the in-flight
 * M_CMD; if the cancel itself times out (1 s), escalates to an abort.
 * Completion is signalled by the IRQ handler on M_CMD_CANCEL/ABORT.
 */
static void handle_fifo_timeout(struct spi_geni_master *mas)
{
	unsigned long timeout;
	u32 tx_trans_len = geni_read_reg(mas->base, SE_SPI_TX_TRANS_LEN);
	u32 rx_trans_len = geni_read_reg(mas->base, SE_SPI_RX_TRANS_LEN);
	u32 spi_tx_cfg = geni_read_reg(mas->base, SE_SPI_TRANS_CFG);
	u32 m_cmd = geni_read_reg(mas->base, SE_GENI_M_CMD0);

	/* Timed-out on a FIFO xfer, print relevant reg info. */
	dev_err(mas->dev, "tx_rem_bytes %d rx_rem_bytes %d\n",
			mas->tx_rem_bytes, mas->rx_rem_bytes);
	dev_err(mas->dev, "tx_trans_len %d rx_trans_len %d\n", tx_trans_len,
								rx_trans_len);
	dev_err(mas->dev, "spi_tx_cfg 0x%x m_cmd 0x%x\n", spi_tx_cfg, m_cmd);
	/* Re-arm the completion before issuing cancel so we can wait on it. */
	reinit_completion(&mas->xfer_done);
	geni_cancel_m_cmd(mas->base);
	/* Ensure cmd cancel is written */
	mb();
	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout) {
		/* Cancel did not take; escalate to an abort. */
		reinit_completion(&mas->xfer_done);
		geni_abort_m_cmd(mas->base);
		/* Ensure cmd abort is written */
		mb();
		timeout = wait_for_completion_timeout(&mas->xfer_done,
								HZ);
		if (!timeout)
			dev_err(mas->dev,
				"Failed to cancel/abort m_cmd\n");
	}
}
+
+static int spi_geni_transfer_one(struct spi_master *spi,
+				struct spi_device *slv,
+				struct spi_transfer *xfer)
+{
+	int ret = 0;
+	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	unsigned long timeout;
+
+	if ((xfer->tx_buf == NULL) && (xfer->rx_buf == NULL)) {
+		dev_err(mas->dev, "Invalid xfer both tx rx are NULL\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&mas->xfer_done);
+	/* Speed and bits per word can be overridden per transfer */
+	if (xfer->speed_hz != mas->cur_speed_hz) {
+		ret = do_spi_clk_cfg(mas->cur_speed_hz, mas);
+		if (ret) {
+			dev_err(mas->dev, "%s:Err setting clks:%d\n",
+								__func__, ret);
+			goto geni_transfer_one_exit;
+		}
+		mas->cur_speed_hz = xfer->speed_hz;
+	}
+
+	setup_fifo_xfer(xfer, mas, slv->mode, spi);
+	timeout = wait_for_completion_timeout(&mas->xfer_done,
+					msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+	if (!timeout) {
+		dev_err(mas->dev, "Xfer[len %d tx %p rx %p n %d] timed out.\n",
+						xfer->len, xfer->tx_buf,
+						xfer->rx_buf,
+						xfer->bits_per_word);
+		ret = -ETIMEDOUT;
+		handle_fifo_timeout(mas);
+	}
+geni_transfer_one_exit:
+	return ret;
+}
+
+static void geni_spi_handle_tx(struct spi_geni_master *mas)
+{
+	int i = 0;
+	int tx_fifo_width = (mas->tx_fifo_width >> 3);
+	int max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * tx_fifo_width;
+	const u8 *tx_buf = mas->cur_xfer->tx_buf;
+
+	tx_buf += (mas->cur_xfer->len - mas->tx_rem_bytes);
+	max_bytes = min_t(int, mas->tx_rem_bytes, max_bytes);
+	while (i < max_bytes) {
+		int j;
+		u32 fifo_word = 0;
+		u8 *fifo_byte;
+		int bytes_to_write = min_t(int, (max_bytes - i), tx_fifo_width);
+
+		fifo_byte = (u8 *)&fifo_word;
+		for (j = 0; j < bytes_to_write; j++)
+			fifo_byte[j] = tx_buf[i++];
+		geni_write_reg(fifo_word, mas->base, SE_GENI_TX_FIFOn);
+		/* Ensure FIFO writes are written in order */
+		mb();
+	}
+	mas->tx_rem_bytes -= max_bytes;
+	if (!mas->tx_rem_bytes) {
+		geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
+		/* Barrier here before return to prevent further ISRs */
+		mb();
+	}
+}
+
/*
 * geni_spi_handle_rx() - drain received bytes from the RX FIFO.
 *
 * Runs from the IRQ handler on RX watermark / RX last interrupts.
 * Computes how many bytes are available from the FIFO word count and,
 * for the final partially-filled word, the last-byte-valid field, then
 * unpacks register reads into the transfer's rx buffer.
 */
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	int i = 0;
	/* RX shares the TX FIFO word width; convert bits to bytes. */
	int fifo_width = (mas->tx_fifo_width >> 3);
	u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
	int rx_bytes = 0;
	int rx_wc = 0;
	u8 *rx_buf = mas->cur_xfer->rx_buf;

	rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
	if (rx_fifo_status & RX_LAST) {
		int rx_last_byte_valid =
			(rx_fifo_status & RX_LAST_BYTE_VALID_MSK)
					>> RX_LAST_BYTE_VALID_SHFT;
		/*
		 * The last FIFO word may be partial: count it separately by
		 * its valid-byte count instead of a full fifo_width word.
		 * A value of 0 or 4 means the last word is full.
		 */
		if (rx_last_byte_valid && (rx_last_byte_valid < 4)) {
			rx_wc -= 1;
			rx_bytes += rx_last_byte_valid;
		}
	}
	rx_bytes += rx_wc * fifo_width;
	/* Never read past what the transfer still expects. */
	rx_bytes = min_t(int, mas->rx_rem_bytes, rx_bytes);
	rx_buf += (mas->cur_xfer->len - mas->rx_rem_bytes);
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte;
		int read_bytes = min_t(int, (rx_bytes - i), fifo_width);
		int j;

		fifo_word = geni_read_reg(mas->base, SE_GENI_RX_FIFOn);
		fifo_byte = (u8 *)&fifo_word;
		for (j = 0; j < read_bytes; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}
+
+static irqreturn_t geni_spi_irq(int irq, void *dev)
+{
+	struct spi_geni_master *mas = dev;
+	u32 m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
+
+	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+		geni_spi_handle_rx(mas);
+
+	if ((m_irq & M_TX_FIFO_WATERMARK_EN))
+		geni_spi_handle_tx(mas);
+
+	if ((m_irq & M_CMD_DONE_EN) || (m_irq & M_CMD_CANCEL_EN) ||
+		(m_irq & M_CMD_ABORT_EN)) {
+		complete(&mas->xfer_done);
+	}
+	geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
+	return IRQ_HANDLED;
+}
+
+static int spi_geni_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct spi_master *spi;
+	struct spi_geni_master *geni_mas;
+	struct se_geni_rsc *rsc;
+	struct resource *res;
+
+	spi = spi_alloc_master(&pdev->dev, sizeof(struct spi_geni_master));
+	if (!spi) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to alloc spi struct\n");
+		goto spi_geni_probe_err;
+	}
+
+	platform_set_drvdata(pdev, spi);
+	geni_mas = spi_master_get_devdata(spi);
+	rsc = &geni_mas->spi_rsc;
+	geni_mas->dev = &pdev->dev;
+	spi->dev.of_node = pdev->dev.of_node;
+	rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+	if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
+		dev_err(&pdev->dev, "No pinctrl config specified!\n");
+		ret = PTR_ERR(rsc->geni_pinctrl);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_active = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_DEFAULT);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_active)) {
+		dev_err(&pdev->dev, "No default config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_active);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->geni_gpio_sleep = pinctrl_lookup_state(rsc->geni_pinctrl,
+							PINCTRL_SLEEP);
+	if (IS_ERR_OR_NULL(rsc->geni_gpio_sleep)) {
+		dev_err(&pdev->dev, "No sleep config specified!\n");
+		ret = PTR_ERR(rsc->geni_gpio_sleep);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->se_clk = devm_clk_get(&pdev->dev, "se-clk");
+	if (IS_ERR(rsc->se_clk)) {
+		ret = PTR_ERR(rsc->se_clk);
+		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
+	if (IS_ERR(rsc->m_ahb_clk)) {
+		ret = PTR_ERR(rsc->m_ahb_clk);
+		dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	rsc->s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
+	if (IS_ERR(rsc->s_ahb_clk)) {
+		ret = PTR_ERR(rsc->s_ahb_clk);
+		dev_err(&pdev->dev, "Err getting S AHB clk %d\n", ret);
+		goto spi_geni_probe_err;
+	}
+
+	if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+				&spi->max_speed_hz)) {
+		dev_err(&pdev->dev, "Max frequency not specified.\n");
+		ret = -ENXIO;
+		goto spi_geni_probe_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "se_phys");
+	if (!res) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "Err getting IO region\n");
+		goto spi_geni_probe_err;
+	}
+
+	geni_mas->phys_addr = res->start;
+	geni_mas->size = resource_size(res);
+	geni_mas->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!geni_mas->base) {
+		ret = -ENOMEM;
+		dev_err(&pdev->dev, "Err IO mapping iomem\n");
+		goto spi_geni_probe_err;
+	}
+
+	geni_mas->irq = platform_get_irq(pdev, 0);
+	if (geni_mas->irq < 0) {
+		dev_err(&pdev->dev, "Err getting IRQ\n");
+		ret = geni_mas->irq;
+		goto spi_geni_probe_unmap;
+	}
+	ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
+			       IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
+	if (ret) {
+		dev_err(&pdev->dev, "Request_irq failed:%d: err:%d\n",
+				   geni_mas->irq, ret);
+		goto spi_geni_probe_unmap;
+	}
+
+	spi->mode_bits = (SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH);
+	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+	spi->num_chipselect = SPI_NUM_CHIPSELECT;
+	spi->prepare_transfer_hardware = spi_geni_prepare_transfer_hardware;
+	spi->prepare_message = spi_geni_prepare_message;
+	spi->unprepare_message = spi_geni_unprepare_message;
+	spi->transfer_one = spi_geni_transfer_one;
+	spi->unprepare_transfer_hardware
+			= spi_geni_unprepare_transfer_hardware;
+	spi->auto_runtime_pm = false;
+
+	init_completion(&geni_mas->xfer_done);
+	pm_runtime_enable(&pdev->dev);
+	ret = spi_register_master(spi);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register SPI master\n");
+		goto spi_geni_probe_unmap;
+	}
+	return ret;
+spi_geni_probe_unmap:
+	devm_iounmap(&pdev->dev, geni_mas->base);
+spi_geni_probe_err:
+	spi_master_put(spi);
+	return ret;
+}
+
+static int spi_geni_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct spi_geni_master *geni_mas = spi_master_get_devdata(master);
+
+	spi_unregister_master(master);
+	se_geni_resources_off(&geni_mas->spi_rsc);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
#ifdef CONFIG_PM
/* Runtime suspend: turn off SE clocks and move pins to sleep state. */
static int spi_geni_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	ret = se_geni_resources_off(&geni_mas->spi_rsc);
	return ret;
}

/* Runtime resume: re-enable SE clocks and restore the active pin state. */
static int spi_geni_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *spi = get_spi_master(dev);
	struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);

	ret = se_geni_resources_on(&geni_mas->spi_rsc);
	return ret;
}

/* System resume: nothing to do; runtime PM restores resources on demand. */
static int spi_geni_resume(struct device *dev)
{
	return 0;
}

/*
 * System suspend: refuse to suspend while the device is runtime-active
 * (i.e. a transfer still holds the SE resources).
 */
static int spi_geni_suspend(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev))
		return -EBUSY;
	return 0;
}
#else
/* !CONFIG_PM stubs: power management compiled out, all ops succeed. */
static int spi_geni_runtime_suspend(struct device *dev)
{
	return 0;
}

static int spi_geni_runtime_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_resume(struct device *dev)
{
	return 0;
}

static int spi_geni_suspend(struct device *dev)
{
	return 0;
}
#endif
+
/* Wire the runtime and system sleep callbacks defined above. */
static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

/* Device-tree match table: one compatible for all GENI SPI instances. */
static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,spi-geni" },
	{}
};

static struct platform_driver spi_geni_driver = {
	.probe  = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "spi_geni",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_geni");
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 7e6f8d8..9ea4a9f 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -504,9 +504,10 @@
 	return 0;
 }
 
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
-			     size_t align, unsigned int heap_id_mask,
-			     unsigned int flags)
+static struct ion_handle *__ion_alloc(
+		struct ion_client *client, size_t len,
+		size_t align, unsigned int heap_id_mask,
+		unsigned int flags, bool grab_handle)
 {
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
@@ -605,6 +606,8 @@
 		return handle;
 
 	mutex_lock(&client->lock);
+	if (grab_handle)
+		ion_handle_get(handle);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
@@ -614,6 +617,13 @@
 
 	return handle;
 }
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
+}
 EXPORT_SYMBOL(ion_alloc);
 
 static void ion_free_nolock(struct ion_client *client,
@@ -1524,10 +1534,10 @@
 	{
 		struct ion_handle *handle;
 
-		handle = ion_alloc(client, data.allocation.len,
-				   data.allocation.align,
-				   data.allocation.heap_id_mask,
-				   data.allocation.flags);
+		handle = __ion_alloc(client, data.allocation.len,
+				     data.allocation.align,
+				     data.allocation.heap_id_mask,
+				     data.allocation.flags, true);
 		if (IS_ERR(handle))
 			return PTR_ERR(handle);
 
@@ -1605,11 +1615,15 @@
 
 	if (dir & _IOC_READ) {
 		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
-			if (cleanup_handle)
+			if (cleanup_handle) {
 				ion_free(client, cleanup_handle);
+				ion_handle_put(cleanup_handle);
+			}
 			return -EFAULT;
 		}
 	}
+	if (cleanup_handle)
+		ion_handle_put(cleanup_handle);
 	return ret;
 }
 
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 2c1b4cf..323bb0c 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -279,11 +279,16 @@
 
 	source_vm = VMID_HLOS;
 	dest_vm = get_secure_vmid(flags);
+
 	if (dest_vm < 0) {
 		pr_err("%s: Failed to get secure vmid\n", __func__);
 		return -EINVAL;
 	}
-	dest_perms = PERM_READ | PERM_WRITE;
+
+	if (dest_vm == VMID_CP_SEC_DISPLAY)
+		dest_perms = PERM_READ;
+	else
+		dest_perms = PERM_READ | PERM_WRITE;
 
 	ret = ion_cma_allocate(heap, buffer, len, align, flags);
 	if (ret) {
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index e75166a..43d3f92 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -325,8 +325,9 @@
 
 	switch (heap_data->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
-		heap = ion_system_contig_heap_create(heap_data);
-		break;
+		pr_err("%s: Heap type is disabled: %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
 	case ION_HEAP_TYPE_SYSTEM:
 		heap = ion_system_heap_create(heap_data);
 		break;
@@ -366,7 +367,8 @@
 
 	switch (heap->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
-		ion_system_contig_heap_destroy(heap);
+		pr_err("%s: Heap type is disabled: %d\n", __func__,
+		       heap->type);
 		break;
 	case ION_HEAP_TYPE_SYSTEM:
 		ion_system_heap_destroy(heap);
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 6bcce90..ad6028f 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -205,11 +205,16 @@
 		split_page(page, order);
 		break;
 	}
-	/* Return the remaining order-0 pages to the pool */
-	if (page)
-		for (j = 1; j < (1 << order); j++)
+	/*
+	 * Return the remaining order-0 pages to the pool.
+	 * SetPagePrivate flag to mark memory as secure.
+	 */
+	if (page) {
+		for (j = 1; j < (1 << order); j++) {
+			SetPagePrivate(page + j);
 			free_buffer_page(heap, buffer, page + j, 0);
-
+		}
+	}
 got_page:
 	mutex_unlock(&heap->split_page_mutex);
 
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
index 243119d..ae9bf5f 100644
--- a/drivers/staging/android/ion/msm/msm_ion.c
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -301,13 +301,23 @@
 	};
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
+		unsigned int sg_offset, sg_left, size = 0;
+
 		len += sg->length;
-		if (len < offset)
+		if (len <= offset)
 			continue;
 
-		__do_cache_ops(sg_page(sg), sg->offset, sg->length, op);
+		sg_left = len - offset;
+		sg_offset = sg->length - sg_left;
 
-		if (len > length + offset)
+		size = (length < sg_left) ? length : sg_left;
+
+		__do_cache_ops(sg_page(sg), sg_offset, size, op);
+
+		offset += size;
+		length -= size;
+
+		if (length == 0)
 			break;
 	}
 	return 0;
@@ -356,6 +366,15 @@
 }
 EXPORT_SYMBOL(msm_ion_do_cache_op);
 
+int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, offset, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_offset_op);
+
 static void msm_ion_allocate(struct ion_platform_heap *heap)
 {
 	if (!heap->base && heap->extra_data) {
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
index 68cc8b0..55b02b6 100644
--- a/drivers/staging/android/ion/msm/msm_ion.h
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -167,6 +167,11 @@
 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *vaddr, unsigned long len, unsigned int cmd);
 
+int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd);
+
 #else
 static inline struct ion_client *msm_ion_client_create(const char *name)
 {
@@ -187,6 +192,14 @@
 	return -ENODEV;
 }
 
+int msm_ion_do_cache_offset_op(
+		struct ion_client *client, struct ion_handle *handle,
+		void *vaddr, unsigned int offset, unsigned long len,
+		unsigned int cmd)
+{
+	return -ENODEV;
+}
+
 #endif /* CONFIG_ION */
 
 #endif
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 574da15..7a2d45b 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -88,6 +88,27 @@
 	 driver. This console is used through a JTAG only on ARM. If you don't have
 	 a JTAG then you probably don't want this option.
 
+config HVC_DCC_SERIALIZE_SMP
+	bool "Use DCC only on core 0"
+	depends on SMP && HVC_DCC
+	help
+	  Some debuggers, such as Trace32 from Lauterbach GmbH, do not handle
+	  reads/writes from/to DCC on more than one core.  Each core has its
+	  own DCC device registers, so when a core reads or writes from/to DCC,
+	  it only accesses its own DCC device.  Since kernel code can run on
+	  any core, every time the kernel wants to write to the console, it
+	  might write to a different DCC.
+
+	  In SMP mode, Trace32 only uses the DCC on core 0.  In AMP mode, it
+	  creates multiple windows, and each window shows the DCC output
+	  only from that core's DCC.  The result is that console output is
+	  either lost or scattered across windows.
+
+	  Selecting this option will enable code that serializes all console
+	  input and output to core 0.  The DCC driver will create input and
+	  output FIFOs that all cores will use.  Reads and writes from/to DCC
+	  are handled by a workqueue that runs only on core 0.
+
 config HVC_BFIN_JTAG
 	bool "Blackfin JTAG console"
 	depends on BLACKFIN
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 82f240f..c987697 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, 2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, 2014, 2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,10 @@
  */
 
 #include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/console.h>
 
 #include <asm/dcc.h>
 #include <asm/processor.h>
@@ -48,6 +52,12 @@
 	return i;
 }
 
+/*
+ * Check if the DCC is enabled.  If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core 0.  That
+ * way, dcc_core0_available will be true only if it's available on core 0.
+ */
+#ifndef CONFIG_HVC_DCC_SERIALIZE_SMP
 static bool hvc_dcc_check(void)
 {
 	unsigned long time = jiffies + (HZ / 10);
@@ -62,12 +72,173 @@
 
 	return false;
 }
+#endif
+
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+static bool hvc_dcc_check(void)
+{
+	unsigned long time = jiffies + (HZ / 10);
+
+	static bool dcc_core0_available;
+
+	/*
+	 * If we're not on core 0, but we previously confirmed that DCC is
+	 * active, then just return true.
+	 */
+	if (smp_processor_id() && dcc_core0_available)
+		return true;
+
+	/* Write a test character to check if it is handled */
+	__dcc_putchar('\n');
+
+	while (time_is_after_jiffies(time)) {
+		if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+			dcc_core0_available = true;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void dcc_put_work_fn(struct work_struct *work);
+static void dcc_get_work_fn(struct work_struct *work);
+static DECLARE_WORK(dcc_pwork, dcc_put_work_fn);
+static DECLARE_WORK(dcc_gwork, dcc_get_work_fn);
+static DEFINE_SPINLOCK(dcc_lock);
+static DEFINE_KFIFO(inbuf, unsigned char, 128);
+static DEFINE_KFIFO(outbuf, unsigned char, 1024);
+
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	/* While there's data in the output FIFO, write it to the DCC */
+	while (kfifo_get(&outbuf, &ch))
+		hvc_dcc_put_chars(0, &ch, 1);
+
+	/* While we're at it, check for any input characters */
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+	unsigned long irqflags;
+
+	/*
+	 * Read characters from DCC and put them into the input FIFO, as
+	 * long as there is room and we have characters to read.
+	 */
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+}
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(uint32_t vt, const char *buf,
+					     int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+	if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+		len = kfifo_in(&outbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+		/*
+		 * We just push data to the output FIFO, so schedule the
+		 * workqueue that will actually write that data to DCC.
+		 */
+		schedule_work_on(0, &dcc_pwork);
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * write the data to DCC.
+	 */
+	len = hvc_dcc_put_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(uint32_t vt, char *buf, int count)
+{
+	int len;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dcc_lock, irqflags);
+
+	if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+		len = kfifo_out(&inbuf, buf, count);
+		spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+		/*
+		 * If the FIFO was empty, there may be characters in the DCC
+		 * that we haven't read yet.  Schedule a workqueue to fill
+		 * the input FIFO, so that the next time this function is
+		 * called, we'll have data.
+		 */
+		if (!len)
+			schedule_work_on(0, &dcc_gwork);
+
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * read the data from DCC.
+	 */
+	len = hvc_dcc_get_chars(vt, buf, count);
+	spin_unlock_irqrestore(&dcc_lock, irqflags);
+
+	return len;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+	.get_chars = hvc_dcc0_get_chars,
+	.put_chars = hvc_dcc0_put_chars,
+};
+
+#else
 
 static const struct hv_ops hvc_dcc_get_put_ops = {
 	.get_chars = hvc_dcc_get_chars,
 	.put_chars = hvc_dcc_put_chars,
 };
 
+#endif
+
 static int __init hvc_dcc_console_init(void)
 {
 	int ret;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4d09bd4..6e3e636 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -52,6 +52,7 @@
 	struct pci_dev		*dev;
 	unsigned int		nr;
 	struct pci_serial_quirk	*quirk;
+	const struct pciserial_board *board;
 	int			line[0];
 };
 
@@ -3871,6 +3872,7 @@
 		}
 	}
 	priv->nr = i;
+	priv->board = board;
 	return priv;
 
 err_deinit:
@@ -3881,7 +3883,7 @@
 }
 EXPORT_SYMBOL_GPL(pciserial_init_ports);
 
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
 {
 	struct pci_serial_quirk *quirk;
 	int i;
@@ -3895,7 +3897,11 @@
 	quirk = find_quirk(priv->dev);
 	if (quirk->exit)
 		quirk->exit(priv->dev);
+}
 
+void pciserial_remove_ports(struct serial_private *priv)
+{
+	pciserial_detach_ports(priv);
 	kfree(priv);
 }
 EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5590,7 +5596,7 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	if (priv)
-		pciserial_suspend_ports(priv);
+		pciserial_detach_ports(priv);
 
 	pci_disable_device(dev);
 
@@ -5615,9 +5621,18 @@
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
+	const struct pciserial_board *board;
 
-	if (priv)
-		pciserial_resume_ports(priv);
+	if (!priv)
+		return;
+
+	board = priv->board;
+	kfree(priv);
+	priv = pciserial_init_ports(dev, board);
+
+	if (!IS_ERR(priv)) {
+		pci_set_drvdata(dev, priv);
+	}
 }
 
 static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index f44615f..3e2ef4f 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1036,8 +1036,10 @@
 	if (ourport->dma) {
 		ret = s3c24xx_serial_request_dma(ourport);
 		if (ret < 0) {
-			dev_warn(port->dev, "DMA request failed\n");
-			return ret;
+			dev_warn(port->dev,
+				 "DMA request failed, DMA will not be used\n");
+			devm_kfree(port->dev, ourport->dma);
+			ourport->dma = NULL;
 		}
 	}
 
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 98e39f9..a6cd44a 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -63,7 +63,7 @@
 	int		i, size;
 
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!hcd->self.controller->dma_mask &&
+	    (!is_device_dma_capable(hcd->self.sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM)))
 		return 0;
 
@@ -72,7 +72,7 @@
 		if (!size)
 			continue;
 		snprintf(name, sizeof(name), "buffer-%d", size);
-		hcd->pool[i] = dma_pool_create(name, hcd->self.controller,
+		hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
 				size, size, 0);
 		if (!hcd->pool[i]) {
 			hcd_buffer_destroy(hcd);
@@ -127,7 +127,7 @@
 
 	/* some USB hosts just use PIO */
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!bus->controller->dma_mask &&
+	    (!is_device_dma_capable(bus->sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM))) {
 		*dma = ~(dma_addr_t) 0;
 		return kmalloc(size, mem_flags);
@@ -137,7 +137,7 @@
 		if (size <= pool_max[i])
 			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
 	}
-	return dma_alloc_coherent(hcd->self.controller, size, dma, mem_flags);
+	return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
 }
 
 void hcd_buffer_free(
@@ -154,7 +154,7 @@
 		return;
 
 	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!bus->controller->dma_mask &&
+	    (!is_device_dma_capable(bus->sysdev) &&
 	     !(hcd->driver->flags & HCD_LOCAL_MEM))) {
 		kfree(addr);
 		return;
@@ -166,5 +166,5 @@
 			return;
 		}
 	}
-	dma_free_coherent(hcd->self.controller, size, addr, dma);
+	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
 }
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8d..26a305f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1441,6 +1441,9 @@
 {
 	struct usb_device	*udev = to_usb_device(dev);
 
+	if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
+		return 0;
+
 	unbind_no_pm_drivers_interfaces(udev);
 
 	/* From now on we are sure all drivers support suspend/resume
@@ -1470,6 +1473,15 @@
 	struct usb_device	*udev = to_usb_device(dev);
 	int			status;
 
+	/*
+	 * Some buses would like to keep their devices in suspend
+	 * state after system resume.  Their resume happens when
+	 * a remote wakeup is detected or an interface driver starts
+	 * I/O.
+	 */
+	if (udev->bus->skip_resume)
+		return 0;
+
 	/* For all calls, take the device back to full power and
 	 * tell the PM core in case it was autosuspended previously.
 	 * Unbind the interfaces that will need rebinding later,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 479e223..2c4bd54 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1073,6 +1073,7 @@
 static int register_root_hub(struct usb_hcd *hcd)
 {
 	struct device *parent_dev = hcd->self.controller;
+	struct device *sysdev = hcd->self.sysdev;
 	struct usb_device *usb_dev = hcd->self.root_hub;
 	const int devnum = 1;
 	int retval;
@@ -1119,7 +1120,7 @@
 		/* Did the HC die before the root hub was registered? */
 		if (HCD_DEAD(hcd))
 			usb_hc_died (hcd);	/* This time clean up */
-		usb_dev->dev.of_node = parent_dev->of_node;
+		usb_dev->dev.of_node = sysdev->of_node;
 	}
 	mutex_unlock(&usb_bus_idr_lock);
 
@@ -1465,19 +1466,19 @@
 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	if (IS_ENABLED(CONFIG_HAS_DMA) &&
 	    (urb->transfer_flags & URB_DMA_MAP_SG))
-		dma_unmap_sg(hcd->self.controller,
+		dma_unmap_sg(hcd->self.sysdev,
 				urb->sg,
 				urb->num_sgs,
 				dir);
 	else if (IS_ENABLED(CONFIG_HAS_DMA) &&
 		 (urb->transfer_flags & URB_DMA_MAP_PAGE))
-		dma_unmap_page(hcd->self.controller,
+		dma_unmap_page(hcd->self.sysdev,
 				urb->transfer_dma,
 				urb->transfer_buffer_length,
 				dir);
 	else if (IS_ENABLED(CONFIG_HAS_DMA) &&
 		 (urb->transfer_flags & URB_DMA_MAP_SINGLE))
-		dma_unmap_single(hcd->self.controller,
+		dma_unmap_single(hcd->self.sysdev,
 				urb->transfer_dma,
 				urb->transfer_buffer_length,
 				dir);
@@ -1520,11 +1521,11 @@
 			return ret;
 		if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
 			urb->setup_dma = dma_map_single(
-					hcd->self.controller,
+					hcd->self.sysdev,
 					urb->setup_packet,
 					sizeof(struct usb_ctrlrequest),
 					DMA_TO_DEVICE);
-			if (dma_mapping_error(hcd->self.controller,
+			if (dma_mapping_error(hcd->self.sysdev,
 						urb->setup_dma))
 				return -EAGAIN;
 			urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
@@ -1555,7 +1556,7 @@
 				}
 
 				n = dma_map_sg(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						urb->sg,
 						urb->num_sgs,
 						dir);
@@ -1570,12 +1571,12 @@
 			} else if (urb->sg) {
 				struct scatterlist *sg = urb->sg;
 				urb->transfer_dma = dma_map_page(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						sg_page(sg),
 						sg->offset,
 						urb->transfer_buffer_length,
 						dir);
-				if (dma_mapping_error(hcd->self.controller,
+				if (dma_mapping_error(hcd->self.sysdev,
 						urb->transfer_dma))
 					ret = -EAGAIN;
 				else
@@ -1585,11 +1586,11 @@
 				ret = -EAGAIN;
 			} else {
 				urb->transfer_dma = dma_map_single(
-						hcd->self.controller,
+						hcd->self.sysdev,
 						urb->transfer_buffer,
 						urb->transfer_buffer_length,
 						dir);
-				if (dma_mapping_error(hcd->self.controller,
+				if (dma_mapping_error(hcd->self.sysdev,
 						urb->transfer_dma))
 					ret = -EAGAIN;
 				else
@@ -2228,8 +2229,65 @@
 	return hcd->driver->get_frame_number (hcd);
 }
 
+int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_setup(hcd, intr_num);
+}
+
+int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
+}
+
 /*-------------------------------------------------------------------------*/
 
+dma_addr_t
+usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+	unsigned int intr_num)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
+}
+
+dma_addr_t
+usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_dcba_dma_addr(hcd, udev);
+}
+
+dma_addr_t
+usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+		struct usb_host_endpoint *ep)
+{
+	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);
+
+	if (!HCD_RH_RUNNING(hcd))
+		return 0;
+
+	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
+}
+
 #ifdef	CONFIG_PM
 
 int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
@@ -2495,24 +2553,8 @@
 	tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
 }
 
-/**
- * usb_create_shared_hcd - create and initialize an HCD structure
- * @driver: HC driver that will use this hcd
- * @dev: device for this HC, stored in hcd->self.controller
- * @bus_name: value to store in hcd->self.bus_name
- * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
- *              PCI device.  Only allocate certain resources for the primary HCD
- * Context: !in_interrupt()
- *
- * Allocate a struct usb_hcd, with extra space at the end for the
- * HC driver's private data.  Initialize the generic members of the
- * hcd structure.
- *
- * Return: On success, a pointer to the created and initialized HCD structure.
- * On failure (e.g. if memory is unavailable), %NULL.
- */
-struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
-		struct device *dev, const char *bus_name,
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+		struct device *sysdev, struct device *dev, const char *bus_name,
 		struct usb_hcd *primary_hcd)
 {
 	struct usb_hcd *hcd;
@@ -2553,8 +2595,9 @@
 
 	usb_bus_init(&hcd->self);
 	hcd->self.controller = dev;
+	hcd->self.sysdev = sysdev;
 	hcd->self.bus_name = bus_name;
-	hcd->self.uses_dma = (dev->dma_mask != NULL);
+	hcd->self.uses_dma = (sysdev->dma_mask != NULL);
 
 	init_timer(&hcd->rh_timer);
 	hcd->rh_timer.function = rh_timer_func;
@@ -2569,6 +2612,30 @@
 			"USB Host Controller";
 	return hcd;
 }
+EXPORT_SYMBOL_GPL(__usb_create_hcd);
+
+/**
+ * usb_create_shared_hcd - create and initialize an HCD structure
+ * @driver: HC driver that will use this hcd
+ * @dev: device for this HC, stored in hcd->self.controller
+ * @bus_name: value to store in hcd->self.bus_name
+ * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
+ *              PCI device.  Only allocate certain resources for the primary HCD
+ * Context: !in_interrupt()
+ *
+ * Allocate a struct usb_hcd, with extra space at the end for the
+ * HC driver's private data.  Initialize the generic members of the
+ * hcd structure.
+ *
+ * Return: On success, a pointer to the created and initialized HCD structure.
+ * On failure (e.g. if memory is unavailable), %NULL.
+ */
+struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+		struct device *dev, const char *bus_name,
+		struct usb_hcd *primary_hcd)
+{
+	return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
+}
 EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
 
 /**
@@ -2588,7 +2655,7 @@
 struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name)
 {
-	return usb_create_shared_hcd(driver, dev, bus_name, NULL);
+	return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
 }
 EXPORT_SYMBOL_GPL(usb_create_hcd);
 
@@ -2715,7 +2782,7 @@
 	struct usb_device *rhdev;
 
 	if (IS_ENABLED(CONFIG_USB_PHY) && !hcd->usb_phy) {
-		struct usb_phy *phy = usb_get_phy_dev(hcd->self.controller, 0);
+		struct usb_phy *phy = usb_get_phy_dev(hcd->self.sysdev, 0);
 
 		if (IS_ERR(phy)) {
 			retval = PTR_ERR(phy);
@@ -2733,7 +2800,7 @@
 	}
 
 	if (IS_ENABLED(CONFIG_GENERIC_PHY) && !hcd->phy) {
-		struct phy *phy = phy_get(hcd->self.controller, "usb");
+		struct phy *phy = phy_get(hcd->self.sysdev, "usb");
 
 		if (IS_ERR(phy)) {
 			retval = PTR_ERR(phy);
@@ -2781,7 +2848,7 @@
 	 */
 	retval = hcd_buffer_create(hcd);
 	if (retval != 0) {
-		dev_dbg(hcd->self.controller, "pool alloc failed\n");
+		dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
 		goto err_create_buf;
 	}
 
@@ -2791,7 +2858,7 @@
 
 	rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
 	if (rhdev == NULL) {
-		dev_err(hcd->self.controller, "unable to allocate root hub\n");
+		dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
 		retval = -ENOMEM;
 		goto err_allocate_root_hub;
 	}
@@ -2966,6 +3033,9 @@
 	cancel_work_sync(&hcd->wakeup_work);
 #endif
 
+	/* handle any pending hub events before XHCI stops */
+	usb_flush_hub_wq();
+
 	mutex_lock(&usb_bus_idr_lock);
 	usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
 	mutex_unlock(&usb_bus_idr_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aef81a1..ffa53d8 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,11 @@
 /* synchronize hub-port add/remove and peering operations */
 DEFINE_MUTEX(usb_port_peer_mutex);
 
+static bool skip_extended_resume_delay = 1;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+		"removes extra delay added to finish bus resume");
+
 /* cycle leds on hubs that aren't blinking for attention */
 static bool blinkenlights;
 module_param(blinkenlights, bool, S_IRUGO);
@@ -627,6 +632,12 @@
 		kick_hub_wq(hub);
 }
 
+void usb_flush_hub_wq(void)
+{
+	flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
 /*
  * Let the USB core know that a USB 3.0 device has sent a Function Wake Device
  * Notification, which indicates it had initiated remote wakeup.
@@ -3398,7 +3409,9 @@
 		/* drive resume for USB_RESUME_TIMEOUT msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
-		msleep(USB_RESUME_TIMEOUT);
+		if (!skip_extended_resume_delay)
+			usleep_range(USB_RESUME_TIMEOUT * 1000,
+					(USB_RESUME_TIMEOUT + 1) * 1000);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
 		 * stop resume signaling.  Then finish the resume
@@ -3407,7 +3420,7 @@
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
 
 		/* TRSMRCY = 10 msec */
-		msleep(10);
+		usleep_range(10000, 10500);
 	}
 
  SuspendCleared:
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 5921514..7272f9a 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -450,9 +450,9 @@
 	 * Note: calling dma_set_mask() on a USB device would set the
 	 * mask for the entire HCD, so don't do that.
 	 */
-	dev->dev.dma_mask = bus->controller->dma_mask;
-	dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
-	set_dev_node(&dev->dev, dev_to_node(bus->controller));
+	dev->dev.dma_mask = bus->sysdev->dma_mask;
+	dev->dev.dma_pfn_offset = bus->sysdev->dma_pfn_offset;
+	set_dev_node(&dev->dev, dev_to_node(bus->sysdev));
 	dev->state = USB_STATE_ATTACHED;
 	dev->lpm_disable_count = 1;
 	atomic_set(&dev->urbnum, 0);
@@ -685,6 +685,54 @@
 }
 EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
 
+int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_sec_event_ring_setup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_setup);
+
+int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_sec_event_ring_cleanup);
+
+dma_addr_t
+usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);
+}
+EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);
+
+dma_addr_t
+usb_get_dcba_dma_addr(struct usb_device *dev)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_dcba_dma_addr(dev);
+}
+EXPORT_SYMBOL(usb_get_dcba_dma_addr);
+
+dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep)
+{
+	if (dev->state == USB_STATE_NOTATTACHED)
+		return 0;
+
+	return usb_hcd_get_xfer_ring_dma_addr(dev, ep);
+}
+EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);
+
 /*-------------------------------------------------------------------*/
 /*
  * __usb_get_extra_descriptor() finds a descriptor of specific type in the
@@ -800,7 +848,7 @@
 	if (!urb
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return NULL;
 
 	if (controller->dma_mask) {
@@ -838,7 +886,7 @@
 			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return;
 
 	if (controller->dma_mask) {
@@ -872,7 +920,7 @@
 			|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
 			|| !urb->dev
 			|| !(bus = urb->dev->bus)
-			|| !(controller = bus->controller))
+			|| !(controller = bus->sysdev))
 		return;
 
 	if (controller->dma_mask) {
@@ -922,7 +970,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return -EINVAL;
 
@@ -958,7 +1006,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return;
 
@@ -986,7 +1034,7 @@
 
 	if (!dev
 			|| !(bus = dev->bus)
-			|| !(controller = bus->controller)
+			|| !(controller = bus->sysdev)
 			|| !controller->dma_mask)
 		return;
 
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ee06d07..8c52a13 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1547,6 +1547,10 @@
 	struct dwc3	*dwc = dev_get_drvdata(dev);
 	int		ret;
 
+	/* Check if platform glue driver handling PM, if not then handle here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT))
+		return 0;
+
 	ret = dwc3_suspend_common(dwc);
 	if (ret)
 		return ret;
@@ -1561,6 +1565,10 @@
 	struct dwc3	*dwc = dev_get_drvdata(dev);
 	int		ret;
 
+	/* Check if platform glue driver handling PM, if not then handle here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT))
+		return 0;
+
 	pinctrl_pm_select_default_state(dev);
 
 	ret = dwc3_resume_common(dwc);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 29e80cc..5dd1832 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -249,6 +249,7 @@
 		val = dwc3_omap_read_utmi_ctrl(omap);
 		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
 		dwc3_omap_write_utmi_ctrl(omap, val);
+		break;
 
 	case OMAP_DWC3_VBUS_OFF:
 		val = dwc3_omap_read_utmi_ctrl(omap);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 645cfff..990f423 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -28,23 +28,23 @@
 #define gadget_to_dwc(g)	(container_of(g, struct dwc3, gadget))
 
 /* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n)		((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n)		(((n) & 0x1f) << 0)
 #define DWC3_DEPCFG_XFER_COMPLETE_EN	(1 << 8)
 #define DWC3_DEPCFG_XFER_IN_PROGRESS_EN	(1 << 9)
 #define DWC3_DEPCFG_XFER_NOT_READY_EN	(1 << 10)
 #define DWC3_DEPCFG_FIFO_ERROR_EN	(1 << 11)
 #define DWC3_DEPCFG_STREAM_EVENT_EN	(1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n)	((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n)	(((n) & 0xff) << 16)
 #define DWC3_DEPCFG_STREAM_CAPABLE	(1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n)	((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n)	(((n) & 0x1f) << 25)
 #define DWC3_DEPCFG_BULK_BASED		(1 << 30)
 #define DWC3_DEPCFG_FIFO_BASED		(1 << 31)
 
 /* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n)		((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n)	((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n)	((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n)		(((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n)	(((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n)	(((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n)	(((n) & 0xf) << 22)
 #define DWC3_DEPCFG_DATA_SEQ_NUM(n)	((n) << 26)
 /* This applies for core versions earlier than 1.94a */
 #define DWC3_DEPCFG_IGN_SEQ_NUM		(1 << 31)
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e837536..d2fbed7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1428,17 +1428,39 @@
 	 */
 	if (!ncm_opts->bound) {
 		mutex_lock(&ncm_opts->lock);
+		ncm_opts->net = gether_setup_default();
+		if (IS_ERR(ncm_opts->net)) {
+			status = PTR_ERR(ncm_opts->net);
+			mutex_unlock(&ncm_opts->lock);
+			goto error;
+		}
 		gether_set_gadget(ncm_opts->net, cdev->gadget);
 		status = gether_register_netdev(ncm_opts->net);
 		mutex_unlock(&ncm_opts->lock);
-		if (status)
-			return status;
+		if (status) {
+			free_netdev(ncm_opts->net);
+			goto error;
+		}
 		ncm_opts->bound = true;
 	}
+
+	/* export host's Ethernet address in CDC format */
+	status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+				      sizeof(ncm->ethaddr));
+	if (status < 12) { /* strlen("01234567890a") */
+		ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+		__func__, status);
+		status = -EINVAL;
+		goto netdev_cleanup;
+	}
+	ncm->port.ioport = netdev_priv(ncm_opts->net);
+
 	us = usb_gstrings_attach(cdev, ncm_strings,
 				 ARRAY_SIZE(ncm_string_defs));
-	if (IS_ERR(us))
-		return PTR_ERR(us);
+	if (IS_ERR(us)) {
+		status = PTR_ERR(us);
+		goto netdev_cleanup;
+	}
 	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
 	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
 	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1539,7 +1561,10 @@
 		kfree(ncm->notify_req->buf);
 		usb_ep_free_request(ncm->notify, ncm->notify_req);
 	}
+netdev_cleanup:
+	gether_cleanup(netdev_priv(ncm_opts->net));
 
+error:
 	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
 
 	return status;
@@ -1587,8 +1612,6 @@
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
-	else
-		free_netdev(opts->net);
 	kfree(opts);
 }
 
@@ -1601,12 +1624,6 @@
 		return ERR_PTR(-ENOMEM);
 	mutex_init(&opts->lock);
 	opts->func_inst.free_func_inst = ncm_free_inst;
-	opts->net = gether_setup_default();
-	if (IS_ERR(opts->net)) {
-		struct net_device *net = opts->net;
-		kfree(opts);
-		return ERR_CAST(net);
-	}
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
@@ -1629,6 +1646,8 @@
 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_ncm *ncm = func_to_ncm(f);
+	struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+					func_inst);
 
 	DBG(c->cdev, "ncm unbind\n");
 
@@ -1640,13 +1659,15 @@
 
 	kfree(ncm->notify_req->buf);
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	gether_cleanup(netdev_priv(opts->net));
+	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
 {
 	struct f_ncm		*ncm;
 	struct f_ncm_opts	*opts;
-	int status;
 
 	/* allocate and initialize one new instance */
 	ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1656,20 +1677,9 @@
 	opts = container_of(fi, struct f_ncm_opts, func_inst);
 	mutex_lock(&opts->lock);
 	opts->refcnt++;
-
-	/* export host's Ethernet address in CDC format */
-	status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
-				      sizeof(ncm->ethaddr));
-	if (status < 12) { /* strlen("01234567890a") */
-		kfree(ncm);
-		mutex_unlock(&opts->lock);
-		return ERR_PTR(-EINVAL);
-	}
 	ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
 	spin_lock_init(&ncm->lock);
 	ncm_reset_values(ncm);
-	ncm->port.ioport = netdev_priv(opts->net);
 	mutex_unlock(&opts->lock);
 	ncm->port.is_fixed = true;
 	ncm->port.supports_multi_frame = true;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 27ed51b..29b41b5 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,13 +258,6 @@
 	memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
 	v4l2_event_queue(&uvc->vdev, &v4l2_event);
 
-	/* Pass additional setup data to userspace */
-	if (uvc->event_setup_out && uvc->event_length) {
-		uvc->control_req->length = uvc->event_length;
-		return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
-			uvc->control_req, GFP_ATOMIC);
-	}
-
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 45bc997..a95b3e7 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@
 			dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
 			goto err;
 		}
-		ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+		sprintf(ep->name, "ep%d", ep->index);
+		ep->ep.name = ep->name;
 
 		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
 		ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d5..b03b2eb 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@
 	void __iomem				*ep_regs;
 	void __iomem				*dma_regs;
 	void __iomem				*fifo;
+	char					name[8];
 	struct usb_ep				ep;
 	struct usba_udc				*udc;
 
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a81d9ab..4fa5de2 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1031,6 +1031,8 @@
 	int		rc;
 
 	dum = *((void **)dev_get_platdata(&pdev->dev));
+	/* Clear usb_gadget region for new registration to udc-core */
+	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
 	dum->gadget.name = gadget_name;
 	dum->gadget.ops = &dummy_ops;
 	dum->gadget.max_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b38a228..af0566d 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -361,7 +361,7 @@
 
 		case USB_PORT_FEAT_SUSPEND:
 			dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
-			if (valid_port(wIndex)) {
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
 						       1);
 				return 0;
@@ -404,7 +404,7 @@
 
 		case USB_PORT_FEAT_SUSPEND:
 			dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
-			if (valid_port(wIndex)) {
+			if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
 				ohci_at91_port_suspend(ohci_at91->sfr_regmap,
 						       0);
 				return 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 74c42f7..3425154 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -111,7 +111,7 @@
 	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
 
 	/* xhci 1.1 controllers have the HCCPARAMS2 register */
-	if (hci_version > 100) {
+	if (hci_version > 0x100) {
 		temp = readl(&xhci->cap_regs->hcc_params2);
 		xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
 		xhci_dbg(xhci, "  HC %s Force save context capability",
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a84fe94..7558021 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1338,7 +1338,7 @@
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_RESUME);
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				msleep(USB_RESUME_TIMEOUT);
+				usleep_range(21000, 21500);
 				spin_lock_irqsave(&xhci->lock, flags);
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_U0);
@@ -1619,7 +1619,7 @@
 
 	if (need_usb2_u3_exit) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		msleep(USB_RESUME_TIMEOUT);
+		usleep_range(21000, 21500);
 		spin_lock_irqsave(&xhci->lock, flags);
 	}
 
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 7064892..be0a89e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1824,25 +1824,151 @@
 	kfree(command);
 }
 
-void xhci_mem_cleanup(struct xhci_hcd *xhci)
+void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
 {
-	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	union xhci_trb *erdp_trb, *current_trb;
+	struct xhci_segment	*seg;
+	u64 erdp_reg;
+	u32 iman_reg;
+	dma_addr_t deq;
+	unsigned long segment_offset;
+
+	/* disable irq, ack pending interrupt and ack all pending events */
+
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg &= ~IMAN_IE;
+	writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+	iman_reg =
+		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+	if (iman_reg & IMAN_IP)
+		writel_relaxed(iman_reg,
+			&xhci->sec_ir_set[intr_num]->irq_pending);
+
+	/* last acked event trb is in erdp reg  */
+	erdp_reg =
+		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
+	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
+	if (!deq) {
+		pr_debug("%s: event ring handling not required\n", __func__);
+		return;
+	}
+
+	seg = xhci->sec_event_ring[intr_num]->first_seg;
+	segment_offset = deq - seg->dma;
+
+	/* find out virtual address of the last acked event trb */
+	erdp_trb = current_trb = &seg->trbs[0] +
+				(segment_offset/sizeof(*current_trb));
+
+	/* read cycle state of the last acked trb to find out CCS */
+	xhci->sec_event_ring[intr_num]->cycle_state =
+				(current_trb->event_cmd.flags & TRB_CYCLE);
+
+	while (1) {
+		/* last trb of the event ring: toggle cycle state */
+		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
+			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
+			current_trb = &seg->trbs[0];
+		} else {
+			current_trb++;
+		}
+
+		/* cycle state transition */
+		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
+		    xhci->sec_event_ring[intr_num]->cycle_state)
+			break;
+	}
+
+	if (erdp_trb != current_trb) {
+		deq =
+		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
+					current_trb);
+		if (deq == 0)
+			xhci_warn(xhci,
+				"WARN invalid SW event ring dequeue ptr.\n");
+		/* Update HC event ring dequeue pointer */
+		erdp_reg &= ERST_PTR_MASK;
+		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
+	}
+
+	/* Clear the event handler busy flag (RW1C); event ring is empty. */
+	erdp_reg |= ERST_EHB;
+	xhci_write_64(xhci, erdp_reg,
+			&xhci->sec_ir_set[intr_num]->erst_dequeue);
+}
+
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
+{
 	int size;
-	int i, j, num_ports;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 
-	cancel_delayed_work_sync(&xhci->cmd_timer);
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "invalid secondary interrupter num %d\n",
+			intr_num);
+		return -EINVAL;
+	}
 
-	/* Free the Event Ring Segment Table and the actual Event Ring */
+	size =
+	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
+	if (xhci->sec_erst[intr_num].entries) {
+		xhci_handle_sec_intr_events(xhci, intr_num);
+		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
+				xhci->sec_erst[intr_num].erst_dma_addr);
+		xhci->sec_erst[intr_num].entries = NULL;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
+		intr_num);
+	if (xhci->sec_event_ring[intr_num])
+		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
+	xhci->sec_event_ring[intr_num] = NULL;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Freed sec event ring");
+
+	return 0;
+}
+
+void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
+{
+	int size;
+	unsigned int i;
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+
+	/* sec event ring clean up */
+	for (i = 1; i < xhci->max_interrupters; i++)
+		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);
+
+	kfree(xhci->sec_ir_set);
+	xhci->sec_ir_set = NULL;
+	kfree(xhci->sec_erst);
+	xhci->sec_erst = NULL;
+	kfree(xhci->sec_event_ring);
+	xhci->sec_event_ring = NULL;
+
+	/* primary event ring clean up */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(dev, size,
 				xhci->erst.entries, xhci->erst.erst_dma_addr);
 	xhci->erst.entries = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
 	if (xhci->event_ring)
 		xhci_ring_free(xhci, xhci->event_ring);
 	xhci->event_ring = NULL;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
+}
+
+void xhci_mem_cleanup(struct xhci_hcd *xhci)
+{
+	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
+	int i, j, num_ports;
+
+	cancel_delayed_work_sync(&xhci->cmd_timer);
+
+	xhci_event_ring_cleanup(xhci);
 
 	if (xhci->lpm_command)
 		xhci_free_command(xhci, xhci->lpm_command);
@@ -2083,30 +2209,6 @@
 	return 0;
 }
 
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
-{
-	u64 temp;
-	dma_addr_t deq;
-
-	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
-			xhci->event_ring->dequeue);
-	if (deq == 0 && !in_interrupt())
-		xhci_warn(xhci, "WARN something wrong with SW event ring "
-				"dequeue ptr.\n");
-	/* Update HC event ring dequeue pointer */
-	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
-	temp &= ERST_PTR_MASK;
-	/* Don't clear the EHB bit (which is RW1C) because
-	 * there might be more events to service.
-	 */
-	temp &= ~ERST_EHB;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write event ring dequeue pointer, "
-			"preserving EHB bit");
-	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
-			&xhci->ir_set->erst_dequeue);
-}
-
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 		__le32 __iomem *addr, int max_caps)
 {
@@ -2365,13 +2467,183 @@
 	return 0;
 }
 
+int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
+	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
+	unsigned int intr_num, gfp_t flags)
+{
+	dma_addr_t dma, deq;
+	u64 val_64;
+	unsigned int val;
+	struct xhci_segment *seg;
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+
+	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
+	if (!*er)
+		return -ENOMEM;
+
+	erst->entries = dma_alloc_coherent(dev,
+			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
+			flags);
+	if (!erst->entries) {
+		xhci_ring_free(xhci, *er);
+		return -ENOMEM;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: Allocated event ring segment table at 0x%llx",
+		intr_num, (unsigned long long)dma);
+
+	memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	erst->num_entries = ERST_NUM_SEGS;
+	erst->erst_dma_addr = dma;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d: num segs = %i, virt addr = %p, dma addr = 0x%llx",
+			intr_num,
+			erst->num_entries,
+			erst->entries,
+			(unsigned long long)erst->erst_dma_addr);
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &erst->entries[val];
+
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = readl_relaxed(&ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
+		intr_num);
+	writel_relaxed(val, &ir_set->erst_size);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"intr# %d: Set ERST entries to point to event ring.",
+			intr_num);
+	/* set the segment table base address */
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Set ERST base address for ir_set %d = 0x%llx",
+			intr_num,
+			(unsigned long long)erst->erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
+	if (deq == 0 && !in_interrupt())
+		xhci_warn(xhci,
+		"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
+		intr_num);
+	/* Update HC event ring dequeue pointer */
+	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
+	val_64 &= ERST_PTR_MASK;
+	/* Don't clear the EHB bit (which is RW1C) because
+	 * there might be more events to service.
+	 */
+	val_64 &= ~ERST_EHB;
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
+		intr_num);
+	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
+			&ir_set->erst_dequeue);
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+			"Wrote ERST address to ir_set %d.", intr_num);
+	xhci_print_ir_set(xhci, intr_num);
+
+	return 0;
+}
+
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
+{
+	int ret;
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
+		|| !xhci->sec_event_ring || !xhci->sec_erst ||
+		intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci,
+		"%s:state %x ir_set %p evt_ring %p erst %p intr# %d\n",
+		__func__, xhci->xhc_state, xhci->sec_ir_set,
+		xhci->sec_event_ring, xhci->sec_erst, intr_num);
+		return -EINVAL;
+	}
+
+	if (xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		goto done;
+
+	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
+	ret = xhci_event_ring_setup(xhci,
+				&xhci->sec_event_ring[intr_num],
+				xhci->sec_ir_set[intr_num],
+				&xhci->sec_erst[intr_num],
+				intr_num, GFP_KERNEL);
+	if (ret) {
+		xhci_err(xhci, "sec event ring setup failed intr#%d\n",
+			intr_num);
+		return ret;
+	}
+done:
+	return 0;
+}
+
+int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
+{
+	int ret = 0;
+
+	/* primary + secondary */
+	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating primary event ring");
+
+	/* Set ir_set to interrupt register set 0 */
+	xhci->ir_set = &xhci->run_regs->ir_set[0];
+	ret = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
+		&xhci->erst, 0, flags);
+	if (ret) {
+		xhci_err(xhci, "failed to setup primary event ring\n");
+		goto fail;
+	}
+
+	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+		"// Allocating sec event ring related pointers");
+
+	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_ir_set), flags);
+	if (!xhci->sec_ir_set) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_event_ring), flags);
+	if (!xhci->sec_event_ring) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	xhci->sec_erst = kcalloc(xhci->max_interrupters,
+				sizeof(*xhci->sec_erst), flags);
+	if (!xhci->sec_erst)
+		ret = -ENOMEM;
+fail:
+	return ret;
+}
+
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
 	dma_addr_t	dma;
 	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
 	unsigned int	val, val2;
 	u64		val_64;
-	struct xhci_segment	*seg;
 	u32 page_size, temp;
 	int i;
 
@@ -2497,74 +2769,17 @@
 	xhci->dba = (void __iomem *) xhci->cap_regs + val;
 	xhci_dbg_regs(xhci);
 	xhci_print_run_regs(xhci);
-	/* Set ir_set to interrupt register set 0 */
-	xhci->ir_set = &xhci->run_regs->ir_set[0];
 
 	/*
 	 * Event ring setup: Allocate a normal ring, but also setup
 	 * the event ring segment table (ERST).  Section 4.9.3.
 	 */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
-	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-					0, flags);
-	if (!xhci->event_ring)
+	if (xhci_event_ring_init(xhci, GFP_KERNEL))
 		goto fail;
+
 	if (xhci_check_trb_in_td_math(xhci) < 0)
 		goto fail;
 
-	xhci->erst.entries = dma_alloc_coherent(dev,
-			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
-			flags);
-	if (!xhci->erst.entries)
-		goto fail;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Allocated event ring segment table at 0x%llx",
-			(unsigned long long)dma);
-
-	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
-	xhci->erst.num_entries = ERST_NUM_SEGS;
-	xhci->erst.erst_dma_addr = dma;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
-			xhci->erst.num_entries,
-			xhci->erst.entries,
-			(unsigned long long)xhci->erst.erst_dma_addr);
-
-	/* set ring base address and size for each segment table entry */
-	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
-		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
-		entry->rsvd = 0;
-		seg = seg->next;
-	}
-
-	/* set ERST count with the number of entries in the segment table */
-	val = readl(&xhci->ir_set->erst_size);
-	val &= ERST_SIZE_MASK;
-	val |= ERST_NUM_SEGS;
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
-			val);
-	writel(val, &xhci->ir_set->erst_size);
-
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST entries to point to event ring.");
-	/* set the segment table base address */
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Set ERST base address for ir_set 0 = 0x%llx",
-			(unsigned long long)xhci->erst.erst_dma_addr);
-	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
-	val_64 &= ERST_PTR_MASK;
-	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
-	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
-	/* Set the event ring dequeue address */
-	xhci_set_hc_event_deq(xhci);
-	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"Wrote ERST address to ir_set 0.");
-	xhci_print_ir_set(xhci, 0);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index aa3c706..ec1f0b9 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -174,6 +174,8 @@
 	if (!hcd)
 		return -ENOMEM;
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(hcd->regs)) {
@@ -229,6 +231,8 @@
 		goto disable_clk;
 	}
 
+	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+
 	if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 
@@ -287,6 +291,7 @@
 	struct clk *clk = xhci->clk;
 
 	pm_runtime_disable(&dev->dev);
+	xhci->xhc_state |= XHCI_STATE_REMOVING;
 
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34e23c7..64daa09 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4942,6 +4942,61 @@
 }
 EXPORT_SYMBOL_GPL(xhci_gen_setup);
 
+dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
+	unsigned int intr_num)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (intr_num >= xhci->max_interrupters) {
+		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
+			xhci->max_interrupters);
+		return 0;
+	}
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
+		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
+		&& xhci->sec_event_ring[intr_num]->first_seg)
+		return xhci->sec_event_ring[intr_num]->first_seg->dma;
+
+	return 0;
+}
+
+dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev)
+{
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED) && xhci->dcbaa)
+		return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
+
+	return 0;
+}
+
+dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
+	struct usb_device *udev, struct usb_host_endpoint *ep)
+{
+	int ret;
+	unsigned int ep_index;
+	struct xhci_virt_device *virt_dev;
+
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
+	if (ret <= 0) {
+		xhci_err(xhci, "%s: invalid args\n", __func__);
+		return 0;
+	}
+
+	virt_dev = xhci->devs[udev->slot_id];
+	ep_index = xhci_get_endpoint_index(&ep->desc);
+
+	if (virt_dev->eps[ep_index].ring &&
+		virt_dev->eps[ep_index].ring->first_seg)
+		return virt_dev->eps[ep_index].ring->first_seg->dma;
+
+	return 0;
+}
+
 static const struct hc_driver xhci_hc_driver = {
 	.description =		"xhci-hcd",
 	.product_desc =		"xHCI Host Controller",
@@ -5001,6 +5056,11 @@
 	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
 	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
 	.find_raw_port_number =	xhci_find_raw_port_number,
+	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
+	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
+	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
+	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
+	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
 };
 
 void xhci_init_driver(struct hc_driver *drv,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 5250c72..0fe91df 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1535,6 +1535,9 @@
 	/* Our HCD's current interrupter register set */
 	struct	xhci_intr_reg __iomem *ir_set;
 
+	/* secondary interrupter */
+	struct	xhci_intr_reg __iomem **sec_ir_set;
+
 	/* Cached register copies of read-only HC data */
 	__u32		hcs_params1;
 	__u32		hcs_params2;
@@ -1576,6 +1579,11 @@
 	struct xhci_command	*current_cmd;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
+
+	/* secondary event ring and erst */
+	struct xhci_ring	**sec_event_ring;
+	struct xhci_erst	*sec_erst;
+
 	/* Scratchpad */
 	struct xhci_scratchpad  *scratchpad;
 	/* Store LPM test failed devices' information */
@@ -1842,6 +1850,8 @@
 void xhci_urb_free_priv(struct urb_priv *urb_priv);
 void xhci_free_command(struct xhci_hcd *xhci,
 		struct xhci_command *command);
+int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num);
+int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num);
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 095778f..37c63cb 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -781,12 +781,6 @@
 	iface_desc = interface->cur_altsetting;
 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
-	if (iface_desc->desc.bNumEndpoints < 1) {
-		dev_err(&interface->dev, "Invalid number of endpoints\n");
-		retval = -EINVAL;
-		goto error;
-	}
-
 	/* set up the endpoint information */
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@
 			/* this one will match for the IOWarrior56 only */
 			dev->int_out_endpoint = endpoint;
 	}
+
+	if (!dev->int_in_endpoint) {
+		dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+		retval = -ENODEV;
+		goto error;
+	}
+
+	if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+		if (!dev->int_out_endpoint) {
+			dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+			retval = -ENODEV;
+			goto error;
+		}
+	}
+
 	/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
 	dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
 	if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 2e731af..da08047 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -243,6 +243,10 @@
 #define MAX_VDM_RESPONSE_TIME	60 /* 2 * tVDMSenderResponse_max(30ms) */
 #define MAX_VDM_BUSY_TIME	100 /* 2 * tVDMBusy (50ms) */
 
+#define PD_SNK_PDO_FIXED(prs, hc, uc, usb_comm, drs, volt, curr) \
+	(((prs) << 29) | ((hc) << 28) | ((uc) << 27) | ((usb_comm) << 26) | \
+	 ((drs) << 25) | ((volt) << 10) | (curr))
+
 /* VDM header is the first 32-bit object following the 16-bit PD header */
 #define VDM_HDR_SVID(hdr)	((hdr) >> 16)
 #define VDM_IS_SVDM(hdr)	((hdr) & 0x8000)
@@ -273,7 +277,7 @@
 module_param(min_sink_current, int, 0600);
 
 static const u32 default_src_caps[] = { 0x36019096 };	/* VSafe5V @ 1.5A */
-static const u32 default_snk_caps[] = { 0x2601905A };	/* 5V @ 900mA */
+static const u32 default_snk_caps[] = { 0x2601912C };	/* VSafe5V @ 3A */
 
 struct vdm_tx {
 	u32			data[7];
@@ -305,7 +309,7 @@
 	spinlock_t		rx_lock;
 
 	u32			received_pdos[7];
-	int			src_cap_id;
+	u16			src_cap_id;
 	u8			selected_pdo;
 	u8			requested_pdo;
 	u32			rdo;	/* can be either source or sink */
@@ -318,6 +322,9 @@
 	bool			peer_pr_swap;
 	bool			peer_dr_swap;
 
+	u32			sink_caps[7];
+	int			num_sink_caps;
+
 	struct power_supply	*usb_psy;
 	struct notifier_block	psy_nb;
 
@@ -331,8 +338,10 @@
 	enum power_role		current_pr;
 	bool			in_pr_swap;
 	bool			pd_phy_opened;
-	struct completion	swap_complete;
+	bool			send_request;
+	struct completion	is_ready;
 
+	struct mutex		swap_lock;
 	struct dual_role_phy_instance	*dual_role;
 	struct dual_role_phy_desc	dr_desc;
 	bool			send_pr_swap;
@@ -456,6 +465,9 @@
 	 */
 	pd->rx_msgid = -1;
 	pd->tx_msgid = 0;
+	pd->send_request = false;
+	pd->send_pr_swap = false;
+	pd->send_dr_swap = false;
 }
 
 static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
@@ -835,7 +847,7 @@
 		}
 
 		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
-		complete(&pd->swap_complete);
+		complete(&pd->is_ready);
 		dual_role_instance_changed(pd->dual_role);
 		break;
 
@@ -970,7 +982,7 @@
 	case PE_SNK_READY:
 		pd->in_explicit_contract = true;
 		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
-		complete(&pd->swap_complete);
+		complete(&pd->is_ready);
 		dual_role_instance_changed(pd->dual_role);
 		break;
 
@@ -1539,9 +1551,9 @@
 		pd->hard_reset_recvd = false;
 		pd->caps_count = 0;
 		pd->hard_reset_count = 0;
-		pd->src_cap_id = 0;
 		pd->requested_voltage = 0;
 		pd->requested_current = 0;
+		pd->selected_pdo = pd->requested_pdo = 0;
 		memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
 		rx_msg_cleanup(pd);
 
@@ -1609,8 +1621,12 @@
 				POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
 
 		pd->in_pr_swap = false;
+		pd->in_explicit_contract = false;
+		pd->selected_pdo = pd->requested_pdo = 0;
+		pd->rdo = 0;
 		rx_msg_cleanup(pd);
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 
 		if (pd->current_pr == PR_SINK) {
 			usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
@@ -1718,8 +1734,8 @@
 			}
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
-					default_snk_caps,
-					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+					pd->sink_caps, pd->num_sink_caps,
+					SOP_MSG);
 			if (ret) {
 				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
 				usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
@@ -1833,8 +1849,10 @@
 
 		pd_send_hard_reset(pd);
 		pd->in_explicit_contract = false;
+		pd->rdo = 0;
 		rx_msg_cleanup(pd);
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 
 		pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
 		kick_sm(pd, PS_HARD_RESET_TIME);
@@ -1913,6 +1931,11 @@
 
 	case PE_SNK_SELECT_CAPABILITY:
 		if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+			u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+			bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+				&& (PD_SRC_PDO_TYPE(pdo) ==
+						PD_SRC_PDO_TYPE_AUGMENTED);
+
 			usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
 
 			/* prepare for voltage increase/decrease */
@@ -1924,11 +1947,12 @@
 					&val);
 
 			/*
-			 * if we are changing voltages, we must lower input
-			 * current to pSnkStdby (2.5W). Calculate it and set
-			 * PD_CURRENT_MAX accordingly.
+			 * if changing voltages (not within the same PPS PDO),
+			 * we must lower input current to pSnkStdby (2.5W).
+			 * Calculate it and set PD_CURRENT_MAX accordingly.
 			 */
-			if (pd->requested_voltage != pd->current_voltage) {
+			if (!same_pps &&
+				pd->requested_voltage != pd->current_voltage) {
 				int mv = max(pd->requested_voltage,
 						pd->current_voltage) / 1000;
 				val.intval = (2500000 / mv) * 1000;
@@ -1996,8 +2020,8 @@
 			usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
 		} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
 			ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
-					default_snk_caps,
-					ARRAY_SIZE(default_snk_caps), SOP_MSG);
+					pd->sink_caps, pd->num_sink_caps,
+					SOP_MSG);
 			if (ret) {
 				usbpd_err(&pd->dev, "Error sending Sink Caps\n");
 				usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
@@ -2068,6 +2092,9 @@
 			vconn_swap(pd);
 		} else if (IS_DATA(rx_msg, MSG_VDM)) {
 			handle_vdm_rx(pd, rx_msg);
+		} else if (pd->send_request) {
+			pd->send_request = false;
+			usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
 		} else if (pd->send_pr_swap && is_sink_tx_ok(pd)) {
 			pd->send_pr_swap = false;
 			ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
@@ -2151,7 +2178,10 @@
 
 		pd_send_hard_reset(pd);
 		pd->in_explicit_contract = false;
+		pd->selected_pdo = pd->requested_pdo = 0;
+		pd->rdo = 0;
 		reset_vdm_state(pd);
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
 		usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
 		break;
 
@@ -2533,17 +2563,21 @@
 				return -EAGAIN;
 			}
 
-			reinit_completion(&pd->swap_complete);
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
 			pd->send_dr_swap = true;
 			kick_sm(pd, 0);
 
 			/* wait for operation to complete */
-			if (!wait_for_completion_timeout(&pd->swap_complete,
+			if (!wait_for_completion_timeout(&pd->is_ready,
 					msecs_to_jiffies(100))) {
 				usbpd_err(&pd->dev, "data_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
 				return -ETIMEDOUT;
 			}
 
+			mutex_unlock(&pd->swap_lock);
+
 			if ((*val == DUAL_ROLE_PROP_DR_HOST &&
 					pd->current_dr != DR_DFP) ||
 				(*val == DUAL_ROLE_PROP_DR_DEVICE &&
@@ -2584,17 +2618,21 @@
 				return -EAGAIN;
 			}
 
-			reinit_completion(&pd->swap_complete);
+			mutex_lock(&pd->swap_lock);
+			reinit_completion(&pd->is_ready);
 			pd->send_pr_swap = true;
 			kick_sm(pd, 0);
 
 			/* wait for operation to complete */
-			if (!wait_for_completion_timeout(&pd->swap_complete,
+			if (!wait_for_completion_timeout(&pd->is_ready,
 					msecs_to_jiffies(2000))) {
 				usbpd_err(&pd->dev, "power_role swap timed out\n");
+				mutex_unlock(&pd->swap_lock);
 				return -ETIMEDOUT;
 			}
 
+			mutex_unlock(&pd->swap_lock);
+
 			if ((*val == DUAL_ROLE_PROP_PR_SRC &&
 					pd->current_pr != PR_SRC) ||
 				(*val == DUAL_ROLE_PROP_PR_SNK &&
@@ -2857,36 +2895,62 @@
 	int pdo, uv = 0, ua = 0;
 	int ret;
 
+	mutex_lock(&pd->swap_lock);
+
 	/* Only allowed if we are already in explicit sink contract */
 	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
 		usbpd_err(&pd->dev, "select_pdo: Cannot select new PDO yet\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto out;
 	}
 
 	ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
 	if (ret != 2 && ret != 4) {
 		usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO> [<uV> <uA>]\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (src_cap_id != pd->src_cap_id) {
 		usbpd_err(&pd->dev, "select_pdo: src_cap_id mismatch.  Requested:%d, current:%d\n",
 				src_cap_id, pd->src_cap_id);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (pdo < 1 || pdo > 7) {
 		usbpd_err(&pd->dev, "select_pdo: invalid PDO:%d\n", pdo);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	ret = pd_select_pdo(pd, pdo, uv, ua);
 	if (ret)
-		return ret;
+		goto out;
 
-	usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
+	reinit_completion(&pd->is_ready);
+	pd->send_request = true;
+	kick_sm(pd, 0);
 
-	return size;
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "select_pdo: request timed out\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* determine if request was accepted/rejected */
+	if (pd->selected_pdo != pd->requested_pdo ||
+			pd->current_voltage != pd->requested_voltage) {
+		usbpd_err(&pd->dev, "select_pdo: request rejected\n");
+		ret = -EINVAL;
+	}
+
+out:
+	pd->send_request = false;
+	mutex_unlock(&pd->swap_lock);
+	return ret ? ret : size;
 }
 
 static ssize_t select_pdo_show(struct device *dev,
@@ -3116,6 +3180,7 @@
 	INIT_WORK(&pd->sm_work, usbpd_sm);
 	hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pd->timer.function = pd_timeout;
+	mutex_init(&pd->swap_lock);
 
 	pd->usb_psy = power_supply_get_by_name("usb");
 	if (!pd->usb_psy) {
@@ -3157,6 +3222,44 @@
 	pd->vconn_is_external = device_property_present(parent,
 					"qcom,vconn-uses-external-source");
 
+	pd->num_sink_caps = device_property_read_u32_array(parent,
+			"qcom,default-sink-caps", NULL, 0);
+	if (pd->num_sink_caps > 0) {
+		int i;
+		u32 sink_caps[14];
+
+		if (pd->num_sink_caps % 2 || pd->num_sink_caps > 14) {
+			ret = -EINVAL;
+			usbpd_err(&pd->dev, "default-sink-caps must be specified as voltage/current, max 7 pairs\n");
+			goto put_psy;
+		}
+
+		ret = device_property_read_u32_array(parent,
+				"qcom,default-sink-caps", sink_caps,
+				pd->num_sink_caps);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error reading default-sink-caps\n");
+			goto put_psy;
+		}
+
+		pd->num_sink_caps /= 2;
+
+		for (i = 0; i < pd->num_sink_caps; i++) {
+			int v = sink_caps[i * 2] / 50;
+			int c = sink_caps[i * 2 + 1] / 10;
+
+			pd->sink_caps[i] =
+				PD_SNK_PDO_FIXED(0, 0, 0, 0, 0, v, c);
+		}
+
+		/* First PDO includes additional capabilities */
+		pd->sink_caps[0] |= PD_SNK_PDO_FIXED(1, 0, 0, 1, 1, 0, 0);
+	} else {
+		memcpy(pd->sink_caps, default_snk_caps,
+				sizeof(default_snk_caps));
+		pd->num_sink_caps = ARRAY_SIZE(default_snk_caps);
+	}
+
 	/*
 	 * Register the Android dual-role class (/sys/class/dual_role_usb/).
 	 * The first instance should be named "otg_default" as that's what
@@ -3188,7 +3291,7 @@
 	spin_lock_init(&pd->rx_lock);
 	INIT_LIST_HEAD(&pd->rx_q);
 	INIT_LIST_HEAD(&pd->svid_handlers);
-	init_completion(&pd->swap_complete);
+	init_completion(&pd->is_ready);
 
 	pd->psy_nb.notifier_call = psy_changed;
 	ret = power_supply_reg_notifier(&pd->psy_nb);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index 58eb287..bf155ae9 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -118,15 +118,23 @@
 
 	if (!qphy->clocks_enabled && on) {
 		clk_prepare_enable(qphy->ref_clk_src);
-		clk_prepare_enable(qphy->ref_clk);
-		clk_prepare_enable(qphy->cfg_ahb_clk);
+		if (qphy->ref_clk)
+			clk_prepare_enable(qphy->ref_clk);
+
+		if (qphy->cfg_ahb_clk)
+			clk_prepare_enable(qphy->cfg_ahb_clk);
+
 		qphy->clocks_enabled = true;
 	}
 
 	if (qphy->clocks_enabled && !on) {
-		clk_disable_unprepare(qphy->ref_clk);
+		if (qphy->cfg_ahb_clk)
+			clk_disable_unprepare(qphy->cfg_ahb_clk);
+
+		if (qphy->ref_clk)
+			clk_disable_unprepare(qphy->ref_clk);
+
 		clk_disable_unprepare(qphy->ref_clk_src);
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
 		qphy->clocks_enabled = false;
 	}
 
@@ -744,15 +752,28 @@
 		}
 	}
 
+	/* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */
 	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(qphy->ref_clk_src))
+	if (IS_ERR(qphy->ref_clk_src)) {
 		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+		ret = PTR_ERR(qphy->ref_clk_src);
+		return ret;
+	}
 
-	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(qphy->ref_clk))
-		dev_dbg(dev, "clk get failed for ref_clk\n");
-	else
+	/* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "ref_clk") >= 0) {
+		qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+		if (IS_ERR(qphy->ref_clk)) {
+			ret = PTR_ERR(qphy->ref_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_dbg(dev,
+					"clk get failed for ref_clk\n");
+			return ret;
+		}
+
 		clk_set_rate(qphy->ref_clk, 19200000);
+	}
 
 	if (of_property_match_string(pdev->dev.of_node,
 				"clock-names", "cfg_ahb_clk") >= 0) {
@@ -933,14 +954,7 @@
 	struct qusb_phy *qphy = platform_get_drvdata(pdev);
 
 	usb_remove_phy(&qphy->phy);
-
-	if (qphy->clocks_enabled) {
-		clk_disable_unprepare(qphy->cfg_ahb_clk);
-		clk_disable_unprepare(qphy->ref_clk);
-		clk_disable_unprepare(qphy->ref_clk_src);
-		qphy->clocks_enabled = false;
-	}
-
+	qusb_phy_enable_clocks(qphy, false);
 	qusb_phy_enable_power(qphy, false, true);
 
 	return 0;
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index 43f06f3..ee521a0 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -84,6 +84,7 @@
 	struct clk		*ref_clk_src;
 	struct clk		*ref_clk;
 	struct clk		*aux_clk;
+	struct clk		*com_aux_clk;
 	struct clk		*cfg_ahb_clk;
 	struct clk		*pipe_clk;
 	struct reset_control	*phy_reset;
@@ -114,6 +115,8 @@
 };
 MODULE_DEVICE_TABLE(of, msm_usb_id_table);
 
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on);
+
 static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
 {
 	return phy->cable_connected ? "connected" : "disconnected";
@@ -292,21 +295,7 @@
 		return ret;
 	}
 
-	if (!phy->clk_enabled) {
-		if (phy->ref_clk_src)
-			clk_prepare_enable(phy->ref_clk_src);
-		if (phy->ref_clk)
-			clk_prepare_enable(phy->ref_clk);
-		clk_prepare_enable(phy->aux_clk);
-		clk_prepare_enable(phy->cfg_ahb_clk);
-		clk_set_rate(phy->pipe_clk, 125000000);
-		clk_prepare_enable(phy->pipe_clk);
-		phy->clk_enabled = true;
-	}
-
-	/* select usb3 phy mode */
-	if (phy->tcsr_usb3_dp_phymode)
-		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+	msm_ssphy_qmp_enable_clks(phy, true);
 
 	writel_relaxed(0x01,
 		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -383,6 +372,10 @@
 		goto deassert_phy_phy_reset;
 	}
 
+	/* select usb3 phy mode */
+	if (phy->tcsr_usb3_dp_phymode)
+		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
 	/* Deassert USB3 PHY CSR reset */
 	ret = reset_control_deassert(phy->phy_reset);
 	if (ret) {
@@ -469,29 +462,13 @@
 		/* Make sure above write completed with PHY */
 		wmb();
 
-		clk_disable_unprepare(phy->cfg_ahb_clk);
-		clk_disable_unprepare(phy->aux_clk);
-		clk_disable_unprepare(phy->pipe_clk);
-		if (phy->ref_clk)
-			clk_disable_unprepare(phy->ref_clk);
-		if (phy->ref_clk_src)
-			clk_disable_unprepare(phy->ref_clk_src);
-		phy->clk_enabled = false;
+		msm_ssphy_qmp_enable_clks(phy, false);
 		phy->in_suspend = true;
 		msm_ssphy_power_enable(phy, 0);
 		dev_dbg(uphy->dev, "QMP PHY is suspend\n");
 	} else {
 		msm_ssphy_power_enable(phy, 1);
-		clk_prepare_enable(phy->pipe_clk);
-		if (!phy->clk_enabled) {
-			if (phy->ref_clk_src)
-				clk_prepare_enable(phy->ref_clk_src);
-			if (phy->ref_clk)
-				clk_prepare_enable(phy->ref_clk);
-			clk_prepare_enable(phy->aux_clk);
-			clk_prepare_enable(phy->cfg_ahb_clk);
-			phy->clk_enabled = true;
-		}
+		msm_ssphy_qmp_enable_clks(phy, true);
 		if (!phy->cable_connected) {
 			writel_relaxed(0x01,
 			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -533,16 +510,9 @@
 	return 0;
 }
 
-static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+static int msm_ssphy_qmp_get_clks(struct msm_ssphy_qmp *phy, struct device *dev)
 {
-	struct msm_ssphy_qmp *phy;
-	struct device *dev = &pdev->dev;
-	struct resource *res;
-	int ret = 0, size = 0, len;
-
-	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy)
-		return -ENOMEM;
+	int ret = 0;
 
 	phy->aux_clk = devm_clk_get(dev, "aux_clk");
 	if (IS_ERR(phy->aux_clk)) {
@@ -552,11 +522,10 @@
 			dev_err(dev, "failed to get aux_clk\n");
 		goto err;
 	}
-
 	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
 
-	if (of_property_match_string(pdev->dev.of_node,
-				"clock-names", "cfg_ahb_clk") >= 0) {
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "cfg_ahb_clk") >= 0) {
 		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
 		if (IS_ERR(phy->cfg_ahb_clk)) {
 			ret = PTR_ERR(phy->cfg_ahb_clk);
@@ -576,6 +545,88 @@
 		goto err;
 	}
 
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	if (of_property_match_string(dev->of_node,
+			"clock-names", "com_aux_clk") >= 0) {
+		phy->com_aux_clk = devm_clk_get(dev, "com_aux_clk");
+		if (IS_ERR(phy->com_aux_clk)) {
+			ret = PTR_ERR(phy->com_aux_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get com_aux_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+err:
+	return ret;
+}
+
+static void msm_ssphy_qmp_enable_clks(struct msm_ssphy_qmp *phy, bool on)
+{
+	dev_dbg(phy->phy.dev, "%s(): clk_enabled:%d on:%d\n", __func__,
+					phy->clk_enabled, on);
+
+	if (!phy->clk_enabled && on) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+
+		if (phy->com_aux_clk)
+			clk_prepare_enable(phy->com_aux_clk);
+
+		clk_prepare_enable(phy->aux_clk);
+		if (phy->cfg_ahb_clk)
+			clk_prepare_enable(phy->cfg_ahb_clk);
+
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	if (phy->clk_enabled && !on) {
+		clk_disable_unprepare(phy->pipe_clk);
+
+		if (phy->cfg_ahb_clk)
+			clk_disable_unprepare(phy->cfg_ahb_clk);
+
+		clk_disable_unprepare(phy->aux_clk);
+		if (phy->com_aux_clk)
+			clk_disable_unprepare(phy->com_aux_clk);
+
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+
+		phy->clk_enabled = false;
+	}
+}
+
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	ret = msm_ssphy_qmp_get_clks(phy, dev);
+	if (ret)
+		goto err;
+
 	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
 	if (IS_ERR(phy->phy_reset)) {
 		ret = PTR_ERR(phy->phy_reset);
@@ -726,13 +777,6 @@
 		goto err;
 	}
 
-	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
-	if (IS_ERR(phy->ref_clk_src))
-		phy->ref_clk_src = NULL;
-	phy->ref_clk = devm_clk_get(dev, "ref_clk");
-	if (IS_ERR(phy->ref_clk))
-		phy->ref_clk = NULL;
-
 	platform_set_drvdata(pdev, phy);
 
 	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
@@ -760,14 +804,8 @@
 		return 0;
 
 	usb_remove_phy(&phy->phy);
-	if (phy->ref_clk)
-		clk_disable_unprepare(phy->ref_clk);
-	if (phy->ref_clk_src)
-		clk_disable_unprepare(phy->ref_clk_src);
+	msm_ssphy_qmp_enable_clks(phy, false);
 	msm_ssusb_qmp_ldo_enable(phy, 0);
-	clk_disable_unprepare(phy->aux_clk);
-	clk_disable_unprepare(phy->cfg_ahb_clk);
-	clk_disable_unprepare(phy->pipe_clk);
 	kfree(phy);
 	return 0;
 }
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 6a1df9e..30bf0f5 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1482,16 +1482,20 @@
 	struct usb_serial *serial = port->serial;
 	struct tty_struct *tty;
 	struct digi_port *priv = usb_get_serial_port_data(port);
+	unsigned char *buf = urb->transfer_buffer;
 	int opcode, line, status, val;
 	int i;
 	unsigned int rts;
 
+	if (urb->actual_length < 4)
+		return -1;
+
 	/* handle each oob command */
-	for (i = 0; i < urb->actual_length - 3;) {
-		opcode = ((unsigned char *)urb->transfer_buffer)[i++];
-		line = ((unsigned char *)urb->transfer_buffer)[i++];
-		status = ((unsigned char *)urb->transfer_buffer)[i++];
-		val = ((unsigned char *)urb->transfer_buffer)[i++];
+	for (i = 0; i < urb->actual_length - 3; i += 4) {
+		opcode = buf[i];
+		line = buf[i + 1];
+		status = buf[i + 2];
+		val = buf[i + 3];
 
 		dev_dbg(&port->dev, "digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d\n",
 			opcode, line, status, val);
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index c02808a..f1a8fdc 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1674,6 +1674,12 @@
 	function    = TIUMP_GET_FUNC_FROM_CODE(data[0]);
 	dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
 		port_number, function, data[1]);
+
+	if (port_number >= edge_serial->serial->num_ports) {
+		dev_err(dev, "bad port number %d\n", port_number);
+		goto exit;
+	}
+
 	port = edge_serial->serial->port[port_number];
 	edge_port = usb_get_serial_port_data(port);
 	if (!edge_port) {
@@ -1755,7 +1761,7 @@
 
 	port_number = edge_port->port->port_number;
 
-	if (edge_port->lsr_event) {
+	if (urb->actual_length > 0 && edge_port->lsr_event) {
 		edge_port->lsr_event = 0;
 		dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
 			__func__, port_number, edge_port->lsr_mask, *data);
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index a180b17..76564b3 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -142,12 +142,6 @@
 
 static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct usb_serial	*serial = port->serial;
-	struct usb_serial_port	*wport;
-
-	wport = serial->port[1];
-	tty_port_tty_set(&wport->port, tty);
-
 	return usb_serial_generic_open(tty, port);
 }
 
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 93c6c9b..8a069aa 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -200,6 +200,11 @@
 	if (!safe)
 		goto out;
 
+	if (length < 2) {
+		dev_err(&port->dev, "malformed packet\n");
+		return;
+	}
+
 	fcs = fcs_compute10(data, length, CRC10_INITFCS);
 	if (fcs) {
 		dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 80378dd..c882357 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
 	long ret = 0, locked, lock_limit;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
+	if (WARN_ON_ONCE(!mm))
+		return -EPERM;
 
 	if (!npages)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
-	locked = current->mm->locked_vm + npages;
+	down_write(&mm->mmap_sem);
+	locked = mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 		ret = -ENOMEM;
 	else
-		current->mm->locked_vm += npages;
+		mm->locked_vm += npages;
 
 	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK),
 			ret ? " - exceeded" : "");
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
-	if (!current || !current->mm || !npages)
-		return; /* process exited */
+	if (!mm || !npages)
+		return;
 
-	down_write(&current->mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-		npages = current->mm->locked_vm;
-	current->mm->locked_vm -= npages;
+	down_write(&mm->mmap_sem);
+	if (WARN_ON_ONCE(npages > mm->locked_vm))
+		npages = mm->locked_vm;
+	mm->locked_vm -= npages;
 	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 }
 
 /*
@@ -89,6 +89,15 @@
 };
 
 /*
 * A container needs to remember which preregistered region it has
+ * referenced to do proper cleanup at the userspace process exit.
+ */
+struct tce_iommu_prereg {
+	struct list_head next;
+	struct mm_iommu_table_group_mem_t *mem;
+};
+
+/*
  * The container descriptor supports only a single group per container.
  * Required by the API as the container is not supplied with the IOMMU group
  * at the moment of initialization.
@@ -97,24 +106,68 @@
 	struct mutex lock;
 	bool enabled;
 	bool v2;
+	bool def_window_pending;
 	unsigned long locked_pages;
+	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
+	struct list_head prereg_list;
 };
 
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+	if (container->mm) {
+		if (container->mm == current->mm)
+			return 0;
+		return -EPERM;
+	}
+	BUG_ON(!current->mm);
+	container->mm = current->mm;
+	atomic_inc(&container->mm->mm_count);
+
+	return 0;
+}
+
+static long tce_iommu_prereg_free(struct tce_container *container,
+		struct tce_iommu_prereg *tcemem)
+{
+	long ret;
+
+	ret = mm_iommu_put(container->mm, tcemem->mem);
+	if (ret)
+		return ret;
+
+	list_del(&tcemem->next);
+	kfree(tcemem);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
+	struct tce_iommu_prereg *tcemem;
+	bool found = false;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
 
-	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(mem);
+	list_for_each_entry(tcemem, &container->prereg_list, next) {
+		if (tcemem->mem == mem) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	return tce_iommu_prereg_free(container, tcemem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -122,22 +175,36 @@
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem = NULL;
+	struct tce_iommu_prereg *tcemem;
 	unsigned long entries = size >> PAGE_SHIFT;
 
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
-	ret = mm_iommu_get(vaddr, entries, &mem);
+	mem = mm_iommu_find(container->mm, vaddr, entries);
+	if (mem) {
+		list_for_each_entry(tcemem, &container->prereg_list, next) {
+			if (tcemem->mem == mem)
+				return -EBUSY;
+		}
+	}
+
+	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
+	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+	tcemem->mem = mem;
+	list_add(&tcemem->next, &container->prereg_list);
+
 	container->enabled = true;
 
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -146,13 +213,13 @@
 
 	BUG_ON(tbl->it_userspace);
 
-	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	uas = vzalloc(cb);
 	if (!uas) {
-		decrement_locked_vm(cb >> PAGE_SHIFT);
+		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 	tbl->it_userspace = uas;
@@ -160,7 +227,8 @@
 	return 0;
 }
 
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -170,7 +238,7 @@
 
 	vfree(tbl->it_userspace);
 	tbl->it_userspace = NULL;
-	decrement_locked_vm(cb >> PAGE_SHIFT);
+	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -230,9 +298,6 @@
 	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 
-	if (!current->mm)
-		return -ESRCH; /* process exited */
-
 	if (container->enabled)
 		return -EBUSY;
 
@@ -277,8 +342,12 @@
 	if (!table_group->tce32_size)
 		return -EPERM;
 
+	ret = tce_iommu_mm_set(container);
+	if (ret)
+		return ret;
+
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(locked);
+	ret = try_increment_locked_vm(container->mm, locked);
 	if (ret)
 		return ret;
 
@@ -296,10 +365,8 @@
 
 	container->enabled = false;
 
-	if (!current->mm)
-		return;
-
-	decrement_locked_vm(container->locked_pages);
+	BUG_ON(!container->mm);
+	decrement_locked_vm(container->mm, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -317,6 +384,7 @@
 
 	mutex_init(&container->lock);
 	INIT_LIST_HEAD_RCU(&container->group_list);
+	INIT_LIST_HEAD_RCU(&container->prereg_list);
 
 	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;
 
@@ -326,7 +394,8 @@
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl);
 
 static void tce_iommu_release(void *iommu_data)
 {
@@ -351,10 +420,20 @@
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_free_table(tbl);
+		tce_iommu_free_table(container, tbl);
+	}
+
+	while (!list_empty(&container->prereg_list)) {
+		struct tce_iommu_prereg *tcemem;
+
+		tcemem = list_first_entry(&container->prereg_list,
+				struct tce_iommu_prereg, next);
+		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
 	}
 
 	tce_iommu_disable(container);
+	if (container->mm)
+		mmdrop(container->mm);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
@@ -369,13 +448,14 @@
 	put_page(page);
 }
 
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+		unsigned long tce, unsigned long size,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, size);
 	if (!mem)
 		return -EINVAL;
 
@@ -388,18 +468,18 @@
 	return 0;
 }
 
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
-		unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+		struct iommu_table *tbl, unsigned long entry)
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
 	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
-	if (!pua || !current || !current->mm)
+	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -429,7 +509,7 @@
 			continue;
 
 		if (container->v2) {
-			tce_iommu_unuse_page_v2(tbl, entry);
+			tce_iommu_unuse_page_v2(container, tbl, entry);
 			continue;
 		}
 
@@ -509,13 +589,19 @@
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
+	if (!tbl->it_userspace) {
+		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < pages; ++i) {
 		struct mm_iommu_table_group_mem_t *mem = NULL;
 		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
 				entry + i);
 
-		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
-				&hpa, &mem);
+		ret = tce_iommu_prereg_ua_to_hpa(container,
+				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
 		if (ret)
 			break;
 
@@ -536,7 +622,7 @@
 		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
@@ -544,7 +630,7 @@
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
 		*pua = tce;
 
@@ -572,7 +658,7 @@
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -582,25 +668,17 @@
 	WARN_ON(!ret && !(*ptbl)->it_ops->free);
 	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
 
-	if (!ret && container->v2) {
-		ret = tce_iommu_userspace_view_alloc(*ptbl);
-		if (ret)
-			(*ptbl)->it_ops->free(*ptbl);
-	}
-
-	if (ret)
-		decrement_locked_vm(table_size >> PAGE_SHIFT);
-
 	return ret;
 }
 
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl)
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl);
+	tce_iommu_userspace_view_free(tbl, container->mm);
 	tbl->it_ops->free(tbl);
-	decrement_locked_vm(pages);
+	decrement_locked_vm(container->mm, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
@@ -663,7 +741,7 @@
 		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		table_group->ops->unset_window(table_group, num);
 	}
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 
 	return ret;
 }
@@ -701,12 +779,41 @@
 
 	/* Free table */
 	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 	container->tables[num] = NULL;
 
 	return 0;
 }
 
+static long tce_iommu_create_default_window(struct tce_container *container)
+{
+	long ret;
+	__u64 start_addr = 0;
+	struct tce_iommu_group *tcegrp;
+	struct iommu_table_group *table_group;
+
+	if (!container->def_window_pending)
+		return 0;
+
+	if (!tce_groups_attached(container))
+		return -ENODEV;
+
+	tcegrp = list_first_entry(&container->group_list,
+			struct tce_iommu_group, next);
+	table_group = iommu_group_get_iommudata(tcegrp->grp);
+	if (!table_group)
+		return -ENODEV;
+
+	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
+			table_group->tce32_size, 1, &start_addr);
+	WARN_ON_ONCE(!ret && start_addr);
+
+	if (!ret)
+		container->def_window_pending = false;
+
+	return ret;
+}
+
 static long tce_iommu_ioctl(void *iommu_data,
 				 unsigned int cmd, unsigned long arg)
 {
@@ -727,7 +834,17 @@
 		}
 
 		return (ret < 0) ? 0 : ret;
+	}
 
+	/*
+	 * Sanity check to prevent one userspace from manipulating
+	 * another userspace mm.
+	 */
+	BUG_ON(!container);
+	if (container->mm && container->mm != current->mm)
+		return -EPERM;
+
+	switch (cmd) {
 	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
 		struct vfio_iommu_spapr_tce_info info;
 		struct tce_iommu_group *tcegrp;
@@ -797,6 +914,10 @@
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -860,6 +981,10 @@
 		if (param.flags)
 			return -EINVAL;
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		num = tce_iommu_find_table(container, param.iova, &tbl);
 		if (num < 0)
 			return -ENXIO;
@@ -888,6 +1013,10 @@
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (copy_from_user(&param, (void __user *)arg, minsz))
 			return -EFAULT;
 
@@ -911,6 +1040,9 @@
 		if (!container->v2)
 			break;
 
+		if (!container->mm)
+			return -EPERM;
+
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
@@ -969,6 +1101,10 @@
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -986,6 +1122,10 @@
 
 		mutex_lock(&container->lock);
 
+		ret = tce_iommu_create_default_window(container);
+		if (ret)
+			return ret;
+
 		ret = tce_iommu_create_window(container, create.page_shift,
 				create.window_size, create.levels,
 				&create.start_addr);
@@ -1003,6 +1143,10 @@
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1018,6 +1162,11 @@
 		if (remove.flags)
 			return -EINVAL;
 
+		if (container->def_window_pending && !remove.start_addr) {
+			container->def_window_pending = false;
+			return 0;
+		}
+
 		mutex_lock(&container->lock);
 
 		ret = tce_iommu_remove_window(container, remove.start_addr);
@@ -1043,7 +1192,7 @@
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl);
+		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 
@@ -1062,10 +1211,7 @@
 		if (!tbl || !tbl->it_map)
 			continue;
 
-		rc = tce_iommu_userspace_view_alloc(tbl);
-		if (!rc)
-			rc = iommu_take_ownership(tbl);
-
+		rc = iommu_take_ownership(tbl);
 		if (rc) {
 			for (j = 0; j < i; ++j)
 				iommu_release_ownership(
@@ -1100,9 +1246,6 @@
 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
 		struct iommu_table_group *table_group)
 {
-	long i, ret = 0;
-	struct iommu_table *tbl = NULL;
-
 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
 			!table_group->ops->release_ownership) {
 		WARN_ON_ONCE(1);
@@ -1111,47 +1254,7 @@
 
 	table_group->ops->take_ownership(table_group);
 
-	/*
-	 * If it the first group attached, check if there is
-	 * a default DMA window and create one if none as
-	 * the userspace expects it to exist.
-	 */
-	if (!tce_groups_attached(container) && !container->tables[0]) {
-		ret = tce_iommu_create_table(container,
-				table_group,
-				0, /* window number */
-				IOMMU_PAGE_SHIFT_4K,
-				table_group->tce32_size,
-				1, /* default levels */
-				&tbl);
-		if (ret)
-			goto release_exit;
-		else
-			container->tables[0] = tbl;
-	}
-
-	/* Set all windows to the new group */
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
-		tbl = container->tables[i];
-
-		if (!tbl)
-			continue;
-
-		/* Set the default window to a new group */
-		ret = table_group->ops->set_window(table_group, i, tbl);
-		if (ret)
-			goto release_exit;
-	}
-
 	return 0;
-
-release_exit:
-	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
-		table_group->ops->unset_window(table_group, i);
-
-	table_group->ops->release_ownership(table_group);
-
-	return ret;
 }
 
 static int tce_iommu_attach_group(void *iommu_data,
@@ -1203,10 +1306,13 @@
 	}
 
 	if (!table_group->ops || !table_group->ops->take_ownership ||
-			!table_group->ops->release_ownership)
+			!table_group->ops->release_ownership) {
 		ret = tce_iommu_take_ownership(container, table_group);
-	else
+	} else {
 		ret = tce_iommu_take_ownership_ddw(container, table_group);
+		if (!tce_groups_attached(container) && !container->tables[0])
+			container->def_window_pending = true;
+	}
 
 	if (!ret) {
 		tcegrp->grp = iommu_group;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d1b7ac7..a826864 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3879,8 +3879,7 @@
 	unsigned blocksize;
 	struct inode *inode = mapping->host;
 
-	/* If we are processing an encrypted inode during orphan list
-	 * handling */
+	/* If we are processing an encrypted inode during orphan list handling */
 	if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
 		return 0;
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c01eeaa..5cc0a36 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1436,6 +1436,204 @@
 
 #endif
 
+/*
+ * Print out various scheduling related per-task fields:
+ */
+
+#ifdef CONFIG_SMP
+
+static int sched_wake_up_idle_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_wake_up_idle(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_wake_up_idle_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int wake_up_idle, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &wake_up_idle);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_wake_up_idle(p, wake_up_idle);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_wake_up_idle_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_wake_up_idle_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_wake_up_idle_operations = {
+	.open		= sched_wake_up_idle_open,
+	.read		= seq_read,
+	.write		= sched_wake_up_idle_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SMP */
+
+#ifdef CONFIG_SCHED_HMP
+
+static int sched_init_task_load_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_init_task_load(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_init_task_load_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int init_task_load, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &init_task_load);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_init_task_load(p, init_task_load);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_init_task_load_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_init_task_load_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_init_task_load_operations = {
+	.open		= sched_init_task_load_open,
+	.read		= seq_read,
+	.write		= sched_init_task_load_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int sched_group_id_show(struct seq_file *m, void *v)
+{
+	struct inode *inode = m->private;
+	struct task_struct *p;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	seq_printf(m, "%d\n", sched_get_group_id(p));
+
+	put_task_struct(p);
+
+	return 0;
+}
+
+static ssize_t
+sched_group_id_write(struct file *file, const char __user *buf,
+	    size_t count, loff_t *offset)
+{
+	struct inode *inode = file_inode(file);
+	struct task_struct *p;
+	char buffer[PROC_NUMBUF];
+	int group_id, err;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &group_id);
+	if (err)
+		goto out;
+
+	p = get_proc_task(inode);
+	if (!p)
+		return -ESRCH;
+
+	err = sched_set_group_id(p, group_id);
+
+	put_task_struct(p);
+
+out:
+	return err < 0 ? err : count;
+}
+
+static int sched_group_id_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, sched_group_id_show, inode);
+}
+
+static const struct file_operations proc_pid_sched_group_id_operations = {
+	.open		= sched_group_id_open,
+	.read		= seq_read,
+	.write		= sched_group_id_write,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#endif	/* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_SCHED_AUTOGROUP
 /*
  * Print out autogroup related information:
@@ -2861,6 +3059,13 @@
 	ONE("status",     S_IRUGO, proc_pid_status),
 	ONE("personality", S_IRUSR, proc_pid_personality),
 	ONE("limits",	  S_IRUGO, proc_pid_limits),
+#ifdef CONFIG_SMP
+	REG("sched_wake_up_idle",      S_IRUGO|S_IWUSR, proc_pid_sched_wake_up_idle_operations),
+#endif
+#ifdef CONFIG_SCHED_HMP
+	REG("sched_init_task_load",      S_IRUGO|S_IWUSR, proc_pid_sched_init_task_load_operations),
+	REG("sched_group_id",      S_IRUGO|S_IWUGO, proc_pid_sched_group_id_operations),
+#endif
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index f22de8a..2797d2f 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -76,17 +76,13 @@
 
 	if (dentry < lower_dentry) {
 		spin_lock(&dentry->d_lock);
-		spin_lock(&lower_dentry->d_lock);
+		spin_lock_nested(&lower_dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	} else {
 		spin_lock(&lower_dentry->d_lock);
-		spin_lock(&dentry->d_lock);
+		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 	}
 
-	if (dentry->d_name.len != lower_dentry->d_name.len) {
-		__d_drop(dentry);
-		err = 0;
-	} else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
-				dentry->d_name.len) != 0) {
+	if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
 		__d_drop(dentry);
 		err = 0;
 	}
@@ -165,7 +161,7 @@
 	}
 	*/
 	if (name->len == len) {
-		if (strncasecmp(name->name, str, len) == 0)
+		if (str_n_case_eq(name->name, str, len))
 			return 0;
 	}
 	return 1;
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 9408a54..fc5a632 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -30,6 +30,8 @@
 	ci->userid = pi->userid;
 	ci->d_uid = pi->d_uid;
 	ci->under_android = pi->under_android;
+	ci->under_cache = pi->under_cache;
+	ci->under_obb = pi->under_obb;
 	set_top(ci, pi->top);
 }
 
@@ -43,81 +45,214 @@
 	info->userid = userid;
 	info->d_uid = uid;
 	info->under_android = under_android;
+	info->under_cache = false;
+	info->under_obb = false;
 	set_top(info, top);
 }
 
 /* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
-void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry)
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name)
 {
 	struct sdcardfs_inode_info *info = SDCARDFS_I(d_inode(dentry));
 	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
 	appid_t appid;
+	struct qstr q_Android = QSTR_LITERAL("Android");
+	struct qstr q_data = QSTR_LITERAL("data");
+	struct qstr q_obb = QSTR_LITERAL("obb");
+	struct qstr q_media = QSTR_LITERAL("media");
+	struct qstr q_cache = QSTR_LITERAL("cache");
 
 	/* By default, each inode inherits from its parent.
 	 * the properties are maintained on its private fields
 	 * because the inode attributes will be modified with that of
 	 * its lower inode.
-	 * The derived state will be updated on the last
-	 * stage of each system call by fix_derived_permission(inode).
+	 * These values are used by our custom permission call instead
+	 * of using the inode permissions.
 	 */
 
 	inherit_derived_state(d_inode(parent), d_inode(dentry));
 
+	/* Files don't get special labels */
+	if (!S_ISDIR(d_inode(dentry)->i_mode))
+		return;
 	/* Derive custom permissions based on parent and current node */
 	switch (parent_info->perm) {
-		case PERM_INHERIT:
-			/* Already inherited above */
-			break;
-		case PERM_PRE_ROOT:
-			/* Legacy internal layout places users at top level */
-			info->perm = PERM_ROOT;
-			info->userid = simple_strtoul(newdentry->d_name.name, NULL, 10);
+	case PERM_INHERIT:
+	case PERM_ANDROID_PACKAGE_CACHE:
+		/* Already inherited above */
+		break;
+	case PERM_PRE_ROOT:
+		/* Legacy internal layout places users at top level */
+		info->perm = PERM_ROOT;
+		info->userid = simple_strtoul(name->name, NULL, 10);
+		set_top(info, &info->vfs_inode);
+		break;
+	case PERM_ROOT:
+		/* Assume masked off by default. */
+		if (qstr_case_eq(name, &q_Android)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID;
+			info->under_android = true;
 			set_top(info, &info->vfs_inode);
-			break;
-		case PERM_ROOT:
-			/* Assume masked off by default. */
-			if (!strcasecmp(newdentry->d_name.name, "Android")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID;
-				info->under_android = true;
-				set_top(info, &info->vfs_inode);
-			}
-			break;
-		case PERM_ANDROID:
-			if (!strcasecmp(newdentry->d_name.name, "data")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_DATA;
-				set_top(info, &info->vfs_inode);
-			} else if (!strcasecmp(newdentry->d_name.name, "obb")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_OBB;
-				set_top(info, &info->vfs_inode);
-				/* Single OBB directory is always shared */
-			} else if (!strcasecmp(newdentry->d_name.name, "media")) {
-				/* App-specific directories inside; let anyone traverse */
-				info->perm = PERM_ANDROID_MEDIA;
-				set_top(info, &info->vfs_inode);
-			}
-			break;
-		case PERM_ANDROID_DATA:
-		case PERM_ANDROID_OBB:
-		case PERM_ANDROID_MEDIA:
-			appid = get_appid(newdentry->d_name.name);
-			if (appid != 0) {
-				info->d_uid = multiuser_get_uid(parent_info->userid, appid);
-			}
+		}
+		break;
+	case PERM_ANDROID:
+		if (qstr_case_eq(name, &q_data)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_DATA;
 			set_top(info, &info->vfs_inode);
-			break;
+		} else if (qstr_case_eq(name, &q_obb)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_OBB;
+			info->under_obb = true;
+			set_top(info, &info->vfs_inode);
+			/* Single OBB directory is always shared */
+		} else if (qstr_case_eq(name, &q_media)) {
+			/* App-specific directories inside; let anyone traverse */
+			info->perm = PERM_ANDROID_MEDIA;
+			set_top(info, &info->vfs_inode);
+		}
+		break;
+	case PERM_ANDROID_OBB:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+		info->perm = PERM_ANDROID_PACKAGE;
+		appid = get_appid(name->name);
+		if (appid != 0 && !is_excluded(name->name, parent_info->userid)) {
+			info->d_uid = multiuser_get_uid(parent_info->userid, appid);
+		}
+		set_top(info, &info->vfs_inode);
+		break;
+	case PERM_ANDROID_PACKAGE:
+		if (qstr_case_eq(name, &q_cache)) {
+			info->perm = PERM_ANDROID_PACKAGE_CACHE;
+			info->under_cache = true;
+		}
+		break;
 	}
 }
 
 void get_derived_permission(struct dentry *parent, struct dentry *dentry)
 {
-	get_derived_permission_new(parent, dentry, dentry);
+	get_derived_permission_new(parent, dentry, &dentry->d_name);
 }
 
-static int descendant_may_need_fixup(perm_t perm) {
-	if (perm == PERM_PRE_ROOT || perm == PERM_ROOT || perm == PERM_ANDROID)
+static appid_t get_type(const char *name)
+{
+	const char *ext = strrchr(name, '.');
+	appid_t id;
+
+	if (ext && ext[0]) {
+		ext = &ext[1];
+		id = get_ext_gid(ext);
+		return id?:AID_MEDIA_RW;
+	}
+	return AID_MEDIA_RW;
+}
+
+void fixup_lower_ownership(struct dentry *dentry, const char *name)
+{
+	struct path path;
+	struct inode *inode;
+	struct inode *delegated_inode = NULL;
+	int error;
+	struct sdcardfs_inode_info *info;
+	struct sdcardfs_inode_info *info_top;
+	perm_t perm;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	uid_t uid = sbi->options.fs_low_uid;
+	gid_t gid = sbi->options.fs_low_gid;
+	struct iattr newattrs;
+
+	info = SDCARDFS_I(d_inode(dentry));
+	perm = info->perm;
+	if (info->under_obb) {
+		perm = PERM_ANDROID_OBB;
+	} else if (info->under_cache) {
+		perm = PERM_ANDROID_PACKAGE_CACHE;
+	} else if (perm == PERM_INHERIT) {
+		info_top = SDCARDFS_I(grab_top(info));
+		perm = info_top->perm;
+		release_top(info);
+	}
+
+	switch (perm) {
+	case PERM_ROOT:
+	case PERM_ANDROID:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+	case PERM_ANDROID_PACKAGE:
+	case PERM_ANDROID_PACKAGE_CACHE:
+		uid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_ANDROID_OBB:
+		uid = AID_MEDIA_OBB;
+		break;
+	case PERM_PRE_ROOT:
+	default:
+		break;
+	}
+	switch (perm) {
+	case PERM_ROOT:
+	case PERM_ANDROID:
+	case PERM_ANDROID_DATA:
+	case PERM_ANDROID_MEDIA:
+		if (S_ISDIR(d_inode(dentry)->i_mode))
+			gid = multiuser_get_uid(info->userid, AID_MEDIA_RW);
+		else
+			gid = multiuser_get_uid(info->userid, get_type(name));
+		break;
+	case PERM_ANDROID_OBB:
+		gid = AID_MEDIA_OBB;
+		break;
+	case PERM_ANDROID_PACKAGE:
+		if (info->d_uid != 0)
+			gid = multiuser_get_ext_gid(info->d_uid);
+		else
+			gid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_ANDROID_PACKAGE_CACHE:
+		if (info->d_uid != 0)
+			gid = multiuser_get_cache_gid(info->d_uid);
+		else
+			gid = multiuser_get_uid(info->userid, uid);
+		break;
+	case PERM_PRE_ROOT:
+	default:
+		break;
+	}
+
+	sdcardfs_get_lower_path(dentry, &path);
+	inode = d_inode(path.dentry);
+	if (d_inode(path.dentry)->i_gid.val != gid || d_inode(path.dentry)->i_uid.val != uid) {
+retry_deleg:
+		newattrs.ia_valid = ATTR_GID | ATTR_UID | ATTR_FORCE;
+		newattrs.ia_uid = make_kuid(current_user_ns(), uid);
+		newattrs.ia_gid = make_kgid(current_user_ns(), gid);
+		if (!S_ISDIR(inode->i_mode))
+			newattrs.ia_valid |=
+				ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+		inode_lock(inode);
+		error = security_path_chown(&path, newattrs.ia_uid, newattrs.ia_gid);
+		if (!error)
+			error = notify_change2(path.mnt, path.dentry, &newattrs, &delegated_inode);
+		inode_unlock(inode);
+		if (delegated_inode) {
+			error = break_deleg_wait(&delegated_inode);
+			if (!error)
+				goto retry_deleg;
+		}
+		if (error)
+			pr_err("sdcardfs: Failed to touch up lower fs gid/uid.\n");
+	}
+	sdcardfs_put_lower_path(dentry, &path);
+}
+
+static int descendant_may_need_fixup(struct sdcardfs_inode_info *info, struct limit_search *limit)
+{
+	if (info->perm == PERM_ROOT)
+		return (limit->flags & BY_USERID)?info->userid == limit->userid:1;
+	if (info->perm == PERM_PRE_ROOT || info->perm == PERM_ANDROID)
 		return 1;
 	return 0;
 }
@@ -129,59 +264,48 @@
 	return 0;
 }
 
-void fixup_perms_recursive(struct dentry *dentry, const char* name, size_t len) {
+static void __fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit, int depth)
+{
 	struct dentry *child;
 	struct sdcardfs_inode_info *info;
-	if (!dget(dentry))
-		return;
+
+	/*
+	 * All paths will terminate their recursion on hitting PERM_ANDROID_OBB,
+	 * PERM_ANDROID_MEDIA, or PERM_ANDROID_DATA. This happens at a depth of
+	 * at most 3.
+	 */
+	WARN(depth > 3, "%s: Max expected depth exceeded!\n", __func__);
+	spin_lock_nested(&dentry->d_lock, depth);
 	if (!d_inode(dentry)) {
-		dput(dentry);
+		spin_unlock(&dentry->d_lock);
 		return;
 	}
 	info = SDCARDFS_I(d_inode(dentry));
 
 	if (needs_fixup(info->perm)) {
-		spin_lock(&dentry->d_lock);
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
-				dget(child);
-				if (!strncasecmp(child->d_name.name, name, len)) {
-					if (child->d_inode) {
-						get_derived_permission(dentry, child);
-						fixup_tmp_permissions(child->d_inode);
-						dput(child);
-						break;
-					}
+			spin_lock_nested(&child->d_lock, depth + 1);
+			if (!(limit->flags & BY_NAME) || qstr_case_eq(&child->d_name, &limit->name)) {
+				if (d_inode(child)) {
+					get_derived_permission(dentry, child);
+					fixup_tmp_permissions(d_inode(child));
+					spin_unlock(&child->d_lock);
+					break;
 				}
-				dput(child);
+			}
+			spin_unlock(&child->d_lock);
 		}
-		spin_unlock(&dentry->d_lock);
-	} else 	if (descendant_may_need_fixup(info->perm)) {
-		spin_lock(&dentry->d_lock);
+	} else 	if (descendant_may_need_fixup(info, limit)) {
 		list_for_each_entry(child, &dentry->d_subdirs, d_child) {
-				fixup_perms_recursive(child, name, len);
+				__fixup_perms_recursive(child, limit, depth + 1);
 		}
-		spin_unlock(&dentry->d_lock);
 	}
-	dput(dentry);
+	spin_unlock(&dentry->d_lock);
 }
 
-void fixup_top_recursive(struct dentry *parent) {
-	struct dentry *dentry;
-	struct sdcardfs_inode_info *info;
-	if (!d_inode(parent))
-		return;
-	info = SDCARDFS_I(d_inode(parent));
-	spin_lock(&parent->d_lock);
-	list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
-		if (d_inode(dentry)) {
-			if (SDCARDFS_I(d_inode(parent))->top != SDCARDFS_I(d_inode(dentry))->top) {
-				get_derived_permission(parent, dentry);
-				fixup_tmp_permissions(d_inode(dentry));
-				fixup_top_recursive(dentry);
-			}
-		}
-	}
-	spin_unlock(&parent->d_lock);
+void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit)
+{
+	__fixup_perms_recursive(dentry, limit, 0);
 }
 
 /* main function for updating derived permission */
@@ -215,9 +339,10 @@
 	struct dentry *parent = dget_parent(dentry);
 	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct qstr obb = QSTR_LITERAL("obb");
 
 	if(parent_info->perm == PERM_ANDROID &&
-			!strcasecmp(dentry->d_name.name, "obb")) {
+			qstr_case_eq(&dentry->d_name, &obb)) {
 
 		/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
 		if(!(sbi->options.multiuser == false
@@ -235,6 +360,8 @@
 	struct sdcardfs_dentry_info *di = SDCARDFS_D(dent);
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dent->d_sb);
 	char *path_buf, *obbpath_s;
+	int need_put = 0;
+	struct path lower_path;
 
 	/* check the base obbpath has been changed.
 	 * this routine can check an uninitialized obb dentry as well.
@@ -254,17 +381,20 @@
 			} else {
 				obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
 				if (d_unhashed(di->lower_path.dentry) ||
-					strcasecmp(sbi->obbpath_s, obbpath_s)) {
+					!str_case_eq(sbi->obbpath_s, obbpath_s)) {
 					ret = 1;
 				}
 				kfree(path_buf);
 			}
 
 			//unlock_dir(lower_parent);
-			path_put(&di->lower_path);
+			pathcpy(&lower_path, &di->lower_path);
+			need_put = 1;
 		}
 	}
 	spin_unlock(&di->lock);
+	if (need_put)
+		path_put(&lower_path);
 	return ret;
 }
 
@@ -274,15 +404,16 @@
 	struct dentry *parent = dget_parent(dentry);
 	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(d_inode(parent));
 	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct qstr q_obb = QSTR_LITERAL("obb");
 
 	spin_lock(&SDCARDFS_D(dentry)->lock);
 	if (sbi->options.multiuser) {
 		if(parent_info->perm == PERM_PRE_ROOT &&
-				!strcasecmp(dentry->d_name.name, "obb")) {
+				qstr_case_eq(&dentry->d_name, &q_obb)) {
 			ret = 1;
 		}
 	} else  if (parent_info->perm == PERM_ANDROID &&
-			!strcasecmp(dentry->d_name.name, "obb")) {
+			qstr_case_eq(&dentry->d_name, &q_obb)) {
 		ret = 1;
 	}
 	spin_unlock(&SDCARDFS_D(dentry)->lock);
@@ -309,7 +440,6 @@
 
 	if(!err) {
 		/* the obbpath base has been found */
-		printk(KERN_INFO "sdcardfs: the sbi->obbpath is found\n");
 		pathcpy(lower_path, &obbpath);
 	} else {
 		/* if the sbi->obbpath is not available, we can optionally
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 7750a04..0f2db26 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -216,16 +216,13 @@
 		goto out_err;
 	}
 
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-                         "	dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		err = -EACCES;
 		goto out_err;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(sbi, saved_cred);
+	OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
 
 	file->private_data =
 		kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 5b31170..96a9f87 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -20,18 +20,24 @@
 
 #include "sdcardfs.h"
 #include <linux/fs_struct.h>
+#include <linux/ratelimit.h>
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi)
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info)
 {
-	struct cred * cred;
-	const struct cred * old_cred;
+	struct cred *cred;
+	const struct cred *old_cred;
+	uid_t uid;
 
 	cred = prepare_creds();
 	if (!cred)
 		return NULL;
 
-	cred->fsuid = make_kuid(&init_user_ns, sbi->options.fs_low_uid);
+	if (info->under_obb)
+		uid = AID_MEDIA_OBB;
+	else
+		uid = multiuser_get_uid(info->userid, sbi->options.fs_low_uid);
+	cred->fsuid = make_kuid(&init_user_ns, uid);
 	cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
 
 	old_cred = override_creds(cred);
@@ -40,9 +46,9 @@
 }
 
 /* Do not directly use this function, use REVERT_CRED() instead. */
-void revert_fsids(const struct cred * old_cred)
+void revert_fsids(const struct cred *old_cred)
 {
-	const struct cred * cur_cred;
+	const struct cred *cur_cred;
 
 	cur_cred = current->cred;
 	revert_creds(old_cred);
@@ -61,16 +67,13 @@
 	struct fs_struct *saved_fs;
 	struct fs_struct *copied_fs;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -98,6 +101,7 @@
 		goto out;
 	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
 	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+	fixup_lower_ownership(dentry, dentry->d_name.name);
 
 out:
 	current->fs = saved_fs;
@@ -162,16 +166,13 @@
 	struct path lower_path;
 	const struct cred *saved_cred = NULL;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
@@ -269,17 +270,16 @@
 	int touch_err = 0;
 	struct fs_struct *saved_fs;
 	struct fs_struct *copied_fs;
+	struct qstr q_obb = QSTR_LITERAL("obb");
+	struct qstr q_data = QSTR_LITERAL("data");
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	/* check disk space */
 	if (!check_min_free_space(dentry, 0, 1)) {
@@ -343,16 +343,17 @@
 	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
 	/* update number of links on parent directory */
 	set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);
-
+	fixup_lower_ownership(dentry, dentry->d_name.name);
 	unlock_dir(lower_parent_dentry);
-
-	if ((!sbi->options.multiuser) && (!strcasecmp(dentry->d_name.name, "obb"))
+	if ((!sbi->options.multiuser) && (qstr_case_eq(&dentry->d_name, &q_obb))
 		&& (pi->perm == PERM_ANDROID) && (pi->userid == 0))
 		make_nomedia_in_obb = 1;
 
 	/* When creating /Android/data and /Android/obb, mark them as .nomedia */
 	if (make_nomedia_in_obb ||
-		((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {
+		((pi->perm == PERM_ANDROID) && (qstr_case_eq(&dentry->d_name, &q_data)))) {
+		REVERT_CRED(saved_cred);
+		OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
 		set_fs_pwd(current->fs, &lower_path);
 		touch_err = touch(".nomedia", 0664);
 		if (touch_err) {
@@ -381,16 +382,13 @@
 	struct path lower_path;
 	const struct cred *saved_cred = NULL;
 
-	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(dir, &dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
 	 * the dentry on the original path should be deleted. */
@@ -467,24 +465,20 @@
 	struct dentry *lower_new_dir_dentry = NULL;
 	struct vfsmount *lower_mnt = NULL;
 	struct dentry *trap = NULL;
-	struct dentry *new_parent = NULL;
 	struct path lower_old_path, lower_new_path;
 	const struct cred *saved_cred = NULL;
 
 	if (flags)
 		return -EINVAL;
 
-	if(!check_caller_access_to_name(old_dir, old_dentry->d_name.name) ||
-		!check_caller_access_to_name(new_dir, new_dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  new_dentry: %s, task:%s\n",
-						 __func__, new_dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(old_dir, &old_dentry->d_name) ||
+		!check_caller_access_to_name(new_dir, &new_dentry->d_name)) {
 		err = -EACCES;
 		goto out_eacces;
 	}
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
 
 	sdcardfs_get_real_lower(old_dentry, &lower_old_path);
 	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -520,23 +514,11 @@
 	if (new_dir != old_dir) {
 		sdcardfs_copy_and_fix_attrs(old_dir, d_inode(lower_old_dir_dentry));
 		fsstack_copy_inode_size(old_dir, d_inode(lower_old_dir_dentry));
-
-		/* update the derived permission of the old_dentry
-		 * with its new parent
-		 */
-		new_parent = dget_parent(new_dentry);
-		if(new_parent) {
-			if(d_inode(old_dentry)) {
-				update_derived_permission_lock(old_dentry);
-			}
-			dput(new_parent);
-		}
 	}
-	/* At this point, not all dentry information has been moved, so
-	 * we pass along new_dentry for the name.*/
-	get_derived_permission_new(new_dentry->d_parent, old_dentry, new_dentry);
+	get_derived_permission_new(new_dentry->d_parent, old_dentry, &new_dentry->d_name);
 	fixup_tmp_permissions(d_inode(old_dentry));
-	fixup_top_recursive(old_dentry);
+	fixup_lower_ownership(old_dentry, new_dentry->d_name.name);
+	d_invalidate(old_dentry); /* Can't fixup ownership recursively :( */
 out:
 	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
 	dput(lower_old_dir_dentry);
@@ -607,7 +589,7 @@
 
 static int sdcardfs_permission_wrn(struct inode *inode, int mask)
 {
-	WARN(1, "sdcardfs does not support permission. Use permission2.\n");
+	WARN_RATELIMIT(1, "sdcardfs does not support permission. Use permission2.\n");
 	return -EINVAL;
 }
 
@@ -692,7 +674,7 @@
 
 static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
 {
-	WARN(1, "sdcardfs does not support setattr. User setattr2.\n");
+	WARN_RATELIMIT(1, "sdcardfs does not support setattr. User setattr2.\n");
 	return -EINVAL;
 }
 
@@ -745,17 +727,18 @@
 	 * this user can change the lower inode: that should happen when
 	 * calling notify_change on the lower inode.
 	 */
+	/* prepare our own lower struct iattr (with the lower file) */
+	memcpy(&lower_ia, ia, sizeof(lower_ia));
+	/* Allow touch updating timestamps. A previous permission check ensures
+	 * we have write access. Changes to mode, owner, group are ignored. */
+	ia->ia_valid |= ATTR_FORCE;
 	err = setattr_prepare(&tmp_d, ia);
 
 	if (!err) {
 		/* check the Android group ID */
 		parent = dget_parent(dentry);
-		if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-			printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-							 "  dentry: %s, task:%s\n",
-							 __func__, dentry->d_name.name, current->comm);
+		if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name))
 			err = -EACCES;
-		}
 		dput(parent);
 	}
 
@@ -763,15 +746,13 @@
 		goto out_err;
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred);
+	OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
 
 	sdcardfs_get_lower_path(dentry, &lower_path);
 	lower_dentry = lower_path.dentry;
 	lower_mnt = lower_path.mnt;
 	lower_inode = sdcardfs_lower_inode(inode);
 
-	/* prepare our own lower struct iattr (with the lower file) */
-	memcpy(&lower_ia, ia, sizeof(lower_ia));
 	if (ia->ia_valid & ATTR_FILE)
 		lower_ia.ia_file = sdcardfs_lower_file(ia->ia_file);
 
@@ -862,33 +843,27 @@
 static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 		 struct kstat *stat)
 {
-	struct dentry *lower_dentry;
-	struct inode *inode;
-	struct inode *lower_inode;
+	struct kstat lower_stat;
 	struct path lower_path;
 	struct dentry *parent;
 	int err;
 
 	parent = dget_parent(dentry);
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-						 "  dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		dput(parent);
 		return -EACCES;
 	}
 	dput(parent);
 
-	inode = d_inode(dentry);
-
 	sdcardfs_get_lower_path(dentry, &lower_path);
-	lower_dentry = lower_path.dentry;
-	lower_inode = sdcardfs_lower_inode(inode);
-
-	sdcardfs_copy_and_fix_attrs(inode, lower_inode);
-	fsstack_copy_inode_size(inode, lower_inode);
-
-	err = sdcardfs_fillattr(mnt, inode, stat);
+	err = vfs_getattr(&lower_path, &lower_stat);
+	if (err)
+		goto out;
+	sdcardfs_copy_and_fix_attrs(d_inode(dentry),
+			      d_inode(lower_path.dentry));
+	err = sdcardfs_fillattr(mnt, d_inode(dentry), stat);
+	stat->blocks = lower_stat.blocks;
+out:
 	sdcardfs_put_lower_path(dentry, &lower_path);
 	return err;
 }
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index d271617..7d26c26 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -206,6 +206,28 @@
 	return err;
 }
 
+struct sdcardfs_name_data {
+	struct dir_context ctx;
+	const struct qstr *to_find;
+	char *name;
+	bool found;
+};
+
+static int sdcardfs_name_match(struct dir_context *ctx, const char *name, int namelen,
+		loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct sdcardfs_name_data *buf = container_of(ctx, struct sdcardfs_name_data, ctx);
+	struct qstr candidate = QSTR_INIT(name, namelen);
+
+	if (qstr_case_eq(buf->to_find, &candidate)) {
+		memcpy(buf->name, name, namelen);
+		buf->name[namelen] = 0;
+		buf->found = true;
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * Main driver function for sdcardfs's lookup.
  *
@@ -219,9 +241,9 @@
 	struct vfsmount *lower_dir_mnt;
 	struct dentry *lower_dir_dentry = NULL;
 	struct dentry *lower_dentry;
-	const char *name;
+	const struct qstr *name;
 	struct path lower_path;
-	struct qstr this;
+	struct qstr dname;
 	struct sdcardfs_sb_info *sbi;
 
 	sbi = SDCARDFS_SB(dentry->d_sb);
@@ -231,38 +253,50 @@
 	if (IS_ROOT(dentry))
 		goto out;
 
-	name = dentry->d_name.name;
+	name = &dentry->d_name;
 
 	/* now start the actual lookup procedure */
 	lower_dir_dentry = lower_parent_path->dentry;
 	lower_dir_mnt = lower_parent_path->mnt;
 
 	/* Use vfs_path_lookup to check if the dentry exists or not */
-	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0,
+	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name->name, 0,
 				&lower_path);
 	/* check for other cases */
 	if (err == -ENOENT) {
-		struct dentry *child;
-		struct dentry *match = NULL;
-		inode_lock(d_inode(lower_dir_dentry));
-		spin_lock(&lower_dir_dentry->d_lock);
-		list_for_each_entry(child, &lower_dir_dentry->d_subdirs, d_child) {
-			if (child && d_inode(child)) {
-				if (strcasecmp(child->d_name.name, name)==0) {
-					match = dget(child);
-					break;
-				}
-			}
+		struct file *file;
+		const struct cred *cred = current_cred();
+
+		struct sdcardfs_name_data buffer = {
+			.ctx.actor = sdcardfs_name_match,
+			.to_find = name,
+			.name = __getname(),
+			.found = false,
+		};
+
+		if (!buffer.name) {
+			err = -ENOMEM;
+			goto out;
 		}
-		spin_unlock(&lower_dir_dentry->d_lock);
-		inode_unlock(d_inode(lower_dir_dentry));
-		if (match) {
+		file = dentry_open(lower_parent_path, O_RDONLY, cred);
+		if (IS_ERR(file)) {
+			err = PTR_ERR(file);
+			goto put_name;
+		}
+		err = iterate_dir(file, &buffer.ctx);
+		fput(file);
+		if (err)
+			goto put_name;
+
+		if (buffer.found)
 			err = vfs_path_lookup(lower_dir_dentry,
 						lower_dir_mnt,
-						match->d_name.name, 0,
+						buffer.name, 0,
 						&lower_path);
-			dput(match);
-		}
+		else
+			err = -ENOENT;
+put_name:
+		__putname(buffer.name);
 	}
 
 	/* no error: handle positive dentries */
@@ -307,14 +341,14 @@
 		goto out;
 
 	/* instatiate a new negative dentry */
-	this.name = name;
-	this.len = strlen(name);
-	this.hash = full_name_hash(dentry, this.name, this.len);
-	lower_dentry = d_lookup(lower_dir_dentry, &this);
+	dname.name = name->name;
+	dname.len = name->len;
+	dname.hash = full_name_hash(lower_dir_dentry, dname.name, dname.len);
+	lower_dentry = d_lookup(lower_dir_dentry, &dname);
 	if (lower_dentry)
 		goto setup_lower;
 
-	lower_dentry = d_alloc(lower_dir_dentry, &this);
+	lower_dentry = d_alloc(lower_dir_dentry, &dname);
 	if (!lower_dentry) {
 		err = -ENOMEM;
 		goto out;
@@ -359,16 +393,13 @@
 
 	parent = dget_parent(dentry);
 
-	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+	if (!check_caller_access_to_name(d_inode(parent), &dentry->d_name)) {
 		ret = ERR_PTR(-EACCES);
-		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
-                         "	dentry: %s, task:%s\n",
-						 __func__, dentry->d_name.name, current->comm);
 		goto out_err;
         }
 
 	/* save current_cred and override it */
-	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred);
+	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
 
 	sdcardfs_get_lower_path(parent, &lower_parent_path);
 
@@ -392,6 +423,7 @@
 		/* get derived permission */
 		get_derived_permission(parent, dentry);
 		fixup_tmp_permissions(d_inode(dentry));
+		fixup_lower_ownership(dentry, dentry->d_name.name);
 	}
 	/* update parent directory's atime */
 	fsstack_copy_attr_atime(d_inode(parent),
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 7a8eae2..4e2aded 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -432,6 +432,7 @@
 	.kill_sb	= sdcardfs_kill_sb,
 	.fs_flags	= 0,
 };
+MODULE_ALIAS_FS(SDCARDFS_NAME);
 
 static int __init init_sdcardfs_fs(void)
 {
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
index ac5f3de..51266f5 100644
--- a/fs/sdcardfs/mmap.c
+++ b/fs/sdcardfs/mmap.c
@@ -48,33 +48,54 @@
 	return err;
 }
 
+static int sdcardfs_page_mkwrite(struct vm_area_struct *vma,
+			       struct vm_fault *vmf)
+{
+	int err = 0;
+	struct file *file, *lower_file;
+	const struct vm_operations_struct *lower_vm_ops;
+	struct vm_area_struct lower_vma;
+
+	memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
+	file = lower_vma.vm_file;
+	lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
+	BUG_ON(!lower_vm_ops);
+	if (!lower_vm_ops->page_mkwrite)
+		goto out;
+
+	lower_file = sdcardfs_lower_file(file);
+	/*
+	 * XXX: vm_ops->page_mkwrite may be called in parallel.
+	 * Because we have to resort to temporarily changing the
+	 * vma->vm_file to point to the lower file, a concurrent
+	 * invocation of sdcardfs_page_mkwrite could see a different
+	 * value.  In this workaround, we keep a different copy of the
+	 * vma structure in our stack, so we never expose a different
+	 * value of the vma->vm_file called to us, even temporarily.
+	 * A better fix would be to change the calling semantics of
+	 * ->page_mkwrite to take an explicit file pointer.
+	 */
+	lower_vma.vm_file = lower_file;
+	err = lower_vm_ops->page_mkwrite(&lower_vma, vmf);
+out:
+	return err;
+}
+
 static ssize_t sdcardfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	/*
-     * This function returns zero on purpose in order to support direct IO.
-	 * __dentry_open checks a_ops->direct_IO and returns EINVAL if it is null.
-     *
-	 * However, this function won't be called by certain file operations
-     * including generic fs functions.  * reads and writes are delivered to
-     * the lower file systems and the direct IOs will be handled by them.
-	 *
-     * NOTE: exceptionally, on the recent kernels (since Linux 3.8.x),
-     * swap_writepage invokes this function directly.
+	 * This function should never be called directly.  We need it
+	 * to exist, to get past a check in open_check_o_direct(),
+	 * which is called from do_last().
 	 */
-	printk(KERN_INFO "%s, operation is not supported\n", __func__);
-	return 0;
+	return -EINVAL;
 }
 
-/*
- * XXX: the default address_space_ops for sdcardfs is empty.  We cannot set
- * our inode->i_mapping->a_ops to NULL because too many code paths expect
- * the a_ops vector to be non-NULL.
- */
 const struct address_space_operations sdcardfs_aops = {
-	/* empty on purpose */
 	.direct_IO	= sdcardfs_direct_IO,
 };
 
 const struct vm_operations_struct sdcardfs_vm_ops = {
 	.fault		= sdcardfs_fault,
+	.page_mkwrite	= sdcardfs_page_mkwrite,
 };
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
index 923ba10..2e89b58 100644
--- a/fs/sdcardfs/multiuser.h
+++ b/fs/sdcardfs/multiuser.h
@@ -18,20 +18,27 @@
  * General Public License.
  */
 
-#define MULTIUSER_APP_PER_USER_RANGE 100000
+#define AID_USER_OFFSET     100000 /* offset for uid ranges for each user */
+#define AID_APP_START        10000 /* first app user */
+#define AID_APP_END          19999 /* last app user */
+#define AID_CACHE_GID_START  20000 /* start of gids for apps to mark cached data */
+#define AID_EXT_GID_START    30000 /* start of gids for apps to mark external data */
+#define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
 
 typedef uid_t userid_t;
 typedef uid_t appid_t;
 
-static inline userid_t multiuser_get_user_id(uid_t uid) {
-    return uid / MULTIUSER_APP_PER_USER_RANGE;
+static inline uid_t multiuser_get_uid(userid_t user_id, appid_t app_id)
+{
+	return (user_id * AID_USER_OFFSET) + (app_id % AID_USER_OFFSET);
 }
 
-static inline appid_t multiuser_get_app_id(uid_t uid) {
-    return uid % MULTIUSER_APP_PER_USER_RANGE;
+static inline gid_t multiuser_get_cache_gid(uid_t uid)
+{
+	return uid - AID_APP_START + AID_CACHE_GID_START;
 }
 
-static inline uid_t multiuser_get_uid(userid_t userId, appid_t appId) {
-    return userId * MULTIUSER_APP_PER_USER_RANGE + (appId % MULTIUSER_APP_PER_USER_RANGE);
+static inline gid_t multiuser_get_ext_gid(uid_t uid)
+{
+	return uid - AID_APP_START + AID_EXT_GID_START;
 }
-
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
index 03776fa..2cc076c 100644
--- a/fs/sdcardfs/packagelist.c
+++ b/fs/sdcardfs/packagelist.c
@@ -20,8 +20,10 @@
 
 #include "sdcardfs.h"
 #include <linux/hashtable.h>
+#include <linux/ctype.h>
 #include <linux/delay.h>
-
+#include <linux/radix-tree.h>
+#include <linux/dcache.h>
 
 #include <linux/init.h>
 #include <linux/module.h>
@@ -31,35 +33,50 @@
 
 struct hashtable_entry {
 	struct hlist_node hlist;
-	const char *key;
+	struct hlist_node dlist; /* for deletion cleanup */
+	struct qstr key;
 	atomic_t value;
 };
 
 static DEFINE_HASHTABLE(package_to_appid, 8);
+static DEFINE_HASHTABLE(package_to_userid, 8);
+static DEFINE_HASHTABLE(ext_to_groupid, 8);
+
 
 static struct kmem_cache *hashtable_entry_cachep;
 
-static unsigned int str_hash(const char *key) {
-	int i;
-	unsigned int h = strlen(key);
-	char *data = (char *)key;
-
-	for (i = 0; i < strlen(key); i++) {
-		h = h * 31 + *data;
-		data++;
-	}
-	return h;
+static unsigned int full_name_case_hash(const void *salt, const unsigned char *name, unsigned int len)
+{
+	unsigned long hash = init_name_hash(salt);
+	while (len--)
+		hash = partial_name_hash(tolower(*name++), hash);
+	return end_name_hash(hash);
 }
 
-appid_t get_appid(const char *app_name)
+static inline void qstr_init(struct qstr *q, const char *name)
+{
+	q->name = name;
+	q->len = strlen(q->name);
+	q->hash = full_name_case_hash(NULL, q->name, q->len);
+}
+
+static inline int qstr_copy(const struct qstr *src, struct qstr *dest)
+{
+	dest->name = kstrdup(src->name, GFP_KERNEL);
+	dest->hash_len = src->hash_len;
+	return !!dest->name;
+}
+
+
+static appid_t __get_appid(const struct qstr *key)
 {
 	struct hashtable_entry *hash_cur;
-	unsigned int hash = str_hash(app_name);
+	unsigned int hash = key->hash;
 	appid_t ret_id;
 
 	rcu_read_lock();
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(app_name, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
 			ret_id = atomic_read(&hash_cur->value);
 			rcu_read_unlock();
 			return ret_id;
@@ -69,16 +86,76 @@
 	return 0;
 }
 
+appid_t get_appid(const char *key)
+{
+	struct qstr q;
+	qstr_init(&q, key);
+	return __get_appid(&q);
+}
+
+static appid_t __get_ext_gid(const struct qstr *key)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+	appid_t ret_id;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			ret_id = atomic_read(&hash_cur->value);
+			rcu_read_unlock();
+			return ret_id;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+appid_t get_ext_gid(const char *key)
+{
+	struct qstr q;
+	qstr_init(&q, key);
+	return __get_ext_gid(&q);
+}
+
+static appid_t __is_excluded(const struct qstr *app_name, userid_t user)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = app_name->hash;
+
+	rcu_read_lock();
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (atomic_read(&hash_cur->value) == user &&
+				qstr_case_eq(app_name, &hash_cur->key)) {
+			rcu_read_unlock();
+			return 1;
+		}
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+appid_t is_excluded(const char *key, userid_t user)
+{
+	struct qstr q;
+	qstr_init(&q, key);
+	return __is_excluded(&q, user);
+}
+
 /* Kernel has already enforced everything we returned through
  * derive_permissions_locked(), so this is used to lock down access
  * even further, such as enforcing that apps hold sdcard_rw. */
-int check_caller_access_to_name(struct inode *parent_node, const char* name) {
+int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name)
+{
+	struct qstr q_autorun = QSTR_LITERAL("autorun.inf");
+	struct qstr q__android_secure = QSTR_LITERAL(".android_secure");
+	struct qstr q_android_secure = QSTR_LITERAL("android_secure");
 
 	/* Always block security-sensitive files at root */
 	if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
-		if (!strcasecmp(name, "autorun.inf")
-			|| !strcasecmp(name, ".android_secure")
-			|| !strcasecmp(name, "android_secure")) {
+		if (qstr_case_eq(name, &q_autorun)
+			|| qstr_case_eq(name, &q__android_secure)
+			|| qstr_case_eq(name, &q_android_secure)) {
 			return 0;
 		}
 	}
@@ -106,16 +183,17 @@
 	}
 }
 
-static struct hashtable_entry *alloc_packagelist_entry(const char *key,
+static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key,
 		appid_t value)
 {
 	struct hashtable_entry *ret = kmem_cache_alloc(hashtable_entry_cachep,
 			GFP_KERNEL);
 	if (!ret)
 		return NULL;
+	INIT_HLIST_NODE(&ret->dlist);
+	INIT_HLIST_NODE(&ret->hlist);
 
-	ret->key = kstrdup(key, GFP_KERNEL);
-	if (!ret->key) {
+	if (!qstr_copy(key, &ret->key)) {
 		kmem_cache_free(hashtable_entry_cachep, ret);
 		return NULL;
 	}
@@ -124,79 +202,251 @@
 	return ret;
 }
 
-static int insert_packagelist_entry_locked(const char *key, appid_t value)
+static int insert_packagelist_appid_entry_locked(const struct qstr *key, appid_t value)
 {
 	struct hashtable_entry *hash_cur;
 	struct hashtable_entry *new_entry;
-	unsigned int hash = str_hash(key);
+	unsigned int hash = key->hash;
 
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(key, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
 			atomic_set(&hash_cur->value, value);
 			return 0;
 		}
 	}
-	new_entry = alloc_packagelist_entry(key, value);
+	new_entry = alloc_hashtable_entry(key, value);
 	if (!new_entry)
 		return -ENOMEM;
 	hash_add_rcu(package_to_appid, &new_entry->hlist, hash);
 	return 0;
 }
 
-static void fixup_perms(struct super_block *sb, const char *key) {
-	if (sb && sb->s_magic == SDCARDFS_SUPER_MAGIC) {
-		fixup_perms_recursive(sb->s_root, key, strlen(key));
+static int insert_ext_gid_entry_locked(const struct qstr *key, appid_t value)
+{
+	struct hashtable_entry *hash_cur;
+	struct hashtable_entry *new_entry;
+	unsigned int hash = key->hash;
+
+	/* An extension can only belong to one gid */
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key))
+			return -EINVAL;
+	}
+	new_entry = alloc_hashtable_entry(key, value);
+	if (!new_entry)
+		return -ENOMEM;
+	hash_add_rcu(ext_to_groupid, &new_entry->hlist, hash);
+	return 0;
+}
+
+static int insert_userid_exclude_entry_locked(const struct qstr *key, userid_t value)
+{
+	struct hashtable_entry *hash_cur;
+	struct hashtable_entry *new_entry;
+	unsigned int hash = key->hash;
+
+	/* Only insert if not already present */
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (atomic_read(&hash_cur->value) == value &&
+				qstr_case_eq(key, &hash_cur->key))
+			return 0;
+	}
+	new_entry = alloc_hashtable_entry(key, value);
+	if (!new_entry)
+		return -ENOMEM;
+	hash_add_rcu(package_to_userid, &new_entry->hlist, hash);
+	return 0;
+}
+
+static void fixup_all_perms_name(const struct qstr *key)
+{
+	struct sdcardfs_sb_info *sbinfo;
+	struct limit_search limit = {
+		.flags = BY_NAME,
+		.name = QSTR_INIT(key->name, key->len),
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
 	}
 }
 
-static void fixup_all_perms(const char *key)
+static void fixup_all_perms_name_userid(const struct qstr *key, userid_t userid)
 {
 	struct sdcardfs_sb_info *sbinfo;
-	list_for_each_entry(sbinfo, &sdcardfs_super_list, list)
-		if (sbinfo)
-			fixup_perms(sbinfo->sb, key);
+	struct limit_search limit = {
+		.flags = BY_NAME | BY_USERID,
+		.name = QSTR_INIT(key->name, key->len),
+		.userid = userid,
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+	}
 }
 
-static int insert_packagelist_entry(const char *key, appid_t value)
+static void fixup_all_perms_userid(userid_t userid)
+{
+	struct sdcardfs_sb_info *sbinfo;
+	struct limit_search limit = {
+		.flags = BY_USERID,
+		.userid = userid,
+	};
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo_has_sdcard_magic(sbinfo))
+			fixup_perms_recursive(sbinfo->sb->s_root, &limit);
+	}
+}
+
+static int insert_packagelist_entry(const struct qstr *key, appid_t value)
 {
 	int err;
 
 	mutex_lock(&sdcardfs_super_list_lock);
-	err = insert_packagelist_entry_locked(key, value);
+	err = insert_packagelist_appid_entry_locked(key, value);
 	if (!err)
-		fixup_all_perms(key);
+		fixup_all_perms_name(key);
 	mutex_unlock(&sdcardfs_super_list_lock);
 
 	return err;
 }
 
-static void free_packagelist_entry(struct hashtable_entry *entry)
+static int insert_ext_gid_entry(const struct qstr *key, appid_t value)
 {
-	kfree(entry->key);
-	hash_del_rcu(&entry->hlist);
+	int err;
+
+	mutex_lock(&sdcardfs_super_list_lock);
+	err = insert_ext_gid_entry_locked(key, value);
+	mutex_unlock(&sdcardfs_super_list_lock);
+
+	return err;
+}
+
+static int insert_userid_exclude_entry(const struct qstr *key, userid_t value)
+{
+	int err;
+
+	mutex_lock(&sdcardfs_super_list_lock);
+	err = insert_userid_exclude_entry_locked(key, value);
+	if (!err)
+		fixup_all_perms_name_userid(key, value);
+	mutex_unlock(&sdcardfs_super_list_lock);
+
+	return err;
+}
+
+static void free_hashtable_entry(struct hashtable_entry *entry)
+{
+	kfree(entry->key.name);
 	kmem_cache_free(hashtable_entry_cachep, entry);
 }
 
-static void remove_packagelist_entry_locked(const char *key)
+static void remove_packagelist_entry_locked(const struct qstr *key)
 {
 	struct hashtable_entry *hash_cur;
-	unsigned int hash = str_hash(key);
+	unsigned int hash = key->hash;
+	struct hlist_node *h_t;
+	HLIST_HEAD(free_list);
 
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+		}
+	}
 	hash_for_each_possible_rcu(package_to_appid, hash_cur, hlist, hash) {
-		if (!strcasecmp(key, hash_cur->key)) {
+		if (qstr_case_eq(key, &hash_cur->key)) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+			break;
+		}
+	}
+	synchronize_rcu();
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+		free_hashtable_entry(hash_cur);
+}
+
+static void remove_packagelist_entry(const struct qstr *key)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_packagelist_entry_locked(key);
+	fixup_all_perms_name(key);
+	mutex_unlock(&sdcardfs_super_list_lock);
+	return;
+}
+
+static void remove_ext_gid_entry_locked(const struct qstr *key, gid_t group)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+
+	hash_for_each_possible_rcu(ext_to_groupid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key) && atomic_read(&hash_cur->value) == group) {
 			hash_del_rcu(&hash_cur->hlist);
 			synchronize_rcu();
-			free_packagelist_entry(hash_cur);
-			return;
+			free_hashtable_entry(hash_cur);
+			break;
 		}
 	}
 }
 
-static void remove_packagelist_entry(const char *key)
+static void remove_ext_gid_entry(const struct qstr *key, gid_t group)
 {
 	mutex_lock(&sdcardfs_super_list_lock);
-	remove_packagelist_entry_locked(key);
-	fixup_all_perms(key);
+	remove_ext_gid_entry_locked(key, group);
+	mutex_unlock(&sdcardfs_super_list_lock);
+	return;
+}
+
+static void remove_userid_all_entry_locked(userid_t userid)
+{
+	struct hashtable_entry *hash_cur;
+	struct hlist_node *h_t;
+	HLIST_HEAD(free_list);
+	int i;
+
+	hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+		if (atomic_read(&hash_cur->value) == userid) {
+			hash_del_rcu(&hash_cur->hlist);
+			hlist_add_head(&hash_cur->dlist, &free_list);
+		}
+	}
+	synchronize_rcu();
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist) {
+		free_hashtable_entry(hash_cur);
+	}
+}
+
+static void remove_userid_all_entry(userid_t userid)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_userid_all_entry_locked(userid);
+	fixup_all_perms_userid(userid);
+	mutex_unlock(&sdcardfs_super_list_lock);
+	return;
+}
+
+static void remove_userid_exclude_entry_locked(const struct qstr *key, userid_t userid)
+{
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = key->hash;
+
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(key, &hash_cur->key) &&
+				atomic_read(&hash_cur->value) == userid) {
+			hash_del_rcu(&hash_cur->hlist);
+			synchronize_rcu();
+			free_hashtable_entry(hash_cur);
+			break;
+		}
+	}
+}
+
+static void remove_userid_exclude_entry(const struct qstr *key, userid_t userid)
+{
+	mutex_lock(&sdcardfs_super_list_lock);
+	remove_userid_exclude_entry_locked(key, userid);
+	fixup_all_perms_name_userid(key, userid);
 	mutex_unlock(&sdcardfs_super_list_lock);
 	return;
 }
@@ -210,36 +460,62 @@
 	mutex_lock(&sdcardfs_super_list_lock);
 	hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
 		hash_del_rcu(&hash_cur->hlist);
-		hlist_add_head(&hash_cur->hlist, &free_list);
-
+		hlist_add_head(&hash_cur->dlist, &free_list);
+	}
+	hash_for_each_rcu(package_to_userid, i, hash_cur, hlist) {
+		hash_del_rcu(&hash_cur->hlist);
+		hlist_add_head(&hash_cur->dlist, &free_list);
 	}
 	synchronize_rcu();
-	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, hlist)
-		free_packagelist_entry(hash_cur);
+	hlist_for_each_entry_safe(hash_cur, h_t, &free_list, dlist)
+		free_hashtable_entry(hash_cur);
 	mutex_unlock(&sdcardfs_super_list_lock);
 	printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
 }
 
-struct package_appid {
+#define SDCARDFS_CONFIGFS_ATTR(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IRUGO | S_IWUGO,		\
+	.ca_owner	= THIS_MODULE,			\
+	.show		= _pfx##_name##_show,		\
+	.store		= _pfx##_name##_store,		\
+}
+
+#define SDCARDFS_CONFIGFS_ATTR_RO(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IRUGO,			\
+	.ca_owner	= THIS_MODULE,			\
+	.show		= _pfx##_name##_show,		\
+}
+
+#define SDCARDFS_CONFIGFS_ATTR_WO(_pfx, _name)			\
+static struct configfs_attribute _pfx##attr_##_name = {	\
+	.ca_name	= __stringify(_name),		\
+	.ca_mode	= S_IWUGO,			\
+	.ca_owner	= THIS_MODULE,			\
+	.store		= _pfx##_name##_store,		\
+}
+
+struct package_details {
 	struct config_item item;
-	int add_pid;
+	struct qstr name;
 };
 
-static inline struct package_appid *to_package_appid(struct config_item *item)
+static inline struct package_details *to_package_details(struct config_item *item)
 {
-	return item ? container_of(item, struct package_appid, item) : NULL;
+	return item ? container_of(item, struct package_details, item) : NULL;
 }
 
-static ssize_t package_appid_attr_show(struct config_item *item,
-				      char *page)
+static ssize_t package_details_appid_show(struct config_item *item, char *page)
 {
-	return scnprintf(page, PAGE_SIZE, "%u\n", get_appid(item->ci_name));
+	return scnprintf(page, PAGE_SIZE, "%u\n", __get_appid(&to_package_details(item)->name));
 }
 
-static ssize_t package_appid_attr_store(struct config_item *item,
+static ssize_t package_details_appid_store(struct config_item *item,
 				       const char *page, size_t count)
 {
-	struct package_appid *package_appid = to_package_appid(item);
 	unsigned int tmp;
 	int ret;
 
@@ -247,84 +523,260 @@
 	if (ret)
 		return ret;
 
-	ret = insert_packagelist_entry(item->ci_name, tmp);
-	package_appid->add_pid = tmp;
+	ret = insert_packagelist_entry(&to_package_details(item)->name, tmp);
+
 	if (ret)
 		return ret;
 
 	return count;
 }
 
-static struct configfs_attribute package_appid_attr_add_pid = {
-	.ca_owner = THIS_MODULE,
-	.ca_name = "appid",
-	.ca_mode = S_IRUGO | S_IWUGO,
-	.show = package_appid_attr_show,
-	.store = package_appid_attr_store,
-};
+static ssize_t package_details_excluded_userids_show(struct config_item *item,
+				      char *page)
+{
+	struct package_details *package_details = to_package_details(item);
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = package_details->name.hash;
+	int count = 0;
 
-static struct configfs_attribute *package_appid_attrs[] = {
-	&package_appid_attr_add_pid,
+	rcu_read_lock();
+	hash_for_each_possible_rcu(package_to_userid, hash_cur, hlist, hash) {
+		if (qstr_case_eq(&package_details->name, &hash_cur->key))
+			count += scnprintf(page + count, PAGE_SIZE - count,
+					"%d ", atomic_read(&hash_cur->value));
+	}
+	rcu_read_unlock();
+	if (count)
+		count--;
+	count += scnprintf(page + count, PAGE_SIZE - count, "\n");
+	return count;
+}
+
+static ssize_t package_details_excluded_userids_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+
+	ret = insert_userid_exclude_entry(&to_package_details(item)->name, tmp);
+
+	if (ret)
+		return ret;
+
+	return count;
+}
+
+static ssize_t package_details_clear_userid_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	unsigned int tmp;
+	int ret;
+
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+	remove_userid_exclude_entry(&to_package_details(item)->name, tmp);
+	return count;
+}
+
+static void package_details_release(struct config_item *item)
+{
+	struct package_details *package_details = to_package_details(item);
+	printk(KERN_INFO "sdcardfs: removing %s\n", package_details->name.name);
+	remove_packagelist_entry(&package_details->name);
+	kfree(package_details->name.name);
+	kfree(package_details);
+}
+
+SDCARDFS_CONFIGFS_ATTR(package_details_, appid);
+SDCARDFS_CONFIGFS_ATTR(package_details_, excluded_userids);
+SDCARDFS_CONFIGFS_ATTR_WO(package_details_, clear_userid);
+
+static struct configfs_attribute *package_details_attrs[] = {
+	&package_details_attr_appid,
+	&package_details_attr_excluded_userids,
+	&package_details_attr_clear_userid,
 	NULL,
 };
 
-static void package_appid_release(struct config_item *item)
-{
-	printk(KERN_INFO "sdcardfs: removing %s\n", item->ci_dentry->d_name.name);
-	/* item->ci_name is freed already, so we rely on the dentry */
-	remove_packagelist_entry(item->ci_dentry->d_name.name);
-	kfree(to_package_appid(item));
-}
-
-static struct configfs_item_operations package_appid_item_ops = {
-	.release		= package_appid_release,
+static struct configfs_item_operations package_details_item_ops = {
+	.release = package_details_release,
 };
 
 static struct config_item_type package_appid_type = {
-	.ct_item_ops	= &package_appid_item_ops,
-	.ct_attrs	= package_appid_attrs,
+	.ct_item_ops	= &package_details_item_ops,
+	.ct_attrs	= package_details_attrs,
 	.ct_owner	= THIS_MODULE,
 };
 
-
-struct sdcardfs_packages {
+struct extensions_value {
 	struct config_group group;
+	unsigned int num;
 };
 
-static inline struct sdcardfs_packages *to_sdcardfs_packages(struct config_item *item)
+struct extension_details {
+	struct config_item item;
+	struct qstr name;
+	unsigned int num;
+};
+
+static inline struct extensions_value *to_extensions_value(struct config_item *item)
 {
-	return item ? container_of(to_config_group(item), struct sdcardfs_packages, group) : NULL;
+	return item ? container_of(to_config_group(item), struct extensions_value, group) : NULL;
 }
 
-static struct config_item *sdcardfs_packages_make_item(struct config_group *group, const char *name)
+static inline struct extension_details *to_extension_details(struct config_item *item)
 {
-	struct package_appid *package_appid;
+	return item ? container_of(item, struct extension_details, item) : NULL;
+}
 
-	package_appid = kzalloc(sizeof(struct package_appid), GFP_KERNEL);
-	if (!package_appid)
+static void extension_details_release(struct config_item *item)
+{
+	struct extension_details *extension_details = to_extension_details(item);
+
+	printk(KERN_INFO "sdcardfs: No longer mapping %s files to gid %d\n",
+			extension_details->name.name, extension_details->num);
+	remove_ext_gid_entry(&extension_details->name, extension_details->num);
+	kfree(extension_details->name.name);
+	kfree(extension_details);
+}
+
+static struct configfs_item_operations extension_details_item_ops = {
+	.release = extension_details_release,
+};
+
+static struct config_item_type extension_details_type = {
+	.ct_item_ops = &extension_details_item_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+static struct config_item *extension_details_make_item(struct config_group *group, const char *name)
+{
+	struct extensions_value *extensions_value = to_extensions_value(&group->cg_item);
+	struct extension_details *extension_details = kzalloc(sizeof(struct extension_details), GFP_KERNEL);
+	const char *tmp;
+	int ret;
+	if (!extension_details)
 		return ERR_PTR(-ENOMEM);
 
-	config_item_init_type_name(&package_appid->item, name,
-				   &package_appid_type);
+	tmp = kstrdup(name, GFP_KERNEL);
+	if (!tmp) {
+		kfree(extension_details);
+		return ERR_PTR(-ENOMEM);
+	}
+	qstr_init(&extension_details->name, tmp);
+	ret = insert_ext_gid_entry(&extension_details->name, extensions_value->num);
 
-	package_appid->add_pid = 0;
+	if (ret) {
+		kfree(extension_details->name.name);
+		kfree(extension_details);
+		return ERR_PTR(ret);
+	}
+	config_item_init_type_name(&extension_details->item, name, &extension_details_type);
 
-	return &package_appid->item;
+	return &extension_details->item;
 }
 
-static ssize_t packages_attr_show(struct config_item *item,
-					 char *page)
+static struct configfs_group_operations extensions_value_group_ops = {
+	.make_item = extension_details_make_item,
+};
+
+static struct config_item_type extensions_name_type = {
+	.ct_group_ops	= &extensions_value_group_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct config_group *extensions_make_group(struct config_group *group, const char *name)
 {
-	struct hashtable_entry *hash_cur;
+	struct extensions_value *extensions_value;
+	unsigned int tmp;
+	int ret;
+
+	extensions_value = kzalloc(sizeof(struct extensions_value), GFP_KERNEL);
+	if (!extensions_value)
+		return ERR_PTR(-ENOMEM);
+	ret = kstrtouint(name, 10, &tmp);
+	if (ret) {
+		kfree(extensions_value);
+		return ERR_PTR(ret);
+	}
+
+	extensions_value->num = tmp;
+	config_group_init_type_name(&extensions_value->group, name,
+						&extensions_name_type);
+	return &extensions_value->group;
+}
+
+static void extensions_drop_group(struct config_group *group, struct config_item *item)
+{
+	struct extensions_value *value = to_extensions_value(item);
+	printk(KERN_INFO "sdcardfs: No longer mapping any files to gid %d\n", value->num);
+	kfree(value);
+}
+
+static struct configfs_group_operations extensions_group_ops = {
+	.make_group	= extensions_make_group,
+	.drop_item	= extensions_drop_group,
+};
+
+static struct config_item_type extensions_type = {
+	.ct_group_ops	= &extensions_group_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+struct config_group extension_group = {
+	.cg_item = {
+		.ci_namebuf = "extensions",
+		.ci_type = &extensions_type,
+	},
+};
+
+static struct config_item *packages_make_item(struct config_group *group, const char *name)
+{
+	struct package_details *package_details;
+	const char *tmp;
+
+	package_details = kzalloc(sizeof(struct package_details), GFP_KERNEL);
+	if (!package_details)
+		return ERR_PTR(-ENOMEM);
+	tmp = kstrdup(name, GFP_KERNEL);
+	if (!tmp) {
+		kfree(package_details);
+		return ERR_PTR(-ENOMEM);
+	}
+	qstr_init(&package_details->name, tmp);
+	config_item_init_type_name(&package_details->item, name,
+						&package_appid_type);
+
+	return &package_details->item;
+}
+
+static ssize_t packages_list_show(struct config_item *item, char *page)
+{
+	struct hashtable_entry *hash_cur_app;
+	struct hashtable_entry *hash_cur_user;
 	int i;
 	int count = 0, written = 0;
 	const char errormsg[] = "<truncated>\n";
+	unsigned int hash;
 
 	rcu_read_lock();
-	hash_for_each_rcu(package_to_appid, i, hash_cur, hlist) {
+	hash_for_each_rcu(package_to_appid, i, hash_cur_app, hlist) {
 		written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n",
-					(const char *)hash_cur->key, atomic_read(&hash_cur->value));
-		if (count + written == PAGE_SIZE - sizeof(errormsg)) {
+					hash_cur_app->key.name, atomic_read(&hash_cur_app->value));
+		hash = hash_cur_app->key.hash;
+		hash_for_each_possible_rcu(package_to_userid, hash_cur_user, hlist, hash) {
+			if (qstr_case_eq(&hash_cur_app->key, &hash_cur_user->key)) {
+				written += scnprintf(page + count + written - 1,
+					PAGE_SIZE - sizeof(errormsg) - count - written + 1,
+					" %d\n", atomic_read(&hash_cur_user->value)) - 1;
+			}
+		}
+		if (count + written == PAGE_SIZE - sizeof(errormsg) - 1) {
 			count += scnprintf(page + count, PAGE_SIZE - count, errormsg);
 			break;
 		}
@@ -335,59 +787,72 @@
 	return count;
 }
 
-static struct configfs_attribute sdcardfs_packages_attr_description = {
-	.ca_owner = THIS_MODULE,
-	.ca_name = "packages_gid.list",
-	.ca_mode = S_IRUGO,
-	.show = packages_attr_show,
-};
-
-static struct configfs_attribute *sdcardfs_packages_attrs[] = {
-	&sdcardfs_packages_attr_description,
-	NULL,
-};
-
-static void sdcardfs_packages_release(struct config_item *item)
+static ssize_t packages_remove_userid_store(struct config_item *item,
+				       const char *page, size_t count)
 {
+	unsigned int tmp;
+	int ret;
 
-	printk(KERN_INFO "sdcardfs: destroyed something?\n");
-	kfree(to_sdcardfs_packages(item));
+	ret = kstrtouint(page, 10, &tmp);
+	if (ret)
+		return ret;
+	remove_userid_all_entry(tmp);
+	return count;
 }
 
-static struct configfs_item_operations sdcardfs_packages_item_ops = {
-	.release	= sdcardfs_packages_release,
+static struct configfs_attribute packages_attr_packages_gid_list = {
+	.ca_name	= "packages_gid.list",
+	.ca_mode	= S_IRUGO,
+	.ca_owner	= THIS_MODULE,
+	.show		= packages_list_show,
+};
+
+SDCARDFS_CONFIGFS_ATTR_WO(packages_, remove_userid);
+
+static struct configfs_attribute *packages_attrs[] = {
+	&packages_attr_packages_gid_list,
+	&packages_attr_remove_userid,
+	NULL,
 };
 
 /*
  * Note that, since no extra work is required on ->drop_item(),
  * no ->drop_item() is provided.
  */
-static struct configfs_group_operations sdcardfs_packages_group_ops = {
-	.make_item	= sdcardfs_packages_make_item,
+static struct configfs_group_operations packages_group_ops = {
+	.make_item	= packages_make_item,
 };
 
-static struct config_item_type sdcardfs_packages_type = {
-	.ct_item_ops	= &sdcardfs_packages_item_ops,
-	.ct_group_ops	= &sdcardfs_packages_group_ops,
-	.ct_attrs	= sdcardfs_packages_attrs,
+static struct config_item_type packages_type = {
+	.ct_group_ops	= &packages_group_ops,
+	.ct_attrs	= packages_attrs,
 	.ct_owner	= THIS_MODULE,
 };
 
-static struct configfs_subsystem sdcardfs_packages_subsys = {
+struct config_group *sd_default_groups[] = {
+	&extension_group,
+	NULL,
+};
+
+static struct configfs_subsystem sdcardfs_packages = {
 	.su_group = {
 		.cg_item = {
 			.ci_namebuf = "sdcardfs",
-			.ci_type = &sdcardfs_packages_type,
+			.ci_type = &packages_type,
 		},
 	},
 };
 
 static int configfs_sdcardfs_init(void)
 {
-	int ret;
-	struct configfs_subsystem *subsys = &sdcardfs_packages_subsys;
+	int ret, i;
+	struct configfs_subsystem *subsys = &sdcardfs_packages;
 
 	config_group_init(&subsys->su_group);
+	for (i = 0; sd_default_groups[i]; i++) {
+		config_group_init(sd_default_groups[i]);
+		configfs_add_default_group(sd_default_groups[i], &subsys->su_group);
+	}
 	mutex_init(&subsys->su_mutex);
 	ret = configfs_register_subsystem(subsys);
 	if (ret) {
@@ -400,7 +865,7 @@
 
 static void configfs_sdcardfs_exit(void)
 {
-	configfs_unregister_subsystem(&sdcardfs_packages_subsys);
+	configfs_unregister_subsystem(&sdcardfs_packages);
 }
 
 int packagelist_init(void)
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 66a97ef..09ec1e4 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -29,6 +29,7 @@
 #include <linux/dcache.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -65,6 +66,9 @@
 #define AID_SDCARD_PICS   1033	/* external storage photos access */
 #define AID_SDCARD_AV     1034	/* external storage audio/video access */
 #define AID_SDCARD_ALL    1035	/* access all users external storage */
+#define AID_MEDIA_OBB     1059  /* obb files */
+
+#define AID_SDCARD_IMAGE  1057
 
 #define AID_PACKAGE_INFO  1027
 
@@ -91,13 +95,19 @@
  * These two macro should be used in pair, and OVERRIDE_CRED() should be
  * placed at the beginning of a function, right after variable declaration.
  */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred)		\
-	saved_cred = override_fsids(sdcardfs_sbi);	\
-	if (!saved_cred) { return -ENOMEM; }
+#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info)		\
+	do {	\
+		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		if (!saved_cred)	\
+			return -ENOMEM;	\
+	} while (0)
 
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred)	\
-	saved_cred = override_fsids(sdcardfs_sbi);	\
-	if (!saved_cred) { return ERR_PTR(-ENOMEM); }
+#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info)	\
+	do {	\
+		saved_cred = override_fsids(sdcardfs_sbi, info);	\
+		if (!saved_cred)	\
+			return ERR_PTR(-ENOMEM);	\
+	} while (0)
 
 #define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
 
@@ -127,13 +137,18 @@
     PERM_ANDROID_OBB,
     /* This node is "/Android/media" */
     PERM_ANDROID_MEDIA,
+    /* This node is "/Android/[data|media|obb]/[package]" */
+    PERM_ANDROID_PACKAGE,
+    /* This node is "/Android/[data|media|obb]/[package]/cache" */
+    PERM_ANDROID_PACKAGE_CACHE,
 } perm_t;
 
 struct sdcardfs_sb_info;
 struct sdcardfs_mount_options;
+struct sdcardfs_inode_info;
 
 /* Do not directly use this function. Use OVERRIDE_CRED() instead. */
-const struct cred * override_fsids(struct sdcardfs_sb_info* sbi);
+const struct cred *override_fsids(struct sdcardfs_sb_info *sbi, struct sdcardfs_inode_info *info);
 /* Do not directly use this function, use REVERT_CRED() instead. */
 void revert_fsids(const struct cred * old_cred);
 
@@ -175,6 +190,8 @@
 	userid_t userid;
 	uid_t d_uid;
 	bool under_android;
+	bool under_cache;
+	bool under_obb;
 	/* top folder for ownership */
 	struct inode *top;
 
@@ -335,6 +352,11 @@
 SDCARDFS_DENT_FUNC(lower_path)
 SDCARDFS_DENT_FUNC(orig_path)
 
+static inline bool sbinfo_has_sdcard_magic(struct sdcardfs_sb_info *sbinfo)
+{
+  return sbinfo && sbinfo->sb && sbinfo->sb->s_magic == SDCARDFS_SUPER_MAGIC;
+}
+
 /* grab a refererence if we aren't linking to ourself */
 static inline void set_top(struct sdcardfs_inode_info *info, struct inode *top)
 {
@@ -442,20 +464,30 @@
 
 /* for packagelist.c */
 extern appid_t get_appid(const char *app_name);
-extern int check_caller_access_to_name(struct inode *parent_node, const char* name);
+extern appid_t get_ext_gid(const char *app_name);
+extern appid_t is_excluded(const char *app_name, userid_t userid);
+extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name);
 extern int open_flags_to_access_mode(int open_flags);
 extern int packagelist_init(void);
 extern void packagelist_exit(void);
 
 /* for derived_perm.c */
+#define BY_NAME		(1 << 0)
+#define BY_USERID	(1 << 1)
+struct limit_search {
+	unsigned int flags;
+	struct qstr name;
+	userid_t userid;
+};
+
 extern void setup_derived_state(struct inode *inode, perm_t perm, userid_t userid,
 			uid_t uid, bool under_android, struct inode *top);
 extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
-extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry);
-extern void fixup_top_recursive(struct dentry *parent);
-extern void fixup_perms_recursive(struct dentry *dentry, const char *name, size_t len);
+extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, const struct qstr *name);
+extern void fixup_perms_recursive(struct dentry *dentry, struct limit_search *limit);
 
 extern void update_derived_permission_lock(struct dentry *dentry);
+void fixup_lower_ownership(struct dentry *dentry, const char *name);
 extern int need_graft_path(struct dentry *dentry);
 extern int is_base_obbpath(struct dentry *dentry);
 extern int is_obbpath_invalid(struct dentry *dentry);
@@ -577,4 +609,22 @@
 	dest->i_flags = src->i_flags;
 	set_nlink(dest, src->i_nlink);
 }
+
+static inline bool str_case_eq(const char *s1, const char *s2)
+{
+	return !strcasecmp(s1, s2);
+}
+
+static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len)
+{
+	return !strncasecmp(s1, s2, len);
+}
+
+static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2)
+{
+	return q1->len == q2->len && str_case_eq(q1->name, q2->name);
+}
+
+#define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1)
+
 #endif	/* not _SDCARDFS_H_ */
diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h
index dbee8901..e169172 100644
--- a/include/dt-bindings/clock/qcom,camcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,34 +88,17 @@
 #define CAM_CC_MCLK3_CLK_SRC					71
 #define CAM_CC_PLL0						72
 #define CAM_CC_PLL0_OUT_EVEN					73
-#define CAM_CC_PLL0_OUT_MAIN					74
-#define CAM_CC_PLL0_OUT_ODD					75
-#define CAM_CC_PLL0_OUT_TEST					76
-#define CAM_CC_PLL1						77
-#define CAM_CC_PLL1_OUT_EVEN					78
-#define CAM_CC_PLL1_OUT_MAIN					79
-#define CAM_CC_PLL1_OUT_ODD					80
-#define CAM_CC_PLL1_OUT_TEST					81
-#define CAM_CC_PLL2						82
-#define CAM_CC_PLL2_OUT_EVEN					83
-#define CAM_CC_PLL2_OUT_MAIN					84
-#define CAM_CC_PLL2_OUT_ODD					85
-#define CAM_CC_PLL2_OUT_TEST					86
-#define CAM_CC_PLL3						87
-#define CAM_CC_PLL3_OUT_EVEN					88
-#define CAM_CC_PLL3_OUT_MAIN					89
-#define CAM_CC_PLL3_OUT_ODD					90
-#define CAM_CC_PLL3_OUT_TEST					91
-#define CAM_CC_PLL_TEST_CLK					92
-#define CAM_CC_SLOW_AHB_CLK_SRC					93
-#define CAM_CC_SOC_AHB_CLK					94
-#define CAM_CC_SPDM_BPS_CLK					95
-#define CAM_CC_SPDM_IFE_0_CLK					96
-#define CAM_CC_SPDM_IFE_0_CSID_CLK				97
-#define CAM_CC_SPDM_IPE_0_CLK					98
-#define CAM_CC_SPDM_IPE_1_CLK					99
-#define CAM_CC_SPDM_JPEG_CLK					100
-#define CAM_CC_SYS_TMR_CLK					101
+#define CAM_CC_PLL1						74
+#define CAM_CC_PLL1_OUT_EVEN					75
+#define CAM_CC_PLL2						76
+#define CAM_CC_PLL2_OUT_EVEN					77
+#define CAM_CC_PLL2_OUT_ODD					78
+#define CAM_CC_PLL3						79
+#define CAM_CC_PLL3_OUT_EVEN					80
+#define CAM_CC_PLL_TEST_CLK					81
+#define CAM_CC_SLOW_AHB_CLK_SRC					82
+#define CAM_CC_SOC_AHB_CLK					83
+#define CAM_CC_SYS_TMR_CLK					84
 
 #define TITAN_CAM_CC_BPS_BCR					0
 #define TITAN_CAM_CC_CAMNOC_BCR					1
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 1e55c1d..d52e335 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -53,152 +53,149 @@
 #define GCC_GPU_GPLL0_DIV_CLK_SRC				35
 #define GCC_GPU_MEMNOC_GFX_CLK					36
 #define GCC_GPU_SNOC_DVM_GFX_CLK				37
-#define GCC_MMSS_QM_AHB_CLK					38
-#define GCC_MMSS_QM_CORE_CLK					39
-#define GCC_MMSS_QM_CORE_CLK_SRC				40
-#define GCC_MSS_AXIS2_CLK					41
-#define GCC_MSS_CFG_AHB_CLK					42
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				43
-#define GCC_MSS_MFAB_AXIS_CLK					44
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK				45
-#define GCC_MSS_SNOC_AXI_CLK					46
-#define GCC_PCIE_0_AUX_CLK					47
-#define GCC_PCIE_0_AUX_CLK_SRC					48
-#define GCC_PCIE_0_CFG_AHB_CLK					49
-#define GCC_PCIE_0_CLKREF_CLK					50
-#define GCC_PCIE_0_MSTR_AXI_CLK					51
-#define GCC_PCIE_0_PIPE_CLK					52
-#define GCC_PCIE_0_SLV_AXI_CLK					53
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				54
-#define GCC_PCIE_1_AUX_CLK					55
-#define GCC_PCIE_1_AUX_CLK_SRC					56
-#define GCC_PCIE_1_CFG_AHB_CLK					57
-#define GCC_PCIE_1_CLKREF_CLK					58
-#define GCC_PCIE_1_MSTR_AXI_CLK					59
-#define GCC_PCIE_1_PIPE_CLK					60
-#define GCC_PCIE_1_SLV_AXI_CLK					61
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				62
-#define GCC_PCIE_PHY_AUX_CLK					63
-#define GCC_PCIE_PHY_REFGEN_CLK					64
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				65
-#define GCC_PDM2_CLK						66
-#define GCC_PDM2_CLK_SRC					67
-#define GCC_PDM_AHB_CLK						68
-#define GCC_PDM_XO4_CLK						69
-#define GCC_PRNG_AHB_CLK					70
-#define GCC_QMIP_CAMERA_AHB_CLK					71
-#define GCC_QMIP_DISP_AHB_CLK					72
-#define GCC_QMIP_VIDEO_AHB_CLK					73
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				74
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_CORE_CLK				76
-#define GCC_QUPV3_WRAP0_S0_CLK					77
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				78
-#define GCC_QUPV3_WRAP0_S1_CLK					79
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				80
-#define GCC_QUPV3_WRAP0_S2_CLK					81
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				82
-#define GCC_QUPV3_WRAP0_S3_CLK					83
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				84
-#define GCC_QUPV3_WRAP0_S4_CLK					85
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				86
-#define GCC_QUPV3_WRAP0_S5_CLK					87
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				88
-#define GCC_QUPV3_WRAP0_S6_CLK					89
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				90
-#define GCC_QUPV3_WRAP0_S7_CLK					91
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				92
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				93
-#define GCC_QUPV3_WRAP1_CORE_CLK				94
-#define GCC_QUPV3_WRAP1_S0_CLK					95
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				96
-#define GCC_QUPV3_WRAP1_S1_CLK					97
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				98
-#define GCC_QUPV3_WRAP1_S2_CLK					99
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				100
-#define GCC_QUPV3_WRAP1_S3_CLK					101
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				102
-#define GCC_QUPV3_WRAP1_S4_CLK					103
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				104
-#define GCC_QUPV3_WRAP1_S5_CLK					105
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				106
-#define GCC_QUPV3_WRAP1_S6_CLK					107
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				108
-#define GCC_QUPV3_WRAP1_S7_CLK					109
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				110
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				111
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				112
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				113
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				114
-#define GCC_RX1_USB2_CLKREF_CLK					115
-#define GCC_RX2_QLINK_CLKREF_CLK				116
-#define GCC_RX3_MODEM_CLKREF_CLK				117
-#define GCC_SDCC2_AHB_CLK					118
-#define GCC_SDCC2_APPS_CLK					119
-#define GCC_SDCC2_APPS_CLK_SRC					120
-#define GCC_SDCC4_AHB_CLK					121
-#define GCC_SDCC4_APPS_CLK					122
-#define GCC_SDCC4_APPS_CLK_SRC					123
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				124
-#define GCC_TSIF_AHB_CLK					125
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				126
-#define GCC_TSIF_REF_CLK					127
-#define GCC_TSIF_REF_CLK_SRC					128
-#define GCC_UFS_CARD_AHB_CLK					129
-#define GCC_UFS_CARD_AXI_CLK					130
-#define GCC_UFS_CARD_AXI_CLK_SRC				131
-#define GCC_UFS_CARD_CLKREF_CLK					132
-#define GCC_UFS_CARD_ICE_CORE_CLK				133
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				134
-#define GCC_UFS_CARD_PHY_AUX_CLK				135
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				136
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				137
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				138
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				139
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				140
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			141
-#define GCC_UFS_MEM_CLKREF_CLK					142
-#define GCC_UFS_PHY_AHB_CLK					143
-#define GCC_UFS_PHY_AXI_CLK					144
-#define GCC_UFS_PHY_AXI_CLK_SRC					145
-#define GCC_UFS_PHY_ICE_CORE_CLK				146
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				147
-#define GCC_UFS_PHY_PHY_AUX_CLK					148
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				149
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				150
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				151
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				152
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				153
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				154
-#define GCC_USB30_PRIM_MASTER_CLK				155
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				156
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				157
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			158
-#define GCC_USB30_PRIM_SLEEP_CLK				159
-#define GCC_USB30_SEC_MASTER_CLK				160
-#define GCC_USB30_SEC_MASTER_CLK_SRC				161
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				162
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				163
-#define GCC_USB30_SEC_SLEEP_CLK					164
-#define GCC_USB3_PRIM_CLKREF_CLK				165
-#define GCC_USB3_PRIM_PHY_AUX_CLK				166
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				167
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				168
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				169
-#define GCC_USB3_SEC_CLKREF_CLK					170
-#define GCC_USB3_SEC_PHY_AUX_CLK				171
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				172
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				173
-#define GCC_USB3_SEC_PHY_PIPE_CLK				174
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				175
-#define GCC_VIDEO_AHB_CLK					176
-#define GCC_VIDEO_AXI_CLK					177
-#define GCC_VIDEO_XO_CLK					178
-#define GPLL0							179
-#define GPLL0_OUT_EVEN						180
-#define GPLL0_OUT_MAIN						181
-#define GPLL1							182
-#define GPLL1_OUT_MAIN						183
+#define GCC_MSS_AXIS2_CLK					38
+#define GCC_MSS_CFG_AHB_CLK					39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				40
+#define GCC_MSS_MFAB_AXIS_CLK					41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				42
+#define GCC_MSS_SNOC_AXI_CLK					43
+#define GCC_PCIE_0_AUX_CLK					44
+#define GCC_PCIE_0_AUX_CLK_SRC					45
+#define GCC_PCIE_0_CFG_AHB_CLK					46
+#define GCC_PCIE_0_CLKREF_CLK					47
+#define GCC_PCIE_0_MSTR_AXI_CLK					48
+#define GCC_PCIE_0_PIPE_CLK					49
+#define GCC_PCIE_0_SLV_AXI_CLK					50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				51
+#define GCC_PCIE_1_AUX_CLK					52
+#define GCC_PCIE_1_AUX_CLK_SRC					53
+#define GCC_PCIE_1_CFG_AHB_CLK					54
+#define GCC_PCIE_1_CLKREF_CLK					55
+#define GCC_PCIE_1_MSTR_AXI_CLK					56
+#define GCC_PCIE_1_PIPE_CLK					57
+#define GCC_PCIE_1_SLV_AXI_CLK					58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				59
+#define GCC_PCIE_PHY_AUX_CLK					60
+#define GCC_PCIE_PHY_REFGEN_CLK					61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				62
+#define GCC_PDM2_CLK						63
+#define GCC_PDM2_CLK_SRC					64
+#define GCC_PDM_AHB_CLK						65
+#define GCC_PDM_XO4_CLK						66
+#define GCC_PRNG_AHB_CLK					67
+#define GCC_QMIP_CAMERA_AHB_CLK					68
+#define GCC_QMIP_DISP_AHB_CLK					69
+#define GCC_QMIP_VIDEO_AHB_CLK					70
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_CORE_CLK				73
+#define GCC_QUPV3_WRAP0_S0_CLK					74
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
+#define GCC_QUPV3_WRAP0_S1_CLK					76
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S2_CLK					78
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S3_CLK					80
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S4_CLK					82
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S5_CLK					84
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S6_CLK					86
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S7_CLK					88
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
+#define GCC_QUPV3_WRAP1_CORE_CLK				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_RX1_USB2_CLKREF_CLK					112
+#define GCC_RX2_QLINK_CLKREF_CLK				113
+#define GCC_RX3_MODEM_CLKREF_CLK				114
+#define GCC_SDCC2_AHB_CLK					115
+#define GCC_SDCC2_APPS_CLK					116
+#define GCC_SDCC2_APPS_CLK_SRC					117
+#define GCC_SDCC4_AHB_CLK					118
+#define GCC_SDCC4_APPS_CLK					119
+#define GCC_SDCC4_APPS_CLK_SRC					120
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
+#define GCC_TSIF_AHB_CLK					122
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
+#define GCC_TSIF_REF_CLK					124
+#define GCC_TSIF_REF_CLK_SRC					125
+#define GCC_UFS_CARD_AHB_CLK					126
+#define GCC_UFS_CARD_AXI_CLK					127
+#define GCC_UFS_CARD_AXI_CLK_SRC				128
+#define GCC_UFS_CARD_CLKREF_CLK					129
+#define GCC_UFS_CARD_ICE_CORE_CLK				130
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
+#define GCC_UFS_CARD_PHY_AUX_CLK				132
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
+#define GCC_UFS_MEM_CLKREF_CLK					139
+#define GCC_UFS_PHY_AHB_CLK					140
+#define GCC_UFS_PHY_AXI_CLK					141
+#define GCC_UFS_PHY_AXI_CLK_SRC					142
+#define GCC_UFS_PHY_ICE_CORE_CLK				143
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
+#define GCC_UFS_PHY_PHY_AUX_CLK					145
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
+#define GCC_USB30_PRIM_MASTER_CLK				152
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
+#define GCC_USB30_PRIM_SLEEP_CLK				156
+#define GCC_USB30_SEC_MASTER_CLK				157
+#define GCC_USB30_SEC_MASTER_CLK_SRC				158
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
+#define GCC_USB30_SEC_SLEEP_CLK					161
+#define GCC_USB3_PRIM_CLKREF_CLK				162
+#define GCC_USB3_PRIM_PHY_AUX_CLK				163
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
+#define GCC_USB3_SEC_CLKREF_CLK					167
+#define GCC_USB3_SEC_PHY_AUX_CLK				168
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
+#define GCC_USB3_SEC_PHY_PIPE_CLK				171
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
+#define GCC_VIDEO_AHB_CLK					173
+#define GCC_VIDEO_AXI_CLK					174
+#define GCC_VIDEO_XO_CLK					175
+#define GPLL0							176
+#define GPLL0_OUT_EVEN						177
+#define GPLL0_OUT_MAIN						178
+#define GPLL1							179
+#define GPLL1_OUT_MAIN						180
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 6aaf425..a13b031 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,19 +18,12 @@
 
 struct bpf_reg_state {
 	enum bpf_reg_type type;
-	/*
-	 * Used to determine if any memory access using this register will
-	 * result in a bad access.
-	 */
-	s64 min_value;
-	u64 max_value;
 	union {
 		/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
 		s64 imm;
 
 		/* valid when type == PTR_TO_PACKET* */
 		struct {
-			u32 id;
 			u16 off;
 			u16 range;
 		};
@@ -40,6 +33,13 @@
 		 */
 		struct bpf_map *map_ptr;
 	};
+	u32 id;
+	/* Used to determine if any memory access using this register will
+	 * result in a bad access. These two fields must be last.
+	 * See states_equal()
+	 */
+	s64 min_value;
+	u64 max_value;
 };
 
 enum bpf_stack_slot_type {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index cc57986..23beb58 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -412,6 +412,7 @@
 
 #define CPUFREQ_TRANSITION_NOTIFIER	(0)
 #define CPUFREQ_POLICY_NOTIFIER		(1)
+#define CPUFREQ_GOVINFO_NOTIFIER	(2)
 
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE		(0)
@@ -424,6 +425,9 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 
+/* Govinfo Notifiers */
+#define CPUFREQ_LOAD_CHANGE		(0)
+
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
@@ -432,6 +436,16 @@
 		struct cpufreq_freqs *freqs);
 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed);
+/*
+ * Governor specific info that can be passed to modules that subscribe
+ * to CPUFREQ_GOVINFO_NOTIFIER
+ */
+struct cpufreq_govinfo {
+	unsigned int cpu;
+	unsigned int load;
+	unsigned int sampling_rate_us;
+};
+extern struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 #else /* CONFIG_CPU_FREQ */
 static inline int cpufreq_register_notifier(struct notifier_block *nb,
@@ -584,6 +598,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
 extern struct cpufreq_governor cpufreq_gov_sched;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_sched)
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b9337de..7f395e3 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -45,6 +45,7 @@
 	CPUHP_POWERPC_MMU_CTX_PREPARE,
 	CPUHP_XEN_PREPARE,
 	CPUHP_XEN_EVTCHN_PREPARE,
+	CPUHP_QCOM_CPUFREQ_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
@@ -86,6 +87,7 @@
 	CPUHP_AP_METAG_TIMER_STARTING,
 	CPUHP_AP_QCOM_TIMER_STARTING,
 	CPUHP_AP_QCOM_SLEEP_STARTING,
+	CPUHP_AP_QCOM_CPUFREQ_STARTING,
 	CPUHP_AP_ARMADA_TIMER_STARTING,
 	CPUHP_AP_MARCO_TIMER_STARTING,
 	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 61d042b..6844929 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -163,6 +163,7 @@
 	__u64			 dreq_isr;
 	__u64			 dreq_gsr;
 	__be32			 dreq_service;
+	spinlock_t		 dreq_lock;
 	struct list_head	 dreq_featneg;
 	__u32			 dreq_timestamp_echo;
 	__u32			 dreq_timestamp_time;
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index ddd126c..560f047 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -36,6 +36,8 @@
 
 	spinlock_t	lock;
 	struct notifier_block notifier;
+
+	int		is_smmu_pt_coherent;
 };
 
 #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 542cc16..373dbd5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -87,6 +87,17 @@
  * may be designed to use the original attributes instead.
  */
 #define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT	(1UL << 13)
+/*
+ * When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
+ * attribute can be used to force a buffer to be mapped as IO coherent.
+ */
+#define DMA_ATTR_FORCE_COHERENT			(1UL << 14)
+/*
+ * When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA
+ * attribute can be used to force a buffer to not be mapped as IO
+ * coherent.
+ */
+#define DMA_ATTR_FORCE_NON_COHERENT		(1UL << 15)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 192eef2f..d596a07 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1548,31 +1548,23 @@
 get_next_pkt_raw(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
+	u32 priv_read_loc = ring_info->priv_read_index;
 	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	struct vmpacket_descriptor *cur_desc;
-	u32 packetlen;
 	u32 dsize = ring_info->ring_datasize;
-	u32 delta = read_loc - ring_info->ring_buffer->read_index;
+	/*
+	 * delta is the difference between what is available to read and
+	 * what was already consumed in place. We commit read index after
+	 * the whole batch is processed.
+	 */
+	u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
+		priv_read_loc - ring_info->ring_buffer->read_index :
+		(dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
 	u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
 
 	if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
 		return NULL;
 
-	if ((read_loc + sizeof(*cur_desc)) > dsize)
-		return NULL;
-
-	cur_desc = ring_buffer + read_loc;
-	packetlen = cur_desc->len8 << 3;
-
-	/*
-	 * If the packet under consideration is wrapping around,
-	 * return failure.
-	 */
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
-		return NULL;
-
-	return cur_desc;
+	return ring_buffer + priv_read_loc;
 }
 
 /*
@@ -1584,16 +1576,14 @@
 				struct vmpacket_descriptor *desc)
 {
 	struct hv_ring_buffer_info *ring_info = &channel->inbound;
-	u32 read_loc = ring_info->priv_read_index;
 	u32 packetlen = desc->len8 << 3;
 	u32 dsize = ring_info->ring_datasize;
 
-	if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
-		BUG();
 	/*
 	 * Include the packet trailer.
 	 */
 	ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+	ring_info->priv_read_index %= dsize;
 }
 
 /*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0e04308..5d3a4cd 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -138,6 +138,9 @@
 	DOMAIN_ATTR_FAST,
 	DOMAIN_ATTR_PGTBL_INFO,
 	DOMAIN_ATTR_USE_UPSTREAM_HINT,
+	DOMAIN_ATTR_EARLY_MAP,
+	DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+	DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
 	DOMAIN_ATTR_MAX,
 };
 
@@ -244,6 +247,7 @@
 
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 
+	bool (*is_iova_coherent)(struct iommu_domain *domain, dma_addr_t iova);
 	unsigned long pgsize_bitmap;
 };
 
@@ -277,6 +281,8 @@
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
 extern phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
 					   dma_addr_t iova);
+extern bool iommu_is_iova_coherent(struct iommu_domain *domain,
+				dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
@@ -518,6 +524,12 @@
 	return 0;
 }
 
+static inline bool iommu_is_iova_coherent(struct iommu_domain *domain,
+					  dma_addr_t iova)
+{
+	return 0;
+}
+
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 				iommu_fault_handler_t handler, void *token)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2d191bf..f7b0dab 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2109,7 +2109,7 @@
 void task_dirty_inc(struct task_struct *tsk);
 
 /* readahead.c */
-#define VM_MAX_READAHEAD	128	/* kbytes */
+#define VM_MAX_READAHEAD	512	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 73fad83..510a73a 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -119,6 +119,9 @@
 	u8			raw_pwr_cl_ddr_200_360;	/* 253 */
 	u8			raw_bkops_status;	/* 246 */
 	u8			raw_sectors[4];		/* 212 - 4 bytes */
+	u8			pre_eol_info;		/* 267 */
+	u8			device_life_time_est_typ_a;	/* 268 */
+	u8			device_life_time_est_typ_b;	/* 269 */
 
 	unsigned int            feature_support;
 #define MMC_DISCARD_FEATURE	BIT(0)                  /* CMD38 feature */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 0ac4125..68f60b8 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -273,6 +273,9 @@
 #define EXT_CSD_CACHE_SIZE		249	/* RO, 4 bytes */
 #define EXT_CSD_PWR_CL_DDR_200_360	253	/* RO */
 #define EXT_CSD_FIRMWARE_VERSION	254	/* RO, 8 bytes */
+#define EXT_CSD_PRE_EOL_INFO		267	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_A	268	/* RO */
+#define EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B	269	/* RO */
 #define EXT_CSD_SUPPORTED_MODE		493	/* RO */
 #define EXT_CSD_TAG_UNIT_SIZE		498	/* RO */
 #define EXT_CSD_DATA_TAG_SUPPORT	499	/* RO */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index f3ce6b4..e1ad51e 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -49,6 +49,7 @@
 #define PINCTRL_DEFAULT	"default"
 #define PINCTRL_SLEEP	"sleep"
 
+/* Common SE registers */
 #define GENI_INIT_CFG_REVISION		(0x0)
 #define GENI_S_INIT_CFG_REVISION	(0x4)
 #define GENI_FORCE_DEFAULT_REG		(0x20)
@@ -126,6 +127,9 @@
 #define FW_REV_PROTOCOL_MSK	(GENMASK(15, 8))
 #define FW_REV_PROTOCOL_SHFT	(8)
 
+/* GENI_CLK_SEL fields */
+#define CLK_SEL_MSK		(GENMASK(2, 0))
+
 /* SE_GENI_DMA_MODE_EN */
 #define GENI_DMA_MODE_EN	(BIT(0))
 
@@ -280,9 +284,10 @@
 	switch (mode) {
 	case FIFO_MODE:
 	{
-		if (proto == I2C) {
+		if (proto != UART) {
 			common_geni_m_irq_en |=
-				(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
+				(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN |
+				M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
 			common_geni_s_irq_en |= S_CMD_DONE_EN;
 		}
 		break;
diff --git a/include/linux/usb.h b/include/linux/usb.h
index eba1f10..93094ba 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -354,6 +354,7 @@
  */
 struct usb_bus {
 	struct device *controller;	/* host/master side hardware */
+	struct device *sysdev;		/* as seen from firmware or bus */
 	int busnum;			/* Bus number (in order of reg) */
 	const char *bus_name;		/* stable id (PCI slot_name etc) */
 	u8 uses_dma;			/* Does the host controller use DMA? */
@@ -396,6 +397,15 @@
 	struct mon_bus *mon_bus;	/* non-null when associated */
 	int monitored;			/* non-zero when monitored */
 #endif
+	unsigned skip_resume:1;		/* All USB devices are brought into full
+					 * power state after system resume. It
+					 * is desirable for some buses to keep
+					 * their devices in suspend state even
+					 * after system resume. The devices
+					 * are resumed later when a remote
+					 * wakeup is detected or an interface
+					 * driver starts I/O.
+					 */
 };
 
 struct usb_dev_state;
@@ -734,6 +744,16 @@
 
 /* for drivers using iso endpoints */
 extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+extern int usb_sec_event_ring_setup(struct usb_device *dev,
+	unsigned int intr_num);
+extern int usb_sec_event_ring_cleanup(struct usb_device *dev,
+	unsigned int intr_num);
+
+extern dma_addr_t usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
+	unsigned int intr_num);
+extern dma_addr_t usb_get_dcba_dma_addr(struct usb_device *dev);
+extern dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
+	struct usb_host_endpoint *ep);
 
 /* Sets up a group of bulk endpoints to support multiple stream IDs. */
 extern int usb_alloc_streams(struct usb_interface *interface,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 66fc137..5c0b3fa 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -398,6 +398,15 @@
 	/* Call for power on/off the port if necessary */
 	int	(*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
 
+	int (*sec_event_ring_setup)(struct usb_hcd *hcd, unsigned int intr_num);
+	int (*sec_event_ring_cleanup)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_sec_event_ring_dma_addr)(struct usb_hcd *hcd,
+			unsigned int intr_num);
+	dma_addr_t (*get_xfer_ring_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev, struct usb_host_endpoint *ep);
+	dma_addr_t (*get_dcba_dma_addr)(struct usb_hcd *hcd,
+			struct usb_device *udev);
 };
 
 static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -436,7 +445,19 @@
 		struct usb_host_interface *old_alt,
 		struct usb_host_interface *new_alt);
 extern int usb_hcd_get_frame_number(struct usb_device *udev);
+extern int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
+	unsigned int intr_num);
+extern int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
+	unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
+		unsigned int intr_num);
+extern dma_addr_t usb_hcd_get_dcba_dma_addr(struct usb_device *udev);
+extern dma_addr_t usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
+	struct usb_host_endpoint *ep);
 
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+		struct device *sysdev, struct device *dev, const char *bus_name,
+		struct usb_hcd *primary_hcd);
 extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
 		struct device *dev, const char *bus_name);
 extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
@@ -485,7 +506,7 @@
 extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
 		unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
 extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
 extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
 
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index eb209d4..dc79773 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -65,7 +65,7 @@
 	struct hlist_node node;
 	struct user_namespace *ns;
 	kuid_t uid;
-	atomic_t count;
+	int count;
 	atomic_t ucount[UCOUNT_COUNTS];
 };
 
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index d66d44c..262fa64 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -105,7 +105,8 @@
 int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
-int msm_vidc_release_buffers(void *instance, int buffer_type);
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index);
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
diff --git a/include/media/radio-iris.h b/include/media/radio-iris.h
new file mode 100644
index 0000000..22888b0
--- /dev/null
+++ b/include/media/radio-iris.h
@@ -0,0 +1,322 @@
+/*
+ *
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ *
+ * This file is based on include/net/bluetooth/hci_core.h
+ *
+ * Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation;
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ * SOFTWARE IS DISCLAIMED.
+ */
+
+#ifndef __RADIO_IRIS_H
+#define __RADIO_IRIS_H
+
+#include <uapi/media/radio-iris.h>
+
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+
+struct radio_hci_dev {
+	char		name[8];
+	unsigned long	flags;
+	__u16		id;
+	__u8		bus;
+	__u8		dev_type;
+	__u8		dev_name[248];
+	__u8		dev_class[3];
+	__u8		features[8];
+	__u8		commands[64];
+
+	unsigned int	data_block_len;
+	unsigned long	cmd_last_tx;
+
+	struct sk_buff		*sent_cmd;
+
+	__u32			req_status;
+	__u32			req_result;
+	atomic_t	cmd_cnt;
+
+	struct tasklet_struct	cmd_task;
+	struct tasklet_struct	rx_task;
+	struct tasklet_struct	tx_task;
+
+	struct sk_buff_head	rx_q;
+	struct sk_buff_head	raw_q;
+	struct sk_buff_head	cmd_q;
+
+	struct mutex		req_lock;
+	wait_queue_head_t	req_wait_q;
+
+	int (*open)(struct radio_hci_dev *hdev);
+	int (*close)(struct radio_hci_dev *hdev);
+	int (*flush)(struct radio_hci_dev *hdev);
+	int (*send)(struct sk_buff *skb);
+	void (*destruct)(struct radio_hci_dev *hdev);
+	void (*notify)(struct radio_hci_dev *hdev, unsigned int evt);
+	void (*close_smd)(void);
+};
+
+int radio_hci_register_dev(struct radio_hci_dev *hdev);
+int radio_hci_unregister_dev(struct radio_hci_dev *hdev);
+int radio_hci_recv_frame(struct sk_buff *skb);
+int radio_hci_send_cmd(struct radio_hci_dev *hdev, __u16 opcode, __u32 plen,
+	void *param);
+void radio_hci_event_packet(struct radio_hci_dev *hdev, struct sk_buff *skb);
+
+#define hci_req_lock(d)		mutex_lock(&d->req_lock)
+#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
+
+#undef FMDBG
+#ifdef FM_DEBUG
+#define FMDBG(fmt, args...) pr_info("iris_radio: " fmt, ##args)
+#else
+#define FMDBG(fmt, args...)
+#endif
+
+#undef FMDERR
+#define FMDERR(fmt, args...) pr_err("iris_radio: " fmt, ##args)
+
+/* HCI timeouts */
+#define RADIO_HCI_TIMEOUT	(10000)	/* 10 seconds */
+
+int hci_def_data_read(struct hci_fm_def_data_rd_req *arg,
+	struct radio_hci_dev *hdev);
+int hci_def_data_write(struct hci_fm_def_data_wr_req *arg,
+	struct radio_hci_dev *hdev);
+int hci_fm_do_calibration(__u8 *arg, struct radio_hci_dev *hdev);
+int hci_fm_do_calibration(__u8 *arg, struct radio_hci_dev *hdev);
+
+static inline int is_valid_tone(int tone)
+{
+	if ((tone >= MIN_TX_TONE_VAL) &&
+		(tone <= MAX_TX_TONE_VAL))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_hard_mute(int hard_mute)
+{
+	if ((hard_mute >= MIN_HARD_MUTE_VAL) &&
+		(hard_mute <= MAX_HARD_MUTE_VAL))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_srch_mode(int srch_mode)
+{
+	if ((srch_mode >= MIN_SRCH_MODE) &&
+		(srch_mode <= MAX_SRCH_MODE))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_scan_dwell_prd(int scan_dwell_prd)
+{
+	if ((scan_dwell_prd >= MIN_SCAN_DWELL) &&
+		(scan_dwell_prd <= MAX_SCAN_DWELL))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_sig_th(int sig_th)
+{
+	if ((sig_th >= MIN_SIG_TH) &&
+		(sig_th <= MAX_SIG_TH))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_pty(int pty)
+{
+	if ((pty >= MIN_PTY) &&
+		(pty <= MAX_PTY))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_pi(int pi)
+{
+	if ((pi >= MIN_PI) &&
+		(pi <= MAX_PI))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_srch_station_cnt(int cnt)
+{
+	if ((cnt >= MIN_SRCH_STATIONS_CNT) &&
+		(cnt <= MAX_SRCH_STATIONS_CNT))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_chan_spacing(int spacing)
+{
+	if ((spacing >= MIN_CHAN_SPACING) &&
+		(spacing <= MAX_CHAN_SPACING))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_emphasis(int emphasis)
+{
+	if ((emphasis >= MIN_EMPHASIS) &&
+		(emphasis <= MAX_EMPHASIS))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_rds_std(int rds_std)
+{
+	if ((rds_std >= MIN_RDS_STD) &&
+		(rds_std <= MAX_RDS_STD))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_antenna(int antenna_type)
+{
+	if ((antenna_type >= MIN_ANTENNA_VAL) &&
+		(antenna_type <= MAX_ANTENNA_VAL))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_ps_repeat_cnt(int cnt)
+{
+	if ((cnt >= MIN_TX_PS_REPEAT_CNT) &&
+		(cnt <= MAX_TX_PS_REPEAT_CNT))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_soft_mute(int soft_mute)
+{
+	if ((soft_mute >= MIN_SOFT_MUTE) &&
+		(soft_mute <= MAX_SOFT_MUTE))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_peek_len(int len)
+{
+	if ((len >= MIN_PEEK_ACCESS_LEN) &&
+		(len <= MAX_PEEK_ACCESS_LEN))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_reset_cntr(int cntr)
+{
+	if ((cntr >= MIN_RESET_CNTR) &&
+		(cntr <= MAX_RESET_CNTR))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_hlsi(int hlsi)
+{
+	if ((hlsi >= MIN_HLSI) &&
+		(hlsi <= MAX_HLSI))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_notch_filter(int filter)
+{
+	if ((filter >= MIN_NOTCH_FILTER) &&
+		(filter <= MAX_NOTCH_FILTER))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_intf_det_low_th(int th)
+{
+	if ((th >= MIN_INTF_DET_OUT_LW_TH) &&
+		(th <= MAX_INTF_DET_OUT_LW_TH))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_intf_det_hgh_th(int th)
+{
+	if ((th >= MIN_INTF_DET_OUT_HG_TH) &&
+		(th <= MAX_INTF_DET_OUT_HG_TH))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_sinr_th(int th)
+{
+	if ((th >= MIN_SINR_TH) &&
+		(th <= MAX_SINR_TH))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_sinr_samples(int samples_cnt)
+{
+	if ((samples_cnt >= MIN_SINR_SAMPLES) &&
+		(samples_cnt <= MAX_SINR_SAMPLES))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_fm_state(int state)
+{
+	if ((state >= 0) && (state < FM_MAX_NO_STATES))
+		return 1;
+	else
+		return 0;
+}
+
+static inline int is_valid_blend_value(int val)
+{
+	if ((val >= MIN_BLEND_HI) && (val <= MAX_BLEND_HI))
+		return 1;
+	else
+		return 0;
+}
+
+#endif
+
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ac5898a..c558387 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -17,7 +17,7 @@
 #include <linux/poll.h>
 #include <linux/dma-buf.h>
 
-#define VB2_MAX_FRAME	(32)
+#define VB2_MAX_FRAME	(64)
 #define VB2_MAX_PLANES	(8)
 
 /**
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index cd334c9..225bae1 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -125,6 +125,8 @@
 	return skb->protocol;
 }
 
+extern int tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle,
+				  int flow_enable);
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
diff --git a/include/soc/qcom/devfreq_devbw.h b/include/soc/qcom/devfreq_devbw.h
new file mode 100644
index 0000000..7edb2ab
--- /dev/null
+++ b/include/soc/qcom/devfreq_devbw.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DEVFREQ_DEVBW_H
+#define _DEVFREQ_DEVBW_H
+
+#include <linux/devfreq.h>
+
+#ifdef CONFIG_MSM_DEVFREQ_DEVBW
+int devfreq_add_devbw(struct device *dev);
+int devfreq_remove_devbw(struct device *dev);
+int devfreq_suspend_devbw(struct device *dev);
+int devfreq_resume_devbw(struct device *dev);
+#else
+static inline int devfreq_add_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_remove_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_suspend_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_resume_devbw(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#endif /* _DEVFREQ_DEVBW_H */
diff --git a/include/soc/qcom/msm_qmi_interface.h b/include/soc/qcom/msm_qmi_interface.h
index 349ca2f..c421209 100644
--- a/include/soc/qcom/msm_qmi_interface.h
+++ b/include/soc/qcom/msm_qmi_interface.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -92,6 +92,7 @@
 	QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
 	QMI_RESULT_SUCCESS_V01 = 0,
 	QMI_RESULT_FAILURE_V01 = 1,
+	QMI_ERR_DISABLED_V01 = 0x45,
 	QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
 };
 
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index faecc0b..61992e6 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -8,102 +8,138 @@
 
 DECLARE_EVENT_CLASS(set,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	         unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq),
 
 	TP_STRUCT__entry(
-		__field(u32, cpu_id)
-		__field(unsigned long, targfreq)
-		__field(unsigned long, actualfreq)
-	),
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
 
 	TP_fast_assign(
-		__entry->cpu_id = (u32)cpu_id;
-		__entry->targfreq = targfreq;
-		__entry->actualfreq = actualfreq;
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
 	),
 
 	TP_printk("cpu=%u targ=%lu actual=%lu",
-		__entry->cpu_id, __entry->targfreq,
-		__entry->actualfreq)
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
 );
 
 DEFINE_EVENT(set, cpufreq_interactive_setspeed,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	     unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq)
 );
 
 DECLARE_EVENT_CLASS(loadeval,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
 
-	TP_STRUCT__entry(
-		__field(unsigned long, cpu_id)
-		__field(unsigned long, load)
-		__field(unsigned long, curtarg)
-		__field(unsigned long, curactual)
-		__field(unsigned long, newtarg)
-	),
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
 
-	TP_fast_assign(
-		__entry->cpu_id = cpu_id;
-		__entry->load = load;
-		__entry->curtarg = curtarg;
-		__entry->curactual = curactual;
-		__entry->newtarg = newtarg;
-	),
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
 
-	TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
-		  __entry->cpu_id, __entry->load, __entry->curtarg,
-		  __entry->curactual, __entry->newtarg)
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_target,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_already,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 TRACE_EVENT(cpufreq_interactive_boost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
 );
 
 TRACE_EVENT(cpufreq_interactive_unboost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_load_change,
+	    TP_PROTO(unsigned long cpu_id),
+	    TP_ARGS(cpu_id),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	    ),
+	    TP_printk("re-evaluate for cpu=%lu", __entry->cpu_id)
+);
+
+TRACE_EVENT(cpufreq_interactive_cpuload,
+	    TP_PROTO(unsigned long cpu_id, unsigned int load,
+		     unsigned int new_task_pct, unsigned int prev,
+		     unsigned int predicted),
+	    TP_ARGS(cpu_id, load, new_task_pct, prev, predicted),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+		__field(unsigned int, load)
+		__field(unsigned int, new_task_pct)
+		__field(unsigned int, prev)
+		__field(unsigned int, predicted)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->load = load;
+		__entry->new_task_pct = new_task_pct;
+		__entry->prev = prev;
+		__entry->predicted = predicted;
+	    ),
+	    TP_printk("cpu=%lu load=%u new_task_pct=%u prev=%u predicted=%u",
+		      __entry->cpu_id, __entry->load, __entry->new_task_pct,
+		      __entry->prev, __entry->predicted)
 );
 
 #endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index ec6f815..3354d4e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -179,6 +179,48 @@
 	TP_ARGS(capacity, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_switch_start,
+
+	TP_PROTO(unsigned int start_freq, unsigned int end_freq,
+		 unsigned int cpu_id),
+
+	TP_ARGS(start_freq, end_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		start_freq	)
+		__field(	u32,		end_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->start_freq = start_freq;
+		__entry->end_freq = end_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("start=%lu end=%lu cpu_id=%lu",
+		  (unsigned long)__entry->start_freq,
+		  (unsigned long)__entry->end_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
+TRACE_EVENT(cpu_frequency_switch_end,
+
+	TP_PROTO(unsigned int cpu_id),
+
+	TP_ARGS(cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
 	TP_PROTO(struct device *dev, const char *pm_ops, int event),
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 14e49c7..b35533b 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM raw_syscalls
+#undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE syscalls
 
 #if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 94d7fcb..fb882f5 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -244,6 +244,30 @@
 	__u32 retained;       /* out, whether backing store still exists */
 };
 
+/* HDR WRGB x and y index */
+#define DISPLAY_PRIMARIES_WX 0
+#define DISPLAY_PRIMARIES_WY 1
+#define DISPLAY_PRIMARIES_RX 2
+#define DISPLAY_PRIMARIES_RY 3
+#define DISPLAY_PRIMARIES_GX 4
+#define DISPLAY_PRIMARIES_GY 5
+#define DISPLAY_PRIMARIES_BX 6
+#define DISPLAY_PRIMARIES_BY 7
+#define DISPLAY_PRIMARIES_MAX 8
+
+struct drm_panel_hdr_properties {
+	__u32 hdr_enabled;
+
+	/* WRGB X and y values arrayed in format */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	__u32 display_primaries[DISPLAY_PRIMARIES_MAX];
+
+	/* peak brightness supported by panel */
+	__u32 peak_brightness;
+	/* Blackness level supported by panel */
+	__u32 blackness_level;
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -255,8 +279,10 @@
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
 #define DRM_MSM_GEM_MADVISE            0x08
-#define DRM_SDE_WB_CONFIG              0x08
-#define DRM_MSM_NUM_IOCTLS             0x09
+
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index 943940e..e809c03 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -134,4 +134,151 @@
 	__u32 c1[PGC_TBL_LEN];
 	__u32 c2[PGC_TBL_LEN];
 };
+
+#define AD4_LUT_GRP0_SIZE 33
+#define AD4_LUT_GRP1_SIZE 32
+/*
+ * struct drm_msm_ad4_init - ad4 init structure set by user-space client.
+ *                           Init param values can change based on tuning
+ *                           hence it is passed by user-space clients.
+ */
+struct drm_msm_ad4_init {
+	__u32 init_param_001[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_002[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_003[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_004[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_005[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_006[AD4_LUT_GRP1_SIZE];
+	__u32 init_param_007[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_008[AD4_LUT_GRP0_SIZE];
+	__u32 init_param_009;
+	__u32 init_param_010;
+	__u32 init_param_011;
+	__u32 init_param_012;
+	__u32 init_param_013;
+	__u32 init_param_014;
+	__u32 init_param_015;
+	__u32 init_param_016;
+	__u32 init_param_017;
+	__u32 init_param_018;
+	__u32 init_param_019;
+	__u32 init_param_020;
+	__u32 init_param_021;
+	__u32 init_param_022;
+	__u32 init_param_023;
+	__u32 init_param_024;
+	__u32 init_param_025;
+	__u32 init_param_026;
+	__u32 init_param_027;
+	__u32 init_param_028;
+	__u32 init_param_029;
+	__u32 init_param_030;
+	__u32 init_param_031;
+	__u32 init_param_032;
+	__u32 init_param_033;
+	__u32 init_param_034;
+	__u32 init_param_035;
+	__u32 init_param_036;
+	__u32 init_param_037;
+	__u32 init_param_038;
+	__u32 init_param_039;
+	__u32 init_param_040;
+	__u32 init_param_041;
+	__u32 init_param_042;
+	__u32 init_param_043;
+	__u32 init_param_044;
+	__u32 init_param_045;
+	__u32 init_param_046;
+	__u32 init_param_047;
+	__u32 init_param_048;
+	__u32 init_param_049;
+	__u32 init_param_050;
+	__u32 init_param_051;
+	__u32 init_param_052;
+	__u32 init_param_053;
+	__u32 init_param_054;
+	__u32 init_param_055;
+	__u32 init_param_056;
+	__u32 init_param_057;
+	__u32 init_param_058;
+	__u32 init_param_059;
+	__u32 init_param_060;
+	__u32 init_param_061;
+	__u32 init_param_062;
+	__u32 init_param_063;
+	__u32 init_param_064;
+	__u32 init_param_065;
+	__u32 init_param_066;
+	__u32 init_param_067;
+	__u32 init_param_068;
+	__u32 init_param_069;
+	__u32 init_param_070;
+	__u32 init_param_071;
+	__u32 init_param_072;
+	__u32 init_param_073;
+	__u32 init_param_074;
+	__u32 init_param_075;
+};
+
+/*
+ * struct drm_msm_ad4_cfg - ad4 config structure set by user-space client.
+ *                           Config param values can vary based on tuning,
+ *                           hence it is passed by user-space clients.
+ */
+struct drm_msm_ad4_cfg {
+	__u32 cfg_param_001;
+	__u32 cfg_param_002;
+	__u32 cfg_param_003;
+	__u32 cfg_param_004;
+	__u32 cfg_param_005;
+	__u32 cfg_param_006;
+	__u32 cfg_param_007;
+	__u32 cfg_param_008;
+	__u32 cfg_param_009;
+	__u32 cfg_param_010;
+	__u32 cfg_param_011;
+	__u32 cfg_param_012;
+	__u32 cfg_param_013;
+	__u32 cfg_param_014;
+	__u32 cfg_param_015;
+	__u32 cfg_param_016;
+	__u32 cfg_param_017;
+	__u32 cfg_param_018;
+	__u32 cfg_param_019;
+	__u32 cfg_param_020;
+	__u32 cfg_param_021;
+	__u32 cfg_param_022;
+	__u32 cfg_param_023;
+	__u32 cfg_param_024;
+	__u32 cfg_param_025;
+	__u32 cfg_param_026;
+	__u32 cfg_param_027;
+	__u32 cfg_param_028;
+	__u32 cfg_param_029;
+	__u32 cfg_param_030;
+	__u32 cfg_param_031;
+	__u32 cfg_param_032;
+	__u32 cfg_param_033;
+	__u32 cfg_param_034;
+	__u32 cfg_param_035;
+	__u32 cfg_param_036;
+	__u32 cfg_param_037;
+	__u32 cfg_param_038;
+	__u32 cfg_param_039;
+	__u32 cfg_param_040;
+	__u32 cfg_param_041;
+	__u32 cfg_param_042;
+	__u32 cfg_param_043;
+	__u32 cfg_param_044;
+	__u32 cfg_param_045;
+	__u32 cfg_param_046;
+	__u32 cfg_param_047;
+	__u32 cfg_param_048;
+	__u32 cfg_param_049;
+	__u32 cfg_param_050;
+	__u32 cfg_param_051;
+	__u32 cfg_param_052;
+	__u32 cfg_param_053;
+};
+
 #endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 2dfbc95..4823794 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -265,6 +265,7 @@
 header-y += map_to_7segment.h
 header-y += matroxfb.h
 header-y += mdio.h
+header-y += mdss_rotator.h
 header-y += media.h
 header-y += media-bus-format.h
 header-y += mei.h
@@ -306,6 +307,8 @@
 header-y += msm_ion.h
 header-y += msm_ipc.h
 header-y += msm_kgsl.h
+header-y += msm_mdp.h
+header-y += msm_mdp_ext.h
 header-y += msm_rmnet.h
 header-y += mtio.h
 header-y += nbd.h
@@ -515,4 +518,6 @@
 header-y += ipa_qmi_service_v01.h
 header-y += msm_ipa.h
 header-y += rmnet_ipa_fd_ioctl.h
+header-y += msm_dsps.h
 header-y += msm-core-interface.h
+header-y += msm_rotator.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 51f891f..7668b57 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -132,6 +132,7 @@
 
 /* struct binder_fd_array_object - object describing an array of fds in a buffer
  * @hdr:		common header structure
+ * @pad:		padding to ensure correct alignment
  * @num_fds:		number of file descriptors in the buffer
  * @parent:		index in offset array to buffer holding the fd array
  * @parent_offset:	start offset of fd array in the buffer
@@ -152,6 +153,7 @@
  */
 struct binder_fd_array_object {
 	struct binder_object_header	hdr;
+	__u32				pad;
 	binder_size_t			num_fds;
 	binder_size_t			parent;
 	binder_size_t			parent_offset;
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 1c31549..81c464a 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -59,6 +59,7 @@
 #define EPOLL_PACKED
 #endif
 
+#ifdef __KERNEL__
 struct epoll_event {
 	__u32 events;
 	__u64 data;
@@ -76,4 +77,5 @@
 	epev->events &= ~EPOLLWAKEUP;
 }
 #endif
+#endif /* __KERNEL__ */
 #endif /* _UAPI_LINUX_EVENTPOLL_H */
diff --git a/include/uapi/linux/mdss_rotator.h b/include/uapi/linux/mdss_rotator.h
new file mode 100644
index 0000000..167e1426
--- /dev/null
+++ b/include/uapi/linux/mdss_rotator.h
@@ -0,0 +1,144 @@
+#ifndef _UAPI_MDSS_ROTATOR_H_
+#define _UAPI_MDSS_ROTATOR_H_
+
+#include <linux/msm_mdp_ext.h>
+
+#define MDSS_ROTATOR_IOCTL_MAGIC 'w'
+
+/* open a rotation session */
+#define MDSS_ROTATION_OPEN \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 1, struct mdp_rotation_config *)
+
+/* change the rotation session configuration */
+#define MDSS_ROTATION_CONFIG \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 2, struct mdp_rotation_config *)
+
+/* queue the rotation request */
+#define MDSS_ROTATION_REQUEST \
+	_IOWR(MDSS_ROTATOR_IOCTL_MAGIC, 3, struct mdp_rotation_request *)
+
+/* close a rotation session with the specified rotation session ID */
+#define MDSS_ROTATION_CLOSE	_IOW(MDSS_ROTATOR_IOCTL_MAGIC, 4, unsigned int)
+
+/*
+ * Rotation request flag
+ */
+/* no rotation flag, i.e. color space conversion */
+#define MDP_ROTATION_NOP	0x01
+
+/* left/right flip */
+#define MDP_ROTATION_FLIP_LR	0x02
+
+/* up/down flip */
+#define MDP_ROTATION_FLIP_UD	0x04
+
+/* rotate 90 degree */
+#define MDP_ROTATION_90		0x08
+
+/* rotate 180 degree */
+#define MDP_ROTATION_180	(MDP_ROTATION_FLIP_LR | MDP_ROTATION_FLIP_UD)
+
+/* rotate 270 degree */
+#define MDP_ROTATION_270	(MDP_ROTATION_90 | MDP_ROTATION_180)
+
+/* format is interlaced */
+#define MDP_ROTATION_DEINTERLACE 0x10
+
+/* enable bwc */
+#define MDP_ROTATION_BWC_EN	0x40
+
+/* secure data */
+#define MDP_ROTATION_SECURE	0x80
+
+/*
+ * Rotation commit flag
+ */
+/* Flag indicates to validate the rotation request */
+#define MDSS_ROTATION_REQUEST_VALIDATE	0x01
+
+#define MDP_ROTATION_REQUEST_VERSION_1_0	0x00010000
+
+/*
+ * Client can let driver to allocate the hardware resources with
+ * this particular hw resource id.
+ */
+#define MDSS_ROTATION_HW_ANY	0xFFFFFFFF
+
+/*
+ * Configuration Structures
+ */
+struct mdp_rotation_buf_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	struct mult_factor comp_ratio;
+};
+
+struct mdp_rotation_config {
+	uint32_t	version;
+	uint32_t	session_id;
+	struct mdp_rotation_buf_info	input;
+	struct mdp_rotation_buf_info	output;
+	uint32_t	frame_rate;
+	uint32_t	flags;
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_item {
+	/* rotation request flag */
+	uint32_t	flags;
+
+	/* Source crop rectangle */
+	struct mdp_rect	src_rect;
+
+	/* Destination rectangle */
+	struct mdp_rect	dst_rect;
+
+	/* Input buffer for the request */
+	struct mdp_layer_buffer	input;
+
+	/* The output buffer for the request */
+	struct mdp_layer_buffer	output;
+
+	/*
+	 * DMA pipe selection for this request by client:
+	 * 0: DMA pipe 0
+	 * 1: DMA pipe 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	pipe_idx;
+
+	/*
+	 * Write-back block selection for this request by client:
+	 * 0: Write-back block 0
+	 * 1: Write-back block 1
+	 * or MDSS_ROTATION_HW_ANY if client wants
+	 * driver to allocate any that is available
+	 */
+	uint32_t	wb_idx;
+
+	/* Which session ID is this request scheduled on */
+	uint32_t	session_id;
+
+	/* 32bits reserved value for future usage */
+	uint32_t	reserved[6];
+};
+
+struct mdp_rotation_request {
+	/* 32bit version indicates the request structure */
+	uint32_t	version;
+
+	uint32_t	flags;
+
+	/* Number of rotation request items in the list */
+	uint32_t	count;
+
+	/* Pointer to a list of rotation request items */
+	struct mdp_rotation_item __user	*list;
+
+	/* 32bits reserved value for future usage*/
+	uint32_t	reserved[6];
+};
+
+#endif /*_UAPI_MDSS_ROTATOR_H_*/
diff --git a/include/uapi/linux/msm_dsps.h b/include/uapi/linux/msm_dsps.h
new file mode 100644
index 0000000..a21927d
--- /dev/null
+++ b/include/uapi/linux/msm_dsps.h
@@ -0,0 +1,16 @@
+#ifndef _UAPI_DSPS_H_
+#define _UAPI_DSPS_H_
+
+#include <linux/ioctl.h>
+
+#define DSPS_IOCTL_MAGIC 'd'
+
+#define DSPS_IOCTL_ON	_IO(DSPS_IOCTL_MAGIC, 1)
+#define DSPS_IOCTL_OFF	_IO(DSPS_IOCTL_MAGIC, 2)
+
+#define DSPS_IOCTL_READ_SLOW_TIMER _IOR(DSPS_IOCTL_MAGIC, 3, unsigned int*)
+#define DSPS_IOCTL_READ_FAST_TIMER _IOR(DSPS_IOCTL_MAGIC, 4, unsigned int*)
+
+#define DSPS_IOCTL_RESET _IO(DSPS_IOCTL_MAGIC, 5)
+
+#endif	/* _UAPI_DSPS_H_ */
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
new file mode 100644
index 0000000..73f4938
--- /dev/null
+++ b/include/uapi/linux/msm_mdp.h
@@ -0,0 +1,1461 @@
+#ifndef _UAPI_MSM_MDP_H_
+#define _UAPI_MSM_MDP_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#else
+#include <linux/types.h>
+#endif
+#include <linux/fb.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP          _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
+#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
+#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
+#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
+#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data)
+/* new ioctls's for set/get ccs matrix */
+#define MSMFB_GET_CCS_MATRIX  _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
+#define MSMFB_SET_CCS_MATRIX  _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
+#define MSMFB_OVERLAY_SET       _IOWR(MSMFB_IOCTL_MAGIC, 135, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_UNSET     _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
+#define MSMFB_OVERLAY_PLAY      _IOW(MSMFB_IOCTL_MAGIC, 137, \
+						struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE	MSMFB_OVERLAY_PLAY
+
+#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
+					struct mdp_page_protection)
+#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
+					struct mdp_page_protection)
+#define MSMFB_OVERLAY_GET      _IOR(MSMFB_IOCTL_MAGIC, 140, \
+						struct mdp_overlay)
+#define MSMFB_OVERLAY_PLAY_ENABLE     _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
+#define MSMFB_OVERLAY_BLT       _IOWR(MSMFB_IOCTL_MAGIC, 142, \
+						struct msmfb_overlay_blt)
+#define MSMFB_OVERLAY_BLT_OFFSET     _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int)
+#define MSMFB_HISTOGRAM_START	_IOR(MSMFB_IOCTL_MAGIC, 144, \
+						struct mdp_histogram_start_req)
+#define MSMFB_HISTOGRAM_STOP	_IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int)
+#define MSMFB_NOTIFY_UPDATE	_IOWR(MSMFB_IOCTL_MAGIC, 146, unsigned int)
+
+#define MSMFB_OVERLAY_3D       _IOWR(MSMFB_IOCTL_MAGIC, 147, \
+						struct msmfb_overlay_3d)
+
+#define MSMFB_MIXER_INFO       _IOWR(MSMFB_IOCTL_MAGIC, 148, \
+						struct msmfb_mixer_info_req)
+#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
+						struct msmfb_overlay_data)
+#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150)
+#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151)
+#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152)
+#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
+						struct msmfb_data)
+#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
+#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
+#define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+#define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
+#define MSMFB_BUFFER_SYNC  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
+#define MSMFB_DISPLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 164, \
+						struct mdp_display_commit)
+#define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
+#define MSMFB_METADATA_GET  _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
+#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 167, \
+						unsigned int)
+#define MSMFB_ASYNC_BLIT              _IOW(MSMFB_IOCTL_MAGIC, 168, unsigned int)
+#define MSMFB_OVERLAY_PREPARE		_IOWR(MSMFB_IOCTL_MAGIC, 169, \
+						struct mdp_overlay_list)
+#define MSMFB_LPM_ENABLE	_IOWR(MSMFB_IOCTL_MAGIC, 170, unsigned int)
+#define MSMFB_MDP_PP_GET_FEATURE_VERSION _IOWR(MSMFB_IOCTL_MAGIC, 171, \
+					      struct mdp_pp_feature_version)
+
+#define FB_TYPE_3D_PANEL 0x10101010
+#define MDP_IMGTYPE2_START 0x10000
+#define MSMFB_DRIVER_VERSION	0xF9E8D701
+/* Maximum number of formats supported by MDP*/
+#define MDP_IMGTYPE_END 0x100
+
+/* HW Revisions for different MDSS targets */
+#define MDSS_GET_MAJOR(rev)		((rev) >> 28)
+#define MDSS_GET_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define MDSS_GET_STEP(rev)		((rev) & 0xFFFF)
+#define MDSS_GET_MAJOR_MINOR(rev)	((rev) >> 16)
+
+#define IS_MDSS_MAJOR_MINOR_SAME(rev1, rev2)	\
+	(MDSS_GET_MAJOR_MINOR((rev1)) == MDSS_GET_MAJOR_MINOR((rev2)))
+
+#define MDSS_MDP_REV(major, minor, step)	\
+	((((major) & 0x000F) << 28) |		\
+	 (((minor) & 0x0FFF) << 16) |		\
+	 ((step)   & 0xFFFF))
+
+#define MDSS_MDP_HW_REV_100	MDSS_MDP_REV(1, 0, 0) /* 8974 v1.0 */
+#define MDSS_MDP_HW_REV_101	MDSS_MDP_REV(1, 1, 0) /* 8x26 v1.0 */
+#define MDSS_MDP_HW_REV_101_1	MDSS_MDP_REV(1, 1, 1) /* 8x26 v2.0, 8926 v1.0 */
+#define MDSS_MDP_HW_REV_101_2	MDSS_MDP_REV(1, 1, 2) /* 8926 v2.0 */
+#define MDSS_MDP_HW_REV_102	MDSS_MDP_REV(1, 2, 0) /* 8974 v2.0 */
+#define MDSS_MDP_HW_REV_102_1	MDSS_MDP_REV(1, 2, 1) /* 8974 v3.0 (Pro) */
+#define MDSS_MDP_HW_REV_103	MDSS_MDP_REV(1, 3, 0) /* 8084 v1.0 */
+#define MDSS_MDP_HW_REV_103_1	MDSS_MDP_REV(1, 3, 1) /* 8084 v1.1 */
+#define MDSS_MDP_HW_REV_105	MDSS_MDP_REV(1, 5, 0) /* 8994 v1.0 */
+#define MDSS_MDP_HW_REV_106	MDSS_MDP_REV(1, 6, 0) /* 8916 v1.0 */
+#define MDSS_MDP_HW_REV_107	MDSS_MDP_REV(1, 7, 0) /* 8996 v1 */
+#define MDSS_MDP_HW_REV_107_1	MDSS_MDP_REV(1, 7, 1) /* 8996 v2 */
+#define MDSS_MDP_HW_REV_107_2	MDSS_MDP_REV(1, 7, 2) /* 8996 v3 */
+#define MDSS_MDP_HW_REV_108	MDSS_MDP_REV(1, 8, 0) /* 8939 v1.0 */
+#define MDSS_MDP_HW_REV_109	MDSS_MDP_REV(1, 9, 0) /* 8994 v2.0 */
+#define MDSS_MDP_HW_REV_110	MDSS_MDP_REV(1, 10, 0) /* 8992 v1.0 */
+#define MDSS_MDP_HW_REV_200	MDSS_MDP_REV(2, 0, 0) /* 8092 v1.0 */
+#define MDSS_MDP_HW_REV_112	MDSS_MDP_REV(1, 12, 0) /* 8952 v1.0 */
+#define MDSS_MDP_HW_REV_114	MDSS_MDP_REV(1, 14, 0) /* 8937 v1.0 */
+#define MDSS_MDP_HW_REV_115	MDSS_MDP_REV(1, 15, 0) /* msmgold */
+#define MDSS_MDP_HW_REV_116	MDSS_MDP_REV(1, 16, 0) /* msmtitanium */
+#define MDSS_MDP_HW_REV_300	MDSS_MDP_REV(3, 0, 0)  /* msmcobalt */
+#define MDSS_MDP_HW_REV_301	MDSS_MDP_REV(3, 0, 1)  /* msmcobalt v1.0 */
+
+enum {
+	NOTIFY_UPDATE_INIT,
+	NOTIFY_UPDATE_DEINIT,
+	NOTIFY_UPDATE_START,
+	NOTIFY_UPDATE_STOP,
+	NOTIFY_UPDATE_POWER_OFF,
+};
+
+enum {
+	NOTIFY_TYPE_NO_UPDATE,
+	NOTIFY_TYPE_SUSPEND,
+	NOTIFY_TYPE_UPDATE,
+	NOTIFY_TYPE_BL_UPDATE,
+	NOTIFY_TYPE_BL_AD_ATTEN_UPDATE,
+};
+
+enum {
+	MDP_RGB_565,      /* RGB 565 planer */
+	MDP_XRGB_8888,    /* RGB 888 padded */
+	MDP_Y_CBCR_H2V2,  /* Y and CbCr, pseudo planer w/ Cb is in MSB */
+	MDP_Y_CBCR_H2V2_ADRENO,
+	MDP_ARGB_8888,    /* ARGB 888 */
+	MDP_RGB_888,      /* RGB 888 planer */
+	MDP_Y_CRCB_H2V2,  /* Y and CrCb, pseudo planer w/ Cr is in MSB */
+	MDP_YCRYCB_H2V1,  /* YCrYCb interleave */
+	MDP_CBYCRY_H2V1,  /* CbYCrY interleave */
+	MDP_Y_CRCB_H2V1,  /* Y and CrCb, pseudo planer w/ Cr is in MSB */
+	MDP_Y_CBCR_H2V1,   /* Y and CbCr, pseudo planer w/ Cb is in MSB */
+	MDP_Y_CRCB_H1V2,
+	MDP_Y_CBCR_H1V2,
+	MDP_RGBA_8888,    /* ARGB 888 */
+	MDP_BGRA_8888,	  /* ABGR 888 */
+	MDP_RGBX_8888,	  /* RGBX 888 */
+	MDP_Y_CRCB_H2V2_TILE,  /* Y and CrCb, pseudo planer tile */
+	MDP_Y_CBCR_H2V2_TILE,  /* Y and CbCr, pseudo planer tile */
+	MDP_Y_CR_CB_H2V2,  /* Y, Cr and Cb, planar */
+	MDP_Y_CR_CB_GH2V2,  /* Y, Cr and Cb, planar aligned to Android YV12 */
+	MDP_Y_CB_CR_H2V2,  /* Y, Cb and Cr, planar */
+	MDP_Y_CRCB_H1V1,  /* Y and CrCb, pseudo planer w/ Cr is in MSB */
+	MDP_Y_CBCR_H1V1,  /* Y and CbCr, pseudo planer w/ Cb is in MSB */
+	MDP_YCRCB_H1V1,   /* YCrCb interleave */
+	MDP_YCBCR_H1V1,   /* YCbCr interleave */
+	MDP_BGR_565,      /* BGR 565 planer */
+	MDP_BGR_888,      /* BGR 888 */
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_BGRX_8888,   /* BGRX 8888 */
+	MDP_RGBA_8888_TILE,	  /* RGBA 8888 in tile format */
+	MDP_ARGB_8888_TILE,	  /* ARGB 8888 in tile format */
+	MDP_ABGR_8888_TILE,	  /* ABGR 8888 in tile format */
+	MDP_BGRA_8888_TILE,	  /* BGRA 8888 in tile format */
+	MDP_RGBX_8888_TILE,	  /* RGBX 8888 in tile format */
+	MDP_XRGB_8888_TILE,	  /* XRGB 8888 in tile format */
+	MDP_XBGR_8888_TILE,	  /* XBGR 8888 in tile format */
+	MDP_BGRX_8888_TILE,	  /* BGRX 8888 in tile format */
+	MDP_YCBYCR_H2V1,  /* YCbYCr interleave */
+	MDP_RGB_565_TILE,	  /* RGB 565 in tile format */
+	MDP_BGR_565_TILE,	  /* BGR 565 in tile format */
+	MDP_ARGB_1555,	/*ARGB 1555*/
+	MDP_RGBA_5551,	/*RGBA 5551*/
+	MDP_ARGB_4444,	/*ARGB 4444*/
+	MDP_RGBA_4444,	/*RGBA 4444*/
+	MDP_RGB_565_UBWC,
+	MDP_RGBA_8888_UBWC,
+	MDP_Y_CBCR_H2V2_UBWC,
+	MDP_RGBX_8888_UBWC,
+	MDP_Y_CRCB_H2V2_VENUS,
+	MDP_IMGTYPE_LIMIT,
+	MDP_RGB_BORDERFILL,	/* border fill pipe */
+	MDP_XRGB_1555,
+	MDP_RGBX_5551,
+	MDP_XRGB_4444,
+	MDP_RGBX_4444,
+	MDP_ABGR_1555,
+	MDP_BGRA_5551,
+	MDP_XBGR_1555,
+	MDP_BGRX_5551,
+	MDP_ABGR_4444,
+	MDP_BGRA_4444,
+	MDP_XBGR_4444,
+	MDP_BGRX_4444,
+	MDP_ABGR_8888,
+	MDP_XBGR_8888,
+	MDP_RGBA_1010102,
+	MDP_ARGB_2101010,
+	MDP_RGBX_1010102,
+	MDP_XRGB_2101010,
+	MDP_BGRA_1010102,
+	MDP_ABGR_2101010,
+	MDP_BGRX_1010102,
+	MDP_XBGR_2101010,
+	MDP_RGBA_1010102_UBWC,
+	MDP_RGBX_1010102_UBWC,
+	MDP_Y_CBCR_H2V2_P010,
+	MDP_Y_CBCR_H2V2_TP10_UBWC,
+	MDP_CRYCBY_H2V1,  /* CrYCbY interleave */
+	MDP_IMGTYPE_LIMIT1 = MDP_IMGTYPE_END,
+	MDP_FB_FORMAT = MDP_IMGTYPE2_START,    /* framebuffer format */
+	MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */
+};
+
+#define MDP_CRYCBY_H2V1 MDP_CRYCBY_H2V1
+
+enum {
+	PMEM_IMG,
+	FB_IMG,
+};
+
+enum {
+	HSIC_HUE = 0,
+	HSIC_SAT,
+	HSIC_INT,
+	HSIC_CON,
+	NUM_HSIC_PARAM,
+};
+
+enum mdss_mdp_max_bw_mode {
+	MDSS_MAX_BW_LIMIT_DEFAULT = 0x1,
+	MDSS_MAX_BW_LIMIT_CAMERA = 0x2,
+	MDSS_MAX_BW_LIMIT_HFLIP = 0x4,
+	MDSS_MAX_BW_LIMIT_VFLIP = 0x8,
+};
+
+#define MDSS_MDP_ROT_ONLY		0x80
+#define MDSS_MDP_RIGHT_MIXER		0x100
+#define MDSS_MDP_DUAL_PIPE		0x200
+
+/* mdp_blit_req flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+#define MDP_BLEND_FG_PREMULT 0x20000
+#define MDP_IS_FG 0x40000
+#define MDP_SOLID_FILL 0x00000020
+#define MDP_VPU_PIPE 0x00000040
+#define MDP_DEINTERLACE 0x80000000
+#define MDP_SHARPENING  0x40000000
+#define MDP_NO_DMA_BARRIER_START	0x20000000
+#define MDP_NO_DMA_BARRIER_END		0x10000000
+#define MDP_NO_BLIT			0x08000000
+#define MDP_BLIT_WITH_DMA_BARRIERS	0x000
+#define MDP_BLIT_WITH_NO_DMA_BARRIERS    \
+	(MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
+#define MDP_BLIT_SRC_GEM                0x04000000
+#define MDP_BLIT_DST_GEM                0x02000000
+#define MDP_BLIT_NON_CACHED		0x01000000
+#define MDP_OV_PIPE_SHARE		0x00800000
+#define MDP_DEINTERLACE_ODD		0x00400000
+#define MDP_OV_PLAY_NOWAIT		0x00200000
+#define MDP_SOURCE_ROTATED_90		0x00100000
+#define MDP_OVERLAY_PP_CFG_EN		0x00080000
+#define MDP_BACKEND_COMPOSITION		0x00040000
+#define MDP_BORDERFILL_SUPPORTED	0x00010000
+#define MDP_SECURE_OVERLAY_SESSION      0x00008000
+#define MDP_SECURE_DISPLAY_OVERLAY_SESSION	0x00002000
+#define MDP_OV_PIPE_FORCE_DMA		0x00004000
+#define MDP_MEMORY_ID_TYPE_FB		0x00001000
+#define MDP_BWC_EN			0x00000400
+#define MDP_DECIMATION_EN		0x00000800
+#define MDP_SMP_FORCE_ALLOC		0x00200000
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+#define MDP_FB_PAGE_PROTECTION_NONCACHED         (0)
+#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE      (1)
+#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE    (3)
+#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE  (4)
+/* Sentinel: Don't use! */
+#define MDP_FB_PAGE_PROTECTION_INVALID           (5)
+/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
+#define MDP_NUM_FB_PAGE_PROTECTION_VALUES        (5)
+
+#define MDP_DEEP_COLOR_YUV444    0x1
+#define MDP_DEEP_COLOR_RGB30B    0x2
+#define MDP_DEEP_COLOR_RGB36B    0x4
+#define MDP_DEEP_COLOR_RGB48B    0x8
+
+struct mdp_rect {
+	uint32_t x;
+	uint32_t y;
+	uint32_t w;
+	uint32_t h;
+};
+
+struct mdp_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	uint32_t offset;
+	int memory_id;		/* the file descriptor */
+	uint32_t priv;
+};
+
+struct mult_factor {
+	uint32_t numer;
+	uint32_t denom;
+};
+
+/*
+ * {3x3} + {3} ccs matrix
+ */
+
+#define MDP_CCS_RGB2YUV	0
+#define MDP_CCS_YUV2RGB	1
+
+#define MDP_CCS_SIZE	9
+#define MDP_BV_SIZE	3
+
+struct mdp_ccs {
+	int direction;			/* MDP_CCS_RGB2YUV or YUV2RGB */
+	uint16_t ccs[MDP_CCS_SIZE];	/* 3x3 color coefficients */
+	uint16_t bv[MDP_BV_SIZE];	/* 1x3 bias vector */
+};
+
+struct mdp_csc {
+	int id;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+/* The version of the mdp_blit_req structure so that
+ * user applications can selectively decide which functionality
+ * to include
+ */
+
+#define MDP_BLIT_REQ_VERSION 3
+
+struct color {
+	uint32_t r;
+	uint32_t g;
+	uint32_t b;
+	uint32_t alpha;
+};
+
+struct mdp_blit_req {
+	struct mdp_img src;
+	struct mdp_img dst;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	struct color const_color;
+	uint32_t alpha;
+	uint32_t transp_mask;
+	uint32_t flags;
+	int sharpening_strength;  /* -127 <--> 127, default 64 */
+	uint8_t color_space;
+	uint32_t fps;
+};
+
+struct mdp_blit_req_list {
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MSMFB_DATA_VERSION 2
+
+struct msmfb_data {
+	uint32_t offset;
+	int memory_id;
+	int id;
+	uint32_t flags;
+	uint32_t priv;
+	uint32_t iova;
+};
+
+#define MSMFB_NEW_REQUEST -1
+
+struct msmfb_overlay_data {
+	uint32_t id;
+	struct msmfb_data data;
+	uint32_t version_key;
+	struct msmfb_data plane1_data;
+	struct msmfb_data plane2_data;
+	struct msmfb_data dst_data;
+};
+
+struct msmfb_img {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+};
+
+#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1
+struct msmfb_writeback_data {
+	struct msmfb_data buf_info;
+	struct msmfb_img img;
+};
+
+#define MDP_PP_OPS_ENABLE 0x1
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+#define MDP_PP_OPS_DISABLE 0x8
+#define MDP_PP_IGC_FLAG_ROM0	0x10
+#define MDP_PP_IGC_FLAG_ROM1	0x20
+
+
+#define MDSS_PP_DSPP_CFG	0x000
+#define MDSS_PP_SSPP_CFG	0x100
+#define MDSS_PP_LM_CFG	0x200
+#define MDSS_PP_WB_CFG	0x300
+
+#define MDSS_PP_ARG_MASK	0x3C00
+#define MDSS_PP_ARG_NUM		4
+#define MDSS_PP_ARG_SHIFT	10
+#define MDSS_PP_LOCATION_MASK	0x0300
+#define MDSS_PP_LOGICAL_MASK	0x00FF
+
+#define MDSS_PP_ADD_ARG(var, arg) ((var) | (0x1 << (MDSS_PP_ARG_SHIFT + (arg))))
+#define PP_ARG(x, var) ((var) & (0x1 << (MDSS_PP_ARG_SHIFT + (x))))
+#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
+#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
+
+
+struct mdp_qseed_cfg {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_sharp_cfg {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
+struct mdp_qseed_cfg_data {
+	uint32_t block;
+	struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG         0x1
+#define MDP_OVERLAY_PP_QSEED_CFG       0x2
+#define MDP_OVERLAY_PP_PA_CFG          0x4
+#define MDP_OVERLAY_PP_IGC_CFG         0x8
+#define MDP_OVERLAY_PP_SHARP_CFG       0x10
+#define MDP_OVERLAY_PP_HIST_CFG        0x20
+#define MDP_OVERLAY_PP_HIST_LUT_CFG    0x40
+#define MDP_OVERLAY_PP_PA_V2_CFG       0x80
+#define MDP_OVERLAY_PP_PCC_CFG	       0x100
+
+#define MDP_CSC_FLAG_ENABLE	0x1
+#define MDP_CSC_FLAG_YUV_IN	0x2
+#define MDP_CSC_FLAG_YUV_OUT	0x4
+
+#define MDP_CSC_MATRIX_COEFF_SIZE	9
+#define MDP_CSC_CLAMP_SIZE		6
+#define MDP_CSC_BIAS_SIZE		3
+
+struct mdp_csc_cfg {
+	/* flags for enable CSC, toggling RGB,YUV input/output */
+	uint32_t flags;
+	uint32_t csc_mv[MDP_CSC_MATRIX_COEFF_SIZE];
+	uint32_t csc_pre_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_post_bv[MDP_CSC_BIAS_SIZE];
+	uint32_t csc_pre_lv[MDP_CSC_CLAMP_SIZE];
+	uint32_t csc_post_lv[MDP_CSC_CLAMP_SIZE];
+};
+
+struct mdp_csc_cfg_data {
+	uint32_t block;
+	struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_pa_cfg {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_pa_mem_col_cfg {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+#define MDP_SIX_ZONE_LUT_SIZE		384
+
+/* PA Write/Read extension flags */
+#define MDP_PP_PA_HUE_ENABLE		0x10
+#define MDP_PP_PA_SAT_ENABLE		0x20
+#define MDP_PP_PA_VAL_ENABLE		0x40
+#define MDP_PP_PA_CONT_ENABLE		0x80
+#define MDP_PP_PA_SIX_ZONE_ENABLE	0x100
+#define MDP_PP_PA_SKIN_ENABLE		0x200
+#define MDP_PP_PA_SKY_ENABLE		0x400
+#define MDP_PP_PA_FOL_ENABLE		0x800
+
+/* PA masks */
+/* Masks used in PA v1_7 only */
+#define MDP_PP_PA_MEM_PROT_HUE_EN	0x1
+#define MDP_PP_PA_MEM_PROT_SAT_EN	0x2
+#define MDP_PP_PA_MEM_PROT_VAL_EN	0x4
+#define MDP_PP_PA_MEM_PROT_CONT_EN	0x8
+#define MDP_PP_PA_MEM_PROT_SIX_EN	0x10
+#define MDP_PP_PA_MEM_PROT_BLEND_EN	0x20
+/* Masks used in all PAv2 versions */
+#define MDP_PP_PA_HUE_MASK		0x1000
+#define MDP_PP_PA_SAT_MASK		0x2000
+#define MDP_PP_PA_VAL_MASK		0x4000
+#define MDP_PP_PA_CONT_MASK		0x8000
+#define MDP_PP_PA_SIX_ZONE_HUE_MASK	0x10000
+#define MDP_PP_PA_SIX_ZONE_SAT_MASK	0x20000
+#define MDP_PP_PA_SIX_ZONE_VAL_MASK	0x40000
+#define MDP_PP_PA_MEM_COL_SKIN_MASK	0x80000
+#define MDP_PP_PA_MEM_COL_SKY_MASK	0x100000
+#define MDP_PP_PA_MEM_COL_FOL_MASK	0x200000
+#define MDP_PP_PA_MEM_PROTECT_EN	0x400000
+#define MDP_PP_PA_SAT_ZERO_EXP_EN	0x800000
+
+/* Flags for setting PA saturation and value hold */
+#define MDP_PP_PA_LEFT_HOLD		0x1
+#define MDP_PP_PA_RIGHT_HOLD		0x2
+
+struct mdp_pa_v2_data {
+	/* Mask bits for PA features */
+	uint32_t flags;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_cfg skin_cfg;
+	struct mdp_pa_mem_col_cfg sky_cfg;
+	struct mdp_pa_mem_col_cfg fol_cfg;
+	uint32_t six_zone_len;
+	uint32_t six_zone_thresh;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+struct mdp_pa_mem_col_data_v1_7 {
+	uint32_t color_adjust_p0;
+	uint32_t color_adjust_p1;
+	uint32_t color_adjust_p2;
+	uint32_t blend_gain;
+	uint8_t sat_hold;
+	uint8_t val_hold;
+	uint32_t hue_region;
+	uint32_t sat_region;
+	uint32_t val_region;
+};
+
+struct mdp_pa_data_v1_7 {
+	uint32_t mode;
+	uint32_t global_hue_adj;
+	uint32_t global_sat_adj;
+	uint32_t global_val_adj;
+	uint32_t global_cont_adj;
+	struct mdp_pa_mem_col_data_v1_7 skin_cfg;
+	struct mdp_pa_mem_col_data_v1_7 sky_cfg;
+	struct mdp_pa_mem_col_data_v1_7 fol_cfg;
+	uint32_t six_zone_thresh;
+	uint32_t six_zone_adj_p0;
+	uint32_t six_zone_adj_p1;
+	uint8_t six_zone_sat_hold;
+	uint8_t six_zone_val_hold;
+	uint32_t six_zone_len;
+	uint32_t *six_zone_curve_p0;
+	uint32_t *six_zone_curve_p1;
+};
+
+
+struct mdp_pa_v2_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	struct mdp_pa_v2_data pa_v2_data;
+	void *cfg_payload;
+};
+
+
+enum {
+	mdp_igc_rec601 = 1,
+	mdp_igc_rec709,
+	mdp_igc_srgb,
+	mdp_igc_custom,
+	mdp_igc_rec_max,
+};
+
+struct mdp_igc_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t len, ops;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+	void *cfg_payload;
+};
+
+struct mdp_igc_lut_data_v1_7 {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+};
+
+struct mdp_igc_lut_data_payload {
+	uint32_t table_fmt;
+	uint32_t len;
+	uint64_t __user c0_c1_data;
+	uint64_t __user c2_data;
+	uint32_t strength;
+};
+
+struct mdp_histogram_cfg {
+	uint32_t ops;
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+struct mdp_hist_lut_data_v1_7 {
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_hist_lut_data {
+	uint32_t block;
+	uint32_t version;
+	uint32_t hist_lut_first;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+	void *cfg_payload;
+};
+
+struct mdp_pcc_coeff {
+	uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
+};
+
+struct mdp_pcc_coeff_v1_7 {
+	uint32_t c, r, g, b, rg, gb, rb, rgb;
+};
+
+struct mdp_pcc_data_v1_7 {
+	struct mdp_pcc_coeff_v1_7 r, g, b;
+};
+
+struct mdp_pcc_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t ops;
+	struct mdp_pcc_coeff r, g, b;
+	void *cfg_payload;
+};
+
+enum {
+	mdp_lut_igc,
+	mdp_lut_pgc,
+	mdp_lut_hist,
+	mdp_lut_rgb,
+	mdp_lut_max,
+};
+struct mdp_overlay_pp_params {
+	uint32_t config_ops;
+	struct mdp_csc_cfg csc_cfg;
+	struct mdp_qseed_cfg qseed_cfg[2];
+	struct mdp_pa_cfg pa_cfg;
+	struct mdp_pa_v2_data pa_v2_cfg;
+	struct mdp_igc_lut_data igc_cfg;
+	struct mdp_sharp_cfg sharp_cfg;
+	struct mdp_histogram_cfg hist_cfg;
+	struct mdp_hist_lut_data hist_lut_cfg;
+	/* PAv2 cfg data for PA 2.x versions */
+	struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+	struct mdp_pcc_cfg_data pcc_cfg_data;
+};
+
+/**
+ * enum mdss_mdp_blend_op - Different blend operations set by userspace
+ *
+ * @BLEND_OP_NOT_DEFINED:    No blend operation defined for the layer.
+ * @BLEND_OP_OPAQUE:         Apply a constant blend operation. The layer
+ *                           would appear opaque in case fg plane alpha is
+ *                           0xff.
+ * @BLEND_OP_PREMULTIPLIED:  Apply source over blend rule. Layer already has
+ *                           alpha pre-multiplication done. If fg plane alpha
+ *                           is less than 0xff, apply modulation as well. This
+ *                           operation is intended on layers having alpha
+ *                           channel.
+ * @BLEND_OP_COVERAGE:       Apply source over blend rule. Layer is not alpha
+ *                           pre-multiplied. Apply pre-multiplication. If fg
+ *                           plane alpha is less than 0xff, apply modulation as
+ *                           well.
+ * @BLEND_OP_MAX:            Used to track maximum blend operation possible by
+ *                           mdp.
+ */
+enum mdss_mdp_blend_op {
+	BLEND_OP_NOT_DEFINED = 0,
+	BLEND_OP_OPAQUE,
+	BLEND_OP_PREMULTIPLIED,
+	BLEND_OP_COVERAGE,
+	BLEND_OP_MAX,
+};
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define MAX_PLANES	4
+struct mdp_scale_data {
+	uint8_t enable_pxl_ext;
+
+	int init_phase_x[MAX_PLANES];
+	int phase_step_x[MAX_PLANES];
+	int init_phase_y[MAX_PLANES];
+	int phase_step_y[MAX_PLANES];
+
+	int num_ext_pxls_left[MAX_PLANES];
+	int num_ext_pxls_right[MAX_PLANES];
+	int num_ext_pxls_top[MAX_PLANES];
+	int num_ext_pxls_btm[MAX_PLANES];
+
+	int left_ftch[MAX_PLANES];
+	int left_rpt[MAX_PLANES];
+	int right_ftch[MAX_PLANES];
+	int right_rpt[MAX_PLANES];
+
+	int top_rpt[MAX_PLANES];
+	int btm_rpt[MAX_PLANES];
+	int top_ftch[MAX_PLANES];
+	int btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+};
+
+/**
+ * enum mdp_overlay_pipe_type - Different pipe type set by userspace
+ *
+ * @PIPE_TYPE_AUTO:    Not specified, pipe will be selected according to flags.
+ * @PIPE_TYPE_VIG:     VIG pipe.
+ * @PIPE_TYPE_RGB:     RGB pipe.
+ * @PIPE_TYPE_DMA:     DMA pipe.
+ * @PIPE_TYPE_CURSOR:  CURSOR pipe.
+ * @PIPE_TYPE_MAX:     Used to track maximum number of pipe type.
+ */
+enum mdp_overlay_pipe_type {
+	PIPE_TYPE_AUTO = 0,
+	PIPE_TYPE_VIG,
+	PIPE_TYPE_RGB,
+	PIPE_TYPE_DMA,
+	PIPE_TYPE_CURSOR,
+	PIPE_TYPE_MAX,
+};
+
+/**
+ * struct mdp_overlay - overlay surface structure
+ * @src:	Source image information (width, height, format).
+ * @src_rect:	Source crop rectangle, portion of image that will be fetched.
+ *		This should always be within boundaries of source image.
+ * @dst_rect:	Destination rectangle, the position and size of image on screen.
+ *		This should always be within panel boundaries.
+ * @z_order:	Blending stage to occupy in display, if multiple layers are
+ *		present, highest z_order usually means the top most visible
+ *		layer. The range acceptable is from 0-3 to support blending
+ *		up to 4 layers.
+ * @is_fg:	This flag is used to disable blending of any layers with z_order
+ *		less than this overlay. It means that any layers with z_order
+ *		less than this layer will not be blended and will be replaced
+ *		by the background border color.
+ * @alpha:	Used to set plane opacity. The range can be from 0-255, where
+ *		0 means completely transparent and 255 means fully opaque.
+ * @transp_mask: Color used as color key for transparency. Any pixel in fetched
+ *		image matching this color will be transparent when blending.
+ *		The color should be in same format as the source image format.
+ * @flags:	This is used to customize operation of overlay. See MDP flags
+ *		for more information.
+ * @pipe_type:  Used to specify the type of overlay pipe.
+ * @user_data:	DEPRECATED* Used to store user application specific information.
+ * @bg_color:	Solid color used to fill the overlay surface when no source
+ *		buffer is provided.
+ * @horz_deci:	Horizontal decimation value, this indicates the amount of pixels
+ *		dropped for each pixel that is fetched from a line. The value
+ *		given should be power of two of decimation amount.
+ *		0: no decimation
+ *		1: decimate by 2 (drop 1 pixel for each pixel fetched)
+ *		2: decimate by 4 (drop 3 pixels for each pixel fetched)
+ *		3: decimate by 8 (drop 7 pixels for each pixel fetched)
+ *		4: decimate by 16 (drop 15 pixels for each pixel fetched)
+ * @vert_deci:	Vertical decimation value, this indicates the amount of lines
+ *		dropped for each line that is fetched from overlay. The value
+ *		given should be power of two of decimation amount.
+ *		0: no decimation
+ *		1: decimation by 2 (drop 1 line for each line fetched)
+ *		2: decimation by 4 (drop 3 lines for each line fetched)
+ *		3: decimation by 8 (drop 7 lines for each line fetched)
+ *		4: decimation by 16 (drop 15 lines for each line fetched)
+ * @overlay_pp_cfg: Overlay post processing configuration, for more information
+ *		see struct mdp_overlay_pp_params.
+ * @priority:	Priority is returned by the driver when overlay is set for the
+ *		first time. It indicates the priority of the underlying pipe
+ *		serving the overlay. This priority can be used by user-space
+ *		in source split when pipes are re-used and shuffled around to
+ *		reduce fallbacks.
+ */
+struct mdp_overlay {
+	struct msmfb_img src;
+	struct mdp_rect src_rect;
+	struct mdp_rect dst_rect;
+	uint32_t z_order;	/* stage number */
+	uint32_t is_fg;		/* control alpha & transp */
+	uint32_t alpha;
+	uint32_t blend_op;
+	uint32_t transp_mask;
+	uint32_t flags;
+	uint32_t pipe_type;
+	uint32_t id;
+	uint8_t priority;
+	uint32_t user_data[6];
+	uint32_t bg_color;
+	uint8_t horz_deci;
+	uint8_t vert_deci;
+	struct mdp_overlay_pp_params overlay_pp_cfg;
+	struct mdp_scale_data scale;
+	uint8_t color_space;
+	uint32_t frame_rate;
+};
+
+struct msmfb_overlay_3d {
+	uint32_t is_3d;
+	uint32_t width;
+	uint32_t height;
+};
+
+
+struct msmfb_overlay_blt {
+	uint32_t enable;
+	uint32_t offset;
+	uint32_t width;
+	uint32_t height;
+	uint32_t bpp;
+};
+
+struct mdp_histogram {
+	uint32_t frame_cnt;
+	uint32_t bin_cnt;
+	uint32_t *r;
+	uint32_t *g;
+	uint32_t *b;
+};
+
+#define MISR_CRC_BATCH_SIZE 32
+enum {
+	DISPLAY_MISR_EDP,
+	DISPLAY_MISR_DSI0,
+	DISPLAY_MISR_DSI1,
+	DISPLAY_MISR_HDMI,
+	DISPLAY_MISR_LCDC,
+	DISPLAY_MISR_MDP,
+	DISPLAY_MISR_ATV,
+	DISPLAY_MISR_DSI_CMD,
+	DISPLAY_MISR_MAX
+};
+
+enum {
+	MISR_OP_NONE,
+	MISR_OP_SFM,
+	MISR_OP_MFM,
+	MISR_OP_BM,
+	MISR_OP_MAX
+};
+
+struct mdp_misr {
+	uint32_t block_id;
+	uint32_t frame_count;
+	uint32_t crc_op_mode;
+	uint32_t crc_value[MISR_CRC_BATCH_SIZE];
+};
+
+/*
+ * mdp_block_type defines the identifiers for pipes in MDP 4.3 and up
+ *
+ * MDP_BLOCK_RESERVED is provided for backward compatibility and is
+ * deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used
+ * instead.
+ *
+ * MDP_LOGICAL_BLOCK_DISP_0 identifies the display pipe which fb0 uses,
+ * same for others.
+ */
+
+enum {
+	MDP_BLOCK_RESERVED = 0,
+	MDP_BLOCK_OVERLAY_0,
+	MDP_BLOCK_OVERLAY_1,
+	MDP_BLOCK_VG_1,
+	MDP_BLOCK_VG_2,
+	MDP_BLOCK_RGB_1,
+	MDP_BLOCK_RGB_2,
+	MDP_BLOCK_DMA_P,
+	MDP_BLOCK_DMA_S,
+	MDP_BLOCK_DMA_E,
+	MDP_BLOCK_OVERLAY_2,
+	MDP_LOGICAL_BLOCK_DISP_0 = 0x10,
+	MDP_LOGICAL_BLOCK_DISP_1,
+	MDP_LOGICAL_BLOCK_DISP_2,
+	MDP_BLOCK_MAX,
+};
+
+/*
+ * mdp_histogram_start_req is used to provide the parameters for
+ * histogram start request
+ */
+
+struct mdp_histogram_start_req {
+	uint32_t block;
+	uint8_t frame_cnt;
+	uint8_t bit_mask;
+	uint16_t num_bins;
+};
+
+/*
+ * mdp_histogram_data is used to return the histogram data, once
+ * the histogram is done/stopped/cancelled
+ */
+
+struct mdp_histogram_data {
+	uint32_t block;
+	uint32_t bin_cnt;
+	uint32_t *c0;
+	uint32_t *c1;
+	uint32_t *c2;
+	uint32_t *extra_info;
+};
+
+
+#define GC_LUT_ENTRIES_V1_7	512
+
+struct mdp_ar_gc_lut_data {
+	uint32_t x_start;
+	uint32_t slope;
+	uint32_t offset;
+};
+
+#define MDP_PP_PGC_ROUNDING_ENABLE 0x10
+struct mdp_pgc_lut_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint8_t num_r_stages;
+	uint8_t num_g_stages;
+	uint8_t num_b_stages;
+	struct mdp_ar_gc_lut_data *r_data;
+	struct mdp_ar_gc_lut_data *g_data;
+	struct mdp_ar_gc_lut_data *b_data;
+	void *cfg_payload;
+};
+
+#define PGC_LUT_ENTRIES 1024
+struct mdp_pgc_lut_data_v1_7 {
+	uint32_t  len;
+	uint32_t  *c0_data;
+	uint32_t  *c1_data;
+	uint32_t  *c2_data;
+};
+
+/*
+ * mdp_rgb_lut_data is used to provide parameters for configuring the
+ * generic RGB LUT in case of gamma correction or other LUT update use cases
+ */
+struct mdp_rgb_lut_data {
+	uint32_t flags;
+	uint32_t lut_type;
+	struct fb_cmap cmap;
+};
+
+enum {
+	mdp_rgb_lut_gc,
+	mdp_rgb_lut_hist,
+};
+
+struct mdp_lut_cfg_data {
+	uint32_t lut_type;
+	union {
+		struct mdp_igc_lut_data igc_lut_data;
+		struct mdp_pgc_lut_data pgc_lut_data;
+		struct mdp_hist_lut_data hist_lut_data;
+		struct mdp_rgb_lut_data rgb_lut_data;
+	} data;
+};
+
+struct mdp_bl_scale_data {
+	uint32_t min_lvl;
+	uint32_t scale;
+};
+
+struct mdp_pa_cfg_data {
+	uint32_t block;
+	struct mdp_pa_cfg pa_data;
+};
+
+#define MDP_DITHER_DATA_V1_7_SZ 16
+
+struct mdp_dither_data_v1_7 {
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	uint32_t len;
+	uint32_t data[MDP_DITHER_DATA_V1_7_SZ];
+	uint32_t temporal_en;
+};
+
+struct mdp_pa_dither_data {
+	uint64_t data_flags;
+	uint32_t matrix_sz;
+	uint64_t __user matrix_data;
+	uint32_t strength;
+	uint32_t offset_en;
+};
+
+struct mdp_dither_cfg_data {
+	uint32_t version;
+	uint32_t block;
+	uint32_t flags;
+	uint32_t mode;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+	void *cfg_payload;
+};
+
+#define MDP_GAMUT_TABLE_NUM		8
+#define MDP_GAMUT_TABLE_NUM_V1_7	4
+#define MDP_GAMUT_SCALE_OFF_TABLE_NUM	3
+#define MDP_GAMUT_TABLE_V1_7_SZ 1229
+#define MDP_GAMUT_SCALE_OFF_SZ 16
+#define MDP_GAMUT_TABLE_V1_7_COARSE_SZ 32
+
+struct mdp_gamut_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t version;
+	/* v1 version specific params */
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+	/* params for newer versions of gamut */
+	void *cfg_payload;
+};
+
+enum {
+	mdp_gamut_fine_mode = 0x1,
+	mdp_gamut_coarse_mode,
+};
+
+struct mdp_gamut_data_v1_7 {
+	uint32_t mode;
+	uint32_t map_en;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c0_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t *c1_c2_data[MDP_GAMUT_TABLE_NUM_V1_7];
+	uint32_t  tbl_scale_off_sz[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+	uint32_t  *scale_off_data[MDP_GAMUT_SCALE_OFF_TABLE_NUM];
+};
+
+struct mdp_calib_config_data {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
+struct mdp_calib_config_buffer {
+	uint32_t ops;
+	uint32_t size;
+	uint32_t *buffer;
+};
+
+struct mdp_calib_dcm_state {
+	uint32_t ops;
+	uint32_t dcm_state;
+};
+
+enum {
+	DCM_UNINIT,
+	DCM_UNBLANK,
+	DCM_ENTER,
+	DCM_EXIT,
+	DCM_BLANK,
+	DTM_ENTER,
+	DTM_EXIT,
+};
+
+#define MDSS_PP_SPLIT_LEFT_ONLY		0x10000000
+#define MDSS_PP_SPLIT_RIGHT_ONLY	0x20000000
+#define MDSS_PP_SPLIT_MASK		0x30000000
+
+#define MDSS_MAX_BL_BRIGHTNESS 255
+#define AD_BL_LIN_LEN 256
+#define AD_BL_ATT_LUT_LEN 33
+
+#define MDSS_AD_MODE_AUTO_BL	0x0
+#define MDSS_AD_MODE_AUTO_STR	0x1
+#define MDSS_AD_MODE_TARG_STR	0x3
+#define MDSS_AD_MODE_MAN_STR	0x7
+#define MDSS_AD_MODE_CALIB	0xF
+
+#define MDP_PP_AD_INIT	0x10
+#define MDP_PP_AD_CFG	0x20
+
+struct mdss_ad_init {
+	uint32_t asym_lut[33];
+	uint32_t color_corr_lut[33];
+	uint8_t i_control[2];
+	uint16_t black_lvl;
+	uint16_t white_lvl;
+	uint8_t var;
+	uint8_t limit_ampl;
+	uint8_t i_dither;
+	uint8_t slope_max;
+	uint8_t slope_min;
+	uint8_t dither_ctl;
+	uint8_t format;
+	uint8_t auto_size;
+	uint16_t frame_w;
+	uint16_t frame_h;
+	uint8_t logo_v;
+	uint8_t logo_h;
+	uint32_t alpha;
+	uint32_t alpha_base;
+	uint32_t al_thresh;
+	uint32_t bl_lin_len;
+	uint32_t bl_att_len;
+	uint32_t *bl_lin;
+	uint32_t *bl_lin_inv;
+	uint32_t *bl_att_lut;
+};
+
+#define MDSS_AD_BL_CTRL_MODE_EN 1
+#define MDSS_AD_BL_CTRL_MODE_DIS 0
+struct mdss_ad_cfg {
+	uint32_t mode;
+	uint32_t al_calib_lut[33];
+	uint16_t backlight_min;
+	uint16_t backlight_max;
+	uint16_t backlight_scale;
+	uint16_t amb_light_min;
+	uint16_t filter[2];
+	uint16_t calib[4];
+	uint8_t strength_limit;
+	uint8_t t_filter_recursion;
+	uint16_t stab_itr;
+	uint32_t bl_ctrl_mode;
+};
+
+struct mdss_ad_bl_cfg {
+	uint32_t bl_min_delta;
+	uint32_t bl_low_limit;
+};
+
+/* ops uses standard MDP_PP_* flags */
+struct mdss_ad_init_cfg {
+	uint32_t ops;
+	union {
+		struct mdss_ad_init init;
+		struct mdss_ad_cfg cfg;
+	} params;
+};
+
+/* mode uses MDSS_AD_MODE_* flags */
+struct mdss_ad_input {
+	uint32_t mode;
+	union {
+		uint32_t amb_light;
+		uint32_t strength;
+		uint32_t calib_bl;
+	} in;
+	uint32_t output;
+};
+
+#define MDSS_CALIB_MODE_BL	0x1
+struct mdss_calib_cfg {
+	uint32_t ops;
+	uint32_t calib_mask;
+};
+
+enum {
+	mdp_op_pcc_cfg,
+	mdp_op_csc_cfg,
+	mdp_op_lut_cfg,
+	mdp_op_qseed_cfg,
+	mdp_bl_scale_cfg,
+	mdp_op_pa_cfg,
+	mdp_op_pa_v2_cfg,
+	mdp_op_dither_cfg,
+	mdp_op_gamut_cfg,
+	mdp_op_calib_cfg,
+	mdp_op_ad_cfg,
+	mdp_op_ad_input,
+	mdp_op_calib_mode,
+	mdp_op_calib_buffer,
+	mdp_op_calib_dcm_state,
+	mdp_op_max,
+	mdp_op_pa_dither_cfg,
+	mdp_op_ad_bl_cfg,
+	mdp_op_pp_max = 255,
+};
+#define mdp_op_pa_dither_cfg mdp_op_pa_dither_cfg
+#define mdp_op_pp_max mdp_op_pp_max
+
+#define mdp_op_ad_bl_cfg mdp_op_ad_bl_cfg
+
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_BGRA_8888,
+	WB_FORMAT_BGRX_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* Need to support */
+};
+
+struct msmfb_mdp_pp {
+	uint32_t op;
+	union {
+		struct mdp_pcc_cfg_data pcc_cfg_data;
+		struct mdp_csc_cfg_data csc_cfg_data;
+		struct mdp_lut_cfg_data lut_cfg_data;
+		struct mdp_qseed_cfg_data qseed_cfg_data;
+		struct mdp_bl_scale_data bl_scale_data;
+		struct mdp_pa_cfg_data pa_cfg_data;
+		struct mdp_pa_v2_cfg_data pa_v2_cfg_data;
+		struct mdp_dither_cfg_data dither_cfg_data;
+		struct mdp_gamut_cfg_data gamut_cfg_data;
+		struct mdp_calib_config_data calib_cfg;
+		struct mdss_ad_init_cfg ad_init_cfg;
+		struct mdss_calib_cfg mdss_calib_cfg;
+		struct mdss_ad_input ad_input;
+		struct mdp_calib_config_buffer calib_buffer;
+		struct mdp_calib_dcm_state calib_dcm;
+		struct mdss_ad_bl_cfg ad_bl_cfg;
+	} data;
+};
+
+#define FB_METADATA_VIDEO_INFO_CODE_SUPPORT 1
+enum {
+	metadata_op_none,
+	metadata_op_base_blend,
+	metadata_op_frame_rate,
+	metadata_op_vic,
+	metadata_op_wb_format,
+	metadata_op_wb_secure,
+	metadata_op_get_caps,
+	metadata_op_crc,
+	metadata_op_get_ion_fd,
+	metadata_op_max
+};
+
+struct mdp_blend_cfg {
+	uint32_t is_premultiplied;
+};
+
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
+struct mdss_hw_caps {
+	uint32_t mdp_rev;
+	uint8_t rgb_pipes;
+	uint8_t vig_pipes;
+	uint8_t dma_pipes;
+	uint8_t max_smp_cnt;
+	uint8_t smp_per_pipe;
+	uint32_t features;
+};
+
+struct msmfb_metadata {
+	uint32_t op;
+	uint32_t flags;
+	union {
+		struct mdp_misr misr_request;
+		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
+		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
+		uint8_t secure_en;
+		int fbmem_ionfd;
+	} data;
+};
+
+#define MDP_MAX_FENCE_FD	32
+#define MDP_BUF_SYNC_FLAG_WAIT	1
+#define MDP_BUF_SYNC_FLAG_RETIRE_FENCE	0x10
+
+struct mdp_buf_sync {
+	uint32_t flags;
+	uint32_t acq_fen_fd_cnt;
+	uint32_t session_id;
+	int *acq_fen_fd;
+	int *rel_fen_fd;
+	int *retire_fen_fd;
+};
+
+struct mdp_async_blit_req_list {
+	struct mdp_buf_sync sync;
+	uint32_t count;
+	struct mdp_blit_req req[];
+};
+
+#define MDP_DISPLAY_COMMIT_OVERLAY	1
+
+struct mdp_display_commit {
+	uint32_t flags;
+	uint32_t wait_for_finish;
+	struct fb_var_screeninfo var;
+	/*
+	 * user needs to follow guidelines as per below rules
+	 * 1. source split is enabled: l_roi = roi and r_roi = 0
+	 * 2. source split is disabled:
+	 *	2.1 split display: l_roi = l_roi and r_roi = r_roi
+	 *	2.2 non split display: l_roi = roi and r_roi = 0
+	 */
+	struct mdp_rect l_roi;
+	struct mdp_rect r_roi;
+};
+
+/**
+ * struct mdp_overlay_list - argument for ioctl MSMFB_OVERLAY_PREPARE
+ * @num_overlays:	Number of overlay layers as part of the frame.
+ * @overlay_list:	Pointer to a list of overlay structures identifying
+ *			the layers as part of the frame
+ * @flags:		Flags can be used to extend behavior.
+ * @processed_overlays:	Output parameter indicating how many pipes were
+ *			successful. If there are no errors this number should
+ *			match num_overlays. Otherwise it will indicate the last
+ *			successful index for overlay that couldn't be set.
+ */
+struct mdp_overlay_list {
+	uint32_t num_overlays;
+	struct mdp_overlay **overlay_list;
+	uint32_t flags;
+	uint32_t processed_overlays;
+};
+
+struct mdp_page_protection {
+	uint32_t page_protection;
+};
+
+
+struct mdp_mixer_info {
+	int pndx;
+	int pnum;
+	int ptype;
+	int mixer_num;
+	int z_order;
+};
+
+#define MAX_PIPE_PER_MIXER  7
+
+struct msmfb_mixer_info_req {
+	int mixer_num;
+	int cnt;
+	struct mdp_mixer_info info[MAX_PIPE_PER_MIXER];
+};
+
+enum {
+	DISPLAY_SUBSYSTEM_ID,
+	ROTATOR_SUBSYSTEM_ID,
+};
+
+enum {
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
+};
+
+enum {
+	MDP_WRITEBACK_MIRROR_OFF,
+	MDP_WRITEBACK_MIRROR_ON,
+	MDP_WRITEBACK_MIRROR_PAUSE,
+	MDP_WRITEBACK_MIRROR_RESUME,
+};
+
+enum mdp_color_space {
+	MDP_CSC_ITU_R_601,
+	MDP_CSC_ITU_R_601_FR,
+	MDP_CSC_ITU_R_709,
+};
+
+enum {
+	mdp_igc_v1_7 = 1,
+	mdp_igc_vmax,
+	mdp_hist_lut_v1_7,
+	mdp_hist_lut_vmax,
+	mdp_pgc_v1_7,
+	mdp_pgc_vmax,
+	mdp_dither_v1_7,
+	mdp_dither_vmax,
+	mdp_gamut_v1_7,
+	mdp_gamut_vmax,
+	mdp_pa_v1_7,
+	mdp_pa_vmax,
+	mdp_pcc_v1_7,
+	mdp_pcc_vmax,
+	mdp_pp_legacy,
+	mdp_dither_pa_v1_7,
+	mdp_igc_v3,
+	mdp_pp_unknown = 255
+};
+
+#define mdp_dither_pa_v1_7 mdp_dither_pa_v1_7
+#define mdp_pp_unknown mdp_pp_unknown
+#define mdp_igc_v3 mdp_igc_v3
+
+/* PP Features */
+enum {
+	IGC = 1,
+	PCC,
+	GC,
+	PA,
+	GAMUT,
+	DITHER,
+	QSEED,
+	HIST_LUT,
+	HIST,
+	PP_FEATURE_MAX,
+	PA_DITHER,
+	PP_MAX_FEATURES = 25,
+};
+
+#define PA_DITHER PA_DITHER
+#define PP_MAX_FEATURES PP_MAX_FEATURES
+
+struct mdp_pp_feature_version {
+	uint32_t pp_feature;
+	uint32_t version_info;
+};
+#endif /*_UAPI_MSM_MDP_H_*/
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
new file mode 100644
index 0000000..05a105b
--- /dev/null
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -0,0 +1,688 @@
+#ifndef _MSM_MDP_EXT_H_
+#define _MSM_MDP_EXT_H_
+
+#include <linux/msm_mdp.h>
+
+#define MDP_IOCTL_MAGIC 'S'
+/* atomic commit ioctl used for validate and commit request */
+#define MSMFB_ATOMIC_COMMIT	_IOWR(MDP_IOCTL_MAGIC, 128, void *)
+
+/*
+ * Ioctl for updating the layer position asynchronously. Initially, pipes
+ * should be configured with MDP_LAYER_ASYNC flag set during the atomic commit,
+ * after which any number of position update calls can be made. This would
+ * enable multiple position updates within a single vsync. However, the screen
+ * update would happen only after vsync, which would pick the latest update.
+ *
+ * Limitations:
+ * - Currently supported only for video mode panels with single LM or dual LM
+ *   with source_split enabled.
+ * - Only position update is supported with no scaling/cropping.
+ * - Async layers should have unique z_order.
+ */
+#define MSMFB_ASYNC_POSITION_UPDATE _IOWR(MDP_IOCTL_MAGIC, 129, \
+					struct mdp_position_update)
+
+/*
+ * Ioctl for sending the config information.
+ * QSEED3 coefficient LUT tables are passed by the user space using this IOCTL.
+ */
+#define MSMFB_MDP_SET_CFG _IOW(MDP_IOCTL_MAGIC, 130, \
+					      struct mdp_set_cfg)
+
+/*
+ * To allow proper structure padding for 64bit/32bit target. NOTE(review): "__LP64" below is never predefined (compilers define "__LP64__"), so the pad is always 4 — confirm this is the intended, already-shipped ABI before changing.
+ */
+#ifdef __LP64
+#define MDP_LAYER_COMMIT_V1_PAD 3
+#else
+#define MDP_LAYER_COMMIT_V1_PAD 4
+#endif
+
+/*
+ * LAYER FLAG CONFIGURATION
+ */
+/* left-right layer flip flag */
+#define MDP_LAYER_FLIP_LR		0x1
+
+/* up-down layer flip flag */
+#define MDP_LAYER_FLIP_UD		0x2
+
+/*
+ * This flag enables pixel extension for the current layer. Validate/commit
+ * call uses scale parameters when this flag is enabled.
+ */
+#define MDP_LAYER_ENABLE_PIXEL_EXT	0x4
+
+/* Flag indicates that layer is foreground layer */
+#define MDP_LAYER_FORGROUND		0x8
+
+/* Flag indicates that layer is associated with secure session */
+#define MDP_LAYER_SECURE_SESSION	0x10
+
+/*
+ * Flag indicates that layer is drawing solid fill. Validate/commit call
+ * does not expect buffer when this flag is enabled.
+ */
+#define MDP_LAYER_SOLID_FILL		0x20
+
+/* Layer format is deinterlace */
+#define MDP_LAYER_DEINTERLACE		0x40
+
+/* layer contains bandwidth compressed format data */
+#define MDP_LAYER_BWC			0x80
+
+/* layer is async position updatable */
+#define MDP_LAYER_ASYNC			0x100
+
+/* layer contains postprocessing configuration data */
+#define MDP_LAYER_PP			0x200
+
+/* Flag indicates that layer is associated with secure display session */
+#define MDP_LAYER_SECURE_DISPLAY_SESSION 0x400
+
+/* Flag enables qseed3 scaling for the current layer */
+#define MDP_LAYER_ENABLE_QSEED3_SCALE   0x800
+
+/*
+ * layer will work in multirect mode, where a single hardware pipe
+ * fetches multiple source rectangles
+ */
+#define MDP_LAYER_MULTIRECT_ENABLE		0x1000
+
+/*
+ * if flag present and multirect is enabled, multirect will work in parallel
+ * fetch mode, otherwise it will default to serial fetch mode.
+ */
+#define MDP_LAYER_MULTIRECT_PARALLEL_MODE	0x2000
+
+/*
+ * DESTINATION SCALER FLAG CONFIGURATION
+ */
+
+/* Enable/disable Destination scaler */
+#define MDP_DESTSCALER_ENABLE		0x1
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Scaling parameter update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_SCALE_UPDATE	0x2
+
+/*
+ * Indicating mdp_destination_scaler_data contains
+ * Detail enhancement setting update. Can be set anytime.
+ */
+#define MDP_DESTSCALER_ENHANCER_UPDATE	0x4
+
+/*
+ * VALIDATE/COMMIT FLAG CONFIGURATION
+ */
+
+/*
+ * Client enables it to inform that call is to validate layers before commit.
+ * If this flag is not set then driver will use MSMFB_ATOMIC_COMMIT for commit.
+ */
+#define MDP_VALIDATE_LAYER			0x01
+
+/*
+ * This flag is only valid for commit call. Commit behavior is synchronous
+ * when this flag is defined. It blocks current call till processing is
+ * complete. Behavior is asynchronous otherwise.
+ */
+#define MDP_COMMIT_WAIT_FOR_FINISH		0x02
+
+/*
+ * This flag is only valid for commit call and used for debugging purpose. It
+ * forces the driver to wait for sync fences.
+ */
+#define MDP_COMMIT_SYNC_FENCE_WAIT		0x04
+
+/* Flag to enable AVR(Adaptive variable refresh) feature. */
+#define MDP_COMMIT_AVR_EN			0x08
+
+/*
+ * Flag to select one shot mode when AVR feature is enabled.
+ * Default mode is continuous mode.
+ */
+#define MDP_COMMIT_AVR_ONE_SHOT_MODE		0x10
+
+/* Flag to enable concurrent writeback for the frame */
+#define MDP_COMMIT_CWB_EN 0x800
+
+/*
+ * Flag to select DSPP as the data point for CWB. If CWB
+ * is enabled without this flag, LM will be selected as data point.
+ */
+#define MDP_COMMIT_CWB_DSPP 0x1000
+
+#define MDP_COMMIT_VERSION_1_0		0x00010000
+
+/*
+ * Configuration structures
+ * All parameters are input to driver unless mentioned output parameter
+ * explicitly.
+ */
+struct mdp_layer_plane {
+	/* DMA buffer file descriptor information. */
+	int fd;
+
+	/* Pixel offset in the dma buffer. */
+	uint32_t offset;
+
+	/* Number of bytes in one scan line including padding bytes. */
+	uint32_t stride;
+};
+
+struct mdp_layer_buffer {
+	/* layer width in pixels. */
+	uint32_t width;
+
+	/* layer height in pixels. */
+	uint32_t height;
+
+	/*
+	 * layer format in DRM-style fourcc, refer drm_fourcc.h for
+	 * standard formats
+	 */
+	uint32_t format;
+
+	/* plane to hold the fd, offset, etc for all color components */
+	struct mdp_layer_plane planes[MAX_PLANES];
+
+	/* valid planes count in layer planes list */
+	uint32_t plane_count;
+
+	/* compression ratio factor, value depends on the pixel format */
+	struct mult_factor comp_ratio;
+
+	/*
+	 * SyncFence associated with this buffer. It is used in two ways.
+	 *
+	 * 1. Driver waits to consume the buffer till producer signals in case
+	 * of primary and external display.
+	 *
+	 * 2. Writeback device uses buffer structure for output buffer where
+	 * driver is producer. However, client sends the fence with buffer to
+	 * indicate that consumer is still using the buffer and it is not ready
+	 * for new content.
+	 */
+	int	 fence;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t reserved;
+};
+
+/*
+ * One layer holds configuration for one pipe. If client wants to stage single
+ * layer on two pipes then it should send two different layers with relative
+ * (x,y) information. Client must send same information during validate and
+ * commit call. Commit call may fail if client sends different layer information
+ * attached to same pipe during validate and commit. Device invalidates the pipe
+ * once it receives the vsync for that commit.
+ */
+struct mdp_input_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag configuration section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the index
+	 * in validate and commit call. Device reserves the pipe once validate
+	 * is successful. Device only uses validated pipe during commit call.
+	 * If client sends different layer/pipe configuration in validate &
+	 * commit then commit may fail.
+	 */
+	uint32_t		pipe_ndx;
+
+	/*
+	 * Horizontal decimation value, this indicates the amount of pixels
+	 * dropped for each pixel that is fetched from a line. It does not
+	 * result in bandwidth reduction because pixels are still fetched from
+	 * memory but dropped internally by hardware.
+	 * The decimation value given should be power of two of decimation
+	 * amount.
+	 * 0: no decimation
+	 * 1: decimate by 2 (drop 1 pixel for each pixel fetched)
+	 * 2: decimate by 4 (drop 3 pixels for each pixel fetched)
+	 * 3: decimate by 8 (drop 7 pixels for each pixel fetched)
+	 * 4: decimate by 16 (drop 15 pixels for each pixel fetched)
+	 */
+	uint8_t			horz_deci;
+
+	/*
+	 * Vertical decimation value, this indicates the amount of lines
+	 * dropped for each line that is fetched from overlay. It saves
+	 * bandwidth because decimated pixels are not fetched.
+	 * The decimation value given should be power of two of decimation
+	 * amount.
+	 * 0: no decimation
+	 * 1: decimation by 2 (drop 1 line for each line fetched)
+	 * 2: decimation by 4 (drop 3 lines for each line fetched)
+	 * 3: decimation by 8 (drop 7 lines for each line fetched)
+	 * 4: decimation by 16 (drop 15 lines for each line fetched)
+	 */
+	uint8_t			vert_deci;
+
+	/*
+	 * Used to set plane opacity. The range can be from 0-255, where
+	 * 0 means completely transparent and 255 means fully opaque.
+	 */
+	uint8_t			alpha;
+
+	/*
+	 * Blending stage to occupy in display, if multiple layers are present,
+	 * highest z_order usually means the top most visible layer. The range
+	 * acceptable is from 0-7 to support blending up to 8 layers.
+	 */
+	uint16_t		z_order;
+
+	/*
+	 * Color used as color key for transparency. Any pixel in fetched
+	 * image matching this color will be transparent when blending.
+	 * The color should be in same format as the source image format.
+	 */
+	uint32_t		transp_mask;
+
+	/*
+	 * Solid color used to fill the overlay surface when no source
+	 * buffer is provided.
+	 */
+	uint32_t		bg_color;
+
+	/* blend operation defined in "mdss_mdp_blend_op" enum. */
+	enum mdss_mdp_blend_op		blend_op;
+
+	/* color space of the source */
+	enum mdp_color_space	color_space;
+
+	/*
+	 * Source crop rectangle, portion of image that will be fetched. This
+	 * should always be within boundaries of source image.
+	 */
+	struct mdp_rect		src_rect;
+
+	/*
+	 * Destination rectangle, the position and size of image on screen.
+	 * This should always be within panel boundaries.
+	 */
+	struct mdp_rect		dst_rect;
+
+	/* Scaling parameters. */
+	void __user	*scale;
+
+	/* Buffer attached with each layer. Device uses it for commit call. */
+	struct mdp_layer_buffer	buffer;
+
+	/*
+	 * Source side post processing configuration information for each
+	 * layer.
+	 */
+	void __user		*pp_info;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Only for validate call. Frame buffer device sets error code
+	 * based on validate call failure scenario.
+	 */
+	int			error_code;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t		reserved[6];
+};
+
+struct mdp_output_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Writeback destination selection for output. Client provides the index
+	 * in validate and commit call.
+	 */
+	uint32_t			writeback_ndx;
+
+	/* Buffer attached with output layer. Device uses it for commit call */
+	struct mdp_layer_buffer		buffer;
+
+	/* 32bits reserved value for future usage. */
+	uint32_t			reserved[6];
+};
+
+/*
+ * Destination scaling info structure holds setup parameters for upscaling
+ * setting in the destination scaling block.
+ */
+struct mdp_destination_scaler_data {
+	/*
+	 * Flag to switch between mode for destination scaler. Please Refer to
+	 * destination scaler flag config for all possible setting.
+	 */
+	uint32_t			flags;
+
+	/*
+	 * Destination scaler selection index. Client provides the index in
+	 * validate and commit call.
+	 */
+	uint32_t			dest_scaler_ndx;
+
+	/*
+	 * LM width configuration per Destination scaling updates
+	 */
+	uint32_t			lm_width;
+
+	/*
+	 * LM height configuration per Destination scaling updates
+	 */
+	uint32_t			lm_height;
+
+	/*
+	 * The scaling parameters for all the mode except disable. For
+	 * disabling the scaler, there is no need to provide the scale.
+	 * A userspace pointer points to struct mdp_scale_data_v2.
+	 */
+	uint64_t	__user scale;
+};
+
+/*
+ * Commit structure holds layer stack send by client for validate and commit
+ * call. If layers are different between validate and commit call then commit
+ * call will also do validation. In such case, commit may fail.
+ */
+struct mdp_layer_commit_v1 {
+	/*
+	 * Flag to enable/disable properties for commit/validate call. Refer
+	 * validate/commit flag config section for all possible flags.
+	 */
+	uint32_t		flags;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides release fence handle to client. It
+	 * triggers release fence when display hardware has consumed all the
+	 * buffers attached to this commit call and buffer is ready for reuse
+	 * for primary and external. For writeback case, it triggers it when
+	 * output buffer is ready for consumer.
+	 */
+	int			release_fence;
+
+	/*
+	 * Left_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * left part of panel when it is split display. For non-split display,
+	 * it defines the "region of interest" on the panel.
+	 */
+	struct mdp_rect		left_roi;
+
+	/*
+	 * Right_roi is optional configuration. Client configures it only when
+	 * partial update is enabled. It defines the "region of interest" on
+	 * right part of panel for split display configuration. It is not
+	 * required for non-split display.
+	 */
+	struct mdp_rect		right_roi;
+
+	 /* Pointer to a list of input layers for composition. */
+	struct mdp_input_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t		input_layer_cnt;
+
+	/*
+	 * Output layer for writeback display. It supports only one
+	 * layer as output layer. This is not required for primary
+	 * and external displays
+	 */
+	struct mdp_output_layer __user *output_layer;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device provides retire fence handle if
+	 * COMMIT_RETIRE_FENCE flag is set in commit call. It triggers
+	 * retire fence when current layers are swapped with new layers
+	 * on display hardware. For video mode panel and writeback,
+	 * retire fence and release fences are triggered at the same
+	 * time while command mode panel triggers release fence first
+	 * (on pingpong done) and retire fence (on rdptr done)
+	 * after that.
+	 */
+	int			retire_fence;
+
+	/*
+	 * Scaler data and control for setting up destination scaler.
+	 * A userspace pointer that points to a list of
+	 * struct mdp_destination_scaler_data.
+	 */
+	void __user		*dest_scaler;
+
+	/*
+	 * Represents number of Destination scaler data provided by userspace.
+	 */
+	uint32_t		dest_scaler_cnt;
+
+	/* 32-bits reserved value for future usage. */
+	uint32_t		reserved[MDP_LAYER_COMMIT_V1_PAD];
+};
+
+/*
+ * mdp_overlay_list - argument for ioctl MSMFB_ATOMIC_COMMIT
+ */
+struct mdp_layer_commit {
+	/*
+	 * 32bit version indicates the commit structure selection
+	 * from union. Lower 16bits indicates the minor version while
+	 * higher 16bits indicates the major version. It selects the
+	 * commit structure based on major version selection. Minor version
+	 * indicates that reserved fields are in use.
+	 *
+	 * Current supported version is 1.0 (Major:1 Minor:0)
+	 */
+	uint32_t version;
+	union {
+		/* Layer commit/validate definition for V1 */
+		struct mdp_layer_commit_v1 commit_v1;
+	};
+};
+
+struct mdp_point {
+	uint32_t x;
+	uint32_t y;
+};
+
+/*
+ * Async updatable layers. One layer holds configuration for one pipe.
+ */
+struct mdp_async_layer {
+	/*
+	 * Flag to enable/disable properties for layer configuration. Refer
+	 * layer flag config section for all possible flags.
+	 */
+	uint32_t flags;
+
+	/*
+	 * Pipe selection for this layer by client. Client provides the
+	 * pipe index that the device reserved during ATOMIC_COMMIT.
+	 */
+	uint32_t		pipe_ndx;
+
+	/* Source start x,y. */
+	struct mdp_point	src;
+
+	/* Destination start x,y. */
+	struct mdp_point	dst;
+
+	/*
+	 * This is an output parameter.
+	 *
+	 * Frame buffer device sets error code based on the failure.
+	 */
+	int			error_code;
+
+	uint32_t		reserved[3];
+};
+
+/*
+ * mdp_position_update - argument for ioctl MSMFB_ASYNC_POSITION_UPDATE
+ */
+struct mdp_position_update {
+	 /* Pointer to a list of async updatable input layers */
+	struct mdp_async_layer __user *input_layers;
+
+	/* Input layer count present in input list */
+	uint32_t input_layer_cnt;
+};
+
+#define MAX_DET_CURVES		3
+struct mdp_det_enhance_data {
+	uint32_t enable;
+	int16_t sharpen_level1;
+	int16_t sharpen_level2;
+	uint16_t clip;
+	uint16_t limit;
+	uint16_t thr_quiet;
+	uint16_t thr_dieout;
+	uint16_t thr_low;
+	uint16_t thr_high;
+	uint16_t prec_shift;
+	int16_t adjust_a[MAX_DET_CURVES];
+	int16_t adjust_b[MAX_DET_CURVES];
+	int16_t adjust_c[MAX_DET_CURVES];
+};
+
+/* Flags to enable Scaler and its sub components */
+#define ENABLE_SCALE			0x1
+#define ENABLE_DETAIL_ENHANCE		0x2
+#define ENABLE_DIRECTION_DETECTION	0x4
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP			0x1
+#define SCALER_LUT_DIR_WR		0x2
+#define SCALER_LUT_Y_CIR_WR		0x4
+#define SCALER_LUT_UV_CIR_WR		0x8
+#define SCALER_LUT_Y_SEP_WR		0x10
+#define SCALER_LUT_UV_SEP_WR		0x20
+
+/* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D		0x0
+#define FILTER_CIRCULAR_2D		0x1
+#define FILTER_SEPARABLE_1D		0x2
+#define FILTER_BILINEAR			0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT	0x0
+#define FILTER_ALPHA_BILINEAR		0x1
+
+/**
+ * struct mdp_scale_data_v2
+ * Driver uses this new Data structure for storing all scaling params
+ * This structure contains all pixel extension data and QSEED3 filter
+ * configuration and coefficient table indices
+ */
+struct mdp_scale_data_v2 {
+	uint32_t enable;
+
+	/* Init phase values */
+	int32_t init_phase_x[MAX_PLANES];
+	int32_t phase_step_x[MAX_PLANES];
+	int32_t init_phase_y[MAX_PLANES];
+	int32_t phase_step_y[MAX_PLANES];
+
+	/*
+	 * This should be set to total horizontal pixels
+	 * left + right + width
+	 */
+	uint32_t num_ext_pxls_left[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_right[MAX_PLANES];
+
+	/*
+	 * This should be set to vertical pixels
+	 * top + bottom + height
+	 */
+	uint32_t num_ext_pxls_top[MAX_PLANES];
+
+	/* Unused param for backward compatibility */
+	uint32_t num_ext_pxls_btm[MAX_PLANES];
+
+	/* over fetch pixels */
+	int32_t left_ftch[MAX_PLANES];
+	int32_t left_rpt[MAX_PLANES];
+	int32_t right_ftch[MAX_PLANES];
+	int32_t right_rpt[MAX_PLANES];
+
+	/* Repeat pixels */
+	uint32_t top_rpt[MAX_PLANES];
+	uint32_t btm_rpt[MAX_PLANES];
+	uint32_t top_ftch[MAX_PLANES];
+	uint32_t btm_ftch[MAX_PLANES];
+
+	uint32_t roi_w[MAX_PLANES];
+
+	/*
+	 * alpha plane can only be scaled using bilinear or pixel
+	 * repeat/drop, specify these for Y and UV planes only
+	 */
+	uint32_t preload_x[MAX_PLANES];
+	uint32_t preload_y[MAX_PLANES];
+	uint32_t src_width[MAX_PLANES];
+	uint32_t src_height[MAX_PLANES];
+
+	uint32_t dst_width;
+	uint32_t dst_height;
+
+	uint32_t y_rgb_filter_cfg;
+	uint32_t uv_filter_cfg;
+	uint32_t alpha_filter_cfg;
+	uint32_t blend_cfg;
+
+	uint32_t lut_flag;
+	uint32_t dir_lut_idx;
+
+	/* for Y(RGB) and UV planes*/
+	uint32_t y_rgb_cir_lut_idx;
+	uint32_t uv_cir_lut_idx;
+	uint32_t y_rgb_sep_lut_idx;
+	uint32_t uv_sep_lut_idx;
+
+	struct mdp_det_enhance_data detail_enhance;
+
+	/* reserved value for future usage. */
+	uint64_t reserved[8];
+};
+
+/**
+ * struct mdp_scale_luts_info
+ * This struct pointer is received as payload in SET_CFG_IOCTL when the flags
+ * is set to MDP_QSEED3_LUT_CFG
+ * @dir_lut:      Direction detection coefficients table
+ * @cir_lut:      Circular coefficients table
+ * @sep_lut:      Separable coefficients table
+ * @dir_lut_size: Size of direction coefficients table
+ * @cir_lut_size: Size of circular coefficients table
+ * @sep_lut_size: Size of separable coefficients table
+ */
+struct mdp_scale_luts_info {
+	uint64_t __user dir_lut;
+	uint64_t __user cir_lut;
+	uint64_t __user sep_lut;
+	uint32_t dir_lut_size;
+	uint32_t cir_lut_size;
+	uint32_t sep_lut_size;
+};
+
+#define MDP_QSEED3_LUT_CFG 0x1
+
+struct mdp_set_cfg {
+	uint64_t flags;
+	uint32_t len;
+	uint64_t __user payload;
+};
+#endif
diff --git a/include/uapi/linux/msm_rotator.h b/include/uapi/linux/msm_rotator.h
new file mode 100644
index 0000000..e1a2ecb
--- /dev/null
+++ b/include/uapi/linux/msm_rotator.h
@@ -0,0 +1,60 @@
+#ifndef _UAPI__MSM_ROTATOR_H__
+#define _UAPI__MSM_ROTATOR_H__
+
+#include <linux/types.h>
+#include <linux/msm_mdp.h>
+
+#define MSM_ROTATOR_IOCTL_MAGIC 'R'
+
+#define MSM_ROTATOR_IOCTL_START   \
+		_IOWR(MSM_ROTATOR_IOCTL_MAGIC, 1, struct msm_rotator_img_info)
+#define MSM_ROTATOR_IOCTL_ROTATE   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 2, struct msm_rotator_data_info)
+#define MSM_ROTATOR_IOCTL_FINISH   \
+		_IOW(MSM_ROTATOR_IOCTL_MAGIC, 3, int)
+
+#define ROTATOR_VERSION_01	0xA5B4C301
+
+enum rotator_clk_type {
+	ROTATOR_CORE_CLK,
+	ROTATOR_PCLK,
+	ROTATOR_IMEM_CLK
+};
+
+struct msm_rotator_img_info {
+	unsigned int session_id;
+	struct msmfb_img  src;
+	struct msmfb_img  dst;
+	struct mdp_rect src_rect;
+	unsigned int    dst_x;
+	unsigned int    dst_y;
+	unsigned char   rotations;
+	int enable;
+	unsigned int	downscale_ratio;
+	unsigned int secure;
+};
+
+struct msm_rotator_data_info {
+	int session_id;
+	struct msmfb_data src;
+	struct msmfb_data dst;
+	unsigned int version_key;
+	struct msmfb_data src_chroma;
+	struct msmfb_data dst_chroma;
+};
+
+struct msm_rot_clocks {
+	const char *clk_name;
+	enum rotator_clk_type clk_type;
+	unsigned int clk_rate;
+};
+
+struct msm_rotator_platform_data {
+	unsigned int number_of_clocks;
+	unsigned int hardware_version_number;
+	struct msm_rot_clocks *rotator_clks;
+	struct msm_bus_scale_pdata *bus_scale_table;
+	char rot_iommu_split_domain;
+};
+#endif
+
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
index d08c63f..0c5d5dd 100644
--- a/include/uapi/linux/packet_diag.h
+++ b/include/uapi/linux/packet_diag.h
@@ -64,7 +64,7 @@
 	__u32	pdmc_count;
 	__u16	pdmc_type;
 	__u16	pdmc_alen;
-	__u8	pdmc_addr[MAX_ADDR_LEN];
+	__u8	pdmc_addr[32]; /* MAX_ADDR_LEN */
 };
 
 struct packet_diag_ring {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index df7451d..4937c09 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -130,8 +130,11 @@
 struct tc_prio_qopt {
 	int	bands;			/* Number of bands */
 	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
+	__u8	enable_flow;		/* Enable dequeue */
 };
 
+#define TCQ_PRIO_FLOW_CONTROL 1
+
 /* MULTIQ section */
 
 struct tc_multiq_qopt {
diff --git a/include/uapi/linux/rmnet_data.h b/include/uapi/linux/rmnet_data.h
index 7044df4..48c173e 100644
--- a/include/uapi/linux/rmnet_data.h
+++ b/include/uapi/linux/rmnet_data.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -206,8 +206,19 @@
 	 *       uint32_t MAP Flow Handle
 	 * Returns: status code
 	 */
-	RMNET_NETLINK_DEL_VND_TC_FLOW
+	RMNET_NETLINK_DEL_VND_TC_FLOW,
+
+	/*
+	 * RMNET_NETLINK_NEW_VND_WITH_NAME - Creates a new virtual network
+	 *                                   device node with the specified
+	 *                                   device name
+	 * Args: int32_t node number
+	 *       char[] vnd_name - Use as name
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND_WITH_NAME
 };
+#define RMNET_NETLINK_NEW_VND_WITH_NAME RMNET_NETLINK_NEW_VND_WITH_NAME
 
 enum rmnet_config_endpoint_modes_e {
 	/* Pass the frame up the stack with no modifications to skb->dev      */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index a242e72..fd379ec 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -70,7 +70,7 @@
  * Common stuff for both V4L1 and V4L2
  * Moved from videodev.h
  */
-#define VIDEO_MAX_FRAME               32
+#define VIDEO_MAX_FRAME               64
 #define VIDEO_MAX_PLANES               8
 
 /*
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
index 0e00bb5..d138beb 100644
--- a/include/uapi/media/Kbuild
+++ b/include/uapi/media/Kbuild
@@ -4,6 +4,9 @@
 header-y += cam_isp_vfe.h
 header-y += cam_isp_ife.h
 header-y += cam_sensor.h
+header-y += cam_sync.h
 header-y += msm_media_info.h
 header-y += msm_vidc.h
 header-y += msm_sde_rotator.h
+header-y += radio-iris.h
+header-y += radio-iris-commands.h
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
new file mode 100644
index 0000000..003c9ad
--- /dev/null
+++ b/include/uapi/media/cam_sync.h
@@ -0,0 +1,134 @@
+#ifndef __UAPI_CAM_SYNC_H__
+#define __UAPI_CAM_SYNC_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+
+#define CAM_SYNC_DEVICE_NAME                     "cam_sync_device"
+
+/* V4L event which user space will subscribe to */
+#define CAM_SYNC_V4L_EVENT                       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define CAM_SYNC_V4L_EVENT_ID_CB_TRIG            0
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define CAM_SYNC_USER_PAYLOAD_SIZE               2
+
+/* Device type for sync device needed for device discovery */
+#define CAM_SYNC_DEVICE_TYPE                     (MEDIA_ENT_F_OLD_BASE)
+
+#define CAM_SYNC_GET_PAYLOAD_PTR(ev, type)       \
+	(type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header))
+
+#define CAM_SYNC_GET_HEADER_PTR(ev)              \
+	((struct cam_sync_ev_header *)ev.u.data)
+
+#define CAM_SYNC_STATE_INVALID                   0
+#define CAM_SYNC_STATE_ACTIVE                    1
+#define CAM_SYNC_STATE_SIGNALED_SUCCESS          2
+#define CAM_SYNC_STATE_SIGNALED_ERROR            3
+
+/**
+ * struct cam_sync_ev_header - Event header for sync event notification
+ *
+ * @sync_obj: Sync object
+ * @status:   Status of the object
+ */
+struct cam_sync_ev_header {
+	int32_t sync_obj;
+	int32_t status;
+};
+
+/**
+ * struct cam_sync_info - Sync object creation information
+ *
+ * @name:       Optional string representation of the sync object
+ * @sync_obj:   Sync object returned after creation in kernel
+ */
+struct cam_sync_info {
+	char name[64];
+	int32_t sync_obj;
+};
+
+/**
+ * struct cam_sync_signal - Sync object signaling struct
+ *
+ * @sync_obj:   Sync object to be signaled
+ * @sync_state: State of the sync object to which it should be signaled
+ */
+struct cam_sync_signal {
+	int32_t sync_obj;
+	uint32_t sync_state;
+};
+
+/**
+ * struct cam_sync_merge - Merge information for sync objects
+ *
+ * @sync_objs:  Pointer to sync objects
+ * @num_objs:   Number of objects in the array
+ * @merged:     Merged sync object
+ */
+struct cam_sync_merge {
+	__u64 sync_objs;
+	uint32_t num_objs;
+	int32_t merged;
+};
+
+/**
+ * struct cam_sync_userpayload_info - Payload info from user space
+ *
+ * @sync_obj:   Sync object for which payload has to be registered for
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct cam_sync_userpayload_info {
+	int32_t sync_obj;
+	uint32_t reserved;
+	__u64 payload[CAM_SYNC_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct cam_sync_wait - Sync object wait information
+ *
+ * @sync_obj:   Sync object to wait on
+ * @reserved:   Reserved
+ * @timeout_ms: Timeout in milliseconds
+ */
+struct cam_sync_wait {
+	int32_t sync_obj;
+	uint32_t reserved;
+	uint64_t timeout_ms;
+};
+
+/**
+ * struct cam_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id:         IOCTL command id
+ * @size:       Size of command payload
+ * @result:     Result of command execution
+ * @reserved:   Reserved
+ * @ioctl_ptr:  Pointer to user data
+ */
+struct cam_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__user __u64 ioctl_ptr;
+};
+
+#define CAM_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_private_ioctl_arg)
+
+#define CAM_SYNC_CREATE                          0
+#define CAM_SYNC_DESTROY                         1
+#define CAM_SYNC_SIGNAL                          2
+#define CAM_SYNC_MERGE                           3
+#define CAM_SYNC_REGISTER_PAYLOAD                4
+#define CAM_SYNC_DEREGISTER_PAYLOAD              5
+#define CAM_SYNC_WAIT                            6
+
+#endif /* __UAPI_CAM_SYNC_H__ */
diff --git a/include/uapi/media/radio-iris-commands.h b/include/uapi/media/radio-iris-commands.h
new file mode 100644
index 0000000..b9dce3d
--- /dev/null
+++ b/include/uapi/media/radio-iris-commands.h
@@ -0,0 +1,108 @@
+#ifndef __UAPI_RADIO_IRIS_COMMANDS_H
+#define __UAPI_RADIO_IRIS_COMMANDS_H
+
+enum v4l2_cid_private_iris_t {
+	V4L2_CID_PRIVATE_IRIS_SRCHMODE = (0x08000000 + 1),
+	V4L2_CID_PRIVATE_IRIS_SCANDWELL,
+	V4L2_CID_PRIVATE_IRIS_SRCHON,
+	V4L2_CID_PRIVATE_IRIS_STATE,
+	V4L2_CID_PRIVATE_IRIS_TRANSMIT_MODE,
+	V4L2_CID_PRIVATE_IRIS_RDSGROUP_MASK,
+	V4L2_CID_PRIVATE_IRIS_REGION,
+	V4L2_CID_PRIVATE_IRIS_SIGNAL_TH,
+	V4L2_CID_PRIVATE_IRIS_SRCH_PTY,
+	V4L2_CID_PRIVATE_IRIS_SRCH_PI,
+	V4L2_CID_PRIVATE_IRIS_SRCH_CNT,
+	V4L2_CID_PRIVATE_IRIS_EMPHASIS,
+	V4L2_CID_PRIVATE_IRIS_RDS_STD,
+	V4L2_CID_PRIVATE_IRIS_SPACING,
+	V4L2_CID_PRIVATE_IRIS_RDSON,
+	V4L2_CID_PRIVATE_IRIS_RDSGROUP_PROC,
+	V4L2_CID_PRIVATE_IRIS_LP_MODE,
+	V4L2_CID_PRIVATE_IRIS_ANTENNA,
+	V4L2_CID_PRIVATE_IRIS_RDSD_BUF,
+	V4L2_CID_PRIVATE_IRIS_PSALL,  /*0x8000014*/
+
+	/*v4l2 Tx controls*/
+	V4L2_CID_PRIVATE_IRIS_TX_SETPSREPEATCOUNT,
+	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_PS_NAME,
+	V4L2_CID_PRIVATE_IRIS_STOP_RDS_TX_RT,
+	V4L2_CID_PRIVATE_IRIS_IOVERC,
+	V4L2_CID_PRIVATE_IRIS_INTDET,
+	V4L2_CID_PRIVATE_IRIS_MPX_DCC,
+	V4L2_CID_PRIVATE_IRIS_AF_JUMP,
+	V4L2_CID_PRIVATE_IRIS_RSSI_DELTA,
+	V4L2_CID_PRIVATE_IRIS_HLSI, /*0x800001d*/
+
+	/*Diagnostic commands*/
+	V4L2_CID_PRIVATE_IRIS_SOFT_MUTE,
+	V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_ADDR,
+	V4L2_CID_PRIVATE_IRIS_RIVA_ACCS_LEN,
+	V4L2_CID_PRIVATE_IRIS_RIVA_PEEK,
+	V4L2_CID_PRIVATE_IRIS_RIVA_POKE,
+	V4L2_CID_PRIVATE_IRIS_SSBI_ACCS_ADDR,
+	V4L2_CID_PRIVATE_IRIS_SSBI_PEEK,
+	V4L2_CID_PRIVATE_IRIS_SSBI_POKE,
+	V4L2_CID_PRIVATE_IRIS_TX_TONE,
+	V4L2_CID_PRIVATE_IRIS_RDS_GRP_COUNTERS,
+	V4L2_CID_PRIVATE_IRIS_SET_NOTCH_FILTER, /* 0x8000028 */
+	V4L2_CID_PRIVATE_IRIS_SET_AUDIO_PATH, /* TAVARUA specific command */
+	V4L2_CID_PRIVATE_IRIS_DO_CALIBRATION,
+	V4L2_CID_PRIVATE_IRIS_SRCH_ALGORITHM, /* TAVARUA specific command */
+	V4L2_CID_PRIVATE_IRIS_GET_SINR,
+	V4L2_CID_PRIVATE_INTF_LOW_THRESHOLD,
+	V4L2_CID_PRIVATE_INTF_HIGH_THRESHOLD,
+	V4L2_CID_PRIVATE_SINR_THRESHOLD,
+	V4L2_CID_PRIVATE_SINR_SAMPLES,
+	V4L2_CID_PRIVATE_SPUR_FREQ,
+	V4L2_CID_PRIVATE_SPUR_FREQ_RMSSI,
+	V4L2_CID_PRIVATE_SPUR_SELECTION,
+	V4L2_CID_PRIVATE_UPDATE_SPUR_TABLE,
+	V4L2_CID_PRIVATE_VALID_CHANNEL,
+	V4L2_CID_PRIVATE_AF_RMSSI_TH,
+	V4L2_CID_PRIVATE_AF_RMSSI_SAMPLES,
+	V4L2_CID_PRIVATE_GOOD_CH_RMSSI_TH,
+	V4L2_CID_PRIVATE_SRCHALGOTYPE,
+	V4L2_CID_PRIVATE_CF0TH12,
+	V4L2_CID_PRIVATE_SINRFIRSTSTAGE,
+	V4L2_CID_PRIVATE_RMSSIFIRSTSTAGE,
+	V4L2_CID_PRIVATE_RXREPEATCOUNT,
+	V4L2_CID_PRIVATE_IRIS_RSSI_TH,
+	V4L2_CID_PRIVATE_IRIS_AF_JUMP_RSSI_TH,
+	V4L2_CID_PRIVATE_BLEND_SINRHI,
+	V4L2_CID_PRIVATE_BLEND_RMSSIHI,
+
+	/*using private CIDs under userclass*/
+	V4L2_CID_PRIVATE_IRIS_READ_DEFAULT = 0x00980928,
+	V4L2_CID_PRIVATE_IRIS_WRITE_DEFAULT,
+	V4L2_CID_PRIVATE_IRIS_SET_CALIBRATION,
+	V4L2_CID_PRIVATE_IRIS_SET_SPURTABLE = 0x0098092D,
+	V4L2_CID_PRIVATE_IRIS_GET_SPUR_TBL  = 0x0098092E,
+};
+
+enum iris_evt_t {
+	IRIS_EVT_RADIO_READY,
+	IRIS_EVT_TUNE_SUCC,
+	IRIS_EVT_SEEK_COMPLETE,
+	IRIS_EVT_SCAN_NEXT,
+	IRIS_EVT_NEW_RAW_RDS,
+	IRIS_EVT_NEW_RT_RDS,
+	IRIS_EVT_NEW_PS_RDS,
+	IRIS_EVT_ERROR,
+	IRIS_EVT_BELOW_TH,
+	IRIS_EVT_ABOVE_TH,
+	IRIS_EVT_STEREO,
+	IRIS_EVT_MONO,
+	IRIS_EVT_RDS_AVAIL,
+	IRIS_EVT_RDS_NOT_AVAIL,
+	IRIS_EVT_NEW_SRCH_LIST,
+	IRIS_EVT_NEW_AF_LIST,
+	IRIS_EVT_TXRDSDAT,
+	IRIS_EVT_TXRDSDONE,
+	IRIS_EVT_RADIO_DISABLED,
+	IRIS_EVT_NEW_ODA,
+	IRIS_EVT_NEW_RT_PLUS,
+	IRIS_EVT_NEW_ERT,
+	IRIS_EVT_SPUR_TBL,
+};
+#endif
diff --git a/include/uapi/media/radio-iris.h b/include/uapi/media/radio-iris.h
new file mode 100644
index 0000000..015016c
--- /dev/null
+++ b/include/uapi/media/radio-iris.h
@@ -0,0 +1,813 @@
+#ifndef __UAPI_RADIO_IRIS_H
+#define __UAPI_RADIO_IRIS_H
+
+#include <linux/types.h>
+#include <media/radio-iris-commands.h>
+
+#define MIN_TX_TONE_VAL  0x00
+#define MAX_TX_TONE_VAL  0x07
+#define MIN_HARD_MUTE_VAL  0x00
+#define MAX_HARD_MUTE_VAL  0x03
+#define MIN_SRCH_MODE  0x00
+#define MAX_SRCH_MODE  0x09
+#define MIN_SCAN_DWELL  0x00
+#define MAX_SCAN_DWELL  0x0F
+#define MIN_SIG_TH  0x00
+#define MAX_SIG_TH  0x03
+#define MIN_PTY  0X00
+#define MAX_PTY  0x1F
+#define MIN_PI  0x0000
+#define MAX_PI  0xFFFF
+#define MIN_SRCH_STATIONS_CNT  0x00
+#define MAX_SRCH_STATIONS_CNT  0x14
+#define MIN_CHAN_SPACING  0x00
+#define MAX_CHAN_SPACING  0x02
+#define MIN_EMPHASIS  0x00
+#define MAX_EMPHASIS  0x01
+#define MIN_RDS_STD  0x00
+#define MAX_RDS_STD  0x02
+#define MIN_ANTENNA_VAL  0x00
+#define MAX_ANTENNA_VAL  0x01
+#define MIN_TX_PS_REPEAT_CNT  0x01
+#define MAX_TX_PS_REPEAT_CNT  0x0F
+#define MIN_SOFT_MUTE  0x00
+#define MAX_SOFT_MUTE  0x01
+#define MIN_PEEK_ACCESS_LEN  0x01
+#define MAX_PEEK_ACCESS_LEN  0xF9
+#define MIN_RESET_CNTR  0x00
+#define MAX_RESET_CNTR  0x01
+#define MIN_HLSI  0x00
+#define MAX_HLSI  0x02
+#define MIN_NOTCH_FILTER  0x00
+#define MAX_NOTCH_FILTER  0x02
+#define MIN_INTF_DET_OUT_LW_TH  0x00
+#define MAX_INTF_DET_OUT_LW_TH  0xFF
+#define MIN_INTF_DET_OUT_HG_TH  0x00
+#define MAX_INTF_DET_OUT_HG_TH  0xFF
+#define MIN_SINR_TH  -128
+#define MAX_SINR_TH  127
+#define MIN_SINR_SAMPLES  0x01
+#define MAX_SINR_SAMPLES  0xFF
+#define MIN_BLEND_HI  -128
+#define MAX_BLEND_HI  127
+
+
+/* ---- HCI Packet structures ---- */
+#define RADIO_HCI_COMMAND_HDR_SIZE sizeof(struct radio_hci_command_hdr)
+#define RADIO_HCI_EVENT_HDR_SIZE   sizeof(struct radio_hci_event_hdr)
+
+/* HCI data types */
+#define RADIO_HCI_COMMAND_PKT   0x11
+#define RADIO_HCI_EVENT_PKT     0x14
+/*HCI reponce packets*/
+#define MAX_RIVA_PEEK_RSP_SIZE   251
+/* default data access */
+#define DEFAULT_DATA_OFFSET 2
+#define DEFAULT_DATA_SIZE 249
+/* Power levels are 0-7, but SOC will expect values from 0-255
+ * So the each level step size will be 255/7 = 36
+ */
+#define FM_TX_PWR_LVL_STEP_SIZE 36
+#define FM_TX_PWR_LVL_0         0 /* Lowest power lvl that can be set for Tx */
+#define FM_TX_PWR_LVL_MAX       7 /* Max power lvl for Tx */
+#define FM_TX_PHY_CFG_MODE   0x3c
+#define FM_TX_PHY_CFG_LEN    0x10
+#define FM_TX_PWR_GAIN_OFFSET 14
+/**RDS CONFIG MODE**/
+#define FM_RDS_CNFG_MODE	0x0f
+#define FM_RDS_CNFG_LEN		0x10
+#define AF_RMSSI_TH_LSB_OFFSET	10
+#define AF_RMSSI_TH_MSB_OFFSET	11
+#define AF_RMSSI_SAMPLES_OFFSET	15
+/**RX CONFIG MODE**/
+#define FM_RX_CONFG_MODE	0x15
+#define FM_RX_CNFG_LEN		0x20
+#define GD_CH_RMSSI_TH_OFFSET	12
+#define MAX_GD_CH_RMSSI_TH	127
+#define SRCH_ALGO_TYPE_OFFSET  25
+#define SINRFIRSTSTAGE_OFFSET  26
+#define RMSSIFIRSTSTAGE_OFFSET 27
+#define CF0TH12_BYTE1_OFFSET   8
+#define CF0TH12_BYTE2_OFFSET   9
+#define CF0TH12_BYTE3_OFFSET   10
+#define CF0TH12_BYTE4_OFFSET   11
+#define MAX_SINR_FIRSTSTAGE	127
+#define MAX_RMSSI_FIRSTSTAGE	127
+#define RDS_PS0_XFR_MODE 0x01
+#define RDS_PS0_LEN 6
+#define RX_REPEATE_BYTE_OFFSET 5
+#define FM_SPUR_TBL_SIZE 240
+#define SPUR_DATA_LEN 16
+#define ENTRIES_EACH_CMD 15
+#define SPUR_DATA_INDEX 2
+#define FM_AF_LIST_MAX_SIZE   200
+/* Each AF frequency consist of sizeof(int) bytes */
+#define AF_LIST_MAX     (FM_AF_LIST_MAX_SIZE / 4)
+
+#define MAX_BLEND_INDEX 49
+
+#define TUNE_PARAM 16
+#define FM_RDS_3A_GRP (0x40)
+struct radio_hci_command_hdr {
+	__le16	opcode;		/* OCF & OGF */
+	__u8	plen;
+} __packed;
+
+struct radio_hci_event_hdr {
+	__u8	evt;
+	__u8	plen;
+} __packed;
+
+/* Opcode OCF */
+/* HCI recv control commands opcode */
+#define HCI_OCF_FM_ENABLE_RECV_REQ          0x0001
+#define HCI_OCF_FM_DISABLE_RECV_REQ         0x0002
+#define HCI_OCF_FM_GET_RECV_CONF_REQ        0x0003
+#define HCI_OCF_FM_SET_RECV_CONF_REQ        0x0004
+#define HCI_OCF_FM_SET_MUTE_MODE_REQ        0x0005
+#define HCI_OCF_FM_SET_STEREO_MODE_REQ      0x0006
+#define HCI_OCF_FM_SET_ANTENNA              0x0007
+#define HCI_OCF_FM_SET_SIGNAL_THRESHOLD     0x0008
+#define HCI_OCF_FM_GET_SIGNAL_THRESHOLD     0x0009
+#define HCI_OCF_FM_GET_STATION_PARAM_REQ    0x000A
+#define HCI_OCF_FM_GET_PROGRAM_SERVICE_REQ  0x000B
+#define HCI_OCF_FM_GET_RADIO_TEXT_REQ       0x000C
+#define HCI_OCF_FM_GET_AF_LIST_REQ          0x000D
+#define HCI_OCF_FM_SEARCH_STATIONS          0x000E
+#define HCI_OCF_FM_SEARCH_RDS_STATIONS      0x000F
+#define HCI_OCF_FM_SEARCH_STATIONS_LIST     0x0010
+#define HCI_OCF_FM_CANCEL_SEARCH            0x0011
+#define HCI_OCF_FM_RDS_GRP                  0x0012
+#define HCI_OCF_FM_RDS_GRP_PROCESS          0x0013
+#define HCI_OCF_FM_EN_WAN_AVD_CTRL          0x0014
+#define HCI_OCF_FM_EN_NOTCH_CTRL            0x0015
+#define HCI_OCF_FM_SET_EVENT_MASK           0x0016
+#define HCI_OCF_FM_SET_CH_DET_THRESHOLD     0x0017
+#define HCI_OCF_FM_GET_CH_DET_THRESHOLD     0x0018
+#define HCI_OCF_FM_SET_BLND_TBL             0x001B
+#define HCI_OCF_FM_GET_BLND_TBL             0x001C
+/* HCI trans control commans opcode*/
+#define HCI_OCF_FM_ENABLE_TRANS_REQ         0x0001
+#define HCI_OCF_FM_DISABLE_TRANS_REQ        0x0002
+#define HCI_OCF_FM_GET_TRANS_CONF_REQ       0x0003
+#define HCI_OCF_FM_SET_TRANS_CONF_REQ       0x0004
+#define HCI_OCF_FM_RDS_RT_REQ               0x0008
+#define HCI_OCF_FM_RDS_PS_REQ               0x0009
+
+
+/* HCI common control commands opcode */
+#define HCI_OCF_FM_TUNE_STATION_REQ         0x0001
+#define HCI_OCF_FM_DEFAULT_DATA_READ        0x0002
+#define HCI_OCF_FM_DEFAULT_DATA_WRITE       0x0003
+#define HCI_OCF_FM_RESET                    0x0004
+#define HCI_OCF_FM_GET_FEATURE_LIST         0x0005
+#define HCI_OCF_FM_DO_CALIBRATION           0x0006
+#define HCI_OCF_FM_SET_CALIBRATION          0x0007
+#define HCI_OCF_FM_SET_SPUR_TABLE           0x0008
+#define HCI_OCF_FM_GET_SPUR_TABLE           0x0009
+
+/*HCI Status parameters commands*/
+#define HCI_OCF_FM_READ_GRP_COUNTERS        0x0001
+
+/*HCI Diagnostic commands*/
+#define HCI_OCF_FM_PEEK_DATA                0x0002
+#define HCI_OCF_FM_POKE_DATA                0x0003
+#define HCI_OCF_FM_SSBI_PEEK_REG            0x0004
+#define HCI_OCF_FM_SSBI_POKE_REG            0x0005
+#define HCI_OCF_FM_STATION_DBG_PARAM        0x0007
+#define HCI_FM_SET_INTERNAL_TONE_GENRATOR   0x0008
+
+/* Opcode OGF */
+#define HCI_OGF_FM_RECV_CTRL_CMD_REQ            0x0013
+#define HCI_OGF_FM_TRANS_CTRL_CMD_REQ           0x0014
+#define HCI_OGF_FM_COMMON_CTRL_CMD_REQ          0x0015
+#define HCI_OGF_FM_STATUS_PARAMETERS_CMD_REQ    0x0016
+#define HCI_OGF_FM_TEST_CMD_REQ                 0x0017
+#define HCI_OGF_FM_DIAGNOSTIC_CMD_REQ           0x003F
+
+/* Command opcode pack/unpack */
+#define hci_opcode_pack(ogf, ocf)  ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
+#define hci_opcode_ogf(op)		(op >> 10)
+#define hci_opcode_ocf(op)		(op & 0x03ff)
+#define hci_recv_ctrl_cmd_op_pack(ocf) \
+	((__u16) hci_opcode_pack(HCI_OGF_FM_RECV_CTRL_CMD_REQ, ocf))
+#define hci_trans_ctrl_cmd_op_pack(ocf) \
+	((__u16) hci_opcode_pack(HCI_OGF_FM_TRANS_CTRL_CMD_REQ, ocf))
+#define hci_common_cmd_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_COMMON_CTRL_CMD_REQ, ocf))
+#define hci_status_param_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_STATUS_PARAMETERS_CMD_REQ, ocf))
+#define hci_diagnostic_cmd_op_pack(ocf)	\
+	((__u16) hci_opcode_pack(HCI_OGF_FM_DIAGNOSTIC_CMD_REQ, ocf))
+
+
+/* HCI commands with no arguments*/
+#define HCI_FM_ENABLE_RECV_CMD 1
+#define HCI_FM_DISABLE_RECV_CMD 2
+#define HCI_FM_GET_RECV_CONF_CMD 3
+#define HCI_FM_GET_STATION_PARAM_CMD 4
+#define HCI_FM_GET_SIGNAL_TH_CMD 5
+#define HCI_FM_GET_PROGRAM_SERVICE_CMD 6
+#define HCI_FM_GET_RADIO_TEXT_CMD 7
+#define HCI_FM_GET_AF_LIST_CMD 8
+#define HCI_FM_CANCEL_SEARCH_CMD 9
+#define HCI_FM_RESET_CMD 10
+#define HCI_FM_GET_FEATURES_CMD 11
+#define HCI_FM_STATION_DBG_PARAM_CMD 12
+#define HCI_FM_ENABLE_TRANS_CMD 13
+#define HCI_FM_DISABLE_TRANS_CMD 14
+#define HCI_FM_GET_TX_CONFIG 15
+#define HCI_FM_GET_DET_CH_TH_CMD 16
+#define HCI_FM_GET_BLND_TBL_CMD 17
+
+/* Defines for FM TX*/
+#define TX_PS_DATA_LENGTH 108
+#define TX_RT_DATA_LENGTH 64
+#define PS_STRING_LEN     9
+
+/* ----- HCI Command request ----- */
+struct hci_fm_recv_conf_req {
+	__u8	emphasis;
+	__u8	ch_spacing;
+	__u8	rds_std;
+	__u8	hlsi;
+	__u32	band_low_limit;
+	__u32	band_high_limit;
+} __packed;
+
+/* ----- HCI Command request ----- */
+struct hci_fm_trans_conf_req_struct {
+	__u8	emphasis;
+	__u8	rds_std;
+	__u32	band_low_limit;
+	__u32	band_high_limit;
+} __packed;
+
+
+/* ----- HCI Command request ----- */
+struct hci_fm_tx_ps {
+	__u8    ps_control;
+	__u16	pi;
+	__u8	pty;
+	__u8	ps_repeatcount;
+	__u8	ps_num;
+	__u8    ps_data[TX_PS_DATA_LENGTH];
+} __packed;
+
+struct hci_fm_tx_rt {
+	__u8    rt_control;
+	__u16	pi;
+	__u8	pty;
+	__u8	rt_len;
+	__u8    rt_data[TX_RT_DATA_LENGTH];
+} __packed;
+
+struct hci_fm_mute_mode_req {
+	__u8	hard_mute;
+	__u8	soft_mute;
+} __packed;
+
+struct hci_fm_stereo_mode_req {
+	__u8    stereo_mode;
+	__u8    sig_blend;
+	__u8    intf_blend;
+	__u8    most_switch;
+} __packed;
+
+struct hci_fm_search_station_req {
+	__u8    srch_mode;
+	__u8    scan_time;
+	__u8    srch_dir;
+} __packed;
+
+struct hci_fm_search_rds_station_req {
+	struct hci_fm_search_station_req srch_station;
+	__u8    srch_pty;
+	__u16   srch_pi;
+} __packed;
+
+struct hci_fm_search_station_list_req {
+	__u8    srch_list_mode;
+	__u8    srch_list_dir;
+	__u32   srch_list_max;
+	__u8    srch_pty;
+} __packed;
+
+struct hci_fm_rds_grp_req {
+	__u32   rds_grp_enable_mask;
+	__u32   rds_buf_size;
+	__u8    en_rds_change_filter;
+} __packed;
+
+struct hci_fm_en_avd_ctrl_req {
+	__u8    no_freqs;
+	__u8    freq_index;
+	__u8    lo_shft;
+	__u16   freq_min;
+	__u16   freq_max;
+} __packed;
+
+struct hci_fm_def_data_rd_req {
+	__u8    mode;
+	__u8    length;
+	__u8    param_len;
+	__u8    param;
+} __packed;
+
+struct hci_fm_def_data_wr_req {
+	__u8    mode;
+	__u8    length;
+	__u8   data[DEFAULT_DATA_SIZE];
+} __packed;
+
+struct hci_fm_riva_data {
+	__u8 subopcode;
+	__u32   start_addr;
+	__u8    length;
+} __packed;
+
+struct hci_fm_riva_poke {
+	struct hci_fm_riva_data cmd_params;
+	__u8    data[MAX_RIVA_PEEK_RSP_SIZE];
+} __packed;
+
+struct hci_fm_ssbi_req {
+	__u16   start_addr;
+	__u8    data;
+} __packed;
+struct hci_fm_ssbi_peek {
+	__u16 start_address;
+} __packed;
+
+struct hci_fm_ch_det_threshold {
+	char sinr;
+	__u8 sinr_samples;
+	__u8 low_th;
+	__u8 high_th;
+
+} __packed;
+
+struct hci_fm_blend_table {
+	__u8 ucBlendType;
+	__u8 ucBlendRampRateUp;
+	__u8 ucBlendDebounceNumSampleUp;
+	__u8 ucBlendDebounceIdxUp;
+	__u8 ucBlendSinrIdxSkipStep;
+	__u8 scBlendSinrHi;
+	__u8 scBlendRmssiHi;
+	__u8 ucBlendIndexHi;
+	__u8 ucBlendIndex[MAX_BLEND_INDEX];
+} __packed;
+
+/*HCI events*/
+#define HCI_EV_TUNE_STATUS              0x01
+#define HCI_EV_RDS_LOCK_STATUS          0x02
+#define HCI_EV_STEREO_STATUS            0x03
+#define HCI_EV_SERVICE_AVAILABLE        0x04
+#define HCI_EV_SEARCH_PROGRESS          0x05
+#define HCI_EV_SEARCH_RDS_PROGRESS      0x06
+#define HCI_EV_SEARCH_LIST_PROGRESS     0x07
+#define HCI_EV_RDS_RX_DATA              0x08
+#define HCI_EV_PROGRAM_SERVICE          0x09
+#define HCI_EV_RADIO_TEXT               0x0A
+#define HCI_EV_FM_AF_LIST               0x0B
+#define HCI_EV_TX_RDS_GRP_AVBLE         0x0C
+#define HCI_EV_TX_RDS_GRP_COMPL         0x0D
+#define HCI_EV_TX_RDS_CONT_GRP_COMPL    0x0E
+#define HCI_EV_CMD_COMPLETE             0x0F
+#define HCI_EV_CMD_STATUS               0x10
+#define HCI_EV_TUNE_COMPLETE            0x11
+#define HCI_EV_SEARCH_COMPLETE          0x12
+#define HCI_EV_SEARCH_RDS_COMPLETE      0x13
+#define HCI_EV_SEARCH_LIST_COMPLETE     0x14
+
+#define HCI_REQ_DONE	  0
+#define HCI_REQ_PEND	  1
+#define HCI_REQ_CANCELED  2
+#define HCI_REQ_STATUS    3
+
+#define MAX_RAW_RDS_GRPS	21
+
+#define RDSGRP_DATA_OFFSET	 0x1
+
+/*RT PLUS*/
+#define DUMMY_CLASS		0
+#define RT_PLUS_LEN_1_TAG	3
+#define RT_ERT_FLAG_BIT		5
+
+/*TAG1*/
+#define TAG1_MSB_OFFSET		3
+#define TAG1_MSB_MASK		7
+#define TAG1_LSB_OFFSET		5
+#define TAG1_POS_MSB_MASK	31
+#define TAG1_POS_MSB_OFFSET	1
+#define TAG1_POS_LSB_OFFSET	7
+#define TAG1_LEN_OFFSET		1
+#define TAG1_LEN_MASK		63
+
+/*TAG2*/
+#define TAG2_MSB_OFFSET		5
+#define TAG2_MSB_MASK		1
+#define TAG2_LSB_OFFSET		3
+#define TAG2_POS_MSB_MASK	7
+#define TAG2_POS_MSB_OFFSET	3
+#define TAG2_POS_LSB_OFFSET	5
+#define TAG2_LEN_MASK		31
+
+#define AGT_MASK		31
+/*Extract 5 left most bits of lsb of 2nd block*/
+#define AGT(x)			(x & AGT_MASK)
+/*16 bits of 4th block*/
+#define AID(lsb, msb)		((msb << 8) | (lsb))
+/*Extract 5 right most bits of msb of 2nd block*/
+#define GTC(blk2msb)		(blk2msb >> 3)
+
+#define GRP_3A			0x6
+#define RT_PLUS_AID		0x4bd7
+
+/*ERT*/
+#define ERT_AID			0x6552
+#define CARRIAGE_RETURN		0x000D
+#define MAX_ERT_SEGMENT		31
+#define ERT_FORMAT_DIR_BIT	1
+
+#define EXTRACT_BIT(data, bit_pos) ((data & (1 << bit_pos)) >> bit_pos)
+
+struct hci_ev_tune_status {
+	__u8    sub_event;
+	__le32  station_freq;
+	__u8    serv_avble;
+	char    rssi;
+	__u8    stereo_prg;
+	__u8    rds_sync_status;
+	__u8    mute_mode;
+	char    sinr;
+	__u8	intf_det_th;
+} __packed;
+
+struct rds_blk_data {
+	__u8	rdsMsb;
+	__u8	rdsLsb;
+	__u8	blockStatus;
+} __packed;
+
+struct rds_grp_data {
+	struct rds_blk_data rdsBlk[4];
+} __packed;
+
+struct hci_ev_rds_rx_data {
+	__u8    num_rds_grps;
+	struct  rds_grp_data rds_grp_data[MAX_RAW_RDS_GRPS];
+} __packed;
+
+struct hci_ev_prg_service {
+	__le16   pi_prg_id;
+	__u8    pty_prg_type;
+	__u8    ta_prg_code_type;
+	__u8    ta_ann_code_flag;
+	__u8    ms_switch_code_flag;
+	__u8    dec_id_ctrl_code_flag;
+	__u8    ps_num;
+	__u8    prg_service_name[119];
+} __packed;
+
+struct hci_ev_radio_text {
+	__le16   pi_prg_id;
+	__u8    pty_prg_type;
+	__u8    ta_prg_code_type;
+	__u8    txt_ab_flag;
+	__u8    radio_txt[64];
+} __packed;
+
+struct hci_ev_af_list {
+	__le32   tune_freq;
+	__le16   pi_code;
+	__u8    af_size;
+	__u8    af_list[FM_AF_LIST_MAX_SIZE];
+} __packed;
+
+struct hci_ev_cmd_complete {
+	__u8    num_hci_cmd_pkts;
+	__le16   cmd_opcode;
+} __packed;
+
+struct hci_ev_cmd_status {
+	__u8    status;
+	__u8    num_hci_cmd_pkts;
+	__le16   status_opcode;
+} __packed;
+
+struct hci_ev_srch_st {
+	__le32    station_freq;
+	__u8    rds_cap;
+	__u8   pty;
+	__le16   status_opcode;
+} __packed;
+
+struct hci_ev_rel_freq {
+	__u8  rel_freq_msb;
+	__u8  rel_freq_lsb;
+
+} __packed;
+struct hci_ev_srch_list_compl {
+	__u8    num_stations_found;
+	struct hci_ev_rel_freq  rel_freq[20];
+} __packed;
+
+/* ----- HCI Event Response ----- */
+struct hci_fm_conf_rsp {
+	__u8    status;
+	struct hci_fm_recv_conf_req recv_conf_rsp;
+} __packed;
+
+struct hci_fm_get_trans_conf_rsp {
+	__u8    status;
+	struct hci_fm_trans_conf_req_struct trans_conf_rsp;
+} __packed;
+struct hci_fm_sig_threshold_rsp {
+	__u8    status;
+	__u8    sig_threshold;
+} __packed;
+
+struct hci_fm_station_rsp {
+	struct hci_ev_tune_status station_rsp;
+} __packed;
+
+struct hci_fm_prgm_srv_rsp {
+	__u8    status;
+	struct hci_ev_prg_service prg_srv;
+} __packed;
+
+struct hci_fm_radio_txt_rsp {
+	__u8    status;
+	struct hci_ev_radio_text rd_txt;
+} __packed;
+
+struct hci_fm_af_list_rsp {
+	__u8    status;
+	struct hci_ev_af_list rd_txt;
+} __packed;
+
+struct hci_fm_data_rd_rsp {
+	__u8    status;
+	__u8    ret_data_len;
+	__u8    data[DEFAULT_DATA_SIZE];
+} __packed;
+
+struct hci_fm_feature_list_rsp {
+	__u8    status;
+	__u8    feature_mask;
+} __packed;
+
+struct hci_fm_dbg_param_rsp {
+	__u8    status;
+	__u8    blend;
+	__u8    soft_mute;
+	__u8    inf_blend;
+	__u8    inf_soft_mute;
+	__u8    pilot_pil;
+	__u8    io_verc;
+	__u8    in_det_out;
+} __packed;
+
+#define CLKSPURID_INDEX0	0
+#define CLKSPURID_INDEX1	5
+#define CLKSPURID_INDEX2	10
+#define CLKSPURID_INDEX3	15
+#define CLKSPURID_INDEX4	20
+#define CLKSPURID_INDEX5	25
+
+#define MAX_SPUR_FREQ_LIMIT	30
+#define CKK_SPUR		0x3B
+#define SPUR_DATA_SIZE		0x4
+#define SPUR_ENTRIES_PER_ID	0x5
+
+#define COMPUTE_SPUR(val)         ((((val) - (76000)) / (50)))
+#define GET_FREQ(val, bit)        ((bit == 1) ? ((val) >> 8) : ((val) & 0xFF))
+#define GET_SPUR_ENTRY_LEVEL(val) ((val) / (5))
+
+struct hci_fm_spur_data {
+	__u32	freq[MAX_SPUR_FREQ_LIMIT];
+	__s8	rmssi[MAX_SPUR_FREQ_LIMIT];
+	__u8	enable[MAX_SPUR_FREQ_LIMIT];
+} __packed;
+
+
+/* HCI dev events */
+#define RADIO_HCI_DEV_REG			1
+#define RADIO_HCI_DEV_WRITE			2
+
+/* FM RDS */
+#define RDS_PTYPE 2
+#define RDS_PID_LOWER 1
+#define RDS_PID_HIGHER 0
+#define RDS_OFFSET 5
+#define RDS_PS_LENGTH_OFFSET 7
+#define RDS_STRING 8
+#define RDS_PS_DATA_OFFSET 8
+#define RDS_CONFIG_OFFSET  3
+#define RDS_AF_JUMP_OFFSET 4
+#define PI_CODE_OFFSET 4
+#define AF_SIZE_OFFSET 6
+#define AF_LIST_OFFSET 7
+#define RT_A_B_FLAG_OFFSET 4
+/*FM states*/
+
+enum radio_state_t {
+	FM_OFF,
+	FM_RECV,
+	FM_TRANS,
+	FM_RESET,
+	FM_CALIB,
+	FM_TURNING_OFF,
+	FM_RECV_TURNING_ON,
+	FM_TRANS_TURNING_ON,
+	FM_MAX_NO_STATES,
+};
+
+enum emphasis_type {
+	FM_RX_EMP75 = 0x0,
+	FM_RX_EMP50 = 0x1
+};
+
+enum channel_space_type {
+	FM_RX_SPACE_200KHZ = 0x0,
+	FM_RX_SPACE_100KHZ = 0x1,
+	FM_RX_SPACE_50KHZ = 0x2
+};
+
+enum high_low_injection {
+	AUTO_HI_LO_INJECTION = 0x0,
+	LOW_SIDE_INJECTION = 0x1,
+	HIGH_SIDE_INJECTION = 0x2
+};
+
+enum fm_rds_type {
+	FM_RX_RDBS_SYSTEM = 0x0,
+	FM_RX_RDS_SYSTEM = 0x1
+};
+
+enum iris_region_t {
+	IRIS_REGION_US,
+	IRIS_REGION_EU,
+	IRIS_REGION_JAPAN,
+	IRIS_REGION_JAPAN_WIDE,
+	IRIS_REGION_OTHER
+};
+
+#define STD_BUF_SIZE        (256)
+
+enum iris_buf_t {
+	IRIS_BUF_SRCH_LIST,
+	IRIS_BUF_EVENTS,
+	IRIS_BUF_RT_RDS,
+	IRIS_BUF_PS_RDS,
+	IRIS_BUF_RAW_RDS,
+	IRIS_BUF_AF_LIST,
+	IRIS_BUF_PEEK,
+	IRIS_BUF_SSBI_PEEK,
+	IRIS_BUF_RDS_CNTRS,
+	IRIS_BUF_RD_DEFAULT,
+	IRIS_BUF_CAL_DATA,
+	IRIS_BUF_RT_PLUS,
+	IRIS_BUF_ERT,
+	IRIS_BUF_SPUR,
+	IRIS_BUF_MAX,
+};
+
+enum iris_xfr_t {
+	IRIS_XFR_SYNC,
+	IRIS_XFR_ERROR,
+	IRIS_XFR_SRCH_LIST,
+	IRIS_XFR_RT_RDS,
+	IRIS_XFR_PS_RDS,
+	IRIS_XFR_AF_LIST,
+	IRIS_XFR_MAX
+};
+
+/* Search options */
+enum search_t {
+	SEEK,
+	SCAN,
+	SCAN_FOR_STRONG,
+	SCAN_FOR_WEAK,
+	RDS_SEEK_PTY,
+	RDS_SCAN_PTY,
+	RDS_SEEK_PI,
+	RDS_AF_JUMP,
+};
+
+enum spur_entry_levels {
+	ENTRY_0,
+	ENTRY_1,
+	ENTRY_2,
+	ENTRY_3,
+	ENTRY_4,
+	ENTRY_5,
+};
+
+/* Band limits */
+#define REGION_US_EU_BAND_LOW              87500
+#define REGION_US_EU_BAND_HIGH             108000
+#define REGION_JAPAN_STANDARD_BAND_LOW     76000
+#define REGION_JAPAN_STANDARD_BAND_HIGH    90000
+#define REGION_JAPAN_WIDE_BAND_LOW         90000
+#define REGION_JAPAN_WIDE_BAND_HIGH        108000
+
+#define SRCH_MODE	0x07
+#define SRCH_DIR	0x08 /* 0-up 1-down */
+#define SCAN_DWELL	0x70
+#define SRCH_ON		0x80
+
+/* I/O Control */
+#define IOC_HRD_MUTE	0x03
+#define IOC_SFT_MUTE	0x01
+#define IOC_MON_STR	0x01
+#define IOC_SIG_BLND	0x01
+#define IOC_INTF_BLND	0x01
+#define IOC_ANTENNA	0x01
+
+/* RDS Control */
+#define RDS_ON		0x01
+#define RDS_BUF_SZ  100
+
+/* constants */
+#define  RDS_BLOCKS_NUM	(4)
+#define BYTES_PER_BLOCK	(3)
+#define MAX_PS_LENGTH	(108)
+#define MAX_RT_LENGTH	(64)
+#define RDS_GRP_CNTR_LEN (36)
+#define RX_RT_DATA_LENGTH (63)
+/* Search direction */
+#define SRCH_DIR_UP		(0)
+#define SRCH_DIR_DOWN		(1)
+
+/*Search RDS stations*/
+#define SEARCH_RDS_STNS_MODE_OFFSET 4
+
+/*Search Station list */
+#define PARAMS_PER_STATION 0x08
+#define STN_NUM_OFFSET     0x01
+#define STN_FREQ_OFFSET    0x02
+#define KHZ_TO_MHZ         1000
+#define GET_MSB(x)((x >> 8) & 0xFF)
+#define GET_LSB(x)((x) & 0xFF)
+
+/* control options */
+#define CTRL_ON			(1)
+#define CTRL_OFF		(0)
+
+/*Diagnostic commands*/
+
+#define RIVA_PEEK_OPCODE 0x0D
+#define RIVA_POKE_OPCODE 0x0C
+
+#define PEEK_DATA_OFSET 0x1
+#define RIVA_PEEK_PARAM     0x6
+#define RIVA_PEEK_LEN_OFSET  0x6
+#define SSBI_PEEK_LEN    0x01
+/*Calibration data*/
+#define PROCS_CALIB_MODE  1
+#define PROCS_CALIB_SIZE  23
+#define DC_CALIB_MODE     2
+#define DC_CALIB_SIZE     48
+#define RSB_CALIB_MODE    3
+#define RSB_CALIB_SIZE    4
+#define CALIB_DATA_OFSET  2
+#define CALIB_MODE_OFSET  1
+#define MAX_CALIB_SIZE 75
+
+/* Channel validity */
+#define INVALID_CHANNEL		(0)
+#define VALID_CHANNEL		(1)
+
+struct hci_fm_set_cal_req_proc {
+	__u8    mode;
+	/*Max process calibration data size*/
+	__u8    data[PROCS_CALIB_SIZE];
+} __packed;
+
+struct hci_fm_set_cal_req_dc {
+	__u8    mode;
+	/*Max DC calibration data size*/
+	__u8    data[DC_CALIB_SIZE];
+} __packed;
+
+struct hci_cc_do_calibration_rsp {
+	__u8 status;
+	__u8 mode;
+	__u8 data[MAX_CALIB_SIZE];
+} __packed;
+
+struct hci_fm_set_spur_table_req {
+	__u8 mode;
+	__u8 no_of_freqs_entries;
+	__u8 spur_data[FM_SPUR_TBL_SIZE];
+} __packed;
+/* Low Power mode*/
+#define SIG_LEVEL_INTR  (1 << 0)
+#define RDS_SYNC_INTR   (1 << 1)
+#define AUDIO_CTRL_INTR (1 << 2)
+#define AF_JUMP_ENABLE  (1 << 4)
+
+#endif
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index d791e0a..fad00e0 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += fc/
+header-y += ufs/
 header-y += scsi_bsg_fc.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
diff --git a/include/uapi/video/Kbuild b/include/uapi/video/Kbuild
index ac7203b..b98fa51 100644
--- a/include/uapi/video/Kbuild
+++ b/include/uapi/video/Kbuild
@@ -1,4 +1,6 @@
 # UAPI Header export list
 header-y += edid.h
+header-y += msm_hdmi_hdcp_mgr.h
+header-y += msm_hdmi_modes.h
 header-y += sisfb.h
 header-y += uvesafb.h
diff --git a/include/uapi/video/msm_hdmi_hdcp_mgr.h b/include/uapi/video/msm_hdmi_hdcp_mgr.h
new file mode 100644
index 0000000..85fa918
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_hdcp_mgr.h
@@ -0,0 +1,54 @@
+#ifndef _UAPI__MSM_HDMI_HDCP_MGR_H
+#define _UAPI__MSM_HDMI_HDCP_MGR_H
+
+enum DS_TYPE {  /* type of downstream device */
+	DS_UNKNOWN,
+	DS_RECEIVER,
+	DS_REPEATER,
+};
+
+enum {
+	MSG_ID_IDX,
+	RET_CODE_IDX,
+	HEADER_LEN,
+};
+
+enum RET_CODE {
+	HDCP_NOT_AUTHED,
+	HDCP_AUTHED,
+	HDCP_DISABLE,
+};
+
+enum MSG_ID { /* List of functions expected to be called after it */
+	DOWN_CHECK_TOPOLOGY,
+	UP_REQUEST_TOPOLOGY,
+	UP_SEND_TOPOLOGY,
+	DOWN_REQUEST_TOPOLOGY,
+	MSG_NUM,
+};
+
+enum SOURCE_ID {
+	HDCP_V1_TX,
+	HDCP_V1_RX,
+	HDCP_V2_RX,
+	HDCP_V2_TX,
+	SRC_NUM,
+};
+
+/*
+ * how to parse sysfs params buffer
+ * from hdcp_tx driver.
+ */
+
+struct HDCP_V2V1_MSG_TOPOLOGY {
+	/* indicates downstream's type */
+	uint32_t ds_type;
+	uint8_t bksv[5];
+	uint8_t dev_count;
+	uint8_t depth;
+	uint8_t ksv_list[5 * 127];
+	uint32_t max_cascade_exceeded;
+	uint32_t max_dev_exceeded;
+};
+
+#endif /* _UAPI__MSM_HDMI_HDCP_MGR_H */
diff --git a/include/uapi/video/msm_hdmi_modes.h b/include/uapi/video/msm_hdmi_modes.h
new file mode 100644
index 0000000..8a02997
--- /dev/null
+++ b/include/uapi/video/msm_hdmi_modes.h
@@ -0,0 +1,559 @@
+#ifndef _UAPI_MSM_HDMI_MODES_H__
+#define _UAPI_MSM_HDMI_MODES_H__
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define MSM_HDMI_RGB_888_24BPP_FORMAT       (1 << 0)
+#define MSM_HDMI_YUV_420_12BPP_FORMAT       (1 << 1)
+
+enum aspect_ratio {
+	HDMI_RES_AR_INVALID,
+	HDMI_RES_AR_4_3,
+	HDMI_RES_AR_5_4,
+	HDMI_RES_AR_16_9,
+	HDMI_RES_AR_16_10,
+	HDMI_RES_AR_64_27,
+	HDMI_RES_AR_256_135,
+	HDMI_RES_AR_MAX,
+};
+
+enum msm_hdmi_s3d_mode {
+	HDMI_S3D_NONE,
+	HDMI_S3D_SIDE_BY_SIDE,
+	HDMI_S3D_TOP_AND_BOTTOM,
+	HDMI_S3D_FRAME_PACKING,
+	HDMI_S3D_MAX,
+};
+
+struct msm_hdmi_mode_timing_info {
+	uint32_t	video_format;
+	uint32_t	active_h;
+	uint32_t	front_porch_h;
+	uint32_t	pulse_width_h;
+	uint32_t	back_porch_h;
+	uint32_t	active_low_h;
+	uint32_t	active_v;
+	uint32_t	front_porch_v;
+	uint32_t	pulse_width_v;
+	uint32_t	back_porch_v;
+	uint32_t	active_low_v;
+	/* Must divide by 1000 to get the actual frequency in MHZ */
+	uint32_t	pixel_freq;
+	/* Must divide by 1000 to get the actual frequency in HZ */
+	uint32_t	refresh_rate;
+	uint32_t	interlaced;
+	uint32_t	supported;
+	enum aspect_ratio ar;
+	/* Flags indicating support for specific pixel formats */
+	uint32_t        pixel_formats;
+};
+
+#define MSM_HDMI_INIT_RES_PAGE          1
+
+#define MSM_HDMI_MODES_CEA		(1 << 0)
+#define MSM_HDMI_MODES_XTND		(1 << 1)
+#define MSM_HDMI_MODES_DVI		(1 << 2)
+#define MSM_HDMI_MODES_ALL		(MSM_HDMI_MODES_CEA |\
+					 MSM_HDMI_MODES_XTND |\
+					 MSM_HDMI_MODES_DVI)
+
+/* all video formats defined by CEA 861D */
+#define HDMI_VFRMT_UNKNOWN		0
+#define HDMI_VFRMT_640x480p60_4_3	1
+#define HDMI_VFRMT_720x480p60_4_3	2
+#define HDMI_VFRMT_720x480p60_16_9	3
+#define HDMI_VFRMT_1280x720p60_16_9	4
+#define HDMI_VFRMT_1920x1080i60_16_9	5
+#define HDMI_VFRMT_720x480i60_4_3	6
+#define HDMI_VFRMT_1440x480i60_4_3	HDMI_VFRMT_720x480i60_4_3
+#define HDMI_VFRMT_720x480i60_16_9	7
+#define HDMI_VFRMT_1440x480i60_16_9	HDMI_VFRMT_720x480i60_16_9
+#define HDMI_VFRMT_720x240p60_4_3	8
+#define HDMI_VFRMT_1440x240p60_4_3	HDMI_VFRMT_720x240p60_4_3
+#define HDMI_VFRMT_720x240p60_16_9	9
+#define HDMI_VFRMT_1440x240p60_16_9	HDMI_VFRMT_720x240p60_16_9
+#define HDMI_VFRMT_2880x480i60_4_3	10
+#define HDMI_VFRMT_2880x480i60_16_9	11
+#define HDMI_VFRMT_2880x240p60_4_3	12
+#define HDMI_VFRMT_2880x240p60_16_9	13
+#define HDMI_VFRMT_1440x480p60_4_3	14
+#define HDMI_VFRMT_1440x480p60_16_9	15
+#define HDMI_VFRMT_1920x1080p60_16_9	16
+#define HDMI_VFRMT_720x576p50_4_3	17
+#define HDMI_VFRMT_720x576p50_16_9	18
+#define HDMI_VFRMT_1280x720p50_16_9	19
+#define HDMI_VFRMT_1920x1080i50_16_9	20
+#define HDMI_VFRMT_720x576i50_4_3	21
+#define HDMI_VFRMT_1440x576i50_4_3	HDMI_VFRMT_720x576i50_4_3
+#define HDMI_VFRMT_720x576i50_16_9	22
+#define HDMI_VFRMT_1440x576i50_16_9	HDMI_VFRMT_720x576i50_16_9
+#define HDMI_VFRMT_720x288p50_4_3	23
+#define HDMI_VFRMT_1440x288p50_4_3	HDMI_VFRMT_720x288p50_4_3
+#define HDMI_VFRMT_720x288p50_16_9	24
+#define HDMI_VFRMT_1440x288p50_16_9	HDMI_VFRMT_720x288p50_16_9
+#define HDMI_VFRMT_2880x576i50_4_3	25
+#define HDMI_VFRMT_2880x576i50_16_9	26
+#define HDMI_VFRMT_2880x288p50_4_3	27
+#define HDMI_VFRMT_2880x288p50_16_9	28
+#define HDMI_VFRMT_1440x576p50_4_3	29
+#define HDMI_VFRMT_1440x576p50_16_9	30
+#define HDMI_VFRMT_1920x1080p50_16_9	31
+#define HDMI_VFRMT_1920x1080p24_16_9	32
+#define HDMI_VFRMT_1920x1080p25_16_9	33
+#define HDMI_VFRMT_1920x1080p30_16_9	34
+#define HDMI_VFRMT_2880x480p60_4_3	35
+#define HDMI_VFRMT_2880x480p60_16_9	36
+#define HDMI_VFRMT_2880x576p50_4_3	37
+#define HDMI_VFRMT_2880x576p50_16_9	38
+#define HDMI_VFRMT_1920x1250i50_16_9	39
+#define HDMI_VFRMT_1920x1080i100_16_9	40
+#define HDMI_VFRMT_1280x720p100_16_9	41
+#define HDMI_VFRMT_720x576p100_4_3	42
+#define HDMI_VFRMT_720x576p100_16_9	43
+#define HDMI_VFRMT_720x576i100_4_3	44
+#define HDMI_VFRMT_1440x576i100_4_3	HDMI_VFRMT_720x576i100_4_3
+#define HDMI_VFRMT_720x576i100_16_9	45
+#define HDMI_VFRMT_1440x576i100_16_9	HDMI_VFRMT_720x576i100_16_9
+#define HDMI_VFRMT_1920x1080i120_16_9	46
+#define HDMI_VFRMT_1280x720p120_16_9	47
+#define HDMI_VFRMT_720x480p120_4_3	48
+#define HDMI_VFRMT_720x480p120_16_9	49
+#define HDMI_VFRMT_720x480i120_4_3	50
+#define HDMI_VFRMT_1440x480i120_4_3	HDMI_VFRMT_720x480i120_4_3
+#define HDMI_VFRMT_720x480i120_16_9	51
+#define HDMI_VFRMT_1440x480i120_16_9	HDMI_VFRMT_720x480i120_16_9
+#define HDMI_VFRMT_720x576p200_4_3	52
+#define HDMI_VFRMT_720x576p200_16_9	53
+#define HDMI_VFRMT_720x576i200_4_3	54
+#define HDMI_VFRMT_1440x576i200_4_3	HDMI_VFRMT_720x576i200_4_3
+#define HDMI_VFRMT_720x576i200_16_9	55
+#define HDMI_VFRMT_1440x576i200_16_9	HDMI_VFRMT_720x576i200_16_9
+#define HDMI_VFRMT_720x480p240_4_3	56
+#define HDMI_VFRMT_720x480p240_16_9	57
+#define HDMI_VFRMT_720x480i240_4_3	58
+#define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
+#define HDMI_VFRMT_720x480i240_16_9	59
+#define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
+#define HDMI_VFRMT_1280x720p24_16_9	60
+#define HDMI_VFRMT_1280x720p25_16_9	61
+#define HDMI_VFRMT_1280x720p30_16_9	62
+#define HDMI_VFRMT_1920x1080p120_16_9	63
+#define HDMI_VFRMT_1920x1080p100_16_9	64
+#define HDMI_VFRMT_1280x720p24_64_27    65
+#define HDMI_VFRMT_1280x720p25_64_27    66
+#define HDMI_VFRMT_1280x720p30_64_27    67
+#define HDMI_VFRMT_1280x720p50_64_27    68
+#define HDMI_VFRMT_1280x720p60_64_27    69
+#define HDMI_VFRMT_1280x720p100_64_27   70
+#define HDMI_VFRMT_1280x720p120_64_27   71
+#define HDMI_VFRMT_1920x1080p24_64_27   72
+#define HDMI_VFRMT_1920x1080p25_64_27   73
+#define HDMI_VFRMT_1920x1080p30_64_27   74
+#define HDMI_VFRMT_1920x1080p50_64_27   75
+#define HDMI_VFRMT_1920x1080p60_64_27   76
+#define HDMI_VFRMT_1920x1080p100_64_27  77
+#define HDMI_VFRMT_1920x1080p120_64_27  78
+#define HDMI_VFRMT_1680x720p24_64_27    79
+#define HDMI_VFRMT_1680x720p25_64_27    80
+#define HDMI_VFRMT_1680x720p30_64_27    81
+#define HDMI_VFRMT_1680x720p50_64_27    82
+#define HDMI_VFRMT_1680x720p60_64_27    83
+#define HDMI_VFRMT_1680x720p100_64_27   84
+#define HDMI_VFRMT_1680x720p120_64_27   85
+#define HDMI_VFRMT_2560x1080p24_64_27   86
+#define HDMI_VFRMT_2560x1080p25_64_27   87
+#define HDMI_VFRMT_2560x1080p30_64_27   88
+#define HDMI_VFRMT_2560x1080p50_64_27   89
+#define HDMI_VFRMT_2560x1080p60_64_27   90
+#define HDMI_VFRMT_2560x1080p100_64_27  91
+#define HDMI_VFRMT_2560x1080p120_64_27  92
+#define HDMI_VFRMT_3840x2160p24_16_9    93
+#define HDMI_VFRMT_3840x2160p25_16_9    94
+#define HDMI_VFRMT_3840x2160p30_16_9    95
+#define HDMI_VFRMT_3840x2160p50_16_9    96
+#define HDMI_VFRMT_3840x2160p60_16_9    97
+#define HDMI_VFRMT_4096x2160p24_256_135 98
+#define HDMI_VFRMT_4096x2160p25_256_135 99
+#define HDMI_VFRMT_4096x2160p30_256_135 100
+#define HDMI_VFRMT_4096x2160p50_256_135 101
+#define HDMI_VFRMT_4096x2160p60_256_135 102
+#define HDMI_VFRMT_3840x2160p24_64_27   103
+#define HDMI_VFRMT_3840x2160p25_64_27   104
+#define HDMI_VFRMT_3840x2160p30_64_27   105
+#define HDMI_VFRMT_3840x2160p50_64_27   106
+#define HDMI_VFRMT_3840x2160p60_64_27   107
+
+/* Video Identification Codes from 108-127 are reserved for the future */
+#define HDMI_VFRMT_END			127
+
+#define EVFRMT_OFF(x)			(HDMI_VFRMT_END + x)
+
+/* extended video formats */
+#define HDMI_EVFRMT_3840x2160p30_16_9	EVFRMT_OFF(1)
+#define HDMI_EVFRMT_3840x2160p25_16_9	EVFRMT_OFF(2)
+#define HDMI_EVFRMT_3840x2160p24_16_9	EVFRMT_OFF(3)
+#define HDMI_EVFRMT_4096x2160p24_16_9	EVFRMT_OFF(4)
+#define HDMI_EVFRMT_END			HDMI_EVFRMT_4096x2160p24_16_9
+
+#define WQXGA_OFF(x)			(HDMI_EVFRMT_END + x)
+
+/* WQXGA */
+#define HDMI_VFRMT_2560x1600p60_16_9	WQXGA_OFF(1)
+#define HDMI_WQXGAFRMT_END		HDMI_VFRMT_2560x1600p60_16_9
+
+#define WXGA_OFF(x)			(HDMI_WQXGAFRMT_END + x)
+
+/* WXGA */
+#define HDMI_VFRMT_1280x800p60_16_10	WXGA_OFF(1)
+#define HDMI_VFRMT_1366x768p60_16_10	WXGA_OFF(2)
+#define HDMI_WXGAFRMT_END		HDMI_VFRMT_1366x768p60_16_10
+
+#define ETI_OFF(x)			(HDMI_WXGAFRMT_END + x)
+
+/* ESTABLISHED TIMINGS I */
+#define HDMI_VFRMT_800x600p60_4_3	ETI_OFF(1)
+#define ETI_VFRMT_END			HDMI_VFRMT_800x600p60_4_3
+
+#define ETII_OFF(x)			(ETI_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS II */
+#define HDMI_VFRMT_1024x768p60_4_3	ETII_OFF(1)
+#define HDMI_VFRMT_1280x1024p60_5_4	ETII_OFF(2)
+#define ETII_VFRMT_END			HDMI_VFRMT_1280x1024p60_5_4
+
+#define ETIII_OFF(x)			(ETII_VFRMT_END + x)
+
+/* ESTABLISHED TIMINGS III */
+#define HDMI_VFRMT_848x480p60_16_9	ETIII_OFF(1)
+#define HDMI_VFRMT_1280x960p60_4_3	ETIII_OFF(2)
+#define HDMI_VFRMT_1360x768p60_16_9	ETIII_OFF(3)
+#define HDMI_VFRMT_1440x900p60_16_10	ETIII_OFF(4)
+#define HDMI_VFRMT_1400x1050p60_4_3	ETIII_OFF(5)
+#define HDMI_VFRMT_1680x1050p60_16_10	ETIII_OFF(6)
+#define HDMI_VFRMT_1600x1200p60_4_3	ETIII_OFF(7)
+#define HDMI_VFRMT_1920x1200p60_16_10	ETIII_OFF(8)
+#define ETIII_VFRMT_END			HDMI_VFRMT_1920x1200p60_16_10
+
+#define RESERVE_OFF(x)			(ETIII_VFRMT_END + x)
+
+#define HDMI_VFRMT_RESERVE1		RESERVE_OFF(1)
+#define HDMI_VFRMT_RESERVE2		RESERVE_OFF(2)
+#define HDMI_VFRMT_RESERVE3		RESERVE_OFF(3)
+#define HDMI_VFRMT_RESERVE4		RESERVE_OFF(4)
+#define HDMI_VFRMT_RESERVE5		RESERVE_OFF(5)
+#define HDMI_VFRMT_RESERVE6		RESERVE_OFF(6)
+#define HDMI_VFRMT_RESERVE7		RESERVE_OFF(7)
+#define HDMI_VFRMT_RESERVE8		RESERVE_OFF(8)
+#define RESERVE_VFRMT_END		HDMI_VFRMT_RESERVE8
+
+#define HDMI_VFRMT_MAX			(RESERVE_VFRMT_END + 1)
+
+/* Timing information for supported modes */
+#define VFRMT_NOT_SUPPORTED(VFRMT) \
+	{VFRMT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, false,		\
+		HDMI_RES_AR_INVALID}
+
+#define HDMI_VFRMT_640x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_640x480p60_4_3, 640, 16, 96, 48, true,		\
+	 480, 10, 2, 33, true, 25200, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_4_3_TIMING				\
+	{HDMI_VFRMT_720x480p60_4_3, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_720x480p60_16_9, 720, 16, 62, 60, true,		\
+	 480, 9, 6, 30, true, 27027, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p60_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p60_16_9, 1280, 110, 40, 220, false,	\
+	 720, 5, 5, 20, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080i60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 88, 44, 148, false,	\
+	 540, 2, 5, 5, false, 74250, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x480i60_4_3_TIMING				\
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x480i60_16_9_TIMING				\
+	{HDMI_VFRMT_1440x480i60_16_9, 1440, 38, 124, 114, true,		\
+	 240, 4, 3, 15, true, 27000, 60000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p60_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_720x576p50_4_3_TIMING				\
+	{HDMI_VFRMT_720x576p50_4_3, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_720x576p50_16_9_TIMING				\
+	{HDMI_VFRMT_720x576p50_16_9, 720, 12, 64, 68, true,		\
+	 576,  5, 5, 39, true, 27000, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x720p50_16_9_TIMING				\
+	{HDMI_VFRMT_1280x720p50_16_9, 1280, 440, 40, 220, false,	\
+	 720,  5, 5, 20, false, 74250, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x576i50_4_3_TIMING				\
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1440x576i50_16_9_TIMING				\
+	{HDMI_VFRMT_1440x576i50_16_9, 1440, 24, 126, 138, true,		\
+	 288,  2, 3, 19, true, 27000, 50000, true, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p50_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 148500, 50000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p24_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 638, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 24000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p25_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 528, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 25000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1920x1080p30_16_9_TIMING				\
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 88, 44, 148, false,	\
+	 1080, 4, 5, 36, false, 74250, 30000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1024x768p60_4_3_TIMING                               \
+	{HDMI_VFRMT_1024x768p60_4_3, 1024, 24, 136, 160, false,         \
+	768, 2, 6, 29, false, 65000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1280x1024p60_5_4_TIMING				\
+	{HDMI_VFRMT_1280x1024p60_5_4, 1280, 48, 112, 248, false,	\
+	1024, 1, 3, 38, false, 108000, 60000, false, true, HDMI_RES_AR_5_4, 0}
+#define HDMI_VFRMT_2560x1600p60_16_9_TIMING				\
+	{HDMI_VFRMT_2560x1600p60_16_9, 2560, 48, 32, 80, false,		\
+	 1600, 3, 6, 37, false, 268500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p30_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p25_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_3840x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_EVFRMT_4096x2160p24_16_9_TIMING				\
+	{HDMI_EVFRMT_4096x2160p24_16_9, 4096, 1020, 88, 296, false,	\
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_800x600p60_4_3_TIMING				\
+	{HDMI_VFRMT_800x600p60_4_3, 800, 40, 128, 88, false,	\
+	 600, 1, 4, 23, false, 40000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_848x480p60_16_9_TIMING				\
+	{HDMI_VFRMT_848x480p60_16_9, 848, 16, 112, 112, false,	\
+	 480, 6, 8, 23, false, 33750, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1280x960p60_4_3_TIMING\
+	{HDMI_VFRMT_1280x960p60_4_3, 1280, 96, 112, 312, false,	\
+	 960, 1, 3, 36, false, 108000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1360x768p60_16_9_TIMING\
+	{HDMI_VFRMT_1360x768p60_16_9, 1360, 64, 112, 256, false,	\
+	 768, 3, 6, 18, false, 85500, 60000, false, true, HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_1440x900p60_16_10_TIMING\
+	{HDMI_VFRMT_1440x900p60_16_10, 1440, 48, 32, 80, false,	\
+	 900, 3, 6, 17, true, 88750, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1400x1050p60_4_3_TIMING\
+	{HDMI_VFRMT_1400x1050p60_4_3, 1400, 48, 32, 80, false,	\
+	 1050, 3, 4, 23, true, 101000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1680x1050p60_16_10_TIMING\
+	{HDMI_VFRMT_1680x1050p60_16_10, 1680, 48, 32, 80, false,	\
+	 1050, 3, 6, 21, true, 119000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1600x1200p60_4_3_TIMING\
+	{HDMI_VFRMT_1600x1200p60_4_3, 1600, 64, 192, 304, false,	\
+	 1200, 1, 3, 46, false, 162000, 60000, false, true, HDMI_RES_AR_4_3, 0}
+#define HDMI_VFRMT_1920x1200p60_16_10_TIMING\
+	{HDMI_VFRMT_1920x1200p60_16_10, 1920, 48, 32, 80, false,\
+	 1200, 3, 6, 26, true, 154000, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1366x768p60_16_10_TIMING\
+	{HDMI_VFRMT_1366x768p60_16_10, 1366, 70, 143, 213, false,\
+	 768, 3, 3, 24, false, 85500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_1280x800p60_16_10_TIMING\
+	{HDMI_VFRMT_1280x800p60_16_10, 1280, 72, 128, 200, true,\
+	 800, 3, 6, 22, false, 83500, 60000, false, true, HDMI_RES_AR_16_10, 0}
+#define HDMI_VFRMT_3840x2160p24_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_16_9, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p25_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p30_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p50_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_16_9, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+#define HDMI_VFRMT_3840x2160p60_16_9_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_16_9, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_16_9, 0}
+
+#define HDMI_VFRMT_4096x2160p24_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p24_256_135, 4096, 1020, 88, 296, false,   \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p25_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p25_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p30_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p30_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p50_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p50_256_135, 4096, 968, 88, 128, false,    \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+#define HDMI_VFRMT_4096x2160p60_256_135_TIMING                          \
+	{HDMI_VFRMT_4096x2160p60_256_135, 4096, 88, 88, 128, false,     \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_256_135, 0}
+
+#define HDMI_VFRMT_3840x2160p24_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p24_64_27, 3840, 1276, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 24000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p25_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p25_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 297000, 25000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p30_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p30_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 297000, 30000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p50_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p50_64_27, 3840, 1056, 88, 296, false,      \
+	 2160, 8, 10, 72, false, 594000, 50000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+#define HDMI_VFRMT_3840x2160p60_64_27_TIMING                             \
+	{HDMI_VFRMT_3840x2160p60_64_27, 3840, 176, 88, 296, false,       \
+	 2160, 8, 10, 72, false, 594000, 60000, false, true, \
+		HDMI_RES_AR_64_27, 0}
+
+#define MSM_HDMI_MODES_SET_TIMING(LUT, MODE) do {		\
+	struct msm_hdmi_mode_timing_info mode = MODE##_TIMING;	\
+	LUT[MODE] = mode;\
+	} while (0)
+
+#define MSM_HDMI_MODES_INIT_TIMINGS(__lut)	\
+do {	\
+	unsigned int i;	\
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {	\
+		struct msm_hdmi_mode_timing_info mode =	\
+			VFRMT_NOT_SUPPORTED(i);	\
+		(__lut)[i] = mode;	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_SET_SUPP_TIMINGS(__lut, __type)	\
+do {	\
+	if (__type & MSM_HDMI_MODES_CEA) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_640x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x480i60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_720x576p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x720p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x576i50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p50_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1080p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_16_9);  \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p24_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p25_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p30_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p50_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_4096x2160p60_256_135);\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p24_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p25_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p30_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p50_64_27); \
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_3840x2160p60_64_27); \
+	}	\
+	if (__type & MSM_HDMI_MODES_XTND) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p30_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p25_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_3840x2160p24_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_EVFRMT_4096x2160p24_16_9);	\
+	}	\
+	if (__type & MSM_HDMI_MODES_DVI) {	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1024x768p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x1024p60_5_4);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_2560x1600p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_800x600p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_848x480p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x960p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1360x768p60_16_9);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1440x900p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1400x1050p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1680x1050p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1600x1200p60_4_3);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1920x1200p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1366x768p60_16_10);	\
+		MSM_HDMI_MODES_SET_TIMING(__lut,	\
+			HDMI_VFRMT_1280x800p60_16_10);	\
+	}	\
+} while (0)
+
+#define MSM_HDMI_MODES_GET_DETAILS(mode, MODE) do {		\
+	struct msm_hdmi_mode_timing_info info = MODE##_TIMING;	\
+	*mode = info;						\
+	} while (0)
+
+#endif /* _UAPI_MSM_HDMI_MODES_H__ */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8199821..85d1c94 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -212,9 +212,10 @@
 		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
 			 t == PTR_TO_MAP_VALUE_OR_NULL ||
 			 t == PTR_TO_MAP_VALUE_ADJ)
-			verbose("(ks=%d,vs=%d)",
+			verbose("(ks=%d,vs=%d,id=%u)",
 				reg->map_ptr->key_size,
-				reg->map_ptr->value_size);
+				reg->map_ptr->value_size,
+				reg->id);
 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
 			verbose(",min_value=%lld",
 				(long long)reg->min_value);
@@ -443,11 +444,17 @@
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 }
 
+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+{
+	regs[regno].type = UNKNOWN_VALUE;
+	regs[regno].id = 0;
+	regs[regno].imm = 0;
+}
+
 static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
 {
 	BUG_ON(regno >= MAX_BPF_REG);
-	regs[regno].type = UNKNOWN_VALUE;
-	regs[regno].imm = 0;
+	__mark_reg_unknown_value(regs, regno);
 }
 
 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
@@ -1252,6 +1259,7 @@
 			return -EINVAL;
 		}
 		regs[BPF_REG_0].map_ptr = meta.map_ptr;
+		regs[BPF_REG_0].id = ++env->id_gen;
 	} else {
 		verbose("unknown return type %d of func %d\n",
 			fn->ret_type, func_id);
@@ -1668,8 +1676,7 @@
 						insn->src_reg);
 					return -EACCES;
 				}
-				regs[insn->dst_reg].type = UNKNOWN_VALUE;
-				regs[insn->dst_reg].map_ptr = NULL;
+				mark_reg_unknown_value(regs, insn->dst_reg);
 			}
 		} else {
 			/* case: R = imm
@@ -1931,6 +1938,43 @@
 	check_reg_overflow(true_reg);
 }
 
+static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
+			 enum bpf_reg_type type)
+{
+	struct bpf_reg_state *reg = &regs[regno];
+
+	if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) {
+		reg->type = type;
+		/* We don't need id from this point onwards anymore, thus we
+		 * should better reset it, so that state pruning has chances
+		 * to take effect.
+		 */
+		reg->id = 0;
+		if (type == UNKNOWN_VALUE)
+			__mark_reg_unknown_value(regs, regno);
+	}
+}
+
+/* The logic is similar to find_good_pkt_pointers(), both could eventually
+ * be folded together at some point.
+ */
+static void mark_map_regs(struct bpf_verifier_state *state, u32 regno,
+			  enum bpf_reg_type type)
+{
+	struct bpf_reg_state *regs = state->regs;
+	u32 id = regs[regno].id;
+	int i;
+
+	for (i = 0; i < MAX_BPF_REG; i++)
+		mark_map_reg(regs, i, id, type);
+
+	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
+		if (state->stack_slot_type[i] != STACK_SPILL)
+			continue;
+		mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type);
+	}
+}
+
 static int check_cond_jmp_op(struct bpf_verifier_env *env,
 			     struct bpf_insn *insn, int *insn_idx)
 {
@@ -2018,18 +2062,13 @@
 	if (BPF_SRC(insn->code) == BPF_K &&
 	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 	    dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (opcode == BPF_JEQ) {
-			/* next fallthrough insn can access memory via
-			 * this register
-			 */
-			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-			/* branch targer cannot access it, since reg == 0 */
-			mark_reg_unknown_value(other_branch->regs,
-					       insn->dst_reg);
-		} else {
-			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
-			mark_reg_unknown_value(regs, insn->dst_reg);
-		}
+		/* Mark all identical map registers in each branch as either
+		 * safe or unknown depending R == 0 or R != 0 conditional.
+		 */
+		mark_map_regs(this_branch, insn->dst_reg,
+			      opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE);
+		mark_map_regs(other_branch, insn->dst_reg,
+			      opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE);
 	} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
 		   dst_reg->type == PTR_TO_PACKET &&
 		   regs[insn->src_reg].type == PTR_TO_PACKET_END) {
@@ -2469,7 +2508,7 @@
 		 * we didn't do a variable access into a map then we are a-ok.
 		 */
 		if (!varlen_map_access &&
-		    rold->type == rcur->type && rold->imm == rcur->imm)
+		    memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0)
 			continue;
 
 		/* If we didn't map access then again we don't care about the
diff --git a/kernel/exit.c b/kernel/exit.c
index 46a7c2b..83e8afa 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -778,6 +778,7 @@
 
 	exit_signals(tsk);  /* sets PF_EXITING */
 
+	sched_exit(tsk);
 	schedtune_exit_task(tsk);
 
 	/*
diff --git a/kernel/futex.c b/kernel/futex.c
index 38b68c2..4c6b6e6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2813,7 +2813,6 @@
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
-	struct rt_mutex *pi_mutex = NULL;
 	struct futex_hash_bucket *hb;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
@@ -2897,6 +2896,8 @@
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+				rt_mutex_unlock(&q.pi_state->pi_mutex);
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
@@ -2905,6 +2906,8 @@
 			spin_unlock(q.lock_ptr);
 		}
 	} else {
+		struct rt_mutex *pi_mutex;
+
 		/*
 		 * We have been woken up by futex_unlock_pi(), a timeout, or a
 		 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2928,18 +2931,19 @@
 		if (res)
 			ret = (res < 0) ? res : 0;
 
+		/*
+		 * If fixup_pi_state_owner() faulted and was unable to handle
+		 * the fault, unlock the rt_mutex and return the fault to
+		 * userspace.
+		 */
+		if (ret && rt_mutex_owner(pi_mutex) == current)
+			rt_mutex_unlock(pi_mutex);
+
 		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);
 	}
 
-	/*
-	 * If fixup_pi_state_owner() faulted and was unable to handle the
-	 * fault, unlock the rt_mutex and return the fault to userspace.
-	 */
-	if (ret == -EFAULT) {
-		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-			rt_mutex_unlock(pi_mutex);
-	} else if (ret == -EINTR) {
+	if (ret == -EINTR) {
 		/*
 		 * We've already been requeued, but cannot restart by calling
 		 * futex_lock_pi() directly. We could restart this syscall, but
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 1591f6b..2bef4ab 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -216,10 +216,8 @@
 		 */
 		if (sem->count == 0)
 			break;
-		if (signal_pending_state(state, current)) {
-			ret = -EINTR;
-			goto out;
-		}
+		if (signal_pending_state(state, current))
+			goto out_nolock;
 		set_task_state(tsk, state);
 		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 		schedule();
@@ -227,12 +225,19 @@
 	}
 	/* got the lock */
 	sem->count = -1;
-out:
 	list_del(&waiter.list);
 
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return ret;
+
+out_nolock:
+	list_del(&waiter.list);
+	if (!list_empty(&sem->wait_list))
+		__rwsem_do_wake(sem, 1);
+	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+	return -EINTR;
 }
 
 void __sched __down_write(struct rw_semaphore *sem)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f82983..fe084ef 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -89,6 +89,7 @@
 #include "sched.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
+#include "../time/tick-internal.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -3393,7 +3394,8 @@
 	if (curr->sched_class == &fair_sched_class)
 		check_for_migration(rq, curr);
 
-	core_ctl_check(wallclock);
+	if (cpu == tick_do_timer_cpu)
+		core_ctl_check(wallclock);
 	sched_freq_tick(cpu);
 }
 
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index aac12bf..1dde338 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt)	"core_ctl: " fmt
+
 #include <linux/init.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
@@ -36,7 +38,7 @@
 	cpumask_t cpu_mask;
 	unsigned int need_cpus;
 	unsigned int task_thres;
-	s64 last_isolate_ts;
+	s64 need_ts;
 	struct list_head lru;
 	bool pending;
 	spinlock_t pending_lock;
@@ -50,7 +52,6 @@
 };
 
 struct cpu_data {
-	bool online;
 	bool is_busy;
 	unsigned int busy;
 	unsigned int cpu;
@@ -242,22 +243,6 @@
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
 }
 
-static ssize_t show_cpus(const struct cluster_data *state, char *buf)
-{
-	struct cpu_data *c;
-	ssize_t count = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib) {
-		count += snprintf(buf + count, PAGE_SIZE - count,
-				  "CPU%u (%s)\n", c->cpu,
-				  c->online ? "Online" : "Offline");
-	}
-	spin_unlock_irqrestore(&state_lock, flags);
-	return count;
-}
-
 static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
 {
 	return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -277,9 +262,6 @@
 
 	for_each_possible_cpu(cpu) {
 		c = &per_cpu(cpu_state, cpu);
-		if (!c->cluster)
-			continue;
-
 		cluster = c->cluster;
 		if (!cluster || !cluster->inited)
 			continue;
@@ -289,10 +271,11 @@
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tCPU: %u\n", c->cpu);
 		count += snprintf(buf + count, PAGE_SIZE - count,
-					"\tOnline: %u\n", c->online);
+					"\tOnline: %u\n",
+					cpu_online(c->cpu));
 		count += snprintf(buf + count, PAGE_SIZE - count,
-					"\tActive: %u\n",
-					!cpu_isolated(c->cpu));
+					"\tIsolated: %u\n",
+					cpu_isolated(c->cpu));
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tFirst CPU: %u\n",
 						cluster->first_cpu);
@@ -301,6 +284,9 @@
 		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tIs busy: %u\n", c->is_busy);
 		count += snprintf(buf + count, PAGE_SIZE - count,
+					"\tNot preferred: %u\n",
+						c->not_preferred);
+		count += snprintf(buf + count, PAGE_SIZE - count,
 					"\tNr running: %u\n", cluster->nrrun);
 		count += snprintf(buf + count, PAGE_SIZE - count,
 			"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
@@ -323,13 +309,14 @@
 	int ret;
 
 	ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
-	if (ret != 1 && ret != state->num_cpus)
+	if (ret != state->num_cpus)
 		return -EINVAL;
 
-	i = 0;
 	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib)
-		c->not_preferred = val[i++];
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		c->not_preferred = val[i];
+	}
 	spin_unlock_irqrestore(&state_lock, flags);
 
 	return count;
@@ -340,11 +327,14 @@
 	struct cpu_data *c;
 	ssize_t count = 0;
 	unsigned long flags;
+	int i;
 
 	spin_lock_irqsave(&state_lock, flags);
-	list_for_each_entry(c, &state->lru, sib)
-		count += snprintf(buf + count, PAGE_SIZE - count,
-				"\tCPU:%d %u\n", c->cpu, c->not_preferred);
+	for (i = 0; i < state->num_cpus; i++) {
+		c = &per_cpu(cpu_state, i + state->first_cpu);
+		count += scnprintf(buf + count, PAGE_SIZE - count,
+				"CPU#%d: %u\n", c->cpu, c->not_preferred);
+	}
 	spin_unlock_irqrestore(&state_lock, flags);
 
 	return count;
@@ -372,7 +362,6 @@
 core_ctl_attr_rw(busy_down_thres);
 core_ctl_attr_rw(task_thres);
 core_ctl_attr_rw(is_big_cluster);
-core_ctl_attr_ro(cpus);
 core_ctl_attr_ro(need_cpus);
 core_ctl_attr_ro(active_cpus);
 core_ctl_attr_ro(global_state);
@@ -386,7 +375,6 @@
 	&busy_down_thres.attr,
 	&task_thres.attr,
 	&is_big_cluster.attr,
-	&cpus.attr,
 	&need_cpus.attr,
 	&active_cpus.attr,
 	&global_state.attr,
@@ -530,7 +518,7 @@
 
 static bool is_active(const struct cpu_data *state)
 {
-	return state->online && !cpu_isolated(state->cpu);
+	return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
 }
 
 static bool adjustment_possible(const struct cluster_data *cluster,
@@ -549,6 +537,7 @@
 	bool need_flag = false;
 	unsigned int active_cpus;
 	unsigned int new_need;
+	s64 now;
 
 	if (unlikely(!cluster->inited))
 		return 0;
@@ -573,9 +562,10 @@
 	need_flag = adjustment_possible(cluster, new_need);
 
 	last_need = cluster->need_cpus;
-	cluster->need_cpus = new_need;
+	now = ktime_to_ms(ktime_get());
 
-	if (!need_flag) {
+	if (new_need == last_need) {
+		cluster->need_ts = now;
 		spin_unlock_irqrestore(&state_lock, flags);
 		return 0;
 	}
@@ -583,12 +573,15 @@
 	if (need_cpus > cluster->active_cpus) {
 		ret = 1;
 	} else if (need_cpus < cluster->active_cpus) {
-		s64 now = ktime_to_ms(ktime_get());
-		s64 elapsed = now - cluster->last_isolate_ts;
+		s64 elapsed = now - cluster->need_ts;
 
 		ret = elapsed >= cluster->offline_delay_ms;
 	}
 
+	if (ret) {
+		cluster->need_ts = now;
+		cluster->need_cpus = new_need;
+	}
 	trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
 				 ret && need_flag);
 	spin_unlock_irqrestore(&state_lock, flags);
@@ -660,6 +653,9 @@
 	int ret = 0;
 	bool boost_state_changed = false;
 
+	if (unlikely(!initialized))
+		return 0;
+
 	spin_lock_irqsave(&state_lock, flags);
 	for_each_cluster(cluster, index) {
 		if (cluster->is_big_cluster) {
@@ -746,7 +742,6 @@
 		if (!sched_isolate_cpu(c->cpu)) {
 			c->isolated_by_us = true;
 			move_cpu_lru(c);
-			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
 		} else {
 			pr_debug("Unable to isolate CPU%u\n", c->cpu);
 		}
@@ -779,7 +774,6 @@
 		if (!sched_isolate_cpu(c->cpu)) {
 			c->isolated_by_us = true;
 			move_cpu_lru(c);
-			cluster->last_isolate_ts = ktime_to_ms(ktime_get());
 		} else {
 			pr_debug("Unable to isolate CPU%u\n", c->cpu);
 		}
@@ -808,7 +802,7 @@
 
 		if (!c->isolated_by_us)
 			continue;
-		if ((c->online && !cpu_isolated(c->cpu)) ||
+		if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
 			(!force && c->not_preferred))
 			continue;
 		if (cluster->active_cpus == need)
@@ -897,19 +891,7 @@
 		return NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-
-		/* If online state of CPU somehow got out of sync, fix it. */
-		if (state->online) {
-			state->online = false;
-			cluster->active_cpus = get_active_cpu_count(cluster);
-			pr_warn("CPU%d offline when state is online\n", cpu);
-		}
-		break;
-
 	case CPU_ONLINE:
-
-		state->online = true;
 		cluster->active_cpus = get_active_cpu_count(cluster);
 
 		/*
@@ -934,15 +916,6 @@
 		/* Move a CPU to the end of the LRU when it goes offline. */
 		move_cpu_lru(state);
 
-		/* Fall through */
-
-	case CPU_UP_CANCELED:
-
-		/* If online state of CPU somehow got out of sync, fix it. */
-		if (!state->online)
-			pr_warn("CPU%d online when state is offline\n", cpu);
-
-		state->online = false;
 		state->busy = 0;
 		cluster->active_cpus = get_active_cpu_count(cluster);
 		break;
@@ -961,6 +934,42 @@
 
 /* ============================ init code ============================== */
 
+static cpumask_var_t core_ctl_disable_cpumask;
+static bool core_ctl_disable_cpumask_present;
+
+static int __init core_ctl_disable_setup(char *str)
+{
+	if (!*str)
+		return -EINVAL;
+
+	alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);
+
+	if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
+		free_bootmem_cpumask_var(core_ctl_disable_cpumask);
+		return -EINVAL;
+	}
+
+	core_ctl_disable_cpumask_present = true;
+	pr_info("disable_cpumask=%*pbl\n",
+			cpumask_pr_args(core_ctl_disable_cpumask));
+
+	return 0;
+}
+early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
+
+static bool should_skip(const struct cpumask *mask)
+{
+	if (!core_ctl_disable_cpumask_present)
+		return false;
+
+	/*
+	 * We operate on a cluster basis. Disable core_ctl for
+	 * a cluster if all of its CPUs are specified in
+	 * core_ctl_disable_cpumask.
+	 */
+	return cpumask_subset(mask, core_ctl_disable_cpumask);
+}
+
 static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
 {
 	unsigned int i;
@@ -982,6 +991,9 @@
 	unsigned int cpu;
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
+	if (should_skip(mask))
+		return 0;
+
 	if (find_cluster_by_first_cpu(first_cpu))
 		return 0;
 
@@ -1021,8 +1033,6 @@
 		state = &per_cpu(cpu_state, cpu);
 		state->cluster = cluster;
 		state->cpu = cpu;
-		if (cpu_online(cpu))
-			state->online = true;
 		list_add_tail(&state->sib, &cluster->lru);
 	}
 	cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1084,6 +1094,9 @@
 {
 	unsigned int cpu;
 
+	if (should_skip(cpu_possible_mask))
+		return 0;
+
 	core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
 					* NSEC_PER_MSEC;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4eaf13e..2a8643c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5440,7 +5440,7 @@
 }
 
 static int find_new_capacity(struct energy_env *eenv,
-	const struct sched_group_energy const *sge)
+	const struct sched_group_energy * const sge)
 {
 	int idx;
 	unsigned long util = group_max_util(eenv);
@@ -10554,7 +10554,6 @@
 	u8 need_idle:1;
 	u8 need_waker_cluster:1;
 	u8 sync:1;
-	u8 ignore_prev_cpu:1;
 	enum sched_boost_policy boost_policy;
 	u8 pack_task:1;
 	int prev_cpu;
@@ -10564,6 +10563,7 @@
 	u64 cpu_load;
 	u32 sbc_best_flag;
 	u32 sbc_best_cluster_flag;
+	struct cpumask search_cpus;
 };
 
 struct cluster_cpu_stats {
@@ -10768,11 +10768,14 @@
 {
 	struct sched_cluster *next = NULL;
 	int i;
+	struct cpumask search_cpus;
 
 	while (!bitmap_empty(env->backup_list, num_clusters)) {
 		next = next_candidate(env->backup_list, 0, num_clusters);
 		__clear_bit(next->id, env->backup_list);
-		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+
+		cpumask_and(&search_cpus, &env->search_cpus, &next->cpus);
+		for_each_cpu(i, &search_cpus) {
 			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
 			sched_irqload(i), power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
@@ -10944,11 +10947,7 @@
 	int i;
 	struct cpumask search_cpus;
 
-	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
-	cpumask_andnot(&search_cpus, &search_cpus, cpu_isolated_mask);
-
-	if (env->ignore_prev_cpu)
-		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+	cpumask_and(&search_cpus, &env->search_cpus, &c->cpus);
 
 	env->need_idle = wake_to_idle(env->p) || c->wake_up_idle;
 
@@ -10960,7 +10959,7 @@
 			power_cost(i, task_load(env->p) +
 					cpu_cravg_sync(i, env->sync)), 0);
 
-		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+		if (skip_cpu(i, env))
 			continue;
 
 		update_spare_capacity(stats, env, i, c->capacity,
@@ -11015,9 +11014,7 @@
 		return false;
 
 	prev_cpu = env->prev_cpu;
-	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
-					unlikely(!cpu_active(prev_cpu)) ||
-					cpu_isolated(prev_cpu))
+	if (!cpumask_test_cpu(prev_cpu, &env->search_cpus))
 		return false;
 
 	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
@@ -11050,7 +11047,7 @@
 			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
 		update_spare_capacity(stats, env, prev_cpu,
 				cluster->capacity, env->cpu_load);
-		env->ignore_prev_cpu = 1;
+		cpumask_clear_cpu(prev_cpu, &env->search_cpus);
 		return false;
 	}
 
@@ -11066,23 +11063,17 @@
 }
 
 static inline bool
-bias_to_waker_cpu(struct task_struct *p, int cpu)
+bias_to_waker_cpu(struct cpu_select_env *env, int cpu)
 {
 	return sysctl_sched_prefer_sync_wakee_to_waker &&
 	       cpu_rq(cpu)->nr_running == 1 &&
-	       cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
-	       cpu_active(cpu) && !cpu_isolated(cpu);
+	       cpumask_test_cpu(cpu, &env->search_cpus);
 }
 
 static inline int
-cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+cluster_allowed(struct cpu_select_env *env, struct sched_cluster *cluster)
 {
-	cpumask_t tmp_mask;
-
-	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
-	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
-
-	return !cpumask_empty(&tmp_mask);
+	return cpumask_intersects(&env->search_cpus, &cluster->cpus);
 }
 
 /* return cheapest cpu that can fit this task */
@@ -11103,7 +11094,6 @@
 		.need_waker_cluster	= 0,
 		.sync			= sync,
 		.prev_cpu		= target,
-		.ignore_prev_cpu	= 0,
 		.rtg			= NULL,
 		.sbc_best_flag		= 0,
 		.sbc_best_cluster_flag	= 0,
@@ -11116,6 +11106,9 @@
 	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
 	bitmap_zero(env.backup_list, NR_CPUS);
 
+	cpumask_and(&env.search_cpus, tsk_cpus_allowed(p), cpu_active_mask);
+	cpumask_andnot(&env.search_cpus, &env.search_cpus, cpu_isolated_mask);
+
 	init_cluster_cpu_stats(&stats);
 	special = env_has_special_flags(&env);
 
@@ -11125,19 +11118,19 @@
 
 	if (grp && grp->preferred_cluster) {
 		pref_cluster = grp->preferred_cluster;
-		if (!cluster_allowed(p, pref_cluster))
+		if (!cluster_allowed(&env, pref_cluster))
 			clear_bit(pref_cluster->id, env.candidate_list);
 		else
 			env.rtg = grp;
 	} else if (!special) {
 		cluster = cpu_rq(cpu)->cluster;
 		if (wake_to_waker_cluster(&env)) {
-			if (bias_to_waker_cpu(p, cpu)) {
+			if (bias_to_waker_cpu(&env, cpu)) {
 				target = cpu;
 				sbc_flag = SBC_FLAG_WAKER_CLUSTER |
 					   SBC_FLAG_WAKER_CPU;
 				goto out;
-			} else if (cluster_allowed(p, cluster)) {
+			} else if (cluster_allowed(&env, cluster)) {
 				env.need_waker_cluster = 1;
 				bitmap_zero(env.candidate_list, NR_CPUS);
 				__set_bit(cluster->id, env.candidate_list);
@@ -11387,8 +11380,15 @@
 	nice = task_nice(p);
 	rcu_read_lock();
 	grp = task_related_thread_group(p);
+	/*
+	 * Don't assume higher capacity means higher power. If the task
+	 * is running on the power efficient CPU, avoid migrating it
+	 * to a lower capacity cluster.
+	 */
 	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
-	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+			upmigrate_discouraged(p)) &&
+			cpu_capacity(cpu) > min_capacity &&
+			cpu_max_power_cost(cpu) == max_power_cost) {
 		rcu_read_unlock();
 		return DOWN_MIGRATION;
 	}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 1de1fb1..c0adf4e 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -454,6 +454,12 @@
 	cluster1 = container_of(a, struct sched_cluster, list);
 	cluster2 = container_of(b, struct sched_cluster, list);
 
+	/*
+	 * Don't assume higher capacity means higher power. If the
+	 * power cost is same, sort the higher capacity cluster before
+	 * the lower capacity cluster to start placing the tasks
+	 * on the higher capacity cluster.
+	 */
 	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
 		(cluster1->max_power_cost == cluster2->max_power_cost &&
 		cluster1->max_possible_capacity <
@@ -711,7 +717,7 @@
 unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
 
 
-__read_mostly unsigned int sysctl_sched_new_task_windows = 5;
+#define SCHED_NEW_TASK_WINDOWS 5
 
 #define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
 
@@ -952,8 +958,8 @@
 unsigned int __read_mostly sysctl_sched_short_burst;
 unsigned int __read_mostly sysctl_sched_short_sleep = 1 * NSEC_PER_MSEC;
 
-static void
-_update_up_down_migrate(unsigned int *up_migrate, unsigned int *down_migrate)
+static void _update_up_down_migrate(unsigned int *up_migrate,
+			unsigned int *down_migrate, bool is_group)
 {
 	unsigned int delta;
 
@@ -967,7 +973,8 @@
 	*up_migrate >>= 10;
 	*up_migrate *= NSEC_PER_USEC;
 
-	*up_migrate = min(*up_migrate, sched_ravg_window);
+	if (!is_group)
+		*up_migrate = min(*up_migrate, sched_ravg_window);
 
 	*down_migrate /= NSEC_PER_USEC;
 	*down_migrate *= up_down_migrate_scale_factor;
@@ -982,14 +989,14 @@
 	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
 	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, false);
 	sched_upmigrate = up_migrate;
 	sched_downmigrate = down_migrate;
 
 	up_migrate = pct_to_real(sysctl_sched_group_upmigrate_pct);
 	down_migrate = pct_to_real(sysctl_sched_group_downmigrate_pct);
 
-	_update_up_down_migrate(&up_migrate, &down_migrate);
+	_update_up_down_migrate(&up_migrate, &down_migrate, true);
 	sched_group_upmigrate = up_migrate;
 	sched_group_downmigrate = down_migrate;
 }
@@ -1842,7 +1849,7 @@
 
 static inline bool is_new_task(struct task_struct *p)
 {
-	return p->ravg.active_windows < sysctl_sched_new_task_windows;
+	return p->ravg.active_windows < SCHED_NEW_TASK_WINDOWS;
 }
 
 #define INC_STEP 8
@@ -2571,7 +2578,8 @@
 	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
 }
 
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
 {
 	/*
 	 * No need to bother updating task demand for exiting tasks
@@ -2590,6 +2598,17 @@
 			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
 		return 0;
 
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+	}
+
 	return 1;
 }
 
@@ -2730,7 +2749,7 @@
 	u64 runtime;
 
 	new_window = mark_start < window_start;
-	if (!account_busy_for_task_demand(p, event)) {
+	if (!account_busy_for_task_demand(rq, p, event)) {
 		if (new_window)
 			/*
 			 * If the time accounted isn't being accounted as
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 41622ca..e7f6794 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1031,7 +1031,7 @@
 
 	unsigned int group_weight;
 	struct sched_group_capacity *sgc;
-	const struct sched_group_energy const *sge;
+	const struct sched_group_energy *sge;
 
 	/*
 	 * The CPUs this group covers.
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d2a397f..f55a02b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -451,13 +451,6 @@
 		.proc_handler   = proc_dointvec,
 	},
 	{
-		.procname       = "sched_new_task_windows",
-		.data           = &sysctl_sched_new_task_windows,
-		.maxlen         = sizeof(unsigned int),
-		.mode           = 0644,
-		.proc_handler   = sched_window_update_handler,
-	},
-	{
 		.procname	= "sched_pred_alert_freq",
 		.data		= &sysctl_sched_pred_alert_freq,
 		.maxlen		= sizeof(unsigned int),
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 39008d7..ad538fe 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1220,7 +1220,7 @@
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
-	unsigned long long now;
+	unsigned long long now = 0;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4274797..ed7ba6d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -547,6 +547,19 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_SWITCH_PROFILER
+	bool "CPU frequency switch time profiler"
+	select GENERIC_TRACER
+	help
+	  This option enables the CPU frequency switch profiler. A file is
+	  created in debugfs called "cpu_freq_switch_profile_enabled", which
+	  defaults to zero. When a 1 is echoed into this file, profiling begins.
+	  When a zero is echoed, profiling stops. A "cpu_freq_switch" file is
+	  also created in the trace_stats directory; this file shows the
+	  switches that have occurred and duration statistics.
+
+	  If in doubt, say N.
+
 config FTRACE_MCOUNT_RECORD
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 08e5e47..8ee9cc1 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -38,6 +38,7 @@
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
 obj-$(CONFIG_NOP_TRACER) += trace_nop.o
 obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 62110a3..fa7fd14 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -588,8 +588,12 @@
 static void tsv_read_data(struct encode_context *ectxt,
 			  void *data, uint32_t size)
 {
-	if (WARN_ON((ectxt->offset + size) > MAX_MSG_SIZE))
+	if (WARN_ON((ectxt->offset + size) > MAX_MSG_SIZE)) {
+		memcpy(data, (ectxt->buff + ectxt->offset),
+			MAX_MSG_SIZE - ectxt->offset - 1);
+		ectxt->offset += MAX_MSG_SIZE - ectxt->offset - 1;
 		return;
+	}
 	memcpy(data, (ectxt->buff + ectxt->offset), size);
 	ectxt->offset += size;
 }
@@ -604,8 +608,12 @@
 static void tsv_read_header(struct encode_context *ectxt,
 			    struct tsv_header *hdr)
 {
-	if (WARN_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE))
+	if (WARN_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE)) {
+		memcpy(hdr, (ectxt->buff + ectxt->offset),
+			MAX_MSG_SIZE - ectxt->offset - 1);
+		ectxt->offset += MAX_MSG_SIZE - ectxt->offset - 1;
 		return;
+	}
 	memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
 	ectxt->offset += sizeof(*hdr);
 }
diff --git a/kernel/trace/trace_cpu_freq_switch.c b/kernel/trace/trace_cpu_freq_switch.c
new file mode 100644
index 0000000..0fcfde3
--- /dev/null
+++ b/kernel/trace/trace_cpu_freq_switch.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hrtimer.h>
+#include <linux/tracefs.h>
+#include <linux/ktime.h>
+#include <trace/events/power.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+struct trans {
+	struct rb_node node;
+	unsigned int cpu;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	unsigned int min_us;
+	unsigned int max_us;
+	ktime_t total_t;
+	unsigned int count;
+};
+static struct rb_root freq_trans_tree = RB_ROOT;
+
+static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
+			       unsigned int start_freq, unsigned int end_freq)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct trans *tr = container_of(node, struct trans, node);
+
+		if (cpu < tr->cpu)
+			node = node->rb_left;
+		else if (cpu > tr->cpu)
+			node = node->rb_right;
+		else if (start_freq < tr->start_freq)
+			node = node->rb_left;
+		else if (start_freq > tr->start_freq)
+			node = node->rb_right;
+		else if (end_freq < tr->end_freq)
+			node = node->rb_left;
+		else if (end_freq > tr->end_freq)
+			node = node->rb_right;
+		else
+			return tr;
+	}
+	return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct trans *tr)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	while (*new) {
+		struct trans *this = container_of(*new, struct trans, node);
+
+		parent = *new;
+		if (tr->cpu < this->cpu)
+			new = &((*new)->rb_left);
+		else if (tr->cpu > this->cpu)
+			new = &((*new)->rb_right);
+		else if (tr->start_freq < this->start_freq)
+			new = &((*new)->rb_left);
+		else if (tr->start_freq > this->start_freq)
+			new = &((*new)->rb_right);
+		else if (tr->end_freq < this->end_freq)
+			new = &((*new)->rb_left);
+		else if (tr->end_freq > this->end_freq)
+			new = &((*new)->rb_right);
+		else
+			return -EINVAL;
+	}
+
+	rb_link_node(&tr->node, parent, new);
+	rb_insert_color(&tr->node, root);
+
+	return 0;
+}
+
+struct trans_state {
+	spinlock_t lock;
+	unsigned int start_freq;
+	unsigned int end_freq;
+	ktime_t start_t;
+	bool started;
+};
+static DEFINE_PER_CPU(struct trans_state, freq_trans_state);
+
+static DEFINE_SPINLOCK(state_lock);
+
+static void probe_start(void *ignore, unsigned int start_freq,
+			unsigned int end_freq, unsigned int cpu)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	per_cpu(freq_trans_state, cpu).start_freq = start_freq;
+	per_cpu(freq_trans_state, cpu).end_freq = end_freq;
+	per_cpu(freq_trans_state, cpu).start_t = ktime_get();
+	per_cpu(freq_trans_state, cpu).started = true;
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void probe_end(void *ignore, unsigned int cpu)
+{
+	unsigned long flags;
+	struct trans *tr;
+	s64 dur_us;
+	ktime_t dur_t, end_t = ktime_get();
+
+	spin_lock_irqsave(&state_lock, flags);
+
+	if (!per_cpu(freq_trans_state, cpu).started)
+		goto out;
+
+	dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
+	dur_us = ktime_to_us(dur_t);
+
+	tr = tr_search(&freq_trans_tree, cpu,
+		       per_cpu(freq_trans_state, cpu).start_freq,
+		       per_cpu(freq_trans_state, cpu).end_freq);
+	if (!tr) {
+		tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
+		if (!tr) {
+			WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
+			goto out;
+		}
+
+		tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
+		tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
+		tr->cpu = cpu;
+		tr->min_us = UINT_MAX;
+		tr_insert(&freq_trans_tree, tr);
+	}
+	tr->total_t = ktime_add(tr->total_t, dur_t);
+	tr->count++;
+
+	if (dur_us > tr->max_us)
+		tr->max_us = dur_us;
+	if (dur_us < tr->min_us)
+		tr->min_us = dur_us;
+
+	per_cpu(freq_trans_state, cpu).started = false;
+out:
+	spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void *freq_switch_stat_start(struct tracer_stat *trace)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_first(&freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static void *freq_switch_stat_next(void *prev, int idx)
+{
+	struct rb_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	n = rb_next(prev);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return n;
+}
+
+static int freq_switch_stat_show(struct seq_file *s, void *p)
+{
+	unsigned long flags;
+	struct trans *tr = p;
+
+	spin_lock_irqsave(&state_lock, flags);
+	seq_printf(s, "%3u %9u %8u %5u %6lld %6u %6u\n", tr->cpu,
+		   tr->start_freq, tr->end_freq, tr->count,
+		   div_s64(ktime_to_us(tr->total_t), tr->count),
+		   tr->min_us, tr->max_us);
+	spin_unlock_irqrestore(&state_lock, flags);
+
+	return 0;
+}
+
+static void freq_switch_stat_release(void *stat)
+{
+	struct trans *tr = stat;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state_lock, flags);
+	rb_erase(&tr->node, &freq_trans_tree);
+	spin_unlock_irqrestore(&state_lock, flags);
+	kfree(tr);
+}
+
+static int freq_switch_stat_headers(struct seq_file *s)
+{
+	seq_puts(s, "CPU START_KHZ  END_KHZ COUNT AVG_US MIN_US MAX_US\n");
+	seq_puts(s, "  |         |        |     |      |      |      |\n");
+	return 0;
+}
+
+struct tracer_stat freq_switch_stats __read_mostly = {
+	.name = "cpu_freq_switch",
+	.stat_start = freq_switch_stat_start,
+	.stat_next = freq_switch_stat_next,
+	.stat_show = freq_switch_stat_show,
+	.stat_release = freq_switch_stat_release,
+	.stat_headers = freq_switch_stat_headers
+};
+
+static void trace_freq_switch_disable(void)
+{
+	unregister_stat_tracer(&freq_switch_stats);
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+	pr_info("disabled cpu frequency switch time profiling\n");
+}
+
+static int trace_freq_switch_enable(void)
+{
+	int ret;
+
+	ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
+	if (ret)
+		goto out;
+
+	ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
+	if (ret)
+		goto err_register_switch_end;
+
+	ret = register_stat_tracer(&freq_switch_stats);
+	if (ret)
+		goto err_register_stat_tracer;
+
+	pr_info("enabled cpu frequency switch time profiling\n");
+	return 0;
+
+err_register_stat_tracer:
+	unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+err_register_switch_end:
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+out:
+	pr_err("failed to enable cpu frequency switch time profiling\n");
+
+	return ret;
+}
+
+static DEFINE_MUTEX(debugfs_lock);
+static bool trace_freq_switch_enabled;
+
+static int debug_toggle_tracing(void *data, u64 val)
+{
+	int ret = 0;
+
+	mutex_lock(&debugfs_lock);
+
+	if (val == 1 && !trace_freq_switch_enabled)
+		ret = trace_freq_switch_enable();
+	else if (val == 0 && trace_freq_switch_enabled)
+		trace_freq_switch_disable();
+	else if (val > 1)
+		ret = -EINVAL;
+
+	if (!ret)
+		trace_freq_switch_enabled = val;
+
+	mutex_unlock(&debugfs_lock);
+
+	return ret;
+}
+
+static int debug_tracing_state_get(void *data, u64 *val)
+{
+	mutex_lock(&debugfs_lock);
+	*val = trace_freq_switch_enabled;
+	mutex_unlock(&debugfs_lock);
+
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
+			debug_toggle_tracing, "%llu\n");
+
+static int __init trace_freq_switch_init(void)
+{
+	struct dentry *d_tracer = tracing_init_dentry();
+
+	if (IS_ERR(d_tracer))
+		return 0;
+
+	tracefs_create_file("cpu_freq_switch_profile_enabled",
+		0644, d_tracer, NULL, &debug_tracing_state_fops);
+
+	return 0;
+}
+late_initcall(trace_freq_switch_init);
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 4bbd38e..f4ac185 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -139,7 +139,7 @@
 
 		new->ns = ns;
 		new->uid = uid;
-		atomic_set(&new->count, 0);
+		new->count = 0;
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
@@ -150,8 +150,10 @@
 			ucounts = new;
 		}
 	}
-	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
+	if (ucounts->count == INT_MAX)
 		ucounts = NULL;
+	else
+		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
@@ -160,13 +162,15 @@
 {
 	unsigned long flags;
 
-	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock_irqsave(&ucounts_lock, flags);
+	spin_lock_irqsave(&ucounts_lock, flags);
+	ucounts->count -= 1;
+	if (!ucounts->count)
 		hlist_del_init(&ucounts->node);
-		spin_unlock_irqrestore(&ucounts_lock, flags);
+	else
+		ucounts = NULL;
+	spin_unlock_irqrestore(&ucounts_lock, flags);
 
-		kfree(ucounts);
-	}
+	kfree(ucounts);
 }
 
 static inline bool atomic_inc_below(atomic_t *v, int u)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index af5e988..fa9c7cd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1006,6 +1006,15 @@
 	  (it defaults to deactivated on bootup and will only be activated
 	  if some application like powertop activates it explicitly).
 
+config DEBUG_TASK_STACK_SCAN_OFF
+	bool "Disable kmemleak task stack scan by default"
+	depends on DEBUG_KMEMLEAK
+	help
+	  Say Y here to disable kmemleak task stack scan by default
+	  at compile time. It can be enabled later if required by
+	  writing to the debugfs entry :
+	  echo "stack=on" > /sys/kernel/debug/kmemleak.
+
 config DEBUG_PREEMPT
 	bool "Debug preemptible kernel"
 	depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
diff --git a/mm/Kconfig b/mm/Kconfig
index 86e3e0e..0183305 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -656,6 +656,15 @@
 
 	  A sane initial value is 80 MB.
 
+config BALANCE_ANON_FILE_RECLAIM
+	bool "During reclaim treat anon and file backed pages equally"
+	depends on SWAP
+	help
+	  When performing memory reclaim treat anonymous and file backed pages
+	  equally.
+	  Swapping anonymous pages out to memory can be efficient enough to justify
+	  treating anonymous and file backed pages equally.
+
 # For architectures that support deferred memory initialisation
 config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	bool
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index d1380ed..9a20a55 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -224,8 +224,20 @@
 static unsigned long jiffies_last_scan;
 /* delay between automatic memory scannings */
 static signed long jiffies_scan_wait;
-/* enables or disables the task stacks scanning */
+
+/*
+ * Enables or disables the task stacks scanning.
+ * Set to 1 if at compile time we want it enabled.
+ * Else set to 0 to have it disabled by default.
+ * This can be enabled by writing to "stack=on" using
+ * kmemleak debugfs entry.
+ */
+#ifdef CONFIG_DEBUG_TASK_STACK_SCAN_OFF
+static int kmemleak_stack_scan;
+#else
 static int kmemleak_stack_scan = 1;
+#endif
+
 /* protects the memory scanning, parameters and debug/kmemleak file access */
 static DEFINE_MUTEX(scan_mutex);
 /* setting kmemleak=on, will set this var, skipping the disable */
diff --git a/mm/ksm.c b/mm/ksm.c
index 9ae6011..56e92dc 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -37,6 +37,7 @@
 #include <linux/freezer.h>
 #include <linux/oom.h>
 #include <linux/numa.h>
+#include <linux/show_mem_notifier.h>
 
 #include <asm/tlbflush.h>
 #include "internal.h"
@@ -223,6 +224,9 @@
 /* Milliseconds ksmd should sleep between batches */
 static unsigned int ksm_thread_sleep_millisecs = 20;
 
+/* Boolean to indicate whether to use deferred timer or not */
+static bool use_deferred_timer;
+
 #ifdef CONFIG_NUMA
 /* Zeroed when merging across nodes is not allowed */
 static unsigned int ksm_merge_across_nodes = 1;
@@ -236,7 +240,7 @@
 #define KSM_RUN_MERGE	1
 #define KSM_RUN_UNMERGE	2
 #define KSM_RUN_OFFLINE	4
-static unsigned long ksm_run = KSM_RUN_STOP;
+static unsigned long ksm_run = KSM_RUN_MERGE;
 static void wait_while_offlining(void);
 
 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
@@ -247,6 +251,20 @@
 		sizeof(struct __struct), __alignof__(struct __struct),\
 		(__flags), NULL)
 
+static int ksm_show_mem_notifier(struct notifier_block *nb,
+				unsigned long action,
+				void *data)
+{
+	pr_info("ksm_pages_sharing: %lu\n", ksm_pages_sharing);
+	pr_info("ksm_pages_shared: %lu\n", ksm_pages_shared);
+
+	return 0;
+}
+
+static struct notifier_block ksm_show_mem_notifier_block = {
+	.notifier_call = ksm_show_mem_notifier,
+};
+
 static int __init ksm_slab_init(void)
 {
 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
@@ -1705,6 +1723,41 @@
 	}
 }
 
+static void process_timeout(unsigned long __data)
+{
+	wake_up_process((struct task_struct *)__data);
+}
+
+static signed long __sched deferred_schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire;
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	if (timeout < 0) {
+		pr_err("schedule_timeout: wrong timeout value %lx\n",
+							timeout);
+		__set_current_state(TASK_RUNNING);
+		goto out;
+	}
+
+	expire = timeout + jiffies;
+
+	setup_deferrable_timer_on_stack(&timer, process_timeout,
+			(unsigned long)current);
+	mod_timer(&timer, expire);
+	schedule();
+	del_singleshot_timer_sync(&timer);
+
+	/* Remove the timer from the object tracker */
+	destroy_timer_on_stack(&timer);
+
+	timeout = expire - jiffies;
+
+out:
+	return timeout < 0 ? 0 : timeout;
+}
+
 static int ksmd_should_run(void)
 {
 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
@@ -1725,7 +1778,11 @@
 		try_to_freeze();
 
 		if (ksmd_should_run()) {
-			schedule_timeout_interruptible(
+			if (use_deferred_timer)
+				deferred_schedule_timeout(
+				msecs_to_jiffies(ksm_thread_sleep_millisecs));
+			else
+				schedule_timeout_interruptible(
 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
 		} else {
 			wait_event_freezable(ksm_thread_wait,
@@ -2175,6 +2232,26 @@
 }
 KSM_ATTR(run);
 
+static ssize_t deferred_timer_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, 8, "%d\n", use_deferred_timer);
+}
+
+static ssize_t deferred_timer_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t count)
+{
+	unsigned long enable;
+	int err;
+	err = kstrtoul(buf, 10, &enable);
+	if (!err)
+		use_deferred_timer = enable;
+
+	return count;
+}
+KSM_ATTR(deferred_timer);
+
 #ifdef CONFIG_NUMA
 static ssize_t merge_across_nodes_show(struct kobject *kobj,
 				struct kobj_attribute *attr, char *buf)
@@ -2287,6 +2364,7 @@
 	&pages_unshared_attr.attr,
 	&pages_volatile_attr.attr,
 	&full_scans_attr.attr,
+	&deferred_timer_attr.attr,
 #ifdef CONFIG_NUMA
 	&merge_across_nodes_attr.attr,
 #endif
@@ -2331,6 +2409,8 @@
 	/* There is no significance to this priority 100 */
 	hotplug_memory_notifier(ksm_memory_callback, 100);
 #endif
+
+	show_mem_notifier_register(&ksm_show_mem_notifier_block);
 	return 0;
 
 out_free:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b47fda0..8e82002 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1742,9 +1742,9 @@
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
+	kasan_alloc_pages(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 	kernel_poison_pages(page, 1 << order, 1);
-	kasan_alloc_pages(page, order);
 	set_page_owner(page, order, gfp_flags);
 }
 
diff --git a/mm/readahead.c b/mm/readahead.c
index c8a955b..7dc48ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -233,6 +233,8 @@
 
 /*
  * Set the initial window size, round to next power of 2 and square
+ * Small size is not dependent on max value - only a one-page read is regarded
+ * as small.
  * for small size, x 4 for medium, and x 2 for large
  * for 128k (32 page) max ra
  * 1-8 page = 32k initial, > 8 page = 128k initial
@@ -241,7 +243,7 @@
 {
 	unsigned long newsize = roundup_pow_of_two(size);
 
-	if (newsize <= max / 32)
+	if (newsize <= 1)
 		newsize = newsize * 4;
 	else if (newsize <= max / 4)
 		newsize = newsize * 2;
diff --git a/mm/slab.c b/mm/slab.c
index bd878f0..1f82d16 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2332,7 +2332,7 @@
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2352,7 +2352,7 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index bc05fdc..ceb7d70 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -146,7 +146,7 @@
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 329b038..5d2f24f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -573,6 +573,29 @@
 	get_online_cpus();
 	get_online_mems();
 
+#ifdef CONFIG_SLUB
+	/*
+	 * In case of SLUB, we need to disable empty slab caching to
+	 * avoid pinning the offline memory cgroup by freeable kmem
+	 * pages charged to it. SLAB doesn't need this, as it
+	 * periodically purges unused slabs.
+	 */
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
+		if (c) {
+			c->cpu_partial = 0;
+			c->min_partial = 0;
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	/*
+	 * kmem_cache->cpu_partial is checked locklessly (see
+	 * put_cpu_partial()). Make sure the change is visible.
+	 */
+	synchronize_sched();
+#endif
+
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		if (!is_root_cache(s))
@@ -584,7 +607,7 @@
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmem_cache_shrink(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -755,7 +778,7 @@
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 5ec1580..eac04d4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 7aa0e97..2b01429 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -663,11 +663,21 @@
 	dump_stack();
 }
 
+#ifdef CONFIG_SLUB_DEBUG_PANIC_ON
+static void slab_panic(const char *cause)
+{
+	panic("%s\n", cause);
+}
+#else
+static inline void slab_panic(const char *cause) {}
+#endif
+
 void object_err(struct kmem_cache *s, struct page *page,
 			u8 *object, char *reason)
 {
 	slab_bug(s, "%s", reason);
 	print_trailer(s, page, object);
+	slab_panic(reason);
 }
 
 static void slab_err(struct kmem_cache *s, struct page *page,
@@ -682,6 +692,7 @@
 	slab_bug(s, "%s", buf);
 	print_page_info(page);
 	dump_stack();
+	slab_panic("slab error");
 }
 
 static void init_object(struct kmem_cache *s, void *object, u8 val)
@@ -703,6 +714,7 @@
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 						void *from, void *to)
 {
+	slab_panic("object poison overwritten");
 	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
 	memset(from, data, to - from);
 }
@@ -3887,7 +3899,7 @@
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3899,21 +3911,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3970,7 +3967,7 @@
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 30a88b9..9d3f6d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -319,6 +319,10 @@
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -386,7 +390,7 @@
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);
@@ -2204,7 +2208,8 @@
 	 * lruvec even if it has plenty of old anonymous pages unless the
 	 * system is under heavy pressure.
 	 */
-	if (!inactive_list_is_low(lruvec, true, sc) &&
+	if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
+	    !inactive_list_is_low(lruvec, true, sc) &&
 	    lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
 		scan_balance = SCAN_FILE;
 		goto out;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7cb41ae..8498e35 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -186,8 +186,9 @@
 		/* Do not flood unicast traffic to ports that turn it off */
 		if (pkt_type == BR_PKT_UNICAST && !(p->flags & BR_FLOOD))
 			continue;
+		/* Do not flood if mc off, except for traffic we originate */
 		if (pkt_type == BR_PKT_MULTICAST &&
-		    !(p->flags & BR_MCAST_FLOOD))
+		    !(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
 			continue;
 
 		/* Do not flood to ports that enable proxy ARP */
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 855b72f..267b46a 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@
 static int
 br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+	br_drop_fake_rtable(skb);
 	return netif_receive_skb(skb);
 }
 
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 7fbdbae..aa1df1a 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -521,21 +521,6 @@
 }
 
 
-/* PF_BRIDGE/LOCAL_IN ************************************************/
-/* The packet is locally destined, which requires a real
- * dst_entry, so detach the fake one.  On the way up, the
- * packet would pass through PRE_ROUTING again (which already
- * took place when the packet entered the bridge), but we
- * register an IPv4 PRE_ROUTING 'sabotage' hook that will
- * prevent this from happening. */
-static unsigned int br_nf_local_in(void *priv,
-				   struct sk_buff *skb,
-				   const struct nf_hook_state *state)
-{
-	br_drop_fake_rtable(skb);
-	return NF_ACCEPT;
-}
-
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -906,12 +891,6 @@
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
-		.hook = br_nf_local_in,
-		.pf = NFPROTO_BRIDGE,
-		.hooknum = NF_BR_LOCAL_IN,
-		.priority = NF_BR_PRI_BRNF,
-	},
-	{
 		.hook = br_nf_forward_ip,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_FORWARD,
diff --git a/net/core/dev.c b/net/core/dev.c
index 555ed4b..dff8012 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1697,27 +1697,54 @@
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
 static atomic_t netstamp_needed_deferred;
+static atomic_t netstamp_wanted;
 static void netstamp_clear(struct work_struct *work)
 {
 	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+	int wanted;
 
-	while (deferred--)
-		static_key_slow_dec(&netstamp_needed);
+	wanted = atomic_add_return(deferred, &netstamp_wanted);
+	if (wanted > 0)
+		static_key_enable(&netstamp_needed);
+	else
+		static_key_disable(&netstamp_needed);
 }
 static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
 
 void net_enable_timestamp(void)
 {
+#ifdef HAVE_JUMP_LABEL
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 0)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+			return;
+	}
+	atomic_inc(&netstamp_needed_deferred);
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_inc(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	/* net_disable_timestamp() can be called from non process context */
-	atomic_inc(&netstamp_needed_deferred);
+	int wanted;
+
+	while (1) {
+		wanted = atomic_read(&netstamp_wanted);
+		if (wanted <= 1)
+			break;
+		if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+			return;
+	}
+	atomic_dec(&netstamp_needed_deferred);
 	schedule_work(&netstamp_work);
 #else
 	static_key_slow_dec(&netstamp_needed);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1e3e008..f0f462c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3814,13 +3814,14 @@
 	if (!skb_may_tx_timestamp(sk, false))
 		return;
 
-	/* take a reference to prevent skb_orphan() from freeing the socket */
-	sock_hold(sk);
-
-	*skb_hwtstamps(skb) = *hwtstamps;
-	__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
-
-	sock_put(sk);
+	/* Take a reference to prevent skb_orphan() from freeing the socket,
+	 * but only if the socket refcount is not zero.
+	 */
+	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+		*skb_hwtstamps(skb) = *hwtstamps;
+		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+		sock_put(sk);
+	}
 }
 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
 
@@ -3871,7 +3872,7 @@
 {
 	struct sock *sk = skb->sk;
 	struct sock_exterr_skb *serr;
-	int err;
+	int err = 1;
 
 	skb->wifi_acked_valid = 1;
 	skb->wifi_acked = acked;
@@ -3881,14 +3882,15 @@
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
 
-	/* take a reference to prevent skb_orphan() from freeing the socket */
-	sock_hold(sk);
-
-	err = sock_queue_err_skb(sk, skb);
+	/* Take a reference to prevent skb_orphan() from freeing the socket,
+	 * but only if the socket refcount is not zero.
+	 */
+	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
+		err = sock_queue_err_skb(sk, skb);
+		sock_put(sk);
+	}
 	if (err)
 		kfree_skb(skb);
-
-	sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
 
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index f053198..5e3a730 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -749,6 +749,7 @@
 	for (i = 0; i < hc->tx_seqbufc; i++)
 		kfree(hc->tx_seqbuf[i]);
 	hc->tx_seqbufc = 0;
+	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8fedc2d..4a05d78 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -577,6 +577,7 @@
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 	const int old_state = sk->sk_state;
+	bool acceptable;
 	int queued = 0;
 
 	/*
@@ -603,8 +604,13 @@
 	 */
 	if (sk->sk_state == DCCP_LISTEN) {
 		if (dh->dccph_type == DCCP_PKT_REQUEST) {
-			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
-								    skb) < 0)
+			/* It is possible that we process SYN packets from backlog,
+			 * so we need to make sure to disable BH right there.
+			 */
+			local_bh_disable();
+			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
+			local_bh_enable();
+			if (!acceptable)
 				return 1;
 			consume_skb(skb);
 			return 0;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index edbe59d..86b0933 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -289,7 +289,8 @@
 
 	switch (type) {
 	case ICMP_REDIRECT:
-		dccp_do_redirect(skb, sk);
+		if (!sock_owned_by_user(sk))
+			dccp_do_redirect(skb, sk);
 		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 7506c03..237d62c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -122,10 +122,12 @@
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 53eddf9..39e7e2b 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -122,6 +122,7 @@
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
+			bh_unlock_sock(newsk);
 			sk_free(newsk);
 			return NULL;
 		}
@@ -145,6 +146,13 @@
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 	bool own_req;
 
+	/* TCP/DCCP listeners became lockless.
+	 * DCCP stores complex state in its request_sock, so we need
+	 * a protection for them, now this code runs without being protected
+	 * by the parent (listener) lock.
+	 */
+	spin_lock_bh(&dreq->dreq_lock);
+
 	/* Check for retransmitted REQUEST */
 	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
 
@@ -159,7 +167,7 @@
 			inet_rtx_syn_ack(sk, req);
 		}
 		/* Network Duplicate, discard packet */
-		return NULL;
+		goto out;
 	}
 
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
@@ -185,20 +193,20 @@
 
 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
 							 req, &own_req);
-	if (!child)
-		goto listen_overflow;
+	if (child) {
+		child = inet_csk_complete_hashdance(sk, child, req, own_req);
+		goto out;
+	}
 
-	return inet_csk_complete_hashdance(sk, child, req, own_req);
-
-listen_overflow:
-	dccp_pr_debug("listen_overflow!\n");
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
 drop:
 	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
 		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req);
-	return NULL;
+out:
+	spin_unlock_bh(&dreq->dreq_lock);
+	return child;
 }
 
 EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -249,6 +257,7 @@
 {
 	struct dccp_request_sock *dreq = dccp_rsk(req);
 
+	spin_lock_init(&dreq->dreq_lock);
 	inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
 	inet_rsk(req)->ir_num	   = ntohs(dccp_hdr(skb)->dccph_dport);
 	inet_rsk(req)->acked	   = 0;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c836bfe..8bc6c4e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1475,8 +1475,10 @@
 	int proto = iph->protocol;
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
+		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
 		skb_set_inner_network_header(skb, nhoff);
+	}
 
 	csum_replace2(&iph->check, iph->tot_len, newlen);
 	iph->tot_len = newlen;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5ba912d..873df83 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1973,6 +1973,7 @@
 {
 	int res;
 
+	tos &= IPTOS_RT_MASK;
 	rcu_read_lock();
 
 	/* Multicast recognition logic is moved from route cache to here.
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7fb6704..723059a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5917,9 +5917,15 @@
 		if (th->syn) {
 			if (th->fin)
 				goto discard;
-			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
-				return 1;
+			/* It is possible that we process SYN packets from backlog,
+			 * so we need to make sure to disable BH right there.
+			 */
+			local_bh_disable();
+			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
+			local_bh_enable();
 
+			if (!acceptable)
+				return 1;
 			consume_skb(skb);
 			return 0;
 		}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index eb5a0e1..eca1433 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -269,10 +269,13 @@
  */
 void tcp_v4_mtu_reduced(struct sock *sk)
 {
-	struct dst_entry *dst;
 	struct inet_sock *inet = inet_sk(sk);
-	u32 mtu = tcp_sk(sk)->mtu_info;
+	struct dst_entry *dst;
+	u32 mtu;
 
+	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+		return;
+	mtu = tcp_sk(sk)->mtu_info;
 	dst = inet_csk_update_pmtu(sk, mtu);
 	if (!dst)
 		return;
@@ -418,7 +421,8 @@
 
 	switch (type) {
 	case ICMP_REDIRECT:
-		do_redirect(icmp_skb, sk);
+		if (!sock_owned_by_user(sk))
+			do_redirect(icmp_skb, sk);
 		goto out;
 	case ICMP_SOURCE_QUENCH:
 		/* Just silently ignore these. */
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 3ea1cf8..b1e65b3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -249,7 +249,8 @@
 
 	sk_mem_reclaim_partial(sk);
 
-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
 
 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -552,7 +553,8 @@
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int event;
 
-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
+	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+	    !icsk->icsk_pending)
 		goto out;
 
 	if (time_after(icsk->icsk_timeout, jiffies)) {
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef54852..8c88a37 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -908,6 +908,8 @@
 			ins = &rt->dst.rt6_next;
 			iter = *ins;
 			while (iter) {
+				if (iter->rt6i_metric > rt->rt6i_metric)
+					break;
 				if (rt6_qualify_for_ecmp(iter)) {
 					*ins = iter->dst.rt6_next;
 					fib6_purge_rt(iter, fn, info->nl_net);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index fc7b401..33b04ec 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -294,8 +294,10 @@
 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
 	int err = -ENOSYS;
 
-	if (skb->encapsulation)
+	if (skb->encapsulation) {
+		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
 		skb_set_inner_network_header(skb, nhoff);
+	}
 
 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9a87bfb..e27b8fd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -757,13 +757,14 @@
 	 *	Fragment the datagram.
 	 */
 
-	*prevhdr = NEXTHDR_FRAGMENT;
 	troom = rt->dst.dev->needed_tailroom;
 
 	/*
 	 *	Keep copying data until we run out.
 	 */
 	while (left > 0)	{
+		u8 *fragnexthdr_offset;
+
 		len = left;
 		/* IF: it doesn't fit, use 'mtu' - the data space left */
 		if (len > mtu)
@@ -808,6 +809,10 @@
 		 */
 		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 
+		fragnexthdr_offset = skb_network_header(frag);
+		fragnexthdr_offset += prevhdr - skb_network_header(skb);
+		*fragnexthdr_offset = NEXTHDR_FRAGMENT;
+
 		/*
 		 *	Build fragment header.
 		 */
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 3bce120..bbeedff 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -692,6 +692,10 @@
 	u->link = p->link;
 	u->i_key = p->i_key;
 	u->o_key = p->o_key;
+	if (u->i_key)
+		u->i_flags |= GRE_KEY;
+	if (u->o_key)
+		u->o_flags |= GRE_KEY;
 	u->proto = p->proto;
 
 	memcpy(u->name, p->name, sizeof(u->name));
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 9948b5c..986d4ca 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -589,6 +589,7 @@
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);
 
+	skb_orphan(skb);
 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
 		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
 	if (fq == NULL) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 37c4b38..1c3bc0a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -376,10 +376,12 @@
 	np = inet6_sk(sk);
 
 	if (type == NDISC_REDIRECT) {
-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+		if (!sock_owned_by_user(sk)) {
+			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
 
-		if (dst)
-			dst->ops->redirect(dst, sk, skb);
+			if (dst)
+				dst->ops->redirect(dst, sk, skb);
+		}
 		goto out;
 	}
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index c0f0750..ff750bb 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -388,7 +388,7 @@
 drop:
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
 	kfree_skb(skb);
-	return -1;
+	return 0;
 }
 
 /* Userspace will call sendmsg() on the tunnel socket to send L2TP
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 5b77377..1309e2c 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -956,7 +956,8 @@
 				/* fall through */
 			case NETDEV_CHANGE:
 				nh->nh_flags |= RTNH_F_LINKDOWN;
-				ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
+				if (event != NETDEV_UNREGISTER)
+					ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1;
 				break;
 			}
 			if (event == NETDEV_UNREGISTER)
@@ -1696,6 +1697,7 @@
 	for (index = 0; index < platform_labels; index++) {
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
 		RCU_INIT_POINTER(platform_label[index], NULL);
+		mpls_notify_route(net, index, rt, NULL, NULL);
 		mpls_rt_free(rt);
 	}
 	rtnl_unlock();
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index eab210b..48386bf 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -367,7 +367,6 @@
 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
-		skb_orphan(skb);
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 		err = nf_ct_frag6_gather(net, skb, user);
 		if (err) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 34de326..f2b04a7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3140,7 +3140,7 @@
 			    int addr_len)
 {
 	struct sock *sk = sock->sk;
-	char name[15];
+	char name[sizeof(uaddr->sa_data) + 1];
 
 	/*
 	 *	Check legality
@@ -3148,7 +3148,11 @@
 
 	if (addr_len != sizeof(struct sockaddr))
 		return -EINVAL;
-	strlcpy(name, uaddr->sa_data, sizeof(name));
+	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
+	 * zero-terminated.
+	 */
+	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
+	name[sizeof(uaddr->sa_data)] = 0;
 
 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
 }
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index f82676d..2a30d55 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -655,6 +655,13 @@
 						rmnet_header->vnd.vnd_name);
 		break;
 
+	case RMNET_NETLINK_NEW_VND_WITH_NAME:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		resp_rmnet->return_code = rmnet_create_vnd_name(
+						rmnet_header->vnd.id,
+						rmnet_header->vnd.vnd_name);
+		break;
+
 	case RMNET_NETLINK_FREE_VND:
 		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
 		/* Please check rmnet_vnd_free_dev documentation regarding
@@ -1096,10 +1103,10 @@
 
 	ASSERT_RTNL();
 	LOGL("(%d);", id);
-	return rmnet_vnd_create_dev(id, &dev, NULL);
+	return rmnet_vnd_create_dev(id, &dev, NULL, 0);
 }
 
-/* rmnet_create_vnd() - Create virtual network device node
+/* rmnet_create_vnd_prefix() - Create virtual network device node
  * @id:       RmNet virtual device node id
  * @prefix:   String prefix for device name
  *
@@ -1112,7 +1119,24 @@
 
 	ASSERT_RTNL();
 	LOGL("(%d, \"%s\");", id, prefix);
-	return rmnet_vnd_create_dev(id, &dev, prefix);
+	return rmnet_vnd_create_dev(id, &dev, prefix, 0);
+}
+
+/**
+ * rmnet_create_vnd_name() - Create virtual network device node
+ * @id:       RmNet virtual device node id
+ * @name:     Name string for the device node
+ *
+ * Return:
+ *      - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd_name(int id, const char *name)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+	LOGL("(%d, \"%s\");", id, name);
+	return rmnet_vnd_create_dev(id, &dev, name, 1);
 }
 
 /* rmnet_free_vnd() - Free virtual network device node
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index b929158..5ce4600 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -121,6 +121,7 @@
 			   unsigned long event, void *data);
 int rmnet_create_vnd(int id);
 int rmnet_create_vnd_prefix(int id, const char *name);
+int rmnet_create_vnd_name(int id, const char *name);
 int rmnet_free_vnd(int id);
 
 struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
index 64217bd..72f3c3b 100644
--- a/net/rmnet_data/rmnet_data_vnd.c
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -556,7 +556,7 @@
  *      - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
  */
 int rmnet_vnd_create_dev(int id, struct net_device **new_device,
-			 const char *prefix)
+			 const char *prefix, int use_name)
 {
 	struct net_device *dev;
 	char dev_prefix[IFNAMSIZ];
@@ -572,11 +572,16 @@
 		return RMNET_CONFIG_DEVICE_IN_USE;
 	}
 
-	if (!prefix)
+	if (!prefix && !use_name)
 		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
 			      RMNET_DATA_DEV_NAME_STR);
-	else
+	else if (prefix && use_name)
+		p = scnprintf(dev_prefix, IFNAMSIZ, "%s", prefix);
+	else if (prefix && !use_name)
 		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
+	else
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
 	if (p >= (IFNAMSIZ - 1)) {
 		LOGE("Specified prefix longer than IFNAMSIZ");
 		return RMNET_CONFIG_BAD_ARGUMENTS;
@@ -584,7 +589,7 @@
 
 	dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
 			   dev_prefix,
-			   NET_NAME_ENUM,
+			   use_name ? NET_NAME_UNKNOWN : NET_NAME_ENUM,
 			   rmnet_vnd_setup);
 	if (!dev) {
 		LOGE("Failed to to allocate netdev for id %d", id);
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
index e0afeff..9d8eb54 100644
--- a/net/rmnet_data/rmnet_data_vnd.h
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
 struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
 int rmnet_vnd_get_name(int id, char *name, int name_len);
 int rmnet_vnd_create_dev(int id, struct net_device **new_device,
-			 const char *prefix);
+			 const char *prefix, int use_name);
 int rmnet_vnd_free_dev(int id);
 int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
 int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index c6c2a93..c651cfc 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -820,10 +820,8 @@
 		goto out_module_put;
 
 	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
-	if (err < 0)
+	if (err <= 0)
 		goto out_module_put;
-	if (err == 0)
-		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -840,7 +838,6 @@
 out_module_put:
 	module_put(ops->owner);
 err_out:
-noflush_out:
 	kfree_skb(skb);
 	return err;
 }
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index eae07a2..1191179 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -113,6 +113,9 @@
 	if (ret < 0)
 		return ret;
 
+	if (!tb[TCA_CONNMARK_PARMS])
+		return -EINVAL;
+
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(tn, parm->index, a, bind)) {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index e7d9638..f85313d 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -228,7 +228,6 @@
 
 	return skb->len;
 nla_put_failure:
-	rcu_read_unlock();
 	nlmsg_trim(skb, b);
 	return -1;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 206dc24..744cfe6c5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1179,6 +1179,45 @@
 }
 
 /*
+ * enable/disable flow on qdisc.
+ */
+int
+tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle, int enable_flow)
+{
+	struct Qdisc *q;
+	int qdisc_len = 0;
+	struct __qdisc_change_req {
+		struct nlattr attr;
+		struct tc_prio_qopt data;
+	} req =	{
+		.attr = {sizeof(struct __qdisc_change_req), TCA_OPTIONS},
+		.data = {3, {1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, 1}
+		};
+
+	/* override flow bit */
+	req.data.enable_flow = enable_flow;
+
+	/* look up using tcm handle */
+	q = qdisc_lookup(dev, tcm_handle);
+
+	/* call registered change function */
+	if (likely(q && q->ops)) {
+		if (likely(q->ops->change)) {
+			qdisc_len = q->q.qlen;
+			if (q->ops->change(q, &req.attr))
+				pr_err("%s(): qdisc change failed", __func__);
+		} else {
+			WARN_ONCE(1, "%s(): called on queue which does %s",
+				  __func__, "not support change() operation");
+		}
+	} else {
+		WARN_ONCE(1, "%s(): called on bad queue", __func__);
+	}
+	return qdisc_len;
+}
+EXPORT_SYMBOL(tc_qdisc_flow_control);
+
+/*
  * Create/change qdisc.
  */
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8f57589..353c6a1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -27,6 +27,7 @@
 	struct tcf_proto __rcu *filter_list;
 	u8  prio2band[TC_PRIO_MAX+1];
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
+	u8 enable_flow;
 };
 
 
@@ -99,6 +100,9 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
+	if (!q->enable_flow)
+		return NULL;
+
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc->ops->peek(qdisc);
@@ -113,6 +117,9 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
 
+	if (!q->enable_flow)
+		return NULL;
+
 	for (prio = 0; prio < q->bands; prio++) {
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
@@ -137,6 +144,7 @@
 		qdisc_reset(q->queues[prio]);
 	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
+	q->enable_flow = 1;
 }
 
 static void
@@ -181,6 +189,7 @@
 	}
 
 	sch_tree_lock(sch);
+	q->enable_flow = qopt->enable_flow;
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
@@ -214,6 +223,7 @@
 	struct tc_prio_qopt opt;
 
 	opt.bands = q->bands;
+	opt.enable_flow = q->enable_flow;
 	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
 	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 41adf36..b5c279b 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -504,6 +504,7 @@
 
 static void __exit strp_mod_exit(void)
 {
+	destroy_workqueue(strp_wq);
 }
 module_init(strp_mod_init);
 module_exit(strp_mod_exit);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2d03d5b..459577e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -992,7 +992,7 @@
 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
 	char *sun_path = sunaddr->sun_path;
 	int err;
-	unsigned int hash;
+	unsigned int hash = 0;
 	struct unix_address *addr;
 	struct hlist_head *list;
 	struct path path = { NULL, NULL };
diff --git a/security/security.c b/security/security.c
index f825304..1ba5274 100644
--- a/security/security.c
+++ b/security/security.c
@@ -508,6 +508,7 @@
 		return 0;
 	return call_int_hook(path_chown, 0, path, uid, gid);
 }
+EXPORT_SYMBOL(security_path_chown);
 
 int security_path_chroot(const struct path *path)
 {
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index c08e746..1a529ba 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -497,7 +497,7 @@
 {
 	struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
 	int ret;
-	u32 rd_status;
+	u32 rd_status = 0;
 
 	ret = wcd_spi_cmd_nop(spi);
 	if (ret < 0) {
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index a452ad7..f32cfa4 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -162,5 +162,13 @@
 
 source "sound/usb/line6/Kconfig"
 
+config SND_USB_AUDIO_QMI
+	tristate "USB Audio QMI Service driver"
+	depends on MSM_QMI_INTERFACE
+	help
+	  Starts USB Audio QMI server to communicate with remote entity
+	  to perform operations like enable or disable particular audio
+	  stream on a connected USB device.
+
 endif	# SND_USB
 
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 2d2d122..d2ac038 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -26,3 +26,4 @@
 
 obj-$(CONFIG_SND) += misc/ usx2y/ caiaq/ 6fire/ hiface/ bcd2000/
 obj-$(CONFIG_SND_USB_LINE6)	+= line6/
+obj-$(CONFIG_SND_USB_AUDIO_QMI) += usb_audio_qmi_v01.o usb_audio_qmi_svc.o
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 90a4e68..ccf06de 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -113,6 +113,71 @@
 static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
 static struct usb_driver usb_audio_driver;
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip))
+{
+	int idx;
+	struct snd_usb_stream *as;
+	struct snd_usb_substream *subs = NULL;
+	struct snd_usb_audio *chip = NULL;
+
+	mutex_lock(&register_mutex);
+	/*
+	 * legacy audio snd card number assignment is dynamic. Hence
+	 * search using chip->card->number
+	 */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!usb_chip[idx])
+			continue;
+		if (usb_chip[idx]->card->number == card_num) {
+			chip = usb_chip[idx];
+			break;
+		}
+	}
+
+	if (!chip || atomic_read(&chip->shutdown)) {
+		pr_debug("%s: instance of usb card # %d does not exist\n",
+			__func__, card_num);
+		goto err;
+	}
+
+	if (pcm_idx >= chip->pcm_devs) {
+		pr_err("%s: invalid pcm dev number %u > %d\n", __func__,
+			pcm_idx, chip->pcm_devs);
+		goto err;
+	}
+
+	if (direction > SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: invalid direction %u\n", __func__, direction);
+		goto err;
+	}
+
+	list_for_each_entry(as, &chip->pcm_list, list) {
+		if (as->pcm_index == pcm_idx) {
+			subs = &as->substream[direction];
+			if (subs->interface < 0 && !subs->data_endpoint &&
+				!subs->sync_endpoint) {
+				pr_debug("%s: stream disconnected, bail out\n",
+					__func__);
+				subs = NULL;
+				goto err;
+			}
+			goto done;
+		}
+	}
+
+done:
+	chip->card_num = card_num;
+	chip->disconnect_cb = disconnect_cb;
+err:
+	*uchip = chip;
+	if (!subs)
+		pr_debug("%s: substream instance not found\n", __func__);
+	mutex_unlock(&register_mutex);
+	return subs;
+}
+
 /*
  * disconnect streams
  * called from usb_audio_disconnect()
@@ -325,6 +390,7 @@
 	list_for_each_entry_safe(ep, n, &chip->ep_list, list)
 		snd_usb_endpoint_free(ep);
 
+	mutex_destroy(&chip->dev_lock);
 	mutex_destroy(&chip->mutex);
 	if (!atomic_read(&chip->shutdown))
 		dev_set_drvdata(&chip->dev->dev, NULL);
@@ -383,6 +449,7 @@
 	}
 
 	mutex_init(&chip->mutex);
+	mutex_init(&chip->dev_lock);
 	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
@@ -630,6 +697,8 @@
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
 	usb_set_intfdata(intf, chip);
+	intf->needs_remote_wakeup = 1;
+	usb_enable_autosuspend(chip->dev);
 	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
@@ -659,6 +728,9 @@
 
 	card = chip->card;
 
+	if (chip->disconnect_cb)
+		chip->disconnect_cb(chip);
+
 	mutex_lock(&register_mutex);
 	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 111b0f0..25cddcc 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -167,4 +167,8 @@
 	struct list_head list;
 };
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip));
+
 #endif /* __USBAUDIO_CARD_H */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c5251aa..70e1477 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -357,7 +357,7 @@
 		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
 		if (err < 0)
 			usb_audio_err(ep->chip,
-				"Unable to submit urb #%d: %d (urb %p)\n",
+				"Unable to submit urb #%d: %d (urb %pK)\n",
 				ctx->index, err, ctx->urb);
 		else
 			set_bit(ctx->index, &ep->active_mask);
@@ -459,7 +459,7 @@
 		    ep->iface == alts->desc.bInterfaceNumber &&
 		    ep->altsetting == alts->desc.bAlternateSetting) {
 			usb_audio_dbg(ep->chip,
-				      "Re-using EP %x in iface %d,%d @%p\n",
+				      "Re-using EP %x in iface %d,%d @%pK\n",
 					ep_num, ep->iface, ep->altsetting, ep);
 			goto __exit_unlock;
 		}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 48afae0..db85d92 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -228,7 +228,7 @@
 	if (!test_and_set_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags)) {
 		struct snd_usb_endpoint *ep = subs->data_endpoint;
 
-		dev_dbg(&subs->dev->dev, "Starting data EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting data EP @%pK\n", ep);
 
 		ep->data_subs = subs;
 		err = snd_usb_endpoint_start(ep);
@@ -257,7 +257,7 @@
 			}
 		}
 
-		dev_dbg(&subs->dev->dev, "Starting sync EP @%p\n", ep);
+		dev_dbg(&subs->dev->dev, "Starting sync EP @%pK\n", ep);
 
 		ep->sync_slave = subs->data_endpoint;
 		err = snd_usb_endpoint_start(ep);
@@ -554,6 +554,70 @@
 	return 0;
 }
 
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable)
+{
+	struct audioformat *fmt;
+	struct usb_host_interface *alts;
+	struct usb_interface *iface;
+	int ret;
+
+	if (!enable) {
+		if (subs->interface >= 0) {
+			usb_set_interface(subs->dev, subs->interface, 0);
+			subs->altset_idx = 0;
+			subs->interface = -1;
+			subs->cur_audiofmt = NULL;
+		}
+
+		snd_usb_autosuspend(subs->stream->chip);
+		return 0;
+	}
+
+	snd_usb_autoresume(subs->stream->chip);
+	fmt = find_format(subs);
+	if (!fmt) {
+		dev_err(&subs->dev->dev,
+		"cannot set format: format = %#x, rate = %d, channels = %d\n",
+			   subs->pcm_format, subs->cur_rate, subs->channels);
+		return -EINVAL;
+	}
+
+	subs->altset_idx = 0;
+	subs->interface = -1;
+	if (atomic_read(&subs->stream->chip->shutdown)) {
+		ret = -ENODEV;
+	} else {
+		ret = set_format(subs, fmt);
+		if (ret < 0)
+			return ret;
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		if (!iface) {
+			dev_err(&subs->dev->dev, "Could not get iface %d\n",
+				subs->cur_audiofmt->iface);
+			return -ENODEV;
+		}
+
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0) {
+			dev_err(&subs->dev->dev, "failed to set rate %d\n",
+				subs->cur_rate);
+			return ret;
+		}
+	}
+
+	subs->interface = fmt->iface;
+	subs->altset_idx = fmt->altset_idx;
+
+	return 0;
+}
+
 /*
  * Return the score of matching two audioformats.
  * Veto the audioformat if:
@@ -571,13 +635,13 @@
 
 	if (fp->channels < 1) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no channels\n", __func__, fp);
+			"%s: (fmt @%pK) no channels\n", __func__, fp);
 		return 0;
 	}
 
 	if (!(fp->formats & pcm_format_to_bits(pcm_format))) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for format %d\n", __func__,
+			"%s: (fmt @%pK) no match for format %d\n", __func__,
 			fp, pcm_format);
 		return 0;
 	}
@@ -590,7 +654,7 @@
 	}
 	if (!score) {
 		dev_dbg(&subs->dev->dev,
-			"%s: (fmt @%p) no match for rate %d\n", __func__,
+			"%s: (fmt @%pK) no match for rate %d\n", __func__,
 			fp, rate);
 		return 0;
 	}
@@ -599,7 +663,7 @@
 		score++;
 
 	dev_dbg(&subs->dev->dev,
-		"%s: (fmt @%p) score %d\n", __func__, fp, score);
+		"%s: (fmt @%pK) score %d\n", __func__, fp, score);
 
 	return score;
 }
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index df7a003..d581f94 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -9,6 +9,7 @@
 int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 		       struct usb_host_interface *alts,
 		       struct audioformat *fmt);
-
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	bool enable);
 
 #endif /* __USBAUDIO_PCM_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 8e9548bc..7437cd5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -69,9 +69,14 @@
 static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
 {
 	struct snd_usb_stream *stream = pcm->private_data;
+	struct snd_usb_audio *chip;
+
 	if (stream) {
+		mutex_lock(&stream->chip->dev_lock);
+		chip = stream->chip;
 		stream->pcm = NULL;
 		snd_usb_audio_stream_free(stream);
+		mutex_unlock(&chip->dev_lock);
 	}
 }
 
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
new file mode 100644
index 0000000..5a1974e
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -0,0 +1,1325 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/audio-v2.h>
+#include <linux/uaccess.h>
+#include <sound/pcm.h>
+#include <sound/core.h>
+#include <sound/asound.h>
+#include <linux/usb.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "usbaudio.h"
+#include "card.h"
+#include "helper.h"
+#include "pcm.h"
+#include "usb_audio_qmi_v01.h"
+
+#define SND_PCM_CARD_NUM_MASK 0xffff0000
+#define SND_PCM_DEV_NUM_MASK 0xff00
+#define SND_PCM_STREAM_DIRECTION 0xff
+
+#define PREPEND_SID_TO_IOVA(iova, sid) (u64)(((u64)(iova)) | \
+					(((u64)sid) << 32))
+
+/*  event ring iova base address */
+#define IOVA_BASE 0x1000
+
+#define IOVA_DCBA_BASE 0x2000
+#define IOVA_XFER_RING_BASE (IOVA_DCBA_BASE + PAGE_SIZE * (SNDRV_CARDS + 1))
+#define IOVA_XFER_BUF_BASE (IOVA_XFER_RING_BASE + PAGE_SIZE * SNDRV_CARDS * 32)
+#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
+#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
+
+#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
+
+struct iova_info {
+	struct list_head list;
+	unsigned long start_iova;
+	size_t size;
+	bool in_use;
+};
+
+struct intf_info {
+	unsigned long data_xfer_ring_va;
+	size_t data_xfer_ring_size;
+	unsigned long sync_xfer_ring_va;
+	size_t sync_xfer_ring_size;
+	unsigned long xfer_buf_va;
+	size_t xfer_buf_size;
+	phys_addr_t xfer_buf_pa;
+	u8 *xfer_buf;
+	u8 intf_num;
+	u8 pcm_card_num;
+	u8 pcm_dev_num;
+	u8 direction;
+	bool in_use;
+};
+
+struct uaudio_dev {
+	struct usb_device *udev;
+	/* audio control interface */
+	struct usb_host_interface *ctrl_intf;
+	unsigned int card_num;
+	atomic_t in_use;
+	struct kref kref;
+	unsigned long dcba_iova;
+	size_t dcba_size;
+	wait_queue_head_t disconnect_wq;
+
+	/* interface specific */
+	int num_intf;
+	struct intf_info *info;
+};
+
+static struct uaudio_dev uadev[SNDRV_CARDS];
+
+struct uaudio_qmi_dev {
+	struct device *dev;
+	u32 sid;
+	u32 intr_num;
+	struct iommu_domain *domain;
+
+	/* list to keep track of available iova */
+	struct list_head dcba_list;
+	size_t dcba_iova_size;
+	unsigned long curr_dcba_iova;
+	struct list_head xfer_ring_list;
+	size_t xfer_ring_iova_size;
+	unsigned long curr_xfer_ring_iova;
+	struct list_head xfer_buf_list;
+	size_t xfer_buf_iova_size;
+	unsigned long curr_xfer_buf_iova;
+	/* bit fields representing pcm card enabled */
+	unsigned long card_slot;
+	/* cache event ring phys addr */
+	u64 er_phys_addr;
+};
+
+static struct uaudio_qmi_dev *uaudio_qdev;
+
+struct uaudio_qmi_svc {
+	struct qmi_handle *uaudio_svc_hdl;
+	void *curr_conn;
+	struct work_struct recv_msg_work;
+	struct work_struct qmi_disconnect_work;
+	struct workqueue_struct *uaudio_wq;
+	ktime_t t_request_recvd;
+	ktime_t t_resp_sent;
+};
+
+static struct uaudio_qmi_svc *uaudio_svc;
+
+static struct msg_desc uaudio_stream_req_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_REQ_V01,
+	.ei_array = qmi_uaudio_stream_req_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_resp_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UAUDIO_STREAM_RESP_V01,
+	.ei_array = qmi_uaudio_stream_resp_msg_v01_ei,
+};
+
+static struct msg_desc uaudio_stream_ind_desc = {
+	.max_msg_len = QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_UADUIO_STREAM_IND_V01,
+	.ei_array = qmi_uaudio_stream_ind_msg_v01_ei,
+};
+
+enum mem_type {
+	MEM_EVENT_RING,
+	MEM_DCBA,
+	MEM_XFER_RING,
+	MEM_XFER_BUF,
+};
+
+enum usb_qmi_audio_format {
+	USB_QMI_PCM_FORMAT_S8 = 0,
+	USB_QMI_PCM_FORMAT_U8,
+	USB_QMI_PCM_FORMAT_S16_LE,
+	USB_QMI_PCM_FORMAT_S16_BE,
+	USB_QMI_PCM_FORMAT_U16_LE,
+	USB_QMI_PCM_FORMAT_U16_BE,
+	USB_QMI_PCM_FORMAT_S24_LE,
+	USB_QMI_PCM_FORMAT_S24_BE,
+	USB_QMI_PCM_FORMAT_U24_LE,
+	USB_QMI_PCM_FORMAT_U24_BE,
+	USB_QMI_PCM_FORMAT_S24_3LE,
+	USB_QMI_PCM_FORMAT_S24_3BE,
+	USB_QMI_PCM_FORMAT_U24_3LE,
+	USB_QMI_PCM_FORMAT_U24_3BE,
+	USB_QMI_PCM_FORMAT_S32_LE,
+	USB_QMI_PCM_FORMAT_S32_BE,
+	USB_QMI_PCM_FORMAT_U32_LE,
+	USB_QMI_PCM_FORMAT_U32_BE,
+};
+
+static unsigned long uaudio_get_iova(unsigned long *curr_iova,
+	size_t *curr_iova_size, struct list_head *head, size_t size)
+{
+	struct iova_info *info, *new_info = NULL;
+	struct list_head *curr_head;
+	unsigned long va = 0;
+	size_t tmp_size = size;
+	bool found = false;
+
+	if (size % PAGE_SIZE) {
+		pr_err("%s: size %zu is not page size multiple\n", __func__,
+			size);
+		goto done;
+	}
+
+	if (size > *curr_iova_size) {
+		pr_err("%s: size %zu > curr size %zu\n", __func__, size,
+			*curr_iova_size);
+		goto done;
+	}
+	if (*curr_iova_size == 0) {
+		pr_err("%s: iova mapping is full\n", __func__);
+		goto done;
+	}
+
+	list_for_each_entry(info, head, list) {
+		/* exact size iova_info */
+		if (!info->in_use && info->size == size) {
+			info->in_use = true;
+			va = info->start_iova;
+			*curr_iova_size -= size;
+			found = true;
+			pr_debug("%s: exact size :%zu found\n", __func__, size);
+			goto done;
+		} else if (!info->in_use && tmp_size >= info->size) {
+			if (!new_info)
+				new_info = info;
+			pr_debug("%s: partial size: %zu found\n", __func__,
+				info->size);
+			tmp_size -= info->size;
+			if (tmp_size)
+				continue;
+
+			va = new_info->start_iova;
+			for (curr_head = &new_info->list; curr_head !=
+			&info->list; curr_head = curr_head->next) {
+				new_info = list_entry(curr_head, struct
+						iova_info, list);
+				new_info->in_use = true;
+			}
+			info->in_use = true;
+			*curr_iova_size -= size;
+			found = true;
+			goto done;
+		} else {
+			/* iova region in use */
+			new_info = NULL;
+			tmp_size = size;
+		}
+	}
+
+	info = kzalloc(sizeof(struct iova_info), GFP_KERNEL);
+	if (!info) {
+		va = 0;
+		goto done;
+	}
+
+	va = info->start_iova = *curr_iova;
+	info->size = size;
+	info->in_use = true;
+	*curr_iova += size;
+	*curr_iova_size -= size;
+	found = true;
+	list_add_tail(&info->list, head);
+
+done:
+	if (!found)
+		pr_err("%s: unable to find %zu size iova\n", __func__, size);
+	else
+		pr_debug("%s: va:%lu curr_iova:%lu curr_iova_size:%zu\n",
+		__func__, va, *curr_iova, *curr_iova_size);
+
+	return va;
+}
+
+static unsigned long uaudio_iommu_map(enum mem_type mtype, phys_addr_t pa,
+		size_t size)
+{
+	unsigned long va = 0;
+	bool map = true;
+	int ret;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		va = IOVA_BASE;
+		/* er already mapped */
+		if (uaudio_qdev->er_phys_addr == pa)
+			map = false;
+		break;
+	case MEM_DCBA:
+		va = uaudio_get_iova(&uaudio_qdev->curr_dcba_iova,
+		&uaudio_qdev->dcba_iova_size, &uaudio_qdev->dcba_list, size);
+		break;
+	case MEM_XFER_RING:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_ring_iova,
+		&uaudio_qdev->xfer_ring_iova_size, &uaudio_qdev->xfer_ring_list,
+		size);
+		break;
+	case MEM_XFER_BUF:
+		va = uaudio_get_iova(&uaudio_qdev->curr_xfer_buf_iova,
+		&uaudio_qdev->xfer_buf_iova_size, &uaudio_qdev->xfer_buf_list,
+		size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+	}
+
+	if (!va)
+		map = false;
+
+	if (!map)
+		goto done;
+
+	pr_debug("%s: map pa %pa to iova %lu for memtype %d\n", __func__, &pa,
+		va, mtype);
+	ret = iommu_map(uaudio_qdev->domain, va, pa, size,
+		IOMMU_READ | IOMMU_WRITE | IOMMU_MMIO);
+	if (ret)
+		pr_err("%s:failed to map pa:%pa iova:%lu memtype:%d ret:%d\n",
+			__func__, &pa, va, mtype, ret);
+done:
+	return va;
+}
+
+static void uaudio_put_iova(unsigned long va, size_t size, struct list_head
+	*head, size_t *curr_iova_size)
+{
+	struct iova_info *info;
+	size_t tmp_size = size;
+	bool found = false;
+
+	list_for_each_entry(info, head, list) {
+		if (info->start_iova == va) {
+			if (!info->in_use) {
+				pr_err("%s: va %lu is not in use\n", __func__,
+					va);
+				return;
+			}
+			found = true;
+			info->in_use = false;
+			if (info->size == size)
+				goto done;
+		}
+
+		if (found && tmp_size >= info->size) {
+			info->in_use = false;
+			tmp_size -= info->size;
+			if (!tmp_size)
+				goto done;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s: unable to find the va %lu\n", __func__, va);
+		return;
+	}
+done:
+	*curr_iova_size += size;
+	pr_debug("%s: curr_iova_size %zu\n", __func__, *curr_iova_size);
+}
+
+static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
+	size_t size)
+{
+	size_t umap_size;
+	bool unmap = true;
+
+	if (!va || !size)
+		return;
+
+	switch (mtype) {
+	case MEM_EVENT_RING:
+		if (uaudio_qdev->er_phys_addr)
+			uaudio_qdev->er_phys_addr = 0;
+		else
+			unmap = false;
+		break;
+	case MEM_DCBA:
+		uaudio_put_iova(va, size, &uaudio_qdev->dcba_list,
+		&uaudio_qdev->dcba_iova_size);
+		break;
+	case MEM_XFER_RING:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_ring_list,
+		&uaudio_qdev->xfer_ring_iova_size);
+		break;
+	case MEM_XFER_BUF:
+		uaudio_put_iova(va, size, &uaudio_qdev->xfer_buf_list,
+		&uaudio_qdev->xfer_buf_iova_size);
+		break;
+	default:
+		pr_err("%s: unknown mem type %d\n", __func__, mtype);
+		unmap = false;
+	}
+
+	if (!unmap)
+		return;
+
+	pr_debug("%s: unmap iova %lu for memtype %d\n", __func__, va, mtype);
+
+	umap_size = iommu_unmap(uaudio_qdev->domain, va, size);
+	if (umap_size != size)
+		pr_err("%s: unmapped size %zu for iova %lu\n", __func__,
+		umap_size, va);
+}
+
+static int prepare_qmi_response(struct snd_usb_substream *subs,
+		struct qmi_uaudio_stream_req_msg_v01 *req_msg,
+		struct qmi_uaudio_stream_resp_msg_v01 *resp, int info_idx)
+{
+	struct usb_interface *iface;
+	struct usb_host_interface *alts;
+	struct usb_interface_descriptor *altsd;
+	struct usb_host_endpoint *ep;
+	struct uac_format_type_i_continuous_descriptor *fmt;
+	struct uac_format_type_i_discrete_descriptor *fmt_v1;
+	struct uac_format_type_i_ext_descriptor *fmt_v2;
+	struct uac1_as_header_descriptor *as;
+	int ret = -ENODEV;
+	int protocol, card_num, pcm_dev_num;
+	void *hdr_ptr;
+	u8 *xfer_buf;
+	u32 len, mult, remainder, xfer_buf_len;
+	unsigned long va, tr_data_va = 0, tr_sync_va = 0, dcba_va = 0,
+	xfer_buf_va = 0;
+	phys_addr_t xhci_pa, xfer_buf_pa;
+
+	iface = usb_ifnum_to_if(subs->dev, subs->interface);
+	if (!iface) {
+		pr_err("%s: interface # %d does not exist\n", __func__,
+			subs->interface);
+		goto err;
+	}
+
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+	xfer_buf_len = req_msg->xfer_buff_size;
+
+	alts = &iface->altsetting[subs->altset_idx];
+	altsd = get_iface_desc(alts);
+	protocol = altsd->bInterfaceProtocol;
+
+	/* get format type */
+	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_FORMAT_TYPE);
+	if (!fmt) {
+		pr_err("%s: %u:%d : no UAC_FORMAT_TYPE desc\n", __func__,
+			subs->interface, subs->altset_idx);
+		goto err;
+	}
+
+	if (!uadev[card_num].ctrl_intf) {
+		pr_err("%s: audio ctrl intf info not cached\n", __func__);
+		goto err;
+	}
+
+	hdr_ptr = snd_usb_find_csint_desc(uadev[card_num].ctrl_intf->extra,
+					uadev[card_num].ctrl_intf->extralen,
+					NULL, UAC_HEADER);
+	if (!hdr_ptr) {
+		pr_err("%s: no UAC_HEADER desc\n", __func__);
+		goto err;
+	}
+
+	if (protocol == UAC_VERSION_1) {
+		as = snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL,
+			UAC_AS_GENERAL);
+		if (!as) {
+			pr_err("%s: %u:%d : no UAC_AS_GENERAL desc\n", __func__,
+				subs->interface, subs->altset_idx);
+			goto err;
+		}
+		resp->data_path_delay = as->bDelay;
+		resp->data_path_delay_valid = 1;
+		fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac1_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else if (protocol == UAC_VERSION_2) {
+		fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
+		resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
+		resp->usb_audio_subslot_size_valid = 1;
+
+		resp->usb_audio_spec_revision =
+			((struct uac2_ac_header_descriptor *)hdr_ptr)->bcdADC;
+		resp->usb_audio_spec_revision_valid = 1;
+	} else {
+		pr_err("%s: unknown protocol version %x\n", __func__, protocol);
+		goto err;
+	}
+
+	resp->slot_id = subs->dev->slot_id;
+	resp->slot_id_valid = 1;
+
+	memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc));
+	resp->std_as_opr_intf_desc_valid = 1;
+
+	ep = usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe);
+	if (!ep) {
+		pr_err("%s: data ep # %d context is null\n", __func__,
+			subs->data_endpoint->ep_num);
+		goto err;
+	}
+	memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
+	resp->std_as_data_ep_desc_valid = 1;
+
+	xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get data ep ring dma address\n", __func__);
+		goto err;
+	}
+
+	resp->xhci_mem_info.tr_data.pa = xhci_pa;
+
+	if (subs->sync_endpoint) {
+		ep = usb_pipe_endpoint(subs->dev, subs->sync_endpoint->pipe);
+		if (!ep) {
+			pr_debug("%s: implicit fb on data ep\n", __func__);
+			goto skip_sync_ep;
+		}
+		memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
+		resp->std_as_sync_ep_desc_valid = 1;
+
+		xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
+		if (!xhci_pa) {
+			pr_err("%s:failed to get sync ep ring dma address\n",
+				__func__);
+			goto err;
+		}
+		resp->xhci_mem_info.tr_sync.pa = xhci_pa;
+	}
+
+skip_sync_ep:
+	resp->interrupter_num = uaudio_qdev->intr_num;
+	resp->interrupter_num_valid = 1;
+
+	/*  map xhci data structures PA memory to iova */
+
+	/* event ring */
+	ret = usb_sec_event_ring_setup(subs->dev, resp->interrupter_num);
+	if (ret) {
+		pr_err("%s: failed to setup sec event ring ret %d\n", __func__,
+			ret);
+		goto err;
+	}
+	xhci_pa = usb_get_sec_event_ring_dma_addr(subs->dev,
+			resp->interrupter_num);
+	if (!xhci_pa) {
+		pr_err("%s: failed to get sec event ring dma address\n",
+		__func__);
+		goto err;
+	}
+
+	va = uaudio_iommu_map(MEM_EVENT_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto err;
+
+	resp->xhci_mem_info.evt_ring.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.evt_ring.pa = xhci_pa;
+	resp->xhci_mem_info.evt_ring.size = PAGE_SIZE;
+	uaudio_qdev->er_phys_addr = xhci_pa;
+
+	/* dcba */
+	xhci_pa = usb_get_dcba_dma_addr(subs->dev);
+	if (!xhci_pa) {
+		pr_err("%s:failed to get dcba dma address\n", __func__);
+		goto unmap_er;
+	}
+
+	if (!uadev[card_num].dcba_iova) { /* mappped per usb device */
+		va = uaudio_iommu_map(MEM_DCBA, xhci_pa, PAGE_SIZE);
+		if (!va)
+			goto unmap_er;
+
+		uadev[card_num].dcba_iova = va;
+		uadev[card_num].dcba_size = PAGE_SIZE;
+	}
+
+	dcba_va = uadev[card_num].dcba_iova;
+	resp->xhci_mem_info.dcba.va = PREPEND_SID_TO_IOVA(dcba_va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.dcba.pa = xhci_pa;
+	resp->xhci_mem_info.dcba.size = PAGE_SIZE;
+
+	/* data transfer ring */
+	xhci_pa = resp->xhci_mem_info.tr_data.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_dcba;
+
+	tr_data_va = va;
+	resp->xhci_mem_info.tr_data.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_data.size = PAGE_SIZE;
+
+	/* sync transfer ring */
+	if (!resp->xhci_mem_info.tr_sync.pa)
+		goto skip_sync;
+
+	xhci_pa = resp->xhci_mem_info.tr_sync.pa;
+	va = uaudio_iommu_map(MEM_XFER_RING, xhci_pa, PAGE_SIZE);
+	if (!va)
+		goto unmap_data;
+
+	tr_sync_va = va;
+	resp->xhci_mem_info.tr_sync.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+	resp->xhci_mem_info.tr_sync.size = PAGE_SIZE;
+
+skip_sync:
+	/* xfer buffer, multiple of 4K only */
+	if (!xfer_buf_len)
+		xfer_buf_len = PAGE_SIZE;
+
+	mult = xfer_buf_len / PAGE_SIZE;
+	remainder = xfer_buf_len % PAGE_SIZE;
+	len = mult * PAGE_SIZE;
+	len += remainder ? PAGE_SIZE : 0;
+
+	if (len > MAX_XFER_BUFF_LEN) {
+		pr_err("%s: req buf len %d > max buf len %lu, setting %lu\n",
+		__func__, len, MAX_XFER_BUFF_LEN, MAX_XFER_BUFF_LEN);
+		len = MAX_XFER_BUFF_LEN;
+	}
+
+	xfer_buf = usb_alloc_coherent(subs->dev, len, GFP_KERNEL, &xfer_buf_pa);
+	if (!xfer_buf)
+		goto unmap_sync;
+
+	resp->xhci_mem_info.xfer_buff.pa = xfer_buf_pa;
+	resp->xhci_mem_info.xfer_buff.size = len;
+
+	va = uaudio_iommu_map(MEM_XFER_BUF, xfer_buf_pa, len);
+	if (!va)
+		goto unmap_sync;
+
+	xfer_buf_va = va;
+	resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
+						uaudio_qdev->sid);
+
+	resp->xhci_mem_info_valid = 1;
+
+	if (!atomic_read(&uadev[card_num].in_use)) {
+		kref_init(&uadev[card_num].kref);
+		init_waitqueue_head(&uadev[card_num].disconnect_wq);
+		uadev[card_num].num_intf =
+			subs->dev->config->desc.bNumInterfaces;
+		uadev[card_num].info =
+			kzalloc(sizeof(struct intf_info) *
+			uadev[card_num].num_intf, GFP_KERNEL);
+		if (!uadev[card_num].info) {
+			ret = -ENOMEM;
+			goto unmap_xfer_buf;
+		}
+		uadev[card_num].udev = subs->dev;
+		atomic_set(&uadev[card_num].in_use, 1);
+	} else {
+		kref_get(&uadev[card_num].kref);
+	}
+
+	uadev[card_num].card_num = card_num;
+
+	/* cache intf specific info to use it for unmap and free xfer buf */
+	uadev[card_num].info[info_idx].data_xfer_ring_va = tr_data_va;
+	uadev[card_num].info[info_idx].data_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].sync_xfer_ring_va = tr_sync_va;
+	uadev[card_num].info[info_idx].sync_xfer_ring_size = PAGE_SIZE;
+	uadev[card_num].info[info_idx].xfer_buf_va = xfer_buf_va;
+	uadev[card_num].info[info_idx].xfer_buf_pa = xfer_buf_pa;
+	uadev[card_num].info[info_idx].xfer_buf_size = len;
+	uadev[card_num].info[info_idx].xfer_buf = xfer_buf;
+	uadev[card_num].info[info_idx].pcm_card_num = card_num;
+	uadev[card_num].info[info_idx].pcm_dev_num = pcm_dev_num;
+	uadev[card_num].info[info_idx].direction = subs->direction;
+	uadev[card_num].info[info_idx].intf_num = subs->interface;
+	uadev[card_num].info[info_idx].in_use = true;
+
+	set_bit(card_num, &uaudio_qdev->card_slot);
+
+	return 0;
+
+unmap_xfer_buf:
+	uaudio_iommu_unmap(MEM_XFER_BUF, xfer_buf_va, len);
+unmap_sync:
+	usb_free_coherent(subs->dev, len, xfer_buf, xfer_buf_pa);
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_sync_va, PAGE_SIZE);
+unmap_data:
+	uaudio_iommu_unmap(MEM_XFER_RING, tr_data_va, PAGE_SIZE);
+unmap_dcba:
+	uaudio_iommu_unmap(MEM_DCBA, dcba_va, PAGE_SIZE);
+unmap_er:
+	uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+err:
+	return ret;
+}
+
+/*
+ * Undo everything that was set up for one audio interface: unmap the data
+ * and sync transfer rings and the transfer buffer from the remote IOMMU,
+ * free the coherent transfer buffer, and mark the intf_info slot free.
+ * Fields are zeroed afterwards so a repeated call on the same entry is
+ * harmless.
+ * NOTE(review): assumes uaudio_iommu_unmap() tolerates a 0 iova/size for
+ * entries that were never mapped - confirm against its definition.
+ */
+static void uaudio_dev_intf_cleanup(struct usb_device *udev,
+	struct intf_info *info)
+{
+	uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
+		info->data_xfer_ring_size);
+	info->data_xfer_ring_va = 0;
+	info->data_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va,
+		info->sync_xfer_ring_size);
+	info->sync_xfer_ring_va = 0;
+	info->sync_xfer_ring_size = 0;
+
+	uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_va,
+		info->xfer_buf_size);
+	info->xfer_buf_va = 0;
+
+	usb_free_coherent(udev, info->xfer_buf_size,
+		info->xfer_buf, info->xfer_buf_pa);
+	info->xfer_buf_size = 0;
+	info->xfer_buf = NULL;
+	info->xfer_buf_pa = 0;
+
+	info->in_use = false;
+}
+
+/*
+ * Release every resource held on behalf of @dev: per-interface transfer
+ * rings/buffers, the per-device DCBA mapping, and - once no card slot
+ * remains set in uaudio_qdev->card_slot - the shared secondary event ring.
+ * NOTE(review): callers visible here hold chip->dev_lock while invoking
+ * this (see uaudio_disconnect_cb()) - confirm for any new call site.
+ */
+static void uaudio_dev_cleanup(struct uaudio_dev *dev)
+{
+	int if_idx;
+
+	/* free xfer buffer and unmap xfer ring and buf per interface */
+	for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
+		if (!dev->info[if_idx].in_use)
+			continue;
+		uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]);
+		pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
+			dev->info[if_idx].intf_num, dev->card_num);
+	}
+
+	/* iommu_unmap dcba iova for a usb device */
+	uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
+
+	dev->dcba_iova = 0;
+	dev->dcba_size = 0;
+	dev->num_intf = 0;
+
+	/* free interface info */
+	kfree(dev->info);
+	dev->info = NULL;
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	dev->udev = NULL;
+}
+
+/*
+ * snd-usb disconnect hook for a card.  If the remote QMI client still
+ * holds the device (in_use), send a disconnect indication and wait
+ * (interruptibly) for uaudio_dev_release() to drop in_use before
+ * releasing resources.  Takes chip->dev_lock internally.
+ * Fixes vs. original: "wiht" typo in the qmi_send_ind error message and
+ * a stray double space in the card-number bounds check.
+ */
+static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+{
+	int ret;
+	struct uaudio_dev *dev;
+	int card_num = chip->card_num;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct qmi_uaudio_stream_ind_msg_v01 disconnect_ind = {0};
+
+	pr_debug("%s: for card# %d\n", __func__, card_num);
+
+	if (card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card number\n", __func__);
+		return;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	dev = &uadev[card_num];
+
+	/* clean up */
+	if (!dev->udev) {
+		pr_debug("%s: no clean up required\n", __func__);
+		goto done;
+	}
+
+	if (atomic_read(&dev->in_use)) {
+		mutex_unlock(&chip->dev_lock);
+
+		pr_debug("%s: sending qmi indication disconnect\n", __func__);
+		disconnect_ind.dev_event = USB_AUDIO_DEV_DISCONNECT_V01;
+		disconnect_ind.slot_id = dev->udev->slot_id;
+		ret = qmi_send_ind(svc->uaudio_svc_hdl, svc->curr_conn,
+				&uaudio_stream_ind_desc, &disconnect_ind,
+				sizeof(disconnect_ind));
+		if (ret < 0) {
+			pr_err("%s: qmi send failed with err: %d\n",
+					__func__, ret);
+			return;
+		}
+
+		/* wait for the remote side to release the device */
+		ret = wait_event_interruptible(dev->disconnect_wq,
+				!atomic_read(&dev->in_use));
+		if (ret < 0) {
+			pr_debug("%s: failed with ret %d\n", __func__, ret);
+			return;
+		}
+		mutex_lock(&chip->dev_lock);
+	}
+
+	uaudio_dev_cleanup(dev);
+done:
+	mutex_unlock(&chip->dev_lock);
+}
+
+/*
+ * kref release callback for a uaudio_dev: marks the slot unused, tears
+ * down the shared secondary event ring when this was the last active
+ * card, and wakes uaudio_disconnect_cb() waiting on disconnect_wq.
+ * NOTE(review): the teardown order here (event ring cleanup before the
+ * IOMMU unmap) is the reverse of uaudio_dev_cleanup() - confirm both
+ * orders are safe.
+ */
+static void uaudio_dev_release(struct kref *kref)
+{
+	struct uaudio_dev *dev = container_of(kref, struct uaudio_dev, kref);
+
+	pr_debug("%s for dev %pK\n", __func__, dev);
+
+	atomic_set(&dev->in_use, 0);
+
+	clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+	/* all audio devices are disconnected */
+	if (!uaudio_qdev->card_slot) {
+		usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+		uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+		pr_debug("%s: all audio devices disconnected\n", __func__);
+	}
+
+	wake_up(&dev->disconnect_wq);
+}
+
+/*
+ * Maps an audio format received over QMI to the corresponding asound.h
+ * based PCM format constant; returns -EINVAL for formats with no mapping.
+ */
+static int map_pcm_format(unsigned int fmt_received)
+{
+	switch (fmt_received) {
+	case USB_QMI_PCM_FORMAT_S8:
+		return SNDRV_PCM_FORMAT_S8;
+	case USB_QMI_PCM_FORMAT_U8:
+		return SNDRV_PCM_FORMAT_U8;
+	case USB_QMI_PCM_FORMAT_S16_LE:
+		return SNDRV_PCM_FORMAT_S16_LE;
+	case USB_QMI_PCM_FORMAT_S16_BE:
+		return SNDRV_PCM_FORMAT_S16_BE;
+	case USB_QMI_PCM_FORMAT_U16_LE:
+		return SNDRV_PCM_FORMAT_U16_LE;
+	case USB_QMI_PCM_FORMAT_U16_BE:
+		return SNDRV_PCM_FORMAT_U16_BE;
+	case USB_QMI_PCM_FORMAT_S24_LE:
+		return SNDRV_PCM_FORMAT_S24_LE;
+	case USB_QMI_PCM_FORMAT_S24_BE:
+		return SNDRV_PCM_FORMAT_S24_BE;
+	case USB_QMI_PCM_FORMAT_U24_LE:
+		return SNDRV_PCM_FORMAT_U24_LE;
+	case USB_QMI_PCM_FORMAT_U24_BE:
+		return SNDRV_PCM_FORMAT_U24_BE;
+	case USB_QMI_PCM_FORMAT_S24_3LE:
+		return SNDRV_PCM_FORMAT_S24_3LE;
+	case USB_QMI_PCM_FORMAT_S24_3BE:
+		return SNDRV_PCM_FORMAT_S24_3BE;
+	case USB_QMI_PCM_FORMAT_U24_3LE:
+		return SNDRV_PCM_FORMAT_U24_3LE;
+	case USB_QMI_PCM_FORMAT_U24_3BE:
+		return SNDRV_PCM_FORMAT_U24_3BE;
+	case USB_QMI_PCM_FORMAT_S32_LE:
+		return SNDRV_PCM_FORMAT_S32_LE;
+	case USB_QMI_PCM_FORMAT_S32_BE:
+		return SNDRV_PCM_FORMAT_S32_BE;
+	case USB_QMI_PCM_FORMAT_U32_LE:
+		return SNDRV_PCM_FORMAT_U32_LE;
+	case USB_QMI_PCM_FORMAT_U32_BE:
+		return SNDRV_PCM_FORMAT_U32_BE;
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Find the intf_info slot index for a card.  On enable, return the first
+ * free slot (or 0 when the info array has not been allocated yet); on
+ * disable, return the slot whose interface number matches @intf_num.
+ * Returns -EINVAL when no suitable slot exists.
+ */
+static int info_idx_from_ifnum(int card_num, int intf_num, bool enable)
+{
+	int idx;
+
+	/*
+	 * default index 0 is used when info is allocated upon
+	 * first enable audio stream req for a pcm device
+	 */
+	if (enable && !uadev[card_num].info)
+		return 0;
+
+	if (enable) {
+		for (idx = 0; idx < uadev[card_num].num_intf; idx++)
+			if (!uadev[card_num].info[idx].in_use)
+				return idx;
+	} else {
+		for (idx = 0; idx < uadev[card_num].num_intf; idx++)
+			if (uadev[card_num].info[idx].intf_num == intf_num)
+				return idx;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Handle QMI_UAUDIO_STREAM_REQ_V01: validate the request, locate the USB
+ * substream addressed by usb_token, enable/disable the audio stream and,
+ * on enable, fill the response with xHCI memory info via
+ * prepare_qmi_response().  A QMI response is always sent; the function
+ * returns the qmi_send_resp_from_cb() result.
+ */
+static int handle_uaudio_stream_req(void *req_h, void *req)
+{
+	struct qmi_uaudio_stream_req_msg_v01 *req_msg;
+	struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0};
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip = NULL;
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	struct intf_info *info;
+	int pcm_format;
+	u8 pcm_card_num, pcm_dev_num, direction;
+	int info_idx = -EINVAL, ret = 0;
+
+	req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)req;
+
+	if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid ||
+	!req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) {
+		pr_err("%s: invalid request msg\n", __func__);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	/* usb_token packs direction, PCM dev # (<<8) and card # (<<16) */
+	direction = req_msg->usb_token & SND_PCM_STREAM_DIRECTION;
+	pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
+	pcm_card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
+
+	pr_debug("%s:card#:%d dev#:%d dir:%d en:%d fmt:%d rate:%d #ch:%d\n",
+		__func__, pcm_card_num, pcm_dev_num, direction, req_msg->enable,
+		req_msg->audio_format, req_msg->bit_rate,
+		req_msg->number_of_ch);
+
+	if (pcm_card_num >= SNDRV_CARDS) {
+		pr_err("%s: invalid card # %u", __func__, pcm_card_num);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	pcm_format = map_pcm_format(req_msg->audio_format);
+	if (pcm_format == -EINVAL) {
+		pr_err("%s: unsupported pcm format received %d\n",
+		__func__, req_msg->audio_format);
+		ret = -EINVAL;
+		goto response;
+	}
+
+	subs = find_snd_usb_substream(pcm_card_num, pcm_dev_num, direction,
+					&chip, uaudio_disconnect_cb);
+	if (!subs || !chip || atomic_read(&chip->shutdown)) {
+		pr_err("%s: can't find substream for card# %u, dev# %u dir%u\n",
+			__func__, pcm_card_num, pcm_dev_num, direction);
+		ret = -ENODEV;
+		goto response;
+	}
+
+	mutex_lock(&chip->dev_lock);
+	info_idx = info_idx_from_ifnum(pcm_card_num, subs->interface,
+		req_msg->enable);
+	/* re-check under the lock: the device may have gone away meanwhile */
+	if (atomic_read(&chip->shutdown) || !subs->stream || !subs->stream->pcm
+			|| !subs->stream->chip) {
+		ret = -ENODEV;
+		mutex_unlock(&chip->dev_lock);
+		goto response;
+	}
+
+	if (req_msg->enable) {
+		if (info_idx < 0) {
+			pr_err("%s interface# %d already in use card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			ret = -EBUSY;
+			mutex_unlock(&chip->dev_lock);
+			goto response;
+		}
+	}
+
+	subs->pcm_format = pcm_format;
+	subs->channels = req_msg->number_of_ch;
+	subs->cur_rate = req_msg->bit_rate;
+	uadev[pcm_card_num].ctrl_intf = chip->ctrl_intf;
+
+	ret = snd_usb_enable_audio_stream(subs, req_msg->enable);
+
+	if (!ret && req_msg->enable)
+		ret = prepare_qmi_response(subs, req_msg, &resp, info_idx);
+
+	mutex_unlock(&chip->dev_lock);
+
+response:
+	/*
+	 * NOTE(review): on a disable request that failed before the
+	 * substream lookup, chip/subs are unset here; this is only guarded
+	 * indirectly by info_idx staying negative - verify all paths.
+	 */
+	if (!req_msg->enable && ret != -EINVAL) {
+		if (info_idx >= 0) {
+			mutex_lock(&chip->dev_lock);
+			info = &uadev[pcm_card_num].info[info_idx];
+			uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
+			pr_debug("%s:release resources: intf# %d card# %d\n",
+				__func__, subs->interface, pcm_card_num);
+			mutex_unlock(&chip->dev_lock);
+		}
+		if (atomic_read(&uadev[pcm_card_num].in_use))
+			kref_put(&uadev[pcm_card_num].kref,
+					uaudio_dev_release);
+	}
+
+	resp.usb_token = req_msg->usb_token;
+	resp.usb_token_valid = 1;
+	resp.internal_status = ret;
+	resp.internal_status_valid = 1;
+	/* ret == 0 maps to status 0 (success), non-zero to the failure enum */
+	resp.status = ret ? USB_AUDIO_STREAM_REQ_FAILURE_V01 : ret;
+	resp.status_valid = 1;
+	ret = qmi_send_resp_from_cb(svc->uaudio_svc_hdl, svc->curr_conn, req_h,
+			&uaudio_stream_resp_desc, &resp, sizeof(resp));
+
+	svc->t_resp_sent = ktime_get();
+
+	pr_debug("%s: t_resp sent - t_req recvd (in ms) %lld\n", __func__,
+		ktime_to_ms(ktime_sub(svc->t_resp_sent, svc->t_request_recvd)));
+
+	return ret;
+}
+
+/*
+ * QMI connect callback: accept a single client at a time on our own
+ * service handle.
+ */
+static int uaudio_qmi_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	/* only requests arriving on our own handle are valid */
+	if (!conn_h || handle != svc->uaudio_svc_hdl) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	/* single-client service: refuse a second concurrent connection */
+	if (svc->curr_conn) {
+		pr_err("%s: Service is busy\n", __func__);
+		return -ECONNREFUSED;
+	}
+
+	svc->curr_conn = conn_h;
+	return 0;
+}
+
+/*
+ * Deferred cleanup after the QMI client disconnects: for every card still
+ * marked in use, set each active interface back to altsetting 0 and then
+ * release its cached resources.
+ * Fixes vs. original: @chip was never reset between cards (a stale
+ * pointer from the previous iteration could be locked for the wrong
+ * card), and was dereferenced unconditionally even when no substream of
+ * the card resolved (NULL pointer dereference).
+ */
+static void uaudio_qmi_disconnect_work(struct work_struct *w)
+{
+	struct intf_info *info;
+	int idx, if_idx;
+	struct snd_usb_substream *subs;
+	struct snd_usb_audio *chip;
+
+	/* find all active intf for set alt 0 and cleanup usb audio dev */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!atomic_read(&uadev[idx].in_use))
+			continue;
+
+		/* reset per card so a stale chip is never used below */
+		chip = NULL;
+		for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) {
+			if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use)
+				continue;
+			info = &uadev[idx].info[if_idx];
+			subs = find_snd_usb_substream(info->pcm_card_num,
+							info->pcm_dev_num,
+							info->direction,
+							&chip,
+							uaudio_disconnect_cb);
+			if (!subs || !chip || atomic_read(&chip->shutdown)) {
+				pr_debug("%s:no subs for c#%u, dev#%u dir%u\n",
+					__func__, info->pcm_card_num,
+					info->pcm_dev_num,
+					info->direction);
+				continue;
+			}
+			snd_usb_enable_audio_stream(subs, 0);
+		}
+		atomic_set(&uadev[idx].in_use, 0);
+		/* chip is NULL when no substream resolved: nothing to lock */
+		if (chip) {
+			mutex_lock(&chip->dev_lock);
+			uaudio_dev_cleanup(&uadev[idx]);
+			mutex_unlock(&chip->dev_lock);
+		}
+	}
+}
+
+/*
+ * QMI disconnect callback: clear the current connection and defer stream
+ * shutdown and per-device cleanup to uaudio_qmi_disconnect_work() on the
+ * service workqueue.
+ */
+static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	svc->curr_conn = NULL;
+	queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
+
+	return 0;
+}
+
+/*
+ * QMI request dispatcher: validates the handle/connection pair and routes
+ * the message; QMI_UAUDIO_STREAM_REQ_V01 is the only supported request.
+ */
+static int uaudio_qmi_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+	int ret = -ENOTSUPP;
+
+	/* serve only the currently connected client */
+	if (svc->uaudio_svc_hdl != handle || svc->curr_conn != conn_h) {
+		pr_err("%s: handle mismatch\n", __func__);
+		return -EINVAL;
+	}
+
+	if (msg_id == QMI_UAUDIO_STREAM_REQ_V01)
+		ret = handle_uaudio_stream_req(req_h, req);
+
+	return ret;
+}
+
+/*
+ * Supply the decode descriptor for an incoming message.  Returns the
+ * decoded request size on success, -ENOTSUPP for unknown message ids.
+ */
+static int uaudio_qmi_svc_req_desc_cb(unsigned int msg_id,
+	struct msg_desc **req_desc)
+{
+	int ret = -ENOTSUPP;
+
+	pr_debug("%s: msg_id %d\n", __func__, msg_id);
+
+	if (msg_id == QMI_UAUDIO_STREAM_REQ_V01) {
+		*req_desc = &uaudio_stream_req_desc;
+		ret = sizeof(struct qmi_uaudio_stream_req_msg_v01);
+	}
+
+	return ret;
+}
+
+/*
+ * Work item: drain all pending QMI messages for the service handle.
+ * qmi_recv_msg() returns 0 while messages remain; per the check below,
+ * -ENOMSG signals an empty queue and anything else is a receive error.
+ * Fix vs. original: the error log now includes the actual error code.
+ */
+static void uaudio_qmi_svc_recv_msg(struct work_struct *w)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc = container_of(w, struct uaudio_qmi_svc,
+		recv_msg_work);
+
+	do {
+		pr_debug("%s: Notified about a Receive Event", __func__);
+	} while ((ret = qmi_recv_msg(svc->uaudio_svc_hdl)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message ret %d\n", __func__, ret);
+}
+
+/*
+ * QMI event notifier: defers actual message reception to the service
+ * workqueue.  t_request_recvd is stamped for every event (not only
+ * QMI_RECV_MSG); it feeds the latency pr_debug in
+ * handle_uaudio_stream_req().
+ */
+static void uaudio_qmi_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	pr_debug("%s: event %d", __func__, event);
+
+	svc->t_request_recvd = ktime_get();
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_work(svc->uaudio_wq, &svc->recv_msg_work);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Registration info and callback table for the uaudio stream service. */
+static struct qmi_svc_ops_options uaudio_svc_ops_options = {
+	.version = 1,
+	.service_id = UAUDIO_STREAM_SERVICE_ID_V01,
+	.service_vers = UAUDIO_STREAM_SERVICE_VERS_V01,
+	.connect_cb = uaudio_qmi_svc_connect_cb,
+	.disconnect_cb = uaudio_qmi_svc_disconnect_cb,
+	.req_desc_cb = uaudio_qmi_svc_req_desc_cb,
+	.req_cb = uaudio_qmi_svc_req_cb,
+};
+
+/*
+ * Platform probe: allocate the module-wide uaudio_qmi_dev, read the SMMU
+ * stream id and event-ring interrupter number from DT, allocate and
+ * attach an IOMMU domain for the external processor, and initialise the
+ * DCBA, transfer-ring and transfer-buffer IOVA pools.
+ */
+static int uaudio_qmi_plat_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	uaudio_qdev = devm_kzalloc(&pdev->dev, sizeof(struct uaudio_qmi_dev),
+		GFP_KERNEL);
+	if (!uaudio_qdev)
+		return -ENOMEM;
+
+	uaudio_qdev->dev = &pdev->dev;
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-stream-id",
+				&uaudio_qdev->sid);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read sid.\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(node, "qcom,usb-audio-intr-num",
+				&uaudio_qdev->intr_num);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read intr num.\n");
+		return -ENODEV;
+	}
+
+	uaudio_qdev->domain = iommu_domain_alloc(pdev->dev.bus);
+	if (!uaudio_qdev->domain) {
+		dev_err(&pdev->dev, "failed to allocate iommu domain\n");
+		return -ENODEV;
+	}
+
+	/* attach to external processor iommu */
+	ret = iommu_attach_device(uaudio_qdev->domain, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to attach device ret = %d\n", ret);
+		goto free_domain;
+	}
+
+	/* initialize dcba, xfer ring and xfer buf iova list */
+	INIT_LIST_HEAD(&uaudio_qdev->dcba_list);
+	uaudio_qdev->curr_dcba_iova = IOVA_DCBA_BASE;
+	uaudio_qdev->dcba_iova_size = SNDRV_CARDS * PAGE_SIZE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_ring_list);
+	uaudio_qdev->curr_xfer_ring_iova = IOVA_XFER_RING_BASE;
+	uaudio_qdev->xfer_ring_iova_size =
+			IOVA_XFER_RING_MAX - IOVA_XFER_RING_BASE;
+
+	INIT_LIST_HEAD(&uaudio_qdev->xfer_buf_list);
+	uaudio_qdev->curr_xfer_buf_iova = IOVA_XFER_BUF_BASE;
+	uaudio_qdev->xfer_buf_iova_size =
+		IOVA_XFER_BUF_MAX - IOVA_XFER_BUF_BASE;
+
+	return 0;
+
+free_domain:
+	iommu_domain_free(uaudio_qdev->domain);
+	return ret;
+}
+
+/* Platform remove: detach and free the external-processor IOMMU domain. */
+static int uaudio_qmi_plat_remove(struct platform_device *pdev)
+{
+	iommu_detach_device(uaudio_qdev->domain, &pdev->dev);
+	iommu_domain_free(uaudio_qdev->domain);
+	uaudio_qdev->domain = NULL;
+
+	return 0;
+}
+
+/*
+ * DT match table.  ("matach" spelling kept as-is: the identifier is
+ * referenced by uaudio_qmi_driver below.)
+ */
+static const struct of_device_id of_uaudio_matach[] = {
+	{
+		.compatible = "qcom,usb-audio-qmi-dev",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_uaudio_matach);
+
+/* Platform driver binding for the qcom,usb-audio-qmi-dev DT node. */
+static struct platform_driver uaudio_qmi_driver = {
+	.probe		= uaudio_qmi_plat_probe,
+	.remove		= uaudio_qmi_plat_remove,
+	.driver		= {
+		.name	= "uaudio-qmi",
+		.of_match_table	= of_uaudio_matach,
+	},
+};
+
+/*
+ * Bring up the QMI service: allocate service state, create its
+ * workqueue and QMI handle, then register with the QMI framework.
+ * Fix vs. original: the work items were initialised and uaudio_svc was
+ * published only *after* qmi_svc_register(); a callback arriving in
+ * that window (uaudio_qmi_svc_ntfy() reads uaudio_svc and queues
+ * recv_msg_work) would dereference NULL or queue an uninitialised
+ * work_struct.  Initialisation/publication now happens first, and is
+ * rolled back on registration failure.
+ */
+static int uaudio_qmi_svc_init(void)
+{
+	int ret;
+	struct uaudio_qmi_svc *svc;
+
+	svc = kzalloc(sizeof(struct uaudio_qmi_svc), GFP_KERNEL);
+	if (!svc)
+		return -ENOMEM;
+
+	svc->uaudio_wq = create_singlethread_workqueue("uaudio_svc");
+	if (!svc->uaudio_wq) {
+		ret = -ENOMEM;
+		goto free_svc;
+	}
+
+	svc->uaudio_svc_hdl = qmi_handle_create(uaudio_qmi_svc_ntfy, NULL);
+	if (!svc->uaudio_svc_hdl) {
+		pr_err("%s: Error creating svc_hdl\n", __func__);
+		ret = -EFAULT;
+		goto destroy_uaudio_wq;
+	}
+
+	/* must be ready before the service becomes visible to clients */
+	INIT_WORK(&svc->recv_msg_work, uaudio_qmi_svc_recv_msg);
+	INIT_WORK(&svc->qmi_disconnect_work, uaudio_qmi_disconnect_work);
+	uaudio_svc = svc;
+
+	ret = qmi_svc_register(svc->uaudio_svc_hdl, &uaudio_svc_ops_options);
+	if (ret < 0) {
+		pr_err("%s:Error registering uaudio svc %d\n", __func__, ret);
+		goto destroy_svc_handle;
+	}
+
+	return 0;
+
+destroy_svc_handle:
+	uaudio_svc = NULL;
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+destroy_uaudio_wq:
+	destroy_workqueue(svc->uaudio_wq);
+free_svc:
+	kfree(svc);
+	return ret;
+}
+
+/*
+ * Tear down the QMI service: unregister, flush outstanding work, destroy
+ * the handle and workqueue, and free the service state.
+ * Robustness fix: bail out if the service was never brought up, instead
+ * of dereferencing a NULL uaudio_svc.
+ */
+static void uaudio_qmi_svc_exit(void)
+{
+	struct uaudio_qmi_svc *svc = uaudio_svc;
+
+	if (!svc)
+		return;
+
+	qmi_svc_unregister(svc->uaudio_svc_hdl);
+	flush_workqueue(svc->uaudio_wq);
+	qmi_handle_destroy(svc->uaudio_svc_hdl);
+	destroy_workqueue(svc->uaudio_wq);
+	kfree(svc);
+	uaudio_svc = NULL;
+}
+
+/*
+ * Module init: register the platform driver, then start the QMI service.
+ * Fix vs. original: if uaudio_qmi_svc_init() fails, the platform driver
+ * must be unregistered again - otherwise a failed module load leaves the
+ * driver registered.
+ */
+static int __init uaudio_qmi_plat_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&uaudio_qmi_driver);
+	if (ret)
+		return ret;
+
+	ret = uaudio_qmi_svc_init();
+	if (ret)
+		platform_driver_unregister(&uaudio_qmi_driver);
+
+	return ret;
+}
+
+/* Module exit: stop the QMI service, then remove the platform driver. */
+static void __exit uaudio_qmi_plat_exit(void)
+{
+	uaudio_qmi_svc_exit();
+	platform_driver_unregister(&uaudio_qmi_driver);
+}
+
+/* Module entry points and metadata. */
+module_init(uaudio_qmi_plat_init);
+module_exit(uaudio_qmi_plat_exit);
+
+MODULE_DESCRIPTION("USB AUDIO QMI Service Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
new file mode 100644
index 0000000..fef7505
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -0,0 +1,833 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "usb_audio_qmi_v01.h"
+
+/* QMI encode/decode rules for struct mem_info_v01 (iova, pa, size). */
+static struct elem_info mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   va),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_8_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint64_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   pa),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct mem_info_v01,
+					   size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		/*
+		 * Fix: the terminator previously initialised .is_array twice
+		 * (the second designator silently overrode the first and left
+		 * .tlv_type unset); QMI_COMMON_TLV_TYPE belongs in .tlv_type.
+		 */
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/*
+ * QMI encode/decode rules for struct apps_mem_info_v01: five nested
+ * mem_info_v01 regions shared with the remote processor.
+ */
+static struct elem_info apps_mem_info_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   evt_ring),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_data),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   tr_sync),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   xfer_buff),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct apps_mem_info_v01,
+					   dcba),
+		.ei_array      = mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		/* fix: duplicate .is_array designator; this value is the TLV type */
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode rules mirroring a USB endpoint descriptor. */
+static struct elem_info usb_endpoint_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bEndpointAddress),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bmAttributes),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   wMaxPacketSize),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bInterval),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bRefresh),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_endpoint_descriptor_v01,
+					   bSynchAddress),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		/* fix: duplicate .is_array designator; this value is the TLV type */
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* QMI encode/decode rules mirroring a USB interface descriptor. */
+static struct elem_info usb_interface_descriptor_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bLength),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bDescriptorType),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceNumber),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bAlternateSetting),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bNumEndpoints),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceSubClass),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   bInterfaceProtocol),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0,
+		.offset         = offsetof(struct usb_interface_descriptor_v01,
+					   iInterface),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		/* fix: duplicate .is_array designator; this value is the TLV type */
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   enable),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   audio_format),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   number_of_ch),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   bit_rate),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+					   xfer_buff_size),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE, /* fix: was a duplicate .is_array initializer, leaving tlv_type 0 */
+	},
+};
+
+struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum usb_audio_stream_status_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					internal_status),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x19,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1A,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x1B,
+		.offset         = offsetof(
+					struct qmi_uaudio_stream_resp_msg_v01,
+					interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE, /* fix: was a duplicate .is_array initializer, leaving tlv_type 0 */
+	},
+};
+
+struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(
+				enum usb_audio_device_indication_enum_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   dev_event),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   slot_id),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_token),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_interface_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_opr_intf_desc),
+		.ei_array      = usb_interface_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x12,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_data_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct usb_endpoint_descriptor_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x13,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   std_as_sync_ep_desc),
+		.ei_array      = usb_endpoint_descriptor_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_2_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x14,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_spec_revision),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x15,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   data_path_delay),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x16,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   usb_audio_subslot_size),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info_valid),
+	},
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct apps_mem_info_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x17,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   xhci_mem_info),
+		.ei_array      = apps_mem_info_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x18,
+		.offset         = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+					   interrupter_num),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE, /* fix: was a duplicate .is_array initializer, leaving tlv_type 0 */
+	},
+};
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
new file mode 100644
index 0000000..83a966c
--- /dev/null
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef USB_QMI_V01_H
+#define USB_QMI_V01_H	/* NOTE(review): relies on the includer for uintN_t and INT_MIN/INT_MAX — confirm */
+
+#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D	/* QMI service ID for the USB audio stream service */
+#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01	/* service version */
+
+#define QMI_UAUDIO_STREAM_RESP_V01 0x0001	/* message IDs: req/resp/ind all use 0x0001 */
+#define QMI_UAUDIO_STREAM_REQ_V01 0x0001
+#define QMI_UADUIO_STREAM_IND_V01 0x0001	/* NOTE(review): "UADUIO" typo in name; renaming would break users */
+
+
+struct mem_info_v01 {	/* one memory region exchanged over QMI */
+	uint64_t va;	/* virtual address */
+	uint64_t pa;	/* physical address */
+	uint32_t size;	/* region size (presumably bytes) */
+};
+
+struct apps_mem_info_v01 {	/* region set carried as xhci_mem_info in resp/ind messages */
+	struct mem_info_v01 evt_ring;
+	struct mem_info_v01 tr_data;
+	struct mem_info_v01 tr_sync;
+	struct mem_info_v01 xfer_buff;
+	struct mem_info_v01 dcba;	/* presumably the xHCI device-context base-address array — confirm */
+};
+
+struct usb_endpoint_descriptor_v01 {	/* QMI-encodable mirror of a standard USB endpoint descriptor */
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bEndpointAddress;
+	uint8_t bmAttributes;
+	uint16_t wMaxPacketSize;
+	uint8_t bInterval;
+	uint8_t bRefresh;
+	uint8_t bSynchAddress;
+};
+
+struct usb_interface_descriptor_v01 {	/* QMI-encodable mirror of a standard USB interface descriptor */
+	uint8_t bLength;
+	uint8_t bDescriptorType;
+	uint8_t bInterfaceNumber;
+	uint8_t bAlternateSetting;
+	uint8_t bNumEndpoints;
+	uint8_t bInterfaceClass;
+	uint8_t bInterfaceSubClass;
+	uint8_t bInterfaceProtocol;
+	uint8_t iInterface;
+};
+
+enum usb_audio_stream_status_enum_v01 {	/* status reported in qmi_uaudio_stream_resp_msg_v01 */
+	USB_AUDIO_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN,	/* sentinel: pins enum to 32-bit width */
+	USB_AUDIO_STREAM_REQ_SUCCESS_V01 = 0,
+	USB_AUDIO_STREAM_REQ_FAILURE_V01 = 1,
+	USB_AUDIO_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2,
+	USB_AUDIO_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3,
+	USB_AUDIO_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4,
+	USB_AUDIO_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX,	/* sentinel: pins enum to 32-bit width */
+};
+
+enum usb_audio_device_indication_enum_v01 {	/* dev_event carried in qmi_uaudio_stream_ind_msg_v01 */
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN,	/* sentinel: pins enum to 32-bit width */
+	USB_AUDIO_DEV_CONNECT_V01 = 0,
+	USB_AUDIO_DEV_DISCONNECT_V01 = 1,
+	USB_AUDIO_DEV_SUSPEND_V01 = 2,
+	USB_AUDIO_DEV_RESUME_V01 = 3,
+	USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,	/* sentinel: pins enum to 32-bit width */
+};
+
+struct qmi_uaudio_stream_req_msg_v01 {	/* request payload; each *_valid flag gates the optional TLV after it */
+	uint8_t enable;
+	uint32_t usb_token;
+	uint8_t audio_format_valid;
+	uint32_t audio_format;
+	uint8_t number_of_ch_valid;
+	uint32_t number_of_ch;
+	uint8_t bit_rate_valid;
+	uint32_t bit_rate;
+	uint8_t xfer_buff_size_valid;
+	uint32_t xfer_buff_size;
+};
+#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 39	/* max encoded wire length — verify against ei table */
+extern struct elem_info qmi_uaudio_stream_req_msg_v01_ei[];
+
+struct qmi_uaudio_stream_resp_msg_v01 {	/* response payload; each *_valid flag gates the optional TLV after it */
+	struct qmi_response_type_v01 resp;	/* mandatory QMI result/error pair */
+	uint8_t status_valid;
+	enum usb_audio_stream_status_enum_v01 status;
+	uint8_t internal_status_valid;
+	uint32_t internal_status;
+	uint8_t slot_id_valid;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191	/* max encoded wire length — verify against ei table */
+extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
+
+struct qmi_uaudio_stream_ind_msg_v01 {	/* indication payload; each *_valid flag gates the optional TLV after it */
+	enum usb_audio_device_indication_enum_v01 dev_event;
+	uint32_t slot_id;
+	uint8_t usb_token_valid;
+	uint32_t usb_token;
+	uint8_t std_as_opr_intf_desc_valid;
+	struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+	uint8_t std_as_data_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+	uint8_t std_as_sync_ep_desc_valid;
+	struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+	uint8_t usb_audio_spec_revision_valid;
+	uint16_t usb_audio_spec_revision;
+	uint8_t data_path_delay_valid;
+	uint8_t data_path_delay;
+	uint8_t usb_audio_subslot_size_valid;
+	uint8_t usb_audio_subslot_size;
+	uint8_t xhci_mem_info_valid;
+	struct apps_mem_info_v01 xhci_mem_info;
+	uint8_t interrupter_num_valid;
+	uint8_t interrupter_num;
+};
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 177	/* max encoded wire length — verify against ei table */
+extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
+
+#endif
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 4d5c89a..93c4bed 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -61,6 +61,10 @@
 	bool autoclock;			/* from the 'autoclock' module param */
 
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
+
+	struct mutex dev_lock;	/* to protect any race with disconnect */
+	int card_num;	/* cache pcm card number to use upon disconnect */
+	void (*disconnect_cb)(struct snd_usb_audio *chip);
 };
 
 #define usb_audio_err(chip, fmt, args...) \
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index ebe1b9f..85814d1 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -187,21 +187,37 @@
 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 				    bool new_active_state)
 {
+	struct kvm_vcpu *requester_vcpu;
 	spin_lock(&irq->irq_lock);
+
+	/*
+	 * The vcpu parameter here can mean multiple things depending on how
+	 * this function is called; when handling a trap from the kernel it
+	 * depends on the GIC version, and these functions are also called as
+	 * part of save/restore from userspace.
+	 *
+	 * Therefore, we have to figure out the requester in a reliable way.
+	 *
+	 * When accessing VGIC state from user space, the requester_vcpu is
+	 * NULL, which is fine, because we guarantee that no VCPUs are running
+	 * when accessing VGIC state from user space so irq->vcpu->cpu is
+	 * always -1.
+	 */
+	requester_vcpu = kvm_arm_get_running_vcpu();
+
 	/*
 	 * If this virtual IRQ was written into a list register, we
 	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back LR state to the struct vgic_irq.  We can only
-	 * know this for sure, when either this irq is not assigned to
-	 * anyone's AP list anymore, or the VCPU thread is not
-	 * running on any CPUs.
+	 * synced back the LR state to the struct vgic_irq.
 	 *
-	 * In the opposite case, we know the VCPU thread may be on its
-	 * way back from the guest and still has to sync back this
-	 * IRQ, so we release and re-acquire the spin_lock to let the
-	 * other thread sync back the IRQ.
+	 * As long as the conditions below are true, we know the VCPU thread
+	 * may be on its way back from the guest (we kicked the VCPU thread in
+	 * vgic_change_active_prepare)  and still has to sync back this IRQ,
+	 * so we release and re-acquire the spin_lock to let the other thread
+	 * sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
 	       irq->vcpu->cpu != -1) /* VCPU thread is running */
 		cond_resched_lock(&irq->irq_lock);