Merge "soc: qcom: glink: Validate ctx before using" into msm-4.9
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 0cf9a6b..472122f 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -227,49 +227,7 @@
 usage, similar to "ondemand" and "conservative" governors, but with a
 different set of configurable behaviors.
 
-The tunable values for this governor are:
-
-above_hispeed_delay: When speed is at or above hispeed_freq, wait for
-this long before raising speed in response to continued high load.
-The format is a single delay value, optionally followed by pairs of
-CPU speeds and the delay to use at or above those speeds.  Colons can
-be used between the speeds and associated delays for readability.  For
-example:
-
-   80000 1300000:200000 1500000:40000
-
-uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
-200000 uS is used until speed 1.5 GHz, at which speed (and above)
-delay 40000 uS is used.  If speeds are specified these must appear in
-ascending order.  Default is 20000 uS.
-
-boost: If non-zero, immediately boost speed of all CPUs to at least
-hispeed_freq until zero is written to this attribute.  If zero, allow
-CPU speeds to drop below hispeed_freq according to load as usual.
-Default is zero.
-
-boostpulse: On each write, immediately boost speed of all CPUs to
-hispeed_freq for at least the period of time specified by
-boostpulse_duration, after which speeds are allowed to drop below
-hispeed_freq according to load as usual. Its a write-only file.
-
-boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
-on a write to boostpulse, before allowing speed to drop according to
-load as usual.  Default is 80000 uS.
-
-go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
-Default is 99%.
-
-hispeed_freq: An intermediate "high speed" at which to initially ramp
-when CPU load hits the value specified in go_hispeed_load.  If load
-stays high for the amount of time specified in above_hispeed_delay,
-then speed may be bumped higher.  Default is the maximum speed allowed
-by the policy at governor initialization time.
-
-io_is_busy: If set, the governor accounts IO time as CPU busy time.
-
-min_sample_time: The minimum amount of time to spend at the current
-frequency before ramping down. Default is 80000 uS.
+The tuneable values for this governor are:
 
 target_loads: CPU load values used to adjust speed to influence the
 current CPU load toward that value.  In general, the lower the target
@@ -288,6 +246,32 @@
 values also usually appear in an ascending order. The default is
 target load 90% for all speeds.
 
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified these must appear in
+ascending order.  Default is 20000 uS.
+
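For reference, the governor stores this pair list as a flat array in which
even indices hold delays and odd indices hold the speeds at which the next
delay takes effect. Below is a minimal C sketch of the lookup, mirroring the
freq_to_above_hispeed_delay() helper later in this patch; delay_for_speed()
is illustrative and not part of the patch:

	/* "80000 1300000:200000 1500000:40000" as stored internally */
	static unsigned int delays[] = { 80000, 1300000, 200000,
					 1500000, 40000 };
	static const int ndelays = 5;

	static unsigned int delay_for_speed(unsigned int freq)
	{
		int i;

		/* skip past each speed threshold we are at or above */
		for (i = 0; i < ndelays - 1 && freq >= delays[i + 1]; i += 2)
			;
		return delays[i];
	}

With the example values, a CPU at 1.4 GHz lands on index 2 and gets the
200000 uS delay.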
 timer_rate: Sample rate for reevaluating CPU load when the CPU is not
 idle.  A deferrable timer is used, such that the CPU will not be woken
 from idle to service this timer until something else needs to run.
@@ -304,6 +288,65 @@
 when not at lowest speed.  A value of -1 means defer timers
 indefinitely at all speeds.  Default is 80000 uS.
 
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+align_windows: If non-zero, align the governor timer window to fire at
+multiples of the number of jiffies that timer_rate converts to.
+
+use_sched_load: If non-zero, query the scheduler for CPU busy time
+instead of collecting it directly in the governor. This allows the
+scheduler to adjust the busy time of each CPU to account for known
+information such as migration. If non-zero, this also implies that
+governor sampling windows are aligned across CPUs with the same
+timer_rate, regardless of what align_windows is set to. Default is zero.
+
+use_migration_notif: If non-zero, schedule an hrtimer to fire in 1 ms
+to reevaluate the frequency of the notified CPU, unless the hrtimer is
+already pending. If zero, ignore scheduler notifications. Default is
+zero.
+
+max_freq_hysteresis: Each time frequency evaluation chooses policy->max,
+the next max_freq_hysteresis microseconds are treated as a hysteresis
+period. During this period the frequency target will not drop below
+hispeed_freq, no matter how light the actual workload is. If the CPU
+load of any sampling window exceeds go_hispeed_load during this period,
+the governor directly increases the frequency back to policy->max.
+Default is 0 uS.
+
+ignore_hispeed_on_notif: If non-zero, do not apply hispeed-related
+logic when frequency evaluation is triggered by a scheduler
+notification. This includes ignoring go_hispeed_load and hispeed_freq
+in frequency selection, and ignoring the above_hispeed_delay that
+prevents frequency ramp-up. For evaluation triggered by the timer,
+hispeed logic is still always applied. ignore_hispeed_on_notif has no
+effect if use_migration_notif is set to zero. Default is zero.
+
+fast_ramp_down: If non-zero, do not apply min_sample_time when
+frequency evaluation is triggered by a scheduler notification. For
+evaluation triggered by the timer, min_sample_time is still always
+enforced. fast_ramp_down has no effect if use_migration_notif is set
+to zero. Default is zero.
+
+enable_prediction: If non-zero, two frequencies are calculated during
+each sampling period: one based on busy time in the previous sampling
+period (f_prev), and the other based on a prediction provided by the
+scheduler (f_pred). The higher of the two is selected as the final
+frequency. Hispeed-related logic, including both frequency selection
+and delay, is ignored if enable_prediction is set. If only f_pred, but
+not f_prev, picked policy->max, the max_freq_hysteresis period is not
+started/extended. use_sched_load must be turned on before enabling
+this feature. Default is zero.
+
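In terms of the timer path added later in this patch
(cpufreq_interactive_timer()), enable_prediction reduces the final
selection to the following lines, where prev_laf and pred_laf are the
load-adjusted frequencies derived from measured and predicted busy time:

	prev_chfreq = choose_freq(ppol, prev_laf);	/* f_prev */
	pred_chfreq = choose_freq(ppol, pred_laf);	/* f_pred */
	chosen_freq = max(prev_chfreq, pred_chfreq);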
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
new file mode 100644
index 0000000..6bed785
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/bimc-bwmon.txt
@@ -0,0 +1,25 @@
+MSM BIMC bandwidth monitor device
+
+bimc-bwmon is a device that represents an MSM BIMC bandwidth monitor that
+can be used to measure the bandwidth of read/write traffic from the BIMC
+master ports. For example, the CPU subsystem sits on one BIMC master port.
+
+Required properties:
+- compatible:		Must be one of "qcom,bimc-bwmon" or "qcom,bimc-bwmon2"
+- reg:			Pairs of physical base addresses and region sizes of
+			memory mapped registers.
+- reg-names:		Names of the bases for the above registers. Expected
+			bases are: "base", "global_base"
+- interrupts:		Lists the threshold IRQ.
+- qcom,mport:		The hardware master port that this device can monitor
+- qcom,target-dev:	The DT device that corresponds to this master port
+
+Example:
+	qcom,cpu-bwmon {
+		compatible = "qcom,bimc-bwmon";
+		reg = <0xfc388000 0x300>, <0xfc381000 0x200>;
+		reg-names = "base", "global_base";
+		interrupts = <0 183 1>;
+		qcom,mport = <0>;
+		qcom,target-dev = <&cpubw>;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devbw.txt b/Documentation/devicetree/bindings/devfreq/devbw.txt
new file mode 100644
index 0000000..ece0fa7
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devbw.txt
@@ -0,0 +1,39 @@
+MSM device bandwidth device
+
+devbw is a device that represents an MSM device's bandwidth (BW)
+requirements from its master port(s) to a different device's slave
+port(s) in an MSM SoC. This device is typically used to vote for BW
+requirements from a device's (e.g. CPU, GPU) master port(s) to the
+slave (e.g. DDR) port(s).
+
+Required properties:
+- compatible:		Must be "qcom,devbw"
+- qcom,src-dst-ports:	A list of tuples where each tuple consists of a bus
+			master port number and a bus slave port number.
+- qcom,bw-tbl:		A list of meaningful instantaneous bandwidth values
+			(in MB/s) that can be requested from the device
+			master port to the slave port. The list of values
+			depends on the supported bus/slave frequencies and the
+			bus width.
+
+Optional properties:
+- qcom,active-only:	Indicates that the bandwidth votes need to be
+			enforced only when the CPU subsystem is active.
+- governor:		Initial governor to use for the device.
+			Default: "performance"
+
+Example:
+
+	qcom,cpubw {
+		compatible = "qcom,devbw";
+		qcom,src-dst-ports = <1 512>, <2 512>;
+		qcom,active-only;
+		qcom,bw-tbl =
+			<  572 /*  75 MHz */ >,
+			< 1144 /* 150 MHz */ >,
+			< 1525 /* 200 MHz */ >,
+			< 2342 /* 307 MHz */ >,
+			< 3509 /* 460 MHz */ >,
+			< 4684 /* 614 MHz */ >,
+			< 6103 /* 800 MHz */ >,
+			< 7102 /* 931 MHz */ >;
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
new file mode 100644
index 0000000..6537538
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-cpufreq.txt
@@ -0,0 +1,53 @@
+Devfreq CPUfreq governor
+
+devfreq-cpufreq is a parent device that contains one or more child devices.
+Each child device provides CPU frequency to device frequency mapping for a
+specific device. Examples of devices that could use this are: DDR, cache and
+CCI.
+
+Parent device name shall be "devfreq-cpufreq".
+
+Required child device properties:
+- cpu-to-dev-map, or cpu-to-dev-map-<cpuid>:
+			A list of tuples where each tuple consists of a
+			CPU frequency (KHz) and the corresponding device
+			frequency. CPU frequencies not listed in the table
+			use the device frequency that corresponds to the
+			next higher CPU frequency listed in the table.
+			Use "cpu-to-dev-map" if all CPUs in the system
+			should share the same mapping.
+			Use cpu-to-dev-map-<cpuid> to describe different
+			mappings for different CPUs. The property should be
+			listed only for the first CPU if multiple CPUs are
+			synchronous.
+- target-dev:		Phandle to device that this mapping applies to.
+
+Example:
+	devfreq-cpufreq {
+		cpubw-cpufreq {
+			target-dev = <&cpubw>;
+			cpu-to-dev-map =
+				<  300000  1144 >,
+				<  422400  2288 >,
+				<  652800  3051 >,
+				<  883200  5996 >,
+				< 1190400  8056 >,
+				< 1497600 10101 >,
+				< 1728000 12145 >,
+				< 2649600 16250 >;
+		};
+
+		cache-cpufreq {
+			target-dev = <&cache>;
+			cpu-to-dev-map =
+				<  300000  300000 >,
+				<  422400  422400 >,
+				<  652800  499200 >,
+				<  883200  576000 >,
+				<  960000  960000 >,
+				< 1497600 1036800 >,
+				< 1574400 1574400 >,
+				< 1728000 1651200 >,
+				< 2649600 1728000 >;
+		};
+	};
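For illustration, with the cpubw-cpufreq table above, a CPU running at
500000 KHz has no exact entry, so the mapping rounds up to the 652800 KHz
row and the cpubw device is asked for the corresponding value, 3051.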
diff --git a/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
new file mode 100644
index 0000000..4072053
--- /dev/null
+++ b/Documentation/devicetree/bindings/devfreq/devfreq-simple-dev.txt
@@ -0,0 +1,47 @@
+Devfreq simple device
+
+devfreq-simple-dev is a device that represents a simple device that cannot do
+any status reporting and uses a clock that can be scaled by one of more
+devfreq governors.  It provides a list of usable frequencies for the device
+and some additional optional parameters.
+
+Required properties:
+- compatible:		Must be "devfreq-simple-dev"
+- clock-names:		Must be "devfreq_clk"
+- clocks:		Must refer to the clock that's fed to the device.
+- freq-tbl-khz:		A list of usable frequencies (in KHz) for the device
+			clock.
+Optional properties:
+- polling-ms:	Polling interval for the device in milliseconds. Default: 50
+- governor:	Initial governor to use for the device. Default: "performance"
+
+Example:
+
+	qcom,cache {
+		compatible = "devfreq-simple-dev";
+		clock-names = "devfreq_clk";
+		clocks = <&clock_krait clk_l2_clk>;
+		polling-ms = <50>;
+		governor = "cpufreq";
+		freq-tbl-khz =
+			<  300000 >,
+			<  345600 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >;
+	};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index d07c3a5..2a23d05 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -317,6 +317,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 36d878f..844b286 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -329,6 +329,7 @@
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SDM845=y
 CONFIG_UHID=y
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f4cc1bd..0f6039e 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -328,32 +328,6 @@
 	},
 };
 
-static const struct freq_tbl ftbl_gcc_mmss_qm_core_clk_src[] = {
-	F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0),
-	F(150000000, P_GPLL0_OUT_EVEN, 2, 0, 0),
-	F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
-	{ }
-};
-
-static struct clk_rcg2 gcc_mmss_qm_core_clk_src = {
-	.cmd_rcgr = 0xb040,
-	.mnd_width = 0,
-	.hid_width = 5,
-	.parent_map = gcc_parent_map_0,
-	.freq_tbl = ftbl_gcc_mmss_qm_core_clk_src,
-	.clkr.hw.init = &(struct clk_init_data){
-		.name = "gcc_mmss_qm_core_clk_src",
-		.parent_names = gcc_parent_names_0,
-		.num_parents = 4,
-		.flags = CLK_SET_RATE_PARENT,
-		.ops = &clk_rcg2_ops,
-		VDD_CX_FMAX_MAP3(
-			MIN, 75000000,
-			LOWER, 150000000,
-			LOW, 300000000),
-	},
-};
-
 static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
 	F(9600000, P_BI_TCXO, 2, 0, 0),
 	F(19200000, P_BI_TCXO, 1, 0, 0),
@@ -1669,37 +1643,6 @@
 	},
 };
 
-static struct clk_branch gcc_mmss_qm_ahb_clk = {
-	.halt_reg = 0xb05c,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb05c,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_ahb_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_mmss_qm_core_clk = {
-	.halt_reg = 0xb038,
-	.halt_check = BRANCH_HALT,
-	.clkr = {
-		.enable_reg = 0xb038,
-		.enable_mask = BIT(0),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_mmss_qm_core_clk",
-			.parent_names = (const char *[]){
-				"gcc_mmss_qm_core_clk_src",
-			},
-			.num_parents = 1,
-			.flags = CLK_SET_RATE_PARENT,
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_mss_axis2_clk = {
 	.halt_reg = 0x8a008,
 	.halt_check = BRANCH_HALT,
@@ -3233,9 +3176,6 @@
 	[GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
 	[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
 	[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
-	[GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
-	[GCC_MMSS_QM_CORE_CLK_SRC] = &gcc_mmss_qm_core_clk_src.clkr,
 	[GCC_MSS_AXIS2_CLK] = &gcc_mss_axis2_clk.clkr,
 	[GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
 	[GCC_MSS_GPLL0_DIV_CLK_SRC] = &gcc_mss_gpll0_div_clk_src.clkr,
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 884e557..f18dccf 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -113,7 +113,6 @@
 config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
 	bool "interactive"
 	select CPU_FREQ_GOV_INTERACTIVE
-	select CPU_FREQ_GOV_PERFORMANCE
 	help
 	  Use the CPUFreq governor 'interactive' as default. This allows
 	  you to get a full dynamic cpu frequency capable system by simply
@@ -187,6 +186,23 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	tristate "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called cpufreq_interactive.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
@@ -211,6 +227,17 @@
 
 	  If in doubt, say N.
 
+config CPU_BOOST
+	tristate "Event based short term CPU freq boost"
+	depends on CPU_FREQ
+	help
+	  This driver boosts the frequency of one or more CPUs based on
+	  various events that might occur in the system. As of now, the
+	  events it reacts to are:
+	  - Migration of important threads from one CPU to another.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_SCHED
 	bool "'sched' cpufreq governor"
 	depends on CPU_FREQ
@@ -224,26 +251,6 @@
 
 	  If in doubt, say N.
 
-config CPU_FREQ_GOV_INTERACTIVE
-	tristate "'interactive' cpufreq policy governor"
-	depends on CPU_FREQ
-	select CPU_FREQ_GOV_ATTR_SET
-	select IRQ_WORK
-	help
-	  'interactive' - This driver adds a dynamic cpufreq policy governor
-	  designed for latency-sensitive workloads.
-
-	  This governor attempts to reduce the latency of clock
-	  increases so that the system is more responsive to
-	  interactive workloads.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called cpufreq_interactive.
-
-	  For details, take a look at linux/Documentation/cpu-freq.
-
-	  If in doubt, say N.
-
 config CPU_FREQ_GOV_SCHEDUTIL
 	bool "'schedutil' cpufreq policy governor"
 	depends on CPU_FREQ && SMP
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index d89b8af..96e18162 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -258,3 +258,9 @@
 	  support for its operation.
 
 	  If in doubt, say N.
+
+config CPU_FREQ_MSM
+	bool "MSM CPUFreq support"
+	depends on CPU_FREQ
+	help
+	  This enables the CPUFreq driver for Qualcomm CPUs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f0c9905..bf98b28 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o
+obj-$(CONFIG_CPU_BOOST)			+= cpu-boost.o
 
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
@@ -60,6 +61,7 @@
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
 obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_MSM)              += qcom-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
@@ -82,7 +84,6 @@
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
 obj-$(CONFIG_MACH_MVEBU_V7)		+= mvebu-cpufreq.o
 
-
 ##################################################################################
 # PowerPC platform drivers
 obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
new file mode 100644
index 0000000..07603fe
--- /dev/null
+++ b/drivers/cpufreq/cpu-boost.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpu-boost: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/time.h>
+
+struct cpu_sync {
+	int cpu;
+	unsigned int input_boost_min;
+	unsigned int input_boost_freq;
+};
+
+static DEFINE_PER_CPU(struct cpu_sync, sync_info);
+static struct workqueue_struct *cpu_boost_wq;
+
+static struct work_struct input_boost_work;
+
+static bool input_boost_enabled;
+
+static unsigned int input_boost_ms = 40;
+module_param(input_boost_ms, uint, 0644);
+
+static bool sched_boost_on_input;
+module_param(sched_boost_on_input, bool, 0644);
+
+static bool sched_boost_active;
+
+static struct delayed_work input_boost_rem;
+static u64 last_input_time;
+#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
+
+static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
+{
+	int i, ntokens = 0;
+	unsigned int val, cpu;
+	const char *cp = buf;
+	bool enabled = false;
+
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	/* single number: apply to all CPUs */
+	if (!ntokens) {
+		if (sscanf(buf, "%u\n", &val) != 1)
+			return -EINVAL;
+		for_each_possible_cpu(i)
+			per_cpu(sync_info, i).input_boost_freq = val;
+		goto check_enable;
+	}
+
+	/* CPU:value pair */
+	if (!(ntokens % 2))
+		return -EINVAL;
+
+	cp = buf;
+	for (i = 0; i < ntokens; i += 2) {
+		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+			return -EINVAL;
+		if (cpu >= num_possible_cpus())
+			return -EINVAL;
+
+		per_cpu(sync_info, cpu).input_boost_freq = val;
+		cp = strchr(cp, ' ');
+		cp++;
+	}
+
+check_enable:
+	for_each_possible_cpu(i) {
+		if (per_cpu(sync_info, i).input_boost_freq) {
+			enabled = true;
+			break;
+		}
+	}
+	input_boost_enabled = enabled;
+
+	return 0;
+}
+
+static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
+{
+	int cnt = 0, cpu;
+	struct cpu_sync *s;
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"%d:%u ", cpu, s->input_boost_freq);
+	}
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+	return cnt;
+}
+
+static const struct kernel_param_ops param_ops_input_boost_freq = {
+	.set = set_input_boost_freq,
+	.get = get_input_boost_freq,
+};
+module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);
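Both accepted formats can be exercised with hypothetical values: writing
"1497600" applies a 1497600 KHz input boost to every CPU, while
"0:1497600 2:1728000" updates only CPUs 0 and 2 and leaves the other
CPUs' input_boost_freq values unchanged; boosting stays enabled as long
as any CPU has a non-zero value.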
+
+/*
+ * The CPUFREQ_ADJUST notifier is used to override the current policy min to
+ * make sure policy min >= boost_min. The cpufreq framework then does the job
+ * of enforcing the new policy.
+ */
+static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct cpufreq_policy *policy = data;
+	unsigned int cpu = policy->cpu;
+	struct cpu_sync *s = &per_cpu(sync_info, cpu);
+	unsigned int ib_min = s->input_boost_min;
+
+	switch (val) {
+	case CPUFREQ_ADJUST:
+		if (!ib_min)
+			break;
+
+		pr_debug("CPU%u policy min before boost: %u kHz\n",
+			 cpu, policy->min);
+		pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min);
+
+		cpufreq_verify_within_limits(policy, ib_min, UINT_MAX);
+
+		pr_debug("CPU%u policy min after boost: %u kHz\n",
+			 cpu, policy->min);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block boost_adjust_nb = {
+	.notifier_call = boost_adjust_notify,
+};
+
+static void update_policy_online(void)
+{
+	unsigned int i;
+
+	/* Re-evaluate policy to trigger adjust notifier for online CPUs */
+	get_online_cpus();
+	for_each_online_cpu(i) {
+		pr_debug("Updating policy for CPU%d\n", i);
+		cpufreq_update_policy(i);
+	}
+	put_online_cpus();
+}
+
+static void do_input_boost_rem(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	/* Reset the input_boost_min for all CPUs in the system */
+	pr_debug("Resetting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = 0;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	if (sched_boost_active) {
+		ret = sched_set_boost(0);
+		if (ret)
+			pr_err("cpu-boost: HMP boost disable failed\n");
+		sched_boost_active = false;
+	}
+}
+
+static void do_input_boost(struct work_struct *work)
+{
+	unsigned int i, ret;
+	struct cpu_sync *i_sync_info;
+
+	cancel_delayed_work_sync(&input_boost_rem);
+	if (sched_boost_active) {
+		sched_set_boost(0);
+		sched_boost_active = false;
+	}
+
+	/* Set the input_boost_min for all CPUs in the system */
+	pr_debug("Setting input boost min for all CPUs\n");
+	for_each_possible_cpu(i) {
+		i_sync_info = &per_cpu(sync_info, i);
+		i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
+	}
+
+	/* Update policies for all online CPUs */
+	update_policy_online();
+
+	/* Enable scheduler boost to migrate tasks to big cluster */
+	if (sched_boost_on_input) {
+		ret = sched_set_boost(1);
+		if (ret)
+			pr_err("cpu-boost: HMP boost enable failed\n");
+		else
+			sched_boost_active = true;
+	}
+
+	queue_delayed_work(cpu_boost_wq, &input_boost_rem,
+					msecs_to_jiffies(input_boost_ms));
+}
+
+static void cpuboost_input_event(struct input_handle *handle,
+		unsigned int type, unsigned int code, int value)
+{
+	u64 now;
+
+	if (!input_boost_enabled)
+		return;
+
+	now = ktime_to_us(ktime_get());
+	if (now - last_input_time < MIN_INPUT_INTERVAL)
+		return;
+
+	if (work_pending(&input_boost_work))
+		return;
+
+	queue_work(cpu_boost_wq, &input_boost_work);
+	last_input_time = ktime_to_us(ktime_get());
+}
+
+static int cpuboost_input_connect(struct input_handler *handler,
+		struct input_dev *dev, const struct input_device_id *id)
+{
+	struct input_handle *handle;
+	int error;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = "cpufreq";
+
+	error = input_register_handle(handle);
+	if (error)
+		goto err2;
+
+	error = input_open_device(handle);
+	if (error)
+		goto err1;
+
+	return 0;
+err1:
+	input_unregister_handle(handle);
+err2:
+	kfree(handle);
+	return error;
+}
+
+static void cpuboost_input_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id cpuboost_ids[] = {
+	/* multi-touch touchscreen */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
+	},
+	/* touchpad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
+			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	},
+	/* Keypad */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+
+static struct input_handler cpuboost_input_handler = {
+	.event          = cpuboost_input_event,
+	.connect        = cpuboost_input_connect,
+	.disconnect     = cpuboost_input_disconnect,
+	.name           = "cpu-boost",
+	.id_table       = cpuboost_ids,
+};
+
+static int cpu_boost_init(void)
+{
+	int cpu;
+	struct cpu_sync *s;
+
+	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
+	if (!cpu_boost_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&input_boost_work, do_input_boost);
+	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
+
+	for_each_possible_cpu(cpu) {
+		s = &per_cpu(sync_info, cpu);
+		s->cpu = cpu;
+	}
+	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
+
+	return input_register_handler(&cpuboost_input_handler);
+}
+late_initcall(cpu_boost_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 019e817..f7e1c1b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -93,6 +93,7 @@
  */
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 static struct srcu_notifier_head cpufreq_transition_notifier_list;
+struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 static bool init_cpufreq_transition_notifier_list_called;
 static int __init init_cpufreq_transition_notifier_list(void)
@@ -103,6 +104,15 @@
 }
 pure_initcall(init_cpufreq_transition_notifier_list);
 
+static bool init_cpufreq_govinfo_notifier_list_called;
+static int __init init_cpufreq_govinfo_notifier_list(void)
+{
+	ATOMIC_INIT_NOTIFIER_HEAD(&cpufreq_govinfo_notifier_list);
+	init_cpufreq_govinfo_notifier_list_called = true;
+	return 0;
+}
+pure_initcall(init_cpufreq_govinfo_notifier_list);
+
 static int off __read_mostly;
 static int cpufreq_disabled(void)
 {
@@ -1078,7 +1088,8 @@
 	if (has_target()) {
 		ret = cpufreq_start_governor(policy);
 		if (ret)
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 	}
 	up_write(&policy->rwsem);
 	return ret;
@@ -1250,6 +1261,9 @@
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}
 
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
@@ -1776,7 +1790,8 @@
 	if (cpufreq_disabled())
 		return -EINVAL;
 
-	WARN_ON(!init_cpufreq_transition_notifier_list_called);
+	WARN_ON(!init_cpufreq_transition_notifier_list_called ||
+		!init_cpufreq_govinfo_notifier_list_called);
 
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
@@ -1797,6 +1812,10 @@
 		ret = blocking_notifier_chain_register(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_register(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -1837,6 +1856,10 @@
 		ret = blocking_notifier_chain_unregister(
 				&cpufreq_policy_notifier_list, nb);
 		break;
+	case CPUFREQ_GOVINFO_NOTIFIER:
+		ret = atomic_notifier_chain_unregister(
+				&cpufreq_govinfo_notifier_list, nb);
+		break;
 	default:
 		ret = -EINVAL;
 	}
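A minimal client of the new CPUFREQ_GOVINFO_NOTIFIER chain might look like
the sketch below. The client names are hypothetical, and the layout of
struct cpufreq_govinfo (used by cpufreq_interactive.c further down in this
patch) is assumed to be defined elsewhere in this tree; since this is an
atomic notifier chain, the callback must not sleep:

	#include <linux/cpufreq.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	static int govinfo_cb(struct notifier_block *nb, unsigned long val,
			      void *data)
	{
		/*
		 * data points at a struct cpufreq_govinfo (assumed layout);
		 * runs in atomic context, so only non-sleeping work here.
		 */
		return NOTIFY_OK;
	}

	static struct notifier_block govinfo_nb = {
		.notifier_call = govinfo_cb,
	};

	static int __init govinfo_client_init(void)
	{
		return cpufreq_register_notifier(&govinfo_nb,
						 CPUFREQ_GOVINFO_NOTIFIER);
	}
	late_initcall(govinfo_client_init);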
@@ -1980,15 +2003,6 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	/* Save last value to restore later on errors */
 	policy->restore_freq = policy->cur;
 
@@ -2533,7 +2547,7 @@
 	hp_online = ret;
 	ret = 0;
 
-	pr_debug("driver %s up and running\n", driver_data->name);
+	pr_info("driver %s up and running\n", driver_data->name);
 	goto out;
 
 err_if_unreg:
@@ -2565,7 +2579,7 @@
 	if (!cpufreq_driver || (driver != cpufreq_driver))
 		return -EINVAL;
 
-	pr_debug("unregistering driver %s\n", driver->name);
+	pr_info("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
 	get_online_cpus();
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index d6cac0e..1b8c739 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1,7 +1,7 @@
 /*
  * drivers/cpufreq/cpufreq_interactive.c
  *
- * Copyright (C) 2010-2016 Google, Inc.
+ * Copyright (C) 2010 Google, Inc.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -13,14 +13,12 @@
  * GNU General Public License for more details.
  *
  * Author: Mike Chan (mike@android.com)
+ *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
 #include <linux/cpufreq.h>
-#include <linux/irq_work.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/rwsem.h>
@@ -29,50 +27,92 @@
 #include <linux/tick.h>
 #include <linux/time.h>
 #include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
 
-#define gov_attr_ro(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0444, show_##_name, NULL)
+struct cpufreq_interactive_policyinfo {
+	struct timer_list policy_timer;
+	struct timer_list policy_slack_timer;
+	struct hrtimer notif_timer;
+	spinlock_t load_lock; /* protects load tracking stat */
+	u64 last_evaluated_jiffy;
+	struct cpufreq_policy *policy;
+	struct cpufreq_policy p_nolim; /* policy copy with no limits */
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /* protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	unsigned int min_freq;
+	u64 floor_validate_time;
+	u64 hispeed_validate_time;
+	u64 max_freq_hyst_start_time;
+	struct rw_semaphore enable_sem;
+	bool reject_notification;
+	bool notif_pending;
+	unsigned long notif_cpu;
+	int governor_enabled;
+	struct cpufreq_interactive_tunables *cached_tunables;
+	struct sched_load *sl;
+};
 
-#define gov_attr_wo(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0200, NULL, store_##_name)
+/* Protected by per-policy load_lock */
+struct cpufreq_interactive_cpuinfo {
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	unsigned int loadadjfreq;
+};
 
-#define gov_attr_rw(_name)						\
-static struct governor_attr _name =					\
-__ATTR(_name, 0644, show_##_name, store_##_name)
+static DEFINE_PER_CPU(struct cpufreq_interactive_policyinfo *, polinfo);
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
 
-/* Separate instance required for each 'interactive' directory in sysfs */
-struct interactive_tunables {
-	struct gov_attr_set attr_set;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
 
+static int set_window_count;
+static int migration_register_count;
+static struct mutex sched_lock;
+static cpumask_t controlled_cpus;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
 	/* Hi speed to bump to from lo speed when load burst (default max) */
 	unsigned int hispeed_freq;
-
 	/* Go to hi speed when CPU load at or above this value. */
 #define DEFAULT_GO_HISPEED_LOAD 99
 	unsigned long go_hispeed_load;
-
 	/* Target load. Lower values result in higher CPU speeds. */
 	spinlock_t target_loads_lock;
 	unsigned int *target_loads;
 	int ntarget_loads;
-
 	/*
 	 * The minimum amount of time to spend at a frequency before we can ramp
 	 * down.
 	 */
 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
 	unsigned long min_sample_time;
-
-	/* The sample rate of the timer used to increase frequency */
-	unsigned long sampling_rate;
-
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
 	/*
 	 * Wait this long before raising speed above hispeed, by default a
 	 * single timer interval.
@@ -80,175 +120,181 @@
 	spinlock_t above_hispeed_delay_lock;
 	unsigned int *above_hispeed_delay;
 	int nabove_hispeed_delay;
-
 	/* Non-zero means indefinite speed boost active */
-	int boost;
+	int boost_val;
 	/* Duration of a boot pulse in usecs */
-	int boostpulse_duration;
+	int boostpulse_duration_val;
 	/* End time of boost pulse in ktime converted to usecs */
 	u64 boostpulse_endtime;
 	bool boosted;
-
 	/*
-	 * Max additional time to wait in idle, beyond sampling_rate, at speeds
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
 	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
 	 */
-#define DEFAULT_TIMER_SLACK (4 * DEFAULT_SAMPLING_RATE)
-	unsigned long timer_slack_delay;
-	unsigned long timer_slack;
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
 	bool io_is_busy;
+
+	/* scheduler input related flags */
+	bool use_sched_load;
+	bool use_migration_notif;
+
+	/*
+	 * Whether to align timer windows across all CPUs. When
+	 * use_sched_load is true, this flag is ignored and windows
+	 * will always be aligned.
+	 */
+	bool align_windows;
+
+	/*
+	 * Stay at max freq for at least max_freq_hysteresis before dropping
+	 * frequency.
+	 */
+	unsigned int max_freq_hysteresis;
+
+	/* Ignore hispeed_freq and above_hispeed_delay for notification */
+	bool ignore_hispeed_on_notif;
+
+	/* Ignore min_sample_time for notification */
+	bool fast_ramp_down;
+
+	/* Whether to enable prediction or not */
+	bool enable_prediction;
 };
 
-/* Separate instance required for each 'struct cpufreq_policy' */
-struct interactive_policy {
-	struct cpufreq_policy *policy;
-	struct interactive_tunables *tunables;
-	struct list_head tunables_hook;
-};
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+static struct cpufreq_interactive_tunables *cached_common_tunables;
 
-/* Separate instance required for each CPU */
-struct interactive_cpu {
-	struct update_util_data update_util;
-	struct interactive_policy *ipolicy;
+static struct attribute_group *get_sysfs_attr(void);
 
-	struct irq_work irq_work;
-	u64 last_sample_time;
-	unsigned long next_sample_jiffies;
-	bool work_in_progress;
-
-	struct rw_semaphore enable_sem;
-	struct timer_list slack_timer;
-
-	spinlock_t load_lock; /* protects the next 4 fields */
-	u64 time_in_idle;
-	u64 time_in_idle_timestamp;
-	u64 cputime_speedadj;
-	u64 cputime_speedadj_timestamp;
-
-	spinlock_t target_freq_lock; /*protects target freq */
-	unsigned int target_freq;
-
-	unsigned int floor_freq;
-	u64 pol_floor_val_time; /* policy floor_validate_time */
-	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
-	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
-	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
-};
-
-static DEFINE_PER_CPU(struct interactive_cpu, interactive_cpu);
-
-/* Realtime thread handles frequency scaling */
-static struct task_struct *speedchange_task;
-static cpumask_t speedchange_cpumask;
-static spinlock_t speedchange_cpumask_lock;
-
-/* Target load. Lower values result in higher CPU speeds. */
-#define DEFAULT_TARGET_LOAD 90
-static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
-
-#define DEFAULT_SAMPLING_RATE (20 * USEC_PER_MSEC)
-#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_SAMPLING_RATE
-static unsigned int default_above_hispeed_delay[] = {
-	DEFAULT_ABOVE_HISPEED_DELAY
-};
-
-/* Iterate over interactive policies for tunables */
-#define for_each_ipolicy(__ip)	\
-	list_for_each_entry(__ip, &tunables->attr_set.policy_list, tunables_hook)
-
-static struct interactive_tunables *global_tunables;
-static DEFINE_MUTEX(global_tunables_lock);
-
-static inline void update_slack_delay(struct interactive_tunables *tunables)
+/* Round to starting jiffy of next evaluation window */
+static u64 round_to_nw_start(u64 jif,
+			     struct cpufreq_interactive_tunables *tunables)
 {
-	tunables->timer_slack_delay = usecs_to_jiffies(tunables->timer_slack +
-						       tunables->sampling_rate);
-}
+	unsigned long step = usecs_to_jiffies(tunables->timer_rate);
+	u64 ret;
 
-static bool timer_slack_required(struct interactive_cpu *icpu)
-{
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-
-	if (tunables->timer_slack < 0)
-		return false;
-
-	if (icpu->target_freq > ipolicy->policy->min)
-		return true;
-
-	return false;
-}
-
-static void gov_slack_timer_start(struct interactive_cpu *icpu, int cpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	icpu->slack_timer.expires = jiffies + tunables->timer_slack_delay;
-	add_timer_on(&icpu->slack_timer, cpu);
-}
-
-static void gov_slack_timer_modify(struct interactive_cpu *icpu)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-
-	mod_timer(&icpu->slack_timer, jiffies + tunables->timer_slack_delay);
-}
-
-static void slack_timer_resched(struct interactive_cpu *icpu, int cpu,
-				bool modify)
-{
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned long flags;
-
-	spin_lock_irqsave(&icpu->load_lock, flags);
-
-	icpu->time_in_idle = get_cpu_idle_time(cpu,
-					       &icpu->time_in_idle_timestamp,
-					       tunables->io_is_busy);
-	icpu->cputime_speedadj = 0;
-	icpu->cputime_speedadj_timestamp = icpu->time_in_idle_timestamp;
-
-	if (timer_slack_required(icpu)) {
-		if (modify)
-			gov_slack_timer_modify(icpu);
-		else
-			gov_slack_timer_start(icpu, cpu);
+	if (tunables->use_sched_load || tunables->align_windows) {
+		do_div(jif, step);
+		ret = (jif + 1) * step;
+	} else {
+		ret = jiffies + usecs_to_jiffies(tunables->timer_rate);
 	}
 
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-}
-
-static unsigned int
-freq_to_above_hispeed_delay(struct interactive_tunables *tunables,
-			    unsigned int freq)
-{
-	unsigned long flags;
-	unsigned int ret;
-	int i;
-
-	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
-
-	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
-	     freq >= tunables->above_hispeed_delay[i + 1]; i += 2)
-		;
-
-	ret = tunables->above_hispeed_delay[i];
-	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static unsigned int freq_to_targetload(struct interactive_tunables *tunables,
-				       unsigned int freq)
+static inline int set_window_helper(
+			struct cpufreq_interactive_tunables *tunables)
 {
+	return sched_set_window(round_to_nw_start(get_jiffies_64(), tunables),
+			 usecs_to_jiffies(tunables->timer_rate));
+}
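A worked example of the rounding above, assuming HZ=100: a timer_rate of
20000 uS converts to a step of 2 jiffies. With aligned windows and
jif = 1003, do_div() leaves the quotient 501 in jif, so the next window
starts at (501 + 1) * 2 = 1004 jiffies, the same boundary on every CPU.
Without alignment, the timer simply fires timer_rate after now.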
+
+static void cpufreq_interactive_timer_resched(unsigned long cpu,
+					      bool slack_only)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 expires;
 	unsigned long flags;
-	unsigned int ret;
 	int i;
 
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	if (!slack_only) {
+		for_each_cpu(i, ppol->policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, i);
+			pcpu->time_in_idle = get_cpu_idle_time(i,
+						&pcpu->time_in_idle_timestamp,
+						tunables->io_is_busy);
+			pcpu->cputime_speedadj = 0;
+			pcpu->cputime_speedadj_timestamp =
+						pcpu->time_in_idle_timestamp;
+		}
+		del_timer(&ppol->policy_timer);
+		ppol->policy_timer.expires = expires;
+		add_timer(&ppol->policy_timer);
+	}
+
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		del_timer(&ppol->policy_slack_timer);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+/* The caller shall take enable_sem write semaphore to avoid any timer race.
+ * The policy_timer and policy_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	u64 expires = round_to_nw_start(ppol->last_evaluated_jiffy, tunables);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ppol->load_lock, flags);
+	ppol->policy_timer.expires = expires;
+	add_timer(&ppol->policy_timer);
+	if (tunables->timer_slack_val >= 0 &&
+	    ppol->target_freq > ppol->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		ppol->policy_slack_timer.expires = expires;
+		add_timer(&ppol->policy_slack_timer);
+	}
+
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		pcpu->time_in_idle =
+			get_cpu_idle_time(i, &pcpu->time_in_idle_timestamp,
+					  tunables->io_is_busy);
+		pcpu->cputime_speedadj = 0;
+		pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	}
+	spin_unlock_irqrestore(&ppol->load_lock, flags);
+}
+
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
 	for (i = 0; i < tunables->ntarget_loads - 1 &&
-	     freq >= tunables->target_loads[i + 1]; i += 2)
+		    freq >= tunables->target_loads[i+1]; i += 2)
 		;
 
 	ret = tunables->target_loads[i];
@@ -256,76 +302,102 @@
 	return ret;
 }
 
+#define DEFAULT_MAX_LOAD 100
+u32 get_freq_max_load(int cpu, unsigned int freq)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+
+	if (!cpumask_test_cpu(cpu, &controlled_cpus))
+		return DEFAULT_MAX_LOAD;
+
+	if (have_governor_per_policy()) {
+		if (!ppol || !ppol->cached_tunables)
+			return DEFAULT_MAX_LOAD;
+		return freq_to_targetload(ppol->cached_tunables, freq);
+	}
+
+	if (!cached_common_tunables)
+		return DEFAULT_MAX_LOAD;
+	return freq_to_targetload(cached_common_tunables, freq);
+}
+
 /*
  * If increasing frequencies never map to a lower target load then
  * choose_freq() will find the minimum frequency that does not exceed its
  * target load given the current load.
  */
-static unsigned int choose_freq(struct interactive_cpu *icpu,
-				unsigned int loadadjfreq)
+static unsigned int choose_freq(struct cpufreq_interactive_policyinfo *pcpu,
+		unsigned int loadadjfreq)
 {
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	unsigned int prevfreq, freqmin = 0, freqmax = UINT_MAX, tl;
-	unsigned int freq = policy->cur;
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
 	int index;
 
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
 	do {
 		prevfreq = freq;
-		tl = freq_to_targetload(icpu->ipolicy->tunables, freq);
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
 
 		/*
 		 * Find the lowest frequency where the computed load is less
 		 * than or equal to the target load.
 		 */
 
-		index = cpufreq_frequency_table_target(policy, loadadjfreq / tl,
+		index = cpufreq_frequency_table_target(&pcpu->p_nolim,
+						       loadadjfreq / tl,
 						       CPUFREQ_RELATION_L);
-
-		freq = freq_table[index].frequency;
+		freq = pcpu->freq_table[index].frequency;
 
 		if (freq > prevfreq) {
-			/* The previous frequency is too low */
+			/* The previous frequency is too low. */
 			freqmin = prevfreq;
 
-			if (freq < freqmax)
-				continue;
-
-			/* Find highest frequency that is less than freqmax */
-			index = cpufreq_frequency_table_target(policy,
-					freqmax - 1, CPUFREQ_RELATION_H);
-
-			freq = freq_table[index].frequency;
-
-			if (freq == freqmin) {
+			if (freq >= freqmax) {
 				/*
-				 * The first frequency below freqmax has already
-				 * been found to be too low. freqmax is the
-				 * lowest speed we found that is fast enough.
+				 * Find the highest frequency that is less
+				 * than freqmax.
 				 */
-				freq = freqmax;
-				break;
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmax - 1, CPUFREQ_RELATION_H);
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
 			}
 		} else if (freq < prevfreq) {
 			/* The previous frequency is high enough. */
 			freqmax = prevfreq;
 
-			if (freq > freqmin)
-				continue;
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				index = cpufreq_frequency_table_target(
+					    &pcpu->p_nolim,
+					    freqmin + 1, CPUFREQ_RELATION_L);
+				freq = pcpu->freq_table[index].frequency;
 
-			/* Find lowest frequency that is higher than freqmin */
-			index = cpufreq_frequency_table_target(policy,
-					freqmin + 1, CPUFREQ_RELATION_L);
-
-			freq = freq_table[index].frequency;
-
-			/*
-			 * If freqmax is the first frequency above
-			 * freqmin then we have already found that
-			 * this speed is fast enough.
-			 */
-			if (freq == freqmax)
-				break;
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
 		}
 
 		/* If same frequency chosen as previous then done. */
@@ -334,97 +406,216 @@
 	return freq;
 }
 
-static u64 update_load(struct interactive_cpu *icpu, int cpu)
+static u64 update_load(int cpu)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	unsigned int delta_idle, delta_time;
-	u64 now_idle, now, active_time;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
 
 	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
-	delta_idle = (unsigned int)(now_idle - icpu->time_in_idle);
-	delta_time = (unsigned int)(now - icpu->time_in_idle_timestamp);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
 
 	if (delta_time <= delta_idle)
 		active_time = 0;
 	else
 		active_time = delta_time - delta_idle;
 
-	icpu->cputime_speedadj += active_time * icpu->ipolicy->policy->cur;
+	pcpu->cputime_speedadj += active_time * ppol->policy->cur;
 
-	icpu->time_in_idle = now_idle;
-	icpu->time_in_idle_timestamp = now;
-
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
 	return now;
 }
 
-/* Re-evaluate load to see if a frequency change is required or not */
-static void eval_target_freq(struct interactive_cpu *icpu)
+static unsigned int sl_busy_to_laf(struct cpufreq_interactive_policyinfo *ppol,
+				   unsigned long busy)
 {
-	struct interactive_tunables *tunables = icpu->ipolicy->tunables;
-	struct cpufreq_policy *policy = icpu->ipolicy->policy;
-	struct cpufreq_frequency_table *freq_table = policy->freq_table;
-	u64 cputime_speedadj, now, max_fvtime;
-	unsigned int new_freq, loadadjfreq, index, delta_time;
-	unsigned long flags;
+	int prev_load;
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+
+	prev_load = mult_frac(ppol->policy->cpuinfo.max_freq * 100,
+				busy, tunables->timer_rate);
+	return prev_load;
+}
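Worked example: with cpuinfo.max_freq = 1497600 KHz, a busy time of
10000 uS in a 20000 uS window gives mult_frac(1497600 * 100, 10000,
20000) = 74880000. Dividing by a target_freq of 1497600 in the timer
below yields prev_l = 50, i.e. 50% load at the current target frequency.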
+
+#define NEW_TASK_RATIO 75
+#define PRED_TOLERANCE_PCT 10
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	s64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
 	int cpu_load;
-	int cpu = smp_processor_id();
+	int pol_load = 0;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		ppol->policy->governor_data;
+	struct sched_load *sl = ppol->sl;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned int new_freq;
+	unsigned int prev_laf = 0, t_prevlaf;
+	unsigned int pred_laf = 0, t_predlaf = 0;
+	unsigned int prev_chfreq, pred_chfreq, chosen_freq;
+	unsigned int index;
+	unsigned long flags;
+	unsigned long max_cpu;
+	int cpu, i;
+	int new_load_pct = 0;
+	int prev_l, pred_l = 0;
+	struct cpufreq_govinfo govinfo;
+	bool skip_hispeed_logic, skip_min_sample_time;
+	bool jump_to_max_no_ts = false;
+	bool jump_to_max = false;
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	now = update_load(icpu, smp_processor_id());
-	delta_time = (unsigned int)(now - icpu->cputime_speedadj_timestamp);
-	cputime_speedadj = icpu->cputime_speedadj;
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
-
-	if (WARN_ON_ONCE(!delta_time))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return;
-
-	spin_lock_irqsave(&icpu->target_freq_lock, flags);
-	do_div(cputime_speedadj, delta_time);
-	loadadjfreq = (unsigned int)cputime_speedadj * 100;
-	cpu_load = loadadjfreq / policy->cur;
-	tunables->boosted = tunables->boost ||
-			    now < tunables->boostpulse_endtime;
-
-	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
-		if (policy->cur < tunables->hispeed_freq) {
-			new_freq = tunables->hispeed_freq;
-		} else {
-			new_freq = choose_freq(icpu, loadadjfreq);
-
-			if (new_freq < tunables->hispeed_freq)
-				new_freq = tunables->hispeed_freq;
-		}
-	} else {
-		new_freq = choose_freq(icpu, loadadjfreq);
-		if (new_freq > tunables->hispeed_freq &&
-		    policy->cur < tunables->hispeed_freq)
-			new_freq = tunables->hispeed_freq;
-	}
-
-	if (policy->cur >= tunables->hispeed_freq &&
-	    new_freq > policy->cur &&
-	    now - icpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, policy->cur)) {
-		trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
+	if (!ppol->governor_enabled)
 		goto exit;
+
+	now = ktime_to_us(ktime_get());
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	spin_lock(&ppol->load_lock);
+
+	skip_hispeed_logic =
+		tunables->ignore_hispeed_on_notif && ppol->notif_pending;
+	skip_min_sample_time = tunables->fast_ramp_down && ppol->notif_pending;
+	ppol->notif_pending = false;
+	now = ktime_to_us(ktime_get());
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+
+	if (tunables->use_sched_load)
+		sched_get_cpus_busy(sl, ppol->policy->cpus);
+	max_cpu = cpumask_first(ppol->policy->cpus);
+	i = 0;
+	for_each_cpu(cpu, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, cpu);
+		if (tunables->use_sched_load) {
+			t_prevlaf = sl_busy_to_laf(ppol, sl[i].prev_load);
+			prev_l = t_prevlaf / ppol->target_freq;
+			if (tunables->enable_prediction) {
+				t_predlaf = sl_busy_to_laf(ppol,
+						sl[i].predicted_load);
+				pred_l = t_predlaf / ppol->target_freq;
+			}
+			if (sl[i].prev_load)
+				new_load_pct = sl[i].new_task_load * 100 /
+							sl[i].prev_load;
+			else
+				new_load_pct = 0;
+		} else {
+			now = update_load(cpu);
+			delta_time = (unsigned int)
+				(now - pcpu->cputime_speedadj_timestamp);
+			if (WARN_ON_ONCE(!delta_time))
+				continue;
+			cputime_speedadj = pcpu->cputime_speedadj;
+			do_div(cputime_speedadj, delta_time);
+			t_prevlaf = (unsigned int)cputime_speedadj * 100;
+			prev_l = t_prevlaf / ppol->target_freq;
+		}
+
+		/* find max of loadadjfreq inside policy */
+		if (t_prevlaf > prev_laf) {
+			prev_laf = t_prevlaf;
+			max_cpu = cpu;
+		}
+		pred_laf = max(t_predlaf, pred_laf);
+
+		cpu_load = max(prev_l, pred_l);
+		pol_load = max(pol_load, cpu_load);
+		trace_cpufreq_interactive_cpuload(cpu, cpu_load, new_load_pct,
+						  prev_l, pred_l);
+
+		/* save loadadjfreq for notification */
+		pcpu->loadadjfreq = max(t_prevlaf, t_predlaf);
+
+		/* detect heavy new task and jump to policy->max */
+		if (prev_l >= tunables->go_hispeed_load &&
+		    new_load_pct >= NEW_TASK_RATIO) {
+			skip_hispeed_logic = true;
+			jump_to_max = true;
+		}
+		i++;
+	}
+	spin_unlock(&ppol->load_lock);
+
+	tunables->boosted = tunables->boost_val ||
+			    now < tunables->boostpulse_endtime;
+
+	prev_chfreq = choose_freq(ppol, prev_laf);
+	pred_chfreq = choose_freq(ppol, pred_laf);
+	chosen_freq = max(prev_chfreq, pred_chfreq);
+
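+	/*
+	 * If only the predicted load (not the measured one) calls for
+	 * policy->max, jump there without updating the floor and hysteresis
+	 * timestamps, so a misprediction does not extend how long the
+	 * frequency stays pinned at max.
+	 */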
+	if (prev_chfreq < ppol->policy->max && pred_chfreq >= ppol->policy->max)
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
+
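+	/*
+	 * Within max_freq_hysteresis of last running at policy->max, a load
+	 * at or above go_hispeed_load goes straight back to max, bypassing
+	 * both the hispeed ramp and the min_sample_time hold-off.
+	 */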
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis &&
+	    pol_load >= tunables->go_hispeed_load &&
+	    ppol->target_freq < ppol->policy->max) {
+		skip_hispeed_logic = true;
+		skip_min_sample_time = true;
+		if (!jump_to_max)
+			jump_to_max_no_ts = true;
 	}
 
-	icpu->loc_hispeed_val_time = now;
+	new_freq = chosen_freq;
+	if (jump_to_max_no_ts || jump_to_max) {
+		new_freq = ppol->policy->cpuinfo.max_freq;
+	} else if (!skip_hispeed_logic) {
+		if (pol_load >= tunables->go_hispeed_load ||
+		    tunables->boosted) {
+			if (ppol->target_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+			else
+				new_freq = max(new_freq,
+					       tunables->hispeed_freq);
+		}
+	}
 
-	index = cpufreq_frequency_table_target(policy, new_freq,
-					       CPUFREQ_RELATION_L);
-	new_freq = freq_table[index].frequency;
+	if (now - ppol->max_freq_hyst_start_time <
+	    tunables->max_freq_hysteresis)
+		new_freq = max(tunables->hispeed_freq, new_freq);
+
+	if (!skip_hispeed_logic &&
+	    ppol->target_freq >= tunables->hispeed_freq &&
+	    new_freq > ppol->target_freq &&
+	    now - ppol->hispeed_validate_time <
+	    freq_to_above_hispeed_delay(tunables, ppol->target_freq)) {
+		trace_cpufreq_interactive_notyet(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	ppol->hispeed_validate_time = now;
+
+	index = cpufreq_frequency_table_target(&ppol->p_nolim, new_freq,
+					   CPUFREQ_RELATION_L);
+	new_freq = ppol->freq_table[index].frequency;
 
 	/*
 	 * Do not scale below floor_freq unless we have been at or above the
 	 * floor frequency for the minimum sample time since last validated.
 	 */
-	max_fvtime = max(icpu->pol_floor_val_time, icpu->loc_floor_val_time);
-	if (new_freq < icpu->floor_freq && icpu->target_freq >= policy->cur) {
-		if (now - max_fvtime < tunables->min_sample_time) {
-			trace_cpufreq_interactive_notyet(cpu, cpu_load,
-				icpu->target_freq, policy->cur, new_freq);
-			goto exit;
+	if (!skip_min_sample_time && new_freq < ppol->floor_freq) {
+		if (now - ppol->floor_validate_time <
+				tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				max_cpu, pol_load, ppol->target_freq,
+				ppol->policy->cur, new_freq);
+			spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+			goto rearm;
 		}
 	}
 
@@ -433,114 +624,63 @@
 	 * or above the selected frequency for a minimum of min_sample_time,
 	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
 	 * allow the speed to drop as soon as the boostpulse duration expires
-	 * (or the indefinite boost is turned off).
+	 * (or the indefinite boost is turned off). If policy->max is restored
+	 * for max_freq_hysteresis, don't extend the timestamp. Otherwise, it
+	 * could incorrectly extend the duration of max_freq_hysteresis by
+	 * min_sample_time.
 	 */
 
-	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
-		icpu->floor_freq = new_freq;
-		if (icpu->target_freq >= policy->cur || new_freq >= policy->cur)
-			icpu->loc_floor_val_time = now;
+	if ((!tunables->boosted || new_freq > tunables->hispeed_freq)
+	    && !jump_to_max_no_ts) {
+		ppol->floor_freq = new_freq;
+		ppol->floor_validate_time = now;
 	}
 
-	if (icpu->target_freq == new_freq &&
-	    icpu->target_freq <= policy->cur) {
-		trace_cpufreq_interactive_already(cpu, cpu_load,
-			icpu->target_freq, policy->cur, new_freq);
-		goto exit;
+	if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+		ppol->max_freq_hyst_start_time = now;
+
+	if (ppol->target_freq == new_freq &&
+			ppol->target_freq <= ppol->policy->cur) {
+		trace_cpufreq_interactive_already(
+			max_cpu, pol_load, ppol->target_freq,
+			ppol->policy->cur, new_freq);
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+		goto rearm;
 	}
 
-	trace_cpufreq_interactive_target(cpu, cpu_load, icpu->target_freq,
-					 policy->cur, new_freq);
+	trace_cpufreq_interactive_target(max_cpu, pol_load, ppol->target_freq,
+					 ppol->policy->cur, new_freq);
 
-	icpu->target_freq = new_freq;
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-
+	ppol->target_freq = new_freq;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-	cpumask_set_cpu(cpu, &speedchange_cpumask);
+	cpumask_set_cpu(max_cpu, &speedchange_cpumask);
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process_no_notif(speedchange_task);
 
-	wake_up_process(speedchange_task);
-	return;
+rearm:
+	if (!timer_pending(&ppol->policy_timer))
+		cpufreq_interactive_timer_resched(data, false);
+
+	/*
+	 * Send govinfo notification.
+	 * Govinfo notification could potentially wake up another thread
+	 * managed by its clients. Thread wakeups might trigger a load
+	 * change callback that executes this function again. Therefore
+	 * no spinlock may be held while sending the notification.
+	 */
+	for_each_cpu(i, ppol->policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		govinfo.cpu = i;
+		govinfo.load = pcpu->loadadjfreq / ppol->policy->max;
+		govinfo.sampling_rate_us = tunables->timer_rate;
+		atomic_notifier_call_chain(&cpufreq_govinfo_notifier_list,
+					   CPUFREQ_LOAD_CHANGE, &govinfo);
+	}
 
 exit:
-	spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
-}
-
-static void cpufreq_interactive_update(struct interactive_cpu *icpu)
-{
-	eval_target_freq(icpu);
-	slack_timer_resched(icpu, smp_processor_id(), true);
-}
-
-static void cpufreq_interactive_idle_end(void)
-{
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu,
-						smp_processor_id());
-
-	if (!down_read_trylock(&icpu->enable_sem))
-		return;
-
-	if (icpu->ipolicy) {
-		/*
-		 * We haven't sampled load for more than sampling_rate time, do
-		 * it right now.
-		 */
-		if (time_after_eq(jiffies, icpu->next_sample_jiffies))
-			cpufreq_interactive_update(icpu);
-	}
-
-	up_read(&icpu->enable_sem);
-}
-
-static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
-						unsigned int *pmax_freq,
-						u64 *phvt, u64 *pfvt)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt = ~0ULL, fvt = 0;
-	unsigned int max_freq = 0, i;
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-
-		fvt = max(fvt, icpu->loc_floor_val_time);
-		if (icpu->target_freq > max_freq) {
-			max_freq = icpu->target_freq;
-			hvt = icpu->loc_hispeed_val_time;
-		} else if (icpu->target_freq == max_freq) {
-			hvt = min(hvt, icpu->loc_hispeed_val_time);
-		}
-	}
-
-	*pmax_freq = max_freq;
-	*phvt = hvt;
-	*pfvt = fvt;
-}
-
-static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
-					   struct cpufreq_policy *policy)
-{
-	struct interactive_cpu *icpu;
-	u64 hvt, fvt;
-	unsigned int max_freq;
-	int i;
-
-	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
-
-	for_each_cpu(i, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, i);
-		icpu->pol_floor_val_time = fvt;
-	}
-
-	if (max_freq != policy->cur) {
-		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-			icpu->pol_hispeed_val_time = hvt;
-		}
-	}
-
-	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+	up_read(&ppol->enable_sem);
+	return;
 }
 
 static int cpufreq_interactive_speedchange_task(void *data)
@@ -548,112 +688,182 @@
 	unsigned int cpu;
 	cpumask_t tmp_mask;
 	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
 
-again:
-	set_current_state(TASK_INTERRUPTIBLE);
-	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
-
-	if (cpumask_empty(&speedchange_cpumask)) {
-		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-		schedule();
-
-		if (kthread_should_stop())
-			return 0;
-
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			ppol = per_cpu(polinfo, cpu);
+			if (!down_read_trylock(&ppol->enable_sem))
+				continue;
+			if (!ppol->governor_enabled) {
+				up_read(&ppol->enable_sem);
+				continue;
+			}
+
+			if (ppol->target_freq != ppol->policy->cur)
+				__cpufreq_driver_target(ppol->policy,
+							ppol->target_freq,
+							CPUFREQ_RELATION_H);
+			trace_cpufreq_interactive_setspeed(cpu,
+						     ppol->target_freq,
+						     ppol->policy->cur);
+			up_read(&ppol->enable_sem);
+		}
 	}
 
-	set_current_state(TASK_RUNNING);
-	tmp_mask = speedchange_cpumask;
-	cpumask_clear(&speedchange_cpumask);
-	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
-
-	for_each_cpu(cpu, &tmp_mask) {
-		struct interactive_cpu *icpu = &per_cpu(interactive_cpu, cpu);
-		struct cpufreq_policy *policy = icpu->ipolicy->policy;
-
-		if (unlikely(!down_read_trylock(&icpu->enable_sem)))
-			continue;
-
-		if (likely(icpu->ipolicy))
-			cpufreq_interactive_adjust_cpu(cpu, policy);
-
-		up_read(&icpu->enable_sem);
-	}
-
-	goto again;
+	return 0;
 }
 
-static void cpufreq_interactive_boost(struct interactive_tunables *tunables)
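+/*
+ * Boost the policy owning 'tunables' to at least hispeed_freq, pin the
+ * floor there, and wake the speedchange thread if the target actually
+ * moved.
+ */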
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
 {
-	struct interactive_policy *ipolicy;
-	struct cpufreq_policy *policy;
-	struct interactive_cpu *icpu;
-	unsigned long flags[2];
-	bool wakeup = false;
 	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_policyinfo *ppol;
 
 	tunables->boosted = true;
 
 	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
-	for_each_ipolicy(ipolicy) {
-		policy = ipolicy->policy;
+	for_each_online_cpu(i) {
+		ppol = per_cpu(polinfo, i);
+		if (!ppol || tunables != ppol->policy->governor_data)
+			continue;
 
-		for_each_cpu(i, policy->cpus) {
-			icpu = &per_cpu(interactive_cpu, i);
-
-			if (!down_read_trylock(&icpu->enable_sem))
-				continue;
-
-			if (!icpu->ipolicy) {
-				up_read(&icpu->enable_sem);
-				continue;
-			}
-
-			spin_lock_irqsave(&icpu->target_freq_lock, flags[1]);
-			if (icpu->target_freq < tunables->hispeed_freq) {
-				icpu->target_freq = tunables->hispeed_freq;
-				cpumask_set_cpu(i, &speedchange_cpumask);
-				icpu->pol_hispeed_val_time = ktime_to_us(ktime_get());
-				wakeup = true;
-			}
-			spin_unlock_irqrestore(&icpu->target_freq_lock, flags[1]);
-
-			up_read(&icpu->enable_sem);
+		spin_lock_irqsave(&ppol->target_freq_lock, flags[1]);
+		if (ppol->target_freq < tunables->hispeed_freq) {
+			ppol->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			ppol->hispeed_validate_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
 		}
+
+		/*
+		 * Set the floor frequency and record when it was last
+		 * validated.
+		 */
+
+		ppol->floor_freq = tunables->hispeed_freq;
+		ppol->floor_validate_time = ktime_to_us(ktime_get());
+		spin_unlock_irqrestore(&ppol->target_freq_lock, flags[1]);
+		break;
 	}
 
 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
 
-	if (wakeup)
-		wake_up_process(speedchange_task);
+	if (anyboost)
+		wake_up_process_no_notif(speedchange_task);
 }
 
-static int cpufreq_interactive_notifier(struct notifier_block *nb,
-					unsigned long val, void *data)
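+/*
+ * Scheduler load-alert notifier: mark the policy as having a pending
+ * notification and arm a 1 ms hrtimer to re-evaluate the frequency. Only
+ * acts when the tunables opt in via use_sched_load and
+ * use_migration_notif.
+ */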
+static int load_change_callback(struct notifier_block *nb, unsigned long val,
+				void *data)
 {
-	struct cpufreq_freqs *freq = data;
-	struct interactive_cpu *icpu = &per_cpu(interactive_cpu, freq->cpu);
+	unsigned long cpu = (unsigned long) data;
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables;
 	unsigned long flags;
 
-	if (val != CPUFREQ_POSTCHANGE)
+	if (!ppol || ppol->reject_notification)
 		return 0;
 
-	if (!down_read_trylock(&icpu->enable_sem))
+	if (!down_read_trylock(&ppol->enable_sem))
 		return 0;
+	if (!ppol->governor_enabled)
+		goto exit;
 
-	if (!icpu->ipolicy) {
-		up_read(&icpu->enable_sem);
+	tunables = ppol->policy->governor_data;
+	if (!tunables->use_sched_load || !tunables->use_migration_notif)
+		goto exit;
+
+	spin_lock_irqsave(&ppol->target_freq_lock, flags);
+	ppol->notif_pending = true;
+	ppol->notif_cpu = cpu;
+	spin_unlock_irqrestore(&ppol->target_freq_lock, flags);
+
+	if (!hrtimer_is_queued(&ppol->notif_timer))
+		hrtimer_start(&ppol->notif_timer, ms_to_ktime(1),
+			      HRTIMER_MODE_REL);
+exit:
+	up_read(&ppol->enable_sem);
+	return 0;
+}
+
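+/*
+ * Deferred handler for load notifications: cancels the pending policy
+ * timers and re-runs the frequency evaluation, coalescing bursts of
+ * notifications into a single re-evaluation about 1 ms after the first.
+ */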
+static enum hrtimer_restart cpufreq_interactive_hrtimer(struct hrtimer *timer)
+{
+	struct cpufreq_interactive_policyinfo *ppol = container_of(timer,
+			struct cpufreq_interactive_policyinfo, notif_timer);
+	int cpu;
+
+	if (!down_read_trylock(&ppol->enable_sem))
+		return 0;
+	if (!ppol->governor_enabled) {
+		up_read(&ppol->enable_sem);
 		return 0;
 	}
+	cpu = ppol->notif_cpu;
+	trace_cpufreq_interactive_load_change(cpu);
+	del_timer(&ppol->policy_timer);
+	del_timer(&ppol->policy_slack_timer);
+	cpufreq_interactive_timer(cpu);
 
-	spin_lock_irqsave(&icpu->load_lock, flags);
-	update_load(icpu, freq->cpu);
-	spin_unlock_irqrestore(&icpu->load_lock, flags);
+	up_read(&ppol->enable_sem);
+	return HRTIMER_NORESTART;
+}
 
-	up_read(&icpu->enable_sem);
+static struct notifier_block load_notifier_block = {
+	.notifier_call = load_change_callback,
+};
 
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_policyinfo *ppol;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		ppol = per_cpu(polinfo, freq->cpu);
+		if (!ppol)
+			return 0;
+		if (!down_read_trylock(&ppol->enable_sem))
+			return 0;
+		if (!ppol->governor_enabled) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+
+		if (cpumask_first(ppol->policy->cpus) != freq->cpu) {
+			up_read(&ppol->enable_sem);
+			return 0;
+		}
+		spin_lock_irqsave(&ppol->load_lock, flags);
+		for_each_cpu(cpu, ppol->policy->cpus)
+			update_load(cpu);
+		spin_unlock_irqrestore(&ppol->load_lock, flags);
+
+		up_read(&ppol->enable_sem);
+	}
 	return 0;
 }
 
@@ -663,26 +873,29 @@
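+/*
+ * Parse a string of the form "val0 freq1:val1 freq2:val2 ..." into an
+ * odd-length array of unsigned ints; e.g. "85 1500000:90 1800000:95"
+ * yields {85, 1500000, 90, 1800000, 95}. Shared by the target_loads and
+ * above_hispeed_delay tunables.
+ */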
 
 static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
 {
-	const char *cp = buf;
-	int ntokens = 1, i = 0;
+	const char *cp;
+	int i;
+	int ntokens = 1;
 	unsigned int *tokenized_data;
 	int err = -EINVAL;
 
+	cp = buf;
 	while ((cp = strpbrk(cp + 1, " :")))
 		ntokens++;
 
 	if (!(ntokens & 0x1))
 		goto err;
 
-	tokenized_data = kcalloc(ntokens, sizeof(*tokenized_data), GFP_KERNEL);
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
 	if (!tokenized_data) {
 		err = -ENOMEM;
 		goto err;
 	}
 
 	cp = buf;
+	i = 0;
 	while (i < ntokens) {
-		if (kstrtouint(cp, 0, &tokenized_data[i++]) < 0)
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
 			goto err_kfree;
 
 		cp = strpbrk(cp, " :");
@@ -703,25 +916,13 @@
 	return ERR_PTR(err);
 }
 
-/* Interactive governor sysfs interface */
-static struct interactive_tunables *to_tunables(struct gov_attr_set *attr_set)
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
 {
-	return container_of(attr_set, struct interactive_tunables, attr_set);
-}
-
-#define show_one(file_name, type)					\
-static ssize_t show_##file_name(struct gov_attr_set *attr_set, char *buf) \
-{									\
-	struct interactive_tunables *tunables = to_tunables(attr_set);	\
-	return sprintf(buf, type "\n", tunables->file_name);		\
-}
-
-static ssize_t show_target_loads(struct gov_attr_set *attr_set, char *buf)
-{
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 
@@ -731,21 +932,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_target_loads(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned int *new_target_loads;
-	unsigned long flags;
 	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
 
 	new_target_loads = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_target_loads))
-		return PTR_ERR(new_target_loads);
+		return PTR_ERR(new_target_loads);
 
 	spin_lock_irqsave(&tunables->target_loads_lock, flags);
 	if (tunables->target_loads != default_target_loads)
@@ -754,16 +954,17 @@
 	tunables->ntarget_loads = ntokens;
 	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
 
+	sched_update_freq_max_load(&controlled_cpus);
+
 	return count;
 }
 
-static ssize_t show_above_hispeed_delay(struct gov_attr_set *attr_set,
-					char *buf)
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long flags;
-	ssize_t ret = 0;
 	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 
@@ -774,21 +975,20 @@
 
 	sprintf(buf + ret - 1, "\n");
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return ret;
 }
 
-static ssize_t store_above_hispeed_delay(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
+	int ntokens;
 	unsigned int *new_above_hispeed_delay = NULL;
 	unsigned long flags;
-	int ntokens;
 
 	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
 	if (IS_ERR(new_above_hispeed_delay))
-		return PTR_ERR(new_above_hispeed_delay);
+		return PTR_ERR(new_above_hispeed_delay);
 
 	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
 	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
@@ -796,71 +996,105 @@
 	tunables->above_hispeed_delay = new_above_hispeed_delay;
 	tunables->nabove_hispeed_delay = ntokens;
 	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
-
 	return count;
 }
 
-static ssize_t store_hispeed_freq(struct gov_attr_set *attr_set,
-				  const char *buf, size_t count)
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long int val;
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->hispeed_freq = val;
-
 	return count;
 }
 
-static ssize_t store_go_hispeed_load(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
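+/* Generate show/store handlers for simple unsigned integer tunables. */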
+#define show_store_one(file_name)					\
+static ssize_t show_##file_name(					\
+	struct cpufreq_interactive_tunables *tunables, char *buf)	\
+{									\
+	return snprintf(buf, PAGE_SIZE, "%u\n", tunables->file_name);	\
+}									\
+static ssize_t store_##file_name(					\
+		struct cpufreq_interactive_tunables *tunables,		\
+		const char *buf, size_t count)				\
+{									\
+	int ret;							\
+	unsigned long val;						\
+									\
+	ret = kstrtoul(buf, 0, &val);					\
+	if (ret < 0)							\
+		return ret;						\
+	tunables->file_name = val;					\
+	return count;							\
+}
+show_store_one(max_freq_hysteresis);
+show_store_one(align_windows);
+show_store_one(ignore_hispeed_on_notif);
+show_store_one(fast_ramp_down);
+show_store_one(enable_prediction);
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->go_hispeed_load = val;
-
 	return count;
 }
 
-static ssize_t store_min_sample_time(struct gov_attr_set *attr_set,
-				     const char *buf, size_t count)
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->min_sample_time = val;
-
 	return count;
 }
 
-static ssize_t show_timer_rate(struct gov_attr_set *attr_set, char *buf)
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-
-	return sprintf(buf, "%lu\n", tunables->sampling_rate);
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
 }
 
-static ssize_t store_timer_rate(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val, val_round;
 	int ret;
+	unsigned long val, val_round;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
@@ -870,43 +1104,62 @@
 	if (val != val_round)
 		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
 			val_round);
+	tunables->timer_rate = val_round;
 
-	tunables->sampling_rate = val_round;
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->timer_rate = val_round;
+	}
+	set_window_helper(tunables);
 
 	return count;
 }
 
-static ssize_t store_timer_slack(struct gov_attr_set *attr_set, const char *buf,
-				 size_t count)
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	long val;
 
 	ret = kstrtol(buf, 10, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->timer_slack = val;
-	update_slack_delay(tunables);
-
+	tunables->timer_slack_val = val;
 	return count;
 }
 
-static ssize_t store_boost(struct gov_attr_set *attr_set, const char *buf,
-			   size_t count)
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boost = val;
+	tunables->boost_val = val;
 
-	if (tunables->boost) {
+	if (tunables->boost_val) {
 		trace_cpufreq_interactive_boost("on");
 		if (!tunables->boosted)
 			cpufreq_interactive_boost(tunables);
@@ -918,111 +1171,469 @@
 	return count;
 }
 
-static ssize_t store_boostpulse(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
 	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
-					tunables->boostpulse_duration;
+		tunables->boostpulse_duration_val;
 	trace_cpufreq_interactive_boost("pulse");
 	if (!tunables->boosted)
 		cpufreq_interactive_boost(tunables);
-
 	return count;
 }
 
-static ssize_t store_boostpulse_duration(struct gov_attr_set *attr_set,
-					 const char *buf, size_t count)
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
 
-	tunables->boostpulse_duration = val;
-
+	tunables->boostpulse_duration_val = val;
 	return count;
 }
 
-static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
-				size_t count)
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
 {
-	struct interactive_tunables *tunables = to_tunables(attr_set);
-	unsigned long val;
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
 	int ret;
+	unsigned long val;
+	struct cpufreq_interactive_tunables *t;
+	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret < 0)
 		return ret;
-
 	tunables->io_is_busy = val;
 
+	if (!tunables->use_sched_load)
+		return count;
+
+	for_each_possible_cpu(cpu) {
+		if (!per_cpu(polinfo, cpu))
+			continue;
+		t = per_cpu(polinfo, cpu)->cached_tunables;
+		if (t && t->use_sched_load)
+			t->io_is_busy = val;
+	}
+	sched_set_io_is_busy(val);
+
 	return count;
 }
 
-show_one(hispeed_freq, "%u");
-show_one(go_hispeed_load, "%lu");
-show_one(min_sample_time, "%lu");
-show_one(timer_slack, "%lu");
-show_one(boost, "%u");
-show_one(boostpulse_duration, "%u");
-show_one(io_is_busy, "%u");
-
-gov_attr_rw(target_loads);
-gov_attr_rw(above_hispeed_delay);
-gov_attr_rw(hispeed_freq);
-gov_attr_rw(go_hispeed_load);
-gov_attr_rw(min_sample_time);
-gov_attr_rw(timer_rate);
-gov_attr_rw(timer_slack);
-gov_attr_rw(boost);
-gov_attr_wo(boostpulse);
-gov_attr_rw(boostpulse_duration);
-gov_attr_rw(io_is_busy);
-
-static struct attribute *interactive_attributes[] = {
-	&target_loads.attr,
-	&above_hispeed_delay.attr,
-	&hispeed_freq.attr,
-	&go_hispeed_load.attr,
-	&min_sample_time.attr,
-	&timer_rate.attr,
-	&timer_slack.attr,
-	&boost.attr,
-	&boostpulse.attr,
-	&boostpulse_duration.attr,
-	&io_is_busy.attr,
-	NULL
-};
-
-static struct kobj_type interactive_tunables_ktype = {
-	.default_attrs = interactive_attributes,
-	.sysfs_ops = &governor_sysfs_ops,
-};
-
-static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
-					     unsigned long val, void *data)
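+/*
+ * The first tunables instance to enable use_sched_load configures the
+ * scheduler's sampling window and io_is_busy; later instances inherit
+ * timer_rate and io_is_busy from an existing user so all windows stay in
+ * sync. Registration on the load-alert notifier chain is refcounted the
+ * same way under sched_lock.
+ */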
+static int cpufreq_interactive_enable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
 {
-	if (val == IDLE_END)
-		cpufreq_interactive_idle_end();
+	int rc = 0, j;
+	struct cpufreq_interactive_tunables *t;
 
+	mutex_lock(&sched_lock);
+
+	set_window_count++;
+	if (set_window_count > 1) {
+		for_each_possible_cpu(j) {
+			if (!per_cpu(polinfo, j))
+				continue;
+			t = per_cpu(polinfo, j)->cached_tunables;
+			if (t && t->use_sched_load) {
+				tunables->timer_rate = t->timer_rate;
+				tunables->io_is_busy = t->io_is_busy;
+				break;
+			}
+		}
+	} else {
+		rc = set_window_helper(tunables);
+		if (rc) {
+			pr_err("%s: Failed to set sched window\n", __func__);
+			set_window_count--;
+			goto out;
+		}
+		sched_set_io_is_busy(tunables->io_is_busy);
+	}
+
+	if (!tunables->use_migration_notif)
+		goto out;
+
+	migration_register_count++;
+	if (migration_register_count > 1)
+		goto out;
+	else
+		atomic_notifier_chain_register(&load_alert_notifier_head,
+						&load_notifier_block);
+out:
+	mutex_unlock(&sched_lock);
+	return rc;
+}
+
+static int cpufreq_interactive_disable_sched_input(
+			struct cpufreq_interactive_tunables *tunables)
+{
+	mutex_lock(&sched_lock);
+
+	if (tunables->use_migration_notif) {
+		migration_register_count--;
+		if (migration_register_count < 1)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	set_window_count--;
+
+	mutex_unlock(&sched_lock);
 	return 0;
 }
 
-static struct notifier_block cpufreq_interactive_idle_nb = {
-	.notifier_call = cpufreq_interactive_idle_notifier,
+static ssize_t show_use_sched_load(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", tunables->use_sched_load);
+}
+
+static ssize_t store_use_sched_load(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_sched_load == (bool) val)
+		return count;
+
+	tunables->use_sched_load = val;
+
+	if (val)
+		ret = cpufreq_interactive_enable_sched_input(tunables);
+	else
+		ret = cpufreq_interactive_disable_sched_input(tunables);
+
+	if (ret) {
+		tunables->use_sched_load = !val;
+		return ret;
+	}
+
+	return count;
+}
+
+static ssize_t show_use_migration_notif(
+		struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			tunables->use_migration_notif);
+}
+
+static ssize_t store_use_migration_notif(
+			struct cpufreq_interactive_tunables *tunables,
+			const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (tunables->use_migration_notif == (bool) val)
+		return count;
+	tunables->use_migration_notif = val;
+
+	if (!tunables->use_sched_load)
+		return count;
+
+	mutex_lock(&sched_lock);
+	if (val) {
+		migration_register_count++;
+		if (migration_register_count == 1)
+			atomic_notifier_chain_register(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	} else {
+		migration_register_count--;
+		if (!migration_register_count)
+			atomic_notifier_chain_unregister(
+					&load_alert_notifier_head,
+					&load_notifier_block);
+	}
+	mutex_unlock(&sched_lock);
+
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: one governor instance for the whole system
+ * - pol: one governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+show_store_gov_pol_sys(use_sched_load);
+show_store_gov_pol_sys(use_migration_notif);
+show_store_gov_pol_sys(max_freq_hysteresis);
+show_store_gov_pol_sys(align_windows);
+show_store_gov_pol_sys(ignore_hispeed_on_notif);
+show_store_gov_pol_sys(fast_ramp_down);
+show_store_gov_pol_sys(enable_prediction);
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(use_sched_load);
+gov_sys_pol_attr_rw(use_migration_notif);
+gov_sys_pol_attr_rw(max_freq_hysteresis);
+gov_sys_pol_attr_rw(align_windows);
+gov_sys_pol_attr_rw(ignore_hispeed_on_notif);
+gov_sys_pol_attr_rw(fast_ramp_down);
+gov_sys_pol_attr_rw(enable_prediction);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	&use_sched_load_gov_sys.attr,
+	&use_migration_notif_gov_sys.attr,
+	&max_freq_hysteresis_gov_sys.attr,
+	&align_windows_gov_sys.attr,
+	&ignore_hispeed_on_notif_gov_sys.attr,
+	&fast_ramp_down_gov_sys.attr,
+	&enable_prediction_gov_sys.attr,
+	NULL,
 };
 
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	&use_sched_load_gov_pol.attr,
+	&use_migration_notif_gov_pol.attr,
+	&max_freq_hysteresis_gov_pol.attr,
+	&align_windows_gov_pol.attr,
+	&ignore_hispeed_on_notif_gov_pol.attr,
+	&fast_ramp_down_gov_pol.attr,
+	&enable_prediction_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
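+/* Allocate a tunables instance initialized with the governor defaults. */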
+static struct cpufreq_interactive_tunables *alloc_tunable(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_tunables *tunables;
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (!tunables)
+		return ERR_PTR(-ENOMEM);
+
+	tunables->above_hispeed_delay = default_above_hispeed_delay;
+	tunables->nabove_hispeed_delay =
+		ARRAY_SIZE(default_above_hispeed_delay);
+	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+	tunables->target_loads = default_target_loads;
+	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_rate = DEFAULT_TIMER_RATE;
+	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+	spin_lock_init(&tunables->target_loads_lock);
+	spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+	return tunables;
+}
+
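+/*
+ * Allocate (or look up) the per-policy state shared by all CPUs in the
+ * policy: timers, locks, and one sched_load slot per related CPU.
+ */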
+static struct cpufreq_interactive_policyinfo *get_policyinfo(
+					struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_policyinfo *ppol =
+				per_cpu(polinfo, policy->cpu);
+	int i;
+	struct sched_load *sl;
+
+	/* polinfo already allocated for this policy, return it */
+	if (ppol)
+		return ppol;
+
+	ppol = kzalloc(sizeof(*ppol), GFP_KERNEL);
+	if (!ppol)
+		return ERR_PTR(-ENOMEM);
+
+	sl = kcalloc(cpumask_weight(policy->related_cpus), sizeof(*sl),
+		     GFP_KERNEL);
+	if (!sl) {
+		kfree(ppol);
+		return ERR_PTR(-ENOMEM);
+	}
+	ppol->sl = sl;
+
+	init_timer_deferrable(&ppol->policy_timer);
+	ppol->policy_timer.function = cpufreq_interactive_timer;
+	init_timer(&ppol->policy_slack_timer);
+	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
+	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	spin_lock_init(&ppol->load_lock);
+	spin_lock_init(&ppol->target_freq_lock);
+	init_rwsem(&ppol->enable_sem);
+
+	for_each_cpu(i, policy->related_cpus)
+		per_cpu(polinfo, i) = ppol;
+	return ppol;
+}
+
+/* This function is not multithread-safe. */
+static void free_policyinfo(int cpu)
+{
+	struct cpufreq_interactive_policyinfo *ppol = per_cpu(polinfo, cpu);
+	int j;
+
+	if (!ppol)
+		return;
+
+	for_each_possible_cpu(j)
+		if (per_cpu(polinfo, j) == ppol)
+			per_cpu(polinfo, j) = NULL;
+	kfree(ppol->cached_tunables);
+	kfree(ppol->sl);
+	kfree(ppol);
+}
+
+static struct cpufreq_interactive_tunables *get_tunables(
+				struct cpufreq_interactive_policyinfo *ppol)
+{
+	if (have_governor_per_policy())
+		return ppol->cached_tunables;
+	else
+		return cached_common_tunables;
+}
+
 /* Interactive Governor callbacks */
 struct interactive_governor {
 	struct cpufreq_governor gov;
@@ -1033,305 +1644,207 @@
 
 #define CPU_FREQ_GOV_INTERACTIVE	(&interactive_gov.gov)
 
-static void irq_work(struct irq_work *irq_work)
-{
-	struct interactive_cpu *icpu = container_of(irq_work, struct
-						    interactive_cpu, irq_work);
-
-	cpufreq_interactive_update(icpu);
-	icpu->work_in_progress = false;
-}
-
-static void update_util_handler(struct update_util_data *data, u64 time,
-				unsigned int flags)
-{
-	struct interactive_cpu *icpu = container_of(data,
-					struct interactive_cpu, update_util);
-	struct interactive_policy *ipolicy = icpu->ipolicy;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	u64 delta_ns;
-
-	/*
-	 * The irq-work may not be allowed to be queued up right now.
-	 * Possible reasons:
-	 * - Work has already been queued up or is in progress.
-	 * - It is too early (too little time from the previous sample).
-	 */
-	if (icpu->work_in_progress)
-		return;
-
-	delta_ns = time - icpu->last_sample_time;
-	if ((s64)delta_ns < tunables->sampling_rate * NSEC_PER_USEC)
-		return;
-
-	icpu->last_sample_time = time;
-	icpu->next_sample_jiffies = usecs_to_jiffies(tunables->sampling_rate) +
-				    jiffies;
-
-	icpu->work_in_progress = true;
-	irq_work_queue(&icpu->irq_work);
-}
-
-static void gov_set_update_util(struct interactive_policy *ipolicy)
-{
-	struct cpufreq_policy *policy = ipolicy->policy;
-	struct interactive_cpu *icpu;
-	int cpu;
-
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		icpu->last_sample_time = 0;
-		icpu->next_sample_jiffies = 0;
-		cpufreq_add_update_util_hook(cpu, &icpu->update_util,
-					     update_util_handler);
-	}
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
-	int i;
-
-	for_each_cpu(i, policy->cpus)
-		cpufreq_remove_update_util_hook(i);
-
-	synchronize_sched();
-}
-
-static void icpu_cancel_work(struct interactive_cpu *icpu)
-{
-	irq_work_sync(&icpu->irq_work);
-	icpu->work_in_progress = false;
-	del_timer_sync(&icpu->slack_timer);
-}
-
-static struct interactive_policy *
-interactive_policy_alloc(struct cpufreq_policy *policy)
-{
-	struct interactive_policy *ipolicy;
-
-	ipolicy = kzalloc(sizeof(*ipolicy), GFP_KERNEL);
-	if (!ipolicy)
-		return NULL;
-
-	ipolicy->policy = policy;
-
-	return ipolicy;
-}
-
-static void interactive_policy_free(struct interactive_policy *ipolicy)
-{
-	kfree(ipolicy);
-}
-
-static struct interactive_tunables *
-interactive_tunables_alloc(struct interactive_policy *ipolicy)
-{
-	struct interactive_tunables *tunables;
-
-	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
-	if (!tunables)
-		return NULL;
-
-	gov_attr_set_init(&tunables->attr_set, &ipolicy->tunables_hook);
-	if (!have_governor_per_policy())
-		global_tunables = tunables;
-
-	ipolicy->tunables = tunables;
-
-	return tunables;
-}
-
-static void interactive_tunables_free(struct interactive_tunables *tunables)
-{
-	if (!have_governor_per_policy())
-		global_tunables = NULL;
-
-	kfree(tunables);
-}
-
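+/*
+ * Governor ->init(): bind the policy to its tunables, reusing a cached
+ * instance across governor exit/init cycles, and create the sysfs group
+ * when the tunables are first used.
+ */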
 int cpufreq_interactive_init(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy;
-	struct interactive_tunables *tunables;
-	int ret;
+	int rc;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	/* State should be equivalent to EXIT */
-	if (policy->governor_data)
-		return -EBUSY;
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	ipolicy = interactive_policy_alloc(policy);
-	if (!ipolicy)
-		return -ENOMEM;
+	ppol = get_policyinfo(policy);
+	if (IS_ERR(ppol))
+		return PTR_ERR(ppol);
 
-	mutex_lock(&global_tunables_lock);
-
-	if (global_tunables) {
-		if (WARN_ON(have_governor_per_policy())) {
-			ret = -EINVAL;
-			goto free_int_policy;
-		}
-
-		policy->governor_data = ipolicy;
-		ipolicy->tunables = global_tunables;
-
-		gov_attr_set_get(&global_tunables->attr_set,
-				 &ipolicy->tunables_hook);
-		goto out;
+	if (have_governor_per_policy()) {
+		WARN_ON(tunables);
+	} else if (tunables) {
+		tunables->usage_count++;
+		cpumask_or(&controlled_cpus, &controlled_cpus,
+			   policy->related_cpus);
+		sched_update_freq_max_load(policy->related_cpus);
+		policy->governor_data = tunables;
+		return 0;
 	}
 
-	tunables = interactive_tunables_alloc(ipolicy);
+	tunables = get_tunables(ppol);
 	if (!tunables) {
-		ret = -ENOMEM;
-		goto free_int_policy;
+		tunables = alloc_tunable(policy);
+		if (IS_ERR(tunables))
+			return PTR_ERR(tunables);
 	}
 
-	tunables->hispeed_freq = policy->max;
-	tunables->above_hispeed_delay = default_above_hispeed_delay;
-	tunables->nabove_hispeed_delay =
-		ARRAY_SIZE(default_above_hispeed_delay);
-	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
-	tunables->target_loads = default_target_loads;
-	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
-	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->boostpulse_duration = DEFAULT_MIN_SAMPLE_TIME;
-	tunables->sampling_rate = DEFAULT_SAMPLING_RATE;
-	tunables->timer_slack = DEFAULT_TIMER_SLACK;
-	update_slack_delay(tunables);
+	tunables->usage_count = 1;
+	policy->governor_data = tunables;
+	if (!have_governor_per_policy())
+		common_tunables = tunables;
 
-	spin_lock_init(&tunables->target_loads_lock);
-	spin_lock_init(&tunables->above_hispeed_delay_lock);
+	rc = sysfs_create_group(get_governor_parent_kobj(policy),
+			get_sysfs_attr());
+	if (rc) {
+		kfree(tunables);
+		policy->governor_data = NULL;
+		if (!have_governor_per_policy())
+			common_tunables = NULL;
+		return rc;
+	}
 
-	policy->governor_data = ipolicy;
-
-	ret = kobject_init_and_add(&tunables->attr_set.kobj,
-				   &interactive_tunables_ktype,
-				   get_governor_parent_kobj(policy), "%s",
-				   interactive_gov.gov.name);
-	if (ret)
-		goto fail;
-
-	/* One time initialization for governor */
-	if (!interactive_gov.usage_count++) {
-		idle_notifier_register(&cpufreq_interactive_idle_nb);
+	if (!interactive_gov.usage_count++)
 		cpufreq_register_notifier(&cpufreq_notifier_block,
-					  CPUFREQ_TRANSITION_NOTIFIER);
-	}
+				CPUFREQ_TRANSITION_NOTIFIER);
 
- out:
-	mutex_unlock(&global_tunables_lock);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_enable_sched_input(tunables);
+
+	cpumask_or(&controlled_cpus, &controlled_cpus,
+		   policy->related_cpus);
+	sched_update_freq_max_load(policy->related_cpus);
+
+	if (have_governor_per_policy())
+		ppol->cached_tunables = tunables;
+	else
+		cached_common_tunables = tunables;
+
 	return 0;
-
- fail:
-	policy->governor_data = NULL;
-	interactive_tunables_free(tunables);
-
- free_int_policy:
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
-	pr_err("governor initialization failed (%d)\n", ret);
-
-	return ret;
 }
 
 void cpufreq_interactive_exit(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_tunables *tunables = ipolicy->tunables;
-	unsigned int count;
+	struct cpufreq_interactive_tunables *tunables;
 
-	mutex_lock(&global_tunables_lock);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	/* Last policy using the governor ? */
-	if (!--interactive_gov.usage_count) {
-		cpufreq_unregister_notifier(&cpufreq_notifier_block,
-					    CPUFREQ_TRANSITION_NOTIFIER);
-		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+	BUG_ON(!tunables);
+
+	cpumask_andnot(&controlled_cpus, &controlled_cpus,
+		       policy->related_cpus);
+	sched_update_freq_max_load(cpu_possible_mask);
+	if (!--tunables->usage_count) {
+		/* Last policy using the governor ? */
+		if (!--interactive_gov.usage_count)
+			cpufreq_unregister_notifier(&cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+		sysfs_remove_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+
+		common_tunables = NULL;
 	}
 
-	count = gov_attr_set_put(&tunables->attr_set, &ipolicy->tunables_hook);
 	policy->governor_data = NULL;
-	if (!count)
-		interactive_tunables_free(tunables);
 
-	mutex_unlock(&global_tunables_lock);
-
-	interactive_policy_free(ipolicy);
+	if (tunables->use_sched_load)
+		cpufreq_interactive_disable_sched_input(tunables);
 }
 
 int cpufreq_interactive_start(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-		icpu->target_freq = policy->cur;
-		icpu->floor_freq = icpu->target_freq;
-		icpu->pol_floor_val_time = ktime_to_us(ktime_get());
-		icpu->loc_floor_val_time = icpu->pol_floor_val_time;
-		icpu->pol_hispeed_val_time = icpu->pol_floor_val_time;
-		icpu->loc_hispeed_val_time = icpu->pol_floor_val_time;
+	BUG_ON(!tunables);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = ipolicy;
-		up_write(&icpu->enable_sem);
+	freq_table = policy->freq_table;
+	if (!tunables->hispeed_freq)
+		tunables->hispeed_freq = policy->max;
 
-		slack_timer_resched(icpu, cpu, false);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->policy = policy;
+	ppol->target_freq = policy->cur;
+	ppol->freq_table = freq_table;
+	ppol->p_nolim = *policy;
+	ppol->p_nolim.min = policy->cpuinfo.min_freq;
+	ppol->p_nolim.max = policy->cpuinfo.max_freq;
+	ppol->floor_freq = ppol->target_freq;
+	ppol->floor_validate_time = ktime_to_us(ktime_get());
+	ppol->hispeed_validate_time = ppol->floor_validate_time;
+	ppol->min_freq = policy->min;
+	ppol->reject_notification = true;
+	ppol->notif_pending = false;
+	down_write(&ppol->enable_sem);
+	del_timer_sync(&ppol->policy_timer);
+	del_timer_sync(&ppol->policy_slack_timer);
+	ppol->policy_timer.data = policy->cpu;
+	ppol->last_evaluated_jiffy = get_jiffies_64();
+	cpufreq_interactive_timer_start(tunables, policy->cpu);
+	ppol->governor_enabled = 1;
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
 
-	gov_set_update_util(ipolicy);
+	mutex_unlock(&gov_lock);
 	return 0;
 }
 
 void cpufreq_interactive_stop(struct cpufreq_policy *policy)
 {
-	struct interactive_policy *ipolicy = policy->governor_data;
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	gov_clear_update_util(ipolicy->policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
 
-		icpu_cancel_work(icpu);
+	mutex_lock(&gov_lock);
 
-		down_write(&icpu->enable_sem);
-		icpu->ipolicy = NULL;
-		up_write(&icpu->enable_sem);
-	}
+	ppol = per_cpu(polinfo, policy->cpu);
+	ppol->reject_notification = true;
+	down_write(&ppol->enable_sem);
+	ppol->governor_enabled = 0;
+	ppol->target_freq = 0;
+	del_timer_sync(&ppol->policy_timer);
+	del_timer_sync(&ppol->policy_slack_timer);
+	up_write(&ppol->enable_sem);
+	ppol->reject_notification = false;
+
+	mutex_unlock(&gov_lock);
 }
 
 void cpufreq_interactive_limits(struct cpufreq_policy *policy)
 {
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-	unsigned long flags;
+	struct cpufreq_interactive_policyinfo *ppol;
+	struct cpufreq_interactive_tunables *tunables;
 
-	cpufreq_policy_apply_limits(policy);
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
 
-	for_each_cpu(cpu, policy->cpus) {
-		icpu = &per_cpu(interactive_cpu, cpu);
+	BUG_ON(!tunables);
+	ppol = per_cpu(polinfo, policy->cpu);
 
-		spin_lock_irqsave(&icpu->target_freq_lock, flags);
+	__cpufreq_driver_target(policy,
+			ppol->target_freq, CPUFREQ_RELATION_L);
 
-		if (policy->max < icpu->target_freq)
-			icpu->target_freq = policy->max;
-		else if (policy->min > icpu->target_freq)
-			icpu->target_freq = policy->min;
-
-		spin_unlock_irqrestore(&icpu->target_freq_lock, flags);
+	down_read(&ppol->enable_sem);
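+	/*
+	 * If the new minimum dropped below the previously cached one,
+	 * reschedule the policy timer for an immediate re-evaluation so the
+	 * frequency can fall right away.
+	 */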
+	if (ppol->governor_enabled) {
+		if (policy->min < ppol->min_freq)
+			cpufreq_interactive_timer_resched(policy->cpu,
+							  true);
+		ppol->min_freq = policy->min;
 	}
+	up_read(&ppol->enable_sem);
 }
 
 static struct interactive_governor interactive_gov = {
 	.gov = {
 		.name			= "interactive",
-		.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+		.max_transition_latency	= 10000000,
 		.owner			= THIS_MODULE,
 		.init			= cpufreq_interactive_init,
 		.exit			= cpufreq_interactive_exit,
@@ -1341,47 +1854,24 @@
 	}
 };
 
-static void cpufreq_interactive_nop_timer(unsigned long data)
-{
-	/*
-	 * The purpose of slack-timer is to wake up the CPU from IDLE, in order
-	 * to decrease its frequency if it is not set to minimum already.
-	 *
-	 * This is important for platforms where CPU with higher frequencies
-	 * consume higher power even at IDLE.
-	 */
-}
-
 static int __init cpufreq_interactive_gov_init(void)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
-	struct interactive_cpu *icpu;
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu) {
-		icpu = &per_cpu(interactive_cpu, cpu);
-
-		init_irq_work(&icpu->irq_work, irq_work);
-		spin_lock_init(&icpu->load_lock);
-		spin_lock_init(&icpu->target_freq_lock);
-		init_rwsem(&icpu->enable_sem);
-
-		/* Initialize per-cpu slack-timer */
-		init_timer_pinned(&icpu->slack_timer);
-		icpu->slack_timer.function = cpufreq_interactive_nop_timer;
-	}
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
 	spin_lock_init(&speedchange_cpumask_lock);
-	speedchange_task = kthread_create(cpufreq_interactive_speedchange_task,
-					  NULL, "cfinteractive");
+	mutex_init(&gov_lock);
+	mutex_init(&sched_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
 	if (IS_ERR(speedchange_task))
 		return PTR_ERR(speedchange_task);
 
 	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
 	get_task_struct(speedchange_task);
 
-	/* wake up so the thread does not look hung to the freezer */
-	wake_up_process(speedchange_task);
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process_no_notif(speedchange_task);
 
 	return cpufreq_register_governor(CPU_FREQ_GOV_INTERACTIVE);
 }
@@ -1399,12 +1889,19 @@
 
 static void __exit cpufreq_interactive_gov_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(CPU_FREQ_GOV_INTERACTIVE);
 	kthread_stop(speedchange_task);
 	put_task_struct(speedchange_task);
+
+	for_each_possible_cpu(cpu)
+		free_policyinfo(cpu);
 }
+
 module_exit(cpufreq_interactive_gov_exit);
 
 MODULE_AUTHOR("Mike Chan <mike@android.com>");
-MODULE_DESCRIPTION("'cpufreq_interactive' - A dynamic cpufreq governor for Latency sensitive workloads");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for latency sensitive workloads");
 MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 0000000..0caa8d1
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,496 @@
+/* drivers/cpufreq/qcom-cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+
+static DEFINE_MUTEX(l2bw_lock);
+
+static struct clk *cpu_clk[NR_CPUS];
+static struct clk *l2_clk;
+static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
+static bool hotplug_ready;
+
+struct cpufreq_suspend_t {
+	struct mutex suspend_mutex;
+	int device_suspended;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+
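+/*
+ * Set one policy's clock rate: round the request against the CPU clock,
+ * wrap clk_set_rate() in cpufreq transition notifications, and emit
+ * trace events around the switch.
+ */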
+static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
+			unsigned int index)
+{
+	int ret = 0;
+	struct cpufreq_freqs freqs;
+	unsigned long rate;
+
+	freqs.old = policy->cur;
+	freqs.new = new_freq;
+	freqs.cpu = policy->cpu;
+
+	trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
+	cpufreq_freq_transition_begin(policy, &freqs);
+
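+	/* cpufreq works in kHz while the clk API expects rates in Hz. */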
+	rate = new_freq * 1000;
+	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
+	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
+	if (!ret)
+		trace_cpu_frequency_switch_end(policy->cpu);
+
+	return ret;
+}
+
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	int ret = 0;
+	int index;
+	struct cpufreq_frequency_table *table;
+
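+	/* suspend_mutex serializes freq changes with the suspend/resume path. */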
+	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+
+	if (target_freq == policy->cur)
+		goto done;
+
+	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
+		pr_debug("cpufreq: cpu%d rejecting frequency change while suspended.\n",
+			 policy->cpu);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	table = policy->freq_table;
+	if (!table) {
+		pr_err("cpufreq: Failed to get frequency table for CPU%u\n",
+		       policy->cpu);
+		ret = -ENODEV;
+		goto done;
+	}
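+
+	/* Resolve target_freq to a table index honouring the relation. */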
+	index = cpufreq_frequency_table_target(policy, target_freq, relation);
+
+	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
+		policy->cpu, target_freq, relation,
+		policy->min, policy->max, table[index].frequency);
+
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+done:
+	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
+	return ret;
+}
+
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk[cpu]) / 1000;
+}
+
+static int msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int cur_freq;
+	int index;
+	int ret = 0;
+	struct cpufreq_frequency_table *table =
+			per_cpu(freq_table, policy->cpu);
+	int cpu;
+
+	/*
+	 * On some SoCs, several cores are clocked by the same source and
+	 * their frequencies cannot be changed independently. Find all other
+	 * CPUs that share the same clock and mark them as controlled by the
+	 * same policy.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
+			cpumask_set_cpu(cpu, policy->cpus);
+
+	ret = cpufreq_table_validate_and_show(policy, table);
+	if (ret) {
+		pr_err("cpufreq: failed to get policy min/max\n");
+		return ret;
+	}
+
+	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
+
+	index = cpufreq_frequency_table_target(policy, cur_freq,
+						CPUFREQ_RELATION_H);
+	/*
+	 * Call set_cpu_freq unconditionally so that when cpu is set to
+	 * online, frequency limit will always be updated.
+	 */
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+	if (ret)
+		return ret;
+	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
+			policy->cpu, cur_freq, table[index].frequency);
+	policy->cur = table[index].frequency;
+
+	return 0;
+}
+
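+/*
+ * Hotplug callbacks: clk_prepare()/clk_unprepare() can sleep, so they run
+ * from the PREPARE/DEAD states in process context, while the atomic
+ * clk_enable()/clk_disable() calls run from the STARTING/DYING states on
+ * the hotplugged CPU itself.
+ */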
+static int qcom_cpufreq_dead_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_unprepare(cpu_clk[cpu]);
+	clk_unprepare(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_up_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_prepare(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_prepare(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_unprepare(l2_clk);
+	return rc;
+}
+
+static int qcom_cpufreq_dying_cpu(unsigned int cpu)
+{
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	clk_disable(cpu_clk[cpu]);
+	clk_disable(l2_clk);
+	return 0;
+}
+
+static int qcom_cpufreq_starting_cpu(unsigned int cpu)
+{
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return -EINVAL;
+
+	rc = clk_enable(l2_clk);
+	if (rc < 0)
+		return rc;
+	rc = clk_enable(cpu_clk[cpu]);
+	if (rc < 0)
+		clk_disable(l2_clk);
+	return rc;
+}
+
+static int msm_cpufreq_suspend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
+		per_cpu(suspend_data, cpu).device_suspended = 1;
+		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_resume(void)
+{
+	int cpu, ret;
+	struct cpufreq_policy policy;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	/*
+	 * Freq request might be rejected during suspend, resulting
+	 * in policy->cur violating min/max constraint.
+	 * Correct the frequency as soon as possible.
+	 */
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		ret = cpufreq_get_policy(&policy, cpu);
+		if (ret)
+			continue;
+		if (policy.cur <= policy.max && policy.cur >= policy.min)
+			continue;
+		ret = cpufreq_update_policy(cpu);
+		if (ret)
+			pr_info("cpufreq: Current frequency violates policy min/max for CPU%d\n",
+				cpu);
+		else
+			pr_info("cpufreq: Frequency violation fixed for CPU%d\n",
+				cpu);
+	}
+	put_online_cpus();
+
+	return NOTIFY_DONE;
+}
+
+static int msm_cpufreq_pm_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return msm_cpufreq_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return msm_cpufreq_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block msm_cpufreq_pm_notifier = {
+	.notifier_call = msm_cpufreq_pm_event,
+};
+
+static struct freq_attr *msm_freq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+	/* loops_per_jiffy is constant across frequency changes. */
+	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
+			  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.init		= msm_cpufreq_init,
+	.verify		= msm_cpufreq_verify,
+	.target		= msm_cpufreq_target,
+	.get		= msm_cpufreq_get_freq,
+	.name		= "msm",
+	.attr		= msm_freq_attr,
+};
+
+static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
+						char *tbl_name, int cpu)
+{
+	int ret, nf, i, j;
+	u32 *data;
+	struct cpufreq_frequency_table *ftbl;
+
+	/* Parse list of usable CPU frequencies. */
+	if (!of_find_property(dev->of_node, tbl_name, &nf))
+		return ERR_PTR(-EINVAL);
+	nf /= sizeof(*data);
+
+	if (nf == 0)
+		return ERR_PTR(-EINVAL);
+
+	data = devm_kzalloc(dev, nf * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
+	if (ret)
+		return ERR_PTR(ret);
+
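+	/* Allocate one extra entry for the CPUFREQ_TABLE_END terminator. */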
+	ftbl = devm_kzalloc(dev, (nf + 1) * sizeof(*ftbl), GFP_KERNEL);
+	if (!ftbl)
+		return ERR_PTR(-ENOMEM);
+
+	j = 0;
+	for (i = 0; i < nf; i++) {
+		unsigned long f;
+
+		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			break;
+		f /= 1000;
+
+		/*
+		 * Don't repeat frequencies if they round up to the same clock
+		 * frequency.
+		 */
+		if (j > 0 && f <= ftbl[j - 1].frequency)
+			continue;
+
+		ftbl[j].driver_data = j;
+		ftbl[j].frequency = f;
+		j++;
+	}
+
+	ftbl[j].driver_data = j;
+	ftbl[j].frequency = CPUFREQ_TABLE_END;
+
+	devm_kfree(dev, data);
+
+	return ftbl;
+}
+
+static int msm_cpufreq_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	char clk_name[] = "cpu??_clk";
+	char tbl_name[] = "qcom,cpufreq-table-??";
+	struct clk *c;
+	int cpu;
+	struct cpufreq_frequency_table *ftbl;
+
+	l2_clk = devm_clk_get(dev, "l2_clk");
+	if (IS_ERR(l2_clk))
+		l2_clk = NULL;
+
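+	/*
+	 * CPU0 must have its own clock; any later CPU without a dedicated
+	 * clock shares the previous CPU's clock.
+	 */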
+	for_each_possible_cpu(cpu) {
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		c = devm_clk_get(dev, clk_name);
+		if (cpu == 0 && IS_ERR(c))
+			return PTR_ERR(c);
+		else if (IS_ERR(c))
+			c = cpu_clk[cpu-1];
+		cpu_clk[cpu] = c;
+	}
+	hotplug_ready = true;
+
+	/* Use per-policy governor tunable for some targets */
+	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
+		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
+
+	/* Parse common cpufreq table for all CPUs */
+	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
+	if (!IS_ERR(ftbl)) {
+		for_each_possible_cpu(cpu)
+			per_cpu(freq_table, cpu) = ftbl;
+		return 0;
+	}
+
+	/*
+	 * No common table. Parse individual tables for each unique
+	 * CPU clock.
+	 */
+	for_each_possible_cpu(cpu) {
+		snprintf(tbl_name, sizeof(tbl_name),
+			 "qcom,cpufreq-table-%d", cpu);
+		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);
+
+		/* CPU0 must contain freq table */
+		if (cpu == 0 && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU0's freq table\n");
+			return PTR_ERR(ftbl);
+		}
+		if (cpu == 0) {
+			per_cpu(freq_table, cpu) = ftbl;
+			continue;
+		}
+
+		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
+			dev_err(dev, "Failed to parse CPU%d's freq table\n",
+				cpu);
+			return PTR_ERR(ftbl);
+		}
+
+		/* Use previous CPU's table if it shares same clock */
+		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
+			if (!IS_ERR(ftbl)) {
+				dev_warn(dev, "Conflicting tables for CPU%d\n",
+					 cpu);
+				devm_kfree(dev, ftbl);
+			}
+			ftbl = per_cpu(freq_table, cpu - 1);
+		}
+		per_cpu(freq_table, cpu) = ftbl;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id msm_cpufreq_match_table[] = {
+	{ .compatible = "qcom,msm-cpufreq" },
+	{}
+};
+
+static struct platform_driver msm_cpufreq_plat_driver = {
+	.probe = msm_cpufreq_probe,
+	.driver = {
+		.name = "msm-cpufreq",
+		.of_match_table = msm_cpufreq_match_table,
+	},
+};
+
+static int __init msm_cpufreq_register(void)
+{
+	int cpu, rc;
+
+	for_each_possible_cpu(cpu) {
+		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
+		per_cpu(suspend_data, cpu).device_suspended = 0;
+	}
+
+	rc = platform_driver_register(&msm_cpufreq_plat_driver);
+	if (rc < 0) {
+		/* Unblock hotplug if msm-cpufreq probe fails */
+		cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
+		cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+		for_each_possible_cpu(cpu)
+			mutex_destroy(&per_cpu(suspend_data, cpu).suspend_mutex);
+		return rc;
+	}
+
+	register_pm_notifier(&msm_cpufreq_pm_notifier);
+	return cpufreq_register_driver(&msm_cpufreq_driver);
+}
+
+subsys_initcall(msm_cpufreq_register);
+
+static int __init msm_cpufreq_early_register(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
+					"AP_QCOM_CPUFREQ_STARTING",
+					qcom_cpufreq_starting_cpu,
+					qcom_cpufreq_dying_cpu);
+	if (ret)
+		return ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
+					"QCOM_CPUFREQ_PREPARE",
+					qcom_cpufreq_up_cpu,
+					qcom_cpufreq_dead_cpu);
+	if (!ret)
+		return ret;
+	cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
+	return ret;
+}
+core_initcall(msm_cpufreq_early_register);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c24e57..817f3b9 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -72,6 +72,42 @@
 	  through sysfs entries. The passive governor recommends that
 	  devfreq device uses the OPP table to get the frequency/voltage.
 
+config DEVFREQ_GOV_CPUFREQ
+	tristate "CPUfreq"
+	depends on CPU_FREQ
+	help
+	  Chooses the device frequency based on the online CPUs' current
+	  frequency and one or more CPU-frequency to device-frequency mapping
+	  tables. This governor can be useful for controlling devices such as
+	  DDR, cache, CCI, etc.
+
+config QCOM_BIMC_BWMON
+	tristate "QCOM BIMC Bandwidth monitor hardware"
+	depends on ARCH_QCOM
+	help
+	  The BIMC Bandwidth monitor hardware allows for monitoring the
+	  traffic coming from each master port connected to the BIMC. It also
+	  has the capability to raise an IRQ when the count exceeds a
+	  programmable limit.
+
+config DEVFREQ_GOV_QCOM_BW_HWMON
+	tristate "HW monitor based governor for device BW"
+	depends on QCOM_BIMC_BWMON
+	help
+	  HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote by using BIMC counters to monitor
+	  the CPU's use of DDR. Since this uses target specific counters it
+	  can conflict with existing profiling tools.  This governor is unlikely
+	  to be useful for non-QCOM devices.
+
+config DEVFREQ_GOV_QCOM_CACHE_HWMON
+	tristate "HW monitor based governor for cache frequency"
+	help
+	  HW monitor based governor for cache frequency scaling. This
+	  governor sets the cache frequency by using PM counters to monitor the
+	  CPU's use of cache. Since this governor uses some of the PM counters
+	  it can conflict with existing profiling tools. This governor is
+	  unlikely to be useful for other devices.
+
 comment "DEVFREQ Drivers"
 
 config DEVFREQ_GOV_QCOM_ADRENO_TZ
@@ -121,6 +157,30 @@
           It sets the frequency for the memory controller and reads the usage counts
           from hardware.
 
+config DEVFREQ_SIMPLE_DEV
+	tristate "Device driver for simple clock device with no status info"
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	help
+	  Device driver for simple devices that control their frequency using
+	  clock APIs and don't have any form of status reporting.
+
+config QCOM_DEVFREQ_DEVBW
+	bool "QCOM DEVFREQ device for device master <-> slave IB/AB BW voting"
+	depends on ARCH_QCOM
+	select DEVFREQ_GOV_PERFORMANCE
+	select DEVFREQ_GOV_POWERSAVE
+	select DEVFREQ_GOV_USERSPACE
+	select DEVFREQ_GOV_CPUFREQ
+	default n
+	help
+	  Different devfreq governors use this devfreq device to make CPU to
+	  DDR IB/AB bandwidth votes. This driver provides a SoC topology
+	  agnostic interface so that some of the devfreq governors can be
+	  shared across SoCs.
+
 source "drivers/devfreq/event/Kconfig"
 
 endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 0fedc4c..05f4a83 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -5,12 +5,19 @@
 obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
 obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
 obj-$(CONFIG_DEVFREQ_GOV_PASSIVE)	+= governor_passive.o
+obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ)	+= governor_cpufreq.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_QCOM_BIMC_BWMON)		+= bimc-bwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON)	+= governor_bw_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON)	+= governor_cache_hwmon.o
+
 # DEVFREQ Drivers
 obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ)	+= exynos-bus.o
 obj-$(CONFIG_ARM_RK3399_DMC_DEVFREQ)	+= rk3399_dmc.o
 obj-$(CONFIG_ARM_TEGRA_DEVFREQ)		+= tegra-devfreq.o
+obj-$(CONFIG_QCOM_DEVFREQ_DEVBW)		+= devfreq_devbw.o
+obj-$(CONFIG_DEVFREQ_SIMPLE_DEV)	+= devfreq_simple_dev.o
 
 # DEVFREQ Event Drivers
 obj-$(CONFIG_PM_DEVFREQ_EVENT)		+= event/
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
new file mode 100644
index 0000000..df0f4e9
--- /dev/null
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bimc-bwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_bw_hwmon.h"
+
+#define GLB_INT_STATUS(m)	((m)->global_base + 0x100)
+#define GLB_INT_CLR(m)		((m)->global_base + 0x108)
+#define	GLB_INT_EN(m)		((m)->global_base + 0x10C)
+#define MON_INT_STATUS(m)	((m)->base + 0x100)
+#define MON_INT_CLR(m)		((m)->base + 0x108)
+#define	MON_INT_EN(m)		((m)->base + 0x10C)
+#define	MON_EN(m)		((m)->base + 0x280)
+#define MON_CLEAR(m)		((m)->base + 0x284)
+#define MON_CNT(m)		((m)->base + 0x288)
+#define MON_THRES(m)		((m)->base + 0x290)
+#define MON_MASK(m)		((m)->base + 0x298)
+#define MON_MATCH(m)		((m)->base + 0x29C)
+
+struct bwmon_spec {
+	bool wrap_on_thres;
+	bool overflow;
+};
+
+struct bwmon {
+	void __iomem *base;
+	void __iomem *global_base;
+	unsigned int mport;
+	unsigned int irq;
+	const struct bwmon_spec *spec;
+	struct device *dev;
+	struct bw_hwmon hw;
+};
+
+#define to_bwmon(ptr)		container_of(ptr, struct bwmon, hw)
+
+static DEFINE_SPINLOCK(glb_lock);
+static void mon_enable(struct bwmon *m)
+{
+	writel_relaxed(0x1, MON_EN(m));
+}
+
+static void mon_disable(struct bwmon *m)
+{
+	writel_relaxed(0x0, MON_EN(m));
+}
+
+static void mon_clear(struct bwmon *m)
+{
+	writel_relaxed(0x1, MON_CLEAR(m));
+	/*
+	 * The counter clear and IRQ clear bits are not in the same 4KB
+	 * region. So, we need to make sure the counter clear is completed
+	 * before we try to clear the IRQ or do any other counter operations.
+	 */
+	mb();
+}
+
+static void mon_irq_enable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val |= 1 << m->mport;
+	writel_relaxed(val, GLB_INT_EN(m));
+	spin_unlock(&glb_lock);
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val |= 0x1;
+	writel_relaxed(val, MON_INT_EN(m));
+}
+
+static void mon_irq_disable(struct bwmon *m)
+{
+	u32 val;
+
+	spin_lock(&glb_lock);
+	val = readl_relaxed(GLB_INT_EN(m));
+	val &= ~(1 << m->mport);
+	writel_relaxed(val, GLB_INT_EN(m));
+	spin_unlock(&glb_lock);
+
+	val = readl_relaxed(MON_INT_EN(m));
+	val &= ~0x1;
+	writel_relaxed(val, MON_INT_EN(m));
+}
+
+static unsigned int mon_irq_status(struct bwmon *m)
+{
+	u32 mval;
+
+	mval = readl_relaxed(MON_INT_STATUS(m));
+
+	dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+			readl_relaxed(GLB_INT_STATUS(m)));
+
+	return mval;
+}
+
+static void mon_irq_clear(struct bwmon *m)
+{
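+	/*
+	 * Clear the monitor-local IRQ first, then the global status bit for
+	 * this master port. The barriers order the two relaxed writes, which
+	 * target different register regions.
+	 */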
+	writel_relaxed(0x3, MON_INT_CLR(m));
+	mb();
+	writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+	mb();
+}
+
+static void mon_set_limit(struct bwmon *m, u32 count)
+{
+	writel_relaxed(count, MON_THRES(m));
+	dev_dbg(m->dev, "Thres: %08x\n", count);
+}
+
+static u32 mon_get_limit(struct bwmon *m)
+{
+	return readl_relaxed(MON_THRES(m));
+}
+
+#define THRES_HIT(status)	(status & BIT(0))
+#define OVERFLOW(status)	(status & BIT(1))
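+/*
+ * Reconstruct the byte count for the sample window: on HW with an overflow
+ * bit the 32-bit counter wrapped once, and on wrap_on_thres HW the counter
+ * restarts from zero after crossing the threshold, so the threshold is
+ * added back in.
+ */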
+static unsigned long mon_get_count(struct bwmon *m)
+{
+	unsigned long count, status;
+
+	count = readl_relaxed(MON_CNT(m));
+	status = mon_irq_status(m);
+
+	dev_dbg(m->dev, "Counter: %08lx\n", count);
+
+	if (OVERFLOW(status) && m->spec->overflow)
+		count += 0xFFFFFFFF;
+	if (THRES_HIT(status) && m->spec->wrap_on_thres)
+		count += mon_get_limit(m);
+
+	dev_dbg(m->dev, "Actual Count: %08lx\n", count);
+
+	return count;
+}
+
+/* ********** CPUBW specific code ********** */
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
+{
+	bytes *= USEC_PER_SEC;
+	do_div(bytes, us);
+	bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
+	return bytes;
+}
+
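+/*
+ * Convert an MBps figure into the byte count expected over a window of
+ * "ms" milliseconds, padded by tolerance_percent so that the threshold
+ * IRQ fires only on a meaningful increase in traffic.
+ */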
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
+				  unsigned int tolerance_percent)
+{
+	mbps *= (100 + tolerance_percent) * ms;
+	mbps /= 100;
+	mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+	mbps *= SZ_1M;
+	return mbps;
+}
+
+static unsigned long meas_bw_and_set_irq(struct bw_hwmon *hw,
+					 unsigned int tol, unsigned int us)
+{
+	unsigned long mbps;
+	u32 limit;
+	unsigned int sample_ms = hw->df->profile->polling_ms;
+	struct bwmon *m = to_bwmon(hw);
+
+	mon_disable(m);
+
+	mbps = mon_get_count(m);
+	mbps = bytes_to_mbps(mbps, us);
+
+	/*
+	 * If the counter wraps on thres, don't set the thres too low.
+	 * Setting it too low runs the risk of the counter wrapping around
+	 * multiple times before the IRQ is processed.
+	 */
+	if (likely(!m->spec->wrap_on_thres))
+		limit = mbps_to_bytes(mbps, sample_ms, tol);
+	else
+		limit = mbps_to_bytes(max(mbps, 400UL), sample_ms, tol);
+
+	mon_set_limit(m, limit);
+
+	mon_clear(m);
+	mon_irq_clear(m);
+	mon_enable(m);
+
+	dev_dbg(m->dev, "MBps = %lu\n", mbps);
+	return mbps;
+}
+
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+	struct bwmon *m = dev;
+
+	if (mon_irq_status(m)) {
+		update_bw_hwmon(&m->hw);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+	struct bwmon *m = to_bwmon(hw);
+	u32 limit;
+	int ret;
+
+	ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	mon_disable(m);
+
+	limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+	mon_set_limit(m, limit);
+
+	mon_clear(m);
+	mon_irq_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+
+	return 0;
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_disable(m);
+	mon_clear(m);
+	mon_irq_clear(m);
+}
+
+static int suspend_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+
+	free_irq(m->irq, m);
+	mon_disable(m);
+	mon_irq_disable(m);
+	mon_irq_clear(m);
+
+	return 0;
+}
+
+static int resume_bw_hwmon(struct bw_hwmon *hw)
+{
+	struct bwmon *m = to_bwmon(hw);
+	int ret;
+
+	mon_clear(m);
+	mon_irq_enable(m);
+	mon_enable(m);
+	ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler,
+				  IRQF_ONESHOT | IRQF_SHARED,
+				  dev_name(m->dev), m);
+	if (ret) {
+		dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*************************************************************************/
+
+static const struct bwmon_spec spec[] = {
+	{ .wrap_on_thres = true, .overflow = false },
+	{ .wrap_on_thres = false, .overflow = true },
+};
+
+static const struct of_device_id bimc_bwmon_match_table[] = {
+	{ .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
+	{ .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
+	{}
+};
+
+static int bimc_bwmon_driver_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct bwmon *m;
+	const struct of_device_id *id;
+	int ret;
+	u32 data;
+
+	m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+	if (!m)
+		return -ENOMEM;
+	m->dev = dev;
+
+	ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+	if (ret) {
+		dev_err(dev, "mport not found!\n");
+		return ret;
+	}
+	m->mport = data;
+
+	id = of_match_device(bimc_bwmon_match_table, dev);
+	if (!id) {
+		dev_err(dev, "Unknown device type!\n");
+		return -ENODEV;
+	}
+	m->spec = id->data;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+	if (!res) {
+		dev_err(dev, "base not found!\n");
+		return -EINVAL;
+	}
+	m->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->base) {
+		dev_err(dev, "Unable map base!\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
+	if (!res) {
+		dev_err(dev, "global_base not found!\n");
+		return -EINVAL;
+	}
+	m->global_base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!m->global_base) {
+		dev_err(dev, "Unable map global_base!\n");
+		return -ENOMEM;
+	}
+
+	m->irq = platform_get_irq(pdev, 0);
+	if (m->irq < 0) {
+		dev_err(dev, "Unable to get IRQ number\n");
+		return m->irq;
+	}
+
+	m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+	if (!m->hw.of_node)
+		return -EINVAL;
+	m->hw.start_hwmon = &start_bw_hwmon;
+	m->hw.stop_hwmon = &stop_bw_hwmon;
+	m->hw.suspend_hwmon = &suspend_bw_hwmon;
+	m->hw.resume_hwmon = &resume_bw_hwmon;
+	m->hw.meas_bw_and_set_irq = &meas_bw_and_set_irq;
+
+	ret = register_bw_hwmon(dev, &m->hw);
+	if (ret) {
+		dev_err(dev, "Dev BW hwmon registration failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bimc_bwmon_driver = {
+	.probe = bimc_bwmon_driver_probe,
+	.driver = {
+		.name = "bimc-bwmon",
+		.of_match_table = bimc_bwmon_match_table,
+	},
+};
+
+module_platform_driver(bimc_bwmon_driver);
+MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c
new file mode 100644
index 0000000..5c7959c
--- /dev/null
+++ b/drivers/devfreq/devfreq_devbw.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devbw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+/* Has to be ULL to prevent overflow where this macro is used. */
+#define MBYTE (1ULL << 20)
+#define MAX_PATHS	2
+#define DBL_BUF		2
+
+struct dev_data {
+	struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+	struct msm_bus_paths bw_levels[DBL_BUF];
+	struct msm_bus_scale_pdata bw_data;
+	int num_paths;
+	u32 bus_client;
+	int cur_idx;
+	int cur_ab;
+	int cur_ib;
+	long gov_ab;
+	struct devfreq *df;
+	struct devfreq_dev_profile dp;
+};
+
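+/*
+ * Bus votes are double-buffered: new IB/AB values are written to the idle
+ * half of bw_levels and cur_idx flips only after msm_bus accepts the
+ * request, so a failed update leaves the current vote intact.
+ */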
+static int set_bw(struct device *dev, int new_ib, int new_ab)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	int i, ret;
+
+	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
+		return 0;
+
+	i = (d->cur_idx + 1) % DBL_BUF;
+
+	d->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[0].ab = new_ab / d->num_paths * MBYTE;
+	d->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+	d->bw_levels[i].vectors[1].ab = new_ab / d->num_paths * MBYTE;
+
+	dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+	ret = msm_bus_scale_client_update_request(d->bus_client, i);
+	if (ret) {
+		dev_err(dev, "bandwidth request failed (%d)\n", ret);
+	} else {
+		d->cur_idx = i;
+		d->cur_ib = new_ib;
+		d->cur_ab = new_ab;
+	}
+
+	return ret;
+}
+
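+/*
+ * Snap *freq to the table: the lowest entry at or above the request or,
+ * with DEVFREQ_FLAG_LEAST_UPPER_BOUND, the highest entry at or below it.
+ * Requests outside the table clamp to its first/last entries.
+ */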
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state - 1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int devbw_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	find_freq(&d->dp, freq, flags);
+	return set_bw(dev, *freq, d->gov_ab);
+}
+
+static int devbw_get_dev_status(struct device *dev,
+				struct devfreq_dev_status *stat)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	stat->private_data = &d->gov_ab;
+	return 0;
+}
+
+#define PROP_PORTS "qcom,src-dst-ports"
+#define PROP_TBL "qcom,bw-tbl"
+#define PROP_ACTIVE "qcom,active-only"
+
+int devfreq_add_devbw(struct device *dev)
+{
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, ports[MAX_PATHS * 2];
+	const char *gov_name;
+	int ret, len, i, num_paths;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	dev_set_drvdata(dev, d);
+
+	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
+		len /= sizeof(ports[0]);
+		if (len % 2 || len > ARRAY_SIZE(ports)) {
+			dev_err(dev, "Unexpected number of ports\n");
+			return -EINVAL;
+		}
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
+						 ports, len);
+		if (ret)
+			return ret;
+
+		num_paths = len / 2;
+	} else {
+		return -EINVAL;
+	}
+
+	d->bw_levels[0].vectors = &d->vectors[0];
+	d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
+	d->bw_data.usecase = d->bw_levels;
+	d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
+	d->bw_data.name = dev_name(dev);
+	d->bw_data.active_only = of_property_read_bool(dev->of_node,
+							PROP_ACTIVE);
+
+	for (i = 0; i < num_paths; i++) {
+		d->bw_levels[0].vectors[i].src = ports[2 * i];
+		d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+		d->bw_levels[1].vectors[i].src = ports[2 * i];
+		d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+	}
+	d->bw_levels[0].num_paths = num_paths;
+	d->bw_levels[1].num_paths = num_paths;
+	d->num_paths = num_paths;
+
+	p = &d->dp;
+	p->polling_ms = 50;
+	p->target = devbw_target;
+	p->get_dev_status = devbw_get_dev_status;
+
+	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
+		len /= sizeof(*data);
+		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		p->freq_table = devm_kzalloc(dev,
+					     len * sizeof(*p->freq_table),
+					     GFP_KERNEL);
+		if (!p->freq_table)
+			return -ENOMEM;
+
+		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
+						 data, len);
+		if (ret)
+			return ret;
+
+		for (i = 0; i < len; i++)
+			p->freq_table[i] = data[i];
+		p->max_state = len;
+	}
+
+	d->bus_client = msm_bus_scale_register_client(&d->bw_data);
+	if (!d->bus_client) {
+		dev_err(dev, "Unable to register bus client\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df)) {
+		msm_bus_scale_unregister_client(d->bus_client);
+		return PTR_ERR(d->df);
+	}
+
+	return 0;
+}
+
+int devfreq_remove_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	msm_bus_scale_unregister_client(d->bus_client);
+	devfreq_remove_device(d->df);
+	return 0;
+}
+
+int devfreq_suspend_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_suspend_device(d->df);
+}
+
+int devfreq_resume_devbw(struct device *dev)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+
+	return devfreq_resume_device(d->df);
+}
+
+static int devfreq_devbw_probe(struct platform_device *pdev)
+{
+	return devfreq_add_devbw(&pdev->dev);
+}
+
+static int devfreq_devbw_remove(struct platform_device *pdev)
+{
+	return devfreq_remove_devbw(&pdev->dev);
+}
+
+static const struct of_device_id devbw_match_table[] = {
+	{ .compatible = "qcom,devbw" },
+	{}
+};
+
+static struct platform_driver devbw_driver = {
+	.probe = devfreq_devbw_probe,
+	.remove = devfreq_devbw_remove,
+	.driver = {
+		.name = "devbw",
+		.of_match_table = devbw_match_table,
+	},
+};
+
+module_platform_driver(devbw_driver);
+MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_simple_dev.c b/drivers/devfreq/devfreq_simple_dev.c
new file mode 100644
index 0000000..a21f3f3
--- /dev/null
+++ b/drivers/devfreq/devfreq_simple_dev.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devfreq-simple-dev: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <trace/events/power.h>
+
+struct dev_data {
+	struct clk *clk;
+	struct devfreq *df;
+	struct devfreq_dev_profile profile;
+};
+
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+			u32 flags)
+{
+	int i;
+	unsigned long atmost, atleast, f;
+
+	atmost = p->freq_table[0];
+	atleast = p->freq_table[p->max_state - 1];
+	for (i = 0; i < p->max_state; i++) {
+		f = p->freq_table[i];
+		if (f <= *freq)
+			atmost = max(f, atmost);
+		if (f >= *freq)
+			atleast = min(f, atleast);
+	}
+
+	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+		*freq = atmost;
+	else
+		*freq = atleast;
+}
+
+static int dev_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	unsigned long rfreq;
+
+	find_freq(&d->profile, freq, flags);
+
+	rfreq = clk_round_rate(d->clk, *freq * 1000);
+	if (IS_ERR_VALUE(rfreq)) {
+		dev_err(dev, "devfreq: Cannot find matching frequency for %lu\n",
+			*freq);
+		return rfreq;
+	}
+
+	return clk_set_rate(d->clk, rfreq);
+}
+
+static int dev_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+	struct dev_data *d = dev_get_drvdata(dev);
+	unsigned long f;
+
+	f = clk_get_rate(d->clk);
+	if (IS_ERR_VALUE(f))
+		return f;
+	*freq = f / 1000;
+	return 0;
+}
+
+#define PROP_TBL "freq-tbl-khz"
+static int devfreq_clock_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct dev_data *d;
+	struct devfreq_dev_profile *p;
+	u32 *data, poll;
+	const char *gov_name;
+	int ret, len, i, j;
+	unsigned long f;
+
+	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	d->clk = devm_clk_get(dev, "devfreq_clk");
+	if (IS_ERR(d->clk))
+		return PTR_ERR(d->clk);
+
+	if (!of_find_property(dev->of_node, PROP_TBL, &len))
+		return -EINVAL;
+
+	len /= sizeof(*data);
+	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	p = &d->profile;
+	p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
+				     GFP_KERNEL);
+	if (!p->freq_table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
+	if (ret)
+		return ret;
+
+	j = 0;
+	for (i = 0; i < len; i++) {
+		f = clk_round_rate(d->clk, data[i] * 1000);
+		if (IS_ERR_VALUE(f))
+			dev_warn(dev, "Unable to find dev rate for %d KHz",
+				 data[i]);
+		else
+			p->freq_table[j++] = f / 1000;
+	}
+	p->max_state = j;
+	devm_kfree(dev, data);
+
+	if (p->max_state == 0) {
+		dev_err(dev, "Error parsing property %s!\n", PROP_TBL);
+		return -EINVAL;
+	}
+
+	p->target = dev_target;
+	p->get_cur_freq = dev_get_cur_freq;
+	ret = dev_get_cur_freq(dev, &p->initial_freq);
+	if (ret)
+		return ret;
+
+	p->polling_ms = 50;
+	if (!of_property_read_u32(dev->of_node, "polling-ms", &poll))
+		p->polling_ms = poll;
+
+	if (of_property_read_string(dev->of_node, "governor", &gov_name))
+		gov_name = "performance";
+
+	d->df = devfreq_add_device(dev, p, gov_name, NULL);
+	if (IS_ERR(d->df))
+		return PTR_ERR(d->df);
+
+	return 0;
+}
+
+static int devfreq_clock_remove(struct platform_device *pdev)
+{
+	struct dev_data *d = platform_get_drvdata(pdev);
+
+	devfreq_remove_device(d->df);
+
+	return 0;
+}
+
+static const struct of_device_id devfreq_simple_match_table[] = {
+	{ .compatible = "devfreq-simple-dev" },
+	{}
+};
+
+static struct platform_driver devfreq_clock_driver = {
+	.probe = devfreq_clock_probe,
+	.remove = devfreq_clock_remove,
+	.driver = {
+		.name = "devfreq-simple-dev",
+		.of_match_table = devfreq_simple_match_table,
+	},
+};
+module_platform_driver(devfreq_clock_driver);
+MODULE_DESCRIPTION("Devfreq driver for setting generic device clock frequency");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
new file mode 100644
index 0000000..400943a
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+struct hwmon_node {
+	unsigned int tolerance_percent;
+	unsigned int guard_band_mbps;
+	unsigned int decay_rate;
+	unsigned int io_percent;
+	unsigned int bw_step;
+	unsigned long prev_ab;
+	unsigned long *dev_ab;
+	unsigned long resume_freq;
+	unsigned long resume_ab;
+	ktime_t prev_ts;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct bw_hwmon *hw;
+	struct devfreq_governor *gov;
+	struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct hwmon_node *hw = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = sscanf(buf, "%u", &val);					\
+	if (ret != 1)							\
+		return -EINVAL;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
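+/*
+ * gov_attr(name, min, max) expands to a sysfs show/store pair that clamps
+ * written values to [min, max], plus the DEVICE_ATTR node exposed in this
+ * governor's attribute group.
+ */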
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, min, max)		\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+static unsigned long measure_bw_and_set_irq(struct hwmon_node *node)
+{
+	ktime_t ts;
+	unsigned int us;
+	unsigned long mbps;
+	struct bw_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short work
+	 * to be interrupted by other tasks and cause the measurements to be
+	 * wrong. Not blocking interrupts to avoid affecting interrupt
+	 * latency and since they should be short anyway because they run in
+	 * atomic context.
+	 */
+	preempt_disable();
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (!us)
+		us = 1;
+
+	mbps = hw->meas_bw_and_set_irq(hw, node->tolerance_percent, us);
+	node->prev_ts = ts;
+
+	preempt_enable();
+
+	dev_dbg(hw->df->dev.parent, "BW MBps = %6lu, period = %u\n", mbps, us);
+	trace_bw_hwmon_meas(dev_name(hw->df->dev.parent),
+				mbps,
+				us,
+				0);
+
+	return mbps;
+}
+
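+/*
+ * Rising bandwidth is tracked immediately; falling bandwidth is blended
+ * with the previous vote (decay_rate percent new, the rest old) to avoid
+ * dropping the vote on short dips. The AB vote is rounded up to bw_step
+ * and the IB frequency is scaled by 100/io_percent.
+ */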
+static void compute_bw(struct hwmon_node *node, int mbps,
+			unsigned long *freq, unsigned long *ab)
+{
+	int new_bw;
+
+	mbps += node->guard_band_mbps;
+
+	if (mbps > node->prev_ab) {
+		new_bw = mbps;
+	} else {
+		new_bw = mbps * node->decay_rate
+			+ node->prev_ab * (100 - node->decay_rate);
+		new_bw /= 100;
+	}
+
+	node->prev_ab = new_bw;
+	if (ab)
+		*ab = roundup(new_bw, node->bw_step);
+	*freq = (new_bw * 100) / node->io_percent;
+	trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
+				new_bw,
+				*freq,
+				0,
+				0);
+}
+
+static struct hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node ||
+		    (!node->hw->dev && !node->hw->of_node &&
+		     node->gov == df->governor)) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+#define TOO_SOON_US	(1 * USEC_PER_MSEC)
+int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	struct devfreq *df;
+	struct hwmon_node *node;
+	ktime_t ts;
+	unsigned int us;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = find_hwmon_node(df);
+	if (!node)
+		return -ENODEV;
+
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalc bandwidth if the interrupt comes right after a
+	 * previous bandwidth calculation.  This is done for two reasons:
+	 *
+	 * 1. Sampling the BW during a very short duration can result in a
+	 *    very inaccurate measurement due to very short bursts.
+	 * 2. This can only happen if the limit was hit very close to the end
+	 *    of the previous sample period. Which means the current BW
+	 *    estimate is not very off and doesn't need to be readjusted.
+	 */
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (us > TOO_SOON_US) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		if (ret)
+			dev_err(df->dev.parent,
+				"Unable to update freq on request!\n");
+		mutex_unlock(&df->lock);
+	}
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+static int start_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+	struct device *dev = df->dev.parent;
+	unsigned long mbps;
+	int ret;
+
+	node->prev_ts = ktime_get();
+
+	if (init) {
+		node->prev_ab = 0;
+		node->resume_freq = 0;
+		node->resume_ab = 0;
+		mbps = (df->previous_freq * node->io_percent) / 100;
+		ret = hw->start_hwmon(hw, mbps);
+	} else {
+		ret = hw->resume_hwmon(hw);
+	}
+
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+		return ret;
+	}
+
+	if (init)
+		devfreq_monitor_start(df);
+	else
+		devfreq_monitor_resume(df);
+
+	node->mon_started = true;
+
+	return 0;
+}
+
+static void stop_monitor(struct devfreq *df, bool init)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	node->mon_started = false;
+
+	if (init) {
+		devfreq_monitor_stop(df);
+		hw->stop_hwmon(hw);
+	} else {
+		devfreq_monitor_suspend(df);
+		hw->suspend_hwmon(hw);
+	}
+
+}
+
+static int gov_start(struct devfreq *df)
+{
+	int ret = 0;
+	struct device *dev = df->dev.parent;
+	struct hwmon_node *node;
+	struct bw_hwmon *hw;
+	struct devfreq_dev_status stat;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+
+	stat.private_data = NULL;
+	if (df->profile->get_dev_status)
+		ret = df->profile->get_dev_status(df->dev.parent, &stat);
+	if (ret || !stat.private_data)
+		dev_warn(dev, "Device doesn't take AB votes!\n");
+	else
+		node->dev_ab = stat.private_data;
+
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	if (start_monitor(df, true))
+		goto err_start;
+
+	ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+	if (ret)
+		goto err_sysfs;
+
+	return 0;
+
+err_sysfs:
+	stop_monitor(df, true);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	node->dev_ab = NULL;
+	return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	struct bw_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+	stop_monitor(df, true);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	/*
+	 * Not all governors know about this additional extended device
+	 * configuration. To avoid leaving the extended configuration at a
+	 * stale state, set it to 0 and let the next governor take it from
+	 * there.
+	 */
+	if (node->dev_ab)
+		*node->dev_ab = 0;
+	node->dev_ab = NULL;
+}
+
+static int gov_suspend(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+	unsigned long resume_freq = df->previous_freq;
+	unsigned long resume_ab = node->dev_ab ? *node->dev_ab : 0;
+
+	if (!node->hw->suspend_hwmon)
+		return -ENOSYS;
+
+	if (node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already suspended!\n");
+		return -EBUSY;
+	}
+
+	stop_monitor(df, false);
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = resume_freq;
+	node->resume_ab = resume_ab;
+
+	return 0;
+}
+
+static int gov_resume(struct devfreq *df)
+{
+	struct hwmon_node *node = df->data;
+
+	if (!node->hw->resume_hwmon)
+		return -ENOSYS;
+
+	if (!node->resume_freq) {
+		dev_warn(df->dev.parent, "Governor already resumed!\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&df->lock);
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+
+	node->resume_freq = 0;
+	node->resume_ab = 0;
+
+	return start_monitor(df, false);
+}
+
+static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	unsigned long mbps;
+	struct hwmon_node *node = df->data;
+
+	/* Suspend/resume sequence */
+	if (!node->mon_started) {
+		*freq = node->resume_freq;
+		if (node->dev_ab)
+			*node->dev_ab = node->resume_ab;
+		return 0;
+	}
+
+	mbps = measure_bw_and_set_irq(node);
+	compute_bw(node, mbps, freq, node->dev_ab);
+
+	return 0;
+}
+
+gov_attr(tolerance_percent, 0U, 30U);
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_tolerance_percent.attr,
+	&dev_attr_guard_band_mbps.attr,
+	&dev_attr_decay_rate.attr,
+	&dev_attr_io_percent.attr,
+	&dev_attr_bw_step.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "bw_hwmon",
+	.attrs = dev_attr,
+};
+
+static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = df->profile->polling_ms;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		df->profile->polling_ms = sample_ms;
+
+		ret = gov_start(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent,
+			"Enabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		gov_stop(df);
+		dev_dbg(df->dev.parent,
+			"Disabled dev BW HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = *(unsigned int *)data;
+		sample_ms = max(MIN_MS, sample_ms);
+		sample_ms = min(MAX_MS, sample_ms);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+
+	case DEVFREQ_GOV_SUSPEND:
+		ret = gov_suspend(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to suspend BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
+		break;
+
+	case DEVFREQ_GOV_RESUME:
+		ret = gov_resume(df);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume BW HW mon governor (%d)\n",
+				ret);
+			return ret;
+		}
+
+		dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_gov_bw_hwmon = {
+	.name = "bw_hwmon",
+	.get_target_freq = devfreq_bw_hwmon_get_freq,
+	.event_handler = devfreq_bw_hwmon_ev_handler,
+};
+
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
+{
+	int ret = 0;
+	struct hwmon_node *node;
+	struct attribute_group *attr_grp;
+
+	if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	if (hwmon->gov) {
+		attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
+		if (!attr_grp)
+			return -ENOMEM;
+
+		hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
+		hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
+		attr_grp->name = hwmon->gov->name;
+		attr_grp->attrs = dev_attr;
+
+		node->gov = hwmon->gov;
+		node->attr_grp = attr_grp;
+	} else {
+		node->gov = &devfreq_gov_bw_hwmon;
+		node->attr_grp = &dev_attr_group;
+	}
+
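+	/* Default tunables; all are runtime-adjustable via the sysfs group. */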
+	node->tolerance_percent = 10;
+	node->guard_band_mbps = 100;
+	node->decay_rate = 90;
+	node->io_percent = 16;
+	node->bw_step = 190;
+	node->hw = hwmon;
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &hwmon_list);
+	mutex_unlock(&list_lock);
+
+	if (hwmon->gov) {
+		ret = devfreq_add_governor(hwmon->gov);
+	} else {
+		mutex_lock(&state_lock);
+		if (!use_cnt)
+			ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
+		if (!ret)
+			use_cnt++;
+		mutex_unlock(&state_lock);
+	}
+
+	if (!ret)
+		dev_info(dev, "BW HWmon governor registered.\n");
+	else
+		dev_err(dev, "BW HWmon governor registration failed!\n");
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.h b/drivers/devfreq/governor_bw_hwmon.h
new file mode 100644
index 0000000..8c368e5
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_BW_HWMON_H
+#define _GOVERNOR_BW_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct bw_hwmon - dev BW HW monitor info
+ * @start_hwmon:		Start the HW monitoring of the dev BW
+ * @stop_hwmon:			Stop the HW monitoring of dev BW
+ * @suspend_hwmon:		Suspend the HW monitoring of dev BW.
+ * @resume_hwmon:		Resume the HW monitoring of dev BW.
+ * @meas_bw_and_set_irq:	Return the measured bandwidth and set up the
+ *				IRQ to fire if the usage exceeds current
+ *				measurement by @tol percent.
+ * @dev:			Pointer to device that this HW monitor can
+ *				monitor.
+ * @of_node:			OF node of device that this HW monitor can
+ *				monitor.
+ * @gov:			devfreq_governor struct that should be used
+ *				when registering this HW monitor with devfreq.
+ *				Only the name field is expected to be
+ *				initialized.
+ * @df:				Devfreq node that this HW monitor is being
+ *				used for. NULL when not actively in use and
+ *				non-NULL when in use.
+ *
+ * One of @dev, @of_node or @gov needs to be specified for a
+ * successful registration.
+ *
+ */
+struct bw_hwmon {
+	int (*start_hwmon)(struct bw_hwmon *hw, unsigned long mbps);
+	void (*stop_hwmon)(struct bw_hwmon *hw);
+	int (*suspend_hwmon)(struct bw_hwmon *hw);
+	int (*resume_hwmon)(struct bw_hwmon *hw);
+	unsigned long (*meas_bw_and_set_irq)(struct bw_hwmon *hw,
+					unsigned int tol, unsigned int us);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq_governor *gov;
+
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
+int update_bw_hwmon(struct bw_hwmon *hwmon);
+#else
+static inline int register_bw_hwmon(struct device *dev,
+					struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_BW_HWMON_H */
diff --git a/drivers/devfreq/governor_cache_hwmon.c b/drivers/devfreq/governor_cache_hwmon.c
new file mode 100644
index 0000000..89c012a
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cache-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+#include "governor_cache_hwmon.h"
+
+struct cache_hwmon_node {
+	unsigned int cycles_per_low_req;
+	unsigned int cycles_per_med_req;
+	unsigned int cycles_per_high_req;
+	unsigned int min_busy;
+	unsigned int max_busy;
+	unsigned int tolerance_mrps;
+	unsigned int guard_band_mhz;
+	unsigned int decay_rate;
+	unsigned long prev_mhz;
+	ktime_t prev_ts;
+	bool mon_started;
+	struct list_head list;
+	void *orig_data;
+	struct cache_hwmon *hw;
+	struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(cache_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", hw->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	int ret;							\
+	unsigned int val;						\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct cache_hwmon_node *hw = df->data;				\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	hw->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define MIN_MS	10U
+#define MAX_MS	500U
+
+static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+	struct cache_hwmon_node *node, *found = NULL;
+
+	mutex_lock(&list_lock);
+	list_for_each_entry(node, &cache_hwmon_list, list)
+		if (node->hw->dev == df->dev.parent ||
+		    node->hw->of_node == df->dev.parent->of_node) {
+			found = node;
+			break;
+		}
+	mutex_unlock(&list_lock);
+
+	return found;
+}
+
+static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
+			struct mrps_stats *stat)
+{
+	ktime_t ts;
+	unsigned int us;
+	struct cache_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short work
+	 * to be interrupted by other tasks and cause the measurements to be
+	 * wrong. Not blocking interrupts to avoid affecting interrupt
+	 * latency and since they should be short anyway because they run in
+	 * atomic context.
+	 */
+	preempt_disable();
+
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (!us)
+		us = 1;
+
+	hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
+	node->prev_ts = ts;
+
+	preempt_enable();
+
+	dev_dbg(hw->df->dev.parent,
+		"stat H=%3lu, M=%3lu, T=%3lu, b=%3u, f=%4lu, us=%d\n",
+		 stat->high, stat->med, stat->high + stat->med,
+		 stat->busy_percent, hw->df->previous_freq / 1000, us);
+
+	return 0;
+}
+
+static void compute_cache_freq(struct cache_hwmon_node *node,
+		struct mrps_stats *mrps, unsigned long *freq)
+{
+	unsigned long new_mhz;
+	unsigned int busy;
+
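+	/*
+	 * The mrps counts are in millions of requests per second, so
+	 * weighting them by the configured cycles-per-request costs yields
+	 * the required cache clock in MHz directly.
+	 */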
+	new_mhz = mrps->high * node->cycles_per_high_req
+		+ mrps->med * node->cycles_per_med_req
+		+ mrps->low * node->cycles_per_low_req;
+
+	busy = max(node->min_busy, mrps->busy_percent);
+	busy = min(node->max_busy, busy);
+
+	new_mhz *= 100;
+	new_mhz /= busy;
+
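+	/*
+	 * Smooth only downward adjustments: blend decay_rate percent of the
+	 * new estimate with the remainder of the previous one so that
+	 * ramp-ups stay immediate while ramp-downs decay over samples.
+	 */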
+	if (new_mhz < node->prev_mhz) {
+		new_mhz = new_mhz * node->decay_rate + node->prev_mhz
+				* (100 - node->decay_rate);
+		new_mhz /= 100;
+	}
+	node->prev_mhz = new_mhz;
+
+	new_mhz += node->guard_band_mhz;
+	*freq = new_mhz * 1000;
+}
+
+#define TOO_SOON_US	(1 * USEC_PER_MSEC)
+
+int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	struct cache_hwmon_node *node;
+	struct devfreq *df;
+	ktime_t ts;
+	unsigned int us;
+	int ret;
+
+	if (!hwmon)
+		return -EINVAL;
+	df = hwmon->df;
+	if (!df)
+		return -ENODEV;
+	node = df->data;
+	if (!node)
+		return -ENODEV;
+	if (!node->mon_started)
+		return -EBUSY;
+
+	dev_dbg(df->dev.parent, "Got update request\n");
+	devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalc cache freq if the interrupt comes right after a
+	 * previous cache freq calculation.  This is done for two reasons:
+	 *
+	 * 1. Sampling the cache request during a very short duration can
+	 *    result in a very inaccurate measurement due to very short
+	 *    bursts.
+	 * 2. This can only happen if the limit was hit very close to the end
+	 *    of the previous sample period. Which means the current cache
+	 *    request estimate is not very off and doesn't need to be
+	 *    readjusted.
+	 */
+	ts = ktime_get();
+	us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+	if (us > TOO_SOON_US) {
+		mutex_lock(&df->lock);
+		ret = update_devfreq(df);
+		if (ret)
+			dev_err(df->dev.parent,
+				"Unable to update freq on request!\n");
+		mutex_unlock(&df->lock);
+	}
+
+	devfreq_monitor_start(df);
+
+	return 0;
+}
+
+static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	struct mrps_stats stat;
+	struct cache_hwmon_node *node = df->data;
+
+	memset(&stat, 0, sizeof(stat));
+	measure_mrps_and_set_irq(node, &stat);
+	compute_cache_freq(node, &stat, freq);
+
+	return 0;
+}
+
+gov_attr(cycles_per_low_req, 1U, 100U);
+gov_attr(cycles_per_med_req, 1U, 100U);
+gov_attr(cycles_per_high_req, 1U, 100U);
+gov_attr(min_busy, 1U, 100U);
+gov_attr(max_busy, 1U, 100U);
+gov_attr(tolerance_mrps, 0U, 100U);
+gov_attr(guard_band_mhz, 0U, 500U);
+gov_attr(decay_rate, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_cycles_per_low_req.attr,
+	&dev_attr_cycles_per_med_req.attr,
+	&dev_attr_cycles_per_high_req.attr,
+	&dev_attr_min_busy.attr,
+	&dev_attr_max_busy.attr,
+	&dev_attr_tolerance_mrps.attr,
+	&dev_attr_guard_band_mhz.attr,
+	&dev_attr_decay_rate.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cache_hwmon",
+	.attrs = dev_attr,
+};
+
+static int start_monitoring(struct devfreq *df)
+{
+	int ret;
+	struct mrps_stats mrps;
+	struct device *dev = df->dev.parent;
+	struct cache_hwmon_node *node;
+	struct cache_hwmon *hw;
+
+	node = find_hwmon_node(df);
+	if (!node) {
+		dev_err(dev, "Unable to find HW monitor!\n");
+		return -ENODEV;
+	}
+	hw = node->hw;
+	hw->df = df;
+	node->orig_data = df->data;
+	df->data = node;
+
+	node->prev_ts = ktime_get();
+	node->prev_mhz = 0;
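+	/*
+	 * Seed the monitor by inverting the cycles-per-request model: back
+	 * out the high-priority request rate that the current frequency,
+	 * minus the guard band, could sustain.
+	 */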
+	mrps.high = (df->previous_freq / 1000) - node->guard_band_mhz;
+	mrps.high /= node->cycles_per_high_req;
+	mrps.med = mrps.low = 0;
+
+	ret = hw->start_hwmon(hw, &mrps);
+	if (ret) {
+		dev_err(dev, "Unable to start HW monitor!\n");
+		goto err_start;
+	}
+
+	devfreq_monitor_start(df);
+	node->mon_started = true;
+
+	ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
+	if (ret) {
+		dev_err(dev, "Error creating sys entries!\n");
+		goto sysfs_fail;
+	}
+
+	return 0;
+
+sysfs_fail:
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+err_start:
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+	return ret;
+}
+
+static void stop_monitoring(struct devfreq *df)
+{
+	struct cache_hwmon_node *node = df->data;
+	struct cache_hwmon *hw = node->hw;
+
+	sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
+	node->mon_started = false;
+	devfreq_monitor_stop(df);
+	hw->stop_hwmon(hw);
+	df->data = node->orig_data;
+	node->orig_data = NULL;
+	hw->df = NULL;
+}
+
+static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
+					unsigned int event, void *data)
+{
+	int ret;
+	unsigned int sample_ms;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		sample_ms = clamp(df->profile->polling_ms, MIN_MS, MAX_MS);
+		df->profile->polling_ms = sample_ms;
+
+		ret = start_monitoring(df);
+		if (ret)
+			return ret;
+
+		dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+		stop_monitoring(df);
+		dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
+		break;
+
+	case DEVFREQ_GOV_INTERVAL:
+		sample_ms = clamp(*(unsigned int *)data, MIN_MS, MAX_MS);
+		devfreq_interval_update(df, &sample_ms);
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cache_hwmon = {
+	.name = "cache_hwmon",
+	.get_target_freq = devfreq_cache_hwmon_get_freq,
+	.event_handler = devfreq_cache_hwmon_ev_handler,
+};
+
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
+{
+	int ret = 0;
+	struct cache_hwmon_node *node;
+
+	if (!hwmon->dev && !hwmon->of_node)
+		return -EINVAL;
+
+	node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
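+	/*
+	 * Reasonable starting points; all of these are runtime-tunable
+	 * through the cache_hwmon sysfs group.
+	 */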
+	node->cycles_per_med_req = 20;
+	node->cycles_per_high_req = 35;
+	node->min_busy = 100;
+	node->max_busy = 100;
+	node->tolerance_mrps = 5;
+	node->guard_band_mhz = 100;
+	node->decay_rate = 90;
+	node->hw = hwmon;
+	node->attr_grp = &dev_attr_group;
+
+	mutex_lock(&state_lock);
+	if (!use_cnt) {
+		ret = devfreq_add_governor(&devfreq_cache_hwmon);
+		if (!ret)
+			use_cnt++;
+	}
+	mutex_unlock(&state_lock);
+
+	if (!ret) {
+		dev_info(dev, "Cache HWmon governor registered.\n");
+	} else {
+		dev_err(dev, "Failed to add Cache HWmon governor\n");
+		return ret;
+	}
+
+	mutex_lock(&list_lock);
+	list_add_tail(&node->list, &cache_hwmon_list);
+	mutex_unlock(&list_lock);
+
+	return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based cache freq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_cache_hwmon.h b/drivers/devfreq/governor_cache_hwmon.h
new file mode 100644
index 0000000..c6baf6e
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_CACHE_HWMON_H
+#define _GOVERNOR_CACHE_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+struct mrps_stats {
+	unsigned long high;
+	unsigned long med;
+	unsigned long low;
+	unsigned int busy_percent;
+};
+
+/**
+ * struct cache_hwmon - devfreq Cache HW monitor info
+ * @start_hwmon:	Start the HW monitoring
+ * @stop_hwmon:		Stop the HW monitoring
+ * @meas_mrps_and_set_irq:	Return the measured count and set up the
+ *				IRQ to fire if usage exceeds current
+ *				measurement by @tol percent.
+ * @dev:		device that this HW monitor can monitor.
+ * @of_node:		OF node of device that this HW monitor can monitor.
+ * @df:			Devfreq node that this HW monitor is being used
+ *			for. NULL when not actively in use, and non-NULL
+ *			when in use.
+ */
+struct cache_hwmon {
+	int (*start_hwmon)(struct cache_hwmon *hw, struct mrps_stats *mrps);
+	void (*stop_hwmon)(struct cache_hwmon *hw);
+	unsigned long (*meas_mrps_and_set_irq)(struct cache_hwmon *hw,
+					unsigned int tol, unsigned int us,
+					struct mrps_stats *mrps);
+	struct device *dev;
+	struct device_node *of_node;
+	struct devfreq *df;
+};
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon);
+int update_cache_hwmon(struct cache_hwmon *hwmon);
+#else
+static inline int register_cache_hwmon(struct device *dev,
+				       struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+static inline int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+	return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_CACHE_HWMON_H */
diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c
new file mode 100644
index 0000000..bae1d39
--- /dev/null
+++ b/drivers/devfreq/governor_cpufreq.c
@@ -0,0 +1,715 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dev-cpufreq: " fmt
+
+#include <linux/devfreq.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct cpu_state {
+	unsigned int freq;
+	unsigned int min_freq;
+	unsigned int max_freq;
+	bool on;
+	unsigned int first_cpu;
+};
+static struct cpu_state *state[NR_CPUS];
+static int cpufreq_cnt;
+
+struct freq_map {
+	unsigned int cpu_khz;
+	unsigned int target_freq;
+};
+
+struct devfreq_node {
+	struct devfreq *df;
+	void *orig_data;
+	struct device *dev;
+	struct device_node *of_node;
+	struct list_head list;
+	struct freq_map **map;
+	struct freq_map *common_map;
+	unsigned int timeout;
+	struct delayed_work dwork;
+	bool drop;
+	unsigned long prev_tgt;
+};
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev,				\
+			struct device_attribute *attr, char *buf)	\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	return snprintf(buf, PAGE_SIZE, "%u\n", n->name);		\
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev,				\
+			struct device_attribute *attr, const char *buf,	\
+			size_t count)					\
+{									\
+	struct devfreq *df = to_devfreq(dev);				\
+	struct devfreq_node *n = df->data;				\
+	int ret;							\
+	unsigned int val;						\
+	ret = kstrtouint(buf, 10, &val);				\
+	if (ret)							\
+		return ret;						\
+	val = max(val, _min);						\
+	val = min(val, _max);						\
+	n->name = val;							\
+	return count;							\
+}
+
+#define gov_attr(__attr, min, max)	\
+show_attr(__attr)			\
+store_attr(__attr, (min), (max))	\
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static int update_node(struct devfreq_node *node)
+{
+	int ret;
+	struct devfreq *df = node->df;
+
+	if (!df)
+		return 0;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&df->lock);
+	node->drop = false;
+	ret = update_devfreq(df);
+	if (ret) {
+		dev_err(df->dev.parent, "Unable to update frequency\n");
+		goto out;
+	}
+
+	if (!node->timeout)
+		goto out;
+
+	if (df->previous_freq <= df->min_freq)
+		goto out;
+
+	schedule_delayed_work(&node->dwork,
+			      msecs_to_jiffies(node->timeout));
+out:
+	mutex_unlock(&df->lock);
+	return ret;
+}
+
+static void update_all_devfreqs(void)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list) {
+		update_node(node);
+	}
+}
+
+static void do_timeout(struct work_struct *work)
+{
+	struct devfreq_node *node = container_of(to_delayed_work(work),
+						struct devfreq_node, dwork);
+	struct devfreq *df = node->df;
+
+	mutex_lock(&df->lock);
+	node->drop = true;
+	update_devfreq(df);
+	mutex_unlock(&df->lock);
+}
+
+static struct devfreq_node *find_devfreq_node(struct device *dev)
+{
+	struct devfreq_node *node;
+
+	list_for_each_entry(node, &devfreq_list, list)
+		if (node->dev == dev || node->of_node == dev->of_node)
+			return node;
+
+	return NULL;
+}
+
+/* ==================== cpufreq part ==================== */
+static void add_policy(struct cpufreq_policy *policy)
+{
+	struct cpu_state *new_state;
+	unsigned int cpu, first_cpu;
+
+	if (state[policy->cpu]) {
+		state[policy->cpu]->freq = policy->cur;
+		state[policy->cpu]->on = true;
+	} else {
+		new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
+		if (!new_state)
+			return;
+
+		first_cpu = cpumask_first(policy->related_cpus);
+		new_state->first_cpu = first_cpu;
+		new_state->freq = policy->cur;
+		new_state->min_freq = policy->cpuinfo.min_freq;
+		new_state->max_freq = policy->cpuinfo.max_freq;
+		new_state->on = true;
+
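+		/*
+		 * All CPUs in the policy share one state object, keyed by
+		 * the first CPU in the related mask. That CPU also owns the
+		 * kfree() at unregister time.
+		 */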
+		for_each_cpu(cpu, policy->related_cpus)
+			state[cpu] = new_state;
+	}
+}
+
+static int cpufreq_policy_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_policy *policy = data;
+
+	switch (event) {
+	case CPUFREQ_CREATE_POLICY:
+		mutex_lock(&state_lock);
+		add_policy(policy);
+		update_all_devfreqs();
+		mutex_unlock(&state_lock);
+		break;
+
+	case CPUFREQ_REMOVE_POLICY:
+		mutex_lock(&state_lock);
+		if (state[policy->cpu]) {
+			state[policy->cpu]->on = false;
+			update_all_devfreqs();
+		}
+		mutex_unlock(&state_lock);
+		break;
+	}
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_policy_nb = {
+	.notifier_call = cpufreq_policy_notifier
+};
+
+static int cpufreq_trans_notifier(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_state *s;
+
+	if (event != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	mutex_lock(&state_lock);
+
+	s = state[freq->cpu];
+	if (!s)
+		goto out;
+
+	if (s->freq != freq->new) {
+		s->freq = freq->new;
+		update_all_devfreqs();
+	}
+
+out:
+	mutex_unlock(&state_lock);
+	return 0;
+}
+
+static struct notifier_block cpufreq_trans_nb = {
+	.notifier_call = cpufreq_trans_notifier
+};
+
+static int register_cpufreq(void)
+{
+	int ret = 0;
+	unsigned int cpu;
+	struct cpufreq_policy *policy;
+
+	mutex_lock(&state_lock);
+
+	if (cpufreq_cnt)
+		goto cnt_not_zero;
+
+	get_online_cpus();
+	ret = cpufreq_register_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	if (ret)
+		goto out;
+
+	ret = cpufreq_register_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+		goto out;
+	}
+
+	for_each_online_cpu(cpu) {
+		policy = cpufreq_cpu_get(cpu);
+		if (policy) {
+			add_policy(policy);
+			cpufreq_cpu_put(policy);
+		}
+	}
+out:
+	put_online_cpus();
+cnt_not_zero:
+	if (!ret)
+		cpufreq_cnt++;
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+static int unregister_cpufreq(void)
+{
+	int ret = 0;
+	int cpu;
+
+	mutex_lock(&state_lock);
+
+	if (cpufreq_cnt > 1)
+		goto out;
+
+	cpufreq_unregister_notifier(&cpufreq_policy_nb,
+				CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_unregister_notifier(&cpufreq_trans_nb,
+				CPUFREQ_TRANSITION_NOTIFIER);
+
+	for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
+		if (!state[cpu])
+			continue;
+		if (state[cpu]->first_cpu == cpu)
+			kfree(state[cpu]);
+		state[cpu] = NULL;
+	}
+
+out:
+	cpufreq_cnt--;
+	mutex_unlock(&state_lock);
+	return ret;
+}
+
+/* ==================== devfreq part ==================== */
+
+static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
+{
+	unsigned long *freq_table = df->profile->freq_table;
+	unsigned int cpu_min = state[cpu]->min_freq;
+	unsigned int cpu_max = state[cpu]->max_freq;
+	unsigned int cpu_freq = state[cpu]->freq;
+	unsigned int dev_min, dev_max, cpu_percent;
+
+	if (freq_table) {
+		dev_min = freq_table[0];
+		dev_max = freq_table[df->profile->max_state - 1];
+	} else {
+		if (df->max_freq <= df->min_freq)
+			return 0;
+		dev_min = df->min_freq;
+		dev_max = df->max_freq;
+	}
+
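+	/*
+	 * Map the CPU's position within its frequency range linearly onto
+	 * the device's range, bailing out on a degenerate CPU range.
+	 */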
+	if (cpu_max <= cpu_min)
+		return 0;
+
+	cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
+	return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+}
+
+static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
+{
+	struct freq_map *map = NULL;
+	unsigned int cpu_khz = 0, freq;
+	struct devfreq_node *n = df->data;
+
+	if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
+		freq = 0;
+		goto out;
+	}
+
+	if (n->common_map)
+		map = n->common_map;
+	else if (n->map)
+		map = n->map[cpu];
+
+	cpu_khz = state[cpu]->freq;
+
+	if (!map) {
+		freq = interpolate_freq(df, cpu);
+		goto out;
+	}
+
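+	/*
+	 * Walk the zero-terminated map to the first entry at or above the
+	 * current CPU frequency; if we run off the end, use the last entry.
+	 */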
+	while (map->cpu_khz && map->cpu_khz < cpu_khz)
+		map++;
+	if (!map->cpu_khz)
+		map--;
+	freq = map->target_freq;
+
+out:
+	dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq);
+	return freq;
+}
+
+static int devfreq_cpufreq_get_freq(struct devfreq *df,
+					unsigned long *freq)
+{
+	unsigned int cpu, tgt_freq = 0;
+	struct devfreq_node *node;
+
+	node = df->data;
+	if (!node) {
+		pr_err("Unable to find devfreq node!\n");
+		return -ENODEV;
+	}
+
+	if (node->drop) {
+		*freq = 0;
+		return 0;
+	}
+
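+	/* Vote for the highest device frequency any CPU's mapping requests. */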
+	for_each_possible_cpu(cpu)
+		tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));
+
+	if (node->timeout && tgt_freq < node->prev_tgt)
+		*freq = 0;
+	else
+		*freq = tgt_freq;
+
+	node->prev_tgt = tgt_freq;
+
+	return 0;
+}
+
+static unsigned int show_table(char *buf, unsigned int len,
+				struct freq_map *map)
+{
+	unsigned int cnt = 0;
+
+	cnt += snprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
+
+	while (map->cpu_khz && cnt < len) {
+		cnt += snprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
+				map->cpu_khz, map->target_freq);
+		map++;
+	}
+	if (cnt < len)
+		cnt += snprintf(buf + cnt, len - cnt, "\n");
+
+	return cnt;
+}
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+	struct devfreq_node *n = df->data;
+	struct freq_map *map;
+	unsigned int cnt = 0, cpu;
+
+	mutex_lock(&state_lock);
+	if (n->common_map) {
+		map = n->common_map;
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Common table for all CPUs:\n");
+		cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+	} else if (n->map) {
+		for_each_possible_cpu(cpu) {
+			map = n->map[cpu];
+			if (!map)
+				continue;
+			cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+					"CPU %u:\n", cpu);
+			if (cnt >= PAGE_SIZE)
+				break;
+			cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+			if (cnt >= PAGE_SIZE)
+				break;
+		}
+	} else {
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+				"Device freq interpolated based on CPU freq\n");
+	}
+	mutex_unlock(&state_lock);
+
+	return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+gov_attr(timeout, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+	&dev_attr_freq_map.attr,
+	&dev_attr_timeout.attr,
+	NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+	.name = "cpufreq",
+	.attrs = dev_attr,
+};
+
+static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
+{
+	int ret = 0;
+	struct devfreq_node *node;
+	bool alloc = false;
+
+	ret = register_cpufreq();
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+	if (ret) {
+		unregister_cpufreq();
+		return ret;
+	}
+
+	mutex_lock(&state_lock);
+
+	node = find_devfreq_node(devfreq->dev.parent);
+	if (node == NULL) {
+		node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+		if (!node) {
+			pr_err("Out of memory!\n");
+			ret = -ENOMEM;
+			goto alloc_fail;
+		}
+		alloc = true;
+		node->dev = devfreq->dev.parent;
+		list_add_tail(&node->list, &devfreq_list);
+	}
+
+	INIT_DELAYED_WORK(&node->dwork, do_timeout);
+
+	node->df = devfreq;
+	node->orig_data = devfreq->data;
+	devfreq->data = node;
+
+	ret = update_node(node);
+	if (ret)
+		goto update_fail;
+
+	mutex_unlock(&state_lock);
+	return 0;
+
+update_fail:
+	devfreq->data = node->orig_data;
+	if (alloc) {
+		list_del(&node->list);
+		kfree(node);
+	}
+alloc_fail:
+	mutex_unlock(&state_lock);
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+	return ret;
+}
+
+static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
+{
+	struct devfreq_node *node = devfreq->data;
+
+	cancel_delayed_work_sync(&node->dwork);
+
+	mutex_lock(&state_lock);
+	devfreq->data = node->orig_data;
+	if (node->map || node->common_map) {
+		node->df = NULL;
+	} else {
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+
+	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+	unregister_cpufreq();
+}
+
+static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	int ret;
+
+	switch (event) {
+	case DEVFREQ_GOV_START:
+
+		ret = devfreq_cpufreq_gov_start(devfreq);
+		if (ret) {
+			pr_err("Governor start failed!\n");
+			return ret;
+		}
+		pr_debug("Enabled dev CPUfreq governor\n");
+		break;
+
+	case DEVFREQ_GOV_STOP:
+
+		devfreq_cpufreq_gov_stop(devfreq);
+		pr_debug("Disabled dev CPUfreq governor\n");
+		break;
+	}
+
+	return 0;
+}
+
+static struct devfreq_governor devfreq_cpufreq = {
+	.name = "cpufreq",
+	.get_target_freq = devfreq_cpufreq_get_freq,
+	.event_handler = devfreq_cpufreq_ev_handler,
+};
+
+#define NUM_COLS	2
+static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
+{
+	int len, nf, i, j;
+	u32 data;
+	struct freq_map *tbl;
+
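+	/*
+	 * The DT property is a flat list of <cpu_khz, target_freq> pairs;
+	 * the returned table is terminated by a zero cpu_khz entry.
+	 */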
+	if (!of_find_property(of_node, prop_name, &len))
+		return NULL;
+	len /= sizeof(data);
+
+	if (len % NUM_COLS || len == 0)
+		return NULL;
+	nf = len / NUM_COLS;
+
+	tbl = kcalloc(nf + 1, sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		return NULL;
+
+	for (i = 0, j = 0; i < nf; i++, j += 2) {
+		of_property_read_u32_index(of_node, prop_name, j, &data);
+		tbl[i].cpu_khz = data;
+
+		of_property_read_u32_index(of_node, prop_name, j + 1, &data);
+		tbl[i].target_freq = data;
+	}
+	tbl[i].cpu_khz = 0;
+
+	return tbl;
+}
+
+#define PROP_TARGET "target-dev"
+#define PROP_TABLE "cpu-to-dev-map"
+static int add_table_from_of(struct device_node *of_node)
+{
+	struct device_node *target_of_node;
+	struct devfreq_node *node;
+	struct freq_map *common_tbl;
+	struct freq_map **tbl_list = NULL;
+	static char prop_name[] = PROP_TABLE "-999999";
+	int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);
+
+	target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
+	if (!target_of_node)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	common_tbl = read_tbl(of_node, PROP_TABLE);
+	if (!common_tbl) {
+		tbl_list = kcalloc(NR_CPUS, sizeof(*tbl_list), GFP_KERNEL);
+		if (!tbl_list) {
+			ret = -ENOMEM;
+			goto err_list;
+		}
+
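+		/*
+		 * No common table; look for per-CPU "cpu-to-dev-map-<N>"
+		 * properties instead.
+		 */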
+		for_each_possible_cpu(cpu) {
+			ret = snprintf(prop_name, prop_sz, "%s-%d",
+					PROP_TABLE, cpu);
+			if (ret >= prop_sz) {
+				pr_warn("More CPUs than I can handle!\n");
+				pr_warn("Skipping rest of the tables!\n");
+				break;
+			}
+			tbl_list[cpu] = read_tbl(of_node, prop_name);
+			if (tbl_list[cpu])
+				cnt++;
+		}
+	}
+	if (!common_tbl && !cnt) {
+		ret = -EINVAL;
+		goto err_tbl;
+	}
+
+	mutex_lock(&state_lock);
+	node->of_node = target_of_node;
+	node->map = tbl_list;
+	node->common_map = common_tbl;
+	list_add_tail(&node->list, &devfreq_list);
+	mutex_unlock(&state_lock);
+
+	return 0;
+err_tbl:
+	kfree(tbl_list);
+err_list:
+	kfree(node);
+	return ret;
+}
+
+static int __init devfreq_cpufreq_init(void)
+{
+	int ret;
+	struct device_node *of_par, *of_child;
+
+	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
+	if (of_par) {
+		for_each_child_of_node(of_par, of_child) {
+			ret = add_table_from_of(of_child);
+			if (ret)
+				pr_err("Parsing %s failed!\n", of_child->name);
+			else
+				pr_debug("Parsed %s.\n", of_child->name);
+		}
+		of_node_put(of_par);
+	} else {
+		pr_info("No tables parsed from DT.\n");
+	}
+
+	ret = devfreq_add_governor(&devfreq_cpufreq);
+	if (ret) {
+		pr_err("Governor add failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+subsys_initcall(devfreq_cpufreq_init);
+
+static void __exit devfreq_cpufreq_exit(void)
+{
+	int ret, cpu;
+	struct devfreq_node *node, *tmp;
+
+	ret = devfreq_remove_governor(&devfreq_cpufreq);
+	if (ret)
+		pr_err("Governor remove failed!\n");
+
+	mutex_lock(&state_lock);
+	list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
+		kfree(node->common_map);
+		if (node->map) {
+			for_each_possible_cpu(cpu)
+				kfree(node->map[cpu]);
+		}
+		kfree(node->map);
+		list_del(&node->list);
+		kfree(node);
+	}
+	mutex_unlock(&state_lock);
+}
+module_exit(devfreq_cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index c1dfb44..a402fe8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -51,6 +51,7 @@
 	sde/sde_hw_color_proc_v4.o \
 	sde_rsc.o \
 	sde_rsc_hw.o \
+	sde/sde_hw_ad4.o \
 
 # use drm gpu driver only if qcom_kgsl driver not available
 ifneq ($(CONFIG_QCOM_KGSL),y)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index aa11a36..0974a50 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1133,7 +1133,7 @@
 	.debugfs_cleanup    = msm_debugfs_cleanup,
 #endif
 	.ioctls             = msm_ioctls,
-	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
+	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
 	.name               = "msm_drm",
 	.desc               = "MSM Snapdragon DRM",
diff --git a/drivers/gpu/drm/msm/sde/sde_ad4.h b/drivers/gpu/drm/msm/sde/sde_ad4.h
new file mode 100644
index 0000000..5ed7ae2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_ad4.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SDE_AD4_H_
+#define _SDE_AD4_H_
+
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+#include "sde_hw_dspp.h"
+
+/**
+ * enum ad4_modes - ad4 modes supported by driver
+ */
+enum ad4_modes {
+	AD4_OFF,
+	AD4_AUTO_STRENGTH,
+	AD4_CALIBRATION,
+	AD4_MANUAL,
+};
+
+/**
+ * ad4_modes - drm_prop_enum_list entries used to create the AD4 mode enum
+ *             property and enumerate its values
+ */
+static const struct drm_prop_enum_list ad4_modes[] = {
+	{AD4_OFF, "off"},
+	{AD4_AUTO_STRENGTH, "auto_strength_mode"},
+	{AD4_CALIBRATION, "calibration_mode"},
+	{AD4_MANUAL, "manual_mode"},
+};
+
+/**
+ * enum ad_property - properties that can be set for ad
+ */
+enum ad_property {
+	AD_MODE,
+	AD_INIT,
+	AD_CFG,
+	AD_INPUT,
+	AD_SUSPEND,
+	AD_ASSERTIVE,
+	AD_BACKLIGHT,
+	AD_PROPMAX,
+};
+
+/**
+ * struct sde_ad_hw_cfg - structure for setting the ad properties
+ * @prop: enum of ad property
+ * @hw_cfg: payload for the prop being set.
+ */
+struct sde_ad_hw_cfg {
+	enum ad_property prop;
+	struct sde_hw_cp_cfg *hw_cfg;
+};
+
+/**
+ * sde_validate_dspp_ad4() - api to validate if ad property is allowed for
+ *                           the display with allocated dspp/mixers.
+ * @dspp: pointer to dspp info structure.
+ * @prop: pointer to u32 pointing to ad property
+ */
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop);
+
+/**
+ * sde_setup_dspp_ad4 - api to apply the ad property; sde_validate_dspp_ad4
+ *                      should be called before calling this function
+ * @dspp: pointer to dspp info structure.
+ * @cfg: pointer to struct sde_ad_hw_cfg
+ */
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *cfg);
+#endif /* _SDE_AD4_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index f7fcd01..cb6917a 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -21,6 +21,7 @@
 #include "sde_crtc.h"
 #include "sde_hw_dspp.h"
 #include "sde_hw_lm.h"
+#include "sde_ad4.h"
 
 struct sde_cp_node {
 	u32 property_id;
@@ -60,6 +61,12 @@
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
 
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list);
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc);
+
 #define setup_dspp_prop_install_funcs(func) \
 do { \
 	func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
@@ -96,6 +103,12 @@
 	SDE_CP_CRTC_DSPP_HIST,
 	SDE_CP_CRTC_DSPP_AD,
 	SDE_CP_CRTC_DSPP_VLUT,
+	SDE_CP_CRTC_DSPP_AD_MODE,
+	SDE_CP_CRTC_DSPP_AD_INIT,
+	SDE_CP_CRTC_DSPP_AD_CFG,
+	SDE_CP_CRTC_DSPP_AD_INPUT,
+	SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS,
+	SDE_CP_CRTC_DSPP_AD_BACKLIGHT,
 	SDE_CP_CRTC_DSPP_MAX,
 	/* DSPP features end */
 
@@ -136,9 +149,10 @@
 	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
 		/* Check if local blob is Set */
 		if (!blob) {
-			hw_cfg->len = sizeof(prop_node->prop_val);
-			if (prop_node->prop_val)
+			if (prop_node->prop_val) {
+				hw_cfg->len = sizeof(prop_node->prop_val);
 				hw_cfg->payload = &prop_node->prop_val;
+			}
 		} else {
 			hw_cfg->len = (prop_node->prop_val) ? blob->length :
 					0;
@@ -147,6 +161,10 @@
 		}
 		if (prop_node->prop_val)
 			*feature_enabled = true;
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_ENUM) {
+		*feature_enabled = (prop_node->prop_val != 0);
+		hw_cfg->len = sizeof(prop_node->prop_val);
+		hw_cfg->payload = &prop_node->prop_val;
 	} else {
 		DRM_ERROR("property type is not supported\n");
 	}
@@ -178,7 +196,7 @@
 		}
 	}
 
-	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+	if (!found || !(prop_node->prop_flags & DRM_MODE_PROP_RANGE)) {
 		DRM_ERROR("local blob create failed prop found %d flags %d\n",
 		       found, prop_node->prop_flags);
 		return ret;
@@ -232,10 +250,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_disable_crtc_blob_property(prop_node);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, 0);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = 0;
+	}
 	return ret;
 }
 
@@ -275,10 +297,14 @@
 {
 	int ret = -EINVAL;
 
-	if (property->flags & DRM_MODE_PROP_BLOB)
+	if (property->flags & DRM_MODE_PROP_BLOB) {
 		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
-	else if (property->flags & DRM_MODE_PROP_RANGE)
+	} else if (property->flags & DRM_MODE_PROP_RANGE) {
 		ret = sde_cp_handle_range_property(prop_node, val);
+	} else if (property->flags & DRM_MODE_PROP_ENUM) {
+		ret = 0;
+		prop_node->prop_val = val;
+	}
 	return ret;
 }
 
@@ -331,6 +357,8 @@
 	INIT_LIST_HEAD(&sde_crtc->active_list);
 	INIT_LIST_HEAD(&sde_crtc->dirty_list);
 	INIT_LIST_HEAD(&sde_crtc->feature_list);
+	INIT_LIST_HEAD(&sde_crtc->ad_dirty);
+	INIT_LIST_HEAD(&sde_crtc->ad_active);
 }
 
 static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
@@ -357,8 +385,8 @@
 	prop = priv->cp_property[feature];
 
 	if (!prop) {
-		prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
-					   name, 0);
+		prop = drm_property_create_range(crtc->dev,
+				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
 		if (!prop) {
 			DRM_ERROR("property create failed: %s\n", name);
 			kfree(prop_node);
@@ -412,7 +440,7 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
-static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+static void sde_cp_crtc_install_blob_property(struct drm_crtc *crtc, char *name,
 			u32 feature, u32 blob_sz)
 {
 	struct drm_property *prop;
@@ -452,6 +480,46 @@
 	sde_cp_crtc_prop_attach(&prop_attach);
 }
 
+static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc,
+	u32 feature, const struct drm_prop_enum_list *list, u32 enum_sz,
+	char *name)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_enum(crtc->dev, 0, name,
+			list, enum_sz);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
 static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
 				   struct sde_crtc *sde_crtc, u32 last_feature)
 {
@@ -462,13 +530,18 @@
 	int i = 0;
 	bool feature_enabled = false;
 	int ret = 0;
+	struct sde_ad_hw_cfg ad_cfg;
 
 	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+	hw_cfg.num_of_mixers = sde_crtc->num_mixers;
+	hw_cfg.displayh = sde_crtc->base.mode.hdisplay;
+	hw_cfg.displayv = sde_crtc->base.mode.vdisplay;
 
 	for (i = 0; i < num_mixers && !ret; i++) {
 		hw_lm = sde_crtc->mixers[i].hw_lm;
 		hw_dspp = sde_crtc->mixers[i].hw_dspp;
 		hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl;
+		hw_cfg.mixer_info = hw_lm;
 		if (i == num_mixers - 1)
 			hw_cfg.last_feature = last_feature;
 		else
@@ -558,6 +631,60 @@
 			}
 			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
 			break;
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_MODE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INIT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_CFG;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_INPUT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_ASSERTIVE;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			if (!hw_dspp || !hw_dspp->ops.setup_ad) {
+				ret = -EINVAL;
+				continue;
+			}
+			ad_cfg.prop = AD_BACKLIGHT;
+			ad_cfg.hw_cfg = &hw_cfg;
+			hw_dspp->ops.setup_ad(hw_dspp, &ad_cfg);
+			break;
 		default:
 			ret = -EINVAL;
 			break;
@@ -574,7 +701,7 @@
 	if (feature_enabled) {
 		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
 				 prop_node->property_id);
-		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+		sde_cp_update_list(prop_node, sde_crtc, false);
 	} else {
 		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
 			 prop_node->property_id);
@@ -612,10 +739,17 @@
 		return;
 	}
 
-	/* Check if dirty list is empty for early return */
-	if (list_empty(&sde_crtc->dirty_list)) {
-		DRM_DEBUG_DRIVER("Dirty list is empty\n");
-		return;
+	/*
+	 * Check if the dirty lists are empty and AD features are disabled
+	 * for an early return. If AD properties are active, a dspp flush
+	 * still needs to be issued.
+	 */
+	if (list_empty(&sde_crtc->dirty_list) &&
+		list_empty(&sde_crtc->ad_dirty)) {
+		if (list_empty(&sde_crtc->ad_active)) {
+			DRM_DEBUG_DRIVER("Dirty list is empty\n");
+			return;
+		}
+		set_dspp_flush = true;
 	}
 
 	num_of_features = 0;
@@ -623,7 +757,7 @@
 		num_of_features++;
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
-							dirty_list) {
+				dirty_list) {
 		num_of_features--;
 		sde_cp_crtc_setfeature(prop_node, sde_crtc,
 				(num_of_features == 0));
@@ -634,6 +768,18 @@
 			set_lm_flush = true;
 	}
 
+	num_of_features = 0;
+	list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list)
+		num_of_features++;
+
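+	/*
+	 * Dirty AD features are programmed through the DSPP path, so any of
+	 * them forces a dspp flush.
+	 */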
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty,
+				dirty_list) {
+		num_of_features--;
+		set_dspp_flush = true;
+		sde_cp_crtc_setfeature(prop_node, sde_crtc,
+				(num_of_features == 0));
+	}
+
 	for (i = 0; i < num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
 		if (!ctl)
@@ -791,6 +937,13 @@
 			sde_crtc->num_mixers);
 		return -EINVAL;
 	}
+
+	ret = sde_cp_ad_validate_prop(prop_node, sde_crtc);
+	if (ret) {
+		DRM_ERROR("ad property validation failed ret %d\n", ret);
+		return ret;
+	}
+
 	/* remove the property from dirty list */
 	list_del_init(&prop_node->dirty_list);
 
@@ -804,7 +957,7 @@
 		/* remove the property from active list */
 		list_del_init(&prop_node->active_list);
 		/* Mark the feature as dirty */
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 	}
 	return ret;
 }
@@ -888,7 +1041,7 @@
 
 	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
 				 active_list) {
-		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		sde_cp_update_list(prop_node, sde_crtc, true);
 		list_del_init(&prop_node->active_list);
 	}
 }
@@ -913,7 +1066,7 @@
 		"SDE_DSPP_PCC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_PCC, sizeof(struct drm_msm_pcc));
 		break;
 	default:
@@ -988,6 +1141,33 @@
 		sde_cp_crtc_install_immutable_property(crtc,
 			feature_name, SDE_CP_CRTC_DSPP_AD);
 		break;
+	case 4:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+
+		sde_cp_crtc_install_enum_property(crtc,
+			SDE_CP_CRTC_DSPP_AD_MODE, ad4_modes,
+			ARRAY_SIZE(ad4_modes), "SDE_DSPP_AD_V4_MODE");
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INIT",
+			SDE_CP_CRTC_DSPP_AD_INIT, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_INIT,
+			sizeof(struct drm_msm_ad4_init));
+
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_CFG",
+			SDE_CP_CRTC_DSPP_AD_CFG, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc, SDE_CP_CRTC_DSPP_AD_CFG,
+			sizeof(struct drm_msm_ad4_cfg));
+		sde_cp_crtc_install_range_property(crtc,
+			"SDE_DSPP_AD_V4_ASSERTIVNESS",
+			SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS, 0, (BIT(8) - 1), 0);
+		sde_cp_crtc_install_range_property(crtc, "SDE_DSPP_AD_V4_INPUT",
+			SDE_CP_CRTC_DSPP_AD_INPUT, 0, U16_MAX, 0);
+		sde_cp_crtc_install_range_property(crtc,
+			"SDE_DSPP_AD_V4_BACKLIGHT",
+			SDE_CP_CRTC_DSPP_AD_BACKLIGHT, 0, (BIT(16) - 1), 0);
+		break;
 	default:
 		DRM_ERROR("version %d not supported\n", version);
 		break;
@@ -1008,7 +1188,7 @@
 		 "SDE_LM_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_LM_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1032,7 +1212,7 @@
 		"SDE_DSPP_GAMUT_V", version);
 	switch (version) {
 	case 4:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GAMUT,
 			sizeof(struct drm_msm_3d_gamut));
 		break;
@@ -1057,7 +1237,7 @@
 		"SDE_DSPP_GC_V", version);
 	switch (version) {
 	case 1:
-		sde_cp_crtc_create_blob_property(crtc, feature_name,
+		sde_cp_crtc_install_blob_property(crtc, feature_name,
 			SDE_CP_CRTC_DSPP_GC, sizeof(struct drm_msm_pgc_lut));
 		break;
 	default:
@@ -1065,3 +1245,74 @@
 		break;
 	}
 }
+
+static void sde_cp_update_list(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc, bool dirty_list)
+{
+	switch (prop_node->feature) {
+	case SDE_CP_CRTC_DSPP_AD_MODE:
+	case SDE_CP_CRTC_DSPP_AD_INIT:
+	case SDE_CP_CRTC_DSPP_AD_CFG:
+	case SDE_CP_CRTC_DSPP_AD_INPUT:
+	case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+	case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list, &crtc->ad_dirty);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->ad_active);
+		break;
+	default:
+		/* color processing properties are handled here */
+		if (dirty_list)
+			list_add_tail(&prop_node->dirty_list,
+					&crtc->dirty_list);
+		else
+			list_add_tail(&prop_node->active_list,
+					&crtc->active_list);
+		break;
+	}
+}
+
+static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
+		struct sde_crtc *crtc)
+{
+	int i = 0, ret = 0;
+	u32 ad_prop;
+
+	for (i = 0; i < crtc->num_mixers && !ret; i++) {
+		if (!crtc->mixers[i].hw_dspp) {
+			ret = -EINVAL;
+			continue;
+		}
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_AD_MODE:
+			ad_prop = AD_MODE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INIT:
+			ad_prop = AD_INIT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_CFG:
+			ad_prop = AD_CFG;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_INPUT:
+			ad_prop = AD_INPUT;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_ASSERTIVENESS:
+			ad_prop = AD_ASSERTIVE;
+			break;
+		case SDE_CP_CRTC_DSPP_AD_BACKLIGHT:
+			ad_prop = AD_BACKLIGHT;
+			break;
+		default:
+			/* Not an AD property */
+			return 0;
+		}
+		if (!crtc->mixers[i].hw_dspp->ops.validate_ad)
+			ret = -EINVAL;
+		else
+			ret = crtc->mixers[i].hw_dspp->ops.validate_ad(
+				crtc->mixers[i].hw_dspp, &ad_prop);
+	}
+	return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a037250b..bd0e4fd 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1299,7 +1299,6 @@
 	struct sde_crtc_mixer *mixer;
 	struct sde_hw_mixer *lm;
 	struct drm_display_mode *mode;
-	struct sde_hw_mixer_cfg cfg;
 	struct drm_encoder *encoder;
 	int i;
 
@@ -1330,11 +1329,11 @@
 
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		lm = mixer[i].hw_lm;
-		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
-		cfg.out_height = mode->vdisplay;
-		cfg.right_mixer = (i == 0) ? false : true;
-		cfg.flags = 0;
-		lm->ops.setup_mixer_out(lm, &cfg);
+		lm->cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+		lm->cfg.out_height = mode->vdisplay;
+		lm->cfg.right_mixer = (i != 0);
+		lm->cfg.flags = 0;
+		lm->ops.setup_mixer_out(lm, &lm->cfg);
 	}
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 286d9e6..3d95799 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -124,6 +124,8 @@
  * @feature_list  : list of color processing features supported on a crtc
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
+ * @ad_dirty      : list containing ad properties that are dirty
+ * @ad_active     : list containing ad properties that are active
  * @crtc_lock     : crtc lock around create, destroy and access.
  * @frame_pending : Whether or not an update is pending
  * @frame_events  : static allocation of in-flight frame events
@@ -165,6 +167,8 @@
 	struct list_head feature_list;
 	struct list_head active_list;
 	struct list_head dirty_list;
+	struct list_head ad_dirty;
+	struct list_head ad_active;
 
 	struct mutex crtc_lock;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
new file mode 100644
index 0000000..78fa634
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c
@@ -0,0 +1,883 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_lm.h"
+#include "sde_ad4.h"
+
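+/*
+ * The AD block may leave idle and run only once init, cfg, mode and input
+ * have all been programmed at least once.
+ */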
+#define IDLE_2_RUN(x) ((x) == (ad4_init | ad4_cfg | ad4_mode | ad4_input))
+#define MERGE_WIDTH_RIGHT 4
+#define MERGE_WIDTH_LEFT 3
+
+enum ad4_ops_bitmask {
+	ad4_init = BIT(AD_INIT),
+	ad4_cfg = BIT(AD_CFG),
+	ad4_mode = BIT(AD_MODE),
+	ad4_input = BIT(AD_INPUT),
+	ad4_ops_max = BIT(31),
+};
+
+enum ad4_state {
+	ad4_state_idle,
+	ad4_state_run,
+	ad4_state_max,
+};
+
+typedef int (*ad4_prop_setup)(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *ad);
+
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode);
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg);
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg);
+
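+/*
+ * Dispatch table: which setter runs for a property depends on whether the
+ * AD block is idle or already running.
+ */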
+static ad4_prop_setup prop_set_func[ad4_state_max][AD_PROPMAX] = {
+	[ad4_state_idle][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_idle][AD_INIT] = ad4_init_setup_idle,
+	[ad4_state_idle][AD_CFG] = ad4_cfg_setup_idle,
+	[ad4_state_idle][AD_INPUT] = ad4_input_setup_idle,
+	[ad4_state_idle][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_idle][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_idle][AD_BACKLIGHT] = ad4_backlight_setup,
+	[ad4_state_run][AD_MODE] = ad4_mode_setup_common,
+	[ad4_state_run][AD_INIT] = ad4_init_setup,
+	[ad4_state_run][AD_CFG] = ad4_cfg_setup,
+	[ad4_state_run][AD_INPUT] = ad4_input_setup,
+	[ad4_state_run][AD_SUSPEND] = ad4_suspend_setup,
+	[ad4_state_run][AD_ASSERTIVE] = ad4_assertive_setup,
+	[ad4_state_run][AD_BACKLIGHT] = ad4_backlight_setup,
+};
+
+struct ad4_info {
+	enum ad4_state state;
+	u32 completed_ops_mask;
+	bool ad4_support;
+	enum ad4_modes cached_mode;
+	u32 cached_als;
+};
+
+static struct ad4_info info[DSPP_MAX] = {
+	[DSPP_0] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_1] = {ad4_state_idle, 0, true, AD4_OFF},
+	[DSPP_2] = {ad4_state_max, 0, false, AD4_OFF},
+	[DSPP_3] = {ad4_state_max, 0, false, AD4_OFF},
+};
+
+void sde_setup_dspp_ad4(struct sde_hw_dspp *dspp, void *ad_cfg)
+{
+	int ret = 0;
+	struct sde_ad_hw_cfg *cfg = ad_cfg;
+
+	ret = ad4_params_check(dspp, ad_cfg);
+	if (ret)
+		return;
+
+	ret = prop_set_func[info[dspp->idx].state][cfg->prop](dspp, ad_cfg);
+	if (ret)
+		DRM_ERROR("op failed %d ret %d\n", cfg->prop, ret);
+}
+
+int sde_validate_dspp_ad4(struct sde_hw_dspp *dspp, u32 *prop)
+{
+	if (!dspp || !prop) {
+		DRM_ERROR("invalid params dspp %pK prop %pK\n", dspp, prop);
+		return -EINVAL;
+	}
+
+	if (*prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", *prop);
+		return -EINVAL;
+	}
+
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ad4_params_check(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	struct sde_hw_mixer *hw_lm;
+
+	if (!dspp || !cfg || !cfg->hw_cfg) {
+		DRM_ERROR("invalid dspp %pK cfg %pk hw_cfg %pK\n",
+			dspp, cfg, ((cfg) ? (cfg->hw_cfg) : NULL));
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->mixer_info) {
+		DRM_ERROR("invalid mixed info\n");
+		return -EINVAL;
+	}
+
+	if (dspp->idx >= DSPP_MAX || !info[dspp->idx].ad4_support) {
+		DRM_ERROR("ad4 not supported for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (cfg->prop >= AD_PROPMAX) {
+		DRM_ERROR("invalid prop set %d\n", cfg->prop);
+		return -EINVAL;
+	}
+
+	if (info[dspp->idx].state >= ad4_state_max) {
+		DRM_ERROR("in max state for dspp idx %d\n", dspp->idx);
+		return -EINVAL;
+	}
+
+	if (!prop_set_func[info[dspp->idx].state][cfg->prop]) {
+		DRM_ERROR("prop set not implemented for state %d prop %d\n",
+				info[dspp->idx].state, cfg->prop);
+		return -EINVAL;
+	}
+
+	if (!cfg->hw_cfg->num_of_mixers ||
+	    cfg->hw_cfg->num_of_mixers > CRTC_DUAL_MIXERS) {
+		DRM_ERROR("invalid mixer cnt %d\n",
+				cfg->hw_cfg->num_of_mixers);
+		return -EINVAL;
+	}
+	hw_lm = cfg->hw_cfg->mixer_info;
+
+	if (cfg->hw_cfg->num_of_mixers == 1 &&
+	    hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+	    hw_lm->cfg.out_width != cfg->hw_cfg->displayh) {
+		DRM_ERROR("single_lm lmh %d lmw %d displayh %d displayw %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	} else if (hw_lm->cfg.out_height != cfg->hw_cfg->displayv &&
+		    hw_lm->cfg.out_width != (cfg->hw_cfg->displayh >> 1)) {
+		DRM_ERROR("dual_lm lmh %d lmw %d displayh %d displayw %d\n",
+			hw_lm->cfg.out_height, hw_lm->cfg.out_width,
+			cfg->hw_cfg->displayh, cfg->hw_cfg->displayv);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode)
+{
+	u32 blk_offset;
+
+	blk_offset = 0x04;
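+	/*
+	 * Turning AD off drops the state machine back to idle and clears
+	 * the completed-ops mask, so init/cfg/mode/input must all be
+	 * programmed again before the next run.
+	 */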
+	if (mode == AD4_OFF) {
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0x101);
+		info[dspp->idx].state = ad4_state_idle;
+		info[dspp->idx].completed_ops_mask = 0;
+	} else {
+		info[dspp->idx].state = ad4_state_run;
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+				0);
+	}
+
+	return 0;
+}
+
+static int ad4_init_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 frame_start, frame_end, proc_start, proc_end;
+	struct sde_hw_mixer *hw_lm;
+	u32 blk_offset, tile_ctl, val, i;
+	u32 off1, off2, off3, off4, off5, off6;
+	struct drm_msm_ad4_init *init;
+
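+	/*
+	 * A NULL payload clears the property; drop init from the completed
+	 * ops so the state machine waits for it again.
+	 */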
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_init)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_init), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	hw_lm = cfg->hw_cfg->mixer_info;
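+	/*
+	 * Single-LM topologies process the full frame. In a dual-LM split,
+	 * each side fetches a few extra columns past the seam (the merge
+	 * width) while only processing its own half.
+	 */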
+	if (cfg->hw_cfg->num_of_mixers == 1) {
+		frame_start = 0;
+		frame_end = 0xffff;
+		proc_start = 0;
+		proc_end = 0xffff;
+		tile_ctl = 0;
+	} else {
+		tile_ctl = 0x5;
+		if (hw_lm->cfg.right_mixer) {
+			frame_start = (cfg->hw_cfg->displayh >> 1) -
+				MERGE_WIDTH_RIGHT;
+			frame_end = cfg->hw_cfg->displayh - 1;
+			proc_start = (cfg->hw_cfg->displayh >> 1);
+			proc_end = frame_end;
+			tile_ctl |= 0x10;
+		} else {
+			frame_start = 0;
+			frame_end = (cfg->hw_cfg->displayh >> 1) +
+				MERGE_WIDTH_LEFT;
+			proc_start = 0;
+			proc_end = (cfg->hw_cfg->displayh >> 1) - 1;
+		}
+	}
+
+	init = cfg->hw_cfg->payload;
+	blk_offset = 8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_009);
+
+	blk_offset = 0xc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    init->init_param_010);
+
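+	/*
+	 * The frame width/height parameters are derived from the panel
+	 * dimensions rather than trusted from the payload.
+	 */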
+	init->init_param_012 = cfg->hw_cfg->displayv & (BIT(17) - 1);
+	init->init_param_011 = cfg->hw_cfg->displayh & (BIT(17) - 1);
+	blk_offset = 0x10;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+	    ((init->init_param_011 << 16) | init->init_param_012));
+
+	blk_offset = 0x14;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			tile_ctl);
+
+	blk_offset = 0x44;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		((((init->init_param_013) & (BIT(17) - 1)) << 16) |
+		 (init->init_param_014 & (BIT(17) - 1))));
+
+	blk_offset = 0x5c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_015 & (BIT(16) - 1)));
+	blk_offset = 0x60;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_016 & (BIT(8) - 1)));
+	blk_offset = 0x64;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_017 & (BIT(12) - 1)));
+	blk_offset = 0x68;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_018 & (BIT(12) - 1)));
+	blk_offset = 0x6c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_019 & (BIT(12) - 1)));
+	blk_offset = 0x70;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_020 & (BIT(16) - 1)));
+	blk_offset = 0x74;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_021 & (BIT(8) - 1)));
+	blk_offset = 0x78;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_022 & (BIT(8) - 1)));
+	blk_offset = 0x7c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_023 & (BIT(16) - 1)));
+	blk_offset = 0x80;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_024 & (BIT(16) - 1)) << 16) |
+		((init->init_param_025 & (BIT(16) - 1)))));
+	blk_offset = 0x84;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_026 & (BIT(16) - 1)) << 16) |
+		((init->init_param_027 & (BIT(16) - 1)))));
+
+	blk_offset = 0x90;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_028 & (BIT(16) - 1)));
+	blk_offset = 0x94;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_029 & (BIT(16) - 1)));
+
+	blk_offset = 0x98;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_035 & (BIT(16) - 1)) << 16) |
+		((init->init_param_030 & (BIT(16) - 1)))));
+
+	blk_offset = 0x9c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_032 & (BIT(16) - 1)) << 16) |
+		((init->init_param_031 & (BIT(16) - 1)))));
+	blk_offset = 0xa0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_034 & (BIT(16) - 1)) << 16) |
+		((init->init_param_033 & (BIT(16) - 1)))));
+
+	blk_offset = 0xb4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_036 & (BIT(8) - 1)));
+	blk_offset = 0xcc;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_037 & (BIT(8) - 1)));
+	blk_offset = 0xc0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_038 & (BIT(8) - 1)));
+	blk_offset = 0xd8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_039 & (BIT(8) - 1)));
+
+	blk_offset = 0xe8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_040 & (BIT(16) - 1)));
+
+	blk_offset = 0xf4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_041 & (BIT(8) - 1)));
+
+	blk_offset = 0x100;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_042 & (BIT(16) - 1)));
+
+	blk_offset = 0x10c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_043 & (BIT(8) - 1)));
+
+	blk_offset = 0x120;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_044 & (BIT(16) - 1)));
+	blk_offset = 0x124;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_045 & (BIT(16) - 1)));
+
+	blk_offset = 0x128;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_046 & (BIT(1) - 1)));
+	blk_offset = 0x12c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_047 & (BIT(8) - 1)));
+
+	blk_offset = 0x13c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_048 & (BIT(5) - 1)));
+	blk_offset = 0x140;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_049 & (BIT(8) - 1)));
+
+	blk_offset = 0x144;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_050 & (BIT(8) - 1)));
+	blk_offset = 0x148;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_051 & (BIT(8) - 1)) << 8) |
+		((init->init_param_052 & (BIT(8) - 1)))));
+
+	blk_offset = 0x14c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_053 & (BIT(10) - 1)));
+	blk_offset = 0x150;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_054 & (BIT(10) - 1)));
+	blk_offset = 0x154;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_055 & (BIT(8) - 1)));
+
+	blk_offset = 0x158;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_056 & (BIT(8) - 1)));
+	blk_offset = 0x164;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_057 & (BIT(8) - 1)));
+	blk_offset = 0x168;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_058 & (BIT(4) - 1)));
+
+	blk_offset = 0x17c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_start & (BIT(16) - 1)));
+	blk_offset = 0x180;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(frame_end & (BIT(16) - 1)));
+	blk_offset = 0x184;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_start & (BIT(16) - 1)));
+	blk_offset = 0x188;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(proc_end & (BIT(16) - 1)));
+
+	blk_offset = 0x18c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_059 & (BIT(4) - 1)));
+
+	blk_offset = 0x190;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+		(((init->init_param_061 & (BIT(8) - 1)) << 8) |
+		((init->init_param_060 & (BIT(8) - 1)))));
+
+	blk_offset = 0x194;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_062 & (BIT(10) - 1)));
+
+	blk_offset = 0x1a0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_063 & (BIT(10) - 1)));
+	blk_offset = 0x1a4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_064 & (BIT(10) - 1)));
+	blk_offset = 0x1a8;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_065 & (BIT(10) - 1)));
+	blk_offset = 0x1ac;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_066 & (BIT(8) - 1)));
+	blk_offset = 0x1b0;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_067 & (BIT(8) - 1)));
+	blk_offset = 0x1b4;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_068 & (BIT(6) - 1)));
+
+	blk_offset = 0x460;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_069 & (BIT(16) - 1)));
+	blk_offset = 0x464;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_070 & (BIT(10) - 1)));
+	blk_offset = 0x468;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_071 & (BIT(10) - 1)));
+	blk_offset = 0x46c;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_072 & (BIT(10) - 1)));
+	blk_offset = 0x470;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_073 & (BIT(8) - 1)));
+	blk_offset = 0x474;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_074 & (BIT(10) - 1)));
+	blk_offset = 0x478;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(init->init_param_075 & (BIT(10) - 1)));
+
+	off1 = 0x1c0;
+	off2 = 0x210;
+	off3 = 0x260;
+	off4 = 0x2b0;
+	off5 = 0x380;
+	off6 = 0x3d0;
+	for (i = 0; i < AD4_LUT_GRP0_SIZE - 1; i = i + 2) {
+		val = (init->init_param_001[i] & (BIT(16) - 1));
+		val |= ((init->init_param_001[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_002[i] & (BIT(16) - 1));
+		val |= ((init->init_param_002[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+
+		val = (init->init_param_003[i] & (BIT(16) - 1));
+		val |= ((init->init_param_003[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+		off3 += 4;
+
+		val = (init->init_param_004[i] & (BIT(16) - 1));
+		val |= ((init->init_param_004[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+		off4 += 4;
+
+		val = (init->init_param_007[i] & (BIT(16) - 1));
+		val |= ((init->init_param_007[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+		off5 += 4;
+
+		val = (init->init_param_008[i] & (BIT(12) - 1));
+		val |= ((init->init_param_008[i + 1] &
+				(BIT(12) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+		off6 += 4;
+	}
+	/* write last index data */
+	i = AD4_LUT_GRP0_SIZE - 1;
+	val = ((init->init_param_001[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+	val = ((init->init_param_002[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+	val = ((init->init_param_003[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off3, val);
+	val = ((init->init_param_004[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off4, val);
+	val = ((init->init_param_007[i] & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off5, val);
+	val = ((init->init_param_008[i] & (BIT(12) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off6, val);
+
+	off1 = 0x300;
+	off2 = 0x340;
+	for (i = 0; i < AD4_LUT_GRP1_SIZE; i = i + 2) {
+		val = (init->init_param_005[i] & (BIT(16) - 1));
+		val |= ((init->init_param_005[i + 1] &
+				(BIT(16) - 1)) << 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off1, val);
+		off1 += 4;
+
+		val = (init->init_param_006[i] & (BIT(16) - 1));
+		val |= ((init->init_param_006[i + 1] & (BIT(16) - 1))
+				<< 16);
+		SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + off2, val);
+		off2 += 4;
+	}
+
+	return 0;
+}
+
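The long run of writes in ad4_init_setup() above leans on a single idiom: clamp each parameter to its register field width with (BIT(n) - 1) before it is written. A minimal sketch of that idiom follows; the helper names are illustrative, not part of this patch.

/*
 * Sketch only: BIT(w) - 1 builds a w-bit-wide mask, so a parameter can
 * never spill into a neighboring register field. Helper names here are
 * hypothetical, not from this file.
 */
static inline u32 ad4_field(u32 value, u32 width)
{
	return value & (BIT(width) - 1);
}

static inline u32 ad4_pack16(u32 hi, u32 lo)
{
	/* two 16-bit parameters packed into one 32-bit register word */
	return (ad4_field(hi, 16) << 16) | ad4_field(lo, 16);
}
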
+static int ad4_cfg_setup(struct sde_hw_dspp *dspp, struct sde_ad_hw_cfg *cfg)
+{
+	u32 blk_offset, val;
+	struct drm_msm_ad4_cfg *ad_cfg;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	if (cfg->hw_cfg->len != sizeof(struct drm_msm_ad4_cfg)) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(struct drm_msm_ad4_cfg), cfg->hw_cfg->len,
+			cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+	ad_cfg = cfg->hw_cfg->payload;
+
+	blk_offset = 0x18;
+	val = (ad_cfg->cfg_param_002 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_001 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_004 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_003 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_005 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_006 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x30;
+	val = (ad_cfg->cfg_param_007 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_008 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_009 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_010 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = ((ad_cfg->cfg_param_011 & (BIT(16) - 1)) << 16);
+	val |= (ad_cfg->cfg_param_012 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x88;
+	val = (ad_cfg->cfg_param_013 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_014 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xa4;
+	val = (ad_cfg->cfg_param_015 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_016 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_017 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_018 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xc4;
+	val = (ad_cfg->cfg_param_019 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_020 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xb8;
+	val = (ad_cfg->cfg_param_021 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_022 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xd0;
+	val = (ad_cfg->cfg_param_023 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_024 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xdc;
+	val = (ad_cfg->cfg_param_025 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_026 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_027 & (BIT(16) - 1));
+	val |= ((ad_cfg->cfg_param_028 & (BIT(16) - 1)) << 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_029 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xec;
+	val = (ad_cfg->cfg_param_030 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_031 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0xf8;
+	val = (ad_cfg->cfg_param_032 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_033 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x104;
+	val = (ad_cfg->cfg_param_034 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_035 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x110;
+	val = (ad_cfg->cfg_param_036 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_037 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_038 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_039 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x134;
+	val = (ad_cfg->cfg_param_040 & (BIT(12) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_041 & (BIT(7) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x15c;
+	val = (ad_cfg->cfg_param_042 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_043 & (BIT(10) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	blk_offset = 0x16c;
+	val = (ad_cfg->cfg_param_044 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_045 & (BIT(8) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+	blk_offset += 4;
+	val = (ad_cfg->cfg_param_046 & (BIT(16) - 1));
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, val);
+
+	return 0;
+}
+
+static int ad4_input_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, als;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x28;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		als = 0;
+		val = &als;
+	}
+	info[dspp->idx].cached_als = *val;
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+
+	return 0;
+}
+
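ad4_input_setup() treats a missing payload as "write zero", and the assertiveness and backlight helpers further down follow the same convention. A hedged sketch of that pattern; the helper name is illustrative.

/*
 * Sketch of the NULL-payload convention shared by ad4_input_setup(),
 * ad4_assertive_setup() and ad4_backlight_setup(): no payload is not
 * an error, it simply resets the register field to zero.
 */
static u64 ad4_value_or_zero(const struct sde_hw_cp_cfg *hw_cfg)
{
	return hw_cfg->payload ? *(const u64 *)hw_cfg->payload : 0;
}
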
+static int ad4_suspend_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	info[dspp->idx].state = ad4_state_idle;
+	info[dspp->idx].completed_ops_mask = 0;
+	return 0;
+}
+
+static int ad4_mode_setup_common(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	if (cfg->hw_cfg->len != sizeof(u64) || !cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	info[dspp->idx].cached_mode = *((enum ad4_modes *)
+					(cfg->hw_cfg->payload));
+	info[dspp->idx].completed_ops_mask |= ad4_mode;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_init_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_init;
+		return 0;
+	}
+
+	ret = ad4_init_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_init;
+
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_cfg_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	if (!cfg->hw_cfg->payload) {
+		info[dspp->idx].completed_ops_mask &= ~ad4_cfg;
+		return 0;
+	}
+
+	ret = ad4_cfg_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_cfg;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+	return 0;
+}
+
+static int ad4_input_setup_idle(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	int ret;
+
+	ret = ad4_input_setup(dspp, cfg);
+	if (ret)
+		return ret;
+
+	info[dspp->idx].completed_ops_mask |= ad4_input;
+	if (IDLE_2_RUN(info[dspp->idx].completed_ops_mask))
+		ad4_mode_setup(dspp, info[dspp->idx].cached_mode);
+
+	return 0;
+}
+
+static int ad4_assertive_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, assertive;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x30;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		assertive = 0;
+		val = &assertive;
+	}
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(8) - 1)));
+	return 0;
+}
+
+static int ad4_backlight_setup(struct sde_hw_dspp *dspp,
+		struct sde_ad_hw_cfg *cfg)
+{
+	u64 *val, bl;
+	u32 blk_offset;
+
+	if (cfg->hw_cfg->len != sizeof(u64) && cfg->hw_cfg->payload) {
+		DRM_ERROR("invalid sz param exp %zd given %d cfg %pK\n",
+			sizeof(u64), cfg->hw_cfg->len, cfg->hw_cfg->payload);
+		return -EINVAL;
+	}
+
+	blk_offset = 0x2c;
+	if (cfg->hw_cfg->payload) {
+		val = cfg->hw_cfg->payload;
+	} else {
+		bl = 0;
+		val = &bl;
+	}
+
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset,
+			(*val & (BIT(16) - 1)));
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 369d5d1..74d1714 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1668,6 +1668,7 @@
 					blocks_prop_exists, blocks_prop_value);
 
 		sblk->ad.id = SDE_DSPP_AD;
+		sde_cfg->ad_count = ad_off_count;
 		if (ad_prop_value && (i < ad_off_count) &&
 		    ad_prop_exists[AD_OFF]) {
 			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 2b34016..980ee66 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -739,6 +739,9 @@
 
 	u32 reg_dma_count;
 	struct sde_reg_dma_cfg dma_cfg;
+
+	u32 ad_count;
+
 	/* Add additional block data structures here */
 
 	struct sde_perf_cfg perf;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
index 6110a07..51680d3 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -16,6 +16,7 @@
 #include "sde_hw_dspp.h"
 #include "sde_hw_color_processing.h"
 #include "sde_dbg.h"
+#include "sde_ad4.h"
 
 static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
 		struct sde_mdss_cfg *m,
@@ -96,6 +97,13 @@
 						sde_setup_dspp_gc_v1_7;
 			}
 			break;
+		case SDE_DSPP_AD:
+			if (c->cap->sblk->ad.version ==
+			    SDE_COLOR_PROCESS_VER(4, 0)) {
+				c->ops.setup_ad = sde_setup_dspp_ad4;
+				c->ops.validate_ad = sde_validate_dspp_ad4;
+			}
+			break;
 		default:
 			break;
 		}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
index 25e1f3b..455daa4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -139,6 +139,20 @@
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * validate_ad - check if ad property can be set
+	 * @ctx: Pointer to dspp context
+	 * @prop: Pointer to ad property being validated
+	 */
+	int (*validate_ad)(struct sde_hw_dspp *ctx, u32 *prop);
+
+	/**
+	 * setup_ad - update the ad property
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to ad configuration
+	 */
+	void (*setup_ad)(struct sde_hw_dspp *ctx, void *cfg);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
index 5af260a..1ef36ac 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -93,6 +93,9 @@
 
 	/* ops */
 	struct sde_hw_lm_ops ops;
+
+	/* store mixer info specific to display */
+	struct sde_hw_mixer_cfg cfg;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index c4917d3..9eae387 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -462,12 +462,20 @@
  * @len: Length of the payload.
  * @ctl: control pointer associated with dspp/lm.
  * @last_feature: last feature that will be set.
+ * @num_of_mixers: number of layer mixers for the display.
+ * @mixer_info: mixer info pointer associated with lm.
+ * @displayv: height of the display.
+ * @displayh: width of the display.
  */
 struct sde_hw_cp_cfg {
 	void *payload;
 	u32 len;
 	void *ctl;
 	u32 last_feature;
+	u32 num_of_mixers;
+	void *mixer_info;
+	u32 displayv;
+	u32 displayh;
 };
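For illustration, a hedged sketch of how a commit path might populate the new display fields; every value below is a placeholder, not taken from this patch.

/* Placeholder values only; a real caller derives these from the CRTC. */
struct sde_hw_cp_cfg cp_cfg = {
	.payload       = prop_payload,   /* hypothetical property blob */
	.len           = prop_len,
	.ctl           = hw_ctl,
	.last_feature  = 0,
	.num_of_mixers = 2,              /* e.g. dual-LM split display */
	.mixer_info    = hw_lm,          /* struct sde_hw_mixer * */
	.displayh      = 2560,           /* panel width in pixels */
	.displayv      = 1440,           /* panel height in pixels */
};
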
 
 /**
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index dd70794..c7abd9d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -96,6 +96,7 @@
  * @SDE_CAPS_R1_WB: MDSS V1.x WB block
  * @SDE_CAPS_R3_WB: MDSS V3.x WB block
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
+ * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
  */
 enum sde_caps_settings {
@@ -103,6 +104,7 @@
 	SDE_CAPS_R3_WB,
 	SDE_CAPS_R3_1P5_DOWNSCALE,
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
+	SDE_CAPS_MIN_BUS_VOTE,
 	SDE_CAPS_SBUF_1,
 	SDE_CAPS_MAX,
 };
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index a3603da..643e8a0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -67,6 +67,8 @@
 #define ROT_OVERHEAD_NUMERATOR		27
 #define ROT_OVERHEAD_DENOMINATOR	10000
 
+/* default minimum bandwidth vote */
+#define ROT_ENABLE_BW_VOTE		64000
 /*
  * Max rotator hw blocks possible. Used for upper array limits instead of
  * alloc and freeing small array
@@ -96,6 +98,9 @@
 	.active_only = 1,
 };
 
+/* forward declaration */
+static int sde_rotator_update_perf(struct sde_rot_mgr *mgr);
+
 static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
 		u64 quota)
 {
@@ -292,6 +297,7 @@
 
 static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
 {
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
 	int ret;
 
 	if (WARN_ON(mgr->regulator_enable == on)) {
@@ -302,6 +308,11 @@
 	SDEROT_EVTLOG(on);
 	SDEROT_DBG("%s: rotator regulators\n", on ? "Enable" : "Disable");
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && on) {
+		mgr->minimum_bw_vote = mgr->enable_bw_vote;
+		sde_rotator_update_perf(mgr);
+	}
+
 	if (mgr->ops_hw_pre_pmevent)
 		mgr->ops_hw_pre_pmevent(mgr, on);
 
@@ -316,6 +327,11 @@
 	if (mgr->ops_hw_post_pmevent)
 		mgr->ops_hw_post_pmevent(mgr, on);
 
+	if (test_bit(SDE_CAPS_MIN_BUS_VOTE, mdata->sde_caps_map) && !on) {
+		mgr->minimum_bw_vote = 0;
+		sde_rotator_update_perf(mgr);
+	}
+
 	mgr->regulator_enable = on;
 }
 
@@ -1323,6 +1339,7 @@
 	}
 
 	total_bw += mgr->pending_close_bw_vote;
+	total_bw = max_t(u64, total_bw, mgr->minimum_bw_vote);
 	sde_rotator_enable_reg_bus(mgr, total_bw);
 	ATRACE_INT("bus_quota", total_bw);
 	sde_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
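The max_t() clamp above is what makes the new vote a floor rather than an addition. Illustrative numbers, assuming the default ROT_ENABLE_BW_VOTE of 64000:

/* Illustrative only: an idle but powered rotator votes the floor,
 * while a busy one is unaffected by it.
 */
u64 idle = max_t(u64, 0, 64000);        /* -> 64000 */
u64 busy = max_t(u64, 1200000, 64000);  /* -> 1200000 */
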
@@ -2794,6 +2811,7 @@
 	mgr->pdev = pdev;
 	mgr->device = &pdev->dev;
 	mgr->pending_close_bw_vote = 0;
+	mgr->enable_bw_vote = ROT_ENABLE_BW_VOTE;
 	mgr->hwacquire_timeout = ROT_HW_ACQUIRE_TIMEOUT_IN_MS;
 	mgr->queue_count = 1;
 	mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
@@ -3013,6 +3031,7 @@
 	sde_rot_mgr_lock(mgr);
 	atomic_inc(&mgr->device_suspended);
 	sde_rotator_suspend_cancel_rot_work(mgr);
+	mgr->minimum_bw_vote = 0;
 	sde_rotator_update_perf(mgr);
 	ATRACE_END("pm_active");
 	SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index be36f42..0818917 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -367,6 +367,8 @@
  * @doneq: array of rotator done queue corresponding to hardware queue
  * @file_list: list of all sessions managed by rotator manager
  * @pending_close_bw_vote: bandwidth of closed sessions with pending work
+ * @minimum_bw_vote: minimum bandwidth required for current use case
+ * @enable_bw_vote: minimum bandwidth required for power enable
  * @data_bus: data bus configuration state
  * @reg_bus: register bus configuration state
  * @module_power: power/clock configuration state
@@ -406,6 +408,8 @@
 	struct list_head file_list;
 
 	u64 pending_close_bw_vote;
+	u64 minimum_bw_vote;
+	u64 enable_bw_vote;
 	struct sde_rot_bus_data_type data_bus;
 	struct sde_rot_bus_data_type reg_bus;
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index da2705a..86e04d6 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -868,6 +868,12 @@
 		return -EINVAL;
 	}
 
+	if (!debugfs_create_u64("enable_bw_vote", 0644,
+			debugfs_root, &mgr->enable_bw_vote)) {
+		SDEROT_WARN("failed to create enable_bw_vote\n");
+		return -EINVAL;
+	}
+
 	if (mgr->ops_hw_create_debugfs) {
 		ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
 		if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 645baea..4278b6d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -2210,6 +2210,7 @@
 	/* features exposed via mdss h/w version */
 	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_400)) {
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
+		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 6b3ddfa..49fe5fb 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1481,21 +1481,13 @@
 		case HAL_INTRA_REFRESH_NONE:
 			hfi->mode = HFI_INTRA_REFRESH_NONE;
 			break;
-		case HAL_INTRA_REFRESH_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
-			break;
 		case HAL_INTRA_REFRESH_CYCLIC:
 			hfi->mode = HFI_INTRA_REFRESH_CYCLIC;
-			hfi->mbs = prop->cir_mbs;
-			break;
-		case HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE:
-			hfi->mode = HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		case HAL_INTRA_REFRESH_RANDOM:
 			hfi->mode = HFI_INTRA_REFRESH_RANDOM;
-			hfi->mbs = prop->air_mbs;
+			hfi->mbs = prop->ir_mbs;
 			break;
 		default:
 			dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
index 2db245e..c82db74 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c
@@ -73,11 +73,6 @@
 
 	trace_msm_v4l2_vidc_close_start("msm_v4l2_close start");
 	vidc_inst = get_vidc_inst(filp, NULL);
-	rc = msm_vidc_release_buffers(vidc_inst,
-			V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
-	if (rc)
-		dprintk(VIDC_WARN,
-			"Failed in %s for release output buffers\n", __func__);
 
 	rc = msm_vidc_close(vidc_inst);
 	trace_msm_v4l2_vidc_close_end("msm_v4l2_close end");
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index ff7204c..2ec5155 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1646,38 +1646,31 @@
 		pdata = &enable;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: {
-		struct v4l2_ctrl *air_mbs, *air_ref = NULL, *cir_mbs = NULL;
-		bool is_cont_intra_supported = false;
+	case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE:
+	{
+		struct v4l2_ctrl *ir_mbs;
 
-		air_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
-
-		is_cont_intra_supported =
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
-		(inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
+		ir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.mode = ctrl->val;
-		intra_refresh.air_mbs = air_mbs->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ctrl->val;
+		intra_refresh.ir_mbs = ir_mbs->val;
 
 		pdata = &intra_refresh;
 		break;
 	}
-	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS: {
-		struct v4l2_ctrl *ir_mode, *air_ref = NULL, *cir_mbs = NULL;
+	case V4L2_CID_MPEG_VIDC_VIDEO_IR_MBS:
+	{
+		struct v4l2_ctrl *ir_mode;
 
 		ir_mode = TRY_GET_CTRL(
 				V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE);
 
 		property_id = HAL_PARAM_VENC_INTRA_REFRESH;
 
-		intra_refresh.air_mbs = ctrl->val;
-		intra_refresh.mode = ir_mode->val;
-		intra_refresh.air_ref = air_ref->val;
-		intra_refresh.cir_mbs = cir_mbs->val;
+		intra_refresh.mode   = ir_mode->val;
+		intra_refresh.ir_mbs = ctrl->val;
 
 		pdata = &intra_refresh;
 		break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2d803bb..270fc31 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -813,11 +813,13 @@
 		inst->bufq[port].num_planes == b->length;
 }
 
-int msm_vidc_release_buffers(void *instance, int buffer_type)
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index)
 {
 	struct msm_vidc_inst *inst = instance;
 	struct buffer_info *bi, *dummy;
 	int i, rc = 0;
+	int found_buf = 0;
 
 	if (!inst)
 		return -EINVAL;
@@ -835,7 +837,8 @@
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == buffer_type) {
+		if (bi->type == buffer_type && bi->v4l2_index == buffer_index) {
+			found_buf = 1;
 			list_del(&bi->list);
 			for (i = 0; i < bi->num_planes; i++) {
 				if (bi->handle[i] && bi->mapped[i]) {
@@ -846,15 +849,38 @@
 						bi->buff_off[i], bi->mapped[i]);
 					msm_comm_smem_free(inst,
 							bi->handle[i]);
+					found_buf = 2;
 				}
 			}
 			kfree(bi);
+			break;
 		}
 	}
 	mutex_unlock(&inst->registeredbufs.lock);
+
+	switch (found_buf) {
+	case 0:
+		dprintk(VIDC_WARN,
+			"%s: No buffer(type: %d) found for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	case 1:
+		dprintk(VIDC_WARN,
+			"%s: Buffer(type: %d) found for index %d, but no planes were mapped\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	case 2:
+		dprintk(VIDC_DBG,
+			"%s: Released buffer(type: %d) for index %d\n",
+			__func__, buffer_type, buffer_index);
+		break;
+	default:
+		break;
+	}
 	return rc;
 }
-EXPORT_SYMBOL(msm_vidc_release_buffers);
+EXPORT_SYMBOL(msm_vidc_release_buffer);
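Hedged usage sketch of the new per-buffer API; the patch wires it into msm_vidc_cleanup_buffer() below, and this snippet only restates that call shape.

/* Release a single buffer by its v4l2 index instead of dropping every
 * registered buffer of that type, as msm_vidc_release_buffers() did.
 */
rc = msm_vidc_release_buffer(inst, vb->type, vb->index);
if (rc)
	dprintk(VIDC_ERR, "%s: Failed to release buffer: %d\n",
		__func__, rc);
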
 
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
 {
@@ -1103,7 +1129,6 @@
 	.put_userptr = vidc_put_userptr,
 };
 
-
 static void msm_vidc_cleanup_buffer(struct vb2_buffer *vb)
 {
 	int rc = 0;
@@ -1137,8 +1162,7 @@
 		return;
 	}
 
-	rc = msm_vidc_release_buffers(inst,
-		vb->type);
+	rc = msm_vidc_release_buffer(inst, vb->type, vb->index);
 	if (rc)
 		dprintk(VIDC_ERR, "%s : Failed to release buffers : %d\n",
 			__func__, rc);
@@ -2130,10 +2154,17 @@
 	if (!inst || !inst->core)
 		return -EINVAL;
 
+	/*
+	 * Make sure the HW has stopped working on the buffers we
+	 * are about to free.
+	 */
+	if (inst->state != MSM_VIDC_CORE_INVALID &&
+		inst->core->state != VIDC_CORE_INVALID)
+		rc = msm_comm_try_state(inst,
+				MSM_VIDC_RELEASE_RESOURCES_DONE);
 
 	mutex_lock(&inst->registeredbufs.lock);
 	list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) {
-		if (bi->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 			int i = 0;
 
 			list_del(&bi->list);
@@ -2146,7 +2177,6 @@
 
 			kfree(bi);
 		}
-	}
 	mutex_unlock(&inst->registeredbufs.lock);
 
 	cleanup_instance(inst);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 8375c2f..eff16f2 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -234,7 +234,6 @@
 	HAL_PARAM_VENC_BITRATE_TYPE,
 	HAL_PARAM_VENC_H264_PIC_ORDER_CNT,
 	HAL_PARAM_VENC_LOW_LATENCY,
-	HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED,
 	HAL_CONFIG_VENC_BLUR_RESOLUTION,
 	HAL_PARAM_VENC_H264_TRANSFORM_8x8,
 	HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
@@ -793,17 +792,13 @@
 enum hal_intra_refresh_mode {
 	HAL_INTRA_REFRESH_NONE,
 	HAL_INTRA_REFRESH_CYCLIC,
-	HAL_INTRA_REFRESH_ADAPTIVE,
-	HAL_INTRA_REFRESH_CYCLIC_ADAPTIVE,
 	HAL_INTRA_REFRESH_RANDOM,
 	HAL_UNUSED_INTRA = 0x10000000,
 };
 
 struct hal_intra_refresh {
 	enum hal_intra_refresh_mode mode;
-	u32 air_mbs;
-	u32 air_ref;
-	u32 cir_mbs;
+	u32 ir_mbs;
 };
 
 enum hal_multi_slice {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index ad2a336..dc64ad2 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -454,9 +454,7 @@
 
 #define HFI_INTRA_REFRESH_NONE				(HFI_COMMON_BASE + 0x1)
 #define HFI_INTRA_REFRESH_CYCLIC			(HFI_COMMON_BASE + 0x2)
-#define HFI_INTRA_REFRESH_ADAPTIVE			(HFI_COMMON_BASE + 0x3)
-#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE	(HFI_COMMON_BASE + 0x4)
-#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x5)
+#define HFI_INTRA_REFRESH_RANDOM			(HFI_COMMON_BASE + 0x3)
 
 struct hfi_intra_refresh {
 	u32 mode;
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 133f6b5..2c48419 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -887,6 +887,10 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	wil_dbg_misc(wil, "Prevent DS in BL & mark FW to set T_POWER_ON=0\n");
+	wil_s(wil, RGF_USER_USAGE_8, BIT_USER_PREVENT_DEEP_SLEEP |
+	      BIT_USER_SUPPORT_T_POWER_ON_0);
+
 	if (wil->platform_ops.notify) {
 		rc = wil->platform_ops.notify(wil->platform_handle,
 					      WIL_PLATFORM_EVT_PRE_RESET);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index bfffc0e..4bccef3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -140,6 +140,9 @@
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
 	#define BIT_USER_OOB_MODE		BIT(31)
+#define RGF_USER_USAGE_8		(0x880020)
+	#define BIT_USER_PREVENT_DEEP_SLEEP	BIT(0)
+	#define BIT_USER_SUPPORT_T_POWER_ON_0	BIT(1)
 #define RGF_USER_HW_MACHINE_STATE	(0x8801dc)
 	#define HW_MACHINE_BOOT_DONE	(0x3fffffd)
 #define RGF_USER_USER_CPU_0		(0x8801e0)
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 3330595..47da1b3 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -55,6 +55,8 @@
 #define VDDIO_MAX_UV	2040000
 #define VDDIO_MAX_UA	70300
 
+#define PCIE20_CAP_LINKCTRLSTATUS 0x80
+
 #define WIGIG_MIN_CPU_BOOST_KBPS	150000
 
 struct device;
@@ -87,6 +89,7 @@
 	u32 rc_index; /* PCIE root complex index */
 	struct pci_dev *pcidev;
 	struct pci_saved_state *pristine_state;
+	bool l1_enabled_in_enum;
 
 	/* SMMU */
 	bool use_smmu; /* have SMMU enabled? */
@@ -476,6 +479,47 @@
 	msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
 }
 
+int msm_11ad_ctrl_aspm_l1(struct msm11ad_ctx *ctx, bool enable)
+{
+	int rc;
+	u32 val;
+	struct pci_dev *pdev = ctx->pcidev;
+	bool l1_enabled;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pdev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n", rc);
+		return rc;
+	}
+	dev_dbg(ctx->dev, "PCIE20_CAP_LINKCTRLSTATUS read returns 0x%x\n", val);
+
+	l1_enabled = val & PCI_EXP_LNKCTL_ASPM_L1;
+	if (l1_enabled == enable) {
+		dev_dbg(ctx->dev, "ASPM_L1 is already %s\n",
+			l1_enabled ? "enabled" : "disabled");
+		return 0;
+	}
+
+	if (enable)
+		val |= PCI_EXP_LNKCTL_ASPM_L1; /* enable bit 1 */
+	else
+		val &= ~PCI_EXP_LNKCTL_ASPM_L1; /* disable bit 1 */
+
+	dev_dbg(ctx->dev, "writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x)\n",
+		val);
+	rc = pci_write_config_dword(pdev,
+				    PCIE20_CAP_LINKCTRLSTATUS, val);
+	if (rc)
+		dev_err(ctx->dev,
+			"writing PCIE20_CAP_LINKCTRLSTATUS (val 0x%x) failed:%d\n",
+			val, rc);
+
+	return rc;
+}
+
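The call pattern this patch establishes around msm_11ad_ctrl_aspm_l1(): L1 is only ever toggled if enumeration found it enabled, disabled before suspend, and restored on PRE_RESET. A condensed sketch of the suspend side:

/* Condensed call pattern from this patch: only toggle L1 if the
 * enumeration-time state said it was enabled to begin with.
 */
if (ctx->l1_enabled_in_enum) {
	rc = msm_11ad_ctrl_aspm_l1(ctx, false);  /* before suspend */
	if (rc)
		dev_err(ctx->dev, "failed to disable L1, rc %d\n", rc);
}
/* ...and msm_11ad_ctrl_aspm_l1(ctx, true) on WIL_PLATFORM_EVT_PRE_RESET */
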
 static int ops_suspend(void *handle)
 {
 	int rc;
@@ -561,6 +605,16 @@
 		goto err_suspend_rc;
 	}
 
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto err_suspend_rc;
+		}
+	}
+
 	return 0;
 
 err_suspend_rc:
@@ -847,6 +901,7 @@
 	struct device_node *rc_node;
 	struct pci_dev *pcidev = NULL;
 	int rc;
+	u32 val;
 
 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -965,6 +1020,31 @@
 		goto out_rc;
 	}
 	ctx->pcidev = pcidev;
+
+	/* Read current state */
+	rc = pci_read_config_dword(pcidev,
+				   PCIE20_CAP_LINKCTRLSTATUS, &val);
+	if (rc) {
+		dev_err(ctx->dev,
+			"reading PCIE20_CAP_LINKCTRLSTATUS failed:%d\n",
+			rc);
+		goto out_rc;
+	}
+
+	ctx->l1_enabled_in_enum = val & PCI_EXP_LNKCTL_ASPM_L1;
+	dev_dbg(ctx->dev, "L1 is %s in enumeration\n",
+		ctx->l1_enabled_in_enum ? "enabled" : "disabled");
+
+	/* Disable L1, in case it is enabled */
+	if (ctx->l1_enabled_in_enum) {
+		rc = msm_11ad_ctrl_aspm_l1(ctx, false);
+		if (rc) {
+			dev_err(ctx->dev,
+				"failed to disable L1, rc %d\n", rc);
+			goto out_rc;
+		}
+	}
+
 	rc = pci_save_state(pcidev);
 	if (rc) {
 		dev_err(ctx->dev, "pci_save_state failed :%d\n", rc);
@@ -1212,6 +1292,13 @@
 		 * TODO: Enable rf_clk3 clock before resetting the device to
 		 * ensure stable ref clock during the device reset
 		 */
+		/* Re-enable L1 in case it was enabled in enumeration */
+		if (ctx->l1_enabled_in_enum) {
+			rc = msm_11ad_ctrl_aspm_l1(ctx, true);
+			if (rc)
+				dev_err(ctx->dev,
+					"failed to enable L1, rc %d\n", rc);
+		}
 		break;
 	case WIL_PLATFORM_EVT_FW_RDY:
 		/*
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index dadd1e8d..26a305f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1441,6 +1441,9 @@
 {
 	struct usb_device	*udev = to_usb_device(dev);
 
+	if (udev->bus->skip_resume && udev->state == USB_STATE_SUSPENDED)
+		return 0;
+
 	unbind_no_pm_drivers_interfaces(udev);
 
 	/* From now on we are sure all drivers support suspend/resume
@@ -1470,6 +1473,15 @@
 	struct usb_device	*udev = to_usb_device(dev);
 	int			status;
 
+	/*
+	 * Some buses would like to keep their devices in suspend
+	 * state after system resume.  Their resume happens when
+	 * a remote wakeup is detected or an interface driver
+	 * starts I/O.
+	 */
+	if (udev->bus->skip_resume)
+		return 0;
+
 	/* For all calls, take the device back to full power and
 	 * tell the PM core in case it was autosuspended previously.
 	 * Unbind the interfaces that will need rebinding later,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f270ee9..d6a973b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3023,6 +3023,9 @@
 	cancel_work_sync(&hcd->wakeup_work);
 #endif
 
+	/* handle any pending hub events before XHCI stops */
+	usb_flush_hub_wq();
+
 	mutex_lock(&usb_bus_idr_lock);
 	usb_disconnect(&rhdev);		/* Sets rhdev to NULL */
 	mutex_unlock(&usb_bus_idr_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index aef81a1..ffa53d8 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -48,6 +48,11 @@
 /* synchronize hub-port add/remove and peering operations */
 DEFINE_MUTEX(usb_port_peer_mutex);
 
+static bool skip_extended_resume_delay = true;
+module_param(skip_extended_resume_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(skip_extended_resume_delay,
+		"removes extra delay added to finish bus resume");
+
 /* cycle leds on hubs that aren't blinking for attention */
 static bool blinkenlights;
 module_param(blinkenlights, bool, S_IRUGO);
@@ -627,6 +632,12 @@
 		kick_hub_wq(hub);
 }
 
+void usb_flush_hub_wq(void)
+{
+	flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
 /*
  * Let the USB core know that a USB 3.0 device has sent a Function Wake Device
  * Notification, which indicates it had initiated remote wakeup.
@@ -3398,7 +3409,9 @@
 		/* drive resume for USB_RESUME_TIMEOUT msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
 				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
-		msleep(USB_RESUME_TIMEOUT);
+		if (!skip_extended_resume_delay)
+			usleep_range(USB_RESUME_TIMEOUT * 1000,
+					(USB_RESUME_TIMEOUT + 1) * 1000);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
 		 * stop resume signaling.  Then finish the resume
@@ -3407,7 +3420,7 @@
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
 
 		/* TRSMRCY = 10 msec */
-		msleep(10);
+		usleep_range(10000, 10500);
 	}
 
  SuspendCleared:
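The msleep() to usleep_range() swaps in this hunk bound the wakeup slack: msleep() rounds up on the jiffy timer wheel, so a nominal 10 ms sleep can stretch well past that on low-HZ configs, while usleep_range() uses hrtimers with an explicit upper bound. A before/after sketch:

msleep(10);                 /* jiffy-based; can overshoot on low HZ */
usleep_range(10000, 10500); /* hrtimer-based; wakes within 10.0-10.5 ms */
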
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index e837536..d2fbed7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1428,17 +1428,39 @@
 	 */
 	if (!ncm_opts->bound) {
 		mutex_lock(&ncm_opts->lock);
+		ncm_opts->net = gether_setup_default();
+		if (IS_ERR(ncm_opts->net)) {
+			status = PTR_ERR(ncm_opts->net);
+			mutex_unlock(&ncm_opts->lock);
+			goto error;
+		}
 		gether_set_gadget(ncm_opts->net, cdev->gadget);
 		status = gether_register_netdev(ncm_opts->net);
 		mutex_unlock(&ncm_opts->lock);
-		if (status)
-			return status;
+		if (status) {
+			free_netdev(ncm_opts->net);
+			goto error;
+		}
 		ncm_opts->bound = true;
 	}
+
+	/* export host's Ethernet address in CDC format */
+	status = gether_get_host_addr_cdc(ncm_opts->net, ncm->ethaddr,
+				      sizeof(ncm->ethaddr));
+	if (status < 12) { /* strlen("01234567890a") */
+		ERROR(cdev, "%s: failed to get host eth addr, err %d\n",
+		      __func__, status);
+		status = -EINVAL;
+		goto netdev_cleanup;
+	}
+	ncm->port.ioport = netdev_priv(ncm_opts->net);
+
 	us = usb_gstrings_attach(cdev, ncm_strings,
 				 ARRAY_SIZE(ncm_string_defs));
-	if (IS_ERR(us))
-		return PTR_ERR(us);
+	if (IS_ERR(us)) {
+		status = PTR_ERR(us);
+		goto netdev_cleanup;
+	}
 	ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id;
 	ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id;
 	ncm_data_intf.iInterface = us[STRING_DATA_IDX].id;
@@ -1539,7 +1561,10 @@
 		kfree(ncm->notify_req->buf);
 		usb_ep_free_request(ncm->notify, ncm->notify_req);
 	}
+netdev_cleanup:
+	gether_cleanup(netdev_priv(ncm_opts->net));
 
+error:
 	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
 
 	return status;
@@ -1587,8 +1612,6 @@
 	opts = container_of(f, struct f_ncm_opts, func_inst);
 	if (opts->bound)
 		gether_cleanup(netdev_priv(opts->net));
-	else
-		free_netdev(opts->net);
 	kfree(opts);
 }
 
@@ -1601,12 +1624,6 @@
 		return ERR_PTR(-ENOMEM);
 	mutex_init(&opts->lock);
 	opts->func_inst.free_func_inst = ncm_free_inst;
-	opts->net = gether_setup_default();
-	if (IS_ERR(opts->net)) {
-		struct net_device *net = opts->net;
-		kfree(opts);
-		return ERR_CAST(net);
-	}
 
 	config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
 
@@ -1629,6 +1646,8 @@
 static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct f_ncm *ncm = func_to_ncm(f);
+	struct f_ncm_opts *opts = container_of(f->fi, struct f_ncm_opts,
+					func_inst);
 
 	DBG(c->cdev, "ncm unbind\n");
 
@@ -1640,13 +1659,15 @@
 
 	kfree(ncm->notify_req->buf);
 	usb_ep_free_request(ncm->notify, ncm->notify_req);
+
+	gether_cleanup(netdev_priv(opts->net));
+	opts->bound = false;
 }
 
 static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
 {
 	struct f_ncm		*ncm;
 	struct f_ncm_opts	*opts;
-	int status;
 
 	/* allocate and initialize one new instance */
 	ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1656,20 +1677,9 @@
 	opts = container_of(fi, struct f_ncm_opts, func_inst);
 	mutex_lock(&opts->lock);
 	opts->refcnt++;
-
-	/* export host's Ethernet address in CDC format */
-	status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
-				      sizeof(ncm->ethaddr));
-	if (status < 12) { /* strlen("01234567890a") */
-		kfree(ncm);
-		mutex_unlock(&opts->lock);
-		return ERR_PTR(-EINVAL);
-	}
 	ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
-
 	spin_lock_init(&ncm->lock);
 	ncm_reset_values(ncm);
-	ncm->port.ioport = netdev_priv(opts->net);
 	mutex_unlock(&opts->lock);
 	ncm->port.is_fixed = true;
 	ncm->port.supports_multi_frame = true;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index a84fe94..7558021 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1338,7 +1338,7 @@
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_RESUME);
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				msleep(USB_RESUME_TIMEOUT);
+				usleep_range(21000, 21500);
 				spin_lock_irqsave(&xhci->lock, flags);
 				xhci_set_link_state(xhci, port_array, wIndex,
 							XDEV_U0);
@@ -1619,7 +1619,7 @@
 
 	if (need_usb2_u3_exit) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		msleep(USB_RESUME_TIMEOUT);
+		usleep_range(21000, 21500);
 		spin_lock_irqsave(&xhci->lock, flags);
 	}
 
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 129bb3f..ec1f0b9 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -174,6 +174,8 @@
 	if (!hcd)
 		return -ENOMEM;
 
+	hcd_to_bus(hcd)->skip_resume = true;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(hcd->regs)) {
@@ -229,6 +231,8 @@
 		goto disable_clk;
 	}
 
+	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+
 	if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 1e55c1d..d52e335 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -53,152 +53,149 @@
 #define GCC_GPU_GPLL0_DIV_CLK_SRC				35
 #define GCC_GPU_MEMNOC_GFX_CLK					36
 #define GCC_GPU_SNOC_DVM_GFX_CLK				37
-#define GCC_MMSS_QM_AHB_CLK					38
-#define GCC_MMSS_QM_CORE_CLK					39
-#define GCC_MMSS_QM_CORE_CLK_SRC				40
-#define GCC_MSS_AXIS2_CLK					41
-#define GCC_MSS_CFG_AHB_CLK					42
-#define GCC_MSS_GPLL0_DIV_CLK_SRC				43
-#define GCC_MSS_MFAB_AXIS_CLK					44
-#define GCC_MSS_Q6_MEMNOC_AXI_CLK				45
-#define GCC_MSS_SNOC_AXI_CLK					46
-#define GCC_PCIE_0_AUX_CLK					47
-#define GCC_PCIE_0_AUX_CLK_SRC					48
-#define GCC_PCIE_0_CFG_AHB_CLK					49
-#define GCC_PCIE_0_CLKREF_CLK					50
-#define GCC_PCIE_0_MSTR_AXI_CLK					51
-#define GCC_PCIE_0_PIPE_CLK					52
-#define GCC_PCIE_0_SLV_AXI_CLK					53
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				54
-#define GCC_PCIE_1_AUX_CLK					55
-#define GCC_PCIE_1_AUX_CLK_SRC					56
-#define GCC_PCIE_1_CFG_AHB_CLK					57
-#define GCC_PCIE_1_CLKREF_CLK					58
-#define GCC_PCIE_1_MSTR_AXI_CLK					59
-#define GCC_PCIE_1_PIPE_CLK					60
-#define GCC_PCIE_1_SLV_AXI_CLK					61
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				62
-#define GCC_PCIE_PHY_AUX_CLK					63
-#define GCC_PCIE_PHY_REFGEN_CLK					64
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC				65
-#define GCC_PDM2_CLK						66
-#define GCC_PDM2_CLK_SRC					67
-#define GCC_PDM_AHB_CLK						68
-#define GCC_PDM_XO4_CLK						69
-#define GCC_PRNG_AHB_CLK					70
-#define GCC_QMIP_CAMERA_AHB_CLK					71
-#define GCC_QMIP_DISP_AHB_CLK					72
-#define GCC_QMIP_VIDEO_AHB_CLK					73
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				74
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_CORE_CLK				76
-#define GCC_QUPV3_WRAP0_S0_CLK					77
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				78
-#define GCC_QUPV3_WRAP0_S1_CLK					79
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				80
-#define GCC_QUPV3_WRAP0_S2_CLK					81
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				82
-#define GCC_QUPV3_WRAP0_S3_CLK					83
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				84
-#define GCC_QUPV3_WRAP0_S4_CLK					85
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				86
-#define GCC_QUPV3_WRAP0_S5_CLK					87
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				88
-#define GCC_QUPV3_WRAP0_S6_CLK					89
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				90
-#define GCC_QUPV3_WRAP0_S7_CLK					91
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				92
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				93
-#define GCC_QUPV3_WRAP1_CORE_CLK				94
-#define GCC_QUPV3_WRAP1_S0_CLK					95
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				96
-#define GCC_QUPV3_WRAP1_S1_CLK					97
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				98
-#define GCC_QUPV3_WRAP1_S2_CLK					99
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				100
-#define GCC_QUPV3_WRAP1_S3_CLK					101
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				102
-#define GCC_QUPV3_WRAP1_S4_CLK					103
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				104
-#define GCC_QUPV3_WRAP1_S5_CLK					105
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				106
-#define GCC_QUPV3_WRAP1_S6_CLK					107
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				108
-#define GCC_QUPV3_WRAP1_S7_CLK					109
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				110
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				111
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				112
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				113
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				114
-#define GCC_RX1_USB2_CLKREF_CLK					115
-#define GCC_RX2_QLINK_CLKREF_CLK				116
-#define GCC_RX3_MODEM_CLKREF_CLK				117
-#define GCC_SDCC2_AHB_CLK					118
-#define GCC_SDCC2_APPS_CLK					119
-#define GCC_SDCC2_APPS_CLK_SRC					120
-#define GCC_SDCC4_AHB_CLK					121
-#define GCC_SDCC4_APPS_CLK					122
-#define GCC_SDCC4_APPS_CLK_SRC					123
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				124
-#define GCC_TSIF_AHB_CLK					125
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				126
-#define GCC_TSIF_REF_CLK					127
-#define GCC_TSIF_REF_CLK_SRC					128
-#define GCC_UFS_CARD_AHB_CLK					129
-#define GCC_UFS_CARD_AXI_CLK					130
-#define GCC_UFS_CARD_AXI_CLK_SRC				131
-#define GCC_UFS_CARD_CLKREF_CLK					132
-#define GCC_UFS_CARD_ICE_CORE_CLK				133
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				134
-#define GCC_UFS_CARD_PHY_AUX_CLK				135
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				136
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				137
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				138
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				139
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				140
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			141
-#define GCC_UFS_MEM_CLKREF_CLK					142
-#define GCC_UFS_PHY_AHB_CLK					143
-#define GCC_UFS_PHY_AXI_CLK					144
-#define GCC_UFS_PHY_AXI_CLK_SRC					145
-#define GCC_UFS_PHY_ICE_CORE_CLK				146
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				147
-#define GCC_UFS_PHY_PHY_AUX_CLK					148
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				149
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				150
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				151
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				152
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				153
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				154
-#define GCC_USB30_PRIM_MASTER_CLK				155
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				156
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				157
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			158
-#define GCC_USB30_PRIM_SLEEP_CLK				159
-#define GCC_USB30_SEC_MASTER_CLK				160
-#define GCC_USB30_SEC_MASTER_CLK_SRC				161
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				162
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				163
-#define GCC_USB30_SEC_SLEEP_CLK					164
-#define GCC_USB3_PRIM_CLKREF_CLK				165
-#define GCC_USB3_PRIM_PHY_AUX_CLK				166
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				167
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				168
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				169
-#define GCC_USB3_SEC_CLKREF_CLK					170
-#define GCC_USB3_SEC_PHY_AUX_CLK				171
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				172
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				173
-#define GCC_USB3_SEC_PHY_PIPE_CLK				174
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				175
-#define GCC_VIDEO_AHB_CLK					176
-#define GCC_VIDEO_AXI_CLK					177
-#define GCC_VIDEO_XO_CLK					178
-#define GPLL0							179
-#define GPLL0_OUT_EVEN						180
-#define GPLL0_OUT_MAIN						181
-#define GPLL1							182
-#define GPLL1_OUT_MAIN						183
+#define GCC_MSS_AXIS2_CLK					38
+#define GCC_MSS_CFG_AHB_CLK					39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				40
+#define GCC_MSS_MFAB_AXIS_CLK					41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				42
+#define GCC_MSS_SNOC_AXI_CLK					43
+#define GCC_PCIE_0_AUX_CLK					44
+#define GCC_PCIE_0_AUX_CLK_SRC					45
+#define GCC_PCIE_0_CFG_AHB_CLK					46
+#define GCC_PCIE_0_CLKREF_CLK					47
+#define GCC_PCIE_0_MSTR_AXI_CLK					48
+#define GCC_PCIE_0_PIPE_CLK					49
+#define GCC_PCIE_0_SLV_AXI_CLK					50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				51
+#define GCC_PCIE_1_AUX_CLK					52
+#define GCC_PCIE_1_AUX_CLK_SRC					53
+#define GCC_PCIE_1_CFG_AHB_CLK					54
+#define GCC_PCIE_1_CLKREF_CLK					55
+#define GCC_PCIE_1_MSTR_AXI_CLK					56
+#define GCC_PCIE_1_PIPE_CLK					57
+#define GCC_PCIE_1_SLV_AXI_CLK					58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				59
+#define GCC_PCIE_PHY_AUX_CLK					60
+#define GCC_PCIE_PHY_REFGEN_CLK					61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				62
+#define GCC_PDM2_CLK						63
+#define GCC_PDM2_CLK_SRC					64
+#define GCC_PDM_AHB_CLK						65
+#define GCC_PDM_XO4_CLK						66
+#define GCC_PRNG_AHB_CLK					67
+#define GCC_QMIP_CAMERA_AHB_CLK					68
+#define GCC_QMIP_DISP_AHB_CLK					69
+#define GCC_QMIP_VIDEO_AHB_CLK					70
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_CORE_CLK				73
+#define GCC_QUPV3_WRAP0_S0_CLK					74
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
+#define GCC_QUPV3_WRAP0_S1_CLK					76
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S2_CLK					78
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S3_CLK					80
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S4_CLK					82
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S5_CLK					84
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S6_CLK					86
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S7_CLK					88
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
+#define GCC_QUPV3_WRAP1_CORE_CLK				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_RX1_USB2_CLKREF_CLK					112
+#define GCC_RX2_QLINK_CLKREF_CLK				113
+#define GCC_RX3_MODEM_CLKREF_CLK				114
+#define GCC_SDCC2_AHB_CLK					115
+#define GCC_SDCC2_APPS_CLK					116
+#define GCC_SDCC2_APPS_CLK_SRC					117
+#define GCC_SDCC4_AHB_CLK					118
+#define GCC_SDCC4_APPS_CLK					119
+#define GCC_SDCC4_APPS_CLK_SRC					120
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
+#define GCC_TSIF_AHB_CLK					122
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
+#define GCC_TSIF_REF_CLK					124
+#define GCC_TSIF_REF_CLK_SRC					125
+#define GCC_UFS_CARD_AHB_CLK					126
+#define GCC_UFS_CARD_AXI_CLK					127
+#define GCC_UFS_CARD_AXI_CLK_SRC				128
+#define GCC_UFS_CARD_CLKREF_CLK					129
+#define GCC_UFS_CARD_ICE_CORE_CLK				130
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
+#define GCC_UFS_CARD_PHY_AUX_CLK				132
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
+#define GCC_UFS_MEM_CLKREF_CLK					139
+#define GCC_UFS_PHY_AHB_CLK					140
+#define GCC_UFS_PHY_AXI_CLK					141
+#define GCC_UFS_PHY_AXI_CLK_SRC					142
+#define GCC_UFS_PHY_ICE_CORE_CLK				143
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
+#define GCC_UFS_PHY_PHY_AUX_CLK					145
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
+#define GCC_USB30_PRIM_MASTER_CLK				152
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
+#define GCC_USB30_PRIM_SLEEP_CLK				156
+#define GCC_USB30_SEC_MASTER_CLK				157
+#define GCC_USB30_SEC_MASTER_CLK_SRC				158
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
+#define GCC_USB30_SEC_SLEEP_CLK					161
+#define GCC_USB3_PRIM_CLKREF_CLK				162
+#define GCC_USB3_PRIM_PHY_AUX_CLK				163
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
+#define GCC_USB3_SEC_CLKREF_CLK					167
+#define GCC_USB3_SEC_PHY_AUX_CLK				168
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
+#define GCC_USB3_SEC_PHY_PIPE_CLK				171
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
+#define GCC_VIDEO_AHB_CLK					173
+#define GCC_VIDEO_AXI_CLK					174
+#define GCC_VIDEO_XO_CLK					175
+#define GPLL0							176
+#define GPLL0_OUT_EVEN						177
+#define GPLL0_OUT_MAIN						178
+#define GPLL1							179
+#define GPLL1_OUT_MAIN						180
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index cc57986..23beb58 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -412,6 +412,7 @@
 
 #define CPUFREQ_TRANSITION_NOTIFIER	(0)
 #define CPUFREQ_POLICY_NOTIFIER		(1)
+#define CPUFREQ_GOVINFO_NOTIFIER	(2)
 
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE		(0)
@@ -424,6 +425,9 @@
 #define CPUFREQ_CREATE_POLICY		(3)
 #define CPUFREQ_REMOVE_POLICY		(4)
 
+/* Govinfo notifiers */
+#define CPUFREQ_LOAD_CHANGE		(0)
+
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
@@ -432,6 +436,16 @@
 		struct cpufreq_freqs *freqs);
 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed);
+/*
+ * Governor-specific info passed to modules that subscribe to
+ * CPUFREQ_GOVINFO_NOTIFIER.
+ */
+struct cpufreq_govinfo {
+	unsigned int cpu;
+	unsigned int load;
+	unsigned int sampling_rate_us;
+};
+extern struct atomic_notifier_head cpufreq_govinfo_notifier_list;
 
 #else /* CONFIG_CPU_FREQ */
 static inline int cpufreq_register_notifier(struct notifier_block *nb,
@@ -584,6 +598,9 @@
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
 extern struct cpufreq_governor cpufreq_gov_conservative;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE)
+extern struct cpufreq_governor cpufreq_gov_interactive;
+#define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_interactive)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
 extern struct cpufreq_governor cpufreq_gov_sched;
 #define CPUFREQ_DEFAULT_GOVERNOR	(&cpufreq_gov_sched)
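
The govinfo chain added above is exported directly, so a consumer can attach
to it with the stock atomic-notifier API. A minimal sketch, assuming a
hypothetical module-side callback (none of the names below come from this
patch):

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int example_govinfo_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct cpufreq_govinfo *info = data;

	if (event != CPUFREQ_LOAD_CHANGE)
		return NOTIFY_DONE;

	pr_debug("cpu%u load=%u sampling=%uus\n",
		 info->cpu, info->load, info->sampling_rate_us);
	return NOTIFY_OK;
}

static struct notifier_block example_govinfo_nb = {
	.notifier_call = example_govinfo_cb,
};

/* e.g. from module init:
 * atomic_notifier_chain_register(&cpufreq_govinfo_notifier_list,
 *				  &example_govinfo_nb);
 */

Being an atomic chain, the callback may run in a non-sleeping context and
must not block.
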
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index b9337de..7f395e3 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -45,6 +45,7 @@
 	CPUHP_POWERPC_MMU_CTX_PREPARE,
 	CPUHP_XEN_PREPARE,
 	CPUHP_XEN_EVTCHN_PREPARE,
+	CPUHP_QCOM_CPUFREQ_PREPARE,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_ARM_SHMOBILE_SCU_PREPARE,
 	CPUHP_SH_SH3X_PREPARE,
@@ -86,6 +87,7 @@
 	CPUHP_AP_METAG_TIMER_STARTING,
 	CPUHP_AP_QCOM_TIMER_STARTING,
 	CPUHP_AP_QCOM_SLEEP_STARTING,
+	CPUHP_AP_QCOM_CPUFREQ_STARTING,
 	CPUHP_AP_ARMADA_TIMER_STARTING,
 	CPUHP_AP_MARCO_TIMER_STARTING,
 	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
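
The two new states reserve slots in the hotplug sequence for a QCOM cpufreq
driver: CPUHP_QCOM_CPUFREQ_PREPARE runs on the control CPU before the target
CPU boots, while CPUHP_AP_QCOM_CPUFREQ_STARTING runs on the starting CPU
itself. A hedged sketch of claiming the PREPARE slot (the callbacks and the
state string are illustrative, not part of this patch):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int example_cpufreq_prepare(unsigned int cpu)
{
	/* set up per-CPU state before @cpu is brought online */
	return 0;
}

static int example_cpufreq_dead(unsigned int cpu)
{
	/* tear that state down once @cpu is fully offline */
	return 0;
}

static int __init example_hp_init(void)
{
	return cpuhp_setup_state(CPUHP_QCOM_CPUFREQ_PREPARE,
				 "qcom/cpufreq:prepare",
				 example_cpufreq_prepare,
				 example_cpufreq_dead);
}
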
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7e56a00..82e5ecd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -396,6 +396,15 @@
 	struct mon_bus *mon_bus;	/* non-null when associated */
 	int monitored;			/* non-zero when monitored */
 #endif
+	unsigned skip_resume:1;		/* By default, all USB devices are
+					 * brought back to full power on
+					 * system resume. Some buses want
+					 * their devices to stay suspended
+					 * across system resume; such
+					 * devices are resumed later, when
+					 * a remote wakeup is detected or
+					 * an interface driver starts I/O.
+					 */
 };
 
 struct usb_dev_state;
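
A bus opts in by setting the new flag on its struct usb_bus. A hedged sketch
of how a host-controller driver might do so (the helper is illustrative, not
from this patch):

#include <linux/usb.h>
#include <linux/usb/hcd.h>

static void example_hcd_init(struct usb_hcd *hcd)
{
	/* keep this bus's devices suspended across system resume; they
	 * are resumed on remote wakeup or the first interface-driver I/O
	 */
	hcd->self.skip_resume = 1;
}
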
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index f58f62c..00ea49a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -503,7 +503,7 @@
 extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
 extern void usb_wakeup_notification(struct usb_device *hdev,
 		unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
 extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
 extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
 
diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h
index d66d44c..262fa64 100644
--- a/include/media/msm_vidc.h
+++ b/include/media/msm_vidc.h
@@ -105,7 +105,8 @@
 int msm_vidc_s_ext_ctrl(void *instance, struct v4l2_ext_controls *a);
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *a);
 int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b);
-int msm_vidc_release_buffers(void *instance, int buffer_type);
+int msm_vidc_release_buffer(void *instance, int buffer_type,
+		unsigned int buffer_index);
 int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b);
 int msm_vidc_streamon(void *instance, enum v4l2_buf_type i);
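
Callers that used the old bulk msm_vidc_release_buffers() must now release
buffers one index at a time. A hedged sketch of the adaptation (the wrapper
and its arguments are illustrative):

#include <media/msm_vidc.h>

static void example_release_all(void *inst, int buffer_type,
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		msm_vidc_release_buffer(inst, buffer_type, i);
}
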
diff --git a/include/soc/qcom/devfreq_devbw.h b/include/soc/qcom/devfreq_devbw.h
new file mode 100644
index 0000000..7edb2ab
--- /dev/null
+++ b/include/soc/qcom/devfreq_devbw.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DEVFREQ_DEVBW_H
+#define _DEVFREQ_DEVBW_H
+
+#include <linux/devfreq.h>
+
+#ifdef CONFIG_MSM_DEVFREQ_DEVBW
+int devfreq_add_devbw(struct device *dev);
+int devfreq_remove_devbw(struct device *dev);
+int devfreq_suspend_devbw(struct device *dev);
+int devfreq_resume_devbw(struct device *dev);
+#else
+static inline int devfreq_add_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_remove_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_suspend_devbw(struct device *dev)
+{
+	return 0;
+}
+static inline int devfreq_resume_devbw(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+#endif /* _DEVFREQ_DEVBW_H */
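
A hedged sketch of a client driver wiring these helpers into its probe and
PM paths (the driver is illustrative; note that every helper degrades to a
stub returning 0 when CONFIG_MSM_DEVFREQ_DEVBW is disabled):

#include <linux/platform_device.h>
#include <soc/qcom/devfreq_devbw.h>

static int example_probe(struct platform_device *pdev)
{
	/* register a bandwidth-voting devfreq device for this device */
	return devfreq_add_devbw(&pdev->dev);
}

static int example_remove(struct platform_device *pdev)
{
	return devfreq_remove_devbw(&pdev->dev);
}

static int example_suspend(struct device *dev)
{
	/* quiesce the devfreq device across suspend */
	return devfreq_suspend_devbw(dev);
}

static int example_resume(struct device *dev)
{
	return devfreq_resume_devbw(dev);
}
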
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
index faecc0b..61992e6 100644
--- a/include/trace/events/cpufreq_interactive.h
+++ b/include/trace/events/cpufreq_interactive.h
@@ -8,102 +8,138 @@
 
 DECLARE_EVENT_CLASS(set,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	         unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq),
 
 	TP_STRUCT__entry(
-		__field(u32, cpu_id)
-		__field(unsigned long, targfreq)
-		__field(unsigned long, actualfreq)
-	),
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
 
 	TP_fast_assign(
-		__entry->cpu_id = (u32)cpu_id;
-		__entry->targfreq = targfreq;
-		__entry->actualfreq = actualfreq;
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
 	),
 
 	TP_printk("cpu=%u targ=%lu actual=%lu",
-		__entry->cpu_id, __entry->targfreq,
-		__entry->actualfreq)
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
 );
 
 DEFINE_EVENT(set, cpufreq_interactive_setspeed,
 	TP_PROTO(u32 cpu_id, unsigned long targfreq,
-		 unsigned long actualfreq),
+	     unsigned long actualfreq),
 	TP_ARGS(cpu_id, targfreq, actualfreq)
 );
 
 DECLARE_EVENT_CLASS(loadeval,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
 
-	TP_STRUCT__entry(
-		__field(unsigned long, cpu_id)
-		__field(unsigned long, load)
-		__field(unsigned long, curtarg)
-		__field(unsigned long, curactual)
-		__field(unsigned long, newtarg)
-	),
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
 
-	TP_fast_assign(
-		__entry->cpu_id = cpu_id;
-		__entry->load = load;
-		__entry->curtarg = curtarg;
-		__entry->curactual = curactual;
-		__entry->newtarg = newtarg;
-	),
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
 
-	TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
-		  __entry->cpu_id, __entry->load, __entry->curtarg,
-		  __entry->curactual, __entry->newtarg)
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_target,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_already,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
-	TP_PROTO(unsigned long cpu_id, unsigned long load,
-		 unsigned long curtarg, unsigned long curactual,
-		 unsigned long newtarg),
-	TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
 );
 
 TRACE_EVENT(cpufreq_interactive_boost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
 );
 
 TRACE_EVENT(cpufreq_interactive_unboost,
-	TP_PROTO(const char *s),
-	TP_ARGS(s),
-	TP_STRUCT__entry(
-		__string(s, s)
-	),
-	TP_fast_assign(
-		__assign_str(s, s);
-	),
-	TP_printk("%s", __get_str(s))
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_load_change,
+	    TP_PROTO(unsigned long cpu_id),
+	    TP_ARGS(cpu_id),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	    ),
+	    TP_printk("re-evaluate for cpu=%lu", __entry->cpu_id)
+);
+
+TRACE_EVENT(cpufreq_interactive_cpuload,
+	    TP_PROTO(unsigned long cpu_id, unsigned int load,
+		     unsigned int new_task_pct, unsigned int prev,
+		     unsigned int predicted),
+	    TP_ARGS(cpu_id, load, new_task_pct, prev, predicted),
+	    TP_STRUCT__entry(
+		__field(unsigned long, cpu_id)
+		__field(unsigned int, load)
+		__field(unsigned int, new_task_pct)
+		__field(unsigned int, prev)
+		__field(unsigned int, predicted)
+	    ),
+	    TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+		__entry->load = load;
+		__entry->new_task_pct = new_task_pct;
+		__entry->prev = prev;
+		__entry->predicted = predicted;
+	    ),
+	    TP_printk("cpu=%lu load=%u new_task_pct=%u prev=%u predicted=%u",
+		      __entry->cpu_id, __entry->load, __entry->new_task_pct,
+		      __entry->prev, __entry->predicted)
 );
 
 #endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
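
The governor would emit the two new events from its load-evaluation path.
A hedged sketch (the function and its arguments are illustrative, not part
of this patch):

#include <trace/events/cpufreq_interactive.h>

static void example_record_sample(unsigned long cpu, unsigned int load,
				  unsigned int new_task_pct,
				  unsigned int prev, unsigned int predicted)
{
	/* raw per-CPU load sample, before a target frequency is chosen */
	trace_cpufreq_interactive_cpuload(cpu, load, new_task_pct,
					  prev, predicted);

	/* an external notification forced a re-evaluation for this CPU */
	trace_cpufreq_interactive_load_change(cpu);
}
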
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index ec6f815..3354d4e 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -179,6 +179,48 @@
 	TP_ARGS(capacity, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_switch_start,
+
+	TP_PROTO(unsigned int start_freq, unsigned int end_freq,
+		 unsigned int cpu_id),
+
+	TP_ARGS(start_freq, end_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		start_freq	)
+		__field(	u32,		end_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->start_freq = start_freq;
+		__entry->end_freq = end_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("start=%lu end=%lu cpu_id=%lu",
+		  (unsigned long)__entry->start_freq,
+		  (unsigned long)__entry->end_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
+TRACE_EVENT(cpu_frequency_switch_end,
+
+	TP_PROTO(unsigned int cpu_id),
+
+	TP_ARGS(cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
 	TP_PROTO(struct device *dev, const char *pm_ops, int event),
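
Unlike the existing cpu_frequency event, which logs only a completed change,
this pair brackets the transition so its latency is visible in a trace. A
hedged sketch of a cpufreq driver emitting them (the function is
illustrative):

#include <trace/events/power.h>

static int example_set_freq(unsigned int cpu, unsigned int old_khz,
			    unsigned int new_khz)
{
	trace_cpu_frequency_switch_start(old_khz, new_khz, cpu);
	/* ... reprogram the mux/PLL for @cpu here ... */
	trace_cpu_frequency_switch_end(cpu);
	return 0;
}
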
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c104244..fb882f5 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -279,8 +279,10 @@
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
 #define DRM_MSM_GEM_MADVISE            0x08
-#define DRM_SDE_WB_CONFIG              0x08
-#define DRM_MSM_NUM_IOCTLS             0x09
+
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)