Merge "i2c: qcom: geni: Fix the way stretch stop is done" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
new file mode 100644
index 0000000..964fea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -0,0 +1,476 @@
+Qualcomm Technologies, Inc. OSM Bindings
+
+Operating State Manager (OSM) is a hardware engine used by some Qualcomm
+Technologies, Inc. (QTI) SoCs to manage frequency and voltage scaling
+in hardware. OSM controls frequency and voltage requests for multiple
+clusters through separate per-cluster OSM domains.
+
+Properties:
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: must be "qcom,clk-cpu-osm".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the memory of the OSM controller,
+		    cluster PLL management, and APCS common register regions.
+		    Optionally, the addresses of the efuse registers used to
+		    determine the pwrcl or perfcl speed-bins and/or the ACD
+		    register space to initialize prior to enabling OSM.
+
+- reg-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
+		    "osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
+		    "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer" or
+		    "apps_itm_ctl". Optionally, "l3_efuse", "pwrcl_efuse"
+		    "perfcl_efuse".
+		    Must be specified in the same order as the corresponding
+		    addresses are specified in the reg property.
+
+- vdd-l3-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the L3 cluster.
+
+- vdd-pwrcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Power cluster.
+
+- vdd-perfcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Performance cluster.
+
+- interrupts
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: OSM interrupt specifier.
+
+- interrupt-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Interrupt names. This list must match 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+		    "pwrcl-irq" and "perfcl-irq" must be specified.
+
+- qcom,l3-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency
+		    data, PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the L3 cluster.
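+		    For example, the first L3 entry in the example at the
+		    end of this document,
+		    < 300000000 0x000c000f 0x00002020 0x1 1 >, describes a
+		    300 MHz setpoint with frequency data 0x000c000f, PLL
+		    override data 0x00002020, ACC level 0x1 and virtual
+		    corner 1.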
+
+- qcom,pwrcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency
+		    data, PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Power cluster.
+
+- qcom,perfcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency
+		    data, PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Performance cluster.
+
+- qcom,l3-min-cpr-vc-binX
+	Usage:	    required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the L3 clock domain.
+
+- qcom,pwrcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the power cluster.
+
+- qcom,perfcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the performance cluster.
+
+- qcom,osm-no-tz
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the OSM hardware is not
+		    programmed by the secure world.
+
+- qcom,osm-pll-setup
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the PLL setup sequence
+		    must be executed for each clock domain managed by the OSM
+		    controller.
+
+- qcom,up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS up timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS down timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,pc-override-index
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the OSM performance index to be used
+		    when each cluster enters certain low power modes.
+
+- qcom,set-ret-inactive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if domains in retention must
+		    be treated as inactive.
+
+- qcom,enable-llm-freq-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware frequency
+		    votes must be honored by OSM.
+
+- qcom,llm-freq-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-freq-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,enable-llm-volt-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware voltage
+		    votes must be honored by OSM.
+
+- qcom,llm-volt-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-volt-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,cc-reads
+	Usage:      optional
+	Value type: <integer>
+	Definition: Defines the number of times the cycle counters must be
+		    read to determine the performance level of each clock
+		    domain.
+
+- qcom,l-val-base
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the L_VAL
+		    control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,apcs-pll-user-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the PLL
+		    user control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,perfcl-apcs-apm-threshold-voltage
+	Usage:      required
+	Value type: <u32>
+	Definition: Specifies the APM threshold voltage in microvolts.  If the
+		    VDD_APCC supply voltage is above or at this level, then the
+		    APM is switched to use VDD_APCC.  If VDD_APCC is below
+		    this level, then the APM is switched to use VDD_MX.
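+		    For example, with the 800000 uV threshold used in the
+		    example below, the APM uses VDD_APCC at 800 mV and
+		    above, and VDD_MX below 800 mV.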
+
+- qcom,apm-mode-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,apm-status-ctrl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    controller status register for each of the three clock
+		    domains managed by the OSM controller.
+
+- qcom,perfcl-isense-addr
+	Usage:      required
+	Value type: <u32>
+	Definition: Contains the ISENSE register address.
+
+- qcom,l3-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the L3 cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Power cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Performance cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Power cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,perfcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Performance cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,apcs-cbc-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_CBC_ADDR
+		    registers for all three clock domains.
+
+- qcom,apcs-ramp-ctl-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_RAMP_CTL_ADDR
+		    registers for all three clock domains.
+
+- qcom,red-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the reduction FSM
+		    should be enabled.
+
+- qcom,boost-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the boost FSM should
+		    be enabled.
+
+- qcom,safe-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the safe FSM should
+		    be enabled.
+
+- qcom,ps-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the PS FSM should be
+		    enabled.
+
+- qcom,droop-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the droop FSM should
+		    be enabled.
+
+- qcom,set-c3-active
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C3 are to
+		    be treated as active for core count calculations.
+
+- qcom,set-c2-active
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C2 are to
+		    be treated as active for core count calculations.
+
+- qcom,disable-cc-dvcs
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if core count based DCVS is
+		    to be disabled.
+
+- qcom,apcs-pll-min-freq
+	Usage:	    required
+	Value type: <prop-encoded-array>
+	Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
+		    registers for the three clock domains.
+
+- clock-names
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "aux_clk".
+
+- clocks
+	Usage:      required
+	Value type: <phandle>
+	Definition: Phandle to the XO clock device.
+
+Example:
+	clock_cpucc: qcom,cpucc@0x17d41000 {
+		compatible = "qcom,clk-cpu-osm";
+		reg = <0x17d41000 0x1400>,
+			<0x17d43000 0x1400>,
+			<0x17d45800 0x1400>,
+			<0x178d0000 0x1000>,
+			<0x178c0000 0x1000>,
+			<0x178b0000 0x1000>,
+			<0x17d42400 0x0c00>,
+			<0x17d44400 0x0c00>,
+			<0x17d46c00 0x0c00>,
+			<0x17810090 0x8>;
+		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+			"l3_pll", "pwrcl_pll", "perfcl_pll",
+			"l3_sequencer", "pwrcl_sequencer",
+			"perfcl_sequencer", "apps_itm_ctl";
+
+		vdd-l3-supply = <&apc0_l3_vreg>;
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		qcom,l3-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >;
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x1 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x1 14 >,
+			<  1440000000 0x403c0e4b 0x00003c3c 0x1 15 >,
+			<  1516800000 0x403c0f4f 0x00004040 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004343 0x2 17 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x1 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x1 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x2 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x2 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x2 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >;
+
+		qcom,l3-min-cpr-vc-bin0 = <7>;
+		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+		qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+		qcom,up-timer =
+			<1000 1000 1000>;
+		qcom,down-timer =
+			<100000 100000 100000>;
+		qcom,pc-override-index =
+			<0 0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-freq-down-timer =
+			<327675 327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-volt-down-timer =
+			<327675 327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <100000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x178d0004 0x178c0004 0x178b0004>;
+		qcom,apcs-pll-user-ctl =
+			<0x178d000c 0x178c000c 0x178b000c>;
+		qcom,apcs-pll-min-freq =
+			<0x17d41094 0x17d43094 0x17d45894>;
+		qcom,apm-mode-ctl =
+			<0x0 0x0 0x17d20010>;
+		qcom,apm-status-ctrl =
+			<0x0 0x0 0x17d20000>;
+		qcom,perfcl-isense-addr = <0x17871480>;
+		qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+		qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+		qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+		qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+		qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+		qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+		qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+		qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,osm-no-tz;
+		qcom,osm-pll-setup;
+
+		clock-names = "xo_ao";
+		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
new file mode 100644
index 0000000..9427123
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/msm-cpufreq.txt
@@ -0,0 +1,47 @@
+Qualcomm MSM CPUfreq device
+
+msm-cpufreq is a device that represents the list of usable CPU frequencies
+and provides a device handle for the CPUfreq driver to get the CPU and cache
+clocks.
+
+Required properties:
+- compatible:		Must be "qcom,msm-cpufreq"
+- qcom,cpufreq-table, or qcom,cpufreq-table-<X>:
+			A list of usable CPU frequencies (kHz).
+			Use "qcom,cpufreq-table" if all CPUs in the system
+			should share the same list of frequencies.
+			Use "qcom,cpufreq-table-<cpuid>" to describe
+			different CPU freq tables for different CPUs.
+			The table should be listed only for the first CPU
+			if multiple CPUs are synchronous.
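+			For example, the sdm845 device tree in this patch
+			provides only "qcom,cpufreq-table-0" (CPUs 0-3) and
+			"qcom,cpufreq-table-4" (CPUs 4-7), one table per
+			synchronous cluster.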
+
+Optional properties:
+- clock-names:		When DT based binding of clock is available, this
+			provides a list of CPU subsystem clocks.
+			"cpuX_clk" for every CPU that's present.
+			"l2_clk" when an async cache/CCI is present.
+
+- qcom,governor-per-policy:	This property denotes that governor tunables
+				should be associated with each cpufreq policy
+				group instead of being global.
+
+Example:
+	qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		qcom,cpufreq-table =
+			<  300000 >,
+			<  422400 >,
+			<  652800 >,
+			<  729600 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1728000 >,
+			< 1958400 >,
+			< 2265600 >;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index b6bc475..058dab1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -89,6 +89,10 @@
 				entries by name.
 - cache-slices:			The tuple has phandle to llcc device as the first argument and the
 				second argument is the usecase id of the client.
+- qcom,sde-ubwc-malsize:	A u32 property to specify the default UBWC
+				minimum allowable length configuration value.
+- qcom,sde-ubwc-swizzle:	A u32 property to specify the default UBWC
+				swizzle configuration value.
 
 Subnode properties:
 - compatible:		Compatible name used in smmu v2.
@@ -126,6 +130,8 @@
 		clock-names = "iface_clk", "rot_core_clk";
 
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <0>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -144,7 +150,7 @@
 
 		qcom,mdss-sbuf-headroom = <20>;
 		cache-slice-names = "rotator";
-		cache-slices = <&llcc 3>;
+		cache-slices = <&llcc 4>;
 
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index b157e04..c5340a8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -127,6 +127,8 @@
 		qcom,sde-vbif-size = <0x1040>;
 		qcom,sde-vbif-id = <0>;
 
+		qcom,sde-inline-rotator = <&mdss_rotator 0>;
+
 		qcom,sde-sspp-vig-blocks {
 			qcom,sde-vig-csc-off = <0x1a00>;
 			qcom,sde-vig-qseed-off = <0xa00>;
@@ -204,8 +206,12 @@
 		reg-names = "mdp_phys",
 			"rot_vbif_phys";
 
+		#list-cells = <1>;
+
 		qcom,mdss-rot-mode = <1>;
 		qcom,mdss-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-malsize = <1>;
+		qcom,sde-ubwc-swizzle = <1>;
 
 		/* Bus Scale Settings */
 		qcom,msm-bus,name = "mdss_rotator";
@@ -238,6 +244,11 @@
 		qcom,mdss-default-ot-rd-limit = <32>;
 		qcom,mdss-default-ot-wr-limit = <32>;
 
+		qcom,mdss-sbuf-headroom = <20>;
+
+		cache-slice-names = "rotator";
+		cache-slices = <&llcc 4>;
+
 		smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 			compatible = "qcom,smmu_sde_rot_unsec";
 			iommus = <&apps_smmu 0x1090>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index b2382d1..5c33436 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -500,6 +500,58 @@
 		cell-index = <0>;
 	};
 
+	msm_cpufreq: qcom,msm-cpufreq {
+		compatible = "qcom,msm-cpufreq";
+		clock-names = "cpu0_clk", "cpu4_clk";
+		clocks = <&clock_cpucc CPU0_PWRCL_CLK>,
+			 <&clock_cpucc CPU4_PERFCL_CLK>;
+
+		qcom,governor-per-policy;
+
+		qcom,cpufreq-table-0 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  748800 >,
+			<  825600 >,
+			<  902400 >,
+			<  979200 >,
+			< 1056000 >,
+			< 1132800 >,
+			< 1209600 >,
+			< 1286400 >,
+			< 1363200 >,
+			< 1440000 >,
+			< 1516800 >,
+			< 1593600 >;
+
+		qcom,cpufreq-table-4 =
+			<  300000 >,
+			<  422400 >,
+			<  499200 >,
+			<  576000 >,
+			<  652800 >,
+			<  729600 >,
+			<  806400 >,
+			<  883200 >,
+			<  960000 >,
+			< 1036800 >,
+			< 1113600 >,
+			< 1190400 >,
+			< 1267200 >,
+			< 1344000 >,
+			< 1420800 >,
+			< 1497600 >,
+			< 1574400 >,
+			< 1651200 >,
+			< 1728000 >,
+			< 1804800 >,
+			< 1881600 >,
+			< 1958400 >;
+	};
+
 	clock_gcc: qcom,gcc@100000 {
 		compatible = "qcom,gcc-sdm845";
 		reg = <0x100000 0x1f0000>;
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index a1db0fc..2e72456 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -17,6 +17,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 944ae1a..688e130 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -23,6 +23,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d327826..31ea544 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -204,3 +204,15 @@
 	  SoCs. It accepts requests from other hardware subsystems via RSC.
 	  Say Y to support the clocks managed by RPMh VRM/ARC on platforms
 	  such as sdm845.
+
+config CLOCK_CPU_OSM
+	tristate "OSM CPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	 Support for the OSM clock controller.
+	 Operating State Manager (OSM) is a hardware engine used by some
+	 Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency and
+	 voltage scaling in hardware. OSM controls frequency and
+	 voltage requests for multiple clusters through separate
+	 per-cluster OSM domains.
+	 Say Y if you want to support OSM clocks.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ebcf4fc..d52a751 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -16,23 +16,24 @@
 # Keep alphabetically sorted by config
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o
+obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
 obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
 obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
-obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
-obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
-obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
-obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
new file mode 100644
index 0000000..d5e2be6
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -0,0 +1,2619 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <soc/qcom/scm.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-voter.h"
+
+#define OSM_TABLE_SIZE			40
+#define SINGLE_CORE			1
+#define MAX_CLUSTER_CNT			3
+#define MAX_MEM_ACC_VAL_PER_LEVEL	3
+#define MAX_CORE_COUNT			4
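+/* Core count is encoded in bits [18:16] of a LUT FREQ_DATA word. */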
+#define CORE_COUNT_VAL(val)		(((val) & GENMASK(18, 16)) >> 16)
+
+#define OSM_CYCLE_COUNTER_CTRL_REG		0x760
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN	BIT(8)
+
+#define OSM_REG_SIZE			32
+
+#define L3_EFUSE_SHIFT			0
+#define L3_EFUSE_MASK			0
+#define PWRCL_EFUSE_SHIFT		0
+#define PWRCL_EFUSE_MASK		0
+#define PERFCL_EFUSE_SHIFT		29
+#define PERFCL_EFUSE_MASK		0x7
+
+#define ENABLE_REG			0x0
+#define FREQ_REG			0x110
+#define VOLT_REG			0x114
+#define OVERRIDE_REG			0x118
+#define SPM_CC_INC_HYSTERESIS		0x1c
+#define SPM_CC_DEC_HYSTERESIS		0x20
+#define SPM_CORE_INACTIVE_MAPPING	0x28
+#define CC_ZERO_BEHAV_CTRL		0xc
+#define ENABLE_OVERRIDE			BIT(0)
+#define SPM_CC_DCVS_DISABLE		0x24
+#define LLM_FREQ_VOTE_INC_HYSTERESIS	0x30
+#define LLM_FREQ_VOTE_DEC_HYSTERESIS	0x34
+#define LLM_INTF_DCVS_DISABLE		0x40
+#define LLM_VOLTAGE_VOTE_INC_HYSTERESIS	0x38
+#define LLM_VOLTAGE_VOTE_DEC_HYSTERESIS	0x3c
+#define VMIN_REDUCTION_ENABLE_REG	0x48
+#define VMIN_REDUCTION_TIMER_REG	0x4c
+#define PDN_FSM_CTRL_REG		0x54
+#define DELTA_DEX_VAL			BVAL(31, 23, 0xa)
+#define IGNORE_PLL_LOCK			BIT(15)
+#define CC_BOOST_FSM_EN			BIT(0)
+#define CC_BOOST_FSM_TIMERS_REG0	0x58
+#define CC_BOOST_FSM_TIMERS_REG1	0x5c
+#define CC_BOOST_FSM_TIMERS_REG2	0x60
+#define DCVS_BOOST_FSM_EN_MASK		BIT(2)
+#define DCVS_BOOST_FSM_TIMERS_REG0	0x64
+#define DCVS_BOOST_FSM_TIMERS_REG1	0x68
+#define DCVS_BOOST_FSM_TIMERS_REG2	0x6c
+#define PS_BOOST_FSM_EN_MASK		BIT(1)
+#define PS_BOOST_FSM_TIMERS_REG0	0x74
+#define PS_BOOST_FSM_TIMERS_REG1	0x78
+#define PS_BOOST_FSM_TIMERS_REG2	0x7c
+#define BOOST_PROG_SYNC_DELAY_REG	0x80
+#define DCVS_DROOP_FSM_EN_MASK		BIT(5)
+#define DROOP_PROG_SYNC_DELAY_REG	0x9c
+#define DROOP_RELEASE_TIMER_CTRL	0x88
+#define DROOP_CTRL_REG			0x84
+#define DCVS_DROOP_TIMER_CTRL		0x98
+#define PLL_SW_OVERRIDE_ENABLE		0xa0
+#define PLL_SW_OVERRIDE_DROOP_EN	BIT(0)
+#define SPM_CORE_COUNT_CTRL		0x2c
+#define CORE_DCVS_CTRL			0xbc
+#define OVERRIDE_CLUSTER_IDLE_ACK	0x800
+#define REQ_GEN_FSM_STATUS		0x70c
+
+#define PLL_MIN_LVAL			0x21
+#define PLL_MIN_FREQ_REG		0x94
+#define PLL_POST_DIV1			0x1F
+#define PLL_POST_DIV2			0x11F
+#define PLL_MODE			0x0
+#define PLL_L_VAL			0x4
+#define PLL_USER_CTRL			0xc
+#define PLL_CONFIG_CTL_LO		0x10
+#define PLL_CONFIG_CTL_HI		0x14
+#define MIN_VCO_VAL			0x2b
+
+#define MAX_VC				63
+#define MAX_MEM_ACC_LEVELS		3
+#define MAX_MEM_ACC_VALUES		(MAX_MEM_ACC_LEVELS * \
+					MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_ADDRS			3
+
+#define ISENSE_ON_DATA			0xf
+#define ISENSE_OFF_DATA			0x0
+#define CONSTANT_32			0x20
+
+#define APM_MX_MODE			0x0
+#define APM_APC_MODE			0x2
+#define APM_READ_DATA_MASK		0xc
+#define APM_MX_MODE_VAL			0x4
+#define APM_APC_READ_VAL		0x8
+#define APM_MX_READ_VAL			0x4
+#define APM_CROSSOVER_VC		0xb0
+
+#define MEM_ACC_SEQ_CONST(n)		(n)
+#define MEM_ACC_APM_READ_MASK		0xff
+#define MEMACC_CROSSOVER_VC		0xb8
+
+#define PLL_WAIT_LOCK_TIME_US		10
+#define PLL_WAIT_LOCK_TIME_NS		(PLL_WAIT_LOCK_TIME_US * 1000)
+#define SAFE_FREQ_WAIT_NS		5000
+#define DEXT_DECREMENT_WAIT_NS		1000
+
+#define DATA_MEM(n)			(0x400 + (n) * 4)
+
+#define DCVS_PERF_STATE_DESIRED_REG_0	0x780
+#define DCVS_PERF_STATE_DESIRED_REG(n) (DCVS_PERF_STATE_DESIRED_REG_0 + \
+					(4 * (n)))
+#define OSM_CYCLE_COUNTER_STATUS_REG_0	0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n)	(OSM_CYCLE_COUNTER_STATUS_REG_0 + \
+					(4 * (n)))
+
+static const struct regmap_config osm_qcom_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.fast_io	= true,
+};
+
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	EFUSE_BASE,
+	SEQ_BASE,
+	NUM_BASES,
+};
+
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	MEM_ACC_LEVEL,
+	VIRTUAL_CORNER,
+	NUM_FIELDS,
+};
+
+struct osm_entry {
+	u16 virtual_corner;
+	u16 open_loop_volt;
+	u32 freq_data;
+	u32 override_data;
+	u32 mem_acc_level;
+	long frequency;
+};
+
+struct clk_osm {
+	struct clk_hw hw;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	void *vbases[NUM_BASES];
+	unsigned long pbases[NUM_BASES];
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;
+	u32 cluster_num;
+	u32 core_num;
+	u32 apm_crossover_vc;
+	u32 apm_threshold_vc;
+	u32 mem_acc_crossover_vc;
+	u32 mem_acc_threshold_vc;
+	u32 min_cpr_vc;
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	u32 l_val_base;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_pll_min_freq;
+	u32 cfg_gfmux_addr;
+	u32 apcs_cbc_addr;
+	u32 speedbin;
+	u32 mem_acc_crossover_vc_addr;
+	u32 mem_acc_addr[MEM_ACC_ADDRS];
+	u32 ramp_ctl_addr;
+	u32 apm_mode_ctl;
+	u32 apm_status_ctl;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	bool secure_init;
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+	bool wdog_trace_en;
+};
+
+static struct regulator *vdd_l3;
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
+{
+	return container_of(_hw, struct clk_osm, hw);
+}
+
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 val2, orig_val;
+
+	val2 = orig_val = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	val2 &= ~mask;
+	val2 |= val & mask;
+
+	if (val2 != orig_val)
+		writel_relaxed(val2, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline void clk_osm_write_seq_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
+}
+
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+	return readl_relaxed_no_log((char *)c->vbases[base] + ENABLE_REG);
+}
+
+static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
+					unsigned long rate_max)
+{
+	if (n >= hw->init->num_rate_max)
+		return -ENXIO;
+	return hw->init->rate_max[n];
+}
+
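+/*
+ * A candidate rate is better than the current best if it satisfies the
+ * request with less excess (req <= new < best), or if nothing satisfies
+ * the request yet and the candidate is the largest rate seen so far.
+ */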
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+			unsigned long new)
+{
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	return (req <= new && new < best) || (best < req && best < new);
+}
+
+static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	int i;
+	unsigned long rrate = 0;
+
+	/*
+	 * If the rate passed in is 0, return the first frequency in the
+	 * FMAX table.
+	 */
+	if (!rate)
+		return hw->init->rate_max[0];
+
+	for (i = 0; i < hw->init->num_rate_max; i++) {
+		if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
+			rrate = hw->init->rate_max[i];
+			if (rate == rrate)
+				break;
+		}
+	}
+
+	pr_debug("%s: rate %lu, rrate %ld, Rate max %ld\n", __func__, rate,
+						rrate, hw->init->rate_max[i]);
+
+	return rrate;
+}
+
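+/*
+ * Look up the LUT index for a rate. Entries programmed for the maximum
+ * core count are preferred; the single-core entry is used only when no
+ * max-core-count entry matches the rate.
+ */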
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int quad_core_index, single_core_index = 0;
+	int core_count;
+
+	for (quad_core_index = 0; quad_core_index < entries;
+						quad_core_index++) {
+		core_count = CORE_COUNT_VAL(table[quad_core_index].freq_data);
+		if (rate == table[quad_core_index].frequency &&
+					core_count == SINGLE_CORE) {
+			single_core_index = quad_core_index;
+			continue;
+		}
+		if (rate == table[quad_core_index].frequency &&
+					core_count == MAX_CORE_COUNT)
+			return quad_core_index;
+	}
+	if (single_core_index)
+		return single_core_index;
+
+	return -EINVAL;
+}
+
+static int clk_osm_enable(struct clk_hw *hw)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+const struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+};
+
+static struct clk_ops clk_ops_core;
+
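+/*
+ * Per-core clock callbacks for clk_ops_core, which is declared above
+ * without a static initializer.
+ */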
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(p_hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(parent->osm_table,
+				     parent->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+	/*
+	 * Choose index and send request to OSM hardware.
+	 * TODO: Program INACTIVE_OS_REQUEST if needed.
+	 */
+	clk_osm_write_reg(parent, index,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(parent, OSM_BASE);
+
+	return 0;
+}
+
+static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	r_rate = clk_osm_round_rate(hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(cpuclk->osm_table,
+				     cpuclk->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	return 0;
+}
+
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(parent,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				parent->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return parent->osm_table[index].frequency;
+}
+
+static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				cpuclk->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return cpuclk->osm_table[index].frequency;
+}
+
+
+const struct clk_ops clk_ops_l3_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.recalc_rate = l3_clk_recalc_rate,
+	.set_rate = l3_clk_set_rate,
+};
+
+enum {
+	P_XO,
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_XO, 0 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"xo",
+};
+
+static struct clk_init_data osm_clks_init[] = {
+	[0] = {
+		.name = "l3_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_l3_osm,
+	},
+	[1] = {
+		.name = "pwrcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+	[2] = {
+		.name = "perfcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+};
+
+static struct clk_osm l3_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x0,
+	.hw.init = &osm_clks_init[0],
+};
+
+static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0);
+static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
+
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x300,
+	.hw.init = &osm_clks_init[1],
+};
+
+static struct clk_osm cpu0_pwrcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu0_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu1_pwrcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu1_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu2_pwrcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu2_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu3_pwrcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu3_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 2,
+	.cpu_reg_mask = 0x700,
+	.hw.init = &osm_clks_init[2],
+};
+
+
+static struct clk_osm cpu4_perfcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu4_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu5_perfcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu5_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu6_perfcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu6_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu7_perfcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu7_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+/*
+ * Use the cpu* clocks only for writing to the PERF_STATE_DESIRED registers.
+ * Note that we are currently NOT programming the APSS_LMH_GFMUX_CFG &
+ * APSS_OSM_GFMUX_CFG registers.
+ */
+
+static struct clk_hw *osm_qcom_clk_hws[] = {
+	[L3_CLK] = &l3_clk.hw,
+	[L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw,
+	[L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw,
+	[PWRCL_CLK] = &pwrcl_clk.hw,
+	[CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw,
+	[CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw,
+	[CPU2_PWRCL_CLK] = &cpu2_pwrcl_clk.hw,
+	[CPU3_PWRCL_CLK] = &cpu3_pwrcl_clk.hw,
+	[PERFCL_CLK] = &perfcl_clk.hw,
+	[CPU4_PERFCL_CLK] = &cpu4_perfcl_clk.hw,
+	[CPU5_PERFCL_CLK] = &cpu5_perfcl_clk.hw,
+	[CPU6_PERFCL_CLK] = &cpu6_perfcl_clk.hw,
+	[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw,
+};
+
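+/*
+ * Map a logical CPU to its per-core clock by matching the MPIDR value from
+ * the CPU node's "reg" property against each cluster's register mask. The
+ * result is cached in cpu_clk_map.
+ */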
+static struct clk_osm *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node;
+	const u32 *cell;
+	u64 hwid;
+	static struct clk_osm *cpu_clk_map[NR_CPUS];
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		goto fail;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		goto fail;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 0:
+			cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
+			break;
+		case 1:
+			cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
+			break;
+		case 2:
+			cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
+			break;
+		case 3:
+			cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for power cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 4:
+			cpu_clk_map[cpu] = &cpu4_perfcl_clk;
+			break;
+		case 5:
+			cpu_clk_map[cpu] = &cpu5_perfcl_clk;
+			break;
+		case 6:
+			cpu_clk_map[cpu] = &cpu6_perfcl_clk;
+			break;
+		case 7:
+			cpu_clk_map[cpu] = &cpu7_perfcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for perf cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+fail:
+	return NULL;
+}
+
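+/* Convert a duration in nanoseconds into a count of OSM clock cycles. */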
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 temp;
+
+	temp = (u64)c->osm_clk_rate * nsec;
+	do_div(temp, 1000000000);
+
+	return temp;
+}
+
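+/*
+ * Record the first virtual corner of each MEM ACC level transition, then
+ * program the thresholds into the sequencer: directly when the kernel owns
+ * the sequencer, or through scm_io_write() otherwise.
+ */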
+static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
+{
+	int curr_level, i, j = 0;
+	int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {MAX_VC, MAX_VC, MAX_VC};
+
+	curr_level = c->osm_table[0].mem_acc_level;
+	for (i = 0; i < c->num_entries; i++) {
+		if (curr_level == MAX_MEM_ACC_LEVELS)
+			break;
+
+		if (c->osm_table[i].mem_acc_level != curr_level) {
+			mem_acc_level_map[j++] =
+				c->osm_table[i].virtual_corner;
+			curr_level = c->osm_table[i].mem_acc_level;
+		}
+	}
+
+	if (c->secure_init) {
+		clk_osm_write_seq_reg(c,
+				c->pbases[OSM_BASE] + MEMACC_CROSSOVER_VC,
+				DATA_MEM(57));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[0], DATA_MEM(48));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[1], DATA_MEM(49));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[2], DATA_MEM(50));
+		clk_osm_write_seq_reg(c, c->mem_acc_crossover_vc,
+							DATA_MEM(78));
+		clk_osm_write_seq_reg(c, mem_acc_level_map[0], DATA_MEM(79));
+		if (c == &perfcl_clk)
+			clk_osm_write_seq_reg(c, c->mem_acc_threshold_vc,
+								DATA_MEM(80));
+		else
+			clk_osm_write_seq_reg(c, mem_acc_level_map[1],
+								DATA_MEM(80));
+		/*
+		 * Note that DATA_MEM[81] -> DATA_MEM[89] values will be
+		 * confirmed post-si. Use a value of 1 for DATA_MEM[89] and
+		 * leave the rest of them as 0.
+		 */
+		clk_osm_write_seq_reg(c, 1, DATA_MEM(89));
+	} else {
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(78),
+						c->mem_acc_crossover_vc);
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(79),
+						mem_acc_level_map[0]);
+		if (c == &perfcl_clk)
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						c->mem_acc_threshold_vc);
+		else
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						mem_acc_level_map[1]);
+	}
+}
+
+static void clk_osm_program_apm_regs(struct clk_osm *c)
+{
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return;
+
+	/*
+	 * Program address of the control register used to configure
+	 * the Array Power Mux controller
+	 */
+	clk_osm_write_seq_reg(c, c->apm_mode_ctl, DATA_MEM(41));
+
+	/* Program address of controller status register */
+	clk_osm_write_seq_reg(c, c->apm_status_ctl, DATA_MEM(43));
+
+	/* Program address of crossover register */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + APM_CROSSOVER_VC,
+						DATA_MEM(44));
+
+	/* Program mode value to switch APM to VDD_APC */
+	clk_osm_write_seq_reg(c, APM_APC_MODE, DATA_MEM(72));
+
+	/* Program mode value to switch APM to VDD_MX */
+	clk_osm_write_seq_reg(c, APM_MX_MODE, DATA_MEM(73));
+
+	/* Program mask used to move into read_mask port */
+	clk_osm_write_seq_reg(c, APM_READ_DATA_MASK, DATA_MEM(74));
+
+	/* Value used to move into read_exp port */
+	clk_osm_write_seq_reg(c, APM_APC_READ_VAL, DATA_MEM(75));
+	clk_osm_write_seq_reg(c, APM_MX_READ_VAL, DATA_MEM(76));
+}
+
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	if (!c->secure_init)
+		return;
+
+	dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+						 c->cluster_num);
+
+	/* PLL L_VAL & post-div programming */
+	clk_osm_write_seq_reg(c, c->apcs_pll_min_freq, DATA_MEM(32));
+	clk_osm_write_seq_reg(c, c->l_val_base, DATA_MEM(33));
+	clk_osm_write_seq_reg(c, c->apcs_pll_user_ctl, DATA_MEM(34));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV1, DATA_MEM(35));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV2, DATA_MEM(36));
+
+	/* APM Programming */
+	clk_osm_program_apm_regs(c);
+
+	/* GFMUX Programming */
+	clk_osm_write_seq_reg(c, c->cfg_gfmux_addr, DATA_MEM(37));
+	clk_osm_write_seq_reg(c, 0x1, DATA_MEM(65));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(66));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(67));
+	clk_osm_write_seq_reg(c, 0x40000000, DATA_MEM(68));
+	clk_osm_write_seq_reg(c, 0x20000000, DATA_MEM(69));
+	clk_osm_write_seq_reg(c, 0x10000000, DATA_MEM(70));
+	clk_osm_write_seq_reg(c, 0x70000000, DATA_MEM(71));
+
+	/* Override programming */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] +
+			OVERRIDE_CLUSTER_IDLE_ACK, DATA_MEM(54));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(55));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
+					DATA_MEM(40));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + REQ_GEN_FSM_STATUS,
+					DATA_MEM(60));
+	clk_osm_write_seq_reg(c, 0x10, DATA_MEM(61));
+	clk_osm_write_seq_reg(c, 0x70, DATA_MEM(62));
+	clk_osm_write_seq_reg(c, c->apcs_cbc_addr, DATA_MEM(112));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(113));
+
+	if (c == &perfcl_clk) {
+		int rc;
+		u32 isense_addr;
+
+		/* Performance cluster isense programming */
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perfcl-isense-addr", &isense_addr);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,perfcl-isense-addr property, rc=%d\n",
+				rc);
+			return;
+		}
+		clk_osm_write_seq_reg(c, isense_addr, DATA_MEM(45));
+		clk_osm_write_seq_reg(c, ISENSE_ON_DATA, DATA_MEM(46));
+		clk_osm_write_seq_reg(c, ISENSE_OFF_DATA, DATA_MEM(47));
+	}
+
+	clk_osm_write_seq_reg(c, c->ramp_ctl_addr, DATA_MEM(105));
+	clk_osm_write_seq_reg(c, CONSTANT_32, DATA_MEM(92));
+
+	/* Enable/disable CPR ramp settings */
+	clk_osm_write_seq_reg(c, 0x101C031, DATA_MEM(106));
+	clk_osm_write_seq_reg(c, 0x1010031, DATA_MEM(107));
+}
+
+static void clk_osm_setup_fsms(struct clk_osm *c)
+{
+	u32 val;
+
+	/* Voltage Reduction FSM */
+	if (c->red_fsm_en) {
+		val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
+		val |= BVAL(6, 1, c->min_cpr_vc);
+		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
+				  VMIN_REDUCTION_TIMER_REG);
+	}
+
+	/* Boost FSM */
+	if (c->boost_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
+		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Safe Freq FSM */
+	if (c->safe_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+
+	}
+
+	/* Pulse Swallowing FSM */
+	if (c->ps_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
+							PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* PLL signal timing control */
+	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
+		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+
+	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
+	if (c->droop_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+	}
+
+	if (c->ps_fsm_en || c->droop_fsm_en) {
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
+				  DROOP_RELEASE_TIMER_CTRL);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
+				  DCVS_DROOP_TIMER_CTRL);
+		/*
+		 * TODO: Check if DCVS_DROOP_CODE used is correct. Also check
+		 * if RESYNC_CTRL should be set for L3.
+		 */
+		val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+	}
+}
+
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM up voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM down voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+									rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM Voltage requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-volt-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+		val = 0;
+	} else {
+		val = 1;
+	}
+
+	/* Enable or disable LLM VOLT DVCS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM up frequency request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "Unable to get CC up timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Set up the timer that controls how long OSM waits before
+	 * performing DCVS when an LLM down frequency request is received.
+	 * The time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM frequency requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-freq-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
+		val = 0;
+	} else {
+		val = BIT(1);
+	}
+
+	/*
+	 * Enable or disable LLM frequency DCVS; bit 1 of
+	 * LLM_INTF_DCVS_DISABLE gates the frequency votes.
+	 */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the write to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static int clk_osm_set_cc_policy(struct platform_device *pdev)
+{
+	int rc = 0, val;
+	u32 *array;
+	struct device_node *of = pdev->dev.of_node;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
+			 rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+					array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+				       array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+				       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+	}
+
+	/* OSM index override for cluster PC */
+	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
+			rc);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+	} else {
+		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+	}
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c3-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C3 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C2 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
+		val = 1;
+	} else {
+		val = 0;
+	}
+
+	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+static void clk_osm_setup_cluster_pll(struct clk_osm *c)
+{
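+	/*
+	 * One-time PLL bring-up: program the L-value and configuration
+	 * registers, then walk PLL_MODE through its enable sequence with
+	 * the required settling delays.
+	 */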
+	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
+	writel_relaxed(0x26, c->vbases[PLL_BASE] + PLL_L_VAL);
+	writel_relaxed(0x8, c->vbases[PLL_BASE] +
+			PLL_USER_CTRL);
+	writel_relaxed(0x20000AA8, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_LO);
+	writel_relaxed(0x000003D2, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_HI);
+	writel_relaxed(0x2, c->vbases[PLL_BASE] +
+			PLL_MODE);
+
+	/* Ensure writes complete before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	udelay(PLL_WAIT_LOCK_TIME_US);
+
+	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
+
+	/* Ensure write completes before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	usleep_range(50, 75);
+
+	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
+}
+
+static void clk_osm_misc_programming(struct clk_osm *c)
+{
+	u32 lval = 0xFF, val;
+	int i;
+
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
+	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+
+	/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
+	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
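+	/*
+	 * With qcom,osm-no-tz the sequencer data memory is written
+	 * directly; otherwise updates must go through SCM IO writes.
+	 */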
+	if (c->secure_init) {
+		val |= IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(108));
+		val &= ~IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(109));
+		clk_osm_write_seq_reg(c, MIN_VCO_VAL, DATA_MEM(110));
+	} else {
+		val |= IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(108), val);
+		val &= ~IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(109), val);
+	}
+
+	/* Program LVAL corresponding to first turbo VC */
+	for (i = 0; i < c->num_entries; i++) {
+		if (c->osm_table[i].mem_acc_level == MAX_MEM_ACC_LEVELS) {
+			lval = c->osm_table[i].freq_data & GENMASK(7, 0);
+			break;
+		}
+	}
+
+	if (c->secure_init)
+		clk_osm_write_seq_reg(c, lval, DATA_MEM(114));
+	else
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(114), lval);
+}
+
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	int i;
+	u32 freq_val = 0, volt_val = 0, override_val = 0;
+	u32 table_entry_offset, last_virtual_corner = 0;
+	u32 last_mem_acc_level = 0;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		if (i < c->num_entries) {
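+			/*
+			 * volt_val packs the LUT voltage word: bits [27:24]
+			 * hold the mem-acc level, [21:16] the virtual corner
+			 * and [11:0] the open-loop voltage in mV.
+			 */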
+			freq_val = entry[i].freq_data;
+			volt_val = BVAL(27, 24, entry[i].mem_acc_level)
+				| BVAL(21, 16, entry[i].virtual_corner)
+				| BVAL(11, 0, entry[i].open_loop_volt);
+			override_val = entry[i].override_data;
+
+			if (last_virtual_corner && last_virtual_corner ==
+			    entry[i].virtual_corner && last_mem_acc_level !=
+			    entry[i].mem_acc_level) {
+				pr_err("invalid LUT entry at row=%d virtual_corner=%d, mem_acc_level=%d\n",
+				       i, entry[i].virtual_corner,
+				       entry[i].mem_acc_level);
+				return -EINVAL;
+			}
+			last_virtual_corner = entry[i].virtual_corner;
+			last_mem_acc_level = entry[i].mem_acc_level;
+		}
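+		/*
+		 * Rows beyond num_entries are programmed with the last
+		 * valid entry so every LUT slot holds a usable setpoint.
+		 */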
+
+		table_entry_offset = i * OSM_REG_SIZE;
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+				  table_entry_offset);
+	}
+
+	/* Make sure all writes go through */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	int i;
+	struct osm_entry *table = c->osm_table;
+	u32 pll_src, pll_div, lval, core_count;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (i = 0; i < c->num_entries; i++) {
+		pll_src = (table[i].freq_data & GENMASK(31, 30)) >> 30;
+		pll_div = (table[i].freq_data & GENMASK(29, 28)) >> 28;
+		lval = table[i].freq_data & GENMASK(7, 0);
+		core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
+
+		pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
+			i,
+			table[i].frequency,
+			table[i].virtual_corner,
+			table[i].open_loop_volt,
+			core_count,
+			pll_src,
+			pll_div,
+			lval,
+			table[i].mem_acc_level);
+	}
+	pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+			c->apm_threshold_vc, c->apm_crossover_vc);
+	pr_debug("MEM-ACC threshold corner=%d, crossover corner=%d\n",
+			c->mem_acc_threshold_vc, c->mem_acc_crossover_vc);
+}
+
+static int find_voltage(struct clk_osm *c, unsigned long rate)
+{
+	struct osm_entry *table = c->osm_table;
+	int entries = c->num_entries, i;
+
+	for (i = 0; i < entries; i++) {
+		if (rate == table[i].frequency) {
+			/* open_loop_volt is in mV; OPP expects uV */
+			return table[i].open_loop_volt * 1000;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+	unsigned long rate = 0;
+	int uv;
+	long rc;
+	int j = 0;
+	unsigned long min_rate = c->hw.init->rate_max[0];
+	unsigned long max_rate =
+			c->hw.init->rate_max[c->hw.init->num_rate_max - 1];
+
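+	/* Walk rate_max[] in ascending order until max_rate has been added */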
+	while (1) {
+		rate = c->hw.init->rate_max[j++];
+		uv = find_voltage(c, rate);
+		if (uv <= 0) {
+			pr_warn("No voltage for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		rc = dev_pm_opp_add(dev, rate, uv);
+		if (rc) {
+			pr_warn("failed to add OPP for %lu\n", rate);
+			return rc;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if (rate == min_rate)
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+
+		if (rate == max_rate && max_rate != min_rate) {
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			break;
+		}
+
+		if (min_rate == max_rate)
+			break;
+	}
+	return 0;
+}
+
+static void populate_opp_table(struct platform_device *pdev)
+{
+	int cpu;
+	struct device *cpu_dev;
+	struct clk_osm *c, *parent;
+	struct clk_hw *hw_parent;
+
+	for_each_possible_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return;
+		}
+
+		hw_parent = clk_hw_get_parent(&c->hw);
+		parent = to_clk_osm(hw_parent);
+		cpu_dev = get_cpu_device(cpu);
+		if (cpu_dev)
+			if (add_opp(parent, cpu_dev))
+				pr_err("Failed to add OPP levels for %s\n",
+					dev_name(cpu_dev));
+	}
+
+	/*TODO: Figure out which device to tag the L3 table to */
+}
+
+static u64 clk_osm_get_cpu_cycle_counter(int cpu)
+{
+	u32 val;
+	unsigned long flags;
+	struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
+
+	if (IS_ERR_OR_NULL(c)) {
+		pr_err("no clock device for CPU=%d\n", cpu);
+		return 0;
+	}
+
+	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
+
+	spin_lock_irqsave(&parent->lock, flags);
+	val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+
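+	/*
+	 * The hardware counter is 32 bits wide; accumulate deltas into a
+	 * 64-bit total so callers never observe a wrap.
+	 */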
+	if (val < c->prev_cycle_counter) {
+		/* Handle counter overflow */
+		c->total_cycle_counter += UINT_MAX -
+			c->prev_cycle_counter + val;
+		c->prev_cycle_counter = val;
+	} else {
+		c->total_cycle_counter += val - c->prev_cycle_counter;
+		c->prev_cycle_counter = val;
+	}
+	spin_unlock_irqrestore(&parent->lock, flags);
+
+	return c->total_cycle_counter;
+}
+
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+	u32 ratio = c->osm_clk_rate;
+	u32 val = 0;
+
+	/* Bit 0 enables the cycle counter */
+	val = BIT(0);
+	/* Bits [5:1] hold the OSM to XO clock ratio minus one */
+	ratio /= c->xo_clk_rate;
+	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
+static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	struct regulator *regulator = c->vdd_reg;
+	int count, vc, i, memacc_threshold, apm_threshold;
+	int rc = 0;
+	u32 corner_volt;
+
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-apm-threshold-voltage",
+				  &apm_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-apm-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,perfcl-apcs-mem-acc-threshold-voltage",
+				  &memacc_threshold);
+	if (rc) {
+		pr_err("qcom,perfcl-apcs-mem-acc-threshold-voltage property not specified\n");
+		return rc;
+	}
+
+	/*
+	 * Initialize VC settings in case none of them go above the voltage
+	 * limits
+	 */
+	c->apm_threshold_vc = c->apm_crossover_vc = c->mem_acc_crossover_vc =
+				c->mem_acc_threshold_vc = MAX_VC;
+
+	count = regulator_count_voltages(regulator);
+	if (count < 0) {
+		pr_err("Failed to get the number of virtual corners supported\n");
+		return count;
+	}
+
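+	/*
+	 * The two highest corners reported by the regulator are reserved
+	 * for the APM and MEM-ACC crossover settings, respectively.
+	 */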
+	c->apm_crossover_vc = count - 2;
+	c->mem_acc_crossover_vc = count - 1;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		corner_volt = regulator_list_corner_voltage(regulator, vc);
+
+		if (c->apm_threshold_vc == MAX_VC &&
+				corner_volt >= apm_threshold)
+			c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+
+		if (c->mem_acc_threshold_vc == MAX_VC &&
+				corner_volt >= memacc_threshold)
+			c->mem_acc_threshold_vc =
+				c->osm_table[i].virtual_corner;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
+{
+	struct regulator *regulator = c->vdd_reg;
+	u32 vc, mv;
+	int i;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		/* Voltage is in uv. Convert to mv */
+		mv = regulator_list_corner_voltage(regulator, vc) / 1000;
+		c->osm_table[i].open_loop_volt = mv;
+	}
+
+	return 0;
+}
+
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 *fmax_temp;
+	u32 data;
+	unsigned long abs_fmax = 0;
+	bool last_entry = false;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	fmax_temp = devm_kzalloc(&pdev->dev, num_rows * sizeof(*fmax_temp),
+					GFP_KERNEL);
+	if (!fmax_temp)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].mem_acc_level = array[i + MEM_ACC_LEVEL];
+		/* Voltage corners are 0 based in the OSM LUT */
+		c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+		pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x mem_acc_level=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].virtual_corner,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].mem_acc_level);
+
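+		/*
+		 * Bits [18:16] of the frequency data hold the core count;
+		 * only rows supporting the maximum core count are fmax
+		 * candidates.
+		 */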
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CORE_COUNT) {
+			fmax_temp[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS) {
+			i += NUM_FIELDS;
+		} else {
+			abs_fmax = array[i];
+			last_entry = true;
+		}
+	}
+	fmax_temp[k] = abs_fmax;
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
+						 k * sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < k; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] = fmax_temp[i];
+
+	osm_clks_init[c->cluster_num].num_rate_max = k;
+exit:
+	devm_kfree(&pdev->dev, fmax_temp);
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
+static int clk_osm_parse_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0;
+	struct resource *res;
+	char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
+	char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
+	char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,l-val-base",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.l_val_base = array[l3_clk.cluster_num];
+	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
+	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_user_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-min-freq",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-min-freq property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_pll_min_freq = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_pll_min_freq = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_min_freq = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_mode_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-status-ctrl",
+				  array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-status-ctrl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apm_status_ctl = array[l3_clk.cluster_num];
+	pwrcl_clk.apm_status_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_status_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,cfg-gfmux-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cfg-gfmux-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.cfg_gfmux_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.cfg_gfmux_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.cfg_gfmux_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cbc-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cbc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.apcs_cbc_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.apcs_cbc_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cbc_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-ramp-ctl-addr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-ramp-ctl-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.ramp_ctl_addr = array[l3_clk.cluster_num];
+	pwrcl_clk.ramp_ctl_addr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.ramp_ctl_addr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
+				  &pwrcl_clk.xo_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	l3_clk.xo_clk_rate = perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
+				  &pwrcl_clk.osm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.osm_clk_rate = perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,cc-reads",
+				  &pwrcl_clk.cycle_counter_reads);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	l3_clk.cycle_counter_reads = perfcl_clk.cycle_counter_reads =
+			pwrcl_clk.cycle_counter_reads;
+
+	rc = of_property_read_u32(of, "qcom,cc-delay",
+				  &pwrcl_clk.cycle_counter_delay);
+	if (rc)
+		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
+	else
+		l3_clk.cycle_counter_delay = perfcl_clk.cycle_counter_delay =
+			pwrcl_clk.cycle_counter_delay;
+
+	rc = of_property_read_u32(of, "qcom,cc-factor",
+				  &pwrcl_clk.cycle_counter_factor);
+	if (rc)
+		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
+	else
+		l3_clk.cycle_counter_factor = perfcl_clk.cycle_counter_factor =
+			pwrcl_clk.cycle_counter_factor;
+
+	l3_clk.red_fsm_en = perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
+		of_property_read_bool(of, "qcom,red-fsm-en");
+
+	l3_clk.boost_fsm_en = perfcl_clk.boost_fsm_en =
+		pwrcl_clk.boost_fsm_en =
+		of_property_read_bool(of, "qcom,boost-fsm-en");
+
+	l3_clk.safe_fsm_en = perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
+		of_property_read_bool(of, "qcom,safe-fsm-en");
+
+	l3_clk.ps_fsm_en = perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
+		of_property_read_bool(of, "qcom,ps-fsm-en");
+
+	l3_clk.droop_fsm_en = perfcl_clk.droop_fsm_en =
+		pwrcl_clk.droop_fsm_en =
+		of_property_read_bool(of, "qcom,droop-fsm-en");
+
+	devm_kfree(&pdev->dev, array);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"l3_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_sequencer\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"pwrcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!pwrcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"perfcl_sequencer");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_sequencer\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[SEQ_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
+		return -ENOMEM;
+	}
+
+	snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
+			"qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
+	rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			l3_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
+			"qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
+	rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
+						&pwrcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			pwrcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
+			"qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
+	rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
+						&perfcl_clk.min_cpr_vc);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
+			perfcl_min_cpr_vc_str, rc);
+		return -EINVAL;
+	}
+
+	l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
+		of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");
+
+	if (!pwrcl_clk.secure_init)
+		return rc;
+
+	rc = of_property_read_u32_array(of, "qcom,l3-mem-acc-addr",
+					l3_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l3-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-mem-acc-addr",
+					pwrcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,perfcl-mem-acc-addr",
+					perfcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-mem-acc-addr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	unsigned long pbase;
+	int rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_l3_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_l3_base");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_pwrcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_pwrcl_base");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_perfcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_perfcl_base");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map l3_pll base\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[PLL_BASE] = pbase;
+	l3_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map pwrcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[PLL_BASE] = pbase;
+	pwrcl_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "perfcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map perfcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[PLL_BASE] = pbase;
+	perfcl_clk.vbases[PLL_BASE] = vbase;
+
+	/* efuse speed bin fuses are optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+		pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[EFUSE_BASE] = pbase;
+		perfcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	vdd_l3 = devm_regulator_get(&pdev->dev, "vdd-l3");
+	if (IS_ERR(vdd_l3)) {
+		rc = PTR_ERR(vdd_l3);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the l3 vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	l3_clk.vdd_reg = vdd_l3;
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-l3-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-l3-supply\n");
+		return -EINVAL;
+	}
+
+	l3_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!l3_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-l3-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
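+/* Initial bring-up rate (300 MHz) applied to each CPU and the L3 clock */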
+static unsigned long init_rate = 300000000;
+
+static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
+{
+	int rc = 0, cpu, i;
+	int speedbin = 0, pvs_ver = 0;
+	u32 pte_efuse, val;
+	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
+	struct clk *ext_xo_clk, *clk;
+	struct clk_osm *c;
+	struct device *dev = &pdev->dev;
+	struct clk_onecell_data *clk_data;
+	struct resource *res;
+	void *vbase;
+	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	/*
+	 * Require the RPM XO clock to be registered before OSM.
+	 * The cpuss_gpll0_clk_src is expected to be configured by the
+	 * bootloader.
+	 */
+	ext_xo_clk = devm_clk_get(dev, "xo_ao");
+	if (IS_ERR(ext_xo_clk)) {
+		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+			dev_err(dev, "Unable to get xo clock\n");
+		return PTR_ERR(ext_xo_clk);
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+								GFP_KERNEL);
+	if (!clk_data) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+					sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		rc = -ENOMEM;
+		goto clk_err;
+	}
+
+	clk_data->clk_num = num_clks;
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	if (l3_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+						    L3_EFUSE_MASK);
+		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+			 "qcom,l3-speedbin%d-v%d", l3_clk.speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (pwrcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+						    PWRCL_EFUSE_MASK);
+		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", pwrcl_clk.speedbin,
+			 pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (perfcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+							PERFCL_EFUSE_MASK);
+		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+			 "qcom,perfcl-speedbin%d-v%d", perfcl_clk.speedbin,
+			 pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for L3\n");
+
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for pwrcl_clk\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&l3_clk);
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&l3_clk);
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	rc = clk_osm_setup_hw_table(&l3_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "cc policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&l3_clk);
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/* Program VC at which the array power supply needs to be switched */
+	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+					APM_CROSSOVER_VC);
+	if (perfcl_clk.secure_init) {
+		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
+				DATA_MEM(77));
+		clk_osm_write_seq_reg(&perfcl_clk,
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
+				DATA_MEM(111));
+	} else {
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+				perfcl_clk.apm_crossover_vc);
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"apps_itm_ctl");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for apps_itm_ctl\n");
+		return -ENOMEM;
+	}
+
+	vbase = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!vbase) {
+		dev_err(&pdev->dev,
+				"Unable to map in apps_itm_ctl base\n");
+		return -ENOMEM;
+	}
+
+	val = readl_relaxed(vbase + 0x0);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x0);
+
+	val = readl_relaxed(vbase + 0x4);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x4);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&l3_clk, pdev);
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* MEM-ACC Programming */
+	clk_osm_program_mem_acc_regs(&l3_clk);
+	clk_osm_program_mem_acc_regs(&pwrcl_clk);
+	clk_osm_program_mem_acc_regs(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&l3_clk);
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	/* Misc programming */
+	clk_osm_misc_programming(&l3_clk);
+	clk_osm_misc_programming(&pwrcl_clk);
+	clk_osm_misc_programming(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs")) {
+		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+
+		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+	}
+
+	clk_ops_core = clk_dummy_ops;
+	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
+
+	spin_lock_init(&l3_clk.lock);
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	/* Register OSM l3, pwr and perf clocks with Clock Framework */
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(&pdev->dev, osm_qcom_clk_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register CPU clock at index %d\n",
+				i);
+			return PTR_ERR(clk);
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+								clk_data);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks\n");
+		goto provider_err;
+	}
+
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			rc = -EINVAL;
+			put_online_cpus();
+			goto provider_err;
+		}
+
+		rc = clk_set_rate(c->hw.clk, init_rate);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set init rate on CPU %d, rc=%d\n",
+				cpu, rc);
+			put_online_cpus();
+			goto provider_err;
+		}
+		WARN(clk_prepare_enable(c->hw.clk),
+		     "Failed to enable clock for cpu %d\n", cpu);
+		udelay(300);
+	}
+
+	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
+			rc);
+		put_online_cpus();
+		goto provider_err;
+	}
+	WARN(clk_prepare_enable(l3_clk.hw.clk),
+		     "Failed to enable clock for L3\n");
+	udelay(300);
+
+	populate_opp_table(pdev);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	register_cpu_cycle_counter_cb(&cb);
+	pr_info("OSM driver inited\n");
+	put_online_cpus();
+
+	return 0;
+provider_err:
+	if (clk_data)
+		devm_kfree(&pdev->dev, clk_data->clks);
+clk_err:
+	devm_kfree(&pdev->dev, clk_data);
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
+	panic("Unable to Setup OSM");
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,clk-cpu-osm" },
+	{}
+};
+
+static struct platform_driver clk_cpu_osm_driver = {
+	.probe = clk_cpu_osm_driver_probe,
+	.driver = {
+		.name = "clk-cpu-osm",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init clk_cpu_osm_init(void)
+{
+	return platform_driver_register(&clk_cpu_osm_driver);
+}
+arch_initcall(clk_cpu_osm_init);
+
+static void __exit clk_cpu_osm_exit(void)
+{
+	platform_driver_unregister(&clk_cpu_osm_driver);
+}
+module_exit(clk_cpu_osm_exit);
+
+MODULE_DESCRIPTION("QTI CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index eface18..e728dec 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,4 +57,8 @@
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
 extern struct clk_ops clk_dummy_ops;
+
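+/*
+ * BM(msb, lsb) builds a mask spanning bits [msb:lsb]; BVAL() shifts a value
+ * into that field, e.g. BVAL(6, 1, n) places n in bits [6:1].
+ */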
+#define BM(msb, lsb)	(((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+#define BVAL(msb, lsb, val)	(((val) << lsb) & BM(msb, lsb))
+
 #endif
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 1b8c739..12eb6d8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -35,8 +35,12 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/cpufreq_interactive.h>
 
+static DEFINE_PER_CPU(struct update_util_data, update_util);
+
 struct cpufreq_interactive_policyinfo {
-	struct timer_list policy_timer;
+	bool work_in_progress;
+	struct irq_work irq_work;
+	spinlock_t irq_work_lock; /* protects work_in_progress */
 	struct timer_list policy_slack_timer;
 	struct hrtimer notif_timer;
 	spinlock_t load_lock; /* protects load tracking stat */
@@ -215,9 +219,6 @@
 			pcpu->cputime_speedadj_timestamp =
 						pcpu->time_in_idle_timestamp;
 		}
-		del_timer(&ppol->policy_timer);
-		ppol->policy_timer.expires = expires;
-		add_timer(&ppol->policy_timer);
 	}
 
 	if (tunables->timer_slack_val >= 0 &&
@@ -231,9 +232,51 @@
 	spin_unlock_irqrestore(&ppol->load_lock, flags);
 }
 
+static void update_util_handler(struct update_util_data *data, u64 time,
+				unsigned int sched_flags)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = *this_cpu_ptr(&polinfo);
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	/*
+	 * The irq-work may not be allowed to be queued up right now
+	 * because work has already been queued up or is in progress.
+	 */
+	if (ppol->work_in_progress ||
+	    sched_flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)
+		goto out;
+
+	ppol->work_in_progress = true;
+	irq_work_queue(&ppol->irq_work);
+out:
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+	int i;
+
+	for_each_cpu(i, policy->cpus)
+		cpufreq_remove_update_util_hook(i);
+
+	synchronize_sched();
+}
+
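+/*
+ * Attach the scheduler's utilization-update hook to every CPU in the
+ * policy; these callbacks take over the role of the old deferrable
+ * policy_timer.
+ */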
+static void gov_set_update_util(struct cpufreq_policy *policy)
+{
+	struct update_util_data *util;
+	int cpu;
+
+	for_each_cpu(cpu, policy->cpus) {
+		util = &per_cpu(update_util, cpu);
+		cpufreq_add_update_util_hook(cpu, util, update_util_handler);
+	}
+}
+
 /* The caller shall take enable_sem write semaphore to avoid any timer race.
- * The policy_timer and policy_slack_timer must be deactivated when calling
- * this function.
+ * The policy_slack_timer must be deactivated when calling this function.
  */
 static void cpufreq_interactive_timer_start(
 	struct cpufreq_interactive_tunables *tunables, int cpu)
@@ -245,8 +288,7 @@
 	int i;
 
 	spin_lock_irqsave(&ppol->load_lock, flags);
-	ppol->policy_timer.expires = expires;
-	add_timer(&ppol->policy_timer);
+	gov_set_update_util(ppol->policy);
 	if (tunables->timer_slack_val >= 0 &&
 	    ppol->target_freq > ppol->policy->min) {
 		expires += usecs_to_jiffies(tunables->timer_slack_val);
@@ -448,7 +491,7 @@
 
 #define NEW_TASK_RATIO 75
 #define PRED_TOLERANCE_PCT 10
-static void cpufreq_interactive_timer(unsigned long data)
+static void cpufreq_interactive_timer(int data)
 {
 	s64 now;
 	unsigned int delta_time;
@@ -467,7 +510,7 @@
 	unsigned int index;
 	unsigned long flags;
 	unsigned long max_cpu;
-	int cpu, i;
+	int i, cpu;
 	int new_load_pct = 0;
 	int prev_l, pred_l = 0;
 	struct cpufreq_govinfo govinfo;
@@ -659,8 +702,7 @@
 	wake_up_process_no_notif(speedchange_task);
 
 rearm:
-	if (!timer_pending(&ppol->policy_timer))
-		cpufreq_interactive_timer_resched(data, false);
+	cpufreq_interactive_timer_resched(data, false);
 
 	/*
 	 * Send govinfo notification.
@@ -822,7 +864,6 @@
 	}
 	cpu = ppol->notif_cpu;
 	trace_cpufreq_interactive_load_change(cpu);
-	del_timer(&ppol->policy_timer);
 	del_timer(&ppol->policy_slack_timer);
 	cpufreq_interactive_timer(cpu);
 
@@ -1569,6 +1610,20 @@
 	return tunables;
 }
 
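+/*
+ * Deferred from update_util_handler(): re-evaluate the governor on this
+ * CPU, then clear work_in_progress so another update can be queued.
+ */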
+static void irq_work(struct irq_work *irq_work)
+{
+	struct cpufreq_interactive_policyinfo *ppol;
+	unsigned long flags;
+
+	ppol = container_of(irq_work, struct cpufreq_interactive_policyinfo,
+			    irq_work);
+
+	cpufreq_interactive_timer(smp_processor_id());
+	spin_lock_irqsave(&ppol->irq_work_lock, flags);
+	ppol->work_in_progress = false;
+	spin_unlock_irqrestore(&ppol->irq_work_lock, flags);
+}
+
 static struct cpufreq_interactive_policyinfo *get_policyinfo(
 					struct cpufreq_policy *policy)
 {
@@ -1593,12 +1648,12 @@
 	}
 	ppol->sl = sl;
 
-	init_timer_deferrable(&ppol->policy_timer);
-	ppol->policy_timer.function = cpufreq_interactive_timer;
 	init_timer(&ppol->policy_slack_timer);
 	ppol->policy_slack_timer.function = cpufreq_interactive_nop_timer;
 	hrtimer_init(&ppol->notif_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	ppol->notif_timer.function = cpufreq_interactive_hrtimer;
+	init_irq_work(&ppol->irq_work, irq_work);
+	spin_lock_init(&ppol->irq_work_lock);
 	spin_lock_init(&ppol->load_lock);
 	spin_lock_init(&ppol->target_freq_lock);
 	init_rwsem(&ppol->enable_sem);
@@ -1775,9 +1830,7 @@
 	ppol->reject_notification = true;
 	ppol->notif_pending = false;
 	down_write(&ppol->enable_sem);
-	del_timer_sync(&ppol->policy_timer);
 	del_timer_sync(&ppol->policy_slack_timer);
-	ppol->policy_timer.data = policy->cpu;
 	ppol->last_evaluated_jiffy = get_jiffies_64();
 	cpufreq_interactive_timer_start(tunables, policy->cpu);
 	ppol->governor_enabled = 1;
@@ -1807,7 +1860,9 @@
 	down_write(&ppol->enable_sem);
 	ppol->governor_enabled = 0;
 	ppol->target_freq = 0;
-	del_timer_sync(&ppol->policy_timer);
+	gov_clear_update_util(ppol->policy);
+	irq_work_sync(&ppol->irq_work);
+	ppol->work_in_progress = false;
 	del_timer_sync(&ppol->policy_slack_timer);
 	up_write(&ppol->enable_sem);
 	ppol->reject_notification = false;
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 0caa8d1..f968ffd9 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -364,7 +364,7 @@
 	char clk_name[] = "cpu??_clk";
 	char tbl_name[] = "qcom,cpufreq-table-??";
 	struct clk *c;
-	int cpu;
+	int cpu, ret;
 	struct cpufreq_frequency_table *ftbl;
 
 	l2_clk = devm_clk_get(dev, "l2_clk");
@@ -431,7 +431,15 @@
 		per_cpu(freq_table, cpu) = ftbl;
 	}
 
-	return 0;
+	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_register_driver(&msm_cpufreq_driver);
+	if (ret)
+		unregister_pm_notifier(&msm_cpufreq_pm_notifier);
+
+	return ret;
 }
 
 static const struct of_device_id msm_cpufreq_match_table[] = {
@@ -467,8 +475,7 @@
 		return rc;
 	}
 
-	register_pm_notifier(&msm_cpufreq_pm_notifier);
-	return cpufreq_register_driver(&msm_cpufreq_driver);
+	return 0;
 }
 
 subsys_initcall(msm_cpufreq_register);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
index 0e2e7ec..2a84a2d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk.h
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
+#include "sde_power_handle.h"
 
 #define MAX_STRING_LEN 32
 #define MAX_DSI_CTRL 2
@@ -67,6 +68,8 @@
  * @core_mmss_clk:       Handle to MMSS core clock.
  * @bus_clk:             Handle to bus clock.
  * @mnoc_clk:            Handle to MMSS NOC clock.
+ * @dsi_core_client:	 Pointer to SDE power client
+ * @phandle:             Pointer to SDE power handle
  */
 struct dsi_core_clk_info {
 	struct clk *mdp_core_clk;
@@ -74,6 +77,8 @@
 	struct clk *core_mmss_clk;
 	struct clk *bus_clk;
 	struct clk *mnoc_clk;
+	struct sde_power_client *dsi_core_client;
+	struct sde_power_handle *phandle;
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 9650a0b..2fcf10ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -185,10 +185,12 @@
 {
 	int rc = 0;
 
-	rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
-	if (rc) {
-		pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
-		goto error;
+	if (c_clks->clks.mdp_core_clk) {
+		rc = clk_prepare_enable(c_clks->clks.mdp_core_clk);
+		if (rc) {
+			pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+			goto error;
+		}
 	}
 
 	if (c_clks->clks.mnoc_clk) {
@@ -199,28 +201,36 @@
 		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.iface_clk);
-	if (rc) {
-		pr_err("failed to enable iface_clk, rc=%d\n", rc);
-		goto error_disable_mnoc_clk;
+	if (c_clks->clks.iface_clk) {
+		rc = clk_prepare_enable(c_clks->clks.iface_clk);
+		if (rc) {
+			pr_err("failed to enable iface_clk, rc=%d\n", rc);
+			goto error_disable_mnoc_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.bus_clk);
-	if (rc) {
-		pr_err("failed to enable bus_clk, rc=%d\n", rc);
-		goto error_disable_iface_clk;
+	if (c_clks->clks.bus_clk) {
+		rc = clk_prepare_enable(c_clks->clks.bus_clk);
+		if (rc) {
+			pr_err("failed to enable bus_clk, rc=%d\n", rc);
+			goto error_disable_iface_clk;
+		}
 	}
 
-	rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
-	if (rc) {
-		pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
-		goto error_disable_bus_clk;
+	if (c_clks->clks.core_mmss_clk) {
+		rc = clk_prepare_enable(c_clks->clks.core_mmss_clk);
+		if (rc) {
+			pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+			goto error_disable_bus_clk;
+		}
 	}
 
-	rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
-	if (rc) {
-		pr_err("bus scale client enable failed, rc=%d\n", rc);
-		goto error_disable_mmss_clk;
+	if (c_clks->bus_handle) {
+		rc = msm_bus_scale_client_update_request(c_clks->bus_handle, 1);
+		if (rc) {
+			pr_err("bus scale client enable failed, rc=%d\n", rc);
+			goto error_disable_mmss_clk;
+		}
 	}
 
 	return rc;
@@ -458,11 +468,18 @@
 	 */
 
 	m_clks = &clks[master_ndx];
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+			m_clks->clks.dsi_core_client, true);
+
+	if (rc) {
+		pr_err("Power resource enable failed, rc=%d\n", rc);
+		goto error;
+	}
 
 	rc = dsi_core_clk_start(m_clks);
 	if (rc) {
 		pr_err("failed to turn on master clocks, rc=%d\n", rc);
-		goto error;
+		goto error_disable_master_resource;
 	}
 
 	/* Turn on rest of the core clocks */
@@ -471,15 +488,28 @@
 		if (!clk || (clk == m_clks))
 			continue;
 
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, true);
+		if (rc) {
+			pr_err("Power resource enable failed, rc=%d\n", rc);
+			goto error_disable_master;
+		}
+
 		rc = dsi_core_clk_start(clk);
 		if (rc) {
 			pr_err("failed to turn on clocks, rc=%d\n", rc);
+			(void)sde_power_resource_enable(clk->clks.phandle,
+					clk->clks.dsi_core_client, false);
 			goto error_disable_master;
 		}
 	}
 	return rc;
+
 error_disable_master:
 	(void)dsi_core_clk_stop(m_clks);
+
+error_disable_master_resource:
+	(void)sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
 error:
 	return rc;
 }
@@ -547,14 +577,30 @@
 			continue;
 
 		rc = dsi_core_clk_stop(clk);
-		if (rc)
-			pr_err("failed to turn off clocks, rc=%d\n", rc);
+		if (rc) {
+			pr_debug("failed to turn off clocks, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = sde_power_resource_enable(clk->clks.phandle,
+				clk->clks.dsi_core_client, false);
+		if (rc) {
+			pr_err("Power resource disable failed: %d\n", rc);
+			goto error;
+		}
 	}
 
 	rc = dsi_core_clk_stop(m_clks);
-	if (rc)
+	if (rc) {
 		pr_err("failed to turn off master clocks, rc=%d\n", rc);
+		goto error;
+	}
 
+	rc = sde_power_resource_enable(m_clks->clks.phandle,
+				m_clks->clks.dsi_core_client, false);
+	if (rc)
+		pr_err("Power resource disable failed: %d\n", rc);
+error:
 	return rc;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index cd851bc..5df48c3 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -490,30 +490,26 @@
 
 	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
 	if (IS_ERR(core->mdp_core_clk)) {
-		rc = PTR_ERR(core->mdp_core_clk);
-		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
-		goto fail;
+		pr_debug("failed to get mdp_core_clk, rc=%ld\n",
+			 PTR_ERR(core->mdp_core_clk));
+		core->mdp_core_clk = NULL;
 	}
 
 	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
 	if (IS_ERR(core->iface_clk)) {
-		rc = PTR_ERR(core->iface_clk);
-		pr_err("failed to get iface_clk, rc=%d\n", rc);
-		goto fail;
+		pr_debug("failed to get iface_clk, rc=%ld\n",
+			 PTR_ERR(core->iface_clk));
+		core->iface_clk = NULL;
 	}
 
 	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
 	if (IS_ERR(core->core_mmss_clk)) {
-		rc = PTR_ERR(core->core_mmss_clk);
-		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
-		goto fail;
+		pr_debug("failed to get core_mmss_clk, rc=%ld\n",
+				PTR_ERR(core->core_mmss_clk));
+		core->core_mmss_clk = NULL;
 	}
 
 	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
 	if (IS_ERR(core->bus_clk)) {
-		rc = PTR_ERR(core->bus_clk);
-		pr_err("failed to get bus_clk, rc=%d\n", rc);
-		goto fail;
+		pr_debug("failed to get bus_clk, rc=%ld\n",
+				PTR_ERR(core->bus_clk));
+		core->bus_clk = NULL;
 	}
 
 	core->mnoc_clk = devm_clk_get(&pdev->dev, "mnoc_clk");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ddf791c..bcaf428 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2228,10 +2228,12 @@
 	struct dsi_display *display;
 	struct dsi_clk_info info;
 	struct clk_ctrl_cb clk_cb;
+	struct msm_drm_private *priv;
 	void *handle = NULL;
 	struct platform_device *pdev = to_platform_device(dev);
 	char *client1 = "dsi_clk_client";
 	char *client2 = "mdp_event_client";
+	char dsi_client_name[DSI_CLIENT_NAME_SIZE];
 	int i, rc = 0;
 
 	if (!dev || !pdev || !master) {
@@ -2247,6 +2249,7 @@
 				drm, display);
 		return -EINVAL;
 	}
+	priv = drm->dev_private;
 
 	mutex_lock(&display->display_lock);
 
@@ -2260,7 +2263,6 @@
 
 	for (i = 0; i < display->ctrl_count; i++) {
 		display_ctrl = &display->ctrl[i];
-
 		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
 		if (rc) {
 			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
@@ -2280,9 +2282,19 @@
 			sizeof(struct dsi_core_clk_info));
 		memcpy(&info.l_clks[i], &display_ctrl->ctrl->clk_info.link_clks,
 			sizeof(struct dsi_link_clk_info));
+		info.c_clks[i].phandle = &priv->phandle;
 		info.bus_handle[i] =
 			display_ctrl->ctrl->axi_bus_info.bus_handle;
 		info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
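+		/* one SDE power client is created per DSI controller */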
+		snprintf(dsi_client_name, DSI_CLIENT_NAME_SIZE,
+						"dsi_core_client%u", i);
+		info.c_clks[i].dsi_core_client = sde_power_client_create(
+				info.c_clks[i].phandle, dsi_client_name);
+		if (IS_ERR_OR_NULL(info.c_clks[i].dsi_core_client)) {
+			pr_err("[%s] client creation failed for ctrl[%d]",
+					dsi_client_name, i);
+			goto error_ctrl_deinit;
+		}
 	}
 
 	info.pre_clkoff_cb = dsi_pre_clkoff_cb;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index 89bba96..cfbb14ec 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -29,7 +29,7 @@
 #include "dsi_panel.h"
 
 #define MAX_DSI_CTRLS_PER_DISPLAY             2
-
+#define DSI_CLIENT_NAME_SIZE		20
 /*
  * DSI Validate Mode modifiers
  * @DSI_VALIDATE_FLAG_ALLOW_ADJUST:	Allow mode validation to also do fixup
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index ffb1b67..f79dc08 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -180,10 +180,16 @@
 		break;
 	case DRM_FORMAT_NV12:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier)) {
-			if (SDE_MODIFIER_IS_10B(drm_modifier))
-				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC;
-			else
+			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
+				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC;
+				else
+					*pixfmt =
+					SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC;
+			} else {
 				*pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_UBWC;
+			}
 		} else if (SDE_MODIFIER_IS_TILE(drm_modifier)) {
 			if (SDE_MODIFIER_IS_10B(drm_modifier)) {
 				if (SDE_MODIFIER_IS_TIGHT(drm_modifier))
@@ -452,6 +458,12 @@
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
 				DRM_FORMAT_MOD_QCOM_DX;
 		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*drm_pixfmt = DRM_FORMAT_NV12;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE |
+				DRM_FORMAT_MOD_QCOM_DX;
+		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
 		*drm_pixfmt = DRM_FORMAT_NV12;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE |
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f9c55ec..2eb947d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -38,6 +38,14 @@
 #include "sde_color_processing.h"
 #include "sde_hw_rot.h"
 
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+		"If set, active planes force their outputs to black by\n"
+		"temporarily enabling color fill when recovering from a\n"
+		"system resume, instead of attempting to display the last\n"
+		"provided frame buffer.");
+
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
@@ -2895,6 +2903,10 @@
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
+	/* force black color fill during suspend */
+	if (msm_is_suspend_state(plane->dev) && suspend_blank)
+		_sde_plane_color_fill(psde, 0x0, 0x0);
+
 	/* flag h/w flush complete */
 	if (plane->state)
 		to_sde_plane_state(plane->state)->pending = false;
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 4c172a4..4e262a3 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -21,6 +21,8 @@
 #define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA	64000
 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA	0
 
+#include <linux/sde_io_util.h>
+
 /**
  * mdss_bus_vote_type: register bus vote type
  * VOTE_INDEX_DISABLE: removes the client vote
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index f50dae9..7cdd2b2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -676,7 +676,7 @@
 #define A6XX_GMU_HOST_INTERRUPT_CLR		0x23B04
 #define A6XX_GMU_HOST_INTERRUPT_STATUS		0x23B05
 #define A6XX_GMU_HOST_INTERRUPT_MASK		0x23B06
-#define A6XX_GMU_GPU_CX_BUSY_STATUS		0x23B0C
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS	0x23B0C
 #define A6XX_GMU_AHB_FENCE_STATUS		0x23B13
 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS	0x23B15
 #define A6XX_GMU_AO_SPARE_CNTL			0x23B16
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 787ca6b..f0d8746 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -2243,6 +2243,11 @@
 {
 	const struct adreno_gpu_core *gpucore = adreno_dev->gpucore;
 	unsigned int reg_rbbm_status;
+	struct adreno_gpudev *gpudev  = ADRENO_GPU_DEVICE(adreno_dev);
+
+	/* use the hardware-specific idle check if the driver implements one */
+	if (gpudev->hw_isidle)
+		return gpudev->hw_isidle(adreno_dev);
 
 	if (adreno_is_a540(adreno_dev))
 		/**
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 1e08a5e..e23d6a0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -852,10 +852,10 @@
 				unsigned int clear_mask);
 	void (*oob_clear)(struct adreno_device *adreno_dev,
 				unsigned int clear_mask);
-	bool (*hw_isidle)(struct adreno_device *);
 	int (*rpmh_gpu_pwrctrl)(struct adreno_device *, unsigned int ops,
 				unsigned int arg1, unsigned int arg2);
-	bool (*gmu_isidle)(struct adreno_device *);
+	bool (*hw_isidle)(struct adreno_device *);
+	int (*wait_for_gmu_idle)(struct adreno_device *);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index ca0e79d..0211a17 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -193,6 +193,11 @@
 	unsigned int bit, mal, mode, glbl_inv;
 	unsigned int amsbc = 0;
 
+	/* adjust callbacks at runtime based on the feature set */
+	if (!kgsl_gmu_isenabled(device))
+		/* fall back to legacy idle management when the GMU is off */
+		ADRENO_GPU_DEVICE(adreno_dev)->hw_isidle = NULL;
+
 	adreno_vbif_start(adreno_dev, a6xx_vbif_platforms,
 			ARRAY_SIZE(a6xx_vbif_platforms));
 	/*
@@ -925,8 +930,6 @@
 	return ret;
 }
 
-#define GMU_POWER_STATE_SLUMBER 15
-
 /*
  * a6xx_notify_slumber() - initiate request to GMU to prepare to slumber
  * @device: Pointer to KGSL device
@@ -959,7 +962,7 @@
 		dev_err(&gmu->pdev->dev, "OOB set for slumber timed out\n");
 	else {
 		kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &state);
-		if (state != GMU_POWER_STATE_SLUMBER) {
+		if (state != GPU_HW_SLUMBER) {
 			dev_err(&gmu->pdev->dev,
 					"Failed to prepare for slumber\n");
 			ret = -EINVAL;
@@ -1258,29 +1261,35 @@
 	return ret;
 }
 
-static bool a6xx_gmu_isidle(struct adreno_device *adreno_dev)
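+/* the GPU is idle when the GPUBUSYIGNAHB bit of the AO CX busy status is clear */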
+static bool a6xx_hw_isidle(struct adreno_device *adreno_dev)
+{
+	unsigned int reg;
+
+	kgsl_gmu_regread(KGSL_DEVICE(adreno_dev),
+		A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg);
+	return ((~reg & GPUBUSYIGNAHB) != 0);
+}
+
+static int a6xx_wait_for_gmu_idle(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
 	struct gmu_device *gmu = &device->gmu;
-	unsigned int value;
 
-	/* Check if GMU on */
-	if (!(gmu->flags & GMU_CLK_ON))
-		return true;
+	if (timed_poll_check(device, A6XX_GMU_RPMH_POWER_STATE,
+		gmu->idle_level, GMU_START_TIMEOUT, 0xf)) {
+		dev_err(&gmu->pdev->dev,
+			"GMU is not going to powerstate %d\n",
+			gmu->idle_level);
+		return -ETIMEDOUT;
+	}
 
-	/* Ensure GPU is in its lowest power state */
-	kgsl_gmu_regread(device, A6XX_GMU_RPMH_POWER_STATE, &value);
+	if (timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
+		0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
+		dev_err(&gmu->pdev->dev, "GMU is not idling\n");
+		return -ETIMEDOUT;
+	}
 
-	if (value < gmu->idle_level)
-		return false;
-
-	/* Ensure GPU and GMU are both idle */
-	kgsl_gmu_regread(device->reg_virt, A6XX_GMU_GPU_CX_BUSY_STATUS,
-			&value);
-	if ((value & SLUMBER_CHECK_MASK) != SLUMBER_CHECK_MASK)
-		return false;
-
-	return true;
+	return 0;
 }
 
 /*
@@ -2040,5 +2049,6 @@
 	.oob_set = a6xx_oob_set,
 	.oob_clear = a6xx_oob_clear,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
-	.gmu_isidle = a6xx_gmu_isidle,
+	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
+	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle
 };
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 56e4f23..2e9f108 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -1086,7 +1086,7 @@
 
 	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
 
-	gmu->idle_level = GPU_HW_CGC;
+	gmu->idle_level = GPU_HW_ACTIVE;
 
 	return 0;
 
@@ -1312,7 +1312,11 @@
 	if (!test_bit(GMU_CLK_ON, &gmu->flags))
 		return;
 
-	/* TODO: Check for conditions to enter slumber */
+	if (gpudev->wait_for_gmu_idle &&
+		gpudev->wait_for_gmu_idle(adreno_dev)) {
+		dev_err(&gmu->pdev->dev, "Failed to stop the GMU\n");
+		return;
+	}
 
 	gpudev->rpmh_gpu_pwrctrl(adreno_dev, GMU_NOTIFY_SLUMBER, 0, 0);
 
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index b5c0c96..ac2c151 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -53,11 +53,9 @@
 				CX_VOTE_ENABLE		| \
 				GFX_VOTE_ENABLE)
 
-/* Bitmask for GMU idle status check */
-#define CXGX_CPUBUSY_IGNAHB_IDLE	BIT(30)
-#define GPUBUSY_IGNAHB_IDLE		BIT(23)
-#define SLUMBER_CHECK_MASK		(CXGX_CPUBUSY_IGNAHB_IDLE  | \
-					GPUBUSY_IGNAHB_IDLE)
+/* Bitmask for GPU idle status check */
+#define GPUBUSYIGNAHB		BIT(23)
+#define CXGXCPUBUSYIGNAHB	BIT(30)
 
 /* Constants for GMU OOBs */
 #define OOB_BOOT_OPTION         0
@@ -143,12 +141,13 @@
 };
 
 enum gpu_idle_level {
-	GPU_HW_ACTIVE,
-	GPU_HW_CGC,
-	GPU_HW_SPTP_PC,
-	GPU_HW_IFPC,
-	GPU_HW_NAP,
-	GPU_HW_MIN_VOLT,
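+	/* values mirror the GMU RPMH power state encoding */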
+	GPU_HW_ACTIVE = 0x0,
+	GPU_HW_SPTP_PC = 0x2,
+	GPU_HW_IFPC = 0x3,
+	GPU_HW_NAP = 0x4,
+	GPU_HW_MIN_VOLT = 0x5,
+	GPU_HW_MIN_DDR = 0x6,
+	GPU_HW_SLUMBER = 0xF
 };
 
 /**
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index c7abd9d..a0b53bb 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -98,6 +98,7 @@
  * @SDE_CAPS_R3_1P5_DOWNSCALE: 1.5x downscale rotator support
  * @SDE_CAPS_MIN_BUS_VOTE: minimum bus vote prior to power enable
  * @SDE_CAPS_SBUF_1: stream buffer support for inline rotation
+ * @SDE_CAPS_UBWC_2: universal bandwidth compression version 2
  */
 enum sde_caps_settings {
 	SDE_CAPS_R1_WB,
@@ -106,6 +107,7 @@
 	SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 	SDE_CAPS_MIN_BUS_VOTE,
 	SDE_CAPS_SBUF_1,
+	SDE_CAPS_UBWC_2,
 	SDE_CAPS_MAX,
 };
 
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
index c78c513..573e0a8 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -284,6 +284,27 @@
 		},
 	},
 	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC),
+			.description = "SDE/Y_CBCR_H2V2_P010_UBWC",
+			.flag = 0,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_UBWC
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
+	{
 		.mdp_format =
 			FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_TILE,
 			"SDE/RGBA_1010102_TILE",
@@ -517,6 +538,27 @@
 			.tile_width = 16,
 		},
 	},
+	{
+		.mdp_format = {
+			FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE),
+			.description = "SDE/Y_CBCR_H2V2_P010_TILE",
+			.flag = SDE_MDP_FORMAT_FLAG_PRIVATE,
+			.fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+			.chroma_sample = SDE_MDP_CHROMA_420,
+			.unpack_count = 2,
+			.bpp = 2,
+			.frame_format = SDE_MDP_FMT_TILE_A5X,
+			.pixel_mode = SDE_MDP_PIXEL_10BIT,
+			.element = { C1_B_Cb, C2_R_Cr },
+			.unpack_tight = 0,
+			.unpack_align_msb = 1,
+			.is_ubwc = SDE_MDP_COMPRESS_NONE,
+		},
+		.micro = {
+			.tile_height = 4,
+			.tile_width = 32,
+		},
+	},
 };
 
 static struct sde_mdp_format_params sde_mdp_format_map[] = {
@@ -853,6 +895,11 @@
 	case SDE_PIX_FMT_BGRX_1010102_TILE:
 		*dst_pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
 		break;
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE:
+	case SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC:
+		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE;
+		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10:
 	case SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC:
 		*dst_pixfmt = SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 4278b6d..27e2d28 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -52,6 +52,8 @@
 
 /* default stream buffer headroom in lines */
 #define DEFAULT_SBUF_HEADROOM	20
+#define DEFAULT_UBWC_MALSIZE	1
+#define DEFAULT_UBWC_SWIZZLE	1
 
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
@@ -278,6 +280,8 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
 	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
 	SDE_PIX_FMT_XRGB_8888_TILE,
@@ -356,6 +360,8 @@
 	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
+	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
 	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
 	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
 	SDE_PIX_FMT_XRGB_8888_TILE,
@@ -877,6 +883,12 @@
 			SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
 			((rot->highest_bank & 0x3) << 18));
 
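+	/* v2 hardware takes malsize/bank/swizzle from the static ctrl word */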
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(31) |
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
+
 	/* setup source buffer plane security status */
 	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
 			SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
@@ -1009,6 +1021,12 @@
 	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
 			(ctx->rot->highest_bank & 0x3) << 8);
 
+	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
+		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
+				((ctx->rot->ubwc_malsize & 0x3) << 8) |
+				((ctx->rot->highest_bank & 0x3) << 4) |
+				((ctx->rot->ubwc_swizzle & 0x1) << 0));
+
 	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
 		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
 				ctx->sys_cache_mode);
@@ -2212,6 +2230,7 @@
 		SDEROT_DBG("Supporting sys cache inline rotation\n");
 		set_bit(SDE_CAPS_MIN_BUS_VOTE,  mdata->sde_caps_map);
 		set_bit(SDE_CAPS_SBUF_1,  mdata->sde_caps_map);
+		set_bit(SDE_CAPS_UBWC_2,  mdata->sde_caps_map);
 		rot->inpixfmts = sde_hw_rotator_v4_inpixfmts;
 		rot->num_inpixfmt = ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
 		rot->outpixfmts = sde_hw_rotator_v4_outpixfmts;
@@ -2739,6 +2758,26 @@
 	}
 
 	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc-malsize", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
+	} else {
+		SDEROT_DBG("set ubwc malsize to %d\n", data);
+		hw_data->ubwc_malsize = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,sde-ubwc_swizzle", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
+	} else {
+		SDEROT_DBG("set ubwc swizzle to %d\n", data);
+		hw_data->ubwc_swizzle = data;
+	}
+
+	ret = of_property_read_u32(dev->dev.of_node,
 			"qcom,mdss-sbuf-headroom", &data);
 	if (ret) {
 		ret = 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index f86f54b..dc97bdf 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -70,6 +70,7 @@
 #define ROT_SSPP_SRC_UNPACK_PATTERN             (SDE_ROT_SSPP_OFFSET+0x34)
 #define ROT_SSPP_SRC_OP_MODE                    (SDE_ROT_SSPP_OFFSET+0x38)
 #define ROT_SSPP_SRC_CONSTANT_COLOR             (SDE_ROT_SSPP_OFFSET+0x3C)
+#define ROT_SSPP_UBWC_STATIC_CTRL               (SDE_ROT_SSPP_OFFSET+0x44)
 #define ROT_SSPP_FETCH_CONFIG                   (SDE_ROT_SSPP_OFFSET+0x48)
 #define ROT_SSPP_VC1_RANGE                      (SDE_ROT_SSPP_OFFSET+0x4C)
 #define ROT_SSPP_REQPRIORITY_FIFO_WATERMARK_0   (SDE_ROT_SSPP_OFFSET+0x50)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index c011d7a..d1607d9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -248,6 +248,9 @@
  * struct sde_hw_rotator : Rotator description
  * @hw:           mdp register mapped offset
  * @ops:          pointer to operations possible for the rotator HW
+ * @highest_bank: highest bank size of memory
+ * @ubwc_malsize: ubwc minimum allowable length
+ * @ubwc_swizzle: ubwc swizzle enable
  * @sbuf_headroom: stream buffer headroom in lines
  * @solid_fill: true if solid fill is requested
  * @constant_color: solid fill constant color
@@ -296,6 +299,8 @@
 	void *swts_buffer;
 
 	u32    highest_bank;
+	u32    ubwc_malsize;
+	u32    ubwc_swizzle;
 	u32    sbuf_headroom;
 	u32    solid_fill;
 	u32    constant_color;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 4cf9dfc..9ef4282 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -210,6 +210,32 @@
 		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
 		ps->plane_size[3] = ALIGN(ps->ystride[3] *
 			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
+	} else if (sde_mdp_is_p010_format(fmt)) {
+		ps->num_planes = 2;
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = ALIGN(width * 2, 256);
+		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 16),
+					4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = ALIGN(width * 2, 256);
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+			ALIGN(height / 2, 16), 4096);
+
+		if (!sde_mdp_is_ubwc_format(fmt))
+			goto done;
+
+		ps->num_planes += 2;
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] *
+			ALIGN(DIV_ROUND_UP(height / 2, 4), 16), 4096);
 	} else if (sde_mdp_is_tp10_format(fmt)) {
 		u32 yWidth   = sde_mdp_general_align(width, 192);
 		u32 yHeight  = ALIGN(height, 16);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index ce5a7dc..5818986 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1322,6 +1322,8 @@
 					descr = "Y/CbCr 4:2:0 P10"; break;
 	case V4L2_PIX_FMT_NV12_TP10_UBWC:
 					descr = "Y/CbCr 4:2:0 TP10 UBWC"; break;
+	case V4L2_PIX_FMT_NV12_P010_UBWC:
+					descr = "Y/CbCr 4:2:0 P010 UBWC"; break;
 
 	default:
 		/* Compressed formats */
diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c
index 1a50f37..d747408 100644
--- a/drivers/mfd/wcd9xxx-irq.c
+++ b/drivers/mfd/wcd9xxx-irq.c
@@ -704,20 +704,27 @@
 
 static int wcd9xxx_irq_probe(struct platform_device *pdev)
 {
-	int irq;
+	int irq, dir_apps_irq = -EINVAL;
 	struct wcd9xxx_irq_drv_data *data;
 	struct device_node *node = pdev->dev.of_node;
 	int ret = -EINVAL;
 
 	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
-	if (!gpio_is_valid(irq)) {
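+	/* fall back to a direct apps interrupt when no TLMM gpio is given */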
+	if (!gpio_is_valid(irq))
+		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");
+
+	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
 		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
 		return -EPROBE_DEFER;
 	}
-	irq = gpio_to_irq(irq);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "Unable to configure irq\n");
-		return irq;
+	if (dir_apps_irq > 0) {
+		irq = dir_apps_irq;
+	} else {
+		irq = gpio_to_irq(irq);
+		if (irq < 0) {
+			dev_err(&pdev->dev, "Unable to configure irq\n");
+			return irq;
+		}
 	}
 	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
 	data = wcd9xxx_irq_add_domain(node, node->parent);
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index fff8966..b7685cb 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -415,6 +415,16 @@
 {
 	uint32_t val;
 
+	/*
+	 * Allocate new events for this channel first, before
+	 * submitting the new TREs. For TO_GSI channels the event
+	 * ring doorbell is rung as part of interrupt handling.
+	 */
+	if (ctx->evtr && ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		gsi_ring_evt_doorbell(ctx->evtr);
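+	/* remember the wp being rung so redundant doorbells can be skipped */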
+	ctx->ring.wp = ctx->ring.wp_local;
+
 	/* write order MUST be MSB followed by LSB */
 	val = ((ctx->ring.wp_local >> 32) &
 			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
@@ -470,8 +480,8 @@
 			cntr = 0;
 			rp = gsi_readl(gsi_ctx->base +
 				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
-			rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-				GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(i, ee))) << 32;
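+			/*
+			 * only the LSB register is read; the MSB comes from
+			 * the cached rp since a ring is validated to stay
+			 * within a single 4GB region
+			 */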
+			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 			ctx->ring.rp = rp;
 			while (ctx->ring.rp_local != rp) {
 				++cntr;
@@ -1529,6 +1539,7 @@
 static int gsi_validate_channel_props(struct gsi_chan_props *props)
 {
 	uint64_t ra;
+	uint64_t last;
 
 	if (props->ch_id >= gsi_ctx->max_ch) {
 		GSIERR("ch_id %u invalid\n", props->ch_id);
@@ -1556,6 +1567,17 @@
 		return -GSI_STATUS_INVALID_PARAMS;
 	}
 
+	last = props->ring_base_addr + props->ring_len - props->re_size;
+
+	/* MSB should stay same within the ring */
+	if ((props->ring_base_addr & 0xFFFFFFFF00000000ULL) !=
+	    (last & 0xFFFFFFFF00000000ULL)) {
+		GSIERR("MSB is not fixed on ring base 0x%llx size 0x%x\n",
+			props->ring_base_addr,
+			props->ring_len);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
 	if (props->prot == GSI_CHAN_PROT_GPI &&
 			!props->ring_base_vaddr) {
 		GSIERR("protocol %u requires ring base VA\n", props->prot);
@@ -2128,29 +2150,22 @@
 		uint16_t *num_free_re)
 {
 	uint16_t start;
-	uint16_t start_hw;
 	uint16_t end;
 	uint64_t rp;
-	uint64_t rp_hw;
 	int ee = gsi_ctx->per.ee;
 	uint16_t used;
-	uint16_t used_hw;
-
-	rp_hw = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
-		<< 32;
 
 	if (!ctx->evtr) {
-		rp = rp_hw;
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+
 		ctx->ring.rp = rp;
 	} else {
 		rp = ctx->ring.rp_local;
 	}
 
 	start = gsi_find_idx_from_addr(&ctx->ring, rp);
-	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
 	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
 
 	if (end >= start)
@@ -2158,13 +2173,7 @@
 	else
 		used = ctx->ring.max_num_elem + 1 - (start - end);
 
-	if (end >= start_hw)
-		used_hw = end - start_hw;
-	else
-		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end);
-
 	*num_free_re = ctx->ring.max_num_elem - used;
-	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
 int gsi_query_channel_info(unsigned long chan_hdl,
@@ -2274,14 +2283,12 @@
 
 	rp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
 	ctx->ring.rp = rp;
 
 	wp = gsi_readl(gsi_ctx->base +
 		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
-	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	wp |= ctx->ring.wp & 0xFFFFFFFF00000000;
 	ctx->ring.wp = wp;
 
 	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
@@ -2353,6 +2360,8 @@
 			tre.re_type = GSI_RE_XFER;
 		} else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
 			tre.re_type = GSI_RE_IMMD_CMD;
+		} else if (xfer[i].type == GSI_XFER_ELEM_NOP) {
+			tre.re_type = GSI_RE_NOP;
 		} else {
 			GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
 				xfer[i].type);
@@ -2420,6 +2429,9 @@
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
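+	/* nothing new was queued since the last ring; skip the doorbell */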
+	if (ctx->ring.wp == ctx->ring.wp_local)
+		return GSI_STATUS_SUCCESS;
+
 	gsi_ring_chan_doorbell(ctx);
 
 	return GSI_STATUS_SUCCESS;
@@ -2457,19 +2469,22 @@
 	}
 
 	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
-	rp = gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
-	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
-		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) << 32;
-	ctx->evtr->ring.rp = rp;
-	if (rp == ctx->evtr->ring.rp_local) {
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
+		/* update rp to see if we have anything new to process */
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ctx->evtr->ring.rp & 0xFFFFFFFF00000000;
+
+		ctx->evtr->ring.rp = rp;
+	}
+
+	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
 		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 		ctx->stats.poll_empty++;
 		return GSI_STATUS_POLL_EMPTY;
 	}
 
 	gsi_process_evt_re(ctx->evtr, notify, false);
-	gsi_ring_evt_doorbell(ctx->evtr);
 	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
 	ctx->stats.poll_ok++;
 
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index f53a4bd..32fb178 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -209,6 +209,7 @@
 enum gsi_re_type {
 	GSI_RE_XFER = 0x2,
 	GSI_RE_IMMD_CMD = 0x3,
+	GSI_RE_NOP = 0x4,
 };
 
 struct __packed gsi_tre {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 717c8917..b1d1dfa 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -490,11 +490,6 @@
 		goto error;
 	}
 
-	if (gsi_ctx->chan[ch_id].props.prot == GSI_CHAN_PROT_GPI) {
-		TERR("valid for non GPI channels only\n");
-		goto error;
-	}
-
 	if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
 		TERR("ch_%d: already enabled/disabled\n", ch_id);
 		return -EFAULT;
@@ -631,7 +626,7 @@
 	else
 		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
 
-	TERR("ch %d used %d\n", ctx->props.ch_id, used_hw);
+	TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw);
 	gsi_update_ch_dp_stats(ctx, used_hw);
 }
 
@@ -641,7 +636,6 @@
 
 	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
 		if (gsi_ctx->chan[ch_id].allocated &&
-		    gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
 		    gsi_ctx->chan[ch_id].enable_dp_stats)
 			gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
 	}
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 5633e1f..266c0a2 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -213,6 +213,7 @@
 	bool tx_blocked_signal_sent;
 	struct kthread_work kwork;
 	struct kthread_worker kworker;
+	struct work_struct wakeup_work;
 	struct task_struct *task;
 	struct tasklet_struct tasklet;
 	struct srcu_struct use_ref;
@@ -874,20 +875,10 @@
 		srcu_read_unlock(&einfo->use_ref, rcu_id);
 		return;
 	}
-	if (!atomic_ctx) {
-		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
-			einfo->tx_resume_needed = false;
-			einfo->xprt_if.glink_core_if_ptr->tx_resume(
-							&einfo->xprt_if);
-		}
-		spin_lock_irqsave(&einfo->write_lock, flags);
-		if (einfo->tx_blocked_signal_sent) {
-			wake_up_all(&einfo->tx_blocked_queue);
-			einfo->tx_blocked_signal_sent = false;
-		}
-		spin_unlock_irqrestore(&einfo->write_lock, flags);
-	}
 
+	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
+		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
+		schedule_work(&einfo->wakeup_work);
 
 	/*
 	 * Access to the fifo needs to be synchronized, however only the calls
@@ -1195,6 +1186,39 @@
 }
 
 /**
+ * tx_wakeup_worker() - worker function to wake up a tx-blocked thread
+ * @work:	work item associated with the edge whose tx is blocked.
+ */
+static void tx_wakeup_worker(struct work_struct *work)
+{
+	struct edge_info *einfo;
+	bool trigger_wakeup = false;
+	unsigned long flags;
+	int rcu_id;
+
+	einfo = container_of(work, struct edge_info, wakeup_work);
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+		einfo->tx_resume_needed = false;
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(
+						&einfo->xprt_if);
+	}
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
+		einfo->tx_blocked_signal_sent = false;
+		trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+	if (trigger_wakeup)
+		wake_up_all(&einfo->tx_blocked_queue);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
  * rx_worker() - worker function to process received commands
  * @work:	kwork associated with the edge to process commands on.
  */
@@ -2303,6 +2327,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2402,6 +2427,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2489,6 +2515,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->intentless = true;
 	einfo->read_from_fifo = memcpy32_fromio;
@@ -2649,6 +2676,7 @@
 reg_xprt_fail:
 toc_init_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2780,6 +2808,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2900,6 +2929,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 9a9d73b..458e39d 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -43,8 +43,14 @@
 #define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
 #define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
 #define PIL_SUBSYSTEM_NAME_LEN 32
-#define DEFAULT_NUM_INTENTS 5
-#define DEFAULT_RX_INTENT_SIZE 2048
+
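+/* rx intents are tiered: many small, a few medium and large buffers */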
+#define MAX_NUM_LO_INTENTS 5
+#define MAX_NUM_MD_INTENTS 3
+#define MAX_NUM_HI_INTENTS 2
+#define LO_RX_INTENT_SIZE 2048
+#define MD_RX_INTENT_SIZE 8192
+#define HI_RX_INTENT_SIZE (17 * 1024)
+
 /**
  * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
  * @list: IPC router's GLINK XPRT list.
@@ -82,6 +88,9 @@
 	unsigned int xprt_version;
 	unsigned int xprt_option;
 	bool disable_pil_loading;
+	uint32_t cur_lo_intents_cnt;
+	uint32_t cur_md_intents_cnt;
+	uint32_t cur_hi_intents_cnt;
 };
 
 struct ipc_router_glink_xprt_work {
@@ -342,7 +351,7 @@
 	}
 
 	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
-	if (rx_work->iovec_size <= DEFAULT_RX_INTENT_SIZE)
+	if (rx_work->iovec_size <= HI_RX_INTENT_SIZE)
 		reuse_intent = true;
 
 	pkt = glink_xprt_copy_data(rx_work);
@@ -371,9 +380,14 @@
 				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 	D("%s: Notified IPC Router of %s OPEN\n",
 	  __func__, glink_xprtp->xprt.name);
-	for (i = 0; i < DEFAULT_NUM_INTENTS; i++)
+	glink_xprtp->cur_lo_intents_cnt = 0;
+	glink_xprtp->cur_md_intents_cnt = 0;
+	glink_xprtp->cur_hi_intents_cnt = 0;
+	for (i = 0; i < MAX_NUM_LO_INTENTS; i++) {
 		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-				      DEFAULT_RX_INTENT_SIZE);
+				      LO_RX_INTENT_SIZE);
+		glink_xprtp->cur_lo_intents_cnt++;
+	}
 	kfree(xprt_work);
 }
 
@@ -394,13 +408,32 @@
 
 static void glink_xprt_qrx_intent_worker(struct work_struct *work)
 {
+	size_t sz;
 	struct queue_rx_intent_work *qrx_intent_work =
 		container_of(work, struct queue_rx_intent_work, work);
 	struct ipc_router_glink_xprt *glink_xprtp =
 					qrx_intent_work->glink_xprtp;
+	uint32_t *cnt = NULL;
+	int ret;
 
-	glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-			      qrx_intent_work->intent_size);
+	sz = qrx_intent_work->intent_size;
+	if (sz <= MD_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_md_intents_cnt >= MAX_NUM_MD_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = MD_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_md_intents_cnt;
+	} else if (sz <= HI_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_hi_intents_cnt >= MAX_NUM_HI_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = HI_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_hi_intents_cnt;
+	}
+
+	ret = glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+					sz);
+	if (!ret && cnt)
+		(*cnt)++;
+qrx_intent_worker_out:
 	kfree(qrx_intent_work);
 }
 
@@ -470,7 +503,7 @@
 	struct ipc_router_glink_xprt *glink_xprtp =
 		(struct ipc_router_glink_xprt *)priv;
 
-	if (sz <= DEFAULT_RX_INTENT_SIZE)
+	if (sz <= LO_RX_INTENT_SIZE)
 		return true;
 
 	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 38d29e4..2471d27 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -502,13 +502,21 @@
 				struct queue_rx_intent_work, work);
 	struct glink_pkt_dev *devp = work_item->devp;
 
-	if (!devp || !devp->handle) {
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device\n", __func__);
+		kfree(work_item);
+		return;
+	}
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
 		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		mutex_unlock(&devp->ch_lock);
 		kfree(work_item);
 		return;
 	}
 
 	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	mutex_unlock(&devp->ch_lock);
 	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
 				__func__, work_item->intent_size, ret);
 	if (ret)
@@ -664,8 +672,15 @@
 	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
 
 	ret = copy_to_user(buf, pkt->data, pkt->size);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_to_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, pkt->size);
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_add_tail(&pkt->list, &devp->pkt_list);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -EFAULT;
+	}
 
 	ret = pkt->size;
 	glink_rx_done(devp->handle, pkt->data, false);
@@ -739,8 +754,13 @@
 	}
 
 	ret = copy_from_user(data, buf, count);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, count);
+		kfree(data);
+		return -EFAULT;
+	}
 
 	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
 	if (ret) {
@@ -1038,6 +1058,27 @@
 }
 
 /**
+ * pop_rx_pkt() - return the first packet from the rx pkt_list
+ * @devp:	pointer to the G-Link packet device.
+ *
+ * This function returns the first item from the rx pkt_list, or NULL if the
+ * list is empty.
+ */
+struct glink_rx_pkt *pop_rx_pkt(struct glink_pkt_dev *devp)
+{
+	unsigned long flags;
+	struct glink_rx_pkt *pkt = NULL;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	if (!list_empty(&devp->pkt_list)) {
+		pkt = list_first_entry(&devp->pkt_list,
+				struct glink_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return pkt;
+}
+
+/**
  * glink_pkt_release() - release operation on glink_pkt device
  * inode:	Pointer to the inode structure.
  * file:	Pointer to the file structure.
@@ -1051,6 +1092,7 @@
 	int ret = 0;
 	struct glink_pkt_dev *devp = file->private_data;
 	unsigned long flags;
+	struct glink_rx_pkt *pkt;
 
 	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
 			__func__, devp->i, current->comm, devp->ref_cnt);
@@ -1059,9 +1101,14 @@
 		devp->ref_cnt--;
 
 	if (devp->handle && devp->ref_cnt == 0) {
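+		/* return any undelivered rx packets to g-link before closing */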
+		while ((pkt = pop_rx_pkt(devp))) {
+			glink_rx_done(devp->handle, pkt->data, false);
+			kfree(pkt);
+		}
 		wake_up(&devp->ch_read_wait_queue);
 		wake_up_interruptible(&devp->ch_opened_wait_queue);
 		ret = glink_close(devp->handle);
+		devp->handle = NULL;
 		if (ret)  {
 			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
 						__func__, ret);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index c2fb37b..1bbd751 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@
 static void *smem_ramdump_dev;
 static DEFINE_MUTEX(spinlock_init_lock);
 static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
 static int smem_module_inited;
 static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
 static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -374,7 +375,7 @@
 	uint32_t a_hdr_size;
 	int rc;
 
-	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
 					flags, skip_init_check, use_rspinlock);
 
 	if (!skip_init_check && !smem_initialized_check())
@@ -817,7 +818,7 @@
 void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
 							unsigned int flags)
 {
-	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);
 
 	/*
 	 * Handle the circular dependecy between SMEM and software implemented
@@ -1084,7 +1085,8 @@
 	void *handle;
 	struct restart_notifier_block *nb;
 
-	smem_ramdump_dev = create_ramdump_device("smem", NULL);
+	if (smem_dev)
+		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
 	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
 		LOG_ERR("%s: Unable to create smem ramdump device.\n",
 			__func__);
@@ -1509,7 +1511,7 @@
 		SMEM_INFO("smem security enabled\n");
 		smem_init_security();
 	}
-
+	smem_dev = &pdev->dev;
 	probe_done = true;
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 9c3f9431..0b952a4 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,7 @@
 		.ei_array	= NULL,
 	},
 };
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
 
 struct elem_info qmi_error_resp_type_v01_ei[] = {
 	{
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 10caf22..8e9ad9b 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -266,10 +266,12 @@
 		if (!domains_read) {
 			db_rev_count = pd->db_rev_count = resp->db_rev_count;
 			pd->total_domains = resp->total_domains;
-			if (!pd->total_domains && resp->domain_list_len) {
-				pr_err("total domains not set\n");
-				pd->total_domains = resp->domain_list_len;
+			if (!resp->total_domains) {
+				pr_err("No matching domains found\n");
+				rc = -EIO;
+				goto out;
 			}
+
 			pd->domain_list = kmalloc(
 					sizeof(struct servreg_loc_entry_v01) *
 					resp->total_domains, GFP_KERNEL);
@@ -286,6 +288,10 @@
 			rc = -EAGAIN;
 			goto out;
 		}
+		if (resp->domain_list_len > resp->total_domains) {
+			/* never consume more entries than total_domains */
+			resp->domain_list_len = resp->total_domains;
+		}
 		/* Copy the response*/
 		store_get_domain_list_response(pd, resp, domains_read);
 		domains_read += resp->domain_list_len;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 226b0b4ac..2240be5 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -2172,6 +2172,8 @@
 	.n_mcgrps = ARRAY_SIZE(thermal_event_mcgrps),
 };
 
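+/* drop events until the genl family has been registered */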
+static int allow_netlink_events;
+
 int thermal_generate_netlink_event(struct thermal_zone_device *tz,
 					enum events event)
 {
@@ -2186,6 +2188,9 @@
 	if (!tz)
 		return -EINVAL;
 
+	if (!allow_netlink_events)
+		return -ENODEV;
+
 	/* allocate memory */
 	size = nla_total_size(sizeof(struct thermal_genl_event)) +
 	       nla_total_size(0);
@@ -2237,7 +2242,13 @@
 
 static int genetlink_init(void)
 {
-	return genl_register_family(&thermal_event_genl_family);
+	int ret;
+
+	ret = genl_register_family(&thermal_event_genl_family);
+	if (!ret)
+		allow_netlink_events = true;
+
+	return ret;
 }
 
 static void genetlink_exit(void)
@@ -2247,6 +2258,8 @@
 #else /* !CONFIG_NET */
 static inline int genetlink_init(void) { return 0; }
 static inline void genetlink_exit(void) {}
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+		enum events event) { return -ENODEV; }
 #endif /* !CONFIG_NET */
 
 static int __init thermal_register_governors(void)
@@ -2318,19 +2331,15 @@
 
 	result = thermal_register_governors();
 	if (result)
-		goto error;
+		goto init_exit;
 
 	result = class_register(&thermal_class);
 	if (result)
 		goto unregister_governors;
 
-	result = genetlink_init();
-	if (result)
-		goto unregister_class;
-
 	result = of_parse_thermal_zones();
 	if (result)
-		goto exit_netlink;
+		goto exit_zone_parse;
 
 	result = register_pm_notifier(&thermal_pm_nb);
 	if (result)
@@ -2339,13 +2348,11 @@
 
 	return 0;
 
-exit_netlink:
-	genetlink_exit();
-unregister_class:
+exit_zone_parse:
 	class_unregister(&thermal_class);
 unregister_governors:
 	thermal_unregister_governors();
-error:
+init_exit:
 	idr_destroy(&thermal_tz_idr);
 	idr_destroy(&thermal_cdev_idr);
 	mutex_destroy(&thermal_idr_lock);
@@ -2368,5 +2375,19 @@
 	mutex_destroy(&thermal_governor_lock);
 }
 
-fs_initcall(thermal_init);
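+/*
+ * genl registration needs the networking core, so the netlink piece is
+ * registered from a later initcall than the core thermal init.
+ */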
+static int __init thermal_netlink_init(void)
+{
+	int ret = 0;
+
+	ret = genetlink_init();
+	if (!ret)
+		goto exit_netlink;
+
+	thermal_exit();
+exit_netlink:
+	return ret;
+}
+
+subsys_initcall(thermal_init);
+fs_initcall(thermal_netlink_init);
 module_exit(thermal_exit);
diff --git a/include/dt-bindings/clock/qcom,cpucc-sdm845.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
index c1ff2a0..db3c940 100644
--- a/include/dt-bindings/clock/qcom,cpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,18 +14,18 @@
 #ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
-#define L3_CLUSTER0_VOTE_CLK					0
-#define L3_CLUSTER1_VOTE_CLK					1
-#define L3_CLK							2
-#define CPU0_PWRCL_CLK						3
-#define CPU1_PWRCL_CLK						4
-#define CPU2_PWRCL_CLK						5
-#define CPU3_PWRCL_CLK						6
-#define PWRCL_CLK						7
-#define CPU4_PERFCL_CLK						8
-#define CPU5_PERFCL_CLK						9
-#define CPU6_PERFCL_CLK						10
-#define CPU7_PERFCL_CLK						11
-#define PERFCL_CLK						12
+#define L3_CLK							0
+#define PWRCL_CLK						1
+#define PERFCL_CLK						2
+#define L3_CLUSTER0_VOTE_CLK					3
+#define L3_CLUSTER1_VOTE_CLK					4
+#define CPU0_PWRCL_CLK						5
+#define CPU1_PWRCL_CLK						6
+#define CPU2_PWRCL_CLK						7
+#define CPU3_PWRCL_CLK						8
+#define CPU4_PERFCL_CLK						9
+#define CPU5_PERFCL_CLK						10
+#define CPU6_PERFCL_CLK						11
+#define CPU7_PERFCL_CLK						12
 
 #endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 541b10e..f5d2f72 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -367,6 +367,7 @@
 enum gsi_xfer_elem_type {
 	GSI_XFER_ELEM_DATA,
 	GSI_XFER_ELEM_IMME_CMD,
+	GSI_XFER_ELEM_NOP,
 };
 
 /**
@@ -409,6 +410,7 @@
  *
  *		    GSI_XFER_ELEM_DATA: for all data transfers
  *		    GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
+ *		    GSI_XFER_ELEM_NOP: for event generation only
  *
  * @xfer_user_data: cookie used in xfer_cb
  *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45b5f91..867de7d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3885,6 +3885,7 @@
 #define SCHED_CPUFREQ_RT	(1U << 0)
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
 
 #define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
 
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index d7cd961..693fceb 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -60,6 +60,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 539b25a..0ee910d 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -96,6 +96,27 @@
 		__entry->dst_nid,
 		__entry->nr_pages)
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fd379ec..86cb858 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -539,6 +539,7 @@
 #define V4L2_PIX_FMT_NV12_UBWC        v4l2_fourcc('Q', '1', '2', '8')
 /* UBWC 10-bit Y/CbCr 4:2:0 */
 #define V4L2_PIX_FMT_NV12_TP10_UBWC   v4l2_fourcc('Q', '1', '2', 'A')
+#define V4L2_PIX_FMT_NV12_P010_UBWC   v4l2_fourcc('Q', '1', '2', 'B')
 
 /* two non contiguous planes - one Y, one Cr + Cb interleaved  */
 #define V4L2_PIX_FMT_NV12M   v4l2_fourcc('N', 'M', '1', '2') /* 12  Y/CbCr 4:2:0  */
diff --git a/include/uapi/media/msm_sde_rotator.h b/include/uapi/media/msm_sde_rotator.h
index 790135a..212eb26 100644
--- a/include/uapi/media/msm_sde_rotator.h
+++ b/include/uapi/media/msm_sde_rotator.h
@@ -63,6 +63,7 @@
 #define SDE_PIX_FMT_Y_CBCR_H2V2_P010	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10	V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_TP10
 #define SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC	V4L2_PIX_FMT_NV12_TP10_UBWC
+#define SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC	V4L2_PIX_FMT_NV12_P010_UBWC
 
 /*
  * struct msm_sde_rotator_fence - v4l2 buffer fence info
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe084ef..f7f5256 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2172,6 +2172,7 @@
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
 	raw_spin_unlock(&rq->lock);
 
 	rcu_read_lock();
@@ -2264,6 +2265,7 @@
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
 	}
@@ -3370,6 +3372,8 @@
 
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+	cpufreq_update_util(rq, 0);
 	early_notif = early_detection_notify(rq, wallclock);
 
 	raw_spin_unlock(&rq->lock);
@@ -3704,6 +3708,7 @@
 	if (likely(prev != next)) {
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		if (!is_idle_task(prev) && !prev->on_rq)
 			update_avg_burst(prev);
 
@@ -3717,6 +3722,7 @@
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		lockdep_unpin_lock(&rq->lock, cookie);
 		raw_spin_unlock_irq(&rq->lock);
 	}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index c0adf4e..e43344f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3610,6 +3610,11 @@
 
 	migrate_top_tasks(p, src_rq, dest_rq);
 
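+	/*
+	 * An inter-cluster migration moves load between frequency domains,
+	 * so poke the governor on both the source and destination.
+	 */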
+	if (!same_freq_domain(new_cpu, task_cpu(p))) {
+		cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+		cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+	}
+
 	if (p == src_rq->ed_task) {
 		src_rq->ed_task = NULL;
 		if (!dest_rq->ed_task)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e7f6794..5e25011 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -79,6 +79,7 @@
 	u64 time;
 };
 
+extern unsigned int sched_disable_window_stats;
 #endif /* CONFIG_SCHED_HMP */
 
 
@@ -770,6 +771,7 @@
 
 	int cstate, wakeup_latency, wakeup_energy;
 	u64 window_start;
+	u64 load_reported_window;
 	unsigned long hmp_flags;
 
 	u64 cur_irqload;
@@ -2142,6 +2144,18 @@
 {
 	struct update_util_data *data;
 
+#ifdef CONFIG_SCHED_HMP
+	/*
+	 * Skip if we've already reported, but not if this is an inter-cluster
+	 * migration
+	 */
+	if (!sched_disable_window_stats &&
+		(rq->load_reported_window == rq->window_start) &&
+		!(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+		return;
+	rq->load_reported_window = rq->window_start;
+#endif
+
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
 		data->func(data, rq_clock(rq), flags);
diff --git a/lib/Makefile b/lib/Makefile
index e0eb131..6bde16d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,6 +31,8 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
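+# bit-search helpers are called from hot paths; skip KASAN instrumentation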
+KASAN_SANITIZE_find_bit.o := n
+
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/mm/cma.c b/mm/cma.c
index c960459..0306bab 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -134,6 +134,10 @@
 	spin_lock_init(&cma->mem_head_lock);
 #endif
 
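+	/* remove the reserved range from kmemleak's object tree */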
+	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
+		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
+				cma->count << PAGE_SHIFT);
+
 	return 0;
 
 err:
@@ -380,6 +384,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -420,6 +426,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index cbb1e5e..91e1653 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3050,7 +3050,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+	rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/migrate.c b/mm/migrate.c
index 66ce6b4..f49de3cf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1319,6 +1319,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/slub.c b/mm/slub.c
index 2b01429..30be24b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1673,6 +1673,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
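+	/* unpoison the pages before returning them to the page allocator */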
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3881,6 +3882,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index cdf372f..e057887 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -2798,6 +2798,9 @@
 	if (!port_ptr || !name)
 		return -EINVAL;
 
+	if (port_ptr->type != CLIENT_PORT)
+		return -EINVAL;
+
 	if (name->addrtype != MSM_IPC_ADDR_NAME)
 		return -EINVAL;
 
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
index a84fc11..02242a1 100644
--- a/net/ipc_router/ipc_router_socket.c
+++ b/net/ipc_router/ipc_router_socket.c
@@ -543,10 +543,18 @@
 static int msm_ipc_router_close(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct msm_ipc_port *port_ptr;
 	int ret;
 
+	if (!sk)
+		return -EINVAL;
+
 	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
 	ret = msm_ipc_router_close_port(port_ptr);
 	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
 	release_sock(sk);
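The reworked close path follows a defensive pattern: validate sk before lock_sock() (which dereferences it), fetch the port pointer only under the lock, and release the lock on every error path. A minimal sketch of the same shape using a pthread mutex; everything here is an illustrative model, not the kernel's socket locking (build with -lpthread; EINVAL is -22 on Linux):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct sock_model {
	pthread_mutex_t lock;
	void *port; /* may be NULL if the port was never set up */
};

/* Check the handle before locking, fetch the inner pointer only under
 * the lock, and unlock on every error path. */
static int close_model(struct sock_model *sk)
{
	void *port;

	if (!sk)
		return -EINVAL;

	pthread_mutex_lock(&sk->lock);
	port = sk->port;
	if (!port) {
		pthread_mutex_unlock(&sk->lock);
		return -EINVAL;
	}
	/* ... tear the port down here ... */
	pthread_mutex_unlock(&sk->lock);
	return 0;
}

int main(void)
{
	struct sock_model sk = { PTHREAD_MUTEX_INITIALIZER, NULL };

	printf("%d\n", close_model(NULL)); /* -22: no socket  */
	printf("%d\n", close_model(&sk));  /* -22: no port    */
	sk.port = &sk;
	printf("%d\n", close_model(&sk));  /*   0: clean path */
	return 0;
}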
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index f4c68ff..e719c00 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -570,6 +570,9 @@
 static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion,
 				enum snd_jack_types jack_type)
 {
+	struct snd_soc_codec *codec = mbhc->codec;
+	bool is_pa_on = false;
+
 	WCD_MBHC_RSC_ASSERT_LOCKED(mbhc);
 
 	pr_debug("%s: enter insertion %d hph_status %x\n",
@@ -595,14 +598,14 @@
 		if (mbhc->micbias_enable) {
 			if (mbhc->mbhc_cb->mbhc_micbias_control)
 				mbhc->mbhc_cb->mbhc_micbias_control(
-						mbhc->codec, MIC_BIAS_2,
+						codec, MIC_BIAS_2,
 						MICB_DISABLE);
 			if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
 				mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-						mbhc->codec,
+						codec,
 						MIC_BIAS_2, false);
 			if (mbhc->mbhc_cb->set_micbias_value) {
-				mbhc->mbhc_cb->set_micbias_value(mbhc->codec);
+				mbhc->mbhc_cb->set_micbias_value(codec);
 				WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
 			}
 			mbhc->micbias_enable = false;
@@ -632,15 +635,15 @@
 			if (mbhc->micbias_enable) {
 				if (mbhc->mbhc_cb->mbhc_micbias_control)
 					mbhc->mbhc_cb->mbhc_micbias_control(
-						mbhc->codec, MIC_BIAS_2,
+						codec, MIC_BIAS_2,
 						MICB_DISABLE);
 				if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
 					mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
-						mbhc->codec,
+						codec,
 						MIC_BIAS_2, false);
 				if (mbhc->mbhc_cb->set_micbias_value) {
 					mbhc->mbhc_cb->set_micbias_value(
-							mbhc->codec);
+							codec);
 					WCD_MBHC_REG_UPDATE_BITS(
 							WCD_MBHC_MICB_CTRL, 0);
 				}
@@ -691,9 +694,13 @@
 		} else if (jack_type == SND_JACK_ANC_HEADPHONE)
 			mbhc->current_plug = MBHC_PLUG_TYPE_ANC_HEADPHONE;
 
+		if (mbhc->mbhc_cb->hph_pa_on_status)
+			is_pa_on = mbhc->mbhc_cb->hph_pa_on_status(codec);
+
 		if (mbhc->impedance_detect &&
 			mbhc->mbhc_cb->compute_impedance &&
-			(mbhc->mbhc_cfg->linein_th != 0)) {
+			(mbhc->mbhc_cfg->linein_th != 0) &&
+			(!is_pa_on)) {
 			mbhc->mbhc_cb->compute_impedance(mbhc,
 					&mbhc->zl, &mbhc->zr);
 			if ((mbhc->zl > mbhc->mbhc_cfg->linein_th &&
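The hph_pa_on_status callback lets plug detection skip impedance measurement while the headphone PA is powered, presumably because driving the measurement path against a live PA would skew the readings. A sketch of that optional-callback gate, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

struct mbhc_model {
	bool (*pa_on)(void);     /* optional PA status callback */
	void (*measure)(int *z); /* impedance measurement       */
};

static bool pa_is_on(void)     { return true; }
static void measure_z(int *z)  { *z = 32; }

/* Only measure when the callback is absent or reports the PA off. */
static void detect(struct mbhc_model *m)
{
	bool is_pa_on = m->pa_on ? m->pa_on() : false;
	int z = 0;

	if (m->measure && !is_pa_on) {
		m->measure(&z);
		printf("impedance: %d ohm\n", z);
	} else {
		printf("skipped: PA on\n");
	}
}

int main(void)
{
	struct mbhc_model m = { pa_is_on, measure_z };

	detect(&m);     /* skipped: PA on    */
	m.pa_on = NULL; /* no callback: assume PA off */
	detect(&m);     /* impedance: 32 ohm */
	return 0;
}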
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 73755ed..54e72e0 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -4952,6 +4952,26 @@
 	},
 };
 
+static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = {
+	{
+		.name = MSM_DAILINK_NAME(ASM Loopback),
+		.stream_name = "MultiMedia6",
+		.cpu_dai_name = "MultiMedia6",
+		.platform_name = "msm-pcm-loopback",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.dpcm_capture = 1,
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST,
+			    SND_SOC_DPCM_TRIGGER_POST},
+		.ignore_suspend = 1,
+		.no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+		.ignore_pmdown_time = 1,
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA6,
+	},
+};
+
 static struct snd_soc_dai_link msm_common_be_dai_links[] = {
 	/* Backend AFE DAI Links */
 	{
@@ -5842,6 +5862,7 @@
 static struct snd_soc_dai_link msm_tasha_dai_links[
 			 ARRAY_SIZE(msm_common_dai_links) +
 			 ARRAY_SIZE(msm_tasha_fe_dai_links) +
+			 ARRAY_SIZE(msm_common_misc_fe_dai_links) +
 			 ARRAY_SIZE(msm_common_be_dai_links) +
 			 ARRAY_SIZE(msm_tasha_be_dai_links) +
 			 ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -5852,6 +5873,7 @@
 static struct snd_soc_dai_link msm_tavil_dai_links[
 			 ARRAY_SIZE(msm_common_dai_links) +
 			 ARRAY_SIZE(msm_tavil_fe_dai_links) +
+			 ARRAY_SIZE(msm_common_misc_fe_dai_links) +
 			 ARRAY_SIZE(msm_common_be_dai_links) +
 			 ARRAY_SIZE(msm_tavil_be_dai_links) +
 			 ARRAY_SIZE(msm_wcn_be_dai_links) +
@@ -6185,7 +6207,7 @@
 {
 	struct snd_soc_card *card = NULL;
 	struct snd_soc_dai_link *dailink;
-	int len_1, len_2, len_3;
+	int len_1, len_2, len_3, len_4;
 	int total_links;
 	const struct of_device_id *match;
 
@@ -6200,8 +6222,9 @@
 		card = &snd_soc_card_tasha_msm;
 		len_1 = ARRAY_SIZE(msm_common_dai_links);
 		len_2 = len_1 + ARRAY_SIZE(msm_tasha_fe_dai_links);
-		len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
-		total_links = len_3 + ARRAY_SIZE(msm_tasha_be_dai_links);
+		len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+		len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+		total_links = len_4 + ARRAY_SIZE(msm_tasha_be_dai_links);
 		memcpy(msm_tasha_dai_links,
 		       msm_common_dai_links,
 		       sizeof(msm_common_dai_links));
@@ -6209,9 +6232,12 @@
 		       msm_tasha_fe_dai_links,
 		       sizeof(msm_tasha_fe_dai_links));
 		memcpy(msm_tasha_dai_links + len_2,
+		       msm_common_misc_fe_dai_links,
+		       sizeof(msm_common_misc_fe_dai_links));
+		memcpy(msm_tasha_dai_links + len_3,
 		       msm_common_be_dai_links,
 		       sizeof(msm_common_be_dai_links));
-		memcpy(msm_tasha_dai_links + len_3,
+		memcpy(msm_tasha_dai_links + len_4,
 		       msm_tasha_be_dai_links,
 		       sizeof(msm_tasha_be_dai_links));
 
@@ -6252,8 +6278,9 @@
 		card = &snd_soc_card_tavil_msm;
 		len_1 = ARRAY_SIZE(msm_common_dai_links);
 		len_2 = len_1 + ARRAY_SIZE(msm_tavil_fe_dai_links);
-		len_3 = len_2 + ARRAY_SIZE(msm_common_be_dai_links);
-		total_links = len_3 + ARRAY_SIZE(msm_tavil_be_dai_links);
+		len_3 = len_2 + ARRAY_SIZE(msm_common_misc_fe_dai_links);
+		len_4 = len_3 + ARRAY_SIZE(msm_common_be_dai_links);
+		total_links = len_4 + ARRAY_SIZE(msm_tavil_be_dai_links);
 		memcpy(msm_tavil_dai_links,
 		       msm_common_dai_links,
 		       sizeof(msm_common_dai_links));
@@ -6261,9 +6288,12 @@
 		       msm_tavil_fe_dai_links,
 		       sizeof(msm_tavil_fe_dai_links));
 		memcpy(msm_tavil_dai_links + len_2,
+		       msm_common_misc_fe_dai_links,
+		       sizeof(msm_common_misc_fe_dai_links));
+		memcpy(msm_tavil_dai_links + len_3,
 		       msm_common_be_dai_links,
 		       sizeof(msm_common_be_dai_links));
-		memcpy(msm_tavil_dai_links + len_3,
+		memcpy(msm_tavil_dai_links + len_4,
 		       msm_tavil_be_dai_links,
 		       sizeof(msm_tavil_be_dai_links));
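The card's DAI-link table is assembled by copying several fixed arrays back to back at running offsets, so inserting the new msm_common_misc_fe_dai_links segment means adding a fourth offset (len_4) and shifting every later memcpy, exactly as the hunks above do for both the tasha and tavil cards. A compact model of that assembly pattern, with toy data in place of DAI-link structs:

#include <stdio.h>
#include <string.h>

/* Concatenate fixed-size segments at running offsets; inserting a new
 * segment shifts every later one. */
int main(void)
{
	int common[] = { 1, 2 }, fe[] = { 3 }, misc_fe[] = { 4 }, be[] = { 5, 6 };
	int links[6];
	size_t len_1, len_2, len_3, total;

	len_1 = sizeof(common) / sizeof(common[0]);
	len_2 = len_1 + sizeof(fe) / sizeof(fe[0]);
	len_3 = len_2 + sizeof(misc_fe) / sizeof(misc_fe[0]);
	total = len_3 + sizeof(be) / sizeof(be[0]);

	memcpy(links, common, sizeof(common));
	memcpy(links + len_1, fe, sizeof(fe));
	memcpy(links + len_2, misc_fe, sizeof(misc_fe));
	memcpy(links + len_3, be, sizeof(be));

	for (size_t i = 0; i < total; i++)
		printf("%d ", links[i]);
	printf("\n"); /* 1 2 3 4 5 6 */
	return 0;
}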
 
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 3d13932..4f91a87 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -9554,6 +9554,7 @@
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia4", "MM_DL4"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia5", "MM_DL5"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia7", "MM_DL7"},
+	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia8", "MM_DL8"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia10", "MM_DL10"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia11", "MM_DL11"},
 	{"SLIMBUS_7_RX Audio Mixer", "MultiMedia12", "MM_DL12"},