Merge "ARM: dts: msm: Add sdm845 cpufreq node" into msm-4.9
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
new file mode 100644
index 0000000..964fea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -0,0 +1,476 @@
+Qualcomm Technologies, Inc. OSM Bindings
+
+Operating State Manager (OSM) is a hardware engine used by some Qualcomm
+Technologies, Inc. (QTI) SoCs to manage frequency and voltage scaling
+in hardware. OSM is capable of controlling frequency and voltage requests
+for multiple clusters via the existence of multiple OSM domains.
+
+Properties:
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: must be "qcom,clk-cpu-osm".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Addresses and sizes for the memory of the OSM controller,
+		    cluster PLL management, and APCS common register regions.
+		    Optionally, the address of the efuse registers used to
+		    determine the pwrcl or perfcl speed-bins and/or the ACD
+		    register space to initialize prior to enabling OSM.
+
+- reg-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Address names. Must be "osm_l3_base", "osm_pwrcl_base",
+		    "osm_perfcl_base", "l3_pll", "pwrcl_pll", "perfcl_pll",
+		    "l3_sequencer", "pwrcl_sequencer", "perfcl_sequencer" or
+		    "apps_itm_ctl". Optionally, "l3_efuse", "pwrcl_efuse"
+		    "perfcl_efuse".
+		    Must be specified in the same order as the corresponding
+		    addresses are specified in the reg property.
+
+- vdd-l3-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the L3 cluster.
+
+- vdd-pwrcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Power cluster.
+
+- vdd-perfcl-supply
+	Usage:      required
+	Value type: <phandle>
+	Definition: phandle of the underlying regulator device that manages
+		    the voltage supply of the Performance cluster.
+
+- interrupts
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: OSM interrupt specifier.
+
+- interrupt-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Interrupt names. This list must match up 1-to-1 with the
+		    interrupts specified in the 'interrupts' property.
+		    "pwrcl-irq" and "perfcl-irq" must be specified.
+
+- qcom,l3-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency data,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the L3 cluster.
+
+- qcom,pwrcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency data,
+		    PLL override data, ACC level, and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Power cluster.
+
+- qcom,perfcl-speedbinX-v0
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the frequency in Hertz, frequency data,
+		    PLL override data, ACC level and virtual corner used
+		    by the OSM hardware for each supported DCVS setpoint
+		    of the Performance cluster.
+
+- qcom,l3-min-cpr-vc-binX
+	Usage:	    required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the L3 clock domain.
+
+- qcom,pwrcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the power cluster.
+
+- qcom,perfcl-min-cpr-vc-binX
+	Usage:      required
+	Value type: <u32>
+	Definition: First virtual corner which does not use PLL post-divider
+		    for the performance cluster.
+
+- qcom,osm-no-tz
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that there is no programming
+		    of the OSM hardware performed by the secure world.
+
+- qcom,osm-pll-setup
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the PLL setup sequence
+		    must be executed for each clock domain managed by the OSM
+		    controller.
+
+- qcom,up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS up timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the DCVS down timer value in nanoseconds
+		    for each of the three clock domains managed by the OSM
+		    controller.
+
+- qcom,pc-override-index
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the OSM performance index to be used
+		    when each cluster enters certain low power modes.
+
+- qcom,set-ret-inactive
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if domains in retention must
+		    be treated as inactive.
+
+- qcom,enable-llm-freq-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware frequency
+		    votes must be honored by OSM.
+
+- qcom,llm-freq-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-freq-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM frequency down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,enable-llm-volt-vote
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if Limits hardware voltage
+		    votes must be honored by OSM.
+
+- qcom,llm-volt-up-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage up timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,llm-volt-down-timer
+	Usage:      optional
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the LLM voltage down timer value in
+		    nanoseconds for each of the three clock domains managed by
+		    the OSM controller.
+
+- qcom,cc-reads
+	Usage:      optional
+	Value type: <integer>
+	Definition: Defines the number of times the cycle counters must be
+		    read to determine the performance level of each clock
+		    domain.
+
+- qcom,l-val-base
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the L_VAL
+		    control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,apcs-pll-user-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the PLL
+		    user control register for each of the three clock domains
+		    managed by the OSM controller.
+
+- qcom,perfcl-apcs-apm-threshold-voltage
+	Usage:      required
+	Value type: <u32>
+	Definition: Specifies the APM threshold voltage in microvolts.  If the
+		    VDD_APCC supply voltage is above or at this level, then the
+		    APM is switched to use VDD_APCC.  If VDD_APCC is below
+		    this level, then the APM is switched to use VDD_MX.
+
+- qcom,apm-mode-ctl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    control register for each of the two clusters managed
+		    by the OSM controller.
+
+- qcom,apm-status-ctrl
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the register addresses of the APM
+		    controller status register for each of the three clock
+		    domains managed by the OSM controller.
+
+- qcom,perfcl-isense-addr
+	Usage:      required
+	Value type: <u32>
+	Definition: Contains the ISENSE register address.
+
+- qcom,l3-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the L3 cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Power cluster.
+		    The array must contain exactly three elements.
+
+- qcom,perfcl-mem-acc-addr
+	Usage:      required if qcom,osm-no-tz is specified
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the mem-acc
+		    configuration registers for the Performance cluster.
+		    The array must contain exactly three elements.
+
+- qcom,pwrcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Power cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,perfcl-apcs-mem-acc-threshold-voltage
+	Usage:      optional
+	Value type: <u32>
+	Definition: Specifies the highest MEM ACC threshold voltage in
+		    microvolts for the Performance cluster.  This voltage is
+		    used to determine which MEM ACC setting is used for the
+		    highest frequencies.  If specified, the voltage must match
+		    the MEM ACC threshold voltage specified for the
+		    corresponding CPRh device.
+
+- qcom,apcs-cbc-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_CBC_ADDR
+		    registers for all three clock domains.
+
+- qcom,apcs-ramp-ctl-addr
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: Array which defines the addresses of the APCS_RAMP_CTL_ADDR
+		    registers for all three clock domains.
+
+- qcom,red-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the reduction FSM
+		    should be enabled.
+
+- qcom,boost-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the boost FSM should
+		    be enabled.
+
+- qcom,safe-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the safe FSM should
+		    be enabled.
+
+- qcom,ps-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the PS FSM should be
+		    enabled.
+
+- qcom,droop-fsm-en
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the droop FSM should
+		    be enabled.
+
+- qcom,set-c3-active
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C3 are to
+		    be treated as active for core count calculations.
+
+- qcom,set-c2-active
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if the cores in C2 are to
+		    be treated as active for core count calculations.
+
+- qcom,disable-cc-dvcs
+	Usage:	    optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates if core count based DCVS is
+		    to be disabled.
+
+- qcom,apcs-pll-min-freq
+	Usage:	    required
+	Value type: <u32>
+	Definition: Contains the addresses of the RAILx_CLKDOMy_PLL_MIN_FREQ
+		    registers for the three clock domains.
+
+- clock-names
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "xo_ao".
+
+- clocks
+	Usage:      required
+	Value type: <phandle>
+	Definition: Phandle to the XO clock device.
+
+Example:
+	clock_cpucc: qcom,cpucc@0x17d41000 {
+		compatible = "qcom,clk-cpu-osm";
+		reg = <0x17d41000 0x1400>,
+			<0x17d43000 0x1400>,
+			<0x17d45800 0x1400>,
+			<0x178d0000 0x1000>,
+			<0x178c0000 0x1000>,
+			<0x178b0000 0x1000>,
+			<0x17d42400 0x0c00>,
+			<0x17d44400 0x0c00>,
+			<0x17d46c00 0x0c00>,
+			<0x17810090 0x8>;
+		reg-names = "osm_l3_base", "osm_pwrcl_base", "osm_perfcl_base",
+			"l3_pll", "pwrcl_pll", "perfcl_pll",
+			"l3_sequencer", "pwrcl_sequencer",
+			"perfcl_sequencer", "apps_itm_ctl";
+
+		vdd-l3-supply = <&apc0_l3_vreg>;
+		vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+		vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
+		qcom,l3-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x2 9 >;
+
+		qcom,pwrcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   825600000 0x401c062b 0x00002222 0x1 7 >,
+			<   902400000 0x4024072f 0x00002626 0x1 8 >,
+			<   979200000 0x40240833 0x00002929 0x1 9 >,
+			<  1056000000 0x402c0937 0x00002c2c 0x1 10 >,
+			<  1132800000 0x402c0a3b 0x00002f2f 0x1 11 >,
+			<  1209600000 0x402c0b3f 0x00003333 0x1 12 >,
+			<  1286400000 0x40340c43 0x00003636 0x1 13 >,
+			<  1363200000 0x40340d47 0x00003939 0x1 14 >,
+			<  1440000000 0x403c0e4b 0x00003c3c 0x1 15 >,
+			<  1516800000 0x403c0f4f 0x00004040 0x2 16 >,
+			<  1593600000 0x403c1053 0x00004343 0x2 17 >;
+
+		qcom,perfcl-speedbin0-v0 =
+			<   300000000 0x000c000f 0x00002020 0x1 1 >,
+			<   422400000 0x50140116 0x00002020 0x1 2 >,
+			<   499200000 0x5014021a 0x00002020 0x1 3 >,
+			<   576000000 0x5014031e 0x00002020 0x1 4 >,
+			<   652800000 0x501c0422 0x00002020 0x1 5 >,
+			<   729600000 0x501c0526 0x00002020 0x1 6 >,
+			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   883200000 0x4024072b 0x00002525 0x1 8 >,
+			<   960000000 0x40240832 0x00002828 0x1 9 >,
+			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
+			<  1113600000 0x402c0a3a 0x00002e2e 0x1 11 >,
+			<  1190400000 0x402c0b3e 0x00003232 0x1 12 >,
+			<  1267200000 0x40340c42 0x00003535 0x1 13 >,
+			<  1344000000 0x40340d46 0x00003838 0x1 14 >,
+			<  1420800000 0x40340e4a 0x00003b3b 0x1 15 >,
+			<  1497600000 0x403c0f4e 0x00003e3e 0x1 16 >,
+			<  1574400000 0x403c1052 0x00004242 0x2 17 >,
+			<  1651200000 0x403c1156 0x00004545 0x2 18 >,
+			<  1728000000 0x4044125a 0x00004848 0x2 19 >,
+			<  1804800000 0x4044135e 0x00004b4b 0x2 20 >,
+			<  1881600000 0x404c1462 0x00004e4e 0x2 21 >,
+			<  1958400000 0x404c1566 0x00005252 0x3 22 >;
+
+		qcom,l3-min-cpr-vc-bin0 = <7>;
+		qcom,pwrcl-min-cpr-vc-bin0 = <6>;
+		qcom,perfcl-min-cpr-vc-bin0 = <7>;
+
+		qcom,up-timer =
+			<1000 1000 1000>;
+		qcom,down-timer =
+			<100000 100000 100000>;
+		qcom,pc-override-index =
+			<0 0 0>;
+		qcom,set-ret-inactive;
+		qcom,enable-llm-freq-vote;
+		qcom,llm-freq-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-freq-down-timer =
+			<327675 327675 327675>;
+		qcom,enable-llm-volt-vote;
+		qcom,llm-volt-up-timer =
+			<1000 1000 1000>;
+		qcom,llm-volt-down-timer =
+			<327675 327675 327675>;
+		qcom,cc-reads = <10>;
+		qcom,cc-delay = <5>;
+		qcom,cc-factor = <100>;
+		qcom,osm-clk-rate = <100000000>;
+		qcom,xo-clk-rate = <19200000>;
+
+		qcom,l-val-base =
+			<0x178d0004 0x178c0004 0x178b0004>;
+		qcom,apcs-pll-user-ctl =
+			<0x178d000c 0x178c000c 0x178b000c>;
+		qcom,apcs-pll-min-freq =
+			<0x17d41094 0x17d43094 0x17d45894>;
+		qcom,apm-mode-ctl =
+			<0x0 0x0 0x17d20010>;
+		qcom,apm-status-ctrl =
+			<0x0 0x0 0x17d20000>;
+		qcom,perfcl-isense-addr = <0x17871480>;
+		qcom,l3-mem-acc-addr = <0x17990170 0x17990170 0x17990170>;
+		qcom,pwrcl-mem-acc-addr = <0x17990160 0x17990164 0x17990164>;
+		qcom,perfcl-mem-acc-addr = <0x17990168 0x1799016c 0x1799016c>;
+		qcom,cfg-gfmux-addr = <0x178d0084 0x178c0084 0x178b0084>;
+		qcom,apcs-cbc-addr = <0x178d008c 0x178c008c 0x178b008c>;
+		qcom,apcs-ramp-ctl-addr = <0x17840904 0x17840904 0x17830904>;
+
+		qcom,perfcl-apcs-apm-threshold-voltage = <800000>;
+		qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
+		qcom,boost-fsm-en;
+		qcom,safe-fsm-en;
+		qcom,ps-fsm-en;
+		qcom,droop-fsm-en;
+		qcom,osm-no-tz;
+		qcom,osm-pll-setup;
+
+		clock-names = "xo_ao";
+		clocks = <&clock_rpmh RPMH_CXO_CLK_A>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index a1db0fc..2e72456 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -17,6 +17,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 944ae1a..688e130 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -23,6 +23,7 @@
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SCHED_HMP=y
 CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
 CONFIG_NAMESPACES=y
 # CONFIG_UTS_NS is not set
 # CONFIG_PID_NS is not set
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index d327826..31ea544 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -204,3 +204,15 @@
 	  SoCs. It accepts requests from other hardware subsystems via RSC.
 	  Say Y to support the clocks managed by RPMh VRM/ARC on platforms
 	  such as sdm845.
+
+config CLOCK_CPU_OSM
+	tristate "OSM CPU Clock Controller"
+	depends on COMMON_CLK_QCOM
+	help
+	  Support for the OSM clock controller.
+	  Operating State Manager (OSM) is a hardware engine used by some
+	  Qualcomm Technologies, Inc. (QTI) SoCs to manage frequency and
+	  voltage scaling in hardware. OSM is capable of controlling
+	  frequency and voltage requests for multiple clusters via the
+	  existence of multiple OSM domains.
+	  Say Y if you want to support OSM clocks.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ebcf4fc..d52a751 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -16,23 +16,24 @@
 # Keep alphabetically sorted by config
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLOCK_CPU_OSM) += clk-cpu-osm.o
+obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
 obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
 obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
 obj-$(CONFIG_MSM_CAMCC_SDM845) += camcc-sdm845.o
+obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
 obj-$(CONFIG_MSM_GCC_SDM845) += gcc-sdm845.o
-obj-$(CONFIG_MSM_DISPCC_SDM845) += dispcc-sdm845.o
-obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
 obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
-obj-$(CONFIG_CLOCK_QPNP_DIV) += clk-qpnp-div.o
-obj-$(CONFIG_MSM_CLK_RPMH) += clk-rpmh.o
+obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
new file mode 100644
index 0000000..d5e2be6
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -0,0 +1,2619 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/regmap.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <soc/qcom/scm.h>
+#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+#include "clk-voter.h"
+
+#define OSM_TABLE_SIZE			40
+#define SINGLE_CORE			1
+#define MAX_CLUSTER_CNT			3
+#define MAX_MEM_ACC_VAL_PER_LEVEL	3
+#define MAX_CORE_COUNT			4
+/*
+ * Extract the core-count field (bits 18:16) from an OSM FREQ register
+ * value.  The argument is parenthesized so expressions may be passed in.
+ */
+#define CORE_COUNT_VAL(val)		(((val) & GENMASK(18, 16)) >> 16)
+
+#define OSM_CYCLE_COUNTER_CTRL_REG		0x760
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN	BIT(8)
+
+#define OSM_REG_SIZE			32
+
+#define L3_EFUSE_SHIFT			0
+#define L3_EFUSE_MASK			0
+#define PWRCL_EFUSE_SHIFT		0
+#define PWRCL_EFUSE_MASK		0
+#define PERFCL_EFUSE_SHIFT		29
+#define PERFCL_EFUSE_MASK		0x7
+
+#define ENABLE_REG			0x0
+#define FREQ_REG			0x110
+#define VOLT_REG			0x114
+#define OVERRIDE_REG			0x118
+#define SPM_CC_INC_HYSTERESIS		0x1c
+#define SPM_CC_DEC_HYSTERESIS		0x20
+#define SPM_CORE_INACTIVE_MAPPING	0x28
+#define CC_ZERO_BEHAV_CTRL		0xc
+#define ENABLE_OVERRIDE			BIT(0)
+#define SPM_CC_DCVS_DISABLE		0x24
+#define LLM_FREQ_VOTE_INC_HYSTERESIS	0x30
+#define LLM_FREQ_VOTE_DEC_HYSTERESIS	0x34
+#define LLM_INTF_DCVS_DISABLE		0x40
+#define LLM_VOLTAGE_VOTE_INC_HYSTERESIS	0x38
+#define LLM_VOLTAGE_VOTE_DEC_HYSTERESIS	0x3c
+#define VMIN_REDUCTION_ENABLE_REG	0x48
+#define VMIN_REDUCTION_TIMER_REG	0x4c
+#define PDN_FSM_CTRL_REG		0x54
+#define DELTA_DEX_VAL			BVAL(31, 23, 0xa)
+#define IGNORE_PLL_LOCK			BIT(15)
+#define CC_BOOST_FSM_EN			BIT(0)
+#define CC_BOOST_FSM_TIMERS_REG0	0x58
+#define CC_BOOST_FSM_TIMERS_REG1	0x5c
+#define CC_BOOST_FSM_TIMERS_REG2	0x60
+#define DCVS_BOOST_FSM_EN_MASK		BIT(2)
+#define DCVS_BOOST_FSM_TIMERS_REG0	0x64
+#define DCVS_BOOST_FSM_TIMERS_REG1	0x68
+#define DCVS_BOOST_FSM_TIMERS_REG2	0x6c
+#define PS_BOOST_FSM_EN_MASK		BIT(1)
+#define PS_BOOST_FSM_TIMERS_REG0	0x74
+#define PS_BOOST_FSM_TIMERS_REG1	0x78
+#define PS_BOOST_FSM_TIMERS_REG2	0x7c
+#define BOOST_PROG_SYNC_DELAY_REG	0x80
+#define DCVS_DROOP_FSM_EN_MASK		BIT(5)
+#define DROOP_PROG_SYNC_DELAY_REG	0x9c
+#define DROOP_RELEASE_TIMER_CTRL	0x88
+#define DROOP_CTRL_REG			0x84
+#define DCVS_DROOP_TIMER_CTRL		0x98
+#define PLL_SW_OVERRIDE_ENABLE		0xa0
+#define PLL_SW_OVERRIDE_DROOP_EN	BIT(0)
+#define SPM_CORE_COUNT_CTRL		0x2c
+#define CORE_DCVS_CTRL			0xbc
+#define OVERRIDE_CLUSTER_IDLE_ACK	0x800
+#define REQ_GEN_FSM_STATUS		0x70c
+
+#define PLL_MIN_LVAL			0x21
+#define PLL_MIN_FREQ_REG		0x94
+#define PLL_POST_DIV1			0x1F
+#define PLL_POST_DIV2			0x11F
+#define PLL_MODE			0x0
+#define PLL_L_VAL			0x4
+#define PLL_USER_CTRL			0xc
+#define PLL_CONFIG_CTL_LO		0x10
+#define PLL_CONFIG_CTL_HI		0x14
+#define MIN_VCO_VAL			0x2b
+
+#define MAX_VC				63
+#define MAX_MEM_ACC_LEVELS		3
+/* MAX_MEM_ACC_VAL_PER_LEVEL is defined once near the top of this file */
+#define MAX_MEM_ACC_VALUES		(MAX_MEM_ACC_LEVELS * \
+					MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_ADDRS			3
+
+#define ISENSE_ON_DATA			0xf
+#define ISENSE_OFF_DATA			0x0
+#define CONSTANT_32			0x20
+
+#define APM_MX_MODE			0x0
+#define APM_APC_MODE			0x2
+#define APM_READ_DATA_MASK		0xc
+#define APM_MX_MODE_VAL			0x4
+#define APM_APC_READ_VAL		0x8
+#define APM_MX_READ_VAL			0x4
+#define APM_CROSSOVER_VC		0xb0
+
+#define MEM_ACC_SEQ_CONST(n)		(n)
+#define MEM_ACC_APM_READ_MASK		0xff
+#define MEMACC_CROSSOVER_VC		0xb8
+
+#define PLL_WAIT_LOCK_TIME_US		10
+#define PLL_WAIT_LOCK_TIME_NS		(PLL_WAIT_LOCK_TIME_US * 1000)
+#define SAFE_FREQ_WAIT_NS		5000
+#define DEXT_DECREMENT_WAIT_NS		1000
+
+#define DATA_MEM(n)			(0x400 + (n) * 4)
+
+/*
+ * Per-core/per-counter register offsets.  Macro arguments are fully
+ * parenthesized so callers may pass expressions (e.g. core_num + 1)
+ * without operator-precedence surprises.
+ */
+#define DCVS_PERF_STATE_DESIRED_REG_0	0x780
+#define DCVS_PERF_STATE_DESIRED_REG(n)	(DCVS_PERF_STATE_DESIRED_REG_0 + \
+					(4 * (n)))
+#define OSM_CYCLE_COUNTER_STATUS_REG_0	0x7d0
+#define OSM_CYCLE_COUNTER_STATUS_REG(n)	(OSM_CYCLE_COUNTER_STATUS_REG_0 + \
+					(4 * (n)))
+
+/*
+ * regmap description of the OSM register space: 32-bit registers on a
+ * 4-byte stride.  fast_io selects spinlock-based locking so the map is
+ * usable from atomic context.
+ */
+static const struct regmap_config osm_qcom_regmap_config = {
+	.reg_bits       = 32,
+	.reg_stride     = 4,
+	.val_bits       = 32,
+	.fast_io	= true,
+};
+
+/* Indices into clk_osm.vbases[]/pbases[] for each mapped register region. */
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	EFUSE_BASE,
+	SEQ_BASE,
+	NUM_BASES,
+};
+
+/*
+ * Column order of the 5-value tuples in the DT qcom,*-speedbinX-v0 tables:
+ * <frequency freq_data pll_override mem_acc_level virtual_corner>.
+ */
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	MEM_ACC_LEVEL,
+	VIRTUAL_CORNER,
+	NUM_FIELDS,
+};
+
+/*
+ * One row of the OSM lookup table: a DCVS setpoint's frequency plus the
+ * raw register values programmed for it (parsed from the DT speed-bin
+ * tables, see enum clk_osm_lut_data).
+ */
+struct osm_entry {
+	u16 virtual_corner;	/* CPR virtual corner for this setpoint */
+	u16 open_loop_volt;	/* open-loop voltage for this corner */
+	u32 freq_data;		/* FREQ register value; bits 18:16 = core count */
+	u32 override_data;	/* PLL override register value */
+	u32 mem_acc_level;	/* MEM ACC level register value */
+	long frequency;		/* setpoint rate in Hz */
+};
+
+/*
+ * Per-clock-domain OSM state.  One instance exists for each of the L3,
+ * power-cluster and performance-cluster domains; hw embeds it into the
+ * common clock framework (see to_clk_osm()).
+ */
+struct clk_osm {
+	struct clk_hw hw;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	/* register regions, indexed by enum clk_osm_bases */
+	void *vbases[NUM_BASES];
+	unsigned long pbases[NUM_BASES];
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;	/* valid rows in osm_table */
+	u32 cluster_num;
+	u32 core_num;
+	/* APM / MEM ACC virtual-corner crossover and threshold settings */
+	u32 apm_crossover_vc;
+	u32 apm_threshold_vc;
+	u32 mem_acc_crossover_vc;
+	u32 mem_acc_threshold_vc;
+	u32 min_cpr_vc;
+	/* cycle-counter sampling configuration (qcom,cc-* DT properties) */
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	/* physical addresses of external control registers (from DT) */
+	u32 l_val_base;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_pll_min_freq;
+	u32 cfg_gfmux_addr;
+	u32 apcs_cbc_addr;
+	u32 speedbin;
+	u32 mem_acc_crossover_vc_addr;
+	u32 mem_acc_addr[MEM_ACC_ADDRS];
+	u32 ramp_ctl_addr;
+	u32 apm_mode_ctl;
+	u32 apm_status_ctl;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	/* feature flags, mostly mirrors of boolean DT properties */
+	bool secure_init;
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+	bool wdog_trace_en;
+};
+
+/* Cluster power rails, from the vdd-{l3,pwrcl,perfcl}-supply DT properties */
+static struct regulator *vdd_l3;
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+/* Map a generic clk_hw back to the clk_osm that embeds it. */
+static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
+{
+	return container_of(_hw, struct clk_osm, hw);
+}
+
+/*
+ * Read-modify-write of an OSM register: replace only the bits selected by
+ * mask, and skip the write entirely when no bit would change.
+ */
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 cur, updated;
+
+	cur = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	updated = (cur & ~mask) | (val & mask);
+
+	if (updated != cur)
+		writel_relaxed(updated, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* Write a register in the sequencer (SEQ_BASE) region. */
+static inline void clk_osm_write_seq_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[SEQ_BASE] + offset);
+}
+
+/* Write a register in the OSM controller (OSM_BASE) region. */
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* Read a register in the OSM controller region. */
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* As clk_osm_read_reg(), but bypasses the MMIO access logger. */
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+/*
+ * Ordering barrier: a dummy read back from the given base forces earlier
+ * posted writes to that region to complete before we proceed.
+ */
+static inline int clk_osm_mb(struct clk_osm *c, int base)
+{
+	return readl_relaxed_no_log((char *)c->vbases[base] + ENABLE_REG);
+}
+
+/*
+ * .list_rate callback: expose the nth entry of the init-time FMAX table,
+ * or -ENXIO once n runs past the end of the table.
+ */
+static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
+					unsigned long rate_max)
+{
+	if (n >= hw->init->num_rate_max)
+		return -ENXIO;
+	return hw->init->rate_max[n];
+}
+
+/*
+ * Rate-selection predicate: is `new` a better candidate than `best` for a
+ * request of `req`?  Prefer the smallest rate that still satisfies the
+ * request; when nothing seen so far satisfies it, prefer the largest.
+ */
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+			unsigned long new)
+{
+	/* Error-encoded candidates are never acceptable. */
+	if (IS_ERR_VALUE(new))
+		return false;
+
+	if (new >= req)
+		return new < best || best < req;
+
+	return best < new && best < req;
+}
+
+/*
+ * .round_rate callback: snap the requested rate onto the FMAX table,
+ * preferring the smallest table rate >= the request (see is_better_rate()).
+ * A request of 0 returns the lowest table entry.
+ */
+static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	int i;
+	unsigned long rrate = 0;
+
+	/*
+	 * If the rate passed in is 0, return the first frequency in the
+	 * FMAX table.
+	 */
+	if (!rate)
+		return hw->init->rate_max[0];
+
+	for (i = 0; i < hw->init->num_rate_max; i++) {
+		if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
+			rrate = hw->init->rate_max[i];
+			if (rate == rrate)
+				break;
+		}
+	}
+
+	/*
+	 * Do not index rate_max[i] here: when the loop above runs to
+	 * completion, i == num_rate_max and the access would read one past
+	 * the end of the table.
+	 */
+	pr_debug("%s: rate %lu, rrate %ld\n", __func__, rate, rrate);
+
+	return rrate;
+}
+
+/*
+ * Find the OSM LUT index for @rate.  Entries programmed for the maximum
+ * core count are preferred; a single-core entry at the same frequency is
+ * used as a fallback.  Returns -EINVAL when no entry matches.
+ */
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int index;
+	int single_core_index = -EINVAL;
+	int core_count;
+
+	for (index = 0; index < entries; index++) {
+		if (rate != table[index].frequency)
+			continue;
+
+		core_count = CORE_COUNT_VAL(table[index].freq_data);
+		if (core_count == MAX_CORE_COUNT)
+			return index;
+		if (core_count == SINGLE_CORE)
+			single_core_index = index;
+	}
+
+	/*
+	 * Use -EINVAL rather than 0 as the "not found" sentinel so that a
+	 * valid single-core match at table index 0 is not discarded.
+	 */
+	return single_core_index;
+}
+
+/*
+ * .enable callback: turn on the OSM hardware for this clock domain by
+ * writing ENABLE_REG, then give the engine 5us to come up.
+ */
+static int clk_osm_enable(struct clk_hw *hw)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(cpuclk, OSM_BASE);
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+/*
+ * Ops for the per-cluster CPU clocks; rates are set on the individual
+ * core clocks (clk_ops_core), so no set_rate/recalc_rate here.
+ */
+const struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+};
+
+/* Filled in at probe time for the per-core clocks (not visible here). */
+static struct clk_ops clk_ops_core;
+
+/*
+ * .set_rate callback for a per-core clock: validate the rate against the
+ * parent cluster's FMAX table, map it to an OSM LUT index, and write the
+ * desired performance state for this core's register slot.
+ */
+static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+	unsigned long r_rate;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	/* Only rates that round to themselves (i.e. table entries) are valid */
+	r_rate = clk_osm_round_rate(p_hw, rate, NULL);
+
+	if (rate != r_rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(parent->osm_table,
+				     parent->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+	/*
+	 * Choose index and send request to OSM hardware.
+	 * TODO: Program INACTIVE_OS_REQUEST if needed.
+	 */
+	clk_osm_write_reg(parent, index,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(parent, OSM_BASE);
+
+	return 0;
+}
+
+/*
+ * .set_rate callback for the L3 clock: validate the rate against the FMAX
+ * table, translate it to an OSM LUT index, and program the desired
+ * performance state register for the L3 domain.
+ */
+static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate)
+{
+	struct clk_osm *c = to_clk_osm(hw);
+	unsigned long rounded;
+	int index;
+
+	if (!c)
+		return -EINVAL;
+
+	/* Only rates that round to themselves (table entries) are accepted. */
+	rounded = clk_osm_round_rate(hw, rate, NULL);
+	if (rounded != rate) {
+		pr_err("invalid requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Translate the rate into an OSM LUT index. */
+	index = clk_osm_search_table(c->osm_table, c->num_entries, rounded);
+	if (index < 0) {
+		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
+	clk_osm_write_reg(c, index, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	/* Make sure the write goes through before proceeding */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+/*
+ * .recalc_rate callback for a per-core clock: read back this core's
+ * desired-performance-state index from the parent cluster's OSM register
+ * and translate it to a frequency via the LUT.
+ *
+ * NOTE(review): the index read from hardware is used unchecked; it is
+ * assumed to always be < num_entries (OSM only holds states previously
+ * written by set_rate) — TODO confirm.
+ */
+static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	struct clk_hw *p_hw = clk_hw_get_parent(hw);
+	struct clk_osm *parent = to_clk_osm(p_hw);
+	int index = 0;
+
+	if (!cpuclk || !parent)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(parent,
+			DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num));
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				parent->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return parent->osm_table[index].frequency;
+}
+
+/*
+ * .recalc_rate callback for the L3 clock: read the domain's desired
+ * performance-state index and translate it to a frequency via this
+ * clock's own OSM table.
+ */
+static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(hw);
+	int index = 0;
+
+	if (!cpuclk)
+		return -EINVAL;
+
+	index = clk_osm_read_reg(cpuclk, DCVS_PERF_STATE_DESIRED_REG_0);
+
+	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
+				cpuclk->osm_table[index].frequency);
+
+	/* Convert index to frequency */
+	return cpuclk->osm_table[index].frequency;
+}
+
+
+/* clk_ops for the L3 OSM domain clock (no per-core children). */
+const struct clk_ops clk_ops_l3_osm = {
+	.enable = clk_osm_enable,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.recalc_rate = l3_clk_recalc_rate,
+	.set_rate = l3_clk_set_rate,
+};
+
+/* Parent-mux source indices; only the XO source is used here. */
+enum {
+	P_XO,
+};
+
+/*
+ * NOTE(review): gcc_parent_map_1/gcc_parent_names_1 appear unused in this
+ * chunk, and osm_clks_init below names its parent "bi_tcxo" rather than
+ * "xo" — confirm which parent name is intended before removing these.
+ */
+static const struct parent_map gcc_parent_map_1[] = {
+	{ P_XO, 0 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+	"xo",
+};
+
+/* init data for the three OSM domain clocks: L3, power, performance. */
+static struct clk_init_data osm_clks_init[] = {
+	[0] = {
+		.name = "l3_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_l3_osm,
+	},
+	[1] = {
+		.name = "pwrcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+	[2] = {
+		.name = "perfcl_clk",
+		.parent_names = (const char *[]){ "bi_tcxo" },
+		.num_parents = 1,
+		.ops = &clk_ops_cpu_osm,
+	},
+};
+
+/* L3 domain clock: cluster 0, no CPU affinity mask. */
+static struct clk_osm l3_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x0,
+	.hw.init = &osm_clks_init[0],
+};
+
+/* Per-cluster voter clocks aggregating L3 frequency requests. */
+static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0);
+static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
+
+/* Power cluster domain clock; cpu_reg_mask matches MPIDR values of CPUs 0-3. */
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x300,
+	.hw.init = &osm_clks_init[1],
+};
+
+/*
+ * NOTE(review): cpu0_pwrcl_clk lacks CLK_SET_RATE_PARENT while cpu1-3 set
+ * it — confirm whether the asymmetry is intentional.
+ */
+static struct clk_osm cpu0_pwrcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu0_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu1_pwrcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu1_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu2_pwrcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu2_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu3_pwrcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu3_pwrcl_clk",
+		.parent_names = (const char *[]){ "pwrcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+/* Performance cluster domain clock; mask matches MPIDR values of CPUs 4-7. */
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 2,
+	.cpu_reg_mask = 0x700,
+	.hw.init = &osm_clks_init[2],
+};
+
+
+/*
+ * Per-CPU clocks for the performance cluster.  core_num is the index
+ * within the cluster (0-3), not the logical CPU number (4-7).
+ * NOTE(review): cpu4_perfcl_clk lacks CLK_SET_RATE_PARENT while cpu5-7
+ * set it — confirm whether the asymmetry is intentional.
+ */
+static struct clk_osm cpu4_perfcl_clk = {
+	.core_num = 0,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu4_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu5_perfcl_clk = {
+	.core_num = 1,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu5_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu6_perfcl_clk = {
+	.core_num = 2,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu6_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+static struct clk_osm cpu7_perfcl_clk = {
+	.core_num = 3,
+	.total_cycle_counter = 0,
+	.prev_cycle_counter = 0,
+	.hw.init = &(struct clk_init_data){
+		.name = "cpu7_perfcl_clk",
+		.parent_names = (const char *[]){ "perfcl_clk" },
+		.num_parents = 1,
+		.flags = CLK_SET_RATE_PARENT,
+		.ops = &clk_ops_core,
+	},
+};
+
+/*
+ * Use the cpu* clocks only for writing to the PERF_STATE_DESIRED registers.
+ * Note that we are currently NOT programming the APSS_LMH_GFMUX_CFG &
+ * APSS_OSM_GFMUX_CFG registers.
+ */
+
+/* Table of all clk_hw pointers exported by this provider, indexed by ID. */
+static struct clk_hw *osm_qcom_clk_hws[] = {
+	[L3_CLK] = &l3_clk.hw,
+	[L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw,
+	[L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw,
+	[PWRCL_CLK] = &pwrcl_clk.hw,
+	[CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw,
+	[CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw,
+	[CPU2_PWRCL_CLK] = &cpu2_pwrcl_clk.hw,
+	[CPU3_PWRCL_CLK] = &cpu3_pwrcl_clk.hw,
+	[PERFCL_CLK] = &perfcl_clk.hw,
+	[CPU4_PERFCL_CLK] = &cpu4_perfcl_clk.hw,
+	[CPU5_PERFCL_CLK] = &cpu5_perfcl_clk.hw,
+	[CPU6_PERFCL_CLK] = &cpu6_perfcl_clk.hw,
+	[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw,
+};
+
+/*
+ * Map a logical CPU number to its per-CPU clk_osm.  The mapping is derived
+ * from the CPU's DT "reg" (MPIDR) value matched against each cluster's
+ * cpu_reg_mask, and cached in a static table so the DT walk happens once
+ * per CPU.  Returns NULL if the CPU node, its reg property, or a matching
+ * cluster cannot be found.
+ *
+ * Fix: of_get_cpu_node() returns the node with an elevated refcount; the
+ * original never called of_node_put(), leaking a DT node reference on
+ * every first lookup.  The reference is now dropped as soon as the hwid
+ * has been read.
+ */
+static struct clk_osm *logical_cpu_to_clk(int cpu)
+{
+	struct device_node *cpu_node;
+	const u32 *cell;
+	u64 hwid;
+	static struct clk_osm *cpu_clk_map[NR_CPUS];
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return NULL;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		of_node_put(cpu_node);
+		return NULL;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	/* Done with the DT node; drop the reference of_get_cpu_node() took */
+	of_node_put(cpu_node);
+
+	/* hwid bits entirely within the power-cluster mask => CPUs 0-3 */
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 0:
+			cpu_clk_map[cpu] = &cpu0_pwrcl_clk;
+			break;
+		case 1:
+			cpu_clk_map[cpu] = &cpu1_pwrcl_clk;
+			break;
+		case 2:
+			cpu_clk_map[cpu] = &cpu2_pwrcl_clk;
+			break;
+		case 3:
+			cpu_clk_map[cpu] = &cpu3_pwrcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for power cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+	/* hwid bits entirely within the perf-cluster mask => CPUs 4-7 */
+	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
+		switch (cpu) {
+		case 4:
+			cpu_clk_map[cpu] = &cpu4_perfcl_clk;
+			break;
+		case 5:
+			cpu_clk_map[cpu] = &cpu5_perfcl_clk;
+			break;
+		case 6:
+			cpu_clk_map[cpu] = &cpu6_perfcl_clk;
+			break;
+		case 7:
+			cpu_clk_map[cpu] = &cpu7_perfcl_clk;
+			break;
+		default:
+			pr_err("unsupported CPU number for perf cluster\n");
+			return NULL;
+		}
+		return cpu_clk_map[cpu];
+	}
+
+	return NULL;
+}
+
+/*
+ * Convert a duration in nanoseconds to a count of OSM clock cycles
+ * (rate * nsec / 1e9).  do_div() divides @temp in place.
+ * NOTE(review): the u64 result is returned through an int — presumably
+ * callers only pass durations small enough to fit; confirm.
+ */
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 temp;
+
+	temp = (u64)c->osm_clk_rate * nsec;
+	do_div(temp, 1000000000);
+
+	return temp;
+}
+
+/*
+ * Program the MEM-ACC related sequencer data memory for a cluster.
+ *
+ * First builds mem_acc_level_map[]: for each MEM-ACC level transition in
+ * the OSM table, the virtual corner at which the transition occurs
+ * (unused slots stay at MAX_VC).  The values are then written either
+ * directly (secure_init, i.e. no TZ) or via scm_io_write().  For the
+ * performance cluster DATA_MEM(80) holds the MEM-ACC threshold corner
+ * instead of the second transition corner.
+ */
+static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
+{
+	int curr_level, i, j = 0;
+	int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {MAX_VC, MAX_VC, MAX_VC};
+
+	curr_level = c->osm_table[0].mem_acc_level;
+	for (i = 0; i < c->num_entries; i++) {
+		if (curr_level == MAX_MEM_ACC_LEVELS)
+			break;
+
+		/* Record the corner at which each level transition happens */
+		if (c->osm_table[i].mem_acc_level != curr_level) {
+			mem_acc_level_map[j++] =
+				c->osm_table[i].virtual_corner;
+			curr_level = c->osm_table[i].mem_acc_level;
+		}
+	}
+
+	if (c->secure_init) {
+		clk_osm_write_seq_reg(c,
+				c->pbases[OSM_BASE] + MEMACC_CROSSOVER_VC,
+				DATA_MEM(57));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[0], DATA_MEM(48));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[1], DATA_MEM(49));
+		clk_osm_write_seq_reg(c, c->mem_acc_addr[2], DATA_MEM(50));
+		clk_osm_write_seq_reg(c, c->mem_acc_crossover_vc,
+							DATA_MEM(78));
+		clk_osm_write_seq_reg(c, mem_acc_level_map[0], DATA_MEM(79));
+		if (c == &perfcl_clk)
+			clk_osm_write_seq_reg(c, c->mem_acc_threshold_vc,
+								DATA_MEM(80));
+		else
+			clk_osm_write_seq_reg(c, mem_acc_level_map[1],
+								DATA_MEM(80));
+		/*
+		 * Note that DATA_MEM[81] -> DATA_MEM[89] values will be
+		 * confirmed post-si. Use a value of 1 for DATA_MEM[89] and
+		 * leave the rest of them as 0.
+		 */
+		clk_osm_write_seq_reg(c, 1, DATA_MEM(89));
+	} else {
+		/* TZ owns the sequencer memory; go through SCM writes */
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(78),
+						c->mem_acc_crossover_vc);
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(79),
+						mem_acc_level_map[0]);
+		if (c == &perfcl_clk)
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						c->mem_acc_threshold_vc);
+		else
+			scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(80),
+						mem_acc_level_map[1]);
+	}
+}
+
+/*
+ * Program the Array Power Mux (APM) controller addresses and mode values
+ * into the sequencer data memory.  Only the performance cluster has an
+ * APM here; L3 and the power cluster return immediately.
+ */
+static void clk_osm_program_apm_regs(struct clk_osm *c)
+{
+	if (c == &l3_clk || c == &pwrcl_clk)
+		return;
+
+	/*
+	 * Program address of the control register used to configure
+	 * the Array Power Mux controller
+	 */
+	clk_osm_write_seq_reg(c, c->apm_mode_ctl, DATA_MEM(41));
+
+	/* Program address of controller status register */
+	clk_osm_write_seq_reg(c, c->apm_status_ctl, DATA_MEM(43));
+
+	/* Program address of crossover register */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + APM_CROSSOVER_VC,
+						DATA_MEM(44));
+
+	/* Program mode value to switch APM to VDD_APC */
+	clk_osm_write_seq_reg(c, APM_APC_MODE, DATA_MEM(72));
+
+	/* Program mode value to switch APM to VDD_MX */
+	clk_osm_write_seq_reg(c, APM_MX_MODE, DATA_MEM(73));
+
+	/* Program mask used to move into read_mask port */
+	clk_osm_write_seq_reg(c, APM_READ_DATA_MASK, DATA_MEM(74));
+
+	/* Value used to move into read_exp port */
+	clk_osm_write_seq_reg(c, APM_APC_READ_VAL, DATA_MEM(75));
+	clk_osm_write_seq_reg(c, APM_MX_READ_VAL, DATA_MEM(76));
+}
+
+/*
+ * One-time OSM sequencer setup that is normally performed by TZ.  Only
+ * runs when secure_init is set (i.e. the kernel owns the sequencer).
+ * Programs PLL L_VAL/post-div data, APM addresses, GFMUX settings,
+ * override/FSM addresses, and — for the performance cluster only — the
+ * isense address read from DT.  Register/value pairs are opaque sequencer
+ * data; order follows the hardware programming guide.
+ */
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+					struct platform_device *pdev)
+{
+	if (!c->secure_init)
+		return;
+
+	dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+						 c->cluster_num);
+
+	/* PLL L_VAL & post-div programming */
+	clk_osm_write_seq_reg(c, c->apcs_pll_min_freq, DATA_MEM(32));
+	clk_osm_write_seq_reg(c, c->l_val_base, DATA_MEM(33));
+	clk_osm_write_seq_reg(c, c->apcs_pll_user_ctl, DATA_MEM(34));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV1, DATA_MEM(35));
+	clk_osm_write_seq_reg(c, PLL_POST_DIV2, DATA_MEM(36));
+
+	/* APM Programming */
+	clk_osm_program_apm_regs(c);
+
+	/* GFMUX Programming */
+	clk_osm_write_seq_reg(c, c->cfg_gfmux_addr, DATA_MEM(37));
+	clk_osm_write_seq_reg(c, 0x1, DATA_MEM(65));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(66));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(67));
+	clk_osm_write_seq_reg(c, 0x40000000, DATA_MEM(68));
+	clk_osm_write_seq_reg(c, 0x20000000, DATA_MEM(69));
+	clk_osm_write_seq_reg(c, 0x10000000, DATA_MEM(70));
+	clk_osm_write_seq_reg(c, 0x70000000, DATA_MEM(71));
+
+	/* Override programming */
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] +
+			OVERRIDE_CLUSTER_IDLE_ACK, DATA_MEM(54));
+	clk_osm_write_seq_reg(c, 0x3, DATA_MEM(55));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
+					DATA_MEM(40));
+	clk_osm_write_seq_reg(c, c->pbases[OSM_BASE] + REQ_GEN_FSM_STATUS,
+					DATA_MEM(60));
+	clk_osm_write_seq_reg(c, 0x10, DATA_MEM(61));
+	clk_osm_write_seq_reg(c, 0x70, DATA_MEM(62));
+	clk_osm_write_seq_reg(c, c->apcs_cbc_addr, DATA_MEM(112));
+	clk_osm_write_seq_reg(c, 0x2, DATA_MEM(113));
+
+	if (c == &perfcl_clk) {
+		int rc;
+		u32 isense_addr;
+
+		/* Performance cluster isense programming */
+		rc = of_property_read_u32(pdev->dev.of_node,
+				"qcom,perfcl-isense-addr", &isense_addr);
+		if (rc) {
+			dev_err(&pdev->dev, "unable to find qcom,perfcl-isense-addr property, rc=%d\n",
+				rc);
+			return;
+		}
+		clk_osm_write_seq_reg(c, isense_addr, DATA_MEM(45));
+		clk_osm_write_seq_reg(c, ISENSE_ON_DATA, DATA_MEM(46));
+		clk_osm_write_seq_reg(c, ISENSE_OFF_DATA, DATA_MEM(47));
+	}
+
+	clk_osm_write_seq_reg(c, c->ramp_ctl_addr, DATA_MEM(105));
+	clk_osm_write_seq_reg(c, CONSTANT_32, DATA_MEM(92));
+
+	/* Enable/disable CPR ramp settings */
+	clk_osm_write_seq_reg(c, 0x101C031, DATA_MEM(106));
+	clk_osm_write_seq_reg(c, 0x1010031, DATA_MEM(107));
+}
+
+/*
+ * Configure the optional OSM hardware finite-state machines for a cluster,
+ * each gated by a DT-derived enable flag on the clk_osm: voltage
+ * reduction, core-count boost, safe-frequency (DCVS boost), pulse
+ * swallowing, and DCVS droop.  Timer fields are expressed in OSM clock
+ * cycles via clk_osm_count_ns().
+ */
+static void clk_osm_setup_fsms(struct clk_osm *c)
+{
+	u32 val;
+
+	/* Voltage Reduction FSM */
+	if (c->red_fsm_en) {
+		val = clk_osm_read_reg(c, VMIN_REDUCTION_ENABLE_REG) | BIT(0);
+		val |= BVAL(6, 1, c->min_cpr_vc);
+		clk_osm_write_reg(c, val, VMIN_REDUCTION_ENABLE_REG);
+
+		/* 10 us wait before reducing to the min CPR virtual corner */
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 10000),
+				  VMIN_REDUCTION_TIMER_REG);
+	}
+
+	/* Boost FSM */
+	if (c->boost_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		val |= DELTA_DEX_VAL | CC_BOOST_FSM_EN | IGNORE_PLL_LOCK;
+		clk_osm_write_reg(c, val, PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, CC_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, CC_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* Safe Freq FSM */
+	if (c->safe_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_BOOST_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, DCVS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, DCVS_BOOST_FSM_TIMERS_REG2);
+
+	}
+
+	/* Pulse Swallowing FSM */
+	if (c->ps_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | PS_BOOST_FSM_EN_MASK,
+							PDN_FSM_CTRL_REG);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG0);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG0);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG1);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
+		val |= BVAL(31, 16, clk_osm_count_ns(c, 1000));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG1);
+
+		val = clk_osm_read_reg(c, PS_BOOST_FSM_TIMERS_REG2);
+		val |= BVAL(15, 0, clk_osm_count_ns(c, DEXT_DECREMENT_WAIT_NS));
+		clk_osm_write_reg(c, val, PS_BOOST_FSM_TIMERS_REG2);
+	}
+
+	/* PLL signal timing control */
+	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
+		clk_osm_write_reg(c, 0x2, BOOST_PROG_SYNC_DELAY_REG);
+
+	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
+	if (c->droop_fsm_en) {
+		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+		clk_osm_write_reg(c, val | DCVS_DROOP_FSM_EN_MASK,
+				  PDN_FSM_CTRL_REG);
+	}
+
+	if (c->ps_fsm_en || c->droop_fsm_en) {
+		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 100),
+				  DROOP_RELEASE_TIMER_CTRL);
+		clk_osm_write_reg(c, clk_osm_count_ns(c, 150),
+				  DCVS_DROOP_TIMER_CTRL);
+		/*
+		 * TODO: Check if DCVS_DROOP_CODE used is correct. Also check
+		 * if RESYNC_CTRL should be set for L3.
+		 */
+		val = BIT(31) | BVAL(22, 16, 0x2) | BVAL(6, 0, 0x8);
+		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
+	}
+}
+
+/*
+ * Configure the Limits-Management (LLM) voltage policy for all three
+ * clusters from DT: up/down hysteresis timers (per-cluster, in us) and
+ * whether LLM voltage votes are honored.  Returns 0 on success or
+ * -ENOMEM if the scratch array cannot be allocated; missing DT timer
+ * properties are non-fatal.
+ */
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM up voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM down voltage request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+									rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+					LLM_VOLTAGE_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM Voltage requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-volt-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+		val = 0;
+	} else
+		val = 1;
+
+	/* Enable or disable LLM VOLT DVCS: bit 0 set disables voltage DCVS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+/*
+ * Configure the Limits-Management (LLM) frequency policy for all three
+ * clusters from DT: up/down hysteresis timers (per-cluster, in us) and
+ * whether LLM frequency votes are honored.  Mirrors
+ * clk_osm_set_llm_volt_policy() but uses BIT(1) of LLM_INTF_DCVS_DISABLE.
+ * Returns 0 on success or -ENOMEM; missing DT timers are non-fatal.
+ */
+static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int rc = 0, val, regval;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM up frequency request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "Unable to get CC up timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+						array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+						array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_INC_HYSTERESIS);
+	}
+
+	/*
+	 * Setup Timer to control how long OSM should wait before performing
+	 * DCVS when a LLM down frequency request is received.
+	 * Time is specified in us.
+	 */
+	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
+			rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk, array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val,
+						LLM_FREQ_VOTE_DEC_HYSTERESIS);
+	}
+
+	/* Enable or disable honoring of LLM frequency requests */
+	rc = of_property_read_bool(pdev->dev.of_node,
+					"qcom,enable-llm-freq-vote");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
+		val = 0;
+	} else
+		val = BIT(1);
+
+	/* Enable or disable LLM FREQ DVCS: bit 1 set disables freq DCVS */
+	regval = val | clk_osm_read_reg(&l3_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&l3_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);
+
+	/* Wait for the write to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+/*
+ * Configure the core-count (CC) DCVS policy for all three clusters from
+ * DT: up/down hysteresis timers, the cluster-power-collapse override
+ * index, C2/C3 "treat as active" mappings, and the global CC-DCVS
+ * disable.  Returns 0 on success or -ENOMEM; missing DT properties are
+ * non-fatal and leave hardware defaults in place.
+ *
+ * Fix: in the down-timer branch, the perfcl clk_osm_count_ns() result was
+ * discarded (no "val =") so the pwrcl hysteresis value was written to
+ * perfcl's SPM_CC_DEC_HYSTERESIS register.
+ */
+static int clk_osm_set_cc_policy(struct platform_device *pdev)
+{
+	int rc = 0, val;
+	u32 *array;
+	struct device_node *of = pdev->dev.of_node;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/* Per-cluster wait (in us) before DCVS after a CC up request */
+	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
+					MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
+			 rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+					array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+					array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_INC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+					array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_INC_HYSTERESIS);
+	}
+
+	/* Per-cluster wait (in us) before DCVS after a CC down request */
+	rc = of_property_read_u32_array(of, "qcom,down-timer",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
+	} else {
+		val = clk_osm_count_ns(&l3_clk,
+				       array[l3_clk.cluster_num]);
+		clk_osm_write_reg(&l3_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&pwrcl_clk,
+				       array[pwrcl_clk.cluster_num]);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+
+		val = clk_osm_count_ns(&perfcl_clk,
+				       array[perfcl_clk.cluster_num]);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DEC_HYSTERESIS);
+	}
+
+	/* OSM index override for cluster PC */
+	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
+			rc);
+		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
+	} else {
+		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
+		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
+			| ENABLE_OVERRIDE;
+		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
+	}
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	/* Clearing bit 2/1 makes OSM count cores in C3/C2 as active */
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c3-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C3 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(2);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-c2-active");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Treat cores in C2 as active\n");
+
+		val = clk_osm_read_reg(&l3_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&l3_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&pwrcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+
+		val = clk_osm_read_reg(&perfcl_clk, SPM_CORE_INACTIVE_MAPPING);
+		val &= ~BIT(1);
+		clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_INACTIVE_MAPPING);
+	}
+
+	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
+	if (rc) {
+		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
+		val = 1;
+	} else
+		val = 0;
+
+	clk_osm_write_reg(&l3_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
+	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);
+
+	/* Wait for the writes to complete */
+	clk_osm_mb(&perfcl_clk, OSM_BASE);
+
+	devm_kfree(&pdev->dev, array);
+	return 0;
+}
+
+/*
+ * Bring up a cluster PLL with fixed L-val/user/config settings, then walk
+ * PLL_MODE through reset (0x0) -> bypass release (0x2) -> reset release
+ * (0x6) -> output enable (0x7), with the required lock/settle delays and
+ * barriers between stages.
+ */
+static void clk_osm_setup_cluster_pll(struct clk_osm *c)
+{
+	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
+	writel_relaxed(0x26, c->vbases[PLL_BASE] + PLL_L_VAL);
+	writel_relaxed(0x8, c->vbases[PLL_BASE] +
+			PLL_USER_CTRL);
+	writel_relaxed(0x20000AA8, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_LO);
+	writel_relaxed(0x000003D2, c->vbases[PLL_BASE] +
+			PLL_CONFIG_CTL_HI);
+	writel_relaxed(0x2, c->vbases[PLL_BASE] +
+			PLL_MODE);
+
+	/* Ensure writes complete before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	udelay(PLL_WAIT_LOCK_TIME_US);
+
+	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);
+
+	/* Ensure write completes before delaying */
+	clk_osm_mb(c, PLL_BASE);
+
+	usleep_range(50, 75);
+
+	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
+}
+
+/*
+ * Miscellaneous per-cluster OSM programming: core-count control, minimum
+ * PLL frequency, the set/clear IGNORE_PLL_LOCK patterns for the sequencer
+ * (DATA_MEM 108/109), and the L-val of the first turbo virtual corner
+ * (DATA_MEM 114).  Writes go direct when secure_init, else via SCM.
+ */
+static void clk_osm_misc_programming(struct clk_osm *c)
+{
+	u32 lval = 0xFF, val;
+	int i;
+
+	clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CORE_COUNT_CTRL);
+	clk_osm_write_reg(c, PLL_MIN_LVAL, PLL_MIN_FREQ_REG);
+
+	/* Pattern to set/clear PLL lock in PDN_FSM_CTRL_REG */
+	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
+	if (c->secure_init) {
+		val |= IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(108));
+		val &= ~IGNORE_PLL_LOCK;
+		clk_osm_write_seq_reg(c, val, DATA_MEM(109));
+		clk_osm_write_seq_reg(c, MIN_VCO_VAL, DATA_MEM(110));
+	} else {
+		val |= IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(108), val);
+		val &= ~IGNORE_PLL_LOCK;
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(109), val);
+	}
+
+	/* Program LVAL corresponding to first turbo VC */
+	for (i = 0; i < c->num_entries; i++) {
+		if (c->osm_table[i].mem_acc_level == MAX_MEM_ACC_LEVELS) {
+			lval = c->osm_table[i].freq_data & GENMASK(7, 0);
+			break;
+		}
+	}
+
+	if (c->secure_init)
+		clk_osm_write_seq_reg(c, lval, DATA_MEM(114));
+	else
+		scm_io_write(c->pbases[SEQ_BASE] + DATA_MEM(114), lval);
+
+}
+
+/*
+ * Write the cluster's OSM lookup table into hardware.  All OSM_TABLE_SIZE
+ * rows are programmed; rows beyond num_entries repeat the last valid
+ * freq/volt/override values.  While copying, validate that a virtual
+ * corner never maps to two different MEM-ACC levels in consecutive rows.
+ * Returns 0 on success, -EINVAL on an inconsistent LUT.
+ *
+ * Fix: last_mem_acc_level was declared uninitialized; it is only read
+ * after a prior iteration assigns it (guarded by last_virtual_corner),
+ * but initializing it makes the invariant explicit and silences
+ * -Wmaybe-uninitialized.
+ */
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	int i;
+	u32 freq_val = 0, volt_val = 0, override_val = 0;
+	u32 table_entry_offset, last_mem_acc_level = 0;
+	u32 last_virtual_corner = 0;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		if (i < c->num_entries) {
+			freq_val = entry[i].freq_data;
+			volt_val = BVAL(27, 24, entry[i].mem_acc_level)
+				| BVAL(21, 16, entry[i].virtual_corner)
+				| BVAL(11, 0, entry[i].open_loop_volt);
+			override_val = entry[i].override_data;
+
+			/* Same corner must not span two MEM-ACC levels */
+			if (last_virtual_corner && last_virtual_corner ==
+			    entry[i].virtual_corner && last_mem_acc_level !=
+			    entry[i].mem_acc_level) {
+				pr_err("invalid LUT entry at row=%d virtual_corner=%d, mem_acc_level=%d\n",
+				       i, entry[i].virtual_corner,
+				       entry[i].mem_acc_level);
+				return -EINVAL;
+			}
+			last_virtual_corner = entry[i].virtual_corner;
+			last_mem_acc_level = entry[i].mem_acc_level;
+		}
+
+		table_entry_offset = i * OSM_REG_SIZE;
+		clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+		clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+		clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+				  table_entry_offset);
+	}
+
+	/* Make sure all writes go through */
+	clk_osm_mb(c, OSM_BASE);
+
+	return 0;
+}
+
+/*
+ * Debug dump of a cluster's OSM table, decoding the packed freq_data
+ * fields: PLL source [31:30], PLL post-div [29:28], core count [18:16]
+ * and L-val [7:0], plus the APM/MEM-ACC threshold and crossover corners.
+ * Output only appears with dynamic debug enabled.
+ */
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	int i;
+	struct osm_entry *table = c->osm_table;
+	u32 pll_src, pll_div, lval, core_count;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), Core Count, PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (i = 0; i < c->num_entries; i++) {
+		pll_src = (table[i].freq_data & GENMASK(31, 30)) >> 30;
+		pll_div = (table[i].freq_data & GENMASK(29, 28)) >> 28;
+		lval = table[i].freq_data & GENMASK(7, 0);
+		core_count = (table[i].freq_data & GENMASK(18, 16)) >> 16;
+
+		pr_debug("%3d, %11lu, %2u, %5u, %2u, %6u, %8u, %7u, %5u\n",
+			i,
+			table[i].frequency,
+			table[i].virtual_corner,
+			table[i].open_loop_volt,
+			core_count,
+			pll_src,
+			pll_div,
+			lval,
+			table[i].mem_acc_level);
+	}
+	pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+			c->apm_threshold_vc, c->apm_crossover_vc);
+	pr_debug("MEM-ACC threshold corner=%d, crossover corner=%d\n",
+			c->mem_acc_threshold_vc, c->mem_acc_crossover_vc);
+}
+
+/*
+ * Look up the open-loop voltage for @rate in the cluster's OSM table and
+ * return it in uV.  Returns (u32)-EINVAL when the rate is not listed
+ * (note the unsigned return type — callers must compare against the
+ * sentinel accordingly).
+ */
+static u32 find_voltage(struct clk_osm *c, unsigned long rate)
+{
+	int idx;
+
+	for (idx = 0; idx < c->num_entries; idx++) {
+		if (c->osm_table[idx].frequency != rate)
+			continue;
+		/* OPP table voltages have units of mV */
+		return c->osm_table[idx].open_loop_volt * 1000;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Register the cluster's frequency/voltage pairs as OPPs on @dev, walking
+ * the clk_init_data rate_max list from lowest to highest.  The min and
+ * max pairs are logged since thermal mitigation and the scheduler consume
+ * them.  Returns 0 on success, -EINVAL when a rate has no table voltage,
+ * or the dev_pm_opp_add() error.
+ *
+ * Fix: uv was declared u32, so the "uv <= 0" check could never catch the
+ * (u32)-EINVAL sentinel returned by find_voltage() — the cast made it a
+ * huge positive value and the error path was dead.  Using a signed int
+ * makes the sentinel negative and the check effective.
+ */
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+	unsigned long rate = 0;
+	int uv;
+	long rc;
+	int j = 0;
+	unsigned long min_rate = c->hw.init->rate_max[0];
+	unsigned long max_rate =
+			c->hw.init->rate_max[c->hw.init->num_rate_max - 1];
+
+	while (1) {
+		rate = c->hw.init->rate_max[j++];
+		uv = find_voltage(c, rate);
+		if (uv <= 0) {
+			pr_warn("No voltage for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		rc = dev_pm_opp_add(dev, rate, uv);
+		if (rc) {
+			pr_warn("failed to add OPP for %lu\n", rate);
+			return rc;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if (rate == min_rate)
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+
+		if (rate == max_rate && max_rate != min_rate) {
+			pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			break;
+		}
+
+		if (min_rate == max_rate)
+			break;
+	}
+	return 0;
+}
+
+/*
+ * For every possible CPU, resolve its per-CPU clock, find the parent
+ * cluster clock, and register the cluster's OPP table on the CPU device.
+ * Failures are logged but not propagated (best effort at probe time).
+ */
+static void populate_opp_table(struct platform_device *pdev)
+{
+	int cpu;
+	struct device *cpu_dev;
+	struct clk_osm *c, *parent;
+	struct clk_hw *hw_parent;
+
+	for_each_possible_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return;
+		}
+
+		/* OPPs live on the cluster (parent) clock's table */
+		hw_parent = clk_hw_get_parent(&c->hw);
+		parent = to_clk_osm(hw_parent);
+		cpu_dev = get_cpu_device(cpu);
+		if (cpu_dev)
+			if (add_opp(parent, cpu_dev))
+				pr_err("Failed to add OPP levels for %s\n",
+					dev_name(cpu_dev));
+	}
+
+	/*TODO: Figure out which device to tag the L3 table to */
+}
+
+/*
+ * Return a monotonically increasing cycle count for @cpu, extending the
+ * 32-bit hardware counter to 64 bits in software.  The parent cluster's
+ * lock serializes readers so the overflow accounting on
+ * prev/total_cycle_counter stays consistent.  Returns 0 if no clock is
+ * mapped for the CPU.
+ */
+static u64 clk_osm_get_cpu_cycle_counter(int cpu)
+{
+	u32 val;
+	unsigned long flags;
+	struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);
+
+	if (IS_ERR_OR_NULL(c)) {
+		pr_err("no clock device for CPU=%d\n", cpu);
+		return 0;
+	}
+
+	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
+
+	spin_lock_irqsave(&parent->lock, flags);
+	val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+
+	if (val < c->prev_cycle_counter) {
+		/* Handle counter overflow */
+		c->total_cycle_counter += UINT_MAX -
+			c->prev_cycle_counter + val;
+		c->prev_cycle_counter = val;
+	} else {
+		c->total_cycle_counter += val - c->prev_cycle_counter;
+		c->prev_cycle_counter = val;
+	}
+	spin_unlock_irqrestore(&parent->lock, flags);
+
+	return c->total_cycle_counter;
+}
+
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+	u32 ratio = c->osm_clk_rate;
+	u32 val = 0;
+
+	/* Enable cycle counter */
+	val = BIT(0);
+	/* Setup OSM clock to XO ratio */
+	do_div(ratio, c->xo_clk_rate);
+	val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+
+	clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+	pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
/*
 * clk_osm_resolve_crossover_corners() - resolve APM and MEM-ACC
 * threshold/crossover virtual corners for the performance cluster.
 *
 * No-op (returns 0) for the L3 and power clusters. The top two regulator
 * corners are taken as the APM and MEM-ACC crossover corners; the
 * threshold corners are the first LUT corners whose voltage reaches the
 * DT-provided threshold voltages, defaulting to MAX_VC when none do.
 *
 * Returns 0 on success or a negative errno on DT/regulator failure.
 */
static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
					struct platform_device *pdev)
{
	struct regulator *regulator = c->vdd_reg;
	int count, vc, i, memacc_threshold, apm_threshold;
	int rc = 0;
	u32 corner_volt;

	/* Crossover corners only apply to the perf cluster */
	if (c == &l3_clk || c == &pwrcl_clk)
		return rc;

	rc = of_property_read_u32(pdev->dev.of_node,
				  "qcom,perfcl-apcs-apm-threshold-voltage",
				  &apm_threshold);
	if (rc) {
		pr_err("qcom,perfcl-apcs-apm-threshold-voltage property not specified\n");
		return rc;
	}

	rc = of_property_read_u32(pdev->dev.of_node,
				  "qcom,perfcl-apcs-mem-acc-threshold-voltage",
				  &memacc_threshold);
	if (rc) {
		pr_err("qcom,perfcl-apcs-mem-acc-threshold-voltage property not specified\n");
		return rc;
	}

	/*
	 * Initialize VC settings in case none of them go above the voltage
	 * limits
	 */
	c->apm_threshold_vc = c->apm_crossover_vc = c->mem_acc_crossover_vc =
				c->mem_acc_threshold_vc = MAX_VC;

	count = regulator_count_voltages(regulator);
	if (count < 0) {
		pr_err("Failed to get the number of virtual corners supported\n");
		return count;
	}

	/* The two highest regulator corners are the crossover corners */
	c->apm_crossover_vc = count - 2;
	c->mem_acc_crossover_vc = count - 1;

	for (i = 0; i < OSM_TABLE_SIZE; i++) {
		/* Regulator corners are 1-based; the LUT stores them 0-based */
		vc = c->osm_table[i].virtual_corner + 1;
		corner_volt = regulator_list_corner_voltage(regulator, vc);

		/* Latch the first corner at/above each voltage threshold */
		if (c->apm_threshold_vc == MAX_VC &&
				corner_volt >= apm_threshold)
			c->apm_threshold_vc = c->osm_table[i].virtual_corner;

		if (c->mem_acc_threshold_vc == MAX_VC &&
				corner_volt >= memacc_threshold)
			c->mem_acc_threshold_vc =
				c->osm_table[i].virtual_corner;
	}

	return rc;
}
+
+static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
+{
+	struct regulator *regulator = c->vdd_reg;
+	u32 vc, mv;
+	int i;
+
+	for (i = 0; i < OSM_TABLE_SIZE; i++) {
+		vc = c->osm_table[i].virtual_corner + 1;
+		/* Voltage is in uv. Convert to mv */
+		mv = regulator_list_corner_voltage(regulator, vc) / 1000;
+		c->osm_table[i].open_loop_volt = mv;
+	}
+
+	return 0;
+}
+
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 *fmax_temp;
+	u32 data;
+	unsigned long abs_fmax = 0;
+	bool last_entry = false;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	fmax_temp = devm_kzalloc(&pdev->dev, num_rows * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!fmax_temp)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		return -EINVAL;
+	}
+
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].mem_acc_level = array[i + MEM_ACC_LEVEL];
+		/* Voltage corners are 0 based in the OSM LUT */
+		c->osm_table[j].virtual_corner = array[i + VIRTUAL_CORNER] - 1;
+		pr_debug("index=%d freq=%ld virtual_corner=%d freq_data=0x%x override_data=0x%x mem_acc_level=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].virtual_corner,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].mem_acc_level);
+
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CORE_COUNT) {
+			fmax_temp[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS)
+			i += NUM_FIELDS;
+		else {
+			abs_fmax = array[i];
+			last_entry = true;
+		}
+	}
+	fmax_temp[k] = abs_fmax;
+
+	osm_clks_init[c->cluster_num].rate_max = devm_kzalloc(&pdev->dev,
+						 k * sizeof(unsigned long),
+						       GFP_KERNEL);
+	if (!osm_clks_init[c->cluster_num].rate_max) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	for (i = 0; i < k; i++)
+		osm_clks_init[c->cluster_num].rate_max[i] = fmax_temp[i];
+
+	osm_clks_init[c->cluster_num].num_rate_max = k;
+exit:
+	devm_kfree(&pdev->dev, fmax_temp);
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
/*
 * clk_osm_parse_dt_configs() - read all per-cluster OSM configuration from
 * devicetree into the static l3/pwrcl/perfcl clk_osm structures.
 *
 * Parses per-cluster register addresses and values (indexed by
 * cluster_num), cycle-counter parameters, FSM enable flags, maps the three
 * cluster sequencer regions, and reads the per-speed-bin minimum CPR VC
 * properties. When "qcom,osm-no-tz" is set (secure_init), the MEM-ACC
 * address lists are also required.
 *
 * Returns 0 on success or a negative errno on any missing property or
 * mapping failure.
 */
static int clk_osm_parse_dt_configs(struct platform_device *pdev)
{
	struct device_node *of = pdev->dev.of_node;
	u32 *array;
	int rc = 0;
	struct resource *res;
	char l3_min_cpr_vc_str[] = "qcom,l3-min-cpr-vc-bin0";
	char pwrcl_min_cpr_vc_str[] = "qcom,pwrcl-min-cpr-vc-bin0";
	char perfcl_min_cpr_vc_str[] = "qcom,perfcl-min-cpr-vc-bin0";

	/* Scratch buffer for the per-cluster (MAX_CLUSTER_CNT) u32 arrays */
	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
			     GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	rc = of_property_read_u32_array(of, "qcom,l-val-base",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.l_val_base = array[l3_clk.cluster_num];
	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.apcs_pll_user_ctl = array[l3_clk.cluster_num];
	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apcs-pll-min-freq",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-min-freq property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.apcs_pll_min_freq = array[l3_clk.cluster_num];
	pwrcl_clk.apcs_pll_min_freq = array[pwrcl_clk.cluster_num];
	perfcl_clk.apcs_pll_min_freq = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
				  array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.apm_mode_ctl = array[l3_clk.cluster_num];
	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apm-status-ctrl",
				  array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apm-status-ctrl property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.apm_status_ctl = array[l3_clk.cluster_num];
	pwrcl_clk.apm_status_ctl = array[pwrcl_clk.cluster_num];
	perfcl_clk.apm_status_ctl = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,cfg-gfmux-addr",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,cfg-gfmux-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.cfg_gfmux_addr = array[l3_clk.cluster_num];
	pwrcl_clk.cfg_gfmux_addr = array[pwrcl_clk.cluster_num];
	perfcl_clk.cfg_gfmux_addr = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apcs-cbc-addr",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apcs-cbc-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.apcs_cbc_addr = array[l3_clk.cluster_num];
	pwrcl_clk.apcs_cbc_addr = array[pwrcl_clk.cluster_num];
	perfcl_clk.apcs_cbc_addr = array[perfcl_clk.cluster_num];

	rc = of_property_read_u32_array(of, "qcom,apcs-ramp-ctl-addr",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,apcs-ramp-ctl-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.ramp_ctl_addr = array[l3_clk.cluster_num];
	pwrcl_clk.ramp_ctl_addr = array[pwrcl_clk.cluster_num];
	perfcl_clk.ramp_ctl_addr = array[perfcl_clk.cluster_num];

	/* Clock/counter parameters are shared by all three domains */
	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
				  &pwrcl_clk.xo_clk_rate);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	l3_clk.xo_clk_rate = perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;

	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
				  &pwrcl_clk.osm_clk_rate);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
			rc);
		return -EINVAL;
	}
	l3_clk.osm_clk_rate = perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;

	rc = of_property_read_u32(of, "qcom,cc-reads",
				  &pwrcl_clk.cycle_counter_reads);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
			rc);
		return -EINVAL;
	}
	l3_clk.cycle_counter_reads = perfcl_clk.cycle_counter_reads =
			pwrcl_clk.cycle_counter_reads;

	/* Cycle-counter delay and factor are optional tuning knobs */
	rc = of_property_read_u32(of, "qcom,cc-delay",
				  &pwrcl_clk.cycle_counter_delay);
	if (rc)
		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
	else
		l3_clk.cycle_counter_delay = perfcl_clk.cycle_counter_delay =
			pwrcl_clk.cycle_counter_delay;

	rc = of_property_read_u32(of, "qcom,cc-factor",
				  &pwrcl_clk.cycle_counter_factor);
	if (rc)
		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
	else
		l3_clk.cycle_counter_factor = perfcl_clk.cycle_counter_factor =
			pwrcl_clk.cycle_counter_factor;

	/* Optional FSM enables, applied uniformly to all three domains */
	l3_clk.red_fsm_en = perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
		of_property_read_bool(of, "qcom,red-fsm-en");

	l3_clk.boost_fsm_en = perfcl_clk.boost_fsm_en =
		pwrcl_clk.boost_fsm_en =
		of_property_read_bool(of, "qcom,boost-fsm-en");

	l3_clk.safe_fsm_en = perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
		of_property_read_bool(of, "qcom,safe-fsm-en");

	l3_clk.ps_fsm_en = perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
		of_property_read_bool(of, "qcom,ps-fsm-en");

	l3_clk.droop_fsm_en = perfcl_clk.droop_fsm_en =
		pwrcl_clk.droop_fsm_en =
		of_property_read_bool(of, "qcom,droop-fsm-en");

	devm_kfree(&pdev->dev, array);

	/* Map the per-cluster sequencer register regions */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"l3_sequencer");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for l3_sequencer\n");
		return -ENOMEM;
	}

	l3_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
	l3_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
						  resource_size(res));

	if (!l3_clk.vbases[SEQ_BASE]) {
		dev_err(&pdev->dev, "Unable to map in l3_sequencer base\n");
		return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pwrcl_sequencer");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for pwrcl_sequencer\n");
		return -ENOMEM;
	}

	pwrcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
	pwrcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
						  resource_size(res));

	if (!pwrcl_clk.vbases[SEQ_BASE]) {
		dev_err(&pdev->dev, "Unable to map in pwrcl_sequencer base\n");
		return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"perfcl_sequencer");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for perfcl_sequencer\n");
		return -ENOMEM;
	}

	perfcl_clk.pbases[SEQ_BASE] = (unsigned long)res->start;
	perfcl_clk.vbases[SEQ_BASE] = devm_ioremap(&pdev->dev, res->start,
						  resource_size(res));

	if (!perfcl_clk.vbases[SEQ_BASE]) {
		dev_err(&pdev->dev, "Unable to map in perfcl_sequencer base\n");
		return -ENOMEM;
	}

	/*
	 * NOTE(review): this runs before the probe function reads the efuse
	 * speed bins, so *_clk.speedbin here is still its initial value —
	 * confirm the intended ordering of speed-bin resolution vs. these
	 * min-cpr-vc lookups.
	 */
	snprintf(l3_min_cpr_vc_str, ARRAY_SIZE(l3_min_cpr_vc_str),
			"qcom,l3-min-cpr-vc-bin%d", l3_clk.speedbin);
	rc = of_property_read_u32(of, l3_min_cpr_vc_str, &l3_clk.min_cpr_vc);
	if (rc) {
		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
			l3_min_cpr_vc_str, rc);
		return -EINVAL;
	}

	snprintf(pwrcl_min_cpr_vc_str, ARRAY_SIZE(pwrcl_min_cpr_vc_str),
			"qcom,pwrcl-min-cpr-vc-bin%d", pwrcl_clk.speedbin);
	rc = of_property_read_u32(of, pwrcl_min_cpr_vc_str,
						&pwrcl_clk.min_cpr_vc);
	if (rc) {
		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
			pwrcl_min_cpr_vc_str, rc);
		return -EINVAL;
	}

	snprintf(perfcl_min_cpr_vc_str, ARRAY_SIZE(perfcl_min_cpr_vc_str),
			"qcom,perfcl-min-cpr-vc-bin%d", perfcl_clk.speedbin);
	rc = of_property_read_u32(of, perfcl_min_cpr_vc_str,
						&perfcl_clk.min_cpr_vc);
	if (rc) {
		dev_err(&pdev->dev, "unable to find %s property, rc=%d\n",
			perfcl_min_cpr_vc_str, rc);
		return -EINVAL;
	}

	/* With "qcom,osm-no-tz", HQM/MEM-ACC setup is done by this driver */
	l3_clk.secure_init = perfcl_clk.secure_init = pwrcl_clk.secure_init =
		of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");

	if (!pwrcl_clk.secure_init)
		return rc;

	rc = of_property_read_u32_array(of, "qcom,l3-mem-acc-addr",
					l3_clk.mem_acc_addr, MEM_ACC_ADDRS);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,l3-mem-acc-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(of, "qcom,pwrcl-mem-acc-addr",
					pwrcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,pwrcl-mem-acc-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	rc = of_property_read_u32_array(of, "qcom,perfcl-mem-acc-addr",
					perfcl_clk.mem_acc_addr, MEM_ACC_ADDRS);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,perfcl-mem-acc-addr property, rc=%d\n",
			rc);
		return -EINVAL;
	}

	return rc;
}
+
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	unsigned long pbase;
+	int rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_l3_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_l3_base");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	l3_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!l3_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_l3_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_pwrcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_pwrcl_base");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_pwrcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"osm_perfcl_base");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm_perfcl_base");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	perfcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+
+	if (!perfcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm_perfcl_base base\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l3_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for l3_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map l3_pll base\n");
+		return -ENOMEM;
+	}
+
+	l3_clk.pbases[PLL_BASE] = pbase;
+	l3_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for pwrcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map pwrcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[PLL_BASE] = pbase;
+	pwrcl_clk.vbases[PLL_BASE] = vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "perfcl_pll");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for perfcl_pll\n");
+		return -ENOMEM;
+	}
+	pbase = (unsigned long)res->start;
+	vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+
+	if (!vbase) {
+		dev_err(&pdev->dev, "Unable to map perfcl_pll base\n");
+		return -ENOMEM;
+	}
+
+	perfcl_clk.pbases[PLL_BASE] = pbase;
+	perfcl_clk.vbases[PLL_BASE] = vbase;
+
+	/* efuse speed bin fuses are optional */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "pwrcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in pwrcl_efuse base\n");
+			return -ENOMEM;
+		}
+		pwrcl_clk.pbases[EFUSE_BASE] = pbase;
+		pwrcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "perfcl_efuse");
+	if (res) {
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in perfcl_efuse base\n");
+			return -ENOMEM;
+		}
+		perfcl_clk.pbases[EFUSE_BASE] = pbase;
+		perfcl_clk.vbases[EFUSE_BASE] = vbase;
+	}
+
+	vdd_l3 = devm_regulator_get(&pdev->dev, "vdd-l3");
+	if (IS_ERR(vdd_l3)) {
+		rc = PTR_ERR(vdd_l3);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the l3 vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	l3_clk.vdd_reg = vdd_l3;
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-l3-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-l3-supply\n");
+		return -EINVAL;
+	}
+
+	l3_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!l3_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-l3-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
/* Initial rate (Hz) programmed on the CPU and L3 clocks before enabling OSM */
static unsigned long init_rate = 300000000;
+
+static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
+{
+	int rc = 0, cpu, i;
+	int speedbin = 0, pvs_ver = 0;
+	u32 pte_efuse, val;
+	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
+	struct clk *ext_xo_clk, *clk;
+	struct clk_osm *c;
+	struct device *dev = &pdev->dev;
+	struct clk_onecell_data *clk_data;
+	struct resource *res;
+	void *vbase;
+	char l3speedbinstr[] = "qcom,l3-speedbin0-v0";
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	/*
+	 * Require the RPM-XO clock to be registered before OSM.
+	 * The cpuss_gpll0_clk_src is listed to be configured by BL.
+	 */
+	ext_xo_clk = devm_clk_get(dev, "xo_ao");
+	if (IS_ERR(ext_xo_clk)) {
+		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
+			dev_err(dev, "Unable to get xo clock\n");
+		return PTR_ERR(ext_xo_clk);
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+								GFP_KERNEL);
+	if (!clk_data)
+		goto exit;
+
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+					sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks)
+		goto clk_err;
+
+	clk_data->clk_num = num_clks;
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+									rc);
+		return rc;
+	}
+
+	if (l3_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(l3_clk.vbases[EFUSE_BASE]);
+		l3_clk.speedbin = ((pte_efuse >> L3_EFUSE_SHIFT) &
+						    L3_EFUSE_MASK);
+		snprintf(l3speedbinstr, ARRAY_SIZE(l3speedbinstr),
+			 "qcom,l3-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using L3 speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &l3_clk, l3speedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (pwrcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(pwrcl_clk.vbases[EFUSE_BASE]);
+		pwrcl_clk.speedbin = ((pte_efuse >> PWRCL_EFUSE_SHIFT) &
+						    PWRCL_EFUSE_MASK);
+		snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+			 "qcom,pwrcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using pwrcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk, pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	if (perfcl_clk.vbases[EFUSE_BASE]) {
+		/* Multiple speed-bins are supported */
+		pte_efuse = readl_relaxed(perfcl_clk.vbases[EFUSE_BASE]);
+		perfcl_clk.speedbin = ((pte_efuse >> PERFCL_EFUSE_SHIFT) &
+							PERFCL_EFUSE_MASK);
+		snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+			 "qcom,perfcl-speedbin%d-v%d", speedbin, pvs_ver);
+	}
+
+	dev_info(&pdev->dev, "using perfcl speed bin %u and pvs_ver %d\n",
+		 speedbin, pvs_ver);
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&l3_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for L3, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_crossover_corners(&l3_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for L3\n");
+
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev,
+			"No APM crossover corner programmed for pwrcl_clk\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&l3_clk);
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&l3_clk);
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	rc = clk_osm_setup_hw_table(&l3_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup l3 hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "cc policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&l3_clk);
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/* Program VC at which the array power supply needs to be switched */
+	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apm_threshold_vc,
+					APM_CROSSOVER_VC);
+	if (perfcl_clk.secure_init) {
+		clk_osm_write_seq_reg(&perfcl_clk, perfcl_clk.apm_crossover_vc,
+				DATA_MEM(77));
+		clk_osm_write_seq_reg(&perfcl_clk,
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)),
+				DATA_MEM(111));
+	} else {
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(77),
+				perfcl_clk.apm_crossover_vc);
+		scm_io_write(perfcl_clk.pbases[SEQ_BASE] + DATA_MEM(111),
+				(0x39 | (perfcl_clk.apm_threshold_vc << 6)));
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"apps_itm_ctl");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for apps_itm_ctl\n");
+		return -ENOMEM;
+	}
+
+	vbase = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!vbase) {
+		dev_err(&pdev->dev,
+				"Unable to map in apps_itm_ctl base\n");
+		return -ENOMEM;
+	}
+
+	val = readl_relaxed(vbase + 0x0);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x0);
+
+	val = readl_relaxed(vbase + 0x4);
+	val &= ~BIT(0);
+	writel_relaxed(val, vbase + 0x4);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&l3_clk, pdev);
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* MEM-ACC Programming */
+	clk_osm_program_mem_acc_regs(&l3_clk);
+	clk_osm_program_mem_acc_regs(&pwrcl_clk);
+	clk_osm_program_mem_acc_regs(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&l3_clk);
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	/* Misc programming */
+	clk_osm_misc_programming(&l3_clk);
+	clk_osm_misc_programming(&pwrcl_clk);
+	clk_osm_misc_programming(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs")) {
+		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
+
+		val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
+		val |= BIT(0);
+		clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL);
+	}
+
+	clk_ops_core = clk_dummy_ops;
+	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
+
+	spin_lock_init(&l3_clk.lock);
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	/* Register OSM l3, pwr and perf clocks with Clock Framework */
+	for (i = 0; i < num_clks; i++) {
+		clk = devm_clk_register(&pdev->dev, osm_qcom_clk_hws[i]);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "Unable to register CPU clock at index %d\n",
+				i);
+			return PTR_ERR(clk);
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+								clk_data);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks\n");
+			goto provider_err;
+	}
+
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		c = logical_cpu_to_clk(cpu);
+		if (!c) {
+			pr_err("no clock device for CPU=%d\n", cpu);
+			return -EINVAL;
+		}
+
+		rc = clk_set_rate(c->hw.clk, init_rate);
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to set init rate on CPU %d, rc=%d\n",
+			cpu, rc);
+			goto provider_err;
+		}
+		WARN(clk_prepare_enable(c->hw.clk),
+		     "Failed to enable clock for cpu %d\n", cpu);
+		udelay(300);
+	}
+
+	rc = clk_set_rate(l3_clk.hw.clk, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on L3 cluster, rc=%d\n",
+			rc);
+		goto provider_err;
+	}
+	WARN(clk_prepare_enable(l3_clk.hw.clk),
+		     "Failed to enable clock for L3\n");
+	udelay(300);
+
+	populate_opp_table(pdev);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	register_cpu_cycle_counter_cb(&cb);
+	pr_info("OSM driver inited\n");
+	put_online_cpus();
+
+	return 0;
+provider_err:
+	if (clk_data)
+		devm_kfree(&pdev->dev, clk_data->clks);
+clk_err:
+	devm_kfree(&pdev->dev, clk_data);
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n", rc);
+	panic("Unable to Setup OSM");
+}
+
/* Devicetree match table: binds to "qcom,clk-cpu-osm" nodes */
static const struct of_device_id match_table[] = {
	{ .compatible = "qcom,clk-cpu-osm" },
	{}
};
+
static struct platform_driver clk_cpu_osm_driver = {
	.probe = clk_cpu_osm_driver_probe,
	.driver = {
		.name = "clk-cpu-osm",
		.of_match_table = match_table,
		/* NOTE(review): platform_driver_register() sets .owner itself */
		.owner = THIS_MODULE,
	},
};
+
/* Registered at arch_initcall so CPU clocks exist before cpufreq probes */
static int __init clk_cpu_osm_init(void)
{
	return platform_driver_register(&clk_cpu_osm_driver);
}
arch_initcall(clk_cpu_osm_init);
+
/* Module unload hook; mirrors clk_cpu_osm_init() */
static void __exit clk_cpu_osm_exit(void)
{
	platform_driver_unregister(&clk_cpu_osm_driver);
}
module_exit(clk_cpu_osm_exit);
+
+MODULE_DESCRIPTION("QTI CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index eface18..e728dec 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -57,4 +57,8 @@
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
 extern struct clk_ops clk_dummy_ops;
+
/*
 * BM(msb, lsb)       - 32-bit mask with bits [msb:lsb] set (msb >= lsb).
 * BVAL(msb, lsb, val) - @val shifted into the [msb:lsb] bit-field.
 *
 * Every macro argument is parenthesized in the expansion; the original
 * left msb/lsb bare (e.g. "31-msb"), which miscomputes for expression
 * arguments such as BVAL(5, 1, ratio - 1) or BM(x + 1, y).
 */
#define BM(msb, lsb)	(((((uint32_t)-1) << (31 - (msb))) >> (31 - (msb) + (lsb))) << (lsb))
#define BVAL(msb, lsb, val)	(((val) << (lsb)) & BM(msb, lsb))
+
 #endif
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index f9c55ec..2eb947d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -38,6 +38,14 @@
 #include "sde_color_processing.h"
 #include "sde_hw_rot.h"
 
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+		"If set, active planes will force their outputs to black,\n"
+		"by temporarily enabling the color fill, when recovering\n"
+		"from a system resume instead of attempting to display the\n"
+		"last provided frame buffer.");
+
 #define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
 		(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
 
@@ -2895,6 +2903,10 @@
 	else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
 		psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
 
+	/* force black color fill during suspend */
+	if (msm_is_suspend_state(plane->dev) && suspend_blank)
+		_sde_plane_color_fill(psde, 0x0, 0x0);
+
 	/* flag h/w flush complete */
 	if (plane->state)
 		to_sde_plane_state(plane->state)->pending = false;
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index 5633e1f..266c0a2 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -213,6 +213,7 @@
 	bool tx_blocked_signal_sent;
 	struct kthread_work kwork;
 	struct kthread_worker kworker;
+	struct work_struct wakeup_work;
 	struct task_struct *task;
 	struct tasklet_struct tasklet;
 	struct srcu_struct use_ref;
@@ -874,20 +875,10 @@
 		srcu_read_unlock(&einfo->use_ref, rcu_id);
 		return;
 	}
-	if (!atomic_ctx) {
-		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
-			einfo->tx_resume_needed = false;
-			einfo->xprt_if.glink_core_if_ptr->tx_resume(
-							&einfo->xprt_if);
-		}
-		spin_lock_irqsave(&einfo->write_lock, flags);
-		if (einfo->tx_blocked_signal_sent) {
-			wake_up_all(&einfo->tx_blocked_queue);
-			einfo->tx_blocked_signal_sent = false;
-		}
-		spin_unlock_irqrestore(&einfo->write_lock, flags);
-	}
 
+	if ((atomic_ctx) && ((einfo->tx_resume_needed) ||
+		(waitqueue_active(&einfo->tx_blocked_queue)))) /* tx waiting? */
+		schedule_work(&einfo->wakeup_work);
 
 	/*
 	 * Access to the fifo needs to be synchronized, however only the calls
@@ -1195,6 +1186,39 @@
 }
 
 /**
+ * tx_wakeup_worker() - worker function to wake up tx-blocked threads
+ * @work:	work item associated with the edge whose blocked tx needs waking.
+ */
+static void tx_wakeup_worker(struct work_struct *work)
+{
+	struct edge_info *einfo;
+	bool trigger_wakeup = false;
+	unsigned long flags;
+	int rcu_id;
+
+	einfo = container_of(work, struct edge_info, wakeup_work);
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+	if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+		einfo->tx_resume_needed = false;
+		einfo->xprt_if.glink_core_if_ptr->tx_resume(
+						&einfo->xprt_if);
+	}
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	if (waitqueue_active(&einfo->tx_blocked_queue)) { /* tx waiting? */
+		einfo->tx_blocked_signal_sent = false;
+		trigger_wakeup = true;
+	}
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+	if (trigger_wakeup)
+		wake_up_all(&einfo->tx_blocked_queue);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
  * rx_worker() - worker function to process received commands
  * @work:	kwork associated with the edge to process commands on.
  */
@@ -2303,6 +2327,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2402,6 +2427,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2489,6 +2515,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->intentless = true;
 	einfo->read_from_fifo = memcpy32_fromio;
@@ -2649,6 +2676,7 @@
 reg_xprt_fail:
 toc_init_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
@@ -2780,6 +2808,7 @@
 	init_waitqueue_head(&einfo->tx_blocked_queue);
 	kthread_init_work(&einfo->kwork, rx_worker);
 	kthread_init_worker(&einfo->kworker);
+	INIT_WORK(&einfo->wakeup_work, tx_wakeup_worker);
 	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
 	einfo->read_from_fifo = read_from_fifo;
 	einfo->write_to_fifo = write_to_fifo;
@@ -2900,6 +2929,7 @@
 reg_xprt_fail:
 smem_alloc_fail:
 	kthread_flush_worker(&einfo->kworker);
+	flush_work(&einfo->wakeup_work);
 	kthread_stop(einfo->task);
 	einfo->task = NULL;
 	tasklet_kill(&einfo->tasklet);
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 9a9d73b..458e39d 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -43,8 +43,14 @@
 #define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
 #define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
 #define PIL_SUBSYSTEM_NAME_LEN 32
-#define DEFAULT_NUM_INTENTS 5
-#define DEFAULT_RX_INTENT_SIZE 2048
+
+#define MAX_NUM_LO_INTENTS 5
+#define MAX_NUM_MD_INTENTS 3
+#define MAX_NUM_HI_INTENTS 2
+#define LO_RX_INTENT_SIZE 2048
+#define MD_RX_INTENT_SIZE 8192
+#define HI_RX_INTENT_SIZE (17 * 1024)
+
 /**
  * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
  * @list: IPC router's GLINK XPRT list.
@@ -82,6 +88,9 @@
 	unsigned int xprt_version;
 	unsigned int xprt_option;
 	bool disable_pil_loading;
+	uint32_t cur_lo_intents_cnt;
+	uint32_t cur_md_intents_cnt;
+	uint32_t cur_hi_intents_cnt;
 };
 
 struct ipc_router_glink_xprt_work {
@@ -342,7 +351,7 @@
 	}
 
 	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
-	if (rx_work->iovec_size <= DEFAULT_RX_INTENT_SIZE)
+	if (rx_work->iovec_size <= HI_RX_INTENT_SIZE)
 		reuse_intent = true;
 
 	pkt = glink_xprt_copy_data(rx_work);
@@ -371,9 +380,14 @@
 				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
 	D("%s: Notified IPC Router of %s OPEN\n",
 	  __func__, glink_xprtp->xprt.name);
-	for (i = 0; i < DEFAULT_NUM_INTENTS; i++)
+	glink_xprtp->cur_lo_intents_cnt = 0;
+	glink_xprtp->cur_md_intents_cnt = 0;
+	glink_xprtp->cur_hi_intents_cnt = 0;
+	for (i = 0; i < MAX_NUM_LO_INTENTS; i++) {
 		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-				      DEFAULT_RX_INTENT_SIZE);
+				      LO_RX_INTENT_SIZE);
+		glink_xprtp->cur_lo_intents_cnt++;
+	}
 	kfree(xprt_work);
 }
 
@@ -394,13 +408,32 @@
 
 static void glink_xprt_qrx_intent_worker(struct work_struct *work)
 {
+	size_t sz;
 	struct queue_rx_intent_work *qrx_intent_work =
 		container_of(work, struct queue_rx_intent_work, work);
 	struct ipc_router_glink_xprt *glink_xprtp =
 					qrx_intent_work->glink_xprtp;
+	uint32_t *cnt = NULL;
+	int ret;
 
-	glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
-			      qrx_intent_work->intent_size);
+	sz = qrx_intent_work->intent_size;
+	if (sz <= MD_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_md_intents_cnt >= MAX_NUM_MD_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = MD_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_md_intents_cnt;
+	} else if (sz <= HI_RX_INTENT_SIZE) {
+		if (glink_xprtp->cur_hi_intents_cnt >= MAX_NUM_HI_INTENTS)
+			goto qrx_intent_worker_out;
+		sz = HI_RX_INTENT_SIZE;
+		cnt = &glink_xprtp->cur_hi_intents_cnt;
+	}
+
+	ret = glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+					sz);
+	if (!ret && cnt)
+		(*cnt)++;
+qrx_intent_worker_out:
 	kfree(qrx_intent_work);
 }
 
@@ -470,7 +503,7 @@
 	struct ipc_router_glink_xprt *glink_xprtp =
 		(struct ipc_router_glink_xprt *)priv;
 
-	if (sz <= DEFAULT_RX_INTENT_SIZE)
+	if (sz <= LO_RX_INTENT_SIZE)
 		return true;
 
 	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
index 38d29e4..2471d27 100644
--- a/drivers/soc/qcom/msm_glink_pkt.c
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -502,13 +502,21 @@
 				struct queue_rx_intent_work, work);
 	struct glink_pkt_dev *devp = work_item->devp;
 
-	if (!devp || !devp->handle) {
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device\n", __func__);
+		kfree(work_item);
+		return;
+	}
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
 		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		mutex_unlock(&devp->ch_lock);
 		kfree(work_item);
 		return;
 	}
 
 	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	mutex_unlock(&devp->ch_lock);
 	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
 				__func__, work_item->intent_size, ret);
 	if (ret)
@@ -664,8 +672,15 @@
 	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
 
 	ret = copy_to_user(buf, pkt->data, pkt->size);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_to_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, pkt->size);
+		spin_lock_irqsave(&devp->pkt_list_lock, flags);
+		list_add_tail(&pkt->list, &devp->pkt_list);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -EFAULT;
+	}
 
 	ret = pkt->size;
 	glink_rx_done(devp->handle, pkt->data, false);
@@ -739,8 +754,13 @@
 	}
 
 	ret = copy_from_user(data, buf, count);
-	if (WARN_ON(ret != 0))
-		return ret;
+	if (ret) {
+		GLINK_PKT_ERR(
+		"%s copy_from_user failed ret[%d] on dev id:%d size %zu\n",
+		 __func__, ret, devp->i, count);
+		kfree(data);
+		return -EFAULT;
+	}
 
 	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
 	if (ret) {
@@ -1038,6 +1058,27 @@
 }
 
 /**
+ * pop_rx_pkt() - remove and return the first pkt from the rx pkt_list
+ * @devp:	pointer to G-Link packet device.
+ *
+ * This function returns the first item from the rx pkt_list, or NULL if empty.
+ */
+struct glink_rx_pkt *pop_rx_pkt(struct glink_pkt_dev *devp)
+{
+	unsigned long flags;
+	struct glink_rx_pkt *pkt = NULL;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	if (!list_empty(&devp->pkt_list)) {
+		pkt = list_first_entry(&devp->pkt_list,
+				struct glink_rx_pkt, list);
+		list_del(&pkt->list);
+	}
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return pkt;
+}
+
+/**
  * glink_pkt_release() - release operation on glink_pkt device
  * inode:	Pointer to the inode structure.
  * file:	Pointer to the file structure.
@@ -1051,6 +1092,7 @@
 	int ret = 0;
 	struct glink_pkt_dev *devp = file->private_data;
 	unsigned long flags;
+	struct glink_rx_pkt *pkt;
 
 	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
 			__func__, devp->i, current->comm, devp->ref_cnt);
@@ -1059,9 +1101,14 @@
 		devp->ref_cnt--;
 
 	if (devp->handle && devp->ref_cnt == 0) {
+		while ((pkt = pop_rx_pkt(devp))) {
+			glink_rx_done(devp->handle, pkt->data, false);
+			kfree(pkt);
+		}
 		wake_up(&devp->ch_read_wait_queue);
 		wake_up_interruptible(&devp->ch_opened_wait_queue);
 		ret = glink_close(devp->handle);
+		devp->handle = NULL;
 		if (ret)  {
 			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
 						__func__, ret);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index c2fb37b..1bbd751 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@
 static void *smem_ramdump_dev;
 static DEFINE_MUTEX(spinlock_init_lock);
 static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
 static int smem_module_inited;
 static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
 static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -374,7 +375,7 @@
 	uint32_t a_hdr_size;
 	int rc;
 
-	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+	SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
 					flags, skip_init_check, use_rspinlock);
 
 	if (!skip_init_check && !smem_initialized_check())
@@ -817,7 +818,7 @@
 void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
 							unsigned int flags)
 {
-	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+	SMEM_DBG("%s(%u, %u, %u)\n", __func__, id, to_proc, flags);
 
 	/*
 	 * Handle the circular dependecy between SMEM and software implemented
@@ -1084,7 +1085,8 @@
 	void *handle;
 	struct restart_notifier_block *nb;
 
-	smem_ramdump_dev = create_ramdump_device("smem", NULL);
+	if (smem_dev)
+		smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
 	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
 		LOG_ERR("%s: Unable to create smem ramdump device.\n",
 			__func__);
@@ -1509,7 +1511,7 @@
 		SMEM_INFO("smem security enabled\n");
 		smem_init_security();
 	}
-
+	smem_dev = &pdev->dev;
 	probe_done = true;
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 9c3f9431..0b952a4 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,7 @@
 		.ei_array	= NULL,
 	},
 };
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
 
 struct elem_info qmi_error_resp_type_v01_ei[] = {
 	{
diff --git a/include/dt-bindings/clock/qcom,cpucc-sdm845.h b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
index c1ff2a0..db3c940 100644
--- a/include/dt-bindings/clock/qcom,cpucc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,cpucc-sdm845.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,18 +14,18 @@
 #ifndef _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 #define _DT_BINDINGS_CLK_MSM_CPU_CC_SDM845_H
 
-#define L3_CLUSTER0_VOTE_CLK					0
-#define L3_CLUSTER1_VOTE_CLK					1
-#define L3_CLK							2
-#define CPU0_PWRCL_CLK						3
-#define CPU1_PWRCL_CLK						4
-#define CPU2_PWRCL_CLK						5
-#define CPU3_PWRCL_CLK						6
-#define PWRCL_CLK						7
-#define CPU4_PERFCL_CLK						8
-#define CPU5_PERFCL_CLK						9
-#define CPU6_PERFCL_CLK						10
-#define CPU7_PERFCL_CLK						11
-#define PERFCL_CLK						12
+#define L3_CLK							0
+#define PWRCL_CLK						1
+#define PERFCL_CLK						2
+#define L3_CLUSTER0_VOTE_CLK					3
+#define L3_CLUSTER1_VOTE_CLK					4
+#define CPU0_PWRCL_CLK						5
+#define CPU1_PWRCL_CLK						6
+#define CPU2_PWRCL_CLK						7
+#define CPU3_PWRCL_CLK						8
+#define CPU4_PERFCL_CLK						9
+#define CPU5_PERFCL_CLK						10
+#define CPU6_PERFCL_CLK						11
+#define CPU7_PERFCL_CLK						12
 
 #endif
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index d7cd961..693fceb 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -7,7 +7,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
 	TP_PROTO(unsigned long pfn, const struct page *page,
 		 unsigned int count, unsigned int align),
@@ -60,6 +60,44 @@
 		  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+	TP_PROTO(unsigned int count, unsigned int align),
+
+	TP_ARGS(count, align),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, count)
+		__field(unsigned int, align)
+	),
+
+	TP_fast_assign(
+		__entry->count = count;
+		__entry->align = align;
+	),
+
+	TP_printk("count=%u align=%u",
+		  __entry->count,
+		  __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+	TP_PROTO(unsigned long pfn, const struct page *page,
+		 unsigned int count, unsigned int align),
+
+	TP_ARGS(pfn, page, count, align)
+);
+
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 539b25a..0ee910d 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -96,6 +96,27 @@
 		__entry->dst_nid,
 		__entry->nr_pages)
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+	TP_PROTO(enum migrate_mode mode, int reason),
+
+	TP_ARGS(mode, reason),
+
+	TP_STRUCT__entry(
+		__field(enum migrate_mode, mode)
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->mode	= mode;
+		__entry->reason	= reason;
+	),
+
+	TP_printk("mode=%s reason=%s",
+		__print_symbolic(__entry->mode, MIGRATE_MODE),
+		__print_symbolic(__entry->reason, MIGRATE_REASON))
+);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/lib/Makefile b/lib/Makefile
index e0eb131..6bde16d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -31,6 +31,8 @@
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
 
+KASAN_SANITIZE_find_bit.o := n
+
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
diff --git a/mm/cma.c b/mm/cma.c
index c960459..0306bab 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -134,6 +134,10 @@
 	spin_lock_init(&cma->mem_head_lock);
 #endif
 
+	if (!PageHighMem(pfn_to_page(cma->base_pfn)))
+		kmemleak_free_part(__va(cma->base_pfn << PAGE_SHIFT),
+				cma->count << PAGE_SHIFT);
+
 	return 0;
 
 err:
@@ -380,6 +384,8 @@
 	if (!count)
 		return NULL;
 
+	trace_cma_alloc_start(count, align);
+
 	mask = cma_bitmap_aligned_mask(cma, align);
 	offset = cma_bitmap_aligned_offset(cma, align);
 	bitmap_maxno = cma_bitmap_maxno(cma);
@@ -420,6 +426,8 @@
 
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
+
+		trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
 		/* try again with a bit different memory target */
 		start = bitmap_no + mask + 1;
 	}
diff --git a/mm/memory.c b/mm/memory.c
index cbb1e5e..91e1653 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3050,7 +3050,7 @@
 }
 
 static unsigned long fault_around_bytes __read_mostly =
-	rounddown_pow_of_two(65536);
+	rounddown_pow_of_two(4096);
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
diff --git a/mm/migrate.c b/mm/migrate.c
index 66ce6b4..f49de3cf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1319,6 +1319,8 @@
 	int swapwrite = current->flags & PF_SWAPWRITE;
 	int rc;
 
+	trace_mm_migrate_pages_start(mode, reason);
+
 	if (!swapwrite)
 		current->flags |= PF_SWAPWRITE;
 
diff --git a/mm/slub.c b/mm/slub.c
index 2b01429..30be24b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1673,6 +1673,7 @@
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
+	kasan_alloc_pages(page, order);
 	__free_pages(page, order);
 }
 
@@ -3881,6 +3882,7 @@
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kfree_hook(x);
+		kasan_alloc_pages(page, compound_order(page));
 		__free_pages(page, compound_order(page));
 		return;
 	}
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index cdf372f..e057887 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -2798,6 +2798,9 @@
 	if (!port_ptr || !name)
 		return -EINVAL;
 
+	if (port_ptr->type != CLIENT_PORT)
+		return -EINVAL;
+
 	if (name->addrtype != MSM_IPC_ADDR_NAME)
 		return -EINVAL;
 
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
index a84fc11..02242a1 100644
--- a/net/ipc_router/ipc_router_socket.c
+++ b/net/ipc_router/ipc_router_socket.c
@@ -543,10 +543,18 @@
 static int msm_ipc_router_close(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct msm_ipc_port *port_ptr;
 	int ret;
 
+	if (!sk)
+		return -EINVAL;
+
 	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
 	ret = msm_ipc_router_close_port(port_ptr);
 	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
 	release_sock(sk);