/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "clk: %s: " fmt, __func__

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <dt-bindings/clock/qcom,cpucc-sdm845.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>

#include "common.h"
#include "clk-regmap.h"
#include "clk-voter.h"
#include "clk-debug.h"

#define OSM_INIT_RATE			300000000UL
#define XO_RATE				19200000UL
#define OSM_TABLE_SIZE			40
#define SINGLE_CORE			1
#define MAX_CLUSTER_CNT			3
#define MAX_MEM_ACC_VAL_PER_LEVEL	3
#define CORE_COUNT_VAL(val)		((val & GENMASK(18, 16)) >> 16)

#define OSM_REG_SIZE			32

#define ENABLE_REG			0x0
#define FREQ_REG			0x110
#define VOLT_REG			0x114
#define CORE_DCVS_CTRL			0xbc

#define EFUSE_SHIFT(v1)			((v1) ? 3 : 2)
#define EFUSE_MASK			0x7

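/*
 * The DCVS and cycle-counter status registers sit at different offsets on
 * SDM845 v1 and v2; the macros below pick the base from the v1 flag.
 */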
#define DCVS_PERF_STATE_DESIRED_REG_0_V1	0x780
#define DCVS_PERF_STATE_DESIRED_REG_0_V2	0x920
#define DCVS_PERF_STATE_DESIRED_REG(n, v1) \
	(((v1) ? DCVS_PERF_STATE_DESIRED_REG_0_V1 \
		: DCVS_PERF_STATE_DESIRED_REG_0_V2) + 4 * (n))

#define OSM_CYCLE_COUNTER_STATUS_REG_0_V1	0x7d0
#define OSM_CYCLE_COUNTER_STATUS_REG_0_V2	0x9c0
#define OSM_CYCLE_COUNTER_STATUS_REG(n, v1) \
	(((v1) ? OSM_CYCLE_COUNTER_STATUS_REG_0_V1 \
		: OSM_CYCLE_COUNTER_STATUS_REG_0_V2) + 4 * (n))

static DEFINE_VDD_REGS_INIT(vdd_l3_mx_ao, 1);
static DEFINE_VDD_REGS_INIT(vdd_pwrcl_mx_ao, 1);

struct osm_entry {
	u16 virtual_corner;
	u16 open_loop_volt;
	long frequency;
};

struct clk_osm {
	struct clk_hw hw;
	struct osm_entry osm_table[OSM_TABLE_SIZE];
	struct dentry *debugfs;
	void __iomem *vbase;
	phys_addr_t pbase;
	spinlock_t lock;
	bool per_core_dcvs;
	u32 num_entries;
	u32 cluster_num;
	u32 core_num;
	u64 total_cycle_counter;
	u32 prev_cycle_counter;
	u32 max_core_count;
	u32 mx_turbo_freq;
	unsigned int cpr_rc;
};

static bool is_sdm845v1;

static inline struct clk_osm *to_clk_osm(struct clk_hw *_hw)
{
	return container_of(_hw, struct clk_osm, hw);
}

static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
{
	writel_relaxed(val, c->vbase + offset);
}

static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
{
	return readl_relaxed(c->vbase + offset);
}

static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
{
	return readl_relaxed_no_log(c->vbase + offset);
}

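/*
 * Read back a register so that earlier relaxed writes are guaranteed to
 * have reached the OSM block before execution continues.
 */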
static inline int clk_osm_mb(struct clk_osm *c)
{
	return readl_relaxed_no_log(c->vbase + ENABLE_REG);
}

static long clk_osm_list_rate(struct clk_hw *hw, unsigned int n,
			      unsigned long rate_max)
{
	if (n >= hw->init->num_rate_max)
		return -ENXIO;
	return hw->init->rate_max[n];
}

static inline bool is_better_rate(unsigned long req, unsigned long best,
				  unsigned long new)
{
	if (IS_ERR_VALUE(new))
		return false;

	return (req <= new && new < best) || (best < req && best < new);
}

static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
{
	int index;

	for (index = 0; index < entries; index++) {
		if (rate == table[index].frequency)
			return index;
	}

	return -EINVAL;
}

static long clk_osm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	int i;
	unsigned long rrate = 0;

	if (!hw)
		return -EINVAL;

	/*
	 * If the rate passed in is 0, return the first frequency in the
	 * FMAX table.
	 */
	if (!rate)
		return hw->init->rate_max[0];

	for (i = 0; i < hw->init->num_rate_max; i++) {
		if (is_better_rate(rate, rrate, hw->init->rate_max[i])) {
			rrate = hw->init->rate_max[i];
			if (rate == rrate)
				break;
		}
	}

	pr_debug("%s: rate %lu, rrate %ld, Rate max %ld\n", __func__, rate,
		 rrate, hw->init->rate_max[i]);

	return rrate;
}

static int clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_osm *c = to_clk_osm(hw);
	struct clk_hw *p_hw = clk_hw_get_parent(hw);
	struct clk_osm *parent = to_clk_osm(p_hw);
	int index = 0;

	if (!c || !parent)
		return -EINVAL;

	index = clk_osm_search_table(parent->osm_table,
				     parent->num_entries, rate);
	if (index < 0) {
		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
		return -EINVAL;
	}

	clk_osm_write_reg(parent, index,
			  DCVS_PERF_STATE_DESIRED_REG(c->core_num,
						      is_sdm845v1));

	/* Make sure the write goes through before proceeding */
	clk_osm_mb(parent);

	return 0;
}

static unsigned long clk_cpu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_osm *c = to_clk_osm(hw);
	struct clk_hw *p_hw = clk_hw_get_parent(hw);
	struct clk_osm *parent = to_clk_osm(p_hw);
	int index = 0;

	if (!c || !parent)
		return -EINVAL;

	index = clk_osm_read_reg(parent,
				 DCVS_PERF_STATE_DESIRED_REG(c->core_num,
							     is_sdm845v1));
	return parent->osm_table[index].frequency;
}

static long clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);

	if (!parent_hw)
		return -EINVAL;

	*parent_rate = rate;
	return clk_hw_round_rate(parent_hw, rate);
}

static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct clk_osm *cpuclk = to_clk_osm(hw);
	int index = 0;
	unsigned long r_rate;

	if (!cpuclk)
		return -EINVAL;

	r_rate = clk_osm_round_rate(hw, rate, NULL);

	if (rate != r_rate) {
		pr_err("invalid requested rate=%ld\n", rate);
		return -EINVAL;
	}

	/* Convert rate to table index */
	index = clk_osm_search_table(cpuclk->osm_table,
				     cpuclk->num_entries, r_rate);
	if (index < 0) {
		pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate);
		return -EINVAL;
	}
	pr_debug("rate: %lu --> index %d\n", rate, index);

	clk_osm_write_reg(cpuclk, index,
			  DCVS_PERF_STATE_DESIRED_REG(0, is_sdm845v1));

	/* Make sure the write goes through before proceeding */
	clk_osm_mb(cpuclk);

	return 0;
}

static unsigned long l3_clk_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_osm *cpuclk = to_clk_osm(hw);
	int index = 0;

	if (!cpuclk)
		return -EINVAL;

	index = clk_osm_read_reg(cpuclk,
				 DCVS_PERF_STATE_DESIRED_REG(0, is_sdm845v1));

	pr_debug("%s: Index %d, freq %ld\n", __func__, index,
		 cpuclk->osm_table[index].frequency);

	/* Convert index to frequency */
	return cpuclk->osm_table[index].frequency;
}

static struct clk_ops clk_ops_l3_osm = {
	.round_rate = clk_osm_round_rate,
	.list_rate = clk_osm_list_rate,
	.recalc_rate = l3_clk_recalc_rate,
	.set_rate = l3_clk_set_rate,
	.debug_init = clk_debug_measure_add,
};

static struct clk_ops clk_ops_core;
static struct clk_ops clk_ops_cpu_osm;

static struct clk_init_data osm_clks_init[] = {
	[0] = {
		.name = "l3_clk",
		.parent_names = (const char *[]){ "bi_tcxo_ao" },
		.num_parents = 1,
		.ops = &clk_ops_l3_osm,
		.vdd_class = &vdd_l3_mx_ao,
	},
	[1] = {
		.name = "pwrcl_clk",
		.parent_names = (const char *[]){ "bi_tcxo_ao" },
		.num_parents = 1,
		.ops = &clk_ops_cpu_osm,
		.vdd_class = &vdd_pwrcl_mx_ao,
	},
	[2] = {
		.name = "perfcl_clk",
		.parent_names = (const char *[]){ "bi_tcxo_ao" },
		.num_parents = 1,
		.ops = &clk_ops_cpu_osm,
	},
};

static struct clk_osm l3_clk = {
	.cluster_num = 0,
	.max_core_count = 4,
	.hw.init = &osm_clks_init[0],
};

static DEFINE_CLK_VOTER(l3_cluster0_vote_clk, l3_clk, 0);
static DEFINE_CLK_VOTER(l3_cluster1_vote_clk, l3_clk, 0);
static DEFINE_CLK_VOTER(l3_misc_vote_clk, l3_clk, 0);

static struct clk_osm pwrcl_clk = {
	.cluster_num = 1,
	.max_core_count = 4,
	.hw.init = &osm_clks_init[1],
};

static struct clk_osm cpu0_pwrcl_clk = {
	.core_num = 0,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu0_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu1_pwrcl_clk = {
	.core_num = 1,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu1_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu2_pwrcl_clk = {
	.core_num = 2,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu2_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu3_pwrcl_clk = {
	.core_num = 3,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu3_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu4_pwrcl_clk = {
	.core_num = 4,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu4_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu5_pwrcl_clk = {
	.core_num = 5,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu5_pwrcl_clk",
		.parent_names = (const char *[]){ "pwrcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm perfcl_clk = {
	.cluster_num = 2,
	.max_core_count = 4,
	.hw.init = &osm_clks_init[2],
};

static struct clk_osm cpu4_perfcl_clk = {
	.core_num = 0,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu4_perfcl_clk",
		.parent_names = (const char *[]){ "perfcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu5_perfcl_clk = {
	.core_num = 1,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu5_perfcl_clk",
		.parent_names = (const char *[]){ "perfcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu6_perfcl_clk = {
	.core_num = 2,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu6_perfcl_clk",
		.parent_names = (const char *[]){ "perfcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_osm cpu7_perfcl_clk = {
	.core_num = 3,
	.total_cycle_counter = 0,
	.prev_cycle_counter = 0,
	.hw.init = &(struct clk_init_data){
		.name = "cpu7_perfcl_clk",
		.parent_names = (const char *[]){ "perfcl_clk" },
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_ops_core,
	},
};

static struct clk_hw *osm_qcom_clk_hws[] = {
	[L3_CLK] = &l3_clk.hw,
	[L3_CLUSTER0_VOTE_CLK] = &l3_cluster0_vote_clk.hw,
	[L3_CLUSTER1_VOTE_CLK] = &l3_cluster1_vote_clk.hw,
	[L3_MISC_VOTE_CLK] = &l3_misc_vote_clk.hw,
	[PWRCL_CLK] = &pwrcl_clk.hw,
	[CPU0_PWRCL_CLK] = &cpu0_pwrcl_clk.hw,
	[CPU1_PWRCL_CLK] = &cpu1_pwrcl_clk.hw,
	[CPU2_PWRCL_CLK] = &cpu2_pwrcl_clk.hw,
	[CPU3_PWRCL_CLK] = &cpu3_pwrcl_clk.hw,
	[PERFCL_CLK] = &perfcl_clk.hw,
	[CPU4_PERFCL_CLK] = &cpu4_perfcl_clk.hw,
	[CPU5_PERFCL_CLK] = &cpu5_perfcl_clk.hw,
	[CPU6_PERFCL_CLK] = &cpu6_perfcl_clk.hw,
	[CPU7_PERFCL_CLK] = &cpu7_perfcl_clk.hw,
	[CPU4_PWRCL_CLK] = NULL,
	[CPU5_PWRCL_CLK] = NULL,
};

static struct clk_osm *clk_cpu_map[] = {
	&cpu0_pwrcl_clk,
	&cpu1_pwrcl_clk,
	&cpu2_pwrcl_clk,
	&cpu3_pwrcl_clk,
	&cpu4_perfcl_clk,
	&cpu5_perfcl_clk,
	&cpu6_perfcl_clk,
	&cpu7_perfcl_clk,
};

static struct clk_osm *logical_cpu_to_clk(int cpu)
{
	struct device_node *cpu_node;
	const u32 *cell;
	u64 hwid;
	static struct clk_osm *cpu_clk_map[NR_CPUS];

	if (!cpu_clk_map[cpu]) {
		cpu_node = of_get_cpu_node(cpu, NULL);
		if (!cpu_node)
			return NULL;

		cell = of_get_property(cpu_node, "reg", NULL);
		if (!cell) {
			pr_err("%s: missing reg property\n",
			       cpu_node->full_name);
			of_node_put(cpu_node);
			return NULL;
		}

		hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
		hwid = (hwid >> 8) & 0xff;
		of_node_put(cpu_node);
		if (hwid >= ARRAY_SIZE(clk_cpu_map)) {
			pr_err("unsupported CPU number - %d (hw_id - %llu)\n",
			       cpu, hwid);
			return NULL;
		}

		cpu_clk_map[cpu] = clk_cpu_map[hwid];
	}

	return cpu_clk_map[cpu];
}

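/*
 * Build the cpufreq policy cpumask from all CPUs sharing this CPU's clock
 * domain and return the domain's core 0 clock; with per-core DCVS enabled
 * each CPU keeps its own policy instead.
 */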
static struct clk_osm *osm_configure_policy(struct cpufreq_policy *policy)
{
	int cpu;
	struct clk_hw *parent, *c_parent;
	struct clk_osm *first;
	struct clk_osm *c, *n;

	c = logical_cpu_to_clk(policy->cpu);
	if (!c)
		return NULL;

	c_parent = clk_hw_get_parent(&c->hw);
	if (!c_parent)
		return NULL;

	/*
	 * Don't put any other CPUs into the policy if we're doing
	 * per_core_dcvs
	 */
	if (to_clk_osm(c_parent)->per_core_dcvs)
		return c;

	first = c;
	/* Find CPUs that share the same clock domain */
	for_each_possible_cpu(cpu) {
		n = logical_cpu_to_clk(cpu);
		if (!n)
			continue;

		parent = clk_hw_get_parent(&n->hw);
		if (!parent)
			return NULL;
		if (parent != c_parent)
			continue;

		cpumask_set_cpu(cpu, policy->cpus);
		if (n->core_num == 0)
			first = n;
	}

	return first;
}

static void
osm_set_index(struct clk_osm *c, unsigned int index)
{
	struct clk_hw *p_hw = clk_hw_get_parent(&c->hw);
	struct clk_osm *parent = to_clk_osm(p_hw);
	unsigned long rate = 0;

	if (index >= OSM_TABLE_SIZE) {
		pr_err("Passing an index (%u) that's greater than max (%d)\n",
		       index, OSM_TABLE_SIZE - 1);
		return;
	}

	rate = parent->osm_table[index].frequency;
	if (!rate)
		return;

	clk_set_rate(c->hw.clk, clk_round_rate(c->hw.clk, rate));
}

static int
osm_cpufreq_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	struct clk_osm *c = policy->driver_data;

	osm_set_index(c, index);
	return 0;
}

static unsigned int osm_cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
	struct clk_osm *c;
	u32 index;

	if (!policy)
		return 0;

	c = policy->driver_data;
	index = clk_osm_read_reg(c,
			DCVS_PERF_STATE_DESIRED_REG(c->core_num, is_sdm845v1));
	return policy->freq_table[index].frequency;
}

static int osm_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct cpufreq_frequency_table *table;
	struct clk_osm *c, *parent;
	struct clk_hw *p_hw;
	int ret;
	unsigned int i;
	unsigned int xo_kHz;

	c = osm_configure_policy(policy);
	if (!c) {
		pr_err("no clock for CPU%d\n", policy->cpu);
		return -ENODEV;
	}

	p_hw = clk_hw_get_parent(&c->hw);
	if (!p_hw) {
		pr_err("no parent clock for CPU%d\n", policy->cpu);
		return -ENODEV;
	}

	parent = to_clk_osm(p_hw);
	c->vbase = parent->vbase;

	p_hw = clk_hw_get_parent(p_hw);
	if (!p_hw) {
		pr_err("no xo clock for CPU%d\n", policy->cpu);
		return -ENODEV;
	}
	xo_kHz = clk_hw_get_rate(p_hw) / 1000;

	table = kcalloc(OSM_TABLE_SIZE + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (i = 0; i < OSM_TABLE_SIZE; i++) {
		u32 data, src, div, lval, core_count;

		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
		src = (data & GENMASK(31, 30)) >> 30;
		div = (data & GENMASK(29, 28)) >> 28;
		lval = data & GENMASK(7, 0);
		core_count = CORE_COUNT_VAL(data);

		if (!src)
			table[i].frequency = OSM_INIT_RATE / 1000;
		else
			table[i].frequency = xo_kHz * lval;
		table[i].driver_data = table[i].frequency;

		if (core_count != parent->max_core_count)
			table[i].frequency = CPUFREQ_ENTRY_INVALID;

		/* Two of the same frequencies means end of table */
		if (i > 0 && table[i - 1].driver_data == table[i].driver_data) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				prev->flags = CPUFREQ_BOOST_FREQ;
				prev->frequency = prev->driver_data;
			}

			break;
		}
	}
	table[i].frequency = CPUFREQ_TABLE_END;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		goto err;
	}

	policy->driver_data = c;
	return 0;

err:
	kfree(table);
	return ret;
}

static int osm_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	kfree(policy->freq_table);
	policy->freq_table = NULL;
	return 0;
}

static struct freq_attr *osm_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver qcom_osm_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = osm_cpufreq_target_index,
	.get = osm_cpufreq_get,
	.init = osm_cpufreq_cpu_init,
	.exit = osm_cpufreq_cpu_exit,
	.name = "osm-cpufreq",
	.attr = osm_cpufreq_attr,
	.boost_enabled = true,
};

static u32 find_voltage(struct clk_osm *c, unsigned long rate)
{
	struct osm_entry *table = c->osm_table;
	int entries = c->num_entries, i;

	for (i = 0; i < entries; i++) {
		if (rate == table[i].frequency) {
			/* OPP table voltages have units of mV */
			return table[i].open_loop_volt * 1000;
		}
	}

	return -EINVAL;
}

static int add_opp(struct clk_osm *c, struct device **device_list, int count)
{
	unsigned long rate = 0;
	u32 uv;
	long rc;
	int i, j = 0;
	unsigned long min_rate = c->hw.init->rate_max[0];
	unsigned long max_rate =
			c->hw.init->rate_max[c->hw.init->num_rate_max - 1];

	while (1) {
		rate = c->hw.init->rate_max[j++];
		uv = find_voltage(c, rate);
		if (uv <= 0) {
			pr_warn("No voltage for %lu.\n", rate);
			return -EINVAL;
		}

		for (i = 0; i < count; i++) {
			rc = dev_pm_opp_add(device_list[i], rate, uv);
			if (rc) {
				pr_warn("failed to add OPP for %lu\n", rate);
				return rc;
			}
		}

		/*
		 * Print the OPP pair for the lowest and highest frequency for
		 * each device that we're populating. This is important since
		 * this information will be used by thermal mitigation and the
		 * scheduler.
		 */
		if (rate == min_rate) {
			for (i = 0; i < count; i++) {
				pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
					rate, uv, dev_name(device_list[i]));
			}
		}

		if (rate == max_rate && max_rate != min_rate) {
			for (i = 0; i < count; i++) {
				pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
					rate, uv, dev_name(device_list[i]));
			}
			break;
		}

		if (min_rate == max_rate)
			break;
	}
	return 0;
}

static int derive_device_list(struct device **device_list,
			      struct device_node *np,
			      char *phandle_name, int count)
{
	int i;
	struct platform_device *pdev;
	struct device_node *dev_node;

	for (i = 0; i < count; i++) {
		dev_node = of_parse_phandle(np, phandle_name, i);
		if (!dev_node) {
			pr_err("Unable to get device_node pointer for opp-handle (%s)\n",
			       phandle_name);
			return -ENODEV;
		}

		pdev = of_find_device_by_node(dev_node);
		if (!pdev) {
			pr_err("Unable to find platform_device node for opp-handle (%s)\n",
			       phandle_name);
			return -ENODEV;
		}
		device_list[i] = &pdev->dev;
	}
	return 0;
}

static void populate_l3_opp_table(struct device_node *np, char *phandle_name)
{
	struct device **device_list;
	int len, count, ret = 0;

	if (of_find_property(np, phandle_name, &len)) {
		count = len / sizeof(u32);

		device_list = kcalloc(count, sizeof(struct device *),
				      GFP_KERNEL);
		if (!device_list)
			return;

		ret = derive_device_list(device_list, np, phandle_name, count);
		if (ret < 0) {
			pr_err("Failed to fill device_list for %s\n",
			       phandle_name);
			kfree(device_list);
			return;
		}
	} else {
		pr_debug("Unable to find %s\n", phandle_name);
		return;
	}

	if (add_opp(&l3_clk, device_list, count))
		pr_err("Failed to add OPP levels for %s\n", phandle_name);

	kfree(device_list);
}

static void populate_opp_table(struct platform_device *pdev)
{
	int cpu;
	struct device *cpu_dev;
	struct clk_osm *c, *parent;
	struct clk_hw *hw_parent;
	struct device_node *np = pdev->dev.of_node;

	for_each_possible_cpu(cpu) {
		c = logical_cpu_to_clk(cpu);
		if (!c) {
			pr_err("no clock device for CPU=%d\n", cpu);
			return;
		}

		hw_parent = clk_hw_get_parent(&c->hw);
		parent = to_clk_osm(hw_parent);
		cpu_dev = get_cpu_device(cpu);
		if (cpu_dev)
			if (add_opp(parent, &cpu_dev, 1))
				pr_err("Failed to add OPP levels for %s\n",
				       dev_name(cpu_dev));
	}

	populate_l3_opp_table(np, "l3-devs");
}

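/*
 * Return a monotonically increasing cycle count for the given CPU by
 * accumulating the 32-bit OSM cycle counter and handling wraparound.
 */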
static u64 clk_osm_get_cpu_cycle_counter(int cpu)
{
	u32 val;
	int core_num;
	unsigned long flags;
	u64 cycle_counter_ret;
	struct clk_osm *parent, *c = logical_cpu_to_clk(cpu);

	if (IS_ERR_OR_NULL(c)) {
		pr_err("no clock device for CPU=%d\n", cpu);
		return 0;
	}

	parent = to_clk_osm(clk_hw_get_parent(&c->hw));

	spin_lock_irqsave(&parent->lock, flags);
	/*
	 * Use core 0's copy as proxy for the whole cluster when per
	 * core DCVS is disabled.
	 */
	core_num = parent->per_core_dcvs ? c->core_num : 0;
	val = clk_osm_read_reg_no_log(parent,
			OSM_CYCLE_COUNTER_STATUS_REG(core_num, is_sdm845v1));

	if (val < c->prev_cycle_counter) {
		/* Handle counter overflow */
		c->total_cycle_counter += UINT_MAX -
			c->prev_cycle_counter + val;
		c->prev_cycle_counter = val;
	} else {
		c->total_cycle_counter += val - c->prev_cycle_counter;
		c->prev_cycle_counter = val;
	}
	cycle_counter_ret = c->total_cycle_counter;
	spin_unlock_irqrestore(&parent->lock, flags);

	return cycle_counter_ret;
}

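/*
 * Parse the OSM lookup table from hardware. Each entry encodes a source
 * select and LVAL (frequency = LVAL * XO rate, or OSM_INIT_RATE when no
 * source is programmed) plus a virtual corner and an open-loop voltage;
 * the table is truncated at the first repeated frequency.
 */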
static int clk_osm_read_lut(struct platform_device *pdev, struct clk_osm *c)
{
	u32 data, src, lval, i, j = OSM_TABLE_SIZE;
	struct clk_vdd_class *vdd = osm_clks_init[c->cluster_num].vdd_class;

	for (i = 0; i < OSM_TABLE_SIZE; i++) {
		data = clk_osm_read_reg(c, FREQ_REG + i * OSM_REG_SIZE);
		src = ((data & GENMASK(31, 30)) >> 30);
		lval = (data & GENMASK(7, 0));

		if (!src)
			c->osm_table[i].frequency = OSM_INIT_RATE;
		else
			c->osm_table[i].frequency = XO_RATE * lval;

		data = clk_osm_read_reg(c, VOLT_REG + i * OSM_REG_SIZE);
		c->osm_table[i].virtual_corner =
					((data & GENMASK(21, 16)) >> 16);
		c->osm_table[i].open_loop_volt = (data & GENMASK(11, 0));

		pr_debug("index=%d freq=%ld virtual_corner=%d open_loop_voltage=%u\n",
			 i, c->osm_table[i].frequency,
			 c->osm_table[i].virtual_corner,
			 c->osm_table[i].open_loop_volt);

		if (i > 0 && j == OSM_TABLE_SIZE && c->osm_table[i].frequency ==
					c->osm_table[i - 1].frequency)
			j = i;
	}

	osm_clks_init[c->cluster_num].rate_max = devm_kcalloc(&pdev->dev,
						j, sizeof(unsigned long),
						GFP_KERNEL);
	if (!osm_clks_init[c->cluster_num].rate_max)
		return -ENOMEM;

	if (vdd) {
		vdd->level_votes = devm_kcalloc(&pdev->dev, j,
				sizeof(*vdd->level_votes), GFP_KERNEL);
		if (!vdd->level_votes)
			return -ENOMEM;

		vdd->vdd_uv = devm_kcalloc(&pdev->dev, j, sizeof(*vdd->vdd_uv),
							GFP_KERNEL);
		if (!vdd->vdd_uv)
			return -ENOMEM;

		for (i = 0; i < j; i++) {
			if (c->osm_table[i].frequency < c->mx_turbo_freq ||
						(c->cpr_rc > 1))
				vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_NOM;
			else
				vdd->vdd_uv[i] = RPMH_REGULATOR_LEVEL_TURBO;
		}
		vdd->num_levels = j;
		vdd->cur_level = j;
		vdd->use_max_uV = true;
	}

	for (i = 0; i < j; i++)
		osm_clks_init[c->cluster_num].rate_max[i] =
					c->osm_table[i].frequency;

	c->num_entries = osm_clks_init[c->cluster_num].num_rate_max = j;
	return 0;
}

static int clk_osm_resources_init(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "osm_l3_base");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for osm_l3_base");
		return -ENOMEM;
	}

	l3_clk.pbase = (unsigned long)res->start;
	l3_clk.vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));

	if (!l3_clk.vbase) {
		dev_err(&pdev->dev, "Unable to map osm_l3_base base\n");
		return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "osm_pwrcl_base");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for osm_pwrcl_base");
		return -ENOMEM;
	}

	pwrcl_clk.pbase = (unsigned long)res->start;
	pwrcl_clk.vbase = devm_ioremap(&pdev->dev, res->start,
				       resource_size(res));
	if (!pwrcl_clk.vbase) {
		dev_err(&pdev->dev, "Unable to map osm_pwrcl_base base\n");
		return -ENOMEM;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "osm_perfcl_base");
	if (!res) {
		dev_err(&pdev->dev,
			"Unable to get platform resource for osm_perfcl_base");
		return -ENOMEM;
	}

	perfcl_clk.pbase = (unsigned long)res->start;
	perfcl_clk.vbase = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));

	if (!perfcl_clk.vbase) {
		dev_err(&pdev->dev, "Unable to map osm_perfcl_base base\n");
		return -ENOMEM;
	}

	return 0;
}

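/*
 * SDM670 has a 6-core power cluster and a 2-core performance cluster, so
 * remap CPUs 4 and 5 onto the power cluster and shrink the perf cluster.
 */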
static void clk_cpu_osm_driver_sdm670_fixup(void)
{
	osm_qcom_clk_hws[CPU4_PERFCL_CLK] = NULL;
	osm_qcom_clk_hws[CPU5_PERFCL_CLK] = NULL;
	osm_qcom_clk_hws[CPU4_PWRCL_CLK] = &cpu4_pwrcl_clk.hw;
	osm_qcom_clk_hws[CPU5_PWRCL_CLK] = &cpu5_pwrcl_clk.hw;

	clk_cpu_map[4] = &cpu4_pwrcl_clk;
	clk_cpu_map[5] = &cpu5_pwrcl_clk;

	cpu6_perfcl_clk.core_num = 0;
	cpu7_perfcl_clk.core_num = 1;

	pwrcl_clk.max_core_count = 6;
	perfcl_clk.max_core_count = 2;
}

static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
	int rc = 0, i, cpu;
	bool is_sdm670 = false;
	u32 *array;
	u32 val, pte_efuse;
	void __iomem *vbase;
	int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
	struct clk *ext_xo_clk, *clk;
	struct clk_osm *osm_clk;
	struct device *dev = &pdev->dev;
	struct clk_onecell_data *clk_data;
	struct resource *res;
	struct cpu_cycle_counter_cb cb = {
		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
	};

	/*
	 * Require the RPM-XO clock to be registered before OSM.
	 * The cpuss_gpll0_clk_src is listed to be configured by BL.
	 */
	ext_xo_clk = devm_clk_get(dev, "xo_ao");
	if (IS_ERR(ext_xo_clk)) {
		if (PTR_ERR(ext_xo_clk) != -EPROBE_DEFER)
			dev_err(dev, "Unable to get xo clock\n");
		return PTR_ERR(ext_xo_clk);
	}

	is_sdm845v1 = of_device_is_compatible(pdev->dev.of_node,
					      "qcom,clk-cpu-osm");

	if (of_device_is_compatible(pdev->dev.of_node,
				    "qcom,clk-cpu-osm-sdm670")) {
		is_sdm670 = true;
		clk_cpu_osm_driver_sdm670_fixup();
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_rc");
	if (res) {
		vbase = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
		if (!vbase) {
			dev_err(&pdev->dev, "Unable to map in cpr_rc base\n");
			return -ENOMEM;
		}
		pte_efuse = readl_relaxed(vbase);
		l3_clk.cpr_rc = pwrcl_clk.cpr_rc = perfcl_clk.cpr_rc =
			((pte_efuse >> EFUSE_SHIFT(is_sdm845v1 | is_sdm670))
			& EFUSE_MASK);
		pr_info("LOCAL_CPR_RC: %u\n", l3_clk.cpr_rc);
		devm_iounmap(&pdev->dev, vbase);
	} else {
		dev_err(&pdev->dev,
			"Unable to get platform resource for cpr_rc\n");
		return -ENOMEM;
	}

	vdd_l3_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
						       "vdd_l3_mx_ao");
	if (IS_ERR(vdd_l3_mx_ao.regulator[0])) {
		if (PTR_ERR(vdd_l3_mx_ao.regulator[0]) != -EPROBE_DEFER)
			dev_err(&pdev->dev,
				"Unable to get vdd_l3_mx_ao regulator\n");
		return PTR_ERR(vdd_l3_mx_ao.regulator[0]);
	}

	vdd_pwrcl_mx_ao.regulator[0] = devm_regulator_get(&pdev->dev,
							  "vdd_pwrcl_mx_ao");
	if (IS_ERR(vdd_pwrcl_mx_ao.regulator[0])) {
		if (PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]) != -EPROBE_DEFER)
			dev_err(&pdev->dev,
				"Unable to get vdd_pwrcl_mx_ao regulator\n");
		return PTR_ERR(vdd_pwrcl_mx_ao.regulator[0]);
	}

	array = devm_kcalloc(&pdev->dev, MAX_CLUSTER_CNT, sizeof(*array),
			     GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,mx-turbo-freq",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_err(&pdev->dev, "unable to find qcom,mx-turbo-freq property, rc=%d\n",
			rc);
		devm_kfree(&pdev->dev, array);
		return rc;
	}

	l3_clk.mx_turbo_freq = array[l3_clk.cluster_num];
	pwrcl_clk.mx_turbo_freq = array[pwrcl_clk.cluster_num];
	perfcl_clk.mx_turbo_freq = array[perfcl_clk.cluster_num];

	devm_kfree(&pdev->dev, array);

	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
				GFP_KERNEL);
	if (!clk_data)
		goto exit;

	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
					sizeof(struct clk *)), GFP_KERNEL);
	if (!clk_data->clks)
		goto clk_err;

	clk_data->clk_num = num_clks;

	rc = clk_osm_resources_init(pdev);
	if (rc) {
		if (rc != -EPROBE_DEFER)
			dev_err(&pdev->dev, "OSM resources init failed, rc=%d\n",
				rc);
		return rc;
	}

	/* Check if per-core DCVS is enabled/not */
	val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
	if (val & BIT(0))
		pwrcl_clk.per_core_dcvs = true;

	val = clk_osm_read_reg(&perfcl_clk, CORE_DCVS_CTRL);
	if (val & BIT(0))
		perfcl_clk.per_core_dcvs = true;

	rc = clk_osm_read_lut(pdev, &l3_clk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to read OSM LUT for L3, rc=%d\n",
			rc);
		return rc;
	}

	rc = clk_osm_read_lut(pdev, &pwrcl_clk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to read OSM LUT for power cluster, rc=%d\n",
			rc);
		return rc;
	}

	rc = clk_osm_read_lut(pdev, &perfcl_clk);
	if (rc) {
		dev_err(&pdev->dev, "Unable to read OSM LUT for perf cluster, rc=%d\n",
			rc);
		return rc;
	}

	spin_lock_init(&l3_clk.lock);
	spin_lock_init(&pwrcl_clk.lock);
	spin_lock_init(&perfcl_clk.lock);

	clk_ops_core = clk_dummy_ops;
	clk_ops_core.set_rate = clk_cpu_set_rate;
	clk_ops_core.round_rate = clk_cpu_round_rate;
	clk_ops_core.recalc_rate = clk_cpu_recalc_rate;

	clk_ops_cpu_osm = clk_dummy_ops;
	clk_ops_cpu_osm.round_rate = clk_osm_round_rate;
	clk_ops_cpu_osm.list_rate = clk_osm_list_rate;
	clk_ops_cpu_osm.debug_init = clk_debug_measure_add;

	/* Register OSM l3, pwr and perf clocks with Clock Framework */
	for (i = 0; i < num_clks; i++) {
		if (!osm_qcom_clk_hws[i])
			continue;

		clk = devm_clk_register(&pdev->dev, osm_qcom_clk_hws[i]);
		if (IS_ERR(clk)) {
			dev_err(&pdev->dev, "Unable to register CPU clock at index %d\n",
				i);
			return PTR_ERR(clk);
		}
		clk_data->clks[i] = clk;
	}

	rc = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
				 clk_data);
	if (rc) {
		dev_err(&pdev->dev, "Unable to register CPU clocks\n");
		goto provider_err;
	}

	get_online_cpus();

	WARN(clk_prepare_enable(l3_cluster0_vote_clk.hw.clk),
	     "clk: Failed to enable cluster0 clock for L3\n");
	WARN(clk_prepare_enable(l3_cluster1_vote_clk.hw.clk),
	     "clk: Failed to enable cluster1 clock for L3\n");
	WARN(clk_prepare_enable(l3_misc_vote_clk.hw.clk),
	     "clk: Failed to enable misc clock for L3\n");

	/*
	 * Call clk_prepare_enable for the silver clock explicitly in order to
	 * place an implicit vote on MX
	 */
	for_each_online_cpu(cpu) {
		osm_clk = logical_cpu_to_clk(cpu);
		if (!osm_clk)
			return -EINVAL;
		clk_prepare_enable(osm_clk->hw.clk);
	}
	populate_opp_table(pdev);

	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	register_cpu_cycle_counter_cb(&cb);
	put_online_cpus();

	rc = cpufreq_register_driver(&qcom_osm_cpufreq_driver);
	if (rc)
		goto provider_err;

	pr_info("OSM CPUFreq driver inited\n");
	return 0;

provider_err:
	if (clk_data)
		devm_kfree(&pdev->dev, clk_data->clks);
clk_err:
	devm_kfree(&pdev->dev, clk_data);
exit:
	dev_err(&pdev->dev, "OSM CPUFreq driver failed to initialize, rc=%d\n",
		rc);
	panic("Unable to Setup OSM CPUFreq");
}

static const struct of_device_id match_table[] = {
	{ .compatible = "qcom,clk-cpu-osm" },
	{ .compatible = "qcom,clk-cpu-osm-v2" },
	{ .compatible = "qcom,clk-cpu-osm-sdm670" },
	{}
};

static struct platform_driver clk_cpu_osm_driver = {
	.probe = clk_cpu_osm_driver_probe,
	.driver = {
		.name = "clk-cpu-osm",
		.of_match_table = match_table,
		.owner = THIS_MODULE,
	},
};

static int __init clk_cpu_osm_init(void)
{
	return platform_driver_register(&clk_cpu_osm_driver);
}
subsys_initcall(clk_cpu_osm_init);

static void __exit clk_cpu_osm_exit(void)
{
	platform_driver_unregister(&clk_cpu_osm_driver);
}
module_exit(clk_cpu_osm_exit);

MODULE_DESCRIPTION("QTI CPU clock driver for OSM");
MODULE_LICENSE("GPL v2");