/*
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iopoll.h>

#include <mach/board.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include <mach/msm_bus_board.h>
#include <mach/rpm-regulator.h>
#include <mach/clk-provider.h>
#include <mach/rpm-regulator-smd.h>

#include "acpuclock.h"

#define RCG_SRC_DIV_MASK		BM(7, 0)
#define RCG_CONFIG_PGM_DATA_BIT		BIT(11)
#define RCG_CONFIG_PGM_ENA_BIT		BIT(10)
#define POLL_INTERVAL_US		1
#define APCS_RCG_UPDATE_TIMEOUT_US	20
#define GPLL0_TO_A5_ALWAYS_ENABLE	BIT(18)

#define MAX_VDD_MEM			1050000
#define MAX_VDD_CPU			1050000

/* Corner type vreg VDD values */
#define LVL_NONE	RPM_REGULATOR_CORNER_NONE
#define LVL_LOW		RPM_REGULATOR_CORNER_SVS_SOC
#define LVL_NOM		RPM_REGULATOR_CORNER_NORMAL
#define LVL_HIGH	RPM_REGULATOR_CORNER_SUPER_TURBO

enum clk_src {
	CXO,
	PLL0,
	ACPUPLL,
	NUM_SRC,
};

struct src_clock {
	struct clk *clk;
	const char *name;
};

static struct src_clock src_clocks[NUM_SRC] = {
	[PLL0].name = "pll0",
	[ACPUPLL].name = "pll14",
};

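/*
 * One entry in the CPU frequency table.
 *
 * @use_for_scaling: expose this rate to cpufreq
 * @khz:             CPU rate in kHz
 * @src:             index into src_clocks[] for the source clock
 * @src_sel:         source mux value programmed into the RCG
 * @src_div:         integer divider applied to the source (0 = no division)
 * @vdd_cpu:         CPU rail set-point passed to regulator_set_voltage()
 * @vdd_mem:         memory rail set-point passed to regulator_set_voltage()
 * @bw_level:        index into bw_level_tbl[] for the bus bandwidth vote
 */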
struct clkctl_acpu_speed {
	bool use_for_scaling;
	unsigned int khz;
	int src;
	unsigned int src_sel;
	unsigned int src_div;
	unsigned int vdd_cpu;
	unsigned int vdd_mem;
	unsigned int bw_level;
};

struct acpuclk_drv_data {
	struct mutex lock;
	struct clkctl_acpu_speed *current_speed;
	void __iomem *apcs_rcg_config;
	void __iomem *apcs_cpu_pwr_ctl;
	struct regulator *vdd_cpu;
	struct regulator *vdd_mem;
};

static struct acpuclk_drv_data drv_data = {
	.current_speed = &(struct clkctl_acpu_speed){ 0 },
};

/* Instantaneous bandwidth requests in MB/s. */
#define BW_MBPS(_bw) \
	{ \
		.vectors = &(struct msm_bus_vectors){ \
			.src = MSM_BUS_MASTER_AMPSS_M0, \
			.dst = MSM_BUS_SLAVE_EBI_CH0, \
			.ib = (_bw) * 1000000UL, \
			.ab = 0, \
		}, \
		.num_paths = 1, \
	}

static struct msm_bus_paths bw_level_tbl[] = {
	[0] = BW_MBPS(152),  /* At least  19 MHz on bus. */
	[1] = BW_MBPS(264),  /* At least  33 MHz on bus. */
	[2] = BW_MBPS(528),  /* At least  66 MHz on bus. */
	[3] = BW_MBPS(664),  /* At least  83 MHz on bus. */
	[4] = BW_MBPS(1064), /* At least 133 MHz on bus. */
	[5] = BW_MBPS(1328), /* At least 166 MHz on bus. */
	[6] = BW_MBPS(2128), /* At least 266 MHz on bus. */
	[7] = BW_MBPS(2664), /* At least 333 MHz on bus. */
};

static struct msm_bus_scale_pdata bus_client_pdata = {
	.usecase = bw_level_tbl,
	.num_usecases = ARRAY_SIZE(bw_level_tbl),
	.active_only = 1,
	.name = "acpuclock",
};

static uint32_t bus_perf_client;

/* TODO:
 * 1) Update MX voltage when it is available
 * 2) Update bus bandwidth
 */
static struct clkctl_acpu_speed acpu_freq_tbl[] = {
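	/* use, khz, src, src_sel, src_div, vdd_cpu, vdd_mem, bw_level */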
	{ 0,  19200, CXO,     0, 0, LVL_LOW,   950000, 0 },
	{ 1, 300000, PLL0,    1, 2, LVL_LOW,   950000, 4 },
	{ 1, 600000, PLL0,    1, 0, LVL_NOM,   950000, 4 },
	{ 1, 748800, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
	{ 1, 998400, ACPUPLL, 5, 0, LVL_HIGH, 1050000, 7 },
	{ 0 }
};

/* Update the bus bandwidth request. */
static void set_bus_bw(unsigned int bw)
{
	int ret;

	if (bw >= ARRAY_SIZE(bw_level_tbl)) {
		pr_err("invalid bandwidth request (%d)\n", bw);
		return;
	}

	/* Update bandwidth if request has changed. This may sleep. */
	ret = msm_bus_scale_client_update_request(bus_perf_client, bw);
	if (ret)
		pr_err("bandwidth request failed (%d)\n", ret);
}

/* Apply any per-cpu voltage increases. */
static int increase_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
	int rc = 0;

	/* Increase vdd_mem before vdd_cpu. vdd_mem should be >= vdd_cpu. */
	rc = regulator_set_voltage(drv_data.vdd_mem, vdd_mem, MAX_VDD_MEM);
	if (rc) {
		pr_err("vdd_mem increase failed (%d)\n", rc);
		return rc;
	}

	rc = regulator_set_voltage(drv_data.vdd_cpu, vdd_cpu, MAX_VDD_CPU);
	if (rc)
		pr_err("vdd_cpu increase failed (%d)\n", rc);

	return rc;
}

/* Apply any per-cpu voltage decreases. */
static void decrease_vdd(unsigned int vdd_cpu, unsigned int vdd_mem)
{
	int ret;

	/* Update CPU voltage. */
	ret = regulator_set_voltage(drv_data.vdd_cpu, vdd_cpu, MAX_VDD_CPU);
	if (ret) {
		pr_err("vdd_cpu decrease failed (%d)\n", ret);
		return;
	}

	/* Decrease vdd_mem after vdd_cpu. vdd_mem should be >= vdd_cpu. */
	ret = regulator_set_voltage(drv_data.vdd_mem, vdd_mem, MAX_VDD_MEM);
	if (ret)
		pr_err("vdd_mem decrease failed (%d)\n", ret);
}

static void select_clk_source_div(struct clkctl_acpu_speed *s)
{
	u32 regval, rc, src_div;
	void __iomem *apcs_rcg_config = drv_data.apcs_rcg_config;

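	/*
	 * A non-zero integer divider N is encoded as 2*N - 1 before it is
	 * written to the RCG divider field; 0 is passed through unchanged
	 * and selects the undivided source.
	 */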
	src_div = s->src_div ? ((2 * s->src_div) - 1) : s->src_div;

	regval = readl_relaxed(apcs_rcg_config);
	regval &= ~RCG_SRC_DIV_MASK;
	regval |= BVAL(2, 0, s->src_sel) | BVAL(7, 3, src_div);
	writel_relaxed(regval, apcs_rcg_config);

	/*
	 * Make sure the writes of the source and divider complete before
	 * the configuration update is triggered.
	 */
	mb();

	/* Update the configuration */
	regval = readl_relaxed(apcs_rcg_config);
	regval |= RCG_CONFIG_PGM_DATA_BIT | RCG_CONFIG_PGM_ENA_BIT;
	writel_relaxed(regval, apcs_rcg_config);

	/* Wait for update to take effect */
	rc = readl_poll_timeout(apcs_rcg_config, regval,
		!(regval & RCG_CONFIG_PGM_DATA_BIT),
		POLL_INTERVAL_US,
		APCS_RCG_UPDATE_TIMEOUT_US);
	if (rc)
		pr_warn("acpu rcg didn't update its configuration\n");
}

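/*
 * Switch the CPU to the source and divider described by tgt_s. When the
 * ACPU PLL is both the current and the target source, the CPU is parked on
 * CXO while the PLL is reprogrammed; otherwise the target source is
 * enabled, the RCG is switched over, and the previous source is released.
 */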
static int set_speed(struct clkctl_acpu_speed *tgt_s)
{
	int rc = 0;
	unsigned int tgt_freq_hz = tgt_s->khz * 1000;
	struct clkctl_acpu_speed *strt_s = drv_data.current_speed;
	struct clkctl_acpu_speed *cxo_s = &acpu_freq_tbl[0];
	struct clk *strt = src_clocks[strt_s->src].clk;
	struct clk *tgt = src_clocks[tgt_s->src].clk;

	if (strt_s->src == ACPUPLL && tgt_s->src == ACPUPLL) {
		/* Switch to another always on src */
		select_clk_source_div(cxo_s);

		/* Re-program acpu pll */
		clk_disable(tgt);
		rc = clk_set_rate(tgt, tgt_freq_hz);
		if (rc)
			pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
		BUG_ON(clk_enable(tgt));

		/* Switch back to acpu pll */
		select_clk_source_div(tgt_s);
	} else if (strt_s->src != ACPUPLL && tgt_s->src == ACPUPLL) {
		rc = clk_set_rate(tgt, tgt_freq_hz);
		if (rc) {
			pr_err("Failed to set ACPU PLL to %u\n", tgt_freq_hz);
			return rc;
		}

		rc = clk_enable(tgt);
		if (rc) {
			pr_err("ACPU PLL enable failed\n");
			return rc;
		}

		select_clk_source_div(tgt_s);

		clk_disable(strt);
	} else {
		rc = clk_enable(tgt);
		if (rc) {
			pr_err("%s enable failed\n",
					src_clocks[tgt_s->src].name);
			return rc;
		}

		select_clk_source_div(tgt_s);

		clk_disable(strt);
	}

	return rc;
}

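/*
 * Change the CPU clock rate. Voltages are raised before switching to a
 * faster rate and dropped only after switching to a slower one, so the
 * rails always satisfy the rate the CPU is actually running at. For SWFI
 * and power collapse only the clock itself is switched.
 */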
static int acpuclk_9625_set_rate(int cpu, unsigned long rate,
				 enum setrate_reason reason)
{
	struct clkctl_acpu_speed *tgt_s, *strt_s;
	int rc = 0;

	if (reason == SETRATE_CPUFREQ)
		mutex_lock(&drv_data.lock);

	strt_s = drv_data.current_speed;

	/* Return early if rate didn't change */
	if (rate == strt_s->khz)
		goto out;

	/* Find target frequency */
	for (tgt_s = acpu_freq_tbl; tgt_s->khz != 0; tgt_s++)
		if (tgt_s->khz == rate)
			break;
	if (tgt_s->khz == 0) {
		rc = -EINVAL;
		goto out;
	}

	/* Increase VDD levels if needed */
	if ((reason == SETRATE_CPUFREQ || reason == SETRATE_INIT)
			&& (tgt_s->khz > strt_s->khz)) {
		rc = increase_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);
		if (rc)
			goto out;
	}

	pr_debug("Switching from CPU rate %u KHz -> %u KHz\n",
		strt_s->khz, tgt_s->khz);

	/* Switch CPU speed. */
	rc = set_speed(tgt_s);
	if (rc)
		goto out;

	drv_data.current_speed = tgt_s;
	pr_debug("CPU speed change complete\n");

	/* Nothing else to do for SWFI or power-collapse. */
	if (reason == SETRATE_SWFI || reason == SETRATE_PC)
		goto out;

	/* Update bus bandwidth request */
	set_bus_bw(tgt_s->bw_level);

	/* Drop VDD levels if we can. */
	if (tgt_s->khz < strt_s->khz)
		decrease_vdd(tgt_s->vdd_cpu, tgt_s->vdd_mem);

out:
	if (reason == SETRATE_CPUFREQ)
		mutex_unlock(&drv_data.lock);
	return rc;
}

static unsigned long acpuclk_9625_get_rate(int cpu)
{
	return drv_data.current_speed->khz;
}

#ifdef CONFIG_CPU_FREQ_MSM
static struct cpufreq_frequency_table freq_table[30];

static void __init cpufreq_table_init(void)
{
	int i, freq_cnt = 0;

	/* Construct the freq_table from acpu_freq_tbl. */
	for (i = 0; acpu_freq_tbl[i].khz != 0
			&& freq_cnt < ARRAY_SIZE(freq_table); i++) {
		if (!acpu_freq_tbl[i].use_for_scaling)
			continue;
		freq_table[freq_cnt].index = freq_cnt;
		freq_table[freq_cnt].frequency = acpu_freq_tbl[i].khz;
		freq_cnt++;
	}
	/* freq_table not big enough to store all usable freqs. */
	BUG_ON(acpu_freq_tbl[i].khz != 0);

	freq_table[freq_cnt].index = freq_cnt;
	freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;

	pr_info("CPU: %d scaling frequencies supported.\n", freq_cnt);

	/* Register table with CPUFreq. */
	cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
}
#else
static void __init cpufreq_table_init(void) {}
#endif

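/*
 * Rates (in kHz) used by the acpuclock framework for power collapse and
 * SWFI; both map to the 19.2 MHz CXO entry of acpu_freq_tbl.
 */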
static struct acpuclk_data acpuclk_9625_data = {
	.set_rate = acpuclk_9625_set_rate,
	.get_rate = acpuclk_9625_get_rate,
	.power_collapse_khz = 19200,
	.wait_for_irq_khz = 19200,
};

static int __init acpuclk_9625_probe(struct platform_device *pdev)
{
	unsigned long max_cpu_khz = 0;
	struct resource *res;
	int i;
	u32 regval;

	mutex_init(&drv_data.lock);

	bus_perf_client = msm_bus_scale_register_client(&bus_client_pdata);
	if (!bus_perf_client) {
		pr_err("Unable to register bus client\n");
		BUG();
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg_base");
	if (!res)
		return -EINVAL;

	drv_data.apcs_rcg_config = ioremap(res->start, resource_size(res));
	if (!drv_data.apcs_rcg_config)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwr_base");
	if (!res)
		return -EINVAL;

	drv_data.apcs_cpu_pwr_ctl = ioremap(res->start, resource_size(res));
	if (!drv_data.apcs_cpu_pwr_ctl)
		return -ENOMEM;

	drv_data.vdd_cpu = regulator_get(&pdev->dev, "a5_cpu");
	if (IS_ERR(drv_data.vdd_cpu)) {
		dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_cpu");
		return PTR_ERR(drv_data.vdd_cpu);
	}

	drv_data.vdd_mem = regulator_get(&pdev->dev, "a5_mem");
	if (IS_ERR(drv_data.vdd_mem)) {
		dev_err(&pdev->dev, "regulator for %s get failed\n", "a5_mem");
		return PTR_ERR(drv_data.vdd_mem);
	}

	/* Disable hardware gating of gpll0 to A5SS */
	regval = readl_relaxed(drv_data.apcs_cpu_pwr_ctl);
	regval |= GPLL0_TO_A5_ALWAYS_ENABLE;
	writel_relaxed(regval, drv_data.apcs_cpu_pwr_ctl);

	for (i = 0; i < NUM_SRC; i++) {
		if (!src_clocks[i].name)
			continue;
		src_clocks[i].clk = clk_get(&pdev->dev, src_clocks[i].name);
		BUG_ON(IS_ERR(src_clocks[i].clk));
		/*
		 * Prepare the PLLs because we enable/disable them
		 * in atomic context during power collapse/restore.
		 */
		BUG_ON(clk_prepare(src_clocks[i].clk));
	}

	/* Improve boot time by ramping up CPU immediately */
	for (i = 0; acpu_freq_tbl[i].khz != 0 &&
			acpu_freq_tbl[i].use_for_scaling; i++)
		max_cpu_khz = acpu_freq_tbl[i].khz;

	acpuclk_9625_set_rate(smp_processor_id(), max_cpu_khz, SETRATE_INIT);

	acpuclk_register(&acpuclk_9625_data);
	cpufreq_table_init();

	return 0;
}

static struct of_device_id acpuclk_9625_match_table[] = {
	{.compatible = "qcom,acpuclk-9625"},
	{}
};

static struct platform_driver acpuclk_9625_driver = {
	.driver = {
		.name = "acpuclk-9625",
		.of_match_table = acpuclk_9625_match_table,
		.owner = THIS_MODULE,
	},
};

static int __init acpuclk_9625_init(void)
{
	return platform_driver_probe(&acpuclk_9625_driver, acpuclk_9625_probe);
}
device_initcall(acpuclk_9625_init);