/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Register definitions */
#define XMU_DVFS_CTRL		0x0060
#define XMU_PMU_P0_7		0x0064
#define XMU_C0_3_PSTATE		0x0090
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4
#define XMU_PMUEVTEN		0x00d0
#define XMU_PMUIRQEN		0x00d4
#define XMU_PMUIRQ		0x00d8

/* PMU mask and shift definitions */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28

#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

#define DIV_TAB_MAX	2
/* frequency unit is 20MHZ */
#define FREQ_UNIT	20
#define MAX_VOLTAGE	1550000		/* In microvolt */
#define VOLTAGE_STEP	12500		/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000

enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)

struct exynos_dvfs_data {
	void __iomem *base;
	struct resource *mem;
	int irq;
	struct clk *cpu_clk;
	unsigned int cur_frequency;
	unsigned int latency;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_count;
	struct device *dev;
	bool dvfs_enabled;
	struct work_struct irq_work;
};

static struct exynos_dvfs_data *dvfs_info;
static DEFINE_MUTEX(cpufreq_lock);
static struct cpufreq_freqs freqs;

static int init_div_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	unsigned int tmp, clk_div, ema_div, freq, volt_id;
	int i = 0;
	struct dev_pm_opp *opp;

	rcu_read_lock();
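	/*
	 * For each entry in the cpufreq table, look up its OPP, derive the
	 * CPU/ATB/debug clock dividers and the EMA values from the OPP
	 * voltage, and program the matching P-state slot in the PMU.
	 */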
	for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {

		opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
					freq_tbl[i].frequency * 1000, true);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(dvfs_info->dev,
				"failed to find valid OPP for %u KHZ\n",
				freq_tbl[i].frequency);
			return PTR_ERR(opp);
		}

		freq = freq_tbl[i].frequency / 1000; /* In MHZ */
		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
					<< P0_7_CPUCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
					<< P0_7_ATBCLKDEV_SHIFT;
		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
					<< P0_7_CSCLKDEV_SHIFT;

		/* Calculate EMA */
		volt_id = dev_pm_opp_get_voltage(opp);
		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
		if (volt_id < PMIC_HIGH_VOLT) {
			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
		} else if (volt_id > PMIC_LOW_VOLT) {
			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
		} else {
			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
				(L2EMA_MID << P0_7_L2EMA_SHIFT);
		}

		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));

		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
	}

	rcu_read_unlock();
	return 0;
}

static void exynos_enable_dvfs(void)
{
	unsigned int tmp, i, cpu;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
	/* Disable DVFS */
	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);

	/* Enable PSTATE Change Event */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);

	/* Enable PSTATE Change IRQ */
	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);

	/* Set initial performance index */
	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (freq_table[i].frequency == dvfs_info->cur_frequency)
			break;

	if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
		/* Assign the highest frequency */
		i = 0;
		dvfs_info->cur_frequency = freq_table[i].frequency;
	}

	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
						dvfs_info->cur_frequency);

	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
	}

	/* Enable DVFS */
	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
				dvfs_info->base + XMU_DVFS_CTRL);
}

static unsigned int exynos_getspeed(unsigned int cpu)
{
	return dvfs_info->cur_frequency;
}

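/*
 * Request a new P-state for all CPUs in the policy. The hardware completes
 * the transition asynchronously; the POSTCHANGE notification is sent from
 * exynos_cpufreq_work() once the PSTATE-changed interrupt fires.
 */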
static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned int tmp;
	int i;
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	mutex_lock(&cpufreq_lock);

	freqs.old = dvfs_info->cur_frequency;
	freqs.new = freq_table[index].frequency;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	/* Set the target frequency in all C0_3_PSTATE registers */
	for_each_cpu(i, policy->cpus) {
		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
		tmp |= (index << C0_3_PSTATE_NEW_SHIFT);

		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
	}
	mutex_unlock(&cpufreq_lock);
	return 0;
}

static void exynos_cpufreq_work(struct work_struct *work)
{
	unsigned int cur_pstate, index;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;

	/* Ensure we can access cpufreq structures */
	if (unlikely(dvfs_info->dvfs_enabled == false))
		goto skip_work;

	mutex_lock(&cpufreq_lock);
	freqs.old = dvfs_info->cur_frequency;

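	/*
	 * Read back the P-state the hardware actually reached; if the status
	 * is not yet flagged valid, fall back to the requested (new) state.
	 */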
	cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
	if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
		index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
	else
		index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;

	if (likely(index < dvfs_info->freq_count)) {
		freqs.new = freq_table[index].frequency;
		dvfs_info->cur_frequency = freqs.new;
	} else {
		dev_crit(dvfs_info->dev, "New frequency out of range\n");
		freqs.new = dvfs_info->cur_frequency;
	}
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	cpufreq_cpu_put(policy);
	mutex_unlock(&cpufreq_lock);
skip_work:
	enable_irq(dvfs_info->irq);
}

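/*
 * Acknowledge the PSTATE-changed interrupt and defer the cpufreq transition
 * notification to process context via the irq_work handler.
 */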
static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
{
	unsigned int tmp;

	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
	if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
		__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
		disable_irq_nosync(irq);
		schedule_work(&dvfs_info->irq_work);
	}
	return IRQ_HANDLED;
}

static void exynos_sort_descend_freq_table(void)
{
	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
	int i = 0, index;
	unsigned int tmp_freq;
	/*
	 * Exynos5440 clock controller state logic expects the cpufreq table to
	 * be in descending order. But the OPP library constructs the table in
	 * ascending order. So to make the table descending we just need to
	 * swap the i-th element with the (N - i - 1)-th element.
	 */
	for (i = 0; i < dvfs_info->freq_count / 2; i++) {
		index = dvfs_info->freq_count - i - 1;
		tmp_freq = freq_tbl[i].frequency;
		freq_tbl[i].frequency = freq_tbl[index].frequency;
		freq_tbl[index].frequency = tmp_freq;
	}
}

static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	return cpufreq_generic_init(policy, dvfs_info->freq_table,
			dvfs_info->latency);
}

static struct cpufreq_driver exynos_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= exynos_target,
	.get		= exynos_getspeed,
	.init		= exynos_cpufreq_cpu_init,
	.exit		= cpufreq_generic_exit,
	.name		= CPUFREQ_NAME,
	.attr		= cpufreq_generic_attr,
};

static const struct of_device_id exynos_cpufreq_match[] = {
	{
		.compatible = "samsung,exynos5440-cpufreq",
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);

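/*
 * Probe: map the XMU registers, build the frequency table from the OPPs
 * described in the device tree, program the per-P-state divider table and
 * finally register with the cpufreq core.
 */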
static int exynos_cpufreq_probe(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct device_node *np;
	struct resource res;

	np = pdev->dev.of_node;
	if (!np)
		return -ENODEV;

	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
	if (!dvfs_info) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	dvfs_info->dev = &pdev->dev;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		goto err_put_node;

	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
	if (IS_ERR(dvfs_info->base)) {
		ret = PTR_ERR(dvfs_info->base);
		goto err_put_node;
	}

	dvfs_info->irq = irq_of_parse_and_map(np, 0);
	if (!dvfs_info->irq) {
		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
		ret = -ENODEV;
		goto err_put_node;
	}

	ret = of_init_opp_table(dvfs_info->dev);
	if (ret) {
		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
		goto err_put_node;
	}

	ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
					    &dvfs_info->freq_table);
	if (ret) {
		dev_err(dvfs_info->dev,
			"failed to init cpufreq table: %d\n", ret);
		goto err_put_node;
	}
	dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
	exynos_sort_descend_freq_table();

	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
		dvfs_info->latency = DEF_TRANS_LATENCY;

	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
	if (IS_ERR(dvfs_info->cpu_clk)) {
		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
		ret = PTR_ERR(dvfs_info->cpu_clk);
		goto err_free_table;
	}

	dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
	if (!dvfs_info->cur_frequency) {
		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
		ret = -EINVAL;
		goto err_free_table;
	}
	dvfs_info->cur_frequency /= 1000;

	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
				CPUFREQ_NAME, dvfs_info);
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
		goto err_free_table;
	}

	ret = init_div_table();
	if (ret) {
		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
		goto err_free_table;
	}

	exynos_enable_dvfs();
	ret = cpufreq_register_driver(&exynos_driver);
	if (ret) {
		dev_err(dvfs_info->dev,
			"%s: failed to register cpufreq driver\n", __func__);
		goto err_free_table;
	}

	of_node_put(np);
	dvfs_info->dvfs_enabled = true;
	return 0;

err_free_table:
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
err_put_node:
	of_node_put(np);
	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
	return ret;
}

static int exynos_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&exynos_driver);
	dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
	return 0;
}

static struct platform_driver exynos_cpufreq_platdrv = {
	.driver = {
		.name	= "exynos5440-cpufreq",
		.owner	= THIS_MODULE,
		.of_match_table = exynos_cpufreq_match,
	},
	.probe		= exynos_cpufreq_probe,
	.remove		= exynos_cpufreq_remove,
};
module_platform_driver(exynos_cpufreq_platdrv);

MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");