/*
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * CPU frequency scaling for S5PC110/S5PV210
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

#include <mach/map.h>
#include <mach/regs-clock.h>

static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);

/* APLL M,P,S values for 1GHz/800MHz */
#define APLL_VAL_1000	((1 << 31) | (125 << 16) | (3 << 8) | 1)
#define APLL_VAL_800	((1 << 31) | (100 << 16) | (3 << 8) | 1)
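/*
 * Rough sanity check (assuming the usual 24MHz crystal as FIN and the
 * S5PV210 APLL relation FOUT = MDIV * FIN / (PDIV * 2^(SDIV-1))):
 * M=125, P=3, S=1 -> 125 * 24 / 3 = 1000MHz
 * M=100, P=3, S=1 -> 100 * 24 / 3 =  800MHz
 * Bit 31 is the PLL enable bit.
 */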

/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ	(800 * 1000) /* kHz */

/*
 * relation has additional semantics beyond the standard cpufreq ones:
 * DISABLE_FURTHER_CPUFREQ: disable further access to target
 * ENABLE_FURTHER_CPUFREQ: enable access to target
 */
enum cpufreq_access {
	DISABLE_FURTHER_CPUFREQ = 0x10,
	ENABLE_FURTHER_CPUFREQ = 0x20,
};

static bool no_cpufreq_access;

/*
 * DRAM configuration, used to recalculate the refresh counter when the
 * memory frequency changes.
 */
struct dram_conf {
	unsigned long freq;	/* Hz */
	unsigned long refresh;	/* DRAM refresh counter * 1000 */
};

/* DRAM configuration (DMC0 and DMC1) */
static struct dram_conf s5pv210_dram_conf[2];

enum perf_level {
	L0, L1, L2, L3, L4,
};

enum s5pv210_mem_type {
	LPDDR	= 0x1,
	LPDDR2	= 0x2,
	DDR2	= 0x4,
};

enum s5pv210_dmc_port {
	DMC0 = 0,
	DMC1,
};

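/*
 * Frequencies below are in kHz, as the cpufreq core expects; the index
 * field of each entry is the corresponding enum perf_level value.
 */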
static struct cpufreq_frequency_table s5pv210_freq_table[] = {
	{L0, 1000*1000},
	{L1, 800*1000},
	{L2, 400*1000},
	{L3, 200*1000},
	{L4, 100*1000},
	{0, CPUFREQ_TABLE_END},
};

static struct regulator *arm_regulator;
static struct regulator *int_regulator;

struct s5pv210_dvs_conf {
	int arm_volt;	/* uV */
	int int_volt;	/* uV */
};

static const int arm_volt_max = 1350000;
static const int int_volt_max = 1250000;

static struct s5pv210_dvs_conf dvs_conf[] = {
	[L0] = {
		.arm_volt = 1250000,
		.int_volt = 1100000,
	},
	[L1] = {
		.arm_volt = 1200000,
		.int_volt = 1100000,
	},
	[L2] = {
		.arm_volt = 1050000,
		.int_volt = 1100000,
	},
	[L3] = {
		.arm_volt = 950000,
		.int_volt = 1100000,
	},
	[L4] = {
		.arm_volt = 950000,
		.int_volt = 1000000,
	},
};
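
/*
 * s5pv210_target() applies these voltages around each transition: they are
 * raised (via regulator_set_voltage) before the clocks are scaled up, and
 * lowered after the clocks are scaled down.
 */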

static u32 clkdiv_val[5][11] = {
	/*
	 * Clock divider values for the following clocks:
	 * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
	 *   HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
	 *   ONEDRAM, MFC, G3D }
	 */

	/* L0 : [1000/200/100][166/83][133/66][200/200] */
	{0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L1 : [800/200/100][166/83][133/66][200/200] */
	{0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L2 : [400/200/100][166/83][133/66][200/200] */
	{1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L3 : [200/200/100][166/83][133/66][200/200] */
	{3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L4 : [100/100/100][83/83][66/66][100/100] */
	{7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};

/*
 * This function sets the DRAM refresh counter
 * according to the operating frequency of DRAM.
 * ch: DMC port number 0 or 1
 * freq: Operating frequency of DRAM (KHz)
 */
static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
{
	unsigned long tmp, tmp1;
	void __iomem *reg = NULL;

	if (ch == DMC0) {
		reg = (S5P_VA_DMC0 + 0x30);
	} else if (ch == DMC1) {
		reg = (S5P_VA_DMC1 + 0x30);
	} else {
		printk(KERN_ERR "Cannot find DMC port\n");
		return;
	}

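	/*
	 * The refresh counter scales linearly with the DRAM clock. Since
	 * .freq is stored in Hz, the requested freq is in kHz and .refresh
	 * was pre-multiplied by 1000, the factors of 1000 cancel and the
	 * value written below is simply
	 * boot-time counter * (new freq / boot-time freq).
	 */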
	/* Find current DRAM frequency */
	tmp = s5pv210_dram_conf[ch].freq;

	do_div(tmp, freq);

	tmp1 = s5pv210_dram_conf[ch].refresh;

	do_div(tmp1, tmp);

	__raw_writel(tmp1, reg);
}

static int s5pv210_verify_speed(struct cpufreq_policy *policy)
{
	if (policy->cpu)
		return -EINVAL;

	return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
}

static unsigned int s5pv210_getspeed(unsigned int cpu)
{
	if (cpu)
		return 0;

	return clk_get_rate(cpu_clk) / 1000;
}

static int s5pv210_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	unsigned long reg;
	unsigned int index, priv_index;
	unsigned int pll_changing = 0;
	unsigned int bus_speed_changing = 0;
	int arm_volt, int_volt;
	int ret = 0;

	mutex_lock(&set_freq_lock);

	if (relation & ENABLE_FURTHER_CPUFREQ)
		no_cpufreq_access = false;

	if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
		pr_err("%s:%d denied access to %s as it is disabled "
				"temporarily\n", __FILE__, __LINE__, __func__);
#endif
		ret = -EINVAL;
		goto exit;
	}

	if (relation & DISABLE_FURTHER_CPUFREQ)
		no_cpufreq_access = true;

	relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);
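
	/*
	 * The two flag bits above are OR'ed into 'relation' by the PM and
	 * reboot notifiers at the end of this file so the frequency can be
	 * pinned at SLEEP_FREQ across suspend/reboot; they are masked off
	 * here before 'relation' is handed to the frequency table lookup.
	 */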

	freqs.old = s5pv210_getspeed(0);

	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
					   target_freq, relation, &index)) {
		ret = -EINVAL;
		goto exit;
	}

	freqs.new = s5pv210_freq_table[index].frequency;
	freqs.cpu = 0;

	if (freqs.new == freqs.old)
		goto exit;

	/* Find the index of the currently running level */
	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
					   freqs.old, relation, &priv_index)) {
		ret = -EINVAL;
		goto exit;
	}

	arm_volt = dvs_conf[index].arm_volt;
	int_volt = dvs_conf[index].int_volt;

	if (freqs.new > freqs.old) {
		ret = regulator_set_voltage(arm_regulator,
					    arm_volt, arm_volt_max);
		if (ret)
			goto exit;

		ret = regulator_set_voltage(int_regulator,
					    int_volt, int_volt_max);
		if (ret)
			goto exit;
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* Check whether the PLL needs to be changed */
	if ((index == L0) || (priv_index == L0))
		pll_changing = 1;

	/* Check whether the system bus clock needs to be changed */
	if ((index == L4) || (priv_index == L4))
		bus_speed_changing = 1;

	if (bus_speed_changing) {
		/*
		 * Reconfigure the DRAM refresh counter value for the minimum
		 * temporary clock while changing the divider.
		 * Expected clock is 83MHz: 7.8usec/(1/83MHz) = 0x287
		 */
		if (pll_changing)
			s5pv210_set_refresh(DMC1, 83000);
		else
			s5pv210_set_refresh(DMC1, 100000);

		s5pv210_set_refresh(DMC0, 83000);
	}

	/*
	 * The APLL has to be changed at this level:
	 * APLL -> MPLL (for a stable transition) -> APLL.
	 * Some clock sources' clock API is not ready yet,
	 * so do not use the clock API in the code below.
	 */
	if (pll_changing) {
		/*
		 * 1. Temporarily change the divider for MFC and G3D:
		 * SCLKA2M(200/1=200) -> (200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
			(3 << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC, G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/*
		 * 2. Change SCLKA2M(200MHz) to SCLKMPLL in MFC_MUX, G3D MUX:
		 * (200/4=50) -> (667/4=166)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
			(1 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 3. DMC1 refresh count for 133MHz. If (index == L4) the
		 * refresh counter has already been programmed above
		 * (0x287 @ 83MHz).
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 133000);

		/* 4. SCLKAPLL -> SCLKMPLL */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

	}

	/* Change divider */
	reg = __raw_readl(S5P_CLK_DIV0);

	reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
		S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
		S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
		S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);

	reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
		(clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
		(clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
		(clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
		(clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
		(clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
		(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
		(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));

	__raw_writel(reg, S5P_CLK_DIV0);

	do {
		reg = __raw_readl(S5P_CLKDIV_STAT0);
	} while (reg & 0xff);

	/* ARM MCS value changed */
	reg = __raw_readl(S5P_ARM_MCS_CON);
	reg &= ~0x3;
	if (index >= L3)
		reg |= 0x3;
	else
		reg |= 0x1;

	__raw_writel(reg, S5P_ARM_MCS_CON);

	if (pll_changing) {
		/* 5. Set lock time = 30us * 24MHz = 0x2cf */
		__raw_writel(0x2cf, S5P_APLL_LOCK);

		/*
		 * 6. Turn on APLL
		 * 6-1. Set PMS values
		 * 6-2. Wait until the PLL is locked
		 */
		if (index == L0)
			__raw_writel(APLL_VAL_1000, S5P_APLL_CON);
		else
			__raw_writel(APLL_VAL_800, S5P_APLL_CON);

		do {
			reg = __raw_readl(S5P_APLL_CON);
		} while (!(reg & (0x1 << 29)));

		/*
		 * 7. Change the source clock from SCLKMPLL(667MHz)
		 * to SCLKA2M(200MHz) in MFC_MUX and G3D MUX:
		 * (667/4=166) -> (200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
			(0 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 8. Change the divider for MFC and G3D:
		 * (200/4=50) -> (200/1=200)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
			(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC, G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/* 9. Change MPLL to APLL in MSYS_MUX */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

		/*
		 * 10. DMC1 refresh counter:
		 * L4     : DMC1 = 100MHz, 7.8us/(1/100) = 0x30c
		 * Others : DMC1 = 200MHz, 7.8us/(1/200) = 0x618
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 200000);
	}

	/*
	 * The L4 level needs a change of the memory bus speed, hence the
	 * ONEDRAM clock divider and the memory refresh parameters change too.
	 */
	if (bus_speed_changing) {
		reg = __raw_readl(S5P_CLK_DIV6);
		reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
		reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV6);

		do {
			reg = __raw_readl(S5P_CLKDIV_STAT1);
		} while (reg & (1 << 15));

		/* Reconfigure DRAM refresh counter value */
		if (index != L4) {
			/*
			 * DMC0 : 166MHz
			 * DMC1 : 200MHz
			 */
			s5pv210_set_refresh(DMC0, 166000);
			s5pv210_set_refresh(DMC1, 200000);
		} else {
			/*
			 * DMC0 : 83MHz
			 * DMC1 : 100MHz
			 */
			s5pv210_set_refresh(DMC0, 83000);
			s5pv210_set_refresh(DMC1, 100000);
		}
	}

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	if (freqs.new < freqs.old) {
		regulator_set_voltage(int_regulator,
				int_volt, int_volt_max);

		regulator_set_voltage(arm_regulator,
				arm_volt, arm_volt_max);
	}

	printk(KERN_DEBUG "Perf changed[L%d]\n", index);

exit:
	mutex_unlock(&set_freq_lock);
	return ret;
}

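/*
 * The cpufreq suspend/resume hooks below are no-ops; pinning the frequency
 * at SLEEP_FREQ across suspend is handled instead by the PM notifier
 * registered in s5pv210_cpufreq_init().
 */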
#ifdef CONFIG_PM
static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
	return 0;
}

static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
{
	return 0;
}
#endif

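/*
 * Read the memory type from bits [11:8] of the DMC register at offset 0x4;
 * the caller compares the result against enum s5pv210_mem_type.
 */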
static int check_mem_type(void __iomem *dmc_reg)
{
	unsigned long val;

	val = __raw_readl(dmc_reg + 0x4);
	val = (val & (0xf << 8));

	return val >> 8;
}

static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
{
	unsigned long mem_type;
	int ret;

	cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	dmc0_clk = clk_get(NULL, "sclk_dmc0");
	if (IS_ERR(dmc0_clk)) {
		ret = PTR_ERR(dmc0_clk);
		goto out_dmc0;
	}

	dmc1_clk = clk_get(NULL, "hclk_msys");
	if (IS_ERR(dmc1_clk)) {
		ret = PTR_ERR(dmc1_clk);
		goto out_dmc1;
	}

	if (policy->cpu != 0) {
		ret = -EINVAL;
		goto out_dmc1;
	}

	/*
	 * check_mem_type: this driver only supports LPDDR and LPDDR2;
	 * other memory types are not supported.
	 */
	mem_type = check_mem_type(S5P_VA_DMC0);

	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
		printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
		ret = -EINVAL;
		goto out_dmc1;
	}

	/* Find the current refresh counter and frequency of each DMC */
	s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
	s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);

	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
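
	/*
	 * The values just recorded (refresh counter scaled by 1000, DMC
	 * clock in Hz) form the baseline that s5pv210_set_refresh() rescales
	 * for each DVFS transition.
	 */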

	policy->cur = policy->min = policy->max = s5pv210_getspeed(0);

	cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);

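	/* Worst-case transition latency advertised to the cpufreq core, in ns (40us) */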
	policy->cpuinfo.transition_latency = 40000;

	return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);

out_dmc1:
	clk_put(dmc0_clk);
out_dmc0:
	clk_put(cpu_clk);
	return ret;
}

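/*
 * PM notifier: before suspend, force the CPU to SLEEP_FREQ and lock out any
 * further frequency changes (DISABLE_FURTHER_CPUFREQ); after resume/restore,
 * re-enable them (ENABLE_FURTHER_CPUFREQ).
 */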
static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
					  unsigned long event, void *ptr)
{
	int ret;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
					    DISABLE_FURTHER_CPUFREQ);
		if (ret < 0)
			return NOTIFY_BAD;

		return NOTIFY_OK;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
				      ENABLE_FURTHER_CPUFREQ);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

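/*
 * Reboot notifier: likewise drop to SLEEP_FREQ and disable further frequency
 * changes before reboot, presumably so the system restarts from a known-safe
 * operating point.
 */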
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
						 unsigned long event, void *ptr)
{
	int ret;

	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
				    DISABLE_FURTHER_CPUFREQ);
	if (ret < 0)
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}

static struct cpufreq_driver s5pv210_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= s5pv210_verify_speed,
	.target		= s5pv210_target,
	.get		= s5pv210_getspeed,
	.init		= s5pv210_cpu_init,
	.name		= "s5pv210",
#ifdef CONFIG_PM
	.suspend	= s5pv210_cpufreq_suspend,
	.resume		= s5pv210_cpufreq_resume,
#endif
};

static struct notifier_block s5pv210_cpufreq_notifier = {
	.notifier_call = s5pv210_cpufreq_notifier_event,
};

static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
	.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};

static int __init s5pv210_cpufreq_init(void)
{
	arm_regulator = regulator_get(NULL, "vddarm");
	if (IS_ERR(arm_regulator)) {
		pr_err("failed to get regulator vddarm\n");
		return PTR_ERR(arm_regulator);
	}

	int_regulator = regulator_get(NULL, "vddint");
	if (IS_ERR(int_regulator)) {
		pr_err("failed to get regulator vddint\n");
		regulator_put(arm_regulator);
		return PTR_ERR(int_regulator);
	}

	register_pm_notifier(&s5pv210_cpufreq_notifier);
	register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);

	return cpufreq_register_driver(&s5pv210_driver);
}

late_initcall(s5pv210_cpufreq_init);