/*
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * CPU frequency scaling for S5PC110/S5PV210
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

#include <mach/map.h>
#include <mach/regs-clock.h>

static struct clk *cpu_clk;
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static struct cpufreq_freqs freqs;
static DEFINE_MUTEX(set_freq_lock);

/* APLL M,P,S values for 1G/800Mhz */
#define APLL_VAL_1000   ((1 << 31) | (125 << 16) | (3 << 8) | 1)
#define APLL_VAL_800    ((1 << 31) | (100 << 16) | (3 << 8) | 1)
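
/*
 * Worked example of the encoding above (enable bit at 31, MDIV at bit 16,
 * PDIV at bit 8, SDIV at bit 0), assuming the usual S5PV210 APLL relation
 * FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)) with FIN = 24 MHz:
 *
 *      APLL_VAL_1000: MDIV=125, PDIV=3, SDIV=1 -> 24 * 125 / 3 = 1000 MHz
 *      APLL_VAL_800 : MDIV=100, PDIV=3, SDIV=1 -> 24 * 100 / 3 =  800 MHz
 */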

/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ      (800 * 1000)

/*
 * relation has additional semantics beyond the standard cpufreq relations:
 * DISABLE_FURTHER_CPUFREQ: disable further access to target
 * ENABLE_FURTHER_CPUFREQ: enable access to target
 */
enum cpufreq_access {
        DISABLE_FURTHER_CPUFREQ = 0x10,
        ENABLE_FURTHER_CPUFREQ = 0x20,
};
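
/*
 * These flags are ORed into the relation argument of cpufreq_driver_target(),
 * as the PM and reboot notifiers below do, e.g.:
 *
 *      cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
 *                            DISABLE_FURTHER_CPUFREQ);
 *
 * s5pv210_target() strips them from relation before using it.
 */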

static bool no_cpufreq_access;

/*
 * DRAM configurations to calculate refresh counter for changing
 * frequency of memory.
 */
struct dram_conf {
        unsigned long freq;     /* HZ */
        unsigned long refresh;  /* DRAM refresh counter * 1000 */
};

/* DRAM configuration (DMC0 and DMC1) */
static struct dram_conf s5pv210_dram_conf[2];

enum perf_level {
        L0, L1, L2, L3, L4,
};

enum s5pv210_mem_type {
        LPDDR   = 0x1,
        LPDDR2  = 0x2,
        DDR2    = 0x4,
};

enum s5pv210_dmc_port {
        DMC0 = 0,
        DMC1,
};

static struct cpufreq_frequency_table s5pv210_freq_table[] = {
        {L0, 1000*1000},
        {L1, 800*1000},
        {L2, 400*1000},
        {L3, 200*1000},
        {L4, 100*1000},
        {0, CPUFREQ_TABLE_END},
};

static struct regulator *arm_regulator;
static struct regulator *int_regulator;

struct s5pv210_dvs_conf {
        int arm_volt;   /* uV */
        int int_volt;   /* uV */
};

static const int arm_volt_max = 1350000;
static const int int_volt_max = 1250000;

static struct s5pv210_dvs_conf dvs_conf[] = {
        [L0] = {
                .arm_volt = 1250000,
                .int_volt = 1100000,
        },
        [L1] = {
                .arm_volt = 1200000,
                .int_volt = 1100000,
        },
        [L2] = {
                .arm_volt = 1050000,
                .int_volt = 1100000,
        },
        [L3] = {
                .arm_volt = 950000,
                .int_volt = 1100000,
        },
        [L4] = {
                .arm_volt = 950000,
                .int_volt = 1000000,
        },
};
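
/*
 * Note: each level's arm_volt/int_volt is requested as the minimum voltage
 * in regulator_set_voltage(), with arm_volt_max/int_volt_max as the upper
 * bound, e.g. for L0:
 *
 *      regulator_set_voltage(arm_regulator, dvs_conf[L0].arm_volt,
 *                            arm_volt_max);
 */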

static u32 clkdiv_val[5][11] = {
        /*
         * Clock divider value for following
         * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
         *   HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
         *   ONEDRAM, MFC, G3D }
         */

        /* L0 : [1000/200/100][166/83][133/66][200/200] */
        {0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},

        /* L1 : [800/200/100][166/83][133/66][200/200] */
        {0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},

        /* L2 : [400/200/100][166/83][133/66][200/200] */
        {1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

        /* L3 : [200/200/100][166/83][133/66][200/200] */
        {3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

        /* L4 : [100/100/100][83/83][66/66][100/100] */
        {7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};
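
/*
 * Reading a row, assuming each entry is programmed as (divisor - 1) into
 * the corresponding CLK_DIV field (as the register writes below suggest):
 * for L1 = {0, 3, 3, 1, ...} with the APLL at 800 MHz,
 *
 *      ARMCLK    = 800 / (0 + 1) = 800 MHz
 *      HCLK_MSYS = 800 / (3 + 1) = 200 MHz
 *      PCLK_MSYS = 200 / (1 + 1) = 100 MHz
 *
 * which matches the "[800/200/100]" annotation above.
 */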

/*
 * This function sets the DRAM refresh counter
 * according to the operating frequency of the DRAM.
 * ch: DMC port number 0 or 1
 * freq: Operating frequency of DRAM (KHz)
 */
static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
{
        unsigned long tmp, tmp1;
        void __iomem *reg = NULL;

        if (ch == DMC0) {
                reg = (S5P_VA_DMC0 + 0x30);
        } else if (ch == DMC1) {
                reg = (S5P_VA_DMC1 + 0x30);
        } else {
                printk(KERN_ERR "Cannot find DMC port\n");
                return;
        }

        /* Find current DRAM frequency */
        tmp = s5pv210_dram_conf[ch].freq;

        do_div(tmp, freq);

        tmp1 = s5pv210_dram_conf[ch].refresh;

        do_div(tmp1, tmp);

        __raw_writel(tmp1, reg);
}
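
/*
 * Worked example of the scaling above: dram_conf stores the boot-time
 * refresh count multiplied by 1000 and the DRAM clock in Hz, so for a
 * DMC1 that booted at 200MHz with a refresh count of 0x618 (1560),
 * s5pv210_set_refresh(DMC1, 100000) computes
 *
 *      tmp  = 200000000 / 100000   = 2000
 *      tmp1 = (1560 * 1000) / 2000 = 780 = 0x30c
 *
 * i.e. halving the DMC1 clock halves the refresh count, matching the
 * "7.8us/(1/100) = 0x30c" figure quoted in s5pv210_target().
 */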

static unsigned int s5pv210_getspeed(unsigned int cpu)
{
        if (cpu)
                return 0;

        return clk_get_rate(cpu_clk) / 1000;
}

static int s5pv210_target(struct cpufreq_policy *policy,
                          unsigned int target_freq,
                          unsigned int relation)
{
        unsigned long reg;
        unsigned int index, priv_index;
        unsigned int pll_changing = 0;
        unsigned int bus_speed_changing = 0;
        int arm_volt, int_volt;
        int ret = 0;

        mutex_lock(&set_freq_lock);

        if (relation & ENABLE_FURTHER_CPUFREQ)
                no_cpufreq_access = false;

        if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
                pr_err("%s:%d denied access to %s as it is disabled "
                       "temporarily\n", __FILE__, __LINE__, __func__);
#endif
                ret = -EINVAL;
                goto exit;
        }

        if (relation & DISABLE_FURTHER_CPUFREQ)
                no_cpufreq_access = true;

        relation &= ~(ENABLE_FURTHER_CPUFREQ | DISABLE_FURTHER_CPUFREQ);

        freqs.old = s5pv210_getspeed(0);

        if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
                        target_freq, relation, &index)) {
                ret = -EINVAL;
                goto exit;
        }

        freqs.new = s5pv210_freq_table[index].frequency;

        if (freqs.new == freqs.old)
                goto exit;

        /* Finding current running level index */
        if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
                        freqs.old, relation, &priv_index)) {
                ret = -EINVAL;
                goto exit;
        }

        arm_volt = dvs_conf[index].arm_volt;
        int_volt = dvs_conf[index].int_volt;

        if (freqs.new > freqs.old) {
                ret = regulator_set_voltage(arm_regulator,
                                arm_volt, arm_volt_max);
                if (ret)
                        goto exit;

                ret = regulator_set_voltage(int_regulator,
                                int_volt, int_volt_max);
                if (ret)
                        goto exit;
        }

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

        /* Check whether the PLL needs to be changed */
        if ((index == L0) || (priv_index == L0))
                pll_changing = 1;

        /* Check whether the system bus clock needs to be changed */
        if ((index == L4) || (priv_index == L4))
                bus_speed_changing = 1;

        if (bus_speed_changing) {
                /*
                 * Reconfigure DRAM refresh counter value for minimum
                 * temporary clock while changing divider.
                 * expected clock is 83Mhz : 7.8usec/(1/83Mhz) = 0x287
                 */
                if (pll_changing)
                        s5pv210_set_refresh(DMC1, 83000);
                else
                        s5pv210_set_refresh(DMC1, 100000);

                s5pv210_set_refresh(DMC0, 83000);
        }

        /*
         * The APLL has to be changed at this level:
         * APLL -> MPLL (for a stable transition) -> APLL
         * Some clock sources' clock API is not ready yet,
         * so do not use the clock API in the code below.
         */
        if (pll_changing) {
                /*
                 * 1. Temporarily change the divider for MFC and G3D
                 * SCLKA2M(200/1=200) -> (200/4=50)Mhz
                 */
                reg = __raw_readl(S5P_CLK_DIV2);
                reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
                reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
                        (3 << S5P_CLKDIV2_MFC_SHIFT);
                __raw_writel(reg, S5P_CLK_DIV2);

                /* For MFC, G3D dividing */
                do {
                        reg = __raw_readl(S5P_CLKDIV_STAT0);
                } while (reg & ((1 << 16) | (1 << 17)));

                /*
                 * 2. Change SCLKA2M(200Mhz) to SCLKMPLL in MFC_MUX, G3D MUX
                 * (200/4=50) -> (667/4=166)Mhz
                 */
                reg = __raw_readl(S5P_CLK_SRC2);
                reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
                reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
                        (1 << S5P_CLKSRC2_MFC_SHIFT);
                __raw_writel(reg, S5P_CLK_SRC2);

                do {
                        reg = __raw_readl(S5P_CLKMUX_STAT1);
                } while (reg & ((1 << 7) | (1 << 3)));

                /*
                 * 3. DMC1 refresh count for 133Mhz. If (index == L4) is
                 * true, the refresh counter was already programmed in the
                 * code above (0x287 @ 83Mhz).
                 */
                if (!bus_speed_changing)
                        s5pv210_set_refresh(DMC1, 133000);

                /* 4. SCLKAPLL -> SCLKMPLL */
                reg = __raw_readl(S5P_CLK_SRC0);
                reg &= ~(S5P_CLKSRC0_MUX200_MASK);
                reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
                __raw_writel(reg, S5P_CLK_SRC0);

                do {
                        reg = __raw_readl(S5P_CLKMUX_STAT0);
                } while (reg & (0x1 << 18));

        }

        /* Change divider */
        reg = __raw_readl(S5P_CLK_DIV0);

        reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
                S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
                S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
                S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);

        reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
                (clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
                (clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
                (clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
                (clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
                (clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
                (clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
                (clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));

        __raw_writel(reg, S5P_CLK_DIV0);

        do {
                reg = __raw_readl(S5P_CLKDIV_STAT0);
        } while (reg & 0xff);

        /* ARM MCS value changed */
        reg = __raw_readl(S5P_ARM_MCS_CON);
        reg &= ~0x3;
        if (index >= L3)
                reg |= 0x3;
        else
                reg |= 0x1;

        __raw_writel(reg, S5P_ARM_MCS_CON);

        if (pll_changing) {
                /* 5. Set Lock time = 30us*24Mhz = 0x2cf */
                __raw_writel(0x2cf, S5P_APLL_LOCK);

                /*
                 * 6. Turn on APLL
                 * 6-1. Set PMS values
                 * 6-2. Wait until the PLL is locked
                 */
                if (index == L0)
                        __raw_writel(APLL_VAL_1000, S5P_APLL_CON);
                else
                        __raw_writel(APLL_VAL_800, S5P_APLL_CON);

                do {
                        reg = __raw_readl(S5P_APLL_CON);
                } while (!(reg & (0x1 << 29)));

                /*
                 * 7. Change source clock from SCLKMPLL(667Mhz)
                 * to SCLKA2M(200Mhz) in MFC_MUX and G3D MUX
                 * (667/4=166) -> (200/4=50)Mhz
                 */
                reg = __raw_readl(S5P_CLK_SRC2);
                reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
                reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
                        (0 << S5P_CLKSRC2_MFC_SHIFT);
                __raw_writel(reg, S5P_CLK_SRC2);

                do {
                        reg = __raw_readl(S5P_CLKMUX_STAT1);
                } while (reg & ((1 << 7) | (1 << 3)));

                /*
                 * 8. Change divider for MFC and G3D
                 * (200/4=50)->(200/1=200)Mhz
                 */
                reg = __raw_readl(S5P_CLK_DIV2);
                reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
                reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
                        (clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
                __raw_writel(reg, S5P_CLK_DIV2);

                /* For MFC, G3D dividing */
                do {
                        reg = __raw_readl(S5P_CLKDIV_STAT0);
                } while (reg & ((1 << 16) | (1 << 17)));

                /* 9. Change MPLL to APLL in MSYS_MUX */
                reg = __raw_readl(S5P_CLK_SRC0);
                reg &= ~(S5P_CLKSRC0_MUX200_MASK);
                reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
                __raw_writel(reg, S5P_CLK_SRC0);

                do {
                        reg = __raw_readl(S5P_CLKMUX_STAT0);
                } while (reg & (0x1 << 18));

                /*
                 * 10. DMC1 refresh counter
                 * L4 : DMC1 = 100Mhz 7.8us/(1/100) = 0x30c
                 * Others : DMC1 = 200Mhz 7.8us/(1/200) = 0x618
                 */
                if (!bus_speed_changing)
                        s5pv210_set_refresh(DMC1, 200000);
        }

        /*
         * The L4 level needs to change the memory bus speed, hence the
         * ONEDRAM clock divider and the memory refresh parameter should
         * be changed as well.
         */
        if (bus_speed_changing) {
                reg = __raw_readl(S5P_CLK_DIV6);
                reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
                reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
                __raw_writel(reg, S5P_CLK_DIV6);

                do {
                        reg = __raw_readl(S5P_CLKDIV_STAT1);
                } while (reg & (1 << 15));

                /* Reconfigure DRAM refresh counter value */
                if (index != L4) {
                        /*
                         * DMC0 : 166Mhz
                         * DMC1 : 200Mhz
                         */
                        s5pv210_set_refresh(DMC0, 166000);
                        s5pv210_set_refresh(DMC1, 200000);
                } else {
                        /*
                         * DMC0 : 83Mhz
                         * DMC1 : 100Mhz
                         */
                        s5pv210_set_refresh(DMC0, 83000);
                        s5pv210_set_refresh(DMC1, 100000);
                }
        }

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        if (freqs.new < freqs.old) {
                regulator_set_voltage(int_regulator,
                                int_volt, int_volt_max);

                regulator_set_voltage(arm_regulator,
                                arm_volt, arm_volt_max);
        }

        printk(KERN_DEBUG "Perf changed[L%d]\n", index);

exit:
        mutex_unlock(&set_freq_lock);
        return ret;
}

#ifdef CONFIG_PM
static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
        return 0;
}

static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
{
        return 0;
}
#endif

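/*
 * check_mem_type() reads the register at offset 0x4 of the given DMC block
 * (taken here to be the DMC MEMCONTROL register) and returns its mem_type
 * field, bits [11:8], which the init code below compares against
 * enum s5pv210_mem_type.
 */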
static int check_mem_type(void __iomem *dmc_reg)
{
        unsigned long val;

        val = __raw_readl(dmc_reg + 0x4);
        val = (val & (0xf << 8));

        return val >> 8;
}

static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
{
        unsigned long mem_type;
        int ret;

        cpu_clk = clk_get(NULL, "armclk");
        if (IS_ERR(cpu_clk))
                return PTR_ERR(cpu_clk);

        dmc0_clk = clk_get(NULL, "sclk_dmc0");
        if (IS_ERR(dmc0_clk)) {
                ret = PTR_ERR(dmc0_clk);
                goto out_dmc0;
        }

        dmc1_clk = clk_get(NULL, "hclk_msys");
        if (IS_ERR(dmc1_clk)) {
                ret = PTR_ERR(dmc1_clk);
                goto out_dmc1;
        }

        if (policy->cpu != 0) {
                ret = -EINVAL;
                goto out_dmc1;
        }

        /*
         * check_mem_type : This driver only supports LPDDR & LPDDR2.
         * Other memory types are not supported.
         */
        mem_type = check_mem_type(S5P_VA_DMC0);

        if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
                printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
                ret = -EINVAL;
                goto out_dmc1;
        }

        /* Find the current refresh counter and frequency for each DMC */
        s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
        s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);

        s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
        s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);

        policy->cur = policy->min = policy->max = s5pv210_getspeed(0);

        policy->cpuinfo.transition_latency = 40000;

        return cpufreq_table_validate_and_show(policy, s5pv210_freq_table);

out_dmc1:
        clk_put(dmc0_clk);
out_dmc0:
        clk_put(cpu_clk);
        return ret;
}

static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
                                          unsigned long event, void *ptr)
{
        int ret;

        switch (event) {
        case PM_SUSPEND_PREPARE:
                ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
                                DISABLE_FURTHER_CPUFREQ);
                if (ret < 0)
                        return NOTIFY_BAD;

                return NOTIFY_OK;
        case PM_POST_RESTORE:
        case PM_POST_SUSPEND:
                cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
                                ENABLE_FURTHER_CPUFREQ);

                return NOTIFY_OK;
        }

        return NOTIFY_DONE;
}

static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
{
        int ret;

        ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ,
                        DISABLE_FURTHER_CPUFREQ);
        if (ret < 0)
                return NOTIFY_BAD;

        return NOTIFY_DONE;
}

static struct cpufreq_driver s5pv210_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s5pv210_target,
        .get            = s5pv210_getspeed,
        .init           = s5pv210_cpu_init,
        .name           = "s5pv210",
#ifdef CONFIG_PM
        .suspend        = s5pv210_cpufreq_suspend,
        .resume         = s5pv210_cpufreq_resume,
#endif
};

static struct notifier_block s5pv210_cpufreq_notifier = {
        .notifier_call = s5pv210_cpufreq_notifier_event,
};

static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
        .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};

static int __init s5pv210_cpufreq_init(void)
{
        arm_regulator = regulator_get(NULL, "vddarm");
        if (IS_ERR(arm_regulator)) {
                pr_err("failed to get regulator vddarm");
                return PTR_ERR(arm_regulator);
        }

        int_regulator = regulator_get(NULL, "vddint");
        if (IS_ERR(int_regulator)) {
                pr_err("failed to get regulator vddint");
                regulator_put(arm_regulator);
                return PTR_ERR(int_regulator);
        }

        register_pm_notifier(&s5pv210_cpufreq_notifier);
        register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);

        return cpufreq_register_driver(&s5pv210_driver);
}

late_initcall(s5pv210_cpufreq_init);