/*
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * CPU frequency scaling for S5PC110/S5PV210
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

#include <mach/map.h>
#include <mach/regs-clock.h>

static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
static DEFINE_MUTEX(set_freq_lock);

/* APLL M, P, S values for 1 GHz and 800 MHz */
#define APLL_VAL_1000	((1 << 31) | (125 << 16) | (3 << 8) | 1)
#define APLL_VAL_800	((1 << 31) | (100 << 16) | (3 << 8) | 1)
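
/*
 * Note on the encoding above (added as a reading aid; the formula is the
 * usual one for the S5PV210 APLL, not something stated in this file):
 * bit 31 enables the PLL, bits [25:16] are MDIV, bits [13:8] are PDIV and
 * bits [2:0] are SDIV, with FOUT = MDIV * FIN / (PDIV * 2^(SDIV - 1)).
 * With FIN = 24 MHz this gives 125 * 24 / 3 = 1000 MHz and
 * 100 * 24 / 3 = 800 MHz for the two values.
 */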

/* Use 800MHz when entering sleep mode */
#define SLEEP_FREQ	(800 * 1000)

/* Tracks whether the CPU frequency can still be updated */
static bool no_cpufreq_access;

/*
 * DRAM configuration used to calculate the refresh counter when
 * changing the memory frequency.
 */
struct dram_conf {
	unsigned long freq;	/* Hz */
	unsigned long refresh;	/* DRAM refresh counter * 1000 */
};

/* DRAM configuration (DMC0 and DMC1) */
static struct dram_conf s5pv210_dram_conf[2];

enum perf_level {
	L0, L1, L2, L3, L4,
};

enum s5pv210_mem_type {
	LPDDR	= 0x1,
	LPDDR2	= 0x2,
	DDR2	= 0x4,
};

enum s5pv210_dmc_port {
	DMC0 = 0,
	DMC1,
};

static struct cpufreq_frequency_table s5pv210_freq_table[] = {
	{L0, 1000*1000},
	{L1, 800*1000},
	{L2, 400*1000},
	{L3, 200*1000},
	{L4, 100*1000},
	{0, CPUFREQ_TABLE_END},
};
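
/*
 * Note added for clarity: the second field of each entry is the target
 * frequency in kHz, as the cpufreq core expects, and the rows are ordered
 * so that a row's position matches the perf_level used to index
 * clkdiv_val[] and dvs_conf[] below.
 */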

static struct regulator *arm_regulator;
static struct regulator *int_regulator;

struct s5pv210_dvs_conf {
	int arm_volt;	/* uV */
	int int_volt;	/* uV */
};

static const int arm_volt_max = 1350000;
static const int int_volt_max = 1250000;

static struct s5pv210_dvs_conf dvs_conf[] = {
	[L0] = {
		.arm_volt	= 1250000,
		.int_volt	= 1100000,
	},
	[L1] = {
		.arm_volt	= 1200000,
		.int_volt	= 1100000,
	},
	[L2] = {
		.arm_volt	= 1050000,
		.int_volt	= 1100000,
	},
	[L3] = {
		.arm_volt	= 950000,
		.int_volt	= 1100000,
	},
	[L4] = {
		.arm_volt	= 950000,
		.int_volt	= 1000000,
	},
};
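
/*
 * Added note: each performance level pairs an ARM core voltage with an
 * internal (INT) voltage.  s5pv210_target() below raises both voltages
 * before increasing the frequency and lowers them only after decreasing
 * it, which is the usual DVFS ordering for staying within spec.
 */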

static u32 clkdiv_val[5][11] = {
	/*
	 * Clock divider values for the following set of clocks:
	 * { APLL, A2M, HCLK_MSYS, PCLK_MSYS,
	 *   HCLK_DSYS, PCLK_DSYS, HCLK_PSYS, PCLK_PSYS,
	 *   ONEDRAM, MFC, G3D }
	 */

	/* L0 : [1000/200/100][166/83][133/66][200/200] */
	{0, 4, 4, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L1 : [800/200/100][166/83][133/66][200/200] */
	{0, 3, 3, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L2 : [400/200/100][166/83][133/66][200/200] */
	{1, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L3 : [200/200/100][166/83][133/66][200/200] */
	{3, 3, 1, 1, 3, 1, 4, 1, 3, 0, 0},

	/* L4 : [100/100/100][83/83][66/66][100/100] */
	{7, 7, 0, 0, 7, 0, 9, 0, 7, 0, 0},
};
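
/*
 * How to read a row (note added as a reading aid; it assumes the usual
 * S5PV210 divider encoding, where a register value of N divides by N + 1):
 * for L0, ARMCLK = 1000 MHz / (0 + 1), HCLK_MSYS = ARMCLK / (4 + 1) =
 * 200 MHz and PCLK_MSYS = HCLK_MSYS / (1 + 1) = 100 MHz, which is the
 * "[1000/200/100]..." shorthand spelled out in each row's comment.
 */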

/*
 * This function sets the DRAM refresh counter according to the
 * operating frequency of the DRAM:
 * ch: DMC port number (0 or 1)
 * freq: operating frequency of the DRAM (kHz)
 */
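/*
 * Added note on the arithmetic below: the boot-time refresh count and DRAM
 * clock captured in s5pv210_dram_conf[] define the refresh period, so the
 * new count is simply scaled as boot_count * target_freq / boot_freq (the
 * intermediate "* 1000" factors cancel out).  For example, a part refreshed
 * with a count of 0x618 at 200 MHz would be reprogrammed to roughly 0x30c
 * when the DRAM drops to 100 MHz.
 */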
static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
{
	unsigned long tmp, tmp1;
	void __iomem *reg = NULL;

	if (ch == DMC0) {
		reg = (S5P_VA_DMC0 + 0x30);
	} else if (ch == DMC1) {
		reg = (S5P_VA_DMC1 + 0x30);
	} else {
		printk(KERN_ERR "Cannot find DMC port\n");
		return;
	}

	/* Find current DRAM frequency */
	tmp = s5pv210_dram_conf[ch].freq;

	do_div(tmp, freq);

	tmp1 = s5pv210_dram_conf[ch].refresh;

	do_div(tmp1, tmp);

	__raw_writel(tmp1, reg);
}

static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
{
	unsigned long reg;
	unsigned int priv_index;
	unsigned int pll_changing = 0;
	unsigned int bus_speed_changing = 0;
	unsigned int old_freq, new_freq;
	int arm_volt, int_volt;
	int ret = 0;

	mutex_lock(&set_freq_lock);

	if (no_cpufreq_access) {
#ifdef CONFIG_PM_VERBOSE
		pr_err("%s:%d denied access to %s as it is disabled "
				"temporarily\n", __FILE__, __LINE__, __func__);
#endif
		ret = -EINVAL;
		goto exit;
	}

	old_freq = policy->cur;
	new_freq = s5pv210_freq_table[index].frequency;

	/* Finding current running level index */
	if (cpufreq_frequency_table_target(policy, s5pv210_freq_table,
					   old_freq, CPUFREQ_RELATION_H,
					   &priv_index)) {
		ret = -EINVAL;
		goto exit;
	}

	arm_volt = dvs_conf[index].arm_volt;
	int_volt = dvs_conf[index].int_volt;

	if (new_freq > old_freq) {
		ret = regulator_set_voltage(arm_regulator,
				arm_volt, arm_volt_max);
		if (ret)
			goto exit;

		ret = regulator_set_voltage(int_regulator,
				int_volt, int_volt_max);
		if (ret)
			goto exit;
	}

	/* Check whether the PLL needs to be changed */
	if ((index == L0) || (priv_index == L0))
		pll_changing = 1;

	/* Check whether the system bus clock needs to be changed */
	if ((index == L4) || (priv_index == L4))
		bus_speed_changing = 1;

	if (bus_speed_changing) {
		/*
		 * Reconfigure the DRAM refresh counter value for the minimum
		 * temporary clock while changing the divider.
		 * The expected clock is 83MHz: 7.8usec / (1 / 83MHz) = 0x287
		 */
		if (pll_changing)
			s5pv210_set_refresh(DMC1, 83000);
		else
			s5pv210_set_refresh(DMC1, 100000);

		s5pv210_set_refresh(DMC0, 83000);
	}

	/*
	 * The APLL has to be changed at this level:
	 * APLL -> MPLL (for a stable transition) -> APLL.
	 * Some clock sources' clock API is not ready yet, so do not
	 * use the clock API in the code below.
	 */
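	/*
	 * Roadmap of the numbered steps below (summary added for clarity):
	 * MFC/G3D are first slowed down and reparented to MPLL, the MSYS mux
	 * is switched from APLL to MPLL so the core keeps running, the system
	 * dividers and ARM MCS setting are reprogrammed, the APLL is relocked
	 * at the new rate, and finally the muxes and MFC/G3D dividers are
	 * restored, with the DMC refresh counters adjusted around each
	 * bus-speed change.
	 */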
	if (pll_changing) {
		/*
		 * 1. Temporarily change the divider for MFC and G3D:
		 * SCLKA2M(200/1=200) -> (200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (3 << S5P_CLKDIV2_G3D_SHIFT) |
			(3 << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC and G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/*
		 * 2. Change SCLKA2M(200MHz) to SCLKMPLL in MFC_MUX, G3D MUX
		 * (200/4=50) -> (667/4=166)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (1 << S5P_CLKSRC2_G3D_SHIFT) |
			(1 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 3. DMC1 refresh count for 133MHz. If (index == L4) is
		 * true, the refresh counter has already been programmed
		 * above (0x287 @ 83MHz).
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 133000);

		/* 4. SCLKAPLL -> SCLKMPLL */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x1 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

	}

	/* Change divider */
	reg = __raw_readl(S5P_CLK_DIV0);

	reg &= ~(S5P_CLKDIV0_APLL_MASK | S5P_CLKDIV0_A2M_MASK |
		S5P_CLKDIV0_HCLK200_MASK | S5P_CLKDIV0_PCLK100_MASK |
		S5P_CLKDIV0_HCLK166_MASK | S5P_CLKDIV0_PCLK83_MASK |
		S5P_CLKDIV0_HCLK133_MASK | S5P_CLKDIV0_PCLK66_MASK);

	reg |= ((clkdiv_val[index][0] << S5P_CLKDIV0_APLL_SHIFT) |
		(clkdiv_val[index][1] << S5P_CLKDIV0_A2M_SHIFT) |
		(clkdiv_val[index][2] << S5P_CLKDIV0_HCLK200_SHIFT) |
		(clkdiv_val[index][3] << S5P_CLKDIV0_PCLK100_SHIFT) |
		(clkdiv_val[index][4] << S5P_CLKDIV0_HCLK166_SHIFT) |
		(clkdiv_val[index][5] << S5P_CLKDIV0_PCLK83_SHIFT) |
		(clkdiv_val[index][6] << S5P_CLKDIV0_HCLK133_SHIFT) |
		(clkdiv_val[index][7] << S5P_CLKDIV0_PCLK66_SHIFT));

	__raw_writel(reg, S5P_CLK_DIV0);

	do {
		reg = __raw_readl(S5P_CLKDIV_STAT0);
	} while (reg & 0xff);

	/* ARM MCS value changed */
	reg = __raw_readl(S5P_ARM_MCS_CON);
	reg &= ~0x3;
	if (index >= L3)
		reg |= 0x3;
	else
		reg |= 0x1;

	__raw_writel(reg, S5P_ARM_MCS_CON);

	if (pll_changing) {
		/* 5. Set Lock time = 30us * 24MHz = 0x2cf */
		__raw_writel(0x2cf, S5P_APLL_LOCK);

		/*
		 * 6. Turn on APLL
		 * 6-1. Set PMS values
		 * 6-2. Wait until the PLL is locked
		 */
		if (index == L0)
			__raw_writel(APLL_VAL_1000, S5P_APLL_CON);
		else
			__raw_writel(APLL_VAL_800, S5P_APLL_CON);

		do {
			reg = __raw_readl(S5P_APLL_CON);
		} while (!(reg & (0x1 << 29)));

		/*
		 * 7. Change the source clock from SCLKMPLL(667MHz)
		 * to SCLKA2M(200MHz) in MFC_MUX and G3D MUX
		 * (667/4=166) -> (200/4=50)MHz
		 */
		reg = __raw_readl(S5P_CLK_SRC2);
		reg &= ~(S5P_CLKSRC2_G3D_MASK | S5P_CLKSRC2_MFC_MASK);
		reg |= (0 << S5P_CLKSRC2_G3D_SHIFT) |
			(0 << S5P_CLKSRC2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC2);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT1);
		} while (reg & ((1 << 7) | (1 << 3)));

		/*
		 * 8. Change the divider for MFC and G3D
		 * (200/4=50) -> (200/1=200)MHz
		 */
		reg = __raw_readl(S5P_CLK_DIV2);
		reg &= ~(S5P_CLKDIV2_G3D_MASK | S5P_CLKDIV2_MFC_MASK);
		reg |= (clkdiv_val[index][10] << S5P_CLKDIV2_G3D_SHIFT) |
			(clkdiv_val[index][9] << S5P_CLKDIV2_MFC_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV2);

		/* Wait for the MFC and G3D dividers to settle */
		do {
			reg = __raw_readl(S5P_CLKDIV_STAT0);
		} while (reg & ((1 << 16) | (1 << 17)));

		/* 9. Change MPLL to APLL in MSYS_MUX */
		reg = __raw_readl(S5P_CLK_SRC0);
		reg &= ~(S5P_CLKSRC0_MUX200_MASK);
		reg |= (0x0 << S5P_CLKSRC0_MUX200_SHIFT);
		__raw_writel(reg, S5P_CLK_SRC0);

		do {
			reg = __raw_readl(S5P_CLKMUX_STAT0);
		} while (reg & (0x1 << 18));

		/*
		 * 10. DMC1 refresh counter
		 * L4 : DMC1 = 100MHz, 7.8us / (1/100MHz) = 0x30c
		 * Others : DMC1 = 200MHz, 7.8us / (1/200MHz) = 0x618
		 */
		if (!bus_speed_changing)
			s5pv210_set_refresh(DMC1, 200000);
	}

	/*
	 * The L4 level needs to change the memory bus speed, so the ONEDRAM
	 * clock divider and the memory refresh parameters must be changed
	 * as well.
	 */
	if (bus_speed_changing) {
		reg = __raw_readl(S5P_CLK_DIV6);
		reg &= ~S5P_CLKDIV6_ONEDRAM_MASK;
		reg |= (clkdiv_val[index][8] << S5P_CLKDIV6_ONEDRAM_SHIFT);
		__raw_writel(reg, S5P_CLK_DIV6);

		do {
			reg = __raw_readl(S5P_CLKDIV_STAT1);
		} while (reg & (1 << 15));

		/* Reconfigure DRAM refresh counter value */
		if (index != L4) {
			/*
			 * DMC0 : 166MHz
			 * DMC1 : 200MHz
			 */
			s5pv210_set_refresh(DMC0, 166000);
			s5pv210_set_refresh(DMC1, 200000);
		} else {
			/*
			 * DMC0 : 83MHz
			 * DMC1 : 100MHz
			 */
			s5pv210_set_refresh(DMC0, 83000);
			s5pv210_set_refresh(DMC1, 100000);
		}
	}

	if (new_freq < old_freq) {
		regulator_set_voltage(int_regulator,
				int_volt, int_volt_max);

		regulator_set_voltage(arm_regulator,
				arm_volt, arm_volt_max);
	}

	printk(KERN_DEBUG "Perf changed[L%d]\n", index);

exit:
	mutex_unlock(&set_freq_lock);
	return ret;
}

#ifdef CONFIG_PM
static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
	return 0;
}

static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
{
	return 0;
}
#endif

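/*
 * Added note: check_mem_type() reads what appears to be the memory type
 * field of the DMC's memory control register (offset 0x4, bits [11:8]) and
 * returns it for comparison against enum s5pv210_mem_type above; the bit
 * positions are taken from the code itself rather than from the datasheet.
 */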
static int check_mem_type(void __iomem *dmc_reg)
{
	unsigned long val;

	val = __raw_readl(dmc_reg + 0x4);
	val = (val & (0xf << 8));

	return val >> 8;
}

static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
{
	unsigned long mem_type;
	int ret;

	policy->clk = clk_get(NULL, "armclk");
	if (IS_ERR(policy->clk))
		return PTR_ERR(policy->clk);

	dmc0_clk = clk_get(NULL, "sclk_dmc0");
	if (IS_ERR(dmc0_clk)) {
		ret = PTR_ERR(dmc0_clk);
		goto out_dmc0;
	}

	dmc1_clk = clk_get(NULL, "hclk_msys");
	if (IS_ERR(dmc1_clk)) {
		ret = PTR_ERR(dmc1_clk);
		goto out_dmc1;
	}

	if (policy->cpu != 0) {
		ret = -EINVAL;
		goto out_dmc1;
	}

	/*
	 * check_mem_type: this driver only supports LPDDR and LPDDR2;
	 * other memory types are not supported.
	 */
	mem_type = check_mem_type(S5P_VA_DMC0);

	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
		printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
		ret = -EINVAL;
		goto out_dmc1;
	}

	/* Find the current refresh counter and frequency of each DMC */
	s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
	s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);

	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);

	return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);

out_dmc1:
	clk_put(dmc0_clk);
out_dmc0:
	clk_put(policy->clk);
	return ret;
}

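/*
 * Added note: the PM notifier below pins the CPU at SLEEP_FREQ (800 MHz)
 * before suspend and sets no_cpufreq_access so no further frequency
 * transitions happen until resume; the reboot notifier does the same before
 * a restart.  Pinning to a known frequency across suspend/reboot is a
 * common precaution on this class of SoC, though the exact reason is not
 * spelled out in the original code.
 */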
static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
					  unsigned long event, void *ptr)
{
	int ret;

	switch (event) {
	case PM_SUSPEND_PREPARE:
		ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
		if (ret < 0)
			return NOTIFY_BAD;

		/* Disable further updates of the CPU frequency */
		no_cpufreq_access = true;
		return NOTIFY_OK;
	case PM_POST_RESTORE:
	case PM_POST_SUSPEND:
		/* Re-enable updates of the CPU frequency */
		no_cpufreq_access = false;
		cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
						 unsigned long event, void *ptr)
{
	int ret;

	ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
	if (ret < 0)
		return NOTIFY_BAD;

	no_cpufreq_access = true;
	return NOTIFY_DONE;
}

static struct cpufreq_driver s5pv210_driver = {
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= s5pv210_target,
	.get		= cpufreq_generic_get,
	.init		= s5pv210_cpu_init,
	.name		= "s5pv210",
#ifdef CONFIG_PM
	.suspend	= s5pv210_cpufreq_suspend,
	.resume		= s5pv210_cpufreq_resume,
#endif
};

static struct notifier_block s5pv210_cpufreq_notifier = {
	.notifier_call = s5pv210_cpufreq_notifier_event,
};

static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
	.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};

static int __init s5pv210_cpufreq_init(void)
{
	arm_regulator = regulator_get(NULL, "vddarm");
	if (IS_ERR(arm_regulator)) {
		pr_err("failed to get regulator vddarm\n");
		return PTR_ERR(arm_regulator);
	}

	int_regulator = regulator_get(NULL, "vddint");
	if (IS_ERR(int_regulator)) {
		pr_err("failed to get regulator vddint\n");
		regulator_put(arm_regulator);
		return PTR_ERR(int_regulator);
	}

	register_pm_notifier(&s5pv210_cpufreq_notifier);
	register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);

	return cpufreq_register_driver(&s5pv210_driver);
}

late_initcall(s5pv210_cpufreq_init);