/*
 * Blackfin core clock scaling
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/kernel.h>
#include <linux/module.h>	/* MODULE_*() and module_init()/module_exit() */
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/dpmc.h>

#define CPUFREQ_CPU 0

/*
 * this is the table of CCLK frequencies, in kHz, filled in at boot by
 * bfin_init_tables(); .index is the entry in the auxiliary dpm_state_table[]
 */
static struct cpufreq_frequency_table bfin_freq_table[] = {
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 1,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 2,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
};

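/*
 * Divider settings paired one-to-one with bfin_freq_table[]: csel is
 * pre-shifted into its PLL_DIV bit position, tscale is the matching
 * core-timer prescaler (the core timer runs off CCLK).
 */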
static struct bfin_dpm_state {
	unsigned int csel; /* core clock divider select (CSEL) */
	unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
 * Offset/shift normalizing CYCLES to the maximum frequency; used by the
 * time-ts cycles clocksource, but could be used elsewhere as well.
 */
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
#endif

/**************************************************************************/
static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
{
	unsigned long csel, min_cclk;
	int index;

	/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
	(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
	min_cclk = sclk * 2;
#else
	min_cclk = sclk;
#endif
	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);

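	/*
	 * Fill one entry per halving of CCLK, starting from the current CSEL
	 * setting, until the frequency drops below min_cclk or CSEL reaches
	 * its maximum divider (/8).
	 */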
	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
		bfin_freq_table[index].frequency = cclk >> index;
		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
		dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;

		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
			 bfin_freq_table[index].frequency,
			 dpm_state_table[index].csel,
			 dpm_state_table[index].tscale);
	}
	return;
}

static void bfin_adjust_core_timer(void *info)
{
	unsigned int tscale;
	unsigned int index = *(unsigned int *)info;

	/* we have to adjust the core timer, because it is using cclk */
	tscale = dpm_state_table[index].tscale;
	bfin_write_TSCALE(tscale);
	return;
}

static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
	/* Both CoreA/B have the same core clock */
	return get_cclk() / 1000;
}

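/*
 * cpufreq .target hook: pick the closest table entry for each online CPU's
 * policy, reprogram the CCLK divider once (from CPUFREQ_CPU), resync the
 * core timer on all CPUs, and rebase the CYCLES clocksource offset and
 * loops_per_jiffy.
 */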
static int bfin_target(struct cpufreq_policy *poli,
		       unsigned int target_freq, unsigned int relation)
{
	unsigned int index, plldiv, cpu;
	unsigned long flags, cclk_hz;
	struct cpufreq_freqs freqs;
	static unsigned long lpj_ref;
	static unsigned int lpj_ref_freq;

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	cycles_t cycles;
#endif

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

		if (!policy)
			continue;

		if (cpufreq_frequency_table_target(policy, bfin_freq_table,
				target_freq, relation, &index)) {
			cpufreq_cpu_put(policy);
			return -EINVAL;
		}

		cclk_hz = bfin_freq_table[index].frequency;
		/* drop the reference taken by cpufreq_cpu_get() */
		cpufreq_cpu_put(policy);

		freqs.old = bfin_getfreq_khz(0);
		freqs.new = cclk_hz;
		freqs.cpu = cpu;

		pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
			 cclk_hz, target_freq, freqs.old);

		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
		if (cpu == CPUFREQ_CPU) {
			flags = hard_local_irq_save();
			plldiv = (bfin_read_PLL_DIV() & SSEL) |
					dpm_state_table[index].csel;
			bfin_write_PLL_DIV(plldiv);
			on_each_cpu(bfin_adjust_core_timer, &index, 1);
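			/*
			 * The free-running CYCLES counter ticks at CCLK, and
			 * the cycles clocksource normalizes it to the maximum
			 * frequency via __bfin_cycles_mod/__bfin_cycles_off;
			 * rebase the offset so time stays continuous across
			 * the divider change.
			 */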
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
			cycles = get_cycles();
			SSYNC();
			cycles += 10; /* ~10 cycles we lose after get_cycles() */
			__bfin_cycles_off +=
				(cycles << __bfin_cycles_mod) - (cycles << index);
			__bfin_cycles_mod = index;
#endif
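			/*
			 * loops_per_jiffy scales with CCLK: capture a
			 * reference value on the first transition, then
			 * rescale it for the new frequency.
			 */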
			if (!lpj_ref_freq) {
				lpj_ref = loops_per_jiffy;
				lpj_ref_freq = freqs.old;
			}
			if (freqs.new != freqs.old) {
				loops_per_jiffy = cpufreq_scale(lpj_ref,
						lpj_ref_freq, freqs.new);
			}
			hard_local_irq_restore(flags);
		}
		/* TODO: just test case for cycles clock source, remove later */
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	pr_debug("cpufreq: done\n");
	return 0;
}

static int bfin_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}

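/*
 * cpufreq .init hook: build the frequency/divider tables once (when the
 * policy for CPUFREQ_CPU comes up) and register the table with the core.
 */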
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
	unsigned long cclk, sclk;

	cclk = get_cclk() / 1000;
	sclk = get_sclk() / 1000;

	if (policy->cpu == CPUFREQ_CPU)
		bfin_init_tables(cclk, sclk);

	policy->cpuinfo.transition_latency = 50000; /* 50us assumed */

	policy->cur = cclk;
	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}

static struct freq_attr *bfin_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver bfin_driver = {
	.verify = bfin_verify_speed,
	.target = bfin_target,
	.get = bfin_getfreq_khz,
	.init = __bfin_cpu_init,
	.name = "bfin cpufreq",
	.owner = THIS_MODULE,
	.attr = bfin_freq_attr,
};

static int __init bfin_cpu_init(void)
{
	return cpufreq_register_driver(&bfin_driver);
}

static void __exit bfin_cpu_exit(void)
{
	cpufreq_unregister_driver(&bfin_driver);
}

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");

module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);