/*
 * Blackfin core clock scaling
 *
 * Copyright 2008-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/dpmc.h>

/* this is the table of CCLK frequencies, in kHz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 1,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 2,
	},
	{
		.frequency = CPUFREQ_TABLE_END,
		.index = 0,
	},
};
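/*
 * Valid entries are filled in at driver init by bfin_init_tables(); entries
 * that are not populated keep CPUFREQ_TABLE_END as the table terminator.
 */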

static struct bfin_dpm_state {
	unsigned int csel; /* system clock divider */
	unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
 * Offset and shift used to normalize the CYCLES counter to the maximum
 * core frequency; consumed by the cycles clocksource in time-ts.c, but
 * could be used elsewhere as well.
 */
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
#endif

/**************************************************************************/
static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
{

	unsigned long csel, min_cclk;
	int index;

	/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
	(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
	min_cclk = sclk * 2;
#else
	min_cclk = sclk;
#endif

#ifndef CONFIG_BF60x
	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
#else
	csel = bfin_read32(CGU0_DIV) & 0x1F;
#endif

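	/*
	 * Build up to four operating points (CCLK/1, /2, /4, /8), starting
	 * from the current CSEL setting; stop once the divided clock would
	 * fall below min_cclk or the CSEL divider is exhausted.
	 */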
	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
		bfin_freq_table[index].frequency = cclk >> index;
#ifndef CONFIG_BF60x
		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
		dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
#else
		dpm_state_table[index].csel = csel;
		dpm_state_table[index].tscale = TIME_SCALE >> index;
#endif

		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
			 bfin_freq_table[index].frequency,
			 dpm_state_table[index].csel,
			 dpm_state_table[index].tscale);
	}
	return;
}

static void bfin_adjust_core_timer(void *info)
{
	unsigned int tscale;
	unsigned int index = *(unsigned int *)info;

	/* we have to adjust the core timer, because it is using cclk */
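	/*
	 * Scaling TSCALE keeps the core timer tick period roughly
	 * constant as CCLK changes.
	 */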
	tscale = dpm_state_table[index].tscale;
	bfin_write_TSCALE(tscale);
	return;
}

static unsigned int bfin_getfreq_khz(unsigned int cpu)
{
	/* Both CoreA/B have the same core clock */
	return get_cclk() / 1000;
}

#ifdef CONFIG_BF60x
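/*
 * On BF60x the core clock is set through the clock framework ("CCLK" clk)
 * rather than by programming PLL_DIV directly as on older parts; see
 * bfin_target() below.
 */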
int cpu_set_cclk(int cpu, unsigned long new)
{
	struct clk *clk;
	int ret;

	clk = clk_get(NULL, "CCLK");
	if (IS_ERR(clk))
		return -ENODEV;

	ret = clk_set_rate(clk, new);
	clk_put(clk);
	return ret;
}
#endif

static int bfin_target(struct cpufreq_policy *poli,
			unsigned int target_freq, unsigned int relation)
{
#ifndef CONFIG_BF60x
	unsigned int plldiv;
#endif
	unsigned int index, cpu;
	unsigned long flags, cclk_hz;
	struct cpufreq_freqs freqs;
	static unsigned long lpj_ref;
	static unsigned int lpj_ref_freq;
	int ret = 0;

#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	cycles_t cycles;
#endif

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

		if (!policy)
			continue;

		if (cpufreq_frequency_table_target(policy, bfin_freq_table,
				target_freq, relation, &index))
			return -EINVAL;

		cclk_hz = bfin_freq_table[index].frequency;

		freqs.old = bfin_getfreq_khz(0);
		freqs.new = cclk_hz;
		freqs.cpu = cpu;

		pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
			 cclk_hz, target_freq, freqs.old);

		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
		if (cpu == CPUFREQ_CPU) {
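			/*
			 * Only CPUFREQ_CPU actually reprograms the clock
			 * hardware; the other CPUs just get the transition
			 * notifications.
			 */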
			flags = hard_local_irq_save();
#ifndef CONFIG_BF60x
			plldiv = (bfin_read_PLL_DIV() & SSEL) |
					dpm_state_table[index].csel;
			bfin_write_PLL_DIV(plldiv);
#else
			ret = cpu_set_cclk(cpu, freqs.new * 1000);
			if (ret != 0) {
				pr_debug("cpufreq set freq failed %d\n", ret);
				hard_local_irq_restore(flags);
				break;
			}
#endif
			on_each_cpu(bfin_adjust_core_timer, &index, 1);
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
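			/*
			 * The CYCLES counter keeps running at the new CCLK;
			 * record an offset and the new divider index so the
			 * cycles clocksource can keep counting as if the core
			 * were still at its maximum frequency.
			 */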
			cycles = get_cycles();
			SSYNC();
			cycles += 10; /* ~10 cycles we lose after get_cycles() */
			__bfin_cycles_off +=
				(cycles << __bfin_cycles_mod) - (cycles << index);
			__bfin_cycles_mod = index;
#endif
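			/*
			 * loops_per_jiffy is calibrated against CCLK, so
			 * rescale it to keep udelay() accurate.
			 */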
			if (!lpj_ref_freq) {
				lpj_ref = loops_per_jiffy;
				lpj_ref_freq = freqs.old;
			}
			if (freqs.new != freqs.old) {
				loops_per_jiffy = cpufreq_scale(lpj_ref,
						lpj_ref_freq, freqs.new);
			}
			hard_local_irq_restore(flags);
		}
		/* TODO: just test case for cycles clock source, remove later */
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}

	pr_debug("cpufreq: done\n");
	return ret;
}

static int bfin_verify_speed(struct cpufreq_policy *policy)
{
	return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}

static int __bfin_cpu_init(struct cpufreq_policy *policy)
{

	unsigned long cclk, sclk;

	cclk = get_cclk() / 1000;
	sclk = get_sclk() / 1000;

	if (policy->cpu == CPUFREQ_CPU)
		bfin_init_tables(cclk, sclk);

	policy->cpuinfo.transition_latency = 50000; /* 50us assumed */

	policy->cur = cclk;
	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
}

static struct freq_attr *bfin_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver bfin_driver = {
	.verify = bfin_verify_speed,
	.target = bfin_target,
	.get = bfin_getfreq_khz,
	.init = __bfin_cpu_init,
	.name = "bfin cpufreq",
	.owner = THIS_MODULE,
	.attr = bfin_freq_attr,
};

static int __init bfin_cpu_init(void)
{
	return cpufreq_register_driver(&bfin_driver);
}

static void __exit bfin_cpu_exit(void)
{
	cpufreq_unregister_driver(&bfin_driver);
}

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("cpufreq driver for Blackfin");
MODULE_LICENSE("GPL");

module_init(bfin_cpu_init);
module_exit(bfin_cpu_exit);