blob: a9ca28447b49874034bc05974308e64b4cfa22a4 [file] [log] [blame]
/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/device.h>
14#include <linux/smp.h>
15#include <linux/cpu.h>
16#include <linux/clockchips.h>
17#include <linux/interrupt.h>
18#include <linux/of_irq.h>
19#include <linux/io.h>
20
21#include <asm/arch_timer.h>
Marc Zyngier82668912013-01-10 11:13:07 +000022#include <asm/virt.h>
Mark Rutland8a4da6e2012-11-12 14:33:44 +000023
24#include <clocksource/arm_arch_timer.h>
25
/* Counter frequency in Hz; 0 until probed from DT or CNTFRQ. */
static u32 arch_timer_rate;

/* Indices into arch_timer_ppi[] for the per-CPU timer interrupts. */
enum ppi_nr {
        PHYS_SECURE_PPI,
        PHYS_NONSECURE_PPI,
        VIRT_PPI,
        HYP_PPI,
        MAX_TIMER_PPI
};

/* Linux IRQ number for each PPI; 0 means firmware did not provide one. */
static int arch_timer_ppi[MAX_TIMER_PPI];

/* Per-CPU clock_event_device backing each CPU's architected timer. */
static struct clock_event_device __percpu *arch_timer_evt;

/*
 * Prefer the virtual timer/counter by default; cleared in
 * arch_timer_init() when running with HYP mode available or when no
 * virtual-timer interrupt was provided.
 */
static bool arch_timer_use_virtual = true;
/*
 * Architected system timer support.
 */

/*
 * Write an architected-timer register through the CP15 interface.
 * The clock_event_device argument is unused here; it keeps the
 * signature uniform across the register-access layer (presumably a
 * hook point for non-CP15 timer frames — TODO confirm).
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
                          struct clock_event_device *clk)
{
        arch_timer_reg_write_cp15(access, reg, val);
}
52
/*
 * Read an architected-timer register through the CP15 interface.
 * The clock_event_device argument is unused; see arch_timer_reg_write.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
                        struct clock_event_device *clk)
{
        return arch_timer_reg_read_cp15(access, reg);
}
59
Stephen Boyde09f3cc2013-07-18 16:59:28 -070060static __always_inline irqreturn_t timer_handler(const int access,
Mark Rutland8a4da6e2012-11-12 14:33:44 +000061 struct clock_event_device *evt)
62{
63 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -070064 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +000065 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
66 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -070067 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
Mark Rutland8a4da6e2012-11-12 14:33:44 +000068 evt->event_handler(evt);
69 return IRQ_HANDLED;
70 }
71
72 return IRQ_NONE;
73}
74
75static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
76{
77 struct clock_event_device *evt = dev_id;
78
79 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
80}
81
82static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
83{
84 struct clock_event_device *evt = dev_id;
85
86 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
87}
88
Stephen Boyd60faddf2013-07-18 16:59:31 -070089static __always_inline void timer_set_mode(const int access, int mode,
90 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +000091{
92 unsigned long ctrl;
93 switch (mode) {
94 case CLOCK_EVT_MODE_UNUSED:
95 case CLOCK_EVT_MODE_SHUTDOWN:
Stephen Boyd60faddf2013-07-18 16:59:31 -070096 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +000097 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
Stephen Boyd60faddf2013-07-18 16:59:31 -070098 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +000099 break;
100 default:
101 break;
102 }
103}
104
/* set_mode callback bound when the virtual timer is in use. */
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}
110
/* set_mode callback bound when the physical timer is in use. */
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}
116
Stephen Boyd60faddf2013-07-18 16:59:31 -0700117static __always_inline void set_next_event(const int access, unsigned long evt,
118 struct clock_event_device *clk)
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000119{
120 unsigned long ctrl;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700121 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000122 ctrl |= ARCH_TIMER_CTRL_ENABLE;
123 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
Stephen Boyd60faddf2013-07-18 16:59:31 -0700124 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
125 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000126}
127
/* set_next_event callback for the virtual timer; always succeeds. */
static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
}
134
/* set_next_event callback for the physical timer; always succeeds. */
static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
}
141
/*
 * Per-CPU bring-up: configure and register this CPU's clockevent and
 * enable its timer PPI(s).  Runs on the boot CPU from
 * arch_timer_register() and on secondaries via the CPU_STARTING
 * notifier, so it executes on the CPU it configures.
 *
 * Returns 0 (callers ignore failure; there is no error path here).
 */
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
        clk->name = "arch_sys_timer";
        clk->rating = 450;
        /* Bind the callbacks to whichever timer (virt/phys) we chose. */
        if (arch_timer_use_virtual) {
                clk->irq = arch_timer_ppi[VIRT_PPI];
                clk->set_mode = arch_timer_set_mode_virt;
                clk->set_next_event = arch_timer_set_next_event_virt;
        } else {
                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
                clk->set_mode = arch_timer_set_mode_phys;
                clk->set_next_event = arch_timer_set_next_event_phys;
        }

        clk->cpumask = cpumask_of(smp_processor_id());

        /* Start from a known-disabled state before registering. */
        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);

        /* 0xf/0x7fffffff: min/max programmable ticks for the TVAL register. */
        clockevents_config_and_register(clk, arch_timer_rate,
                                        0xf, 0x7fffffff);

        /* Unmask the PPI(s) on this CPU; the non-secure one is optional. */
        if (arch_timer_use_virtual)
                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
        else {
                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
        }

        /* Expose the counter to userspace as configured by the arch code. */
        arch_counter_set_user_access();

        return 0;
}
176
177static int arch_timer_available(void)
178{
179 u32 freq;
180
181 if (arch_timer_rate == 0) {
182 freq = arch_timer_get_cntfrq();
183
184 /* Check the timer frequency. */
185 if (freq == 0) {
186 pr_warn("Architected timer frequency not available\n");
187 return -EINVAL;
188 }
189
190 arch_timer_rate = freq;
191 }
192
193 pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
194 (unsigned long)arch_timer_rate / 1000000,
195 (unsigned long)(arch_timer_rate / 10000) % 100,
196 arch_timer_use_virtual ? "virt" : "phys");
197 return 0;
198}
199
/* Public accessor: counter frequency in Hz (0 if not yet probed). */
u32 arch_timer_get_rate(void)
{
        return arch_timer_rate;
}
204
/* Public accessor: current value of the virtual counter (CNTVCT). */
u64 arch_timer_read_counter(void)
{
        return arch_counter_get_cntvct();
}
209
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000210static cycle_t arch_counter_read(struct clocksource *cs)
211{
Mark Rutland0d651e42013-01-30 17:51:26 +0000212 return arch_counter_get_cntvct();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000213}
214
215static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
216{
Mark Rutland0d651e42013-01-30 17:51:26 +0000217 return arch_counter_get_cntvct();
Mark Rutland8a4da6e2012-11-12 14:33:44 +0000218}
219
/*
 * System clocksource backed by the architected counter.  The 56-bit
 * mask matches the cyclecounter below — presumably the architectural
 * minimum counter width; confirm against the ARM ARM before widening.
 */
static struct clocksource clocksource_counter = {
        .name   = "arch_sys_counter",
        .rating = 400,
        .read   = arch_counter_read,
        .mask   = CLOCKSOURCE_MASK(56),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
227
/*
 * cyclecounter feeding the timecounter below.  mult/shift are copied
 * from clocksource_counter in arch_timer_register() after
 * clocksource_register_hz() has computed them.
 */
static struct cyclecounter cyclecounter = {
        .read = arch_counter_read_cc,
        .mask = CLOCKSOURCE_MASK(56),
};
232
/* Timecounter over the architected counter; set up in arch_timer_register(). */
static struct timecounter timecounter;

/* Public accessor for the shared timecounter instance. */
struct timecounter *arch_timer_get_timecounter(void)
{
        return &timecounter;
}
239
/*
 * Per-CPU teardown (CPU_DYING path): disable this CPU's timer PPI(s)
 * and shut the clockevent down.  Mirrors arch_timer_setup().
 */
static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
                 clk->irq, smp_processor_id());

        if (arch_timer_use_virtual)
                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
        else {
                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
                /* Non-secure PPI is optional; only disable it if provided. */
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
        }

        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}
255
/*
 * CPU hotplug notifier: bring the timer up on CPU_STARTING (runs on
 * the incoming CPU) and tear it down on CPU_DYING.  All other
 * transitions are ignored.
 */
static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
                                           unsigned long action, void *hcpu)
{
        /*
         * Grab cpu pointer in each case to avoid spurious
         * preemptible warnings
         */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                arch_timer_setup(this_cpu_ptr(arch_timer_evt));
                break;
        case CPU_DYING:
                arch_timer_stop(this_cpu_ptr(arch_timer_evt));
                break;
        }

        return NOTIFY_OK;
}
274
/* Hotplug notifier block registered in arch_timer_register(). */
static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
        .notifier_call = arch_timer_cpu_notify,
};
278
/*
 * One-time registration: validate the frequency, allocate the per-CPU
 * clockevents, register the clocksource/timecounter, request the
 * per-CPU IRQ(s), hook CPU hotplug, and configure the boot CPU.
 *
 * Error handling unwinds in reverse order via gotos; note that the
 * clocksource/timecounter registrations are not undone on failure.
 * Returns 0 on success or a negative errno.
 */
static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        err = arch_timer_available();
        if (err)
                goto out;

        arch_timer_evt = alloc_percpu(struct clock_event_device);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
        /* Reuse the clocksource's computed scaling for the cyclecounter. */
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&timecounter, &cyclecounter,
                         arch_counter_get_cntvct());

        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
        } else {
                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                /* Non-secure PPI is optional; on failure drop the secure one too. */
                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
        }

        if (err) {
                pr_err("arch_timer: can't register interrupt %d (%d)\n",
                       ppi, err);
                goto out_free;
        }

        err = register_cpu_notifier(&arch_timer_cpu_nb);
        if (err)
                goto out_free_irq;

        /* Immediately configure the timer on the boot CPU */
        arch_timer_setup(this_cpu_ptr(arch_timer_evt));

        return 0;

out_free_irq:
        if (arch_timer_use_virtual)
                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
        else {
                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                arch_timer_evt);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
                                        arch_timer_evt);
        }

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}
349
/*
 * Device-tree entry point (via CLOCKSOURCE_OF_DECLARE): parse the
 * timer node, choose the virtual or physical timer, and register
 * everything.  A second matching node is ignored.
 */
static void __init arch_timer_init(struct device_node *np)
{
        u32 freq;
        int i;

        /* A non-zero rate means a previous node already initialised us. */
        if (arch_timer_get_rate()) {
                pr_warn("arch_timer: multiple nodes in dt, skipping\n");
                return;
        }

        /* Try to determine the frequency from the device tree or CNTFRQ */
        if (!of_property_read_u32(np, "clock-frequency", &freq))
                arch_timer_rate = freq;

        /* PPIs are listed in DT in enum ppi_nr order; 0 = not provided. */
        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

        of_node_put(np);

        /*
         * If HYP mode is available, we know that the physical timer
         * has been configured to be accessible from PL1. Use it, so
         * that a guest can use the virtual timer instead.
         *
         * If no interrupt provided for virtual timer, we'll have to
         * stick to the physical timer. It'd better be accessible...
         */
        if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
                arch_timer_use_virtual = false;

                /*
                 * NOTE(review): this insists on BOTH physical PPIs,
                 * yet setup/register treat the non-secure PPI as
                 * optional — verify whether requiring both is intended.
                 */
                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
                        return;
                }
        }

        arch_timer_register();
        arch_timer_arch_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);