/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

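/*
 * Registers in the memory-mapped timer's CNTCTLBase control frame,
 * accessed via cntctlbase in arch_timer_mem_init() below.
 */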
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

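/*
 * Registers in an individual CNTBase timer frame, accessed via
 * timer->base and arch_counter_base.
 */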
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

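/*
 * The timer asserts its interrupt while ISTATUS is set, so mask it via
 * IMASK before invoking the event handler, which will either program a
 * new expiry or shut the timer down.
 */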
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

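/*
 * CNTKCTL.EVNTI is a 4-bit field, hence the clamp to 15 below; pick the
 * power-of-two divider closest to the ratio of the counter rate to the
 * requested event stream frequency.
 */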
static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static int arch_timer_setup(struct clock_event_device *clk)
{
	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);

	if (arch_timer_has_nonsecure_ppi())
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

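/*
 * The memory-mapped counter is read as two 32-bit halves: re-read the
 * high word until it is stable so that a carry between the two accesses
 * cannot produce a torn value.
 */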
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

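/* Shared with KVM, which uses the timecounter and the virtual timer PPI. */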
static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;

		/* If the clocksource name is "arch_sys_counter" the
		 * VDSO will attempt to read the CP15-based counter.
		 * Ensure this does not happen when CP15-based
		 * counter is not available.
		 */
		clocksource_counter.name = "arch_mem_counter";
	}

	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
		break;
	case CPU_DYING:
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb = {
	.notifier_call = arch_timer_cpu_notify,
};

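/*
 * CNTKCTL is not preserved when a CPU is powered down, so save it on
 * CPU_PM_ENTER and restore it on exit (or on a failed entry).
 */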
#ifdef CONFIG_CPU_PM
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_unreg_notify:
	unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq;
	int ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		u32 n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	ret = -ENXIO;
	base = arch_counter_base = of_iomap(best_frame, 0);
	if (!base) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	ret = -EINVAL;
	if (!irq) {
863 pr_err("arch_timer: Frame missing %s irq",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);

#ifdef CONFIG_ACPI
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
		gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
		gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
		gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
		gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif