/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
static bool vdso_default = true;

static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
/*
 * The only way to confirm that a read is correct is to verify that the
 * second of two back-to-back reads is larger than the first by less than
 * 32, so clear the lower 5 bits to check whether the difference exceeds
 * that bound. Theoretically the erratum should not occur more than twice
 * in succession when reading the system counter, but interrupts may lead
 * to additional bad reads and trigger the warning, so the number of
 * retries is set far beyond the number of iterations the loop has been
 * observed to take.
 */
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ /* Sentinel indicating the end of the OEM array */ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
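/*
 * Erratum 858921: a CNTVCT read may return a wrong value when the
 * counter crosses a 32-bit boundary. Read it twice; if bit 32 changed
 * between the two reads, return the first value, otherwise the second.
 */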
static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
	       timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

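/*
 * Bypass TVAL on affected cores: compute an absolute expiry from the
 * current counter value and program the comparator (CVAL) directly.
 */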
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_tval_virt(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_tval_phys(unsigned long evt,
					    struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
		.desc = "Freescale erratum a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

	/* Iterate over the ACPI OEM info array, looking for a match */
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

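/*
 * Walk the workaround table and return the first entry of the given
 * match type whose match function accepts @arg, or NULL if none does.
 */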
static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

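/*
 * Install @wa either on the calling CPU only (@local) or on every
 * possible CPU, and flip the static key so counter reads go through the
 * out-of-line accessors.
 */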
static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	static_branch_enable(&arch_timer_read_ool_enabled);

	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */
	if (wa->read_cntvct_el0) {
		clocksource_counter.archdata.vdso_direct = false;
		vdso_default = false;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	if (needs_unstable_timer_counter_workaround()) {
		const struct arch_timer_erratum_workaround *__wa;
		__wa = __this_cpu_read(timer_unstable_counter_workaround);
		if (__wa && wa != __wa)
			pr_warn("Can't enable workaround for %s (clashes with %s)\n",
				wa->desc, __wa->desc);

		if (__wa)
			return;
	}

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

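/*
 * If the current CPU has a workaround providing an out-of-line
 * replacement for @fn, invoke it, store its return value in @r and
 * evaluate to true; otherwise evaluate to false so the caller falls
 * back to the default implementation.
 */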
#define erratum_handler(fn, r, ...)					\
({									\
	bool __val;							\
	if (needs_unstable_timer_counter_workaround()) {		\
		const struct arch_timer_erratum_workaround *__wa;	\
		__wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (__wa && __wa->fn) {					\
			r = __wa->fn(__VA_ARGS__);			\
			__val = true;					\
		} else {						\
			__val = false;					\
		}							\
	} else {							\
		__val = false;						\
	}								\
	__val;								\
})

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	const struct arch_timer_erratum_workaround *wa;

	wa = __this_cpu_read(timer_unstable_counter_workaround);
	return wa && wa->read_cntvct_el0;
}
#else
#define arch_timer_check_ool_workaround(t,a)		do { } while(0)
#define erratum_set_next_event_tval_virt(...)		({BUG(); 0;})
#define erratum_set_next_event_tval_phys(...)		({BUG(); 0;})
#define erratum_handler(fn, r, ...)			({false;})
#define arch_timer_this_cpu_has_cntvct_wa()		({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */

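/*
 * Common interrupt handler: if the timer has fired, mask its interrupt
 * and hand the event to the clockevent core.
 */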
static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

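/*
 * Program a new expiry: write the relative count into TVAL, then update
 * CTRL to enable the timer with its interrupt unmasked.
 */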
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_virt, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	int ret;

	if (erratum_handler(set_next_event_phys, ret, evt, clk))
		return ret;

	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

	/* Find the closest power of two to the divisor */
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;
	/* enable event stream */
	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have been
	 * disabled already, though.
	 */
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
		arch_timer_ppi[PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ;
	 * if ACPI is enabled, get the frequency from CNTFRQ only.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_CP15_TIMER ? "cp15" : "",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
		type & ARCH_MEM_TIMER ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_CP15_TIMER ?
			(arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
		type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

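/*
 * The MMIO counter is split across two 32-bit registers; re-read the
 * high word until it is stable so the combined 64-bit value is
 * consistent.
 */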
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case PHYS_SECURE_PPI:
	case PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

	/* Register and immediately configure the timer on the boot CPU */
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

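/*
 * Set up the clockevent for a memory-mapped timer frame and request its
 * (non-percpu) interrupt.
 */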
static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("arch_timer: Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};

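/*
 * Return true if a matching, available DT node exists for a timer type
 * that has not been probed yet.
 */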
static bool __init
arch_timer_needs_probing(int type, const struct of_device_id *matches)
{
	struct device_node *dn;
	bool needs_probing = false;

	dn = of_find_matching_node(NULL, matches);
	if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
		needs_probing = true;
	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

static int __init arch_timer_init(void)
{
	int ret;
	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 *
	 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
	 * accesses to CNTP_*_EL1 registers are silently redirected to
	 * their CNTHP_*_EL2 counterparts, and use a different PPI
	 * number.
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		bool has_ppi;

		if (is_kernel_in_hyp_mode()) {
			arch_timer_uses_ppi = HYP_PPI;
			has_ppi = !!arch_timer_ppi[HYP_PPI];
		} else {
			arch_timer_uses_ppi = PHYS_SECURE_PPI;
			has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
				   !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
		}

		if (!has_ppi) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	ret = arch_timer_register();
	if (ret)
		return ret;

	ret = arch_timer_common_init();
	if (ret)
		return ret;

	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];

	return 0;
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_CP15_TIMER;
	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_detect_rate(NULL, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_dt, np);

	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = PHYS_SECURE_PPI;

	/* On some systems, the counter stops ticking when in suspend. */
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static int __init arch_timer_mem_init(struct device_node *np)
{
	struct device_node *frame, *best_frame = NULL;
	void __iomem *cntctlbase, *base;
	unsigned int irq, ret = -EINVAL;
	u32 cnttidr;

	arch_timers_present |= ARCH_MEM_TIMER;
	cntctlbase = of_iomap(np, 0);
	if (!cntctlbase) {
		pr_err("arch_timer: Can't find CNTCTLBase\n");
		return -ENXIO;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */
	for_each_available_child_of_node(np, frame) {
		int n;
		u32 cntacr;

		if (of_property_read_u32(frame, "frame-number", &n)) {
			pr_err("arch_timer: Missing frame-number\n");
			of_node_put(frame);
			goto out;
		}

		/* Try enabling everything, and see what sticks */
		cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
		writel_relaxed(cntacr, cntctlbase + CNTACR(n));
		cntacr = readl_relaxed(cntctlbase + CNTACR(n));

		if ((cnttidr & CNTTIDR_VIRT(n)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			of_node_put(best_frame);
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		of_node_put(best_frame);
		best_frame = of_node_get(frame);
	}

	ret = -ENXIO;
	base = arch_counter_base = of_io_request_and_map(best_frame, 0,
							 "arch_mem_timer");
	if (IS_ERR(base)) {
		pr_err("arch_timer: Can't map frame's registers\n");
		goto out;
	}

	if (arch_timer_mem_use_virtual)
		irq = irq_of_parse_and_map(best_frame, 1);
	else
		irq = irq_of_parse_and_map(best_frame, 0);

	ret = -EINVAL;
	if (!irq) {
		pr_err("arch_timer: Frame missing %s irq\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		goto out;
	}

	arch_timer_detect_rate(base, np);
	ret = arch_timer_mem_register(base, irq);
	if (ret)
		goto out;

	return arch_timer_common_init();
out:
	iounmap(cntctlbase);
	of_node_put(best_frame);
	return ret;
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		       arch_timer_mem_init);

#ifdef CONFIG_ACPI
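/*
 * Translate a GTDT interrupt number and its flags into a Linux IRQ via
 * the ACPI GSI mapping.
 */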
static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
{
	int trigger, polarity;

	if (!interrupt)
		return 0;

	trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
			: ACPI_LEVEL_SENSITIVE;

	polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
			: ACPI_ACTIVE_HIGH;

	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
}

/* Initialize per-processor generic timer */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	struct acpi_table_gtdt *gtdt;

	if (arch_timers_present & ARCH_CP15_TIMER) {
		pr_warn("arch_timer: already initialized, skipping\n");
		return -EINVAL;
	}

	gtdt = container_of(table, struct acpi_table_gtdt, header);

	arch_timers_present |= ARCH_CP15_TIMER;

	arch_timer_ppi[PHYS_SECURE_PPI] =
		map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
					    gtdt->secure_el1_flags);

	arch_timer_ppi[PHYS_NONSECURE_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
					    gtdt->non_secure_el1_flags);

	arch_timer_ppi[VIRT_PPI] =
		map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
					    gtdt->virtual_timer_flags);

	arch_timer_ppi[HYP_PPI] =
		map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
					    gtdt->non_secure_el2_flags);

	/* Get the frequency from CNTFRQ */
	arch_timer_detect_rate(NULL, NULL);

	/* Always-on capability */
	arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);

	/* Check for globally applicable workarounds */
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	arch_timer_init();
	return 0;
}
CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif